From 574bf1bfd66d6dba5826bb7190e409a331fff29e Mon Sep 17 00:00:00 2001 From: Andrew Gallant Date: Wed, 19 Apr 2023 22:44:43 -0400 Subject: [PATCH] impl: cut over to regex-automata --- .github/workflows/ci.yml | 206 +- Cargo.toml | 37 - Cross.toml | 7 + bench/src/bench.rs | 3 - bench/src/rust_compile.rs | 67 - examples/shootout-regex-dna-replace.rs | 17 - newtests/bytes.rs | 6 + newtests/bytes_set.rs | 6 + newtests/misc.rs | 8 + newtests/string.rs | 6 + newtests/string_set.rs | 6 + newtests/tests.rs | 1 + regex-automata/src/dfa/regex.rs | 1 + regex-automata/src/dfa/sparse.rs | 76 +- regex-automata/src/hybrid/regex.rs | 5 +- regex-automata/src/hybrid/search.rs | 4 +- regex-automata/src/meta/error.rs | 55 +- regex-automata/src/meta/regex.rs | 44 + regex-automata/src/meta/strategy.rs | 4 +- regex-automata/src/meta/wrappers.rs | 6 +- regex-automata/src/nfa/thompson/builder.rs | 3 + regex-automata/src/nfa/thompson/error.rs | 12 + regex-automata/src/nfa/thompson/map.rs | 9 +- regex-automata/src/nfa/thompson/nfa.rs | 3 + regex-automata/src/util/captures.rs | 4 +- regex-automata/src/util/pool.rs | 4 +- regex-automata/src/util/search.rs | 31 +- regex-automata/test | 5 - .../tests/gen/sparse/multi_pattern_v2.rs | 2 +- .../sparse/multi_pattern_v2_fwd.bigendian.dfa | Bin 3476 -> 3476 bytes .../sparse/multi_pattern_v2_rev.bigendian.dfa | Bin 1920 -> 1920 bytes regex-automata/tests/hybrid/suite.rs | 14 + regex-automata/tests/meta/suite.rs | 71 +- scripts/scrape-crates-io | 180 - src/backtrack.rs | 282 -- src/compile.rs | 1324 ------- src/dfa.rs | 1945 ---------- src/error.rs | 50 +- src/exec.rs | 1748 --------- src/expand.rs | 247 -- src/find_byte.rs | 5 +- src/input.rs | 432 --- src/lib.rs | 27 - src/literal/imp.rs | 413 --- src/literal/mod.rs | 55 - src/pikevm.rs | 360 -- src/pool.rs | 333 -- src/prog.rs | 451 --- src/re_builder.rs | 121 +- src/re_bytes.rs | 268 +- src/re_set.rs | 133 +- src/re_trait.rs | 294 -- src/re_unicode.rs | 274 +- src/sparse.rs | 84 - src/utf8.rs | 264 
-- test | 10 +- testdata/regression.toml | 27 + tests/bytes.rs | 2 +- tests/consistent.rs | 238 -- tests/crates_regex.rs | 3287 ----------------- tests/fowler.rs | 33 +- tests/multiline.rs | 6 +- tests/regression_fuzz.rs | 3 +- tests/test_backtrack.rs | 56 - tests/test_backtrack_bytes.rs | 55 - tests/test_backtrack_utf8bytes.rs | 58 - tests/test_crates_regex.rs | 54 - tests/test_default.rs | 8 +- tests/test_nfa.rs | 50 - tests/test_nfa_bytes.rs | 55 - tests/test_nfa_utf8bytes.rs | 54 - 71 files changed, 972 insertions(+), 13037 deletions(-) create mode 100644 Cross.toml delete mode 100644 bench/src/rust_compile.rs delete mode 100644 examples/shootout-regex-dna-replace.rs create mode 100644 newtests/misc.rs delete mode 100755 scripts/scrape-crates-io delete mode 100644 src/backtrack.rs delete mode 100644 src/compile.rs delete mode 100644 src/dfa.rs delete mode 100644 src/exec.rs delete mode 100644 src/expand.rs delete mode 100644 src/input.rs delete mode 100644 src/literal/imp.rs delete mode 100644 src/literal/mod.rs delete mode 100644 src/pikevm.rs delete mode 100644 src/pool.rs delete mode 100644 src/prog.rs delete mode 100644 src/re_trait.rs delete mode 100644 src/sparse.rs delete mode 100644 src/utf8.rs delete mode 100644 tests/consistent.rs delete mode 100644 tests/crates_regex.rs delete mode 100644 tests/test_backtrack.rs delete mode 100644 tests/test_backtrack_bytes.rs delete mode 100644 tests/test_backtrack_utf8bytes.rs delete mode 100644 tests/test_crates_regex.rs delete mode 100644 tests/test_nfa.rs delete mode 100644 tests/test_nfa_bytes.rs delete mode 100644 tests/test_nfa_utf8bytes.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 15ff835d50..5d5b24c4c2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,31 +28,25 @@ permissions: contents: read jobs: + # This job does our basic build+test for supported platforms. 
test: - name: test env: # For some builds, we use cross to test on 32-bit and big-endian # systems. CARGO: cargo # When CARGO is set to CROSS, TARGET is set to `--target matrix.target`. + # Note that we only use cross on Linux, so setting a target on a + # different OS will just use normal cargo. TARGET: + # Bump this as appropriate. We pin to a version to make sure CI + # continues to work as cross releases in the past have broken things + # in subtle ways. + CROSS_VERSION: v0.2.5 runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: - build: - - pinned - - stable - - stable-32 - - stable-mips - - beta - - nightly - - macos - - win-msvc - - win-gnu include: - - build: pinned - os: ubuntu-latest - rust: 1.60.0 - build: stable os: ubuntu-latest rust: stable @@ -80,107 +74,161 @@ jobs: os: windows-latest rust: stable-x86_64-gnu steps: - - name: Checkout repository uses: actions/checkout@v3 - - name: Install Rust uses: dtolnay/rust-toolchain@v1 with: toolchain: ${{ matrix.rust }} - - name: Install and configure Cross - if: matrix.target != '' + if: matrix.os == 'ubuntu-latest' && matrix.target != '' run: | + # In the past, new releases of 'cross' have broken CI. So for now, we + # pin it. We also use their pre-compiled binary releases because cross + # has over 100 dependencies and takes a bit to compile. + dir="$RUNNER_TEMP/cross-download" + mkdir "$dir" + echo "$dir" >> $GITHUB_PATH + cd "$dir" + curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz" + tar xf cross-x86_64-unknown-linux-musl.tar.gz + # We used to install 'cross' from master, but it kept failing. So now # we build from a known-good version until 'cross' becomes more stable # or we find an alternative. Notably, between v0.2.1 and current # master (2022-06-14), the number of Cross's dependencies has doubled. 
- cargo install --bins --git https://github.com/rust-embedded/cross --tag v0.2.1 + # cargo install --bins --git https://github.com/rust-embedded/cross --tag v0.2.1 echo "CARGO=cross" >> $GITHUB_ENV echo "TARGET=--target ${{ matrix.target }}" >> $GITHUB_ENV - - name: Show command used for Cargo run: | - echo "cargo command is: ${{ env.CARGO }}" - echo "target flag is: ${{ env.TARGET }}" - + echo "cargo command is: $CARGO" + echo "target flag is: $TARGET" - name: Show CPU info for debugging if: matrix.os == 'ubuntu-latest' run: lscpu - - name: Basic build run: ${{ env.CARGO }} build --verbose $TARGET - - name: Build docs run: ${{ env.CARGO }} doc --verbose $TARGET - - # Our dev dependencies evolve more rapidly than we'd like, so only run - # tests when we aren't pinning the Rust version. - # - # Also, our "full" test suite does quite a lot of work, so we only run it - # on one build. Otherwise, we just run the "default" set of tests. - name: Run subset of tests - if: matrix.build != 'pinned' && matrix.build != 'stable' run: ${{ env.CARGO }} test --verbose --test default $TARGET - - - name: Run full test suite - if: matrix.build == 'stable' - # 'stable' is Linux only, so we have bash. - run: ./test - - - name: Run randomized tests against regexes from the wild - if: matrix.build == 'stable' - run: | - # We run the tests in release mode since it winds up being faster. 
- RUST_REGEX_RANDOM_TEST=1 ${{ env.CARGO }} test --release --verbose --test crates-regex $TARGET - - name: Build regex-syntax docs - if: matrix.build != 'pinned' - run: | - ${{ env.CARGO }} doc --verbose --manifest-path regex-syntax/Cargo.toml $TARGET - + run: ${{ env.CARGO }} doc --verbose --manifest-path regex-syntax/Cargo.toml $TARGET - name: Run subset of regex-syntax tests - if: matrix.build != 'pinned' && matrix.build != 'stable' - run: | - ${{ env.CARGO }} test --verbose --manifest-path regex-syntax/Cargo.toml $TARGET - - - name: Run full regex-syntax test suite - if: matrix.build == 'stable' - run: | - # 'stable' is Linux only, so we have bash. - ./regex-syntax/test - + run: ${{ env.CARGO }} test --verbose --manifest-path regex-syntax/Cargo.toml $TARGET - name: Build regex-automata docs - if: matrix.build != 'pinned' - run: | - ${{ env.CARGO }} doc --verbose --manifest-path regex-automata/Cargo.toml $TARGET - + run: ${{ env.CARGO }} doc --verbose --manifest-path regex-automata/Cargo.toml $TARGET - name: Run subset of regex-automata tests - if: matrix.build != 'pinned' && matrix.build != 'stable' - run: | - ${{ env.CARGO }} test --verbose --manifest-path regex-automata/Cargo.toml $TARGET - - - name: Run full regex-automata test suite - if: matrix.build == 'stable' - run: | - # 'stable' is Linux only, so we have bash. - ./regex-automata/test - - - name: Run regex-capi tests - if: matrix.build == 'stable' - run: | - # 'stable' is Linux only, so we have bash. - ./regex-capi/test - + if: matrix.build != 'win-gnu' # Just horrifically slow. + run: ${{ env.CARGO }} test --verbose --manifest-path regex-automata/Cargo.toml $TARGET - if: matrix.build == 'nightly' name: Run benchmarks as tests run: | cd bench ./run rust --no-run --verbose + # This job runs a stripped down version of CI to test the MSRV. The specific + # reason for doing this is that the regex crate's dev-dependencies tend to + # evolve more quickly. 
There isn't as tight of a control on them because, + # well, they're only used in tests and their MSRV doesn't matter as much. + # + # It is a bit unfortunate that our MSRV test is basically just "build it" + # and pass if that works. But usually MSRV is broken by compilation problems + # and not runtime behavior. So this is in practice good enough. + msrv: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@v1 + with: + toolchain: 1.60.0 + - name: Basic build + run: cargo build --verbose + - name: Build docs + run: cargo doc --verbose + + # This job runs many more tests for the regex crate proper. Basically, + # it repeats the same test suite for a bunch of different crate feature + # combinations. There are so many features that exhaustive testing isn't + # really possible, but we cover as much as is feasible. + # + # If there is a feature combo that should be tested but isn't, you'll want to + # add it to the appropriate 'test' script in this repo. + testfull-regex: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@v1 + with: + toolchain: stable + - name: Run full test suite + run: ./test + + # Same as above, but for regex-automata, which has even more crate features! + testfull-regex-automata: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@v1 + with: + toolchain: stable + - name: Run full test suite + run: ./regex-automata/test + + # Same as above, but for regex-syntax. + testfull-regex-syntax: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@v1 + with: + toolchain: stable + - name: Run full test suite + run: ./regex-syntax/test + + # Same as above, but for regex-capi. 
+ testfull-regex-capi: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@v1 + with: + toolchain: stable + - name: Run full test suite + run: ./regex-capi/test + + # Runs miri on regex-automata's test suite. This doesn't quite cover + # everything. Many tests are disabled when building with miri because of + # how slow miri runs. But it still gives us decent coverage. + miri-regex-automata: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@v1 + with: + # We use nightly here so that we can use miri I guess? + # It caught me by surprise that miri seems to only be + # available on nightly. + toolchain: nightly + components: miri + - name: Run full test suite + run: cargo miri test --manifest-path regex-automata/Cargo.toml + + # Tests that everything is formatted correctly. rustfmt: - name: rustfmt runs-on: ubuntu-latest steps: - name: Checkout repository diff --git a/Cargo.toml b/Cargo.toml index 89ab9fff06..50f6ca6de0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -238,43 +238,6 @@ name = "default" path = "tests/test_default_bytes.rs" name = "default-bytes" -# Run the test suite on the NFA algorithm over Unicode codepoints. -[[test]] -path = "tests/test_nfa.rs" -name = "nfa" - -# Run the test suite on the NFA algorithm over bytes that match UTF-8 only. -[[test]] -path = "tests/test_nfa_utf8bytes.rs" -name = "nfa-utf8bytes" - -# Run the test suite on the NFA algorithm over arbitrary bytes. -[[test]] -path = "tests/test_nfa_bytes.rs" -name = "nfa-bytes" - -# Run the test suite on the backtracking engine over Unicode codepoints. -[[test]] -path = "tests/test_backtrack.rs" -name = "backtrack" - -# Run the test suite on the backtracking engine over bytes that match UTF-8 -# only. 
-[[test]] -path = "tests/test_backtrack_utf8bytes.rs" -name = "backtrack-utf8bytes" - -# Run the test suite on the backtracking engine over arbitrary bytes. -[[test]] -path = "tests/test_backtrack_bytes.rs" -name = "backtrack-bytes" - -# Run all backends against each regex found on crates.io and make sure -# that they all do the same thing. -[[test]] -path = "tests/test_crates_regex.rs" -name = "crates-regex" - [package.metadata.docs.rs] # We want to document all features. all-features = true diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 0000000000..5415e7a451 --- /dev/null +++ b/Cross.toml @@ -0,0 +1,7 @@ +[build.env] +passthrough = [ + "RUST_BACKTRACE", + "RUST_LOG", + "REGEX_TEST", + "REGEX_TEST_VERBOSE", +] diff --git a/bench/src/bench.rs b/bench/src/bench.rs index cae1d90f6f..620fbecdb5 100644 --- a/bench/src/bench.rs +++ b/bench/src/bench.rs @@ -304,6 +304,3 @@ cfg_if! { mod sherlock; } } - -#[cfg(any(feature = "re-rust", feature = "re-rust-bytes"))] -mod rust_compile; diff --git a/bench/src/rust_compile.rs b/bench/src/rust_compile.rs deleted file mode 100644 index f88e9b181e..0000000000 --- a/bench/src/rust_compile.rs +++ /dev/null @@ -1,67 +0,0 @@ -use regex_syntax::Parser; -use test::Bencher; - -use regex::internal::Compiler; - -#[bench] -fn compile_simple(b: &mut Bencher) { - b.iter(|| { - let re = Parser::new().parse(r"^bc(d|e)*$").unwrap(); - Compiler::new().compile(&[re]).unwrap() - }); -} - -#[bench] -fn compile_simple_bytes(b: &mut Bencher) { - b.iter(|| { - let re = Parser::new().parse(r"^bc(d|e)*$").unwrap(); - Compiler::new().bytes(true).compile(&[re]).unwrap() - }); -} - -#[bench] -fn compile_simple_full(b: &mut Bencher) { - b.iter(|| regex!(r"^bc(d|e)*$")); -} - -#[bench] -fn compile_small(b: &mut Bencher) { - b.iter(|| { - let re = Parser::new().parse(r"\p{L}|\p{N}|\s|.|\d").unwrap(); - Compiler::new().compile(&[re]).unwrap() - }); -} - -#[bench] -fn compile_small_bytes(b: &mut Bencher) { - b.iter(|| { - let re = 
Parser::new().parse(r"\p{L}|\p{N}|\s|.|\d").unwrap(); - Compiler::new().bytes(true).compile(&[re]).unwrap() - }); -} - -#[bench] -fn compile_small_full(b: &mut Bencher) { - b.iter(|| regex!(r"\p{L}|\p{N}|\s|.|\d")); -} - -#[bench] -fn compile_huge(b: &mut Bencher) { - b.iter(|| { - let re = Parser::new().parse(r"\p{L}{50}").unwrap(); - Compiler::new().size_limit(1 << 30).compile(&[re]).unwrap() - }); -} - -#[bench] -fn compile_huge_bytes(b: &mut Bencher) { - b.iter(|| { - let re = Parser::new().parse(r"\p{L}{50}").unwrap(); - Compiler::new().size_limit(1 << 30).bytes(true).compile(&[re]).unwrap() - }); -} - -#[bench] -fn compile_huge_full(b: &mut Bencher) { - b.iter(|| regex!(r"\p{L}{50}")); -} diff --git a/examples/shootout-regex-dna-replace.rs b/examples/shootout-regex-dna-replace.rs deleted file mode 100644 index 20694e06f3..0000000000 --- a/examples/shootout-regex-dna-replace.rs +++ /dev/null @@ -1,17 +0,0 @@ -use std::io::{self, Read}; - -macro_rules! regex { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re).build().unwrap().into_regex() - }}; -} - -fn main() { - let mut seq = String::with_capacity(50 * (1 << 20)); - io::stdin().read_to_string(&mut seq).unwrap(); - let ilen = seq.len(); - - seq = regex!(">[^\n]*\n|\n").replace_all(&seq, "").into_owned(); - println!("original: {}, replaced: {}", ilen, seq.len()); -} diff --git a/newtests/bytes.rs b/newtests/bytes.rs index c4bf3520f7..590f00d87e 100644 --- a/newtests/bytes.rs +++ b/newtests/bytes.rs @@ -80,6 +80,12 @@ fn compiler( if test.utf8() { return skip; } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. 
+ if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } let re = RegexBuilder::new(pattern) .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) diff --git a/newtests/bytes_set.rs b/newtests/bytes_set.rs index ef20d9d85f..f8c5199e45 100644 --- a/newtests/bytes_set.rs +++ b/newtests/bytes_set.rs @@ -56,6 +56,12 @@ fn compiler( if test.utf8() { return skip; } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. + if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } let re = RegexSetBuilder::new(test.regexes()) .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) diff --git a/newtests/misc.rs b/newtests/misc.rs new file mode 100644 index 0000000000..8badf8f185 --- /dev/null +++ b/newtests/misc.rs @@ -0,0 +1,8 @@ +use regex::Regex; + +#[test] +fn unclosed_group_error() { + let err = Regex::new(r"(").unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("unclosed group"), "error message: {:?}", msg); +} diff --git a/newtests/string.rs b/newtests/string.rs index d303d25672..efc20c338c 100644 --- a/newtests/string.rs +++ b/newtests/string.rs @@ -88,6 +88,12 @@ fn compiler( if !test.utf8() { return skip; } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. 
+ if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } let re = RegexBuilder::new(pattern) .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) diff --git a/newtests/string_set.rs b/newtests/string_set.rs index 49d6514962..545f0f1cef 100644 --- a/newtests/string_set.rs +++ b/newtests/string_set.rs @@ -64,6 +64,12 @@ fn compiler( if !test.utf8() { return skip; } + // If the test requires Unicode but the Unicode feature isn't enabled, + // skip it. This is a little aggressive, but the test suite doesn't + // have any easy way of communicating which Unicode features are needed. + if test.unicode() && !cfg!(feature = "unicode") { + return skip; + } let re = RegexSetBuilder::new(test.regexes()) .case_insensitive(test.case_insensitive()) .unicode(test.unicode()) diff --git a/newtests/tests.rs b/newtests/tests.rs index 450af99d94..eee929c513 100644 --- a/newtests/tests.rs +++ b/newtests/tests.rs @@ -1,5 +1,6 @@ mod bytes; mod bytes_set; +mod misc; mod string; mod string_set; diff --git a/regex-automata/src/dfa/regex.rs b/regex-automata/src/dfa/regex.rs index 07959d3c18..2c3f4c21a2 100644 --- a/regex-automata/src/dfa/regex.rs +++ b/regex-automata/src/dfa/regex.rs @@ -738,6 +738,7 @@ impl Builder { .configure( dense::Config::new() .prefilter(None) + .specialize_start_states(false) .start_kind(StartKind::Anchored) .match_kind(MatchKind::All), ) diff --git a/regex-automata/src/dfa/sparse.rs b/regex-automata/src/dfa/sparse.rs index fc996f8049..04942f0503 100644 --- a/regex-automata/src/dfa/sparse.rs +++ b/regex-automata/src/dfa/sparse.rs @@ -1386,7 +1386,14 @@ impl> Transitions { dst = &mut dst[size_of::()..]; // write actual transitions - dst.copy_from_slice(self.sparse()); + let mut id = DEAD; + while id.as_usize() < self.sparse().len() { + let state = self.state(id); + let n = state.write_to::(&mut dst)?; + dst = &mut dst[n..]; + // The next ID is the offset immediately following `state`. 
+ id = StateID::new(id.as_usize() + state.write_to_len()).unwrap(); + } Ok(nwrite) } @@ -1462,7 +1469,7 @@ impl> Transitions { // The next ID should be the offset immediately following `state`. id = StateID::new(wire::add( id.as_usize(), - state.bytes_len(), + state.write_to_len(), "next state ID offset", )?) .map_err(|err| { @@ -1550,7 +1557,9 @@ impl> Transitions { /// every pattern ID is valid. fn try_state(&self, id: StateID) -> Result, DeserializeError> { if id.as_usize() > self.sparse().len() { - return Err(DeserializeError::generic("invalid sparse state ID")); + return Err(DeserializeError::generic( + "invalid caller provided sparse state ID", + )); } let mut state = &self.sparse()[id.as_usize()..]; // Encoding format starts with a u16 that stores the total number of @@ -1964,7 +1973,10 @@ impl> StartTable { ); dst = &mut dst[size_of::()..]; // write start IDs - dst.copy_from_slice(self.table()); + for (sid, _, _) in self.iter() { + E::write_u32(sid.as_u32(), dst); + dst = &mut dst[StateID::SIZE..]; + } Ok(nwrite) } @@ -2185,7 +2197,7 @@ impl<'a, T: AsRef<[u8]>> Iterator for StateIter<'a, T> { return None; } let state = self.trans.state(StateID::new_unchecked(self.id)); - self.id = self.id + state.bytes_len(); + self.id = self.id + state.write_to_len(); Some(state) } } @@ -2291,9 +2303,56 @@ impl<'a> State<'a> { self.pattern_ids.len() / 4 } + /// Return an accelerator for this state. + fn accelerator(&self) -> &'a [u8] { + self.accel + } + + /// Write the raw representation of this state to the given buffer using + /// the given endianness. 
+ fn write_to( + &self, + mut dst: &mut [u8], + ) -> Result { + let nwrite = self.write_to_len(); + if dst.len() < nwrite { + return Err(SerializeError::buffer_too_small( + "sparse state transitions", + )); + } + + let ntrans = + if self.is_match { self.ntrans | (1 << 15) } else { self.ntrans }; + E::write_u16(u16::try_from(ntrans).unwrap(), dst); + dst = &mut dst[size_of::()..]; + + dst[..self.input_ranges.len()].copy_from_slice(self.input_ranges); + dst = &mut dst[self.input_ranges.len()..]; + + for i in 0..self.ntrans { + E::write_u32(self.next_at(i).as_u32(), dst); + dst = &mut dst[StateID::SIZE..]; + } + + if self.is_match { + E::write_u32(u32::try_from(self.pattern_len()).unwrap(), dst); + dst = &mut dst[size_of::()..]; + for i in 0..self.pattern_len() { + let pid = self.pattern_id(i); + E::write_u32(pid.as_u32(), dst); + dst = &mut dst[PatternID::SIZE..]; + } + } + + dst[0] = u8::try_from(self.accel.len()).unwrap(); + dst[1..][..self.accel.len()].copy_from_slice(self.accel); + + Ok(nwrite) + } + /// Return the total number of bytes that this state consumes in its /// encoded form. - fn bytes_len(&self) -> usize { + fn write_to_len(&self) -> usize { let mut len = 2 + (self.ntrans * 2) + (self.ntrans * StateID::SIZE) @@ -2303,11 +2362,6 @@ impl<'a> State<'a> { } len } - - /// Return an accelerator for this state. 
- fn accelerator(&self) -> &'a [u8] { - self.accel - } } impl<'a> fmt::Debug for State<'a> { diff --git a/regex-automata/src/hybrid/regex.rs b/regex-automata/src/hybrid/regex.rs index c763cb7de2..75667daf91 100644 --- a/regex-automata/src/hybrid/regex.rs +++ b/regex-automata/src/hybrid/regex.rs @@ -794,7 +794,10 @@ impl Builder { .dfa .clone() .configure( - DFA::config().prefilter(None).match_kind(MatchKind::All), + DFA::config() + .prefilter(None) + .specialize_start_states(false) + .match_kind(MatchKind::All), ) .thompson(thompson::Config::new().reverse(true)) .build_many(patterns)?; diff --git a/regex-automata/src/hybrid/search.rs b/regex-automata/src/hybrid/search.rs index ed7fc4d407..26bec8d7f4 100644 --- a/regex-automata/src/hybrid/search.rs +++ b/regex-automata/src/hybrid/search.rs @@ -416,7 +416,7 @@ fn find_rev_imp( } if sid.is_tagged() { if sid.is_start() { - continue; + // do nothing } else if sid.is_match() { let pattern = dfa.match_pattern(cache, sid, 0); // Since reverse searches report the beginning of a match @@ -639,7 +639,7 @@ pub(crate) fn find_overlapping_rev( if sid.is_tagged() { state.id = Some(sid); if sid.is_start() { - continue; + // do nothing } else if sid.is_match() { state.next_match_index = Some(1); let pattern = dfa.match_pattern(cache, sid, 0); diff --git a/regex-automata/src/meta/error.rs b/regex-automata/src/meta/error.rs index 5773adf26a..f5911aebbf 100644 --- a/regex-automata/src/meta/error.rs +++ b/regex-automata/src/meta/error.rs @@ -15,12 +15,12 @@ use crate::{nfa, util::search::MatchError, PatternID}; /// * Ask for the [`PatternID`] of the pattern that caused an error, if one /// is available. This is available for things like syntax errors, but not for /// cases where build limits are exceeded. +/// * Ask for the underlying syntax error, but only if the error is a syntax +/// error. /// * Ask for a human readable message corresponding to the underlying error. 
/// * The `BuildError::source` method (from the `std::error::Error` -/// trait implementation) may be used to query for -/// [`regex-syntax::ast::Error`](regex_syntax::ast::Error) or -/// [`regex-syntax::hir::Error`](regex_syntax::hir::Error). (This requires the -/// `std` feature.) +/// trait implementation) may be used to query for an underlying error if one +/// exists. There are no API guarantees about which error is returned. /// /// When the `std` feature is enabled, this implements `std::error::Error`. #[derive(Clone, Debug)] @@ -30,8 +30,7 @@ pub struct BuildError { #[derive(Clone, Debug)] enum BuildErrorKind { - Ast { pid: PatternID, err: ast::Error }, - Hir { pid: PatternID, err: hir::Error }, + Syntax { pid: PatternID, err: regex_syntax::Error }, NFA(nfa::thompson::BuildError), } @@ -53,18 +52,40 @@ impl BuildError { /// ``` pub fn pattern(&self) -> Option { match self.kind { - BuildErrorKind::Ast { pid, .. } => Some(pid), - BuildErrorKind::Hir { pid, .. } => Some(pid), + BuildErrorKind::Syntax { pid, .. } => Some(pid), + _ => None, + } + } + + /// If this error occurred because the regex exceeded the configured size + /// limit before being built, then this returns the configured size limit. + /// + /// The limit returned is what was configured, and corresponds to the + /// maximum amount of heap usage in bytes. + pub fn size_limit(&self) -> Option { + match self.kind { + BuildErrorKind::NFA(ref err) => err.size_limit(), + _ => None, + } + } + + /// If this error corresponds to a syntax error, then a reference to it is + /// returned by this method. + pub fn syntax_error(&self) -> Option<®ex_syntax::Error> { + match self.kind { + BuildErrorKind::Syntax { ref err, .. 
} => Some(err), _ => None, } } pub(crate) fn ast(pid: PatternID, err: ast::Error) -> BuildError { - BuildError { kind: BuildErrorKind::Ast { pid, err } } + let err = regex_syntax::Error::from(err); + BuildError { kind: BuildErrorKind::Syntax { pid, err } } } pub(crate) fn hir(pid: PatternID, err: hir::Error) -> BuildError { - BuildError { kind: BuildErrorKind::Hir { pid, err } } + let err = regex_syntax::Error::from(err); + BuildError { kind: BuildErrorKind::Syntax { pid, err } } } pub(crate) fn nfa(err: nfa::thompson::BuildError) -> BuildError { @@ -76,8 +97,7 @@ impl BuildError { impl std::error::Error for BuildError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self.kind { - BuildErrorKind::Ast { ref err, .. } => Some(err), - BuildErrorKind::Hir { ref err, .. } => Some(err), + BuildErrorKind::Syntax { ref err, .. } => Some(err), BuildErrorKind::NFA(ref err) => Some(err), } } @@ -86,15 +106,8 @@ impl std::error::Error for BuildError { impl core::fmt::Display for BuildError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self.kind { - BuildErrorKind::Ast { pid, .. } => { - write!(f, "error parsing pattern {} into AST", pid.as_usize()) - } - BuildErrorKind::Hir { pid, .. } => { - write!( - f, - "error translating pattern {} to HIR", - pid.as_usize() - ) + BuildErrorKind::Syntax { pid, .. } => { + write!(f, "error parsing pattern {}", pid.as_usize()) } BuildErrorKind::NFA(_) => write!(f, "error building NFA"), } diff --git a/regex-automata/src/meta/regex.rs b/regex-automata/src/meta/regex.rs index 35c1b7c334..750209f0a9 100644 --- a/regex-automata/src/meta/regex.rs +++ b/regex-automata/src/meta/regex.rs @@ -1995,6 +1995,17 @@ pub struct FindMatches<'r, 'h> { it: iter::Searcher<'h>, } +impl<'r, 'h> FindMatches<'r, 'h> { + /// Returns the current `Input` associated with this iterator. 
+ /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. + #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.it.input() + } +} + impl<'r, 'h> Iterator for FindMatches<'r, 'h> { type Item = Match; @@ -2042,6 +2053,17 @@ pub struct CapturesMatches<'r, 'h> { it: iter::Searcher<'h>, } +impl<'r, 'h> CapturesMatches<'r, 'h> { + /// Returns the current `Input` associated with this iterator. + /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. + #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.it.input() + } +} + impl<'r, 'h> Iterator for CapturesMatches<'r, 'h> { type Item = Captures; @@ -2091,6 +2113,17 @@ pub struct Split<'r, 'h> { last: usize, } +impl<'r, 'h> Split<'r, 'h> { + /// Returns the current `Input` associated with this iterator. + /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. + #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.finder.input() + } +} + impl<'r, 'h> Iterator for Split<'r, 'h> { type Item = Span; @@ -2134,6 +2167,17 @@ pub struct SplitN<'r, 'h> { limit: usize, } +impl<'r, 'h> SplitN<'r, 'h> { + /// Returns the current `Input` associated with this iterator. + /// + /// The `start` position on the given `Input` may change during iteration, + /// but all other values are guaranteed to remain invariant. 
+ #[inline] + pub fn input<'s>(&'s self) -> &'s Input<'h> { + self.splits.input() + } +} + impl<'r, 'h> Iterator for SplitN<'r, 'h> { type Item = Span; diff --git a/regex-automata/src/meta/strategy.rs b/regex-automata/src/meta/strategy.rs index 91601e49ac..0942ed35e1 100644 --- a/regex-automata/src/meta/strategy.rs +++ b/regex-automata/src/meta/strategy.rs @@ -782,7 +782,9 @@ impl Strategy for Core { input, patset, ) { - Ok(()) => return, + Ok(()) => { + return; + } Err(err) => err, }; trace!("fast overlapping search failed: {}", _err); diff --git a/regex-automata/src/meta/wrappers.rs b/regex-automata/src/meta/wrappers.rs index 1a59db7bb3..8f58363a17 100644 --- a/regex-automata/src/meta/wrappers.rs +++ b/regex-automata/src/meta/wrappers.rs @@ -580,7 +580,8 @@ impl HybridEngine { dfa_config .clone() .match_kind(MatchKind::All) - .prefilter(None), + .prefilter(None) + .specialize_start_states(false), ) .build_from_nfa(nfarev.clone()); let rev = match result { @@ -881,7 +882,8 @@ impl DFAEngine { // don't.) .start_kind(dfa::StartKind::Anchored) .match_kind(MatchKind::All) - .prefilter(None), + .prefilter(None) + .specialize_start_states(false), ) .build_from_nfa(&nfarev); let rev = match result { diff --git a/regex-automata/src/nfa/thompson/builder.rs b/regex-automata/src/nfa/thompson/builder.rs index ead6d85bea..91bae4db81 100644 --- a/regex-automata/src/nfa/thompson/builder.rs +++ b/regex-automata/src/nfa/thompson/builder.rs @@ -1309,6 +1309,9 @@ mod tests { // is built. 
#[test] fn state_has_small_size() { + #[cfg(target_pointer_width = "64")] assert_eq!(32, core::mem::size_of::()); + #[cfg(target_pointer_width = "32")] + assert_eq!(16, core::mem::size_of::()); } } diff --git a/regex-automata/src/nfa/thompson/error.rs b/regex-automata/src/nfa/thompson/error.rs index aa3ac2add7..645bb065a0 100644 --- a/regex-automata/src/nfa/thompson/error.rs +++ b/regex-automata/src/nfa/thompson/error.rs @@ -79,6 +79,18 @@ enum BuildErrorKind { } impl BuildError { + /// If this error occurred because the NFA exceeded the configured size + /// limit before being built, then this returns the configured size limit. + /// + /// The limit returned is what was configured, and corresponds to the + /// maximum amount of heap usage in bytes. + pub fn size_limit(&self) -> Option { + match self.kind { + BuildErrorKind::ExceededSizeLimit { limit } => Some(limit), + _ => None, + } + } + fn kind(&self) -> &BuildErrorKind { &self.kind } diff --git a/regex-automata/src/nfa/thompson/map.rs b/regex-automata/src/nfa/thompson/map.rs index 86350f1210..c36ce53866 100644 --- a/regex-automata/src/nfa/thompson/map.rs +++ b/regex-automata/src/nfa/thompson/map.rs @@ -37,7 +37,10 @@ use alloc::{vec, vec::Vec}; use crate::{ nfa::thompson::Transition, - util::{int::U64, primitives::StateID}, + util::{ + int::{Usize, U64}, + primitives::StateID, + }, }; // Basic FNV-1a hash constants as described in: @@ -144,7 +147,7 @@ impl Utf8BoundedMap { h = (h ^ u64::from(t.end)).wrapping_mul(PRIME); h = (h ^ t.next.as_u64()).wrapping_mul(PRIME); } - h.as_usize() % self.map.len() + (h % self.map.len().as_u64()).as_usize() } /// Retrieve the cached state ID corresponding to the given key. 
The hash @@ -258,7 +261,7 @@ impl Utf8SuffixMap { h = (h ^ key.from.as_u64()).wrapping_mul(PRIME); h = (h ^ u64::from(key.start)).wrapping_mul(PRIME); h = (h ^ u64::from(key.end)).wrapping_mul(PRIME); - h.as_usize() % self.map.len() + (h % self.map.len().as_u64()).as_usize() } /// Retrieve the cached state ID corresponding to the given key. The hash diff --git a/regex-automata/src/nfa/thompson/nfa.rs b/regex-automata/src/nfa/thompson/nfa.rs index 9caba89abc..bb188ed2e1 100644 --- a/regex-automata/src/nfa/thompson/nfa.rs +++ b/regex-automata/src/nfa/thompson/nfa.rs @@ -2046,7 +2046,10 @@ mod tests { // intentionally. #[test] fn state_has_small_size() { + #[cfg(target_pointer_width = "64")] assert_eq!(24, core::mem::size_of::()); + #[cfg(target_pointer_width = "32")] + assert_eq!(20, core::mem::size_of::()); } #[test] diff --git a/regex-automata/src/util/captures.rs b/regex-automata/src/util/captures.rs index 35f8f793aa..49f7d1a598 100644 --- a/regex-automata/src/util/captures.rs +++ b/regex-automata/src/util/captures.rs @@ -1140,7 +1140,7 @@ impl<'a> core::fmt::Debug for CapturesDebugMap<'a> { /// /// The lifetime parameter `'a` refers to the lifetime of the underlying /// `Captures` value. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct CapturesPatternIter<'a> { caps: &'a Captures, names: core::iter::Enumerate>, @@ -2359,7 +2359,7 @@ impl core::fmt::Display for GroupInfoError { /// /// The lifetime parameter `'a` refers to the lifetime of the `GroupInfo` /// from which this iterator was created. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct GroupInfoPatternNames<'a> { it: core::slice::Iter<'a, Option>>, } diff --git a/regex-automata/src/util/pool.rs b/regex-automata/src/util/pool.rs index 7c20385ac2..50dbc51411 100644 --- a/regex-automata/src/util/pool.rs +++ b/regex-automata/src/util/pool.rs @@ -151,13 +151,13 @@ being quite expensive. 
/// let expected = Some(Match::must(0, 3..14)); /// assert_eq!(expected, RE.find(&mut CACHE.get(), b"zzzfoo12345barzzz")); /// ``` -pub struct Pool T>(inner::Pool); +pub struct Pool T>(alloc::boxed::Box>); impl Pool { /// Create a new pool. The given closure is used to create values in /// the pool when necessary. pub fn new(create: F) -> Pool { - Pool(inner::Pool::new(create)) + Pool(alloc::boxed::Box::new(inner::Pool::new(create))) } } diff --git a/regex-automata/src/util/search.rs b/regex-automata/src/util/search.rs index b0a9b8652d..e9903d3436 100644 --- a/regex-automata/src/util/search.rs +++ b/regex-automata/src/util/search.rs @@ -1355,7 +1355,7 @@ impl core::fmt::Display for PatternSetInsertError { /// /// This iterator is created by the [`PatternSet::iter`] method. #[cfg(feature = "alloc")] -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct PatternSetIter<'a> { it: core::iter::Enumerate>, } @@ -1376,6 +1376,26 @@ impl<'a> Iterator for PatternSetIter<'a> { } None } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } +} + +#[cfg(feature = "alloc")] +impl<'a> DoubleEndedIterator for PatternSetIter<'a> { + fn next_back(&mut self) -> Option { + while let Some((index, &yes)) = self.it.next_back() { + if yes { + // Only valid 'PatternID' values can be inserted into the set + // and construction of the set panics if the capacity would + // permit storing invalid pattern IDs. Thus, 'yes' is only true + // precisely when 'index' corresponds to a valid 'PatternID'. + return Some(PatternID::new_unchecked(index)); + } + } + None + } } /// The type of anchored search to perform. @@ -1422,6 +1442,7 @@ impl<'a> Iterator for PatternSetIter<'a> { /// searches. /// /// ``` +/// # if cfg!(miri) { return Ok(()); } // miri takes too long /// use regex_automata::{ /// nfa::thompson::pikevm::PikeVM, /// Anchored, Input, Match, PatternID, @@ -1930,9 +1951,17 @@ mod tests { } // Same as above, but for the underlying match error kind. 
+ #[cfg(target_pointer_width = "64")] #[test] fn match_error_kind_size() { let expected_size = 2 * core::mem::size_of::(); assert_eq!(expected_size, core::mem::size_of::()); } + + #[cfg(target_pointer_width = "32")] + #[test] + fn match_error_kind_size() { + let expected_size = 3 * core::mem::size_of::(); + assert_eq!(expected_size, core::mem::size_of::()); + } } diff --git a/regex-automata/test b/regex-automata/test index a26f4113f4..df3e5ae98d 100755 --- a/regex-automata/test +++ b/regex-automata/test @@ -93,8 +93,3 @@ for f in "${features[@]}"; do cargo build --no-default-features --lib --features "$f" cargo test --no-default-features --test integration --features "$f" done - -# There are a lot of 'cfg(not(miri))' configs spread out through the code and -# the tests to make this Just Work. -echo "===== MIRI ===" -cargo miri test diff --git a/regex-automata/tests/gen/sparse/multi_pattern_v2.rs b/regex-automata/tests/gen/sparse/multi_pattern_v2.rs index 181caa6db3..911e3f5ddc 100644 --- a/regex-automata/tests/gen/sparse/multi_pattern_v2.rs +++ b/regex-automata/tests/gen/sparse/multi_pattern_v2.rs @@ -1,6 +1,6 @@ // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // -// regex-cli generate serialize sparse regex MULTI_PATTERN_V2 tests/gen/sparse/ --rustfmt --safe --starts-for-each-pattern --specialize-start-states --start-kind both --unicode-word-boundary --minimize \b[a-zA-Z]+\b (?m)^\S+$ (?Rm)^\S+$ +// regex-cli generate serialize sparse regex MULTI_PATTERN_V2 regex-automata/tests/gen/sparse/ --rustfmt --safe --starts-for-each-pattern --specialize-start-states --start-kind both --unicode-word-boundary --minimize \b[a-zA-Z]+\b (?m)^\S+$ (?Rm)^\S+$ // // regex-cli 0.0.1 is available on crates.io. 
diff --git a/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa b/regex-automata/tests/gen/sparse/multi_pattern_v2_fwd.bigendian.dfa index 6f06fe200f0fe61e27dfcf9b4f4dd02a3bbfbaab..aa04f63162709f12d140017f42fdad986ecb4162 100644 GIT binary patch literal 3476 zcmcguy>1gh5Z*iAkMEo~iTMwK1QH;@SmF_=LPtYEZ7G2Y5had9>53{9JOEEYfv6~W z2Rb@x9snhL-|WuWUf&%T5i!#G+nL?@*_oX^A08i#28a8v_TLVM$D=o|hoj+O@8xiC zbTB+T+BcxnGyH^{fr#4}Eiytu*lI=b93|;2wF}BD<(1R{Ei85`X^FJF((CmuT_#;2 zT`lO^b9koG5LuJiX^znI9wjc zG;;#(2u-OtC>I+uPM{OUak^xUCBI>0#~7l_X&k(BKTf$lLJd?QdWyxK9h?hKCCY5f zB}%7Qq*ZK+Fy}C{o=lYI-L4HP>S7CqYyo%&ha%$1^1OpWqFRy_HA{^9)`ggEMN38>mJx` zyd8}jxMM1cL`(8K0C~sDcnKBjwMOJ>(+E0DLdOjibi5jw54+|GsrN`lc;sUso;+SX zHoUG{JU(aIaw?lCD6VT6FYcgw!S3|l_^9q`Q$!`EhL=!M=p54ku3CP)tDKCjU>{75y-p|&@3&XN&THq+v}UduaPqJqu^yVIA(M|D?QGAglD ze5Qy#uO(|3vw3ID@QgGks4}3K2ex;yR>Et{4c+Pcu1!RDDw}!pzgv0n4RE};Yu-d^ z`mgTv)8e7JtE~uQWa={BZKB7Yw<2GxnWBwY8I=LWJP|3O+~*s`%uhFC<^Ket&uh$B zoe=(>M-#4U78K-m6#U$T8v^`SsG=$t|688GVe8D1eM;oN5C6De@XuH4Bli5laXJ{Q qZafE`Oypdj9#jEfQz6)uHRT00Ud2DC#r@p? 
literal 3476 zcmchZyKdA#6o$Q5?Q>1ZgZS4yBlM9D^?OhrXS9w1Lafv6~W z2Rb@x9sni$pY1a-vtF+wMI7n)pL2X}GiPS({lkOFXn*(3?)%aBaPsc$crqUCydIAZ z_Qv}MyMgHROphuf5x6C;A;eUv)@t>}6g9(tspY7hP-iA}MGK3)nY08guk`!2%ddbxS@L792Jb+0eyd;5*tR zz%Sw~7CPFJffG4q(K)ITGP<=kr(A<ssX$)Ei-|)n?M&g@r{o@lbo4?IrMmoJ!ZBb-iWI`ESi ztSDuke8F^k+5mosOb7lHgDE||08k8wm7Xf8VM~5 zh{Vj%4C80DakIEVo^)dDLP@&UD!Z z9(xTnnRNIj7v|j4VWH=xvwh^FOIOqsoFbLPCIo$%An1b?afO=oIhb{xU&$sCQ!!{V zNMMk~$dg^QdSBurRdNmOc++572AVE=#GiXd-eju6=aSReP2;0WSJX0`B9+871^O~U z(3h(yP~1G3boiE>orRv4 z&OR+Zx^zVy!6{Nne5XKPCJ6eHHGfUb2Pv|!rUMBNvP5|@>F_PQ8~Jocvf)1g^ksse z_eg&kx>V;;#ZhYM8pm63_IP)L; z0O7srF2^%oloL-XS9Q7Tb=P7qk5}t#xp==g$@1g%#}D~B&)&Yvv(=}3xmpQch z7C^lRiMSA07e#TWf^KgU{Q`+c+K2%#9E}?>A$E4B)9IC~#5Ll2fg3l0TYI-X_V@1) zckgZC;Nbp)HavXv`0$D1>1_6FKA%6&E?`W2Vr)@_V4S+0Ow1%WxWk!Rx2<;@bJ|6W z>vVb(Pv$;@WCkC!*d)g~9A6>VUg3ZqnPRj@xuOx`re0{vntb&6bQ<}PN0o1duvdW( zb_6lt6JGX zO$Qh~%$M|$=@fDn?4b1~^?{+bJi&cOU`e+ImhT=?c z3G*8ahoe%C<_EGrg8VLN^+8LKpqU2uZm;E5h^@{Ba&DTn#Ps}KHg0m{ZyfXP{-Msn77fX3YvPb~hBfz1*+EAzy>mHM9&YG#hSe)>3N5 z$$yv{z~>ZutLI6E^2>|EufV;8t$ymV ia4+Rs5Vh6+6Mhx=$R&;8wDDIMKu&q54$*pF)&BrJ9)}?S literal 1920 zcmcgsOH$iF5S@`F%a(uO0O2131Y(RXz=HCIGq6Qf7*-UNZ7Ni;@rp$bK_zRh!kT+< zfxvqs^;lSz%_^^|-P1kYZ+f~%?%nnWVXu4Hy$hq;!Sz)%h{E%WDD2-vy?$2#|K9N9 za0O7WLJBktv{NX!UIs<~BT5$K7(p&7K($uSMT3}|Z#J7>7KlY+$>QrWu(G-qV|{&t z*!(tzt*!6d)3CGqV{c#Zv(-9ix7&x|2uitk+V(&e&TuYNs+^eIH;v1>O={L{5{pBm zIiBY?NcIa1E%>P^B&Tf;|Hy&}1GDW9$>d@@F;@*^Yhs(iHHCb%XYd)`alStCPmsub zt|{cBFNY5i2HZBB+4Aj{tAe0jXA_Q)Mj^R@qtcC59-MMXX_Pd}#HRz~=5l)zKxQzM zgP?+^hxdh3>dAttOI#TY4K6BbZVAA$!1l|RX*giZ;f;G<61_QbU;AioX6KZg}%%w z97)~_{A34S6Yf1d>V){Hc?30Iz$BZsF;~q)G#fsp-KU7@Kpqk;HZ_M&&4Ev{c@Xmm zgbGj=5@tbLB$3w6EEu41lRV5i>VNI?J1D#*$UF25levdHC*q-V#}M&Tr-iYU--*at f{7>+U(1%eR Result<()> { Ok(()) } +/// Tests the hybrid NFA/DFA when 'specialize_start_states' is enabled. 
+#[test] +fn specialize_start_states() -> Result<()> { + let mut builder = Regex::builder(); + builder.dfa(DFA::config().specialize_start_states(true)); + TestRunner::new()? + .expand(EXPANSIONS, |t| t.compiles()) + // Without NFA shrinking, this test blows the default cache capacity. + .blacklist("expensive/regression-many-repeat-no-stack-overflow") + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + /// Tests the hybrid NFA/DFA when byte classes are disabled. /// /// N.B. Disabling byte classes doesn't avoid any indirection at search time. diff --git a/regex-automata/tests/meta/suite.rs b/regex-automata/tests/meta/suite.rs index 91e2908696..9ee2b33ddf 100644 --- a/regex-automata/tests/meta/suite.rs +++ b/regex-automata/tests/meta/suite.rs @@ -23,13 +23,10 @@ const BLACKLIST: &[&str] = &[ // the literal searchers also don't have the ability to quit fully or it's // otherwise not worth doing. (A literal searcher not quitting as early as // possible usually means looking at a few more bytes. That's no biggie.) - // - // Other 'earliest' tests can be added here if the need arises. - "earliest/no-leftmost-first-100/", - "earliest/no-leftmost-first-200/", + "earliest/", ]; -/// Tests the default configuration of the hybrid NFA/DFA. +/// Tests the default configuration of the meta regex engine. #[test] fn default() -> Result<()> { let builder = Regex::builder(); @@ -42,6 +39,70 @@ fn default() -> Result<()> { Ok(()) } +/// Tests the default configuration minus the full DFA. +#[test] +fn no_dfa() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure(Regex::config().dfa(false)); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA and lazy DFA. 
+#[test] +fn no_dfa_hybrid() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure(Regex::config().dfa(false).hybrid(false)); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA, lazy DFA and one-pass +/// DFA. +#[test] +fn no_dfa_hybrid_onepass() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure(Regex::config().dfa(false).hybrid(false).onepass(false)); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + +/// Tests the default configuration minus the full DFA, lazy DFA, one-pass +/// DFA and backtracker. +#[test] +fn no_dfa_hybrid_onepass_backtrack() -> Result<()> { + let mut builder = Regex::builder(); + builder.configure( + Regex::config() + .dfa(false) + .hybrid(false) + .onepass(false) + .backtrack(false), + ); + let mut runner = TestRunner::new()?; + runner + .expand(&["is_match", "find", "captures"], |test| test.compiles()) + .blacklist_iter(BLACKLIST) + .test_iter(suite()?.iter(), compiler(builder)) + .assert(); + Ok(()) +} + fn compiler( mut builder: meta::Builder, ) -> impl FnMut(&RegexTest, &[String]) -> Result { diff --git a/scripts/scrape-crates-io b/scripts/scrape-crates-io deleted file mode 100755 index da21553a7e..0000000000 --- a/scripts/scrape-crates-io +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/env python3 - -from subprocess import call -import argparse -import datetime -import glob -import json -import os -import re -import shutil -import tempfile -import time -import urllib3 - -CRATES_IO_INDEX_GIT_LOC = "https://github.com/rust-lang/crates.io-index.git" -RE_REGEX = re.compile(r"Regex::new\((r?\".*?\")\)") 
-KNOWN_UNMAINTAINED_CRATES = set(["queryst-prime", "oozz"]) - -# if only requests was in the standard library... -urllib3.disable_warnings() -http = urllib3.PoolManager() - - -def argparser(): - p = argparse.ArgumentParser("A script to scrape crates.io for regex.") - p.add_argument("-c", "--crates-index", metavar="CRATES_INDEX_DIR", - help=("A directory where we can find crates.io-index " - + "(if this isn't set it will be automatically " - + "downloaded).")) - p.add_argument("-o", "--output-file", metavar="OUTPUT", - default="crates_regex.rs", - help="The name of the output file to create.") - return p - - -PRELUDE = """ -// DO NOT EDIT. Automatically generated by 'scripts/scrape_crates_io.py' -// on {date}. - - - -""".lstrip() - - -def main(): - args = argparser().parse_args() - out = open(os.path.abspath(args.output_file), "w") - out.write(PRELUDE.format(date=str(datetime.datetime.now()))) - if args.crates_index: - args.crates_index = os.path.abspath(args.crates_index) - - # enter our scratch directory - old_dir = os.getcwd() - work_dir = tempfile.mkdtemp(prefix="scrape-crates-io") - os.chdir(work_dir) - - crates_index = (args.crates_index - if os.path.join(old_dir, args.crates_index) - else download_crates_index()) - - for (name, vers) in iter_crates(crates_index): - if name in KNOWN_UNMAINTAINED_CRATES: - continue - - with Crate(work_dir, name, vers) as c: - i = 0 - for line in c.iter_lines(): - for r in RE_REGEX.findall(line): - print((name, vers, r)) - if len(r) >= 2 and r[-2] == "\\": - continue - out.write("// {}-{}: {}\n".format(name, vers, r)) - out.write("consistent!({}_{}, {});\n\n".format( - name.replace("-", "_"), i, r)) - out.flush() - i += 1 - - # Leave the scratch directory - os.chdir(old_dir) - shutil.rmtree(work_dir) - out.close() - - -def download_crates_index(): - if call(["git", "clone", CRATES_IO_INDEX_GIT_LOC]) != 0: - print("Error cloning the crates.io index") - exit(1) - return "crates.io-index" - - -def iter_crates(crates_index): - 
exclude = set(["config.json", ".git"]) - for crate_index_file in iter_files(crates_index, exclude=exclude): - with open(crate_index_file) as f: - most_recent = list(f) - most_recent = most_recent[len(most_recent) - 1] - - crate_info = json.loads(most_recent) - if "regex" not in set(d["name"] for d in crate_info["deps"]): - continue - - if crate_info["yanked"]: - continue - yield (crate_info["name"], crate_info["vers"]) - - -def iter_files(d, exclude=set()): - for x in os.listdir(d): - if x in exclude: - continue - - fullfp = os.path.abspath(d + "/" + x) - if os.path.isfile(fullfp): - yield fullfp - elif os.path.isdir(fullfp): - for f in iter_files(fullfp, exclude): - yield f - - -class Crate(object): - def __init__(self, work_dir, name, version): - self.name = name - self.version = version - self.url = ("https://crates.io/api/v1/crates/{name}/{version}/download" - .format(name=self.name, version=self.version)) - self.filename = "{}/{}-{}.tar.gz".format( - work_dir, self.name, self.version) - - def __enter__(self): - max_retries = 1 - retries = 0 - while retries < max_retries: - retries += 1 - - r = http.request("GET", self.url, preload_content=False) - try: - print("[{}/{}] Downloading {}".format( - retries, max_retries + 1, self.url)) - with open(self.filename, "wb") as f: - while True: - data = r.read(1024) - if not data: - break - f.write(data) - except Exception: - time.sleep(1) - r.release_conn() - continue - - r.release_conn() - break - - call(["tar", "-xf", self.filename]) - - return self - - def __exit__(self, ty, value, tb): - # We are going to clean up the whole temp dir anyway, so - # we don't really need to do this. Its nice to clean up - # after ourselves though. 
- try: - shutil.rmtree(self.filename[:-len(".tar.gz")]) - os.remove(self.filename) - except Exception: - pass - - def iter_srcs(self): - g = "{crate}/**/*.rs".format(crate=self.filename[:-len(".tar.gz")]) - for rsrc in glob.iglob(g): - yield rsrc - - def iter_lines(self): - for src in self.iter_srcs(): - with open(src) as f: - for line in f: - yield line - - -if __name__ == "__main__": - main() diff --git a/src/backtrack.rs b/src/backtrack.rs deleted file mode 100644 index 4d83856ca0..0000000000 --- a/src/backtrack.rs +++ /dev/null @@ -1,282 +0,0 @@ -// This is the backtracking matching engine. It has the same exact capability -// as the full NFA simulation, except it is artificially restricted to small -// regexes on small inputs because of its memory requirements. -// -// In particular, this is a *bounded* backtracking engine. It retains worst -// case linear time by keeping track of the states that it has visited (using a -// bitmap). Namely, once a state is visited, it is never visited again. Since a -// state is keyed by `(instruction index, input index)`, we have that its time -// complexity is `O(mn)` (i.e., linear in the size of the search text). -// -// The backtracking engine can beat out the NFA simulation on small -// regexes/inputs because it doesn't have to keep track of multiple copies of -// the capture groups. In benchmarks, the backtracking engine is roughly twice -// as fast as the full NFA simulation. Note though that its performance doesn't -// scale, even if you're willing to live with the memory requirements. Namely, -// the bitset has to be zeroed on each execution, which becomes quite expensive -// on large bitsets. 
- -use crate::exec::ProgramCache; -use crate::input::{Input, InputAt}; -use crate::prog::{InstPtr, Program}; -use crate::re_trait::Slot; - -type Bits = u32; - -const BIT_SIZE: usize = 32; -const MAX_SIZE_BYTES: usize = 256 * (1 << 10); // 256 KB - -/// Returns true iff the given regex and input should be executed by this -/// engine with reasonable memory usage. -pub fn should_exec(num_insts: usize, text_len: usize) -> bool { - // Total memory usage in bytes is determined by: - // - // ((len(insts) * (len(input) + 1) + bits - 1) / bits) * (size_of(u32)) - // - // The actual limit picked is pretty much a heuristic. - // See: https://github.com/rust-lang/regex/issues/215 - let size = ((num_insts * (text_len + 1) + BIT_SIZE - 1) / BIT_SIZE) * 4; - size <= MAX_SIZE_BYTES -} - -/// A backtracking matching engine. -#[derive(Debug)] -pub struct Bounded<'a, 'm, 'r, 's, I> { - prog: &'r Program, - input: I, - matches: &'m mut [bool], - slots: &'s mut [Slot], - m: &'a mut Cache, -} - -/// Shared cached state between multiple invocations of a backtracking engine -/// in the same thread. -#[derive(Clone, Debug)] -pub struct Cache { - jobs: Vec, - visited: Vec, -} - -impl Cache { - /// Create new empty cache for the backtracking engine. - pub fn new(_prog: &Program) -> Self { - Cache { jobs: vec![], visited: vec![] } - } -} - -/// A job is an explicit unit of stack space in the backtracking engine. -/// -/// The "normal" representation is a single state transition, which corresponds -/// to an NFA state and a character in the input. However, the backtracking -/// engine must keep track of old capture group values. We use the explicit -/// stack to do it. -#[derive(Clone, Copy, Debug)] -enum Job { - Inst { ip: InstPtr, at: InputAt }, - SaveRestore { slot: usize, old_pos: Option }, -} - -impl<'a, 'm, 'r, 's, I: Input> Bounded<'a, 'm, 'r, 's, I> { - /// Execute the backtracking matching engine. 
- /// - /// If there's a match, `exec` returns `true` and populates the given - /// captures accordingly. - pub fn exec( - prog: &'r Program, - cache: &ProgramCache, - matches: &'m mut [bool], - slots: &'s mut [Slot], - input: I, - start: usize, - end: usize, - ) -> bool { - let mut cache = cache.borrow_mut(); - let cache = &mut cache.backtrack; - let start = input.at(start); - let mut b = Bounded { prog, input, matches, slots, m: cache }; - b.exec_(start, end) - } - - /// Clears the cache such that the backtracking engine can be executed - /// on some input of fixed length. - fn clear(&mut self) { - // Reset the job memory so that we start fresh. - self.m.jobs.clear(); - - // Now we need to clear the bit state set. - // We do this by figuring out how much space we need to keep track - // of the states we've visited. - // Then we reset all existing allocated space to 0. - // Finally, we request more space if we need it. - // - // This is all a little circuitous, but doing this using unchecked - // operations doesn't seem to have a measurable impact on performance. - // (Probably because backtracking is limited to such small - // inputs/regexes in the first place.) - let visited_len = - (self.prog.len() * (self.input.len() + 1) + BIT_SIZE - 1) - / BIT_SIZE; - self.m.visited.truncate(visited_len); - for v in &mut self.m.visited { - *v = 0; - } - if visited_len > self.m.visited.len() { - let len = self.m.visited.len(); - self.m.visited.reserve_exact(visited_len - len); - for _ in 0..(visited_len - len) { - self.m.visited.push(0); - } - } - } - - /// Start backtracking at the given position in the input, but also look - /// for literal prefixes. - fn exec_(&mut self, mut at: InputAt, end: usize) -> bool { - self.clear(); - // If this is an anchored regex at the beginning of the input, then - // we're either already done or we only need to try backtracking once. 
- if self.prog.is_anchored_start { - return if !at.is_start() { false } else { self.backtrack(at) }; - } - let mut matched = false; - loop { - if !self.prog.prefixes.is_empty() { - at = match self.input.prefix_at(&self.prog.prefixes, at) { - None => break, - Some(at) => at, - }; - } - matched = self.backtrack(at) || matched; - if matched && self.prog.matches.len() == 1 { - return true; - } - if at.pos() >= end { - break; - } - at = self.input.at(at.next_pos()); - } - matched - } - - /// The main backtracking loop starting at the given input position. - fn backtrack(&mut self, start: InputAt) -> bool { - // N.B. We use an explicit stack to avoid recursion. - // To avoid excessive pushing and popping, most transitions are handled - // in the `step` helper function, which only pushes to the stack when - // there's a capture or a branch. - let mut matched = false; - self.m.jobs.push(Job::Inst { ip: 0, at: start }); - while let Some(job) = self.m.jobs.pop() { - match job { - Job::Inst { ip, at } => { - if self.step(ip, at) { - // Only quit if we're matching one regex. - // If we're matching a regex set, then mush on and - // try to find other matches (if we want them). - if self.prog.matches.len() == 1 { - return true; - } - matched = true; - } - } - Job::SaveRestore { slot, old_pos } => { - if slot < self.slots.len() { - self.slots[slot] = old_pos; - } - } - } - } - matched - } - - fn step(&mut self, mut ip: InstPtr, mut at: InputAt) -> bool { - use crate::prog::Inst::*; - loop { - // This loop is an optimization to avoid constantly pushing/popping - // from the stack. Namely, if we're pushing a job only to run it - // next, avoid the push and just mutate `ip` (and possibly `at`) - // in place. 
- if self.has_visited(ip, at) { - return false; - } - match self.prog[ip] { - Match(slot) => { - if slot < self.matches.len() { - self.matches[slot] = true; - } - return true; - } - Save(ref inst) => { - if let Some(&old_pos) = self.slots.get(inst.slot) { - // If this path doesn't work out, then we save the old - // capture index (if one exists) in an alternate - // job. If the next path fails, then the alternate - // job is popped and the old capture index is restored. - self.m.jobs.push(Job::SaveRestore { - slot: inst.slot, - old_pos, - }); - self.slots[inst.slot] = Some(at.pos()); - } - ip = inst.goto; - } - Split(ref inst) => { - self.m.jobs.push(Job::Inst { ip: inst.goto2, at }); - ip = inst.goto1; - } - EmptyLook(ref inst) => { - if self.input.is_empty_match(at, inst) { - ip = inst.goto; - } else { - return false; - } - } - Char(ref inst) => { - if inst.c == at.char() { - ip = inst.goto; - at = self.input.at(at.next_pos()); - } else { - return false; - } - } - Ranges(ref inst) => { - if inst.matches(at.char()) { - ip = inst.goto; - at = self.input.at(at.next_pos()); - } else { - return false; - } - } - Bytes(ref inst) => { - if let Some(b) = at.byte() { - if inst.matches(b) { - ip = inst.goto; - at = self.input.at(at.next_pos()); - continue; - } - } - return false; - } - } - } - } - - fn has_visited(&mut self, ip: InstPtr, at: InputAt) -> bool { - let k = ip * (self.input.len() + 1) + at.pos(); - let k1 = k / BIT_SIZE; - let k2 = usize_to_u32(1 << (k & (BIT_SIZE - 1))); - if self.m.visited[k1] & k2 == 0 { - self.m.visited[k1] |= k2; - false - } else { - true - } - } -} - -fn usize_to_u32(n: usize) -> u32 { - if (n as u64) > (::std::u32::MAX as u64) { - panic!("BUG: {} is too big to fit into u32", n) - } - n as u32 -} diff --git a/src/compile.rs b/src/compile.rs deleted file mode 100644 index 0030cfb108..0000000000 --- a/src/compile.rs +++ /dev/null @@ -1,1324 +0,0 @@ -use std::collections::HashMap; -use std::fmt; -use std::iter; -use std::result; -use 
std::sync::Arc; - -use regex_syntax::hir::{self, Hir, Look}; -use regex_syntax::is_word_byte; -use regex_syntax::utf8::{Utf8Range, Utf8Sequence, Utf8Sequences}; - -use crate::prog::{ - EmptyLook, Inst, InstBytes, InstChar, InstEmptyLook, InstPtr, InstRanges, - InstSave, InstSplit, Program, -}; - -use crate::Error; - -type Result = result::Result; -type ResultOrEmpty = result::Result, Error>; - -#[derive(Debug)] -struct Patch { - hole: Hole, - entry: InstPtr, -} - -/// A compiler translates a regular expression AST to a sequence of -/// instructions. The sequence of instructions represents an NFA. -// `Compiler` is only public via the `internal` module, so avoid deriving -// `Debug`. -#[allow(missing_debug_implementations)] -pub struct Compiler { - insts: Vec, - compiled: Program, - capture_name_idx: HashMap, - num_exprs: usize, - size_limit: usize, - suffix_cache: SuffixCache, - utf8_seqs: Option, - byte_classes: ByteClassSet, - // This keeps track of extra bytes allocated while compiling the regex - // program. Currently, this corresponds to two things. First is the heap - // memory allocated by Unicode character classes ('InstRanges'). Second is - // a "fake" amount of memory used by empty sub-expressions, so that enough - // empty sub-expressions will ultimately trigger the compiler to bail - // because of a size limit restriction. (That empty sub-expressions don't - // add to heap memory usage is more-or-less an implementation detail.) In - // the second case, if we don't bail, then an excessively large repetition - // on an empty sub-expression can result in the compiler using a very large - // amount of CPU time. - extra_inst_bytes: usize, -} - -impl Compiler { - /// Create a new regular expression compiler. - /// - /// Various options can be set before calling `compile` on an expression. 
- pub fn new() -> Self { - Compiler { - insts: vec![], - compiled: Program::new(), - capture_name_idx: HashMap::new(), - num_exprs: 0, - size_limit: 10 * (1 << 20), - suffix_cache: SuffixCache::new(1000), - utf8_seqs: Some(Utf8Sequences::new('\x00', '\x00')), - byte_classes: ByteClassSet::new(), - extra_inst_bytes: 0, - } - } - - /// The size of the resulting program is limited by size_limit. If - /// the program approximately exceeds the given size (in bytes), then - /// compilation will stop and return an error. - pub fn size_limit(mut self, size_limit: usize) -> Self { - self.size_limit = size_limit; - self - } - - /// If bytes is true, then the program is compiled as a byte based - /// automaton, which incorporates UTF-8 decoding into the machine. If it's - /// false, then the automaton is Unicode scalar value based, e.g., an - /// engine utilizing such an automaton is responsible for UTF-8 decoding. - /// - /// The specific invariant is that when returning a byte based machine, - /// the neither the `Char` nor `Ranges` instructions are produced. - /// Conversely, when producing a Unicode scalar value machine, the `Bytes` - /// instruction is never produced. - /// - /// Note that `dfa(true)` implies `bytes(true)`. - pub fn bytes(mut self, yes: bool) -> Self { - self.compiled.is_bytes = yes; - self - } - - /// When disabled, the program compiled may match arbitrary bytes. - /// - /// When enabled (the default), all compiled programs exclusively match - /// valid UTF-8 bytes. - pub fn only_utf8(mut self, yes: bool) -> Self { - self.compiled.only_utf8 = yes; - self - } - - /// When set, the machine returned is suitable for use in the DFA matching - /// engine. - /// - /// In particular, this ensures that if the regex is not anchored in the - /// beginning, then a preceding `.*?` is included in the program. (The NFA - /// based engines handle the preceding `.*?` explicitly, which is difficult - /// or impossible in the DFA engine.) 
- pub fn dfa(mut self, yes: bool) -> Self { - self.compiled.is_dfa = yes; - self - } - - /// When set, the machine returned is suitable for matching text in - /// reverse. In particular, all concatenations are flipped. - pub fn reverse(mut self, yes: bool) -> Self { - self.compiled.is_reverse = yes; - self - } - - /// Compile a regular expression given its AST. - /// - /// The compiler is guaranteed to succeed unless the program exceeds the - /// specified size limit. If the size limit is exceeded, then compilation - /// stops and returns an error. - pub fn compile(mut self, exprs: &[Hir]) -> result::Result { - debug_assert!(!exprs.is_empty()); - self.num_exprs = exprs.len(); - if exprs.len() == 1 { - self.compile_one(&exprs[0]) - } else { - self.compile_many(exprs) - } - } - - fn compile_one(mut self, expr: &Hir) -> result::Result { - // If we're compiling a forward DFA and we aren't anchored, then - // add a `.*?` before the first capture group. - // Other matching engines handle this by baking the logic into the - // matching engine itself. 
- let mut dotstar_patch = Patch { hole: Hole::None, entry: 0 }; - self.compiled.is_anchored_start = - expr.properties().look_set_prefix().contains(Look::Start); - self.compiled.is_anchored_end = - expr.properties().look_set_suffix().contains(Look::End); - if self.compiled.needs_dotstar() { - dotstar_patch = self.c_dotstar()?; - self.compiled.start = dotstar_patch.entry; - } - self.compiled.captures = vec![None]; - let patch = - self.c_capture(0, expr)?.unwrap_or_else(|| self.next_inst()); - if self.compiled.needs_dotstar() { - self.fill(dotstar_patch.hole, patch.entry); - } else { - self.compiled.start = patch.entry; - } - self.fill_to_next(patch.hole); - self.compiled.matches = vec![self.insts.len()]; - self.push_compiled(Inst::Match(0)); - self.compiled.static_captures_len = - expr.properties().static_explicit_captures_len(); - self.compile_finish() - } - - fn compile_many( - mut self, - exprs: &[Hir], - ) -> result::Result { - debug_assert!(exprs.len() > 1); - - self.compiled.is_anchored_start = exprs - .iter() - .all(|e| e.properties().look_set_prefix().contains(Look::Start)); - self.compiled.is_anchored_end = exprs - .iter() - .all(|e| e.properties().look_set_suffix().contains(Look::End)); - let mut dotstar_patch = Patch { hole: Hole::None, entry: 0 }; - if self.compiled.needs_dotstar() { - dotstar_patch = self.c_dotstar()?; - self.compiled.start = dotstar_patch.entry; - } else { - self.compiled.start = 0; // first instruction is always split - } - self.fill_to_next(dotstar_patch.hole); - - let mut prev_hole = Hole::None; - for (i, expr) in exprs[0..exprs.len() - 1].iter().enumerate() { - self.fill_to_next(prev_hole); - let split = self.push_split_hole(); - let Patch { hole, entry } = - self.c_capture(0, expr)?.unwrap_or_else(|| self.next_inst()); - self.fill_to_next(hole); - self.compiled.matches.push(self.insts.len()); - self.push_compiled(Inst::Match(i)); - prev_hole = self.fill_split(split, Some(entry), None); - } - let i = exprs.len() - 1; - let Patch { 
hole, entry } = - self.c_capture(0, &exprs[i])?.unwrap_or_else(|| self.next_inst()); - self.fill(prev_hole, entry); - self.fill_to_next(hole); - self.compiled.matches.push(self.insts.len()); - self.push_compiled(Inst::Match(i)); - self.compile_finish() - } - - fn compile_finish(mut self) -> result::Result { - self.compiled.insts = - self.insts.into_iter().map(|inst| inst.unwrap()).collect(); - self.compiled.byte_classes = self.byte_classes.byte_classes(); - self.compiled.capture_name_idx = Arc::new(self.capture_name_idx); - Ok(self.compiled) - } - - /// Compile expr into self.insts, returning a patch on success, - /// or an error if we run out of memory. - /// - /// All of the c_* methods of the compiler share the contract outlined - /// here. - /// - /// The main thing that a c_* method does is mutate `self.insts` - /// to add a list of mostly compiled instructions required to execute - /// the given expression. `self.insts` contains MaybeInsts rather than - /// Insts because there is some backpatching required. - /// - /// The `Patch` value returned by each c_* method provides metadata - /// about the compiled instructions emitted to `self.insts`. The - /// `entry` member of the patch refers to the first instruction - /// (the entry point), while the `hole` member contains zero or - /// more offsets to partial instructions that need to be backpatched. - /// The c_* routine can't know where its list of instructions are going to - /// jump to after execution, so it is up to the caller to patch - /// these jumps to point to the right place. So compiling some - /// expression, e, we would end up with a situation that looked like: - /// - /// ```text - /// self.insts = [ ..., i1, i2, ..., iexit1, ..., iexitn, ...] 
- /// ^ ^ ^ - /// | \ / - /// entry \ / - /// hole - /// ``` - /// - /// To compile two expressions, e1 and e2, concatenated together we - /// would do: - /// - /// ```ignore - /// let patch1 = self.c(e1); - /// let patch2 = self.c(e2); - /// ``` - /// - /// while leaves us with a situation that looks like - /// - /// ```text - /// self.insts = [ ..., i1, ..., iexit1, ..., i2, ..., iexit2 ] - /// ^ ^ ^ ^ - /// | | | | - /// entry1 hole1 entry2 hole2 - /// ``` - /// - /// Then to merge the two patches together into one we would backpatch - /// hole1 with entry2 and return a new patch that enters at entry1 - /// and has hole2 for a hole. In fact, if you look at the c_concat - /// method you will see that it does exactly this, though it handles - /// a list of expressions rather than just the two that we use for - /// an example. - /// - /// Ok(None) is returned when an expression is compiled to no - /// instruction, and so no patch.entry value makes sense. - fn c(&mut self, expr: &Hir) -> ResultOrEmpty { - use crate::prog; - use regex_syntax::hir::HirKind::*; - - self.check_size()?; - match *expr.kind() { - Empty => self.c_empty(), - Literal(hir::Literal(ref bytes)) => { - if self.compiled.is_reverse { - let mut bytes = bytes.to_vec(); - bytes.reverse(); - self.c_literal(&bytes) - } else { - self.c_literal(bytes) - } - } - Class(hir::Class::Unicode(ref cls)) => self.c_class(cls.ranges()), - Class(hir::Class::Bytes(ref cls)) => { - if self.compiled.uses_bytes() { - self.c_class_bytes(cls.ranges()) - } else { - assert!(cls.is_ascii()); - let mut char_ranges = vec![]; - for r in cls.iter() { - let (s, e) = (r.start() as char, r.end() as char); - char_ranges.push(hir::ClassUnicodeRange::new(s, e)); - } - self.c_class(&char_ranges) - } - } - Look(ref look) => match *look { - hir::Look::Start if self.compiled.is_reverse => { - self.c_empty_look(prog::EmptyLook::EndText) - } - hir::Look::Start => { - self.c_empty_look(prog::EmptyLook::StartText) - } - hir::Look::End if 
self.compiled.is_reverse => { - self.c_empty_look(prog::EmptyLook::StartText) - } - hir::Look::End => self.c_empty_look(prog::EmptyLook::EndText), - hir::Look::StartLF if self.compiled.is_reverse => { - self.byte_classes.set_range(b'\n', b'\n'); - self.c_empty_look(prog::EmptyLook::EndLine) - } - hir::Look::StartLF => { - self.byte_classes.set_range(b'\n', b'\n'); - self.c_empty_look(prog::EmptyLook::StartLine) - } - hir::Look::EndLF if self.compiled.is_reverse => { - self.byte_classes.set_range(b'\n', b'\n'); - self.c_empty_look(prog::EmptyLook::StartLine) - } - hir::Look::EndLF => { - self.byte_classes.set_range(b'\n', b'\n'); - self.c_empty_look(prog::EmptyLook::EndLine) - } - hir::Look::StartCRLF | hir::Look::EndCRLF => { - return Err(Error::Syntax( - "CRLF-aware line anchors are not supported yet" - .to_string(), - )); - } - hir::Look::WordAscii => { - self.byte_classes.set_word_boundary(); - self.c_empty_look(prog::EmptyLook::WordBoundaryAscii) - } - hir::Look::WordAsciiNegate => { - self.byte_classes.set_word_boundary(); - self.c_empty_look(prog::EmptyLook::NotWordBoundaryAscii) - } - hir::Look::WordUnicode => { - if !cfg!(feature = "unicode-perl") { - return Err(Error::Syntax( - "Unicode word boundaries are unavailable when \ - the unicode-perl feature is disabled" - .to_string(), - )); - } - self.compiled.has_unicode_word_boundary = true; - self.byte_classes.set_word_boundary(); - // We also make sure that all ASCII bytes are in a different - // class from non-ASCII bytes. Otherwise, it's possible for - // ASCII bytes to get lumped into the same class as non-ASCII - // bytes. This in turn may cause the lazy DFA to falsely start - // when it sees an ASCII byte that maps to a byte class with - // non-ASCII bytes. This ensures that never happens. 
- self.byte_classes.set_range(0, 0x7F); - self.c_empty_look(prog::EmptyLook::WordBoundary) - } - hir::Look::WordUnicodeNegate => { - if !cfg!(feature = "unicode-perl") { - return Err(Error::Syntax( - "Unicode word boundaries are unavailable when \ - the unicode-perl feature is disabled" - .to_string(), - )); - } - self.compiled.has_unicode_word_boundary = true; - self.byte_classes.set_word_boundary(); - // See comments above for why we set the ASCII range here. - self.byte_classes.set_range(0, 0x7F); - self.c_empty_look(prog::EmptyLook::NotWordBoundary) - } - }, - Capture(hir::Capture { index, ref name, ref sub }) => { - if index as usize >= self.compiled.captures.len() { - let name = match *name { - None => None, - Some(ref boxed_str) => Some(boxed_str.to_string()), - }; - self.compiled.captures.push(name.clone()); - if let Some(name) = name { - self.capture_name_idx.insert(name, index as usize); - } - } - self.c_capture(2 * index as usize, sub) - } - Concat(ref es) => { - if self.compiled.is_reverse { - self.c_concat(es.iter().rev()) - } else { - self.c_concat(es) - } - } - Alternation(ref es) => self.c_alternate(&**es), - Repetition(ref rep) => self.c_repeat(rep), - } - } - - fn c_empty(&mut self) -> ResultOrEmpty { - // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8 - // See: CVE-2022-24713 - // - // Since 'empty' sub-expressions don't increase the size of - // the actual compiled object, we "fake" an increase in its - // size so that our 'check_size_limit' routine will eventually - // stop compilation if there are too many empty sub-expressions - // (e.g., via a large repetition). - self.extra_inst_bytes += std::mem::size_of::(); - Ok(None) - } - - fn c_capture(&mut self, first_slot: usize, expr: &Hir) -> ResultOrEmpty { - if self.num_exprs > 1 || self.compiled.is_dfa { - // Don't ever compile Save instructions for regex sets because - // they are never used. 
They are also never used in DFA programs - // because DFAs can't handle captures. - self.c(expr) - } else { - let entry = self.insts.len(); - let hole = self.push_hole(InstHole::Save { slot: first_slot }); - let patch = self.c(expr)?.unwrap_or_else(|| self.next_inst()); - self.fill(hole, patch.entry); - self.fill_to_next(patch.hole); - let hole = self.push_hole(InstHole::Save { slot: first_slot + 1 }); - Ok(Some(Patch { hole, entry })) - } - } - - fn c_dotstar(&mut self) -> Result { - let hir = if self.compiled.only_utf8() { - Hir::dot(hir::Dot::AnyChar) - } else { - Hir::dot(hir::Dot::AnyByte) - }; - Ok(self - .c(&Hir::repetition(hir::Repetition { - min: 0, - max: None, - greedy: false, - sub: Box::new(hir), - }))? - .unwrap()) - } - - fn c_char(&mut self, c: char) -> ResultOrEmpty { - if self.compiled.uses_bytes() { - if c.is_ascii() { - let b = c as u8; - let hole = - self.push_hole(InstHole::Bytes { start: b, end: b }); - self.byte_classes.set_range(b, b); - Ok(Some(Patch { hole, entry: self.insts.len() - 1 })) - } else { - self.c_class(&[hir::ClassUnicodeRange::new(c, c)]) - } - } else { - let hole = self.push_hole(InstHole::Char { c }); - Ok(Some(Patch { hole, entry: self.insts.len() - 1 })) - } - } - - fn c_class(&mut self, ranges: &[hir::ClassUnicodeRange]) -> ResultOrEmpty { - use std::mem::size_of; - - if ranges.is_empty() { - return Err(Error::Syntax( - "empty character classes are not allowed".to_string(), - )); - } - if self.compiled.uses_bytes() { - Ok(Some(CompileClass { c: self, ranges }.compile()?)) - } else { - let ranges: Vec<(char, char)> = - ranges.iter().map(|r| (r.start(), r.end())).collect(); - let hole = if ranges.len() == 1 && ranges[0].0 == ranges[0].1 { - self.push_hole(InstHole::Char { c: ranges[0].0 }) - } else { - self.extra_inst_bytes += - ranges.len() * (size_of::() * 2); - self.push_hole(InstHole::Ranges { ranges }) - }; - Ok(Some(Patch { hole, entry: self.insts.len() - 1 })) - } - } - - fn c_byte(&mut self, b: u8) -> ResultOrEmpty 
{ - self.c_class_bytes(&[hir::ClassBytesRange::new(b, b)]) - } - - fn c_class_bytes( - &mut self, - ranges: &[hir::ClassBytesRange], - ) -> ResultOrEmpty { - if ranges.is_empty() { - return Err(Error::Syntax( - "empty character classes are not allowed".to_string(), - )); - } - - let first_split_entry = self.insts.len(); - let mut holes = vec![]; - let mut prev_hole = Hole::None; - for r in &ranges[0..ranges.len() - 1] { - self.fill_to_next(prev_hole); - let split = self.push_split_hole(); - let next = self.insts.len(); - self.byte_classes.set_range(r.start(), r.end()); - holes.push(self.push_hole(InstHole::Bytes { - start: r.start(), - end: r.end(), - })); - prev_hole = self.fill_split(split, Some(next), None); - } - let next = self.insts.len(); - let r = &ranges[ranges.len() - 1]; - self.byte_classes.set_range(r.start(), r.end()); - holes.push( - self.push_hole(InstHole::Bytes { start: r.start(), end: r.end() }), - ); - self.fill(prev_hole, next); - Ok(Some(Patch { hole: Hole::Many(holes), entry: first_split_entry })) - } - - fn c_empty_look(&mut self, look: EmptyLook) -> ResultOrEmpty { - let hole = self.push_hole(InstHole::EmptyLook { look }); - Ok(Some(Patch { hole, entry: self.insts.len() - 1 })) - } - - fn c_literal(&mut self, bytes: &[u8]) -> ResultOrEmpty { - match core::str::from_utf8(bytes) { - Ok(string) => { - let mut it = string.chars(); - let Patch { mut hole, entry } = loop { - match it.next() { - None => return self.c_empty(), - Some(ch) => { - if let Some(p) = self.c_char(ch)? { - break p; - } - } - } - }; - for ch in it { - if let Some(p) = self.c_char(ch)? { - self.fill(hole, p.entry); - hole = p.hole; - } - } - Ok(Some(Patch { hole, entry })) - } - Err(_) => { - assert!(self.compiled.uses_bytes()); - let mut it = bytes.iter().copied(); - let Patch { mut hole, entry } = loop { - match it.next() { - None => return self.c_empty(), - Some(byte) => { - if let Some(p) = self.c_byte(byte)? 
{ - break p; - } - } - } - }; - for byte in it { - if let Some(p) = self.c_byte(byte)? { - self.fill(hole, p.entry); - hole = p.hole; - } - } - Ok(Some(Patch { hole, entry })) - } - } - } - - fn c_concat<'a, I>(&mut self, exprs: I) -> ResultOrEmpty - where - I: IntoIterator, - { - let mut exprs = exprs.into_iter(); - let Patch { mut hole, entry } = loop { - match exprs.next() { - None => return self.c_empty(), - Some(e) => { - if let Some(p) = self.c(e)? { - break p; - } - } - } - }; - for e in exprs { - if let Some(p) = self.c(e)? { - self.fill(hole, p.entry); - hole = p.hole; - } - } - Ok(Some(Patch { hole, entry })) - } - - fn c_alternate(&mut self, exprs: &[Hir]) -> ResultOrEmpty { - debug_assert!( - exprs.len() >= 2, - "alternates must have at least 2 exprs" - ); - - // Initial entry point is always the first split. - let first_split_entry = self.insts.len(); - - // Save up all of the holes from each alternate. They will all get - // patched to point to the same location. - let mut holes = vec![]; - - // true indicates that the hole is a split where we want to fill - // the second branch. - let mut prev_hole = (Hole::None, false); - for e in &exprs[0..exprs.len() - 1] { - if prev_hole.1 { - let next = self.insts.len(); - self.fill_split(prev_hole.0, None, Some(next)); - } else { - self.fill_to_next(prev_hole.0); - } - let split = self.push_split_hole(); - if let Some(Patch { hole, entry }) = self.c(e)? { - holes.push(hole); - prev_hole = (self.fill_split(split, Some(entry), None), false); - } else { - let (split1, split2) = split.dup_one(); - holes.push(split1); - prev_hole = (split2, true); - } - } - if let Some(Patch { hole, entry }) = self.c(&exprs[exprs.len() - 1])? { - holes.push(hole); - if prev_hole.1 { - self.fill_split(prev_hole.0, None, Some(entry)); - } else { - self.fill(prev_hole.0, entry); - } - } else { - // We ignore prev_hole.1. 
When it's true, it means we have two - // empty branches both pushing prev_hole.0 into holes, so both - // branches will go to the same place anyway. - holes.push(prev_hole.0); - } - Ok(Some(Patch { hole: Hole::Many(holes), entry: first_split_entry })) - } - - fn c_repeat(&mut self, rep: &hir::Repetition) -> ResultOrEmpty { - match (rep.min, rep.max) { - (0, Some(1)) => self.c_repeat_zero_or_one(&rep.sub, rep.greedy), - (0, None) => self.c_repeat_zero_or_more(&rep.sub, rep.greedy), - (1, None) => self.c_repeat_one_or_more(&rep.sub, rep.greedy), - (min, None) => { - self.c_repeat_range_min_or_more(&rep.sub, rep.greedy, min) - } - (min, Some(max)) => { - self.c_repeat_range(&rep.sub, rep.greedy, min, max) - } - } - } - - fn c_repeat_zero_or_one( - &mut self, - expr: &Hir, - greedy: bool, - ) -> ResultOrEmpty { - let split_entry = self.insts.len(); - let split = self.push_split_hole(); - let Patch { hole: hole_rep, entry: entry_rep } = match self.c(expr)? { - Some(p) => p, - None => return self.pop_split_hole(), - }; - let split_hole = if greedy { - self.fill_split(split, Some(entry_rep), None) - } else { - self.fill_split(split, None, Some(entry_rep)) - }; - let holes = vec![hole_rep, split_hole]; - Ok(Some(Patch { hole: Hole::Many(holes), entry: split_entry })) - } - - fn c_repeat_zero_or_more( - &mut self, - expr: &Hir, - greedy: bool, - ) -> ResultOrEmpty { - let split_entry = self.insts.len(); - let split = self.push_split_hole(); - let Patch { hole: hole_rep, entry: entry_rep } = match self.c(expr)? 
{ - Some(p) => p, - None => return self.pop_split_hole(), - }; - - self.fill(hole_rep, split_entry); - let split_hole = if greedy { - self.fill_split(split, Some(entry_rep), None) - } else { - self.fill_split(split, None, Some(entry_rep)) - }; - Ok(Some(Patch { hole: split_hole, entry: split_entry })) - } - - fn c_repeat_one_or_more( - &mut self, - expr: &Hir, - greedy: bool, - ) -> ResultOrEmpty { - let Patch { hole: hole_rep, entry: entry_rep } = match self.c(expr)? { - Some(p) => p, - None => return Ok(None), - }; - self.fill_to_next(hole_rep); - let split = self.push_split_hole(); - - let split_hole = if greedy { - self.fill_split(split, Some(entry_rep), None) - } else { - self.fill_split(split, None, Some(entry_rep)) - }; - Ok(Some(Patch { hole: split_hole, entry: entry_rep })) - } - - fn c_repeat_range_min_or_more( - &mut self, - expr: &Hir, - greedy: bool, - min: u32, - ) -> ResultOrEmpty { - let min = u32_to_usize(min); - // Using next_inst() is ok, because we can't return it (concat would - // have to return Some(_) while c_repeat_range_min_or_more returns - // None). - let patch_concat = self - .c_concat(iter::repeat(expr).take(min))? - .unwrap_or_else(|| self.next_inst()); - if let Some(patch_rep) = self.c_repeat_zero_or_more(expr, greedy)? { - self.fill(patch_concat.hole, patch_rep.entry); - Ok(Some(Patch { hole: patch_rep.hole, entry: patch_concat.entry })) - } else { - Ok(None) - } - } - - fn c_repeat_range( - &mut self, - expr: &Hir, - greedy: bool, - min: u32, - max: u32, - ) -> ResultOrEmpty { - let (min, max) = (u32_to_usize(min), u32_to_usize(max)); - debug_assert!(min <= max); - let patch_concat = self.c_concat(iter::repeat(expr).take(min))?; - if min == max { - return Ok(patch_concat); - } - // Same reasoning as in c_repeat_range_min_or_more (we know that min < - // max at this point). 
- let patch_concat = patch_concat.unwrap_or_else(|| self.next_inst()); - let initial_entry = patch_concat.entry; - // It is much simpler to compile, e.g., `a{2,5}` as: - // - // aaa?a?a? - // - // But you end up with a sequence of instructions like this: - // - // 0: 'a' - // 1: 'a', - // 2: split(3, 4) - // 3: 'a' - // 4: split(5, 6) - // 5: 'a' - // 6: split(7, 8) - // 7: 'a' - // 8: MATCH - // - // This is *incredibly* inefficient because the splits end - // up forming a chain, which has to be resolved everything a - // transition is followed. - let mut holes = vec![]; - let mut prev_hole = patch_concat.hole; - for _ in min..max { - self.fill_to_next(prev_hole); - let split = self.push_split_hole(); - let Patch { hole, entry } = match self.c(expr)? { - Some(p) => p, - None => return self.pop_split_hole(), - }; - prev_hole = hole; - if greedy { - holes.push(self.fill_split(split, Some(entry), None)); - } else { - holes.push(self.fill_split(split, None, Some(entry))); - } - } - holes.push(prev_hole); - Ok(Some(Patch { hole: Hole::Many(holes), entry: initial_entry })) - } - - /// Can be used as a default value for the c_* functions when the call to - /// c_function is followed by inserting at least one instruction that is - /// always executed after the ones written by the c* function. 
- fn next_inst(&self) -> Patch { - Patch { hole: Hole::None, entry: self.insts.len() } - } - - fn fill(&mut self, hole: Hole, goto: InstPtr) { - match hole { - Hole::None => {} - Hole::One(pc) => { - self.insts[pc].fill(goto); - } - Hole::Many(holes) => { - for hole in holes { - self.fill(hole, goto); - } - } - } - } - - fn fill_to_next(&mut self, hole: Hole) { - let next = self.insts.len(); - self.fill(hole, next); - } - - fn fill_split( - &mut self, - hole: Hole, - goto1: Option, - goto2: Option, - ) -> Hole { - match hole { - Hole::None => Hole::None, - Hole::One(pc) => match (goto1, goto2) { - (Some(goto1), Some(goto2)) => { - self.insts[pc].fill_split(goto1, goto2); - Hole::None - } - (Some(goto1), None) => { - self.insts[pc].half_fill_split_goto1(goto1); - Hole::One(pc) - } - (None, Some(goto2)) => { - self.insts[pc].half_fill_split_goto2(goto2); - Hole::One(pc) - } - (None, None) => unreachable!( - "at least one of the split \ - holes must be filled" - ), - }, - Hole::Many(holes) => { - let mut new_holes = vec![]; - for hole in holes { - new_holes.push(self.fill_split(hole, goto1, goto2)); - } - if new_holes.is_empty() { - Hole::None - } else if new_holes.len() == 1 { - new_holes.pop().unwrap() - } else { - Hole::Many(new_holes) - } - } - } - } - - fn push_compiled(&mut self, inst: Inst) { - self.insts.push(MaybeInst::Compiled(inst)); - } - - fn push_hole(&mut self, inst: InstHole) -> Hole { - let hole = self.insts.len(); - self.insts.push(MaybeInst::Uncompiled(inst)); - Hole::One(hole) - } - - fn push_split_hole(&mut self) -> Hole { - let hole = self.insts.len(); - self.insts.push(MaybeInst::Split); - Hole::One(hole) - } - - fn pop_split_hole(&mut self) -> ResultOrEmpty { - self.insts.pop(); - Ok(None) - } - - fn check_size(&self) -> result::Result<(), Error> { - use std::mem::size_of; - - let size = - self.extra_inst_bytes + (self.insts.len() * size_of::()); - if size > self.size_limit { - Err(Error::CompiledTooBig(self.size_limit)) - } else { - Ok(()) - } 
- } -} - -#[derive(Debug)] -enum Hole { - None, - One(InstPtr), - Many(Vec), -} - -impl Hole { - fn dup_one(self) -> (Self, Self) { - match self { - Hole::One(pc) => (Hole::One(pc), Hole::One(pc)), - Hole::None | Hole::Many(_) => { - unreachable!("must be called on single hole") - } - } - } -} - -#[derive(Clone, Debug)] -enum MaybeInst { - Compiled(Inst), - Uncompiled(InstHole), - Split, - Split1(InstPtr), - Split2(InstPtr), -} - -impl MaybeInst { - fn fill(&mut self, goto: InstPtr) { - let maybeinst = match *self { - MaybeInst::Split => MaybeInst::Split1(goto), - MaybeInst::Uncompiled(ref inst) => { - MaybeInst::Compiled(inst.fill(goto)) - } - MaybeInst::Split1(goto1) => { - MaybeInst::Compiled(Inst::Split(InstSplit { - goto1, - goto2: goto, - })) - } - MaybeInst::Split2(goto2) => { - MaybeInst::Compiled(Inst::Split(InstSplit { - goto1: goto, - goto2, - })) - } - _ => unreachable!( - "not all instructions were compiled! \ - found uncompiled instruction: {:?}", - self - ), - }; - *self = maybeinst; - } - - fn fill_split(&mut self, goto1: InstPtr, goto2: InstPtr) { - let filled = match *self { - MaybeInst::Split => Inst::Split(InstSplit { goto1, goto2 }), - _ => unreachable!( - "must be called on Split instruction, \ - instead it was called on: {:?}", - self - ), - }; - *self = MaybeInst::Compiled(filled); - } - - fn half_fill_split_goto1(&mut self, goto1: InstPtr) { - let half_filled = match *self { - MaybeInst::Split => goto1, - _ => unreachable!( - "must be called on Split instruction, \ - instead it was called on: {:?}", - self - ), - }; - *self = MaybeInst::Split1(half_filled); - } - - fn half_fill_split_goto2(&mut self, goto2: InstPtr) { - let half_filled = match *self { - MaybeInst::Split => goto2, - _ => unreachable!( - "must be called on Split instruction, \ - instead it was called on: {:?}", - self - ), - }; - *self = MaybeInst::Split2(half_filled); - } - - fn unwrap(self) -> Inst { - match self { - MaybeInst::Compiled(inst) => inst, - _ => unreachable!( - 
"must be called on a compiled instruction, \ - instead it was called on: {:?}", - self - ), - } - } -} - -#[derive(Clone, Debug)] -enum InstHole { - Save { slot: usize }, - EmptyLook { look: EmptyLook }, - Char { c: char }, - Ranges { ranges: Vec<(char, char)> }, - Bytes { start: u8, end: u8 }, -} - -impl InstHole { - fn fill(&self, goto: InstPtr) -> Inst { - match *self { - InstHole::Save { slot } => Inst::Save(InstSave { goto, slot }), - InstHole::EmptyLook { look } => { - Inst::EmptyLook(InstEmptyLook { goto, look }) - } - InstHole::Char { c } => Inst::Char(InstChar { goto, c }), - InstHole::Ranges { ref ranges } => Inst::Ranges(InstRanges { - goto, - ranges: ranges.clone().into_boxed_slice(), - }), - InstHole::Bytes { start, end } => { - Inst::Bytes(InstBytes { goto, start, end }) - } - } - } -} - -struct CompileClass<'a, 'b> { - c: &'a mut Compiler, - ranges: &'b [hir::ClassUnicodeRange], -} - -impl<'a, 'b> CompileClass<'a, 'b> { - fn compile(mut self) -> Result { - let mut holes = vec![]; - let mut initial_entry = None; - let mut last_split = Hole::None; - let mut utf8_seqs = self.c.utf8_seqs.take().unwrap(); - self.c.suffix_cache.clear(); - - for (i, range) in self.ranges.iter().enumerate() { - let is_last_range = i + 1 == self.ranges.len(); - utf8_seqs.reset(range.start(), range.end()); - let mut it = (&mut utf8_seqs).peekable(); - loop { - let utf8_seq = match it.next() { - None => break, - Some(utf8_seq) => utf8_seq, - }; - if is_last_range && it.peek().is_none() { - let Patch { hole, entry } = self.c_utf8_seq(&utf8_seq)?; - holes.push(hole); - self.c.fill(last_split, entry); - last_split = Hole::None; - if initial_entry.is_none() { - initial_entry = Some(entry); - } - } else { - if initial_entry.is_none() { - initial_entry = Some(self.c.insts.len()); - } - self.c.fill_to_next(last_split); - last_split = self.c.push_split_hole(); - let Patch { hole, entry } = self.c_utf8_seq(&utf8_seq)?; - holes.push(hole); - last_split = - self.c.fill_split(last_split, 
Some(entry), None); - } - } - } - self.c.utf8_seqs = Some(utf8_seqs); - Ok(Patch { hole: Hole::Many(holes), entry: initial_entry.unwrap() }) - } - - fn c_utf8_seq(&mut self, seq: &Utf8Sequence) -> Result { - if self.c.compiled.is_reverse { - self.c_utf8_seq_(seq) - } else { - self.c_utf8_seq_(seq.into_iter().rev()) - } - } - - fn c_utf8_seq_<'r, I>(&mut self, seq: I) -> Result - where - I: IntoIterator, - { - // The initial instruction for each UTF-8 sequence should be the same. - let mut from_inst = ::std::usize::MAX; - let mut last_hole = Hole::None; - for byte_range in seq { - let key = SuffixCacheKey { - from_inst, - start: byte_range.start, - end: byte_range.end, - }; - { - let pc = self.c.insts.len(); - if let Some(cached_pc) = self.c.suffix_cache.get(key, pc) { - from_inst = cached_pc; - continue; - } - } - self.c.byte_classes.set_range(byte_range.start, byte_range.end); - if from_inst == ::std::usize::MAX { - last_hole = self.c.push_hole(InstHole::Bytes { - start: byte_range.start, - end: byte_range.end, - }); - } else { - self.c.push_compiled(Inst::Bytes(InstBytes { - goto: from_inst, - start: byte_range.start, - end: byte_range.end, - })); - } - from_inst = self.c.insts.len().checked_sub(1).unwrap(); - debug_assert!(from_inst < ::std::usize::MAX); - } - debug_assert!(from_inst < ::std::usize::MAX); - Ok(Patch { hole: last_hole, entry: from_inst }) - } -} - -/// `SuffixCache` is a simple bounded hash map for caching suffix entries in -/// UTF-8 automata. For example, consider the Unicode range \u{0}-\u{FFFF}. -/// The set of byte ranges looks like this: -/// -/// [0-7F] -/// [C2-DF][80-BF] -/// [E0][A0-BF][80-BF] -/// [E1-EC][80-BF][80-BF] -/// [ED][80-9F][80-BF] -/// [EE-EF][80-BF][80-BF] -/// -/// Each line above translates to one alternate in the compiled regex program. -/// However, all but one of the alternates end in the same suffix, which is -/// a waste of an instruction. The suffix cache facilitates reusing them across -/// alternates. 
-/// -/// Note that a HashMap could be trivially used for this, but we don't need its -/// overhead. Some small bounded space (LRU style) is more than enough. -/// -/// This uses similar idea to [`SparseSet`](../sparse/struct.SparseSet.html), -/// except it uses hashes as original indices and then compares full keys for -/// validation against `dense` array. -#[derive(Debug)] -struct SuffixCache { - sparse: Box<[usize]>, - dense: Vec, -} - -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -struct SuffixCacheEntry { - key: SuffixCacheKey, - pc: InstPtr, -} - -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] -struct SuffixCacheKey { - from_inst: InstPtr, - start: u8, - end: u8, -} - -impl SuffixCache { - fn new(size: usize) -> Self { - SuffixCache { - sparse: vec![0usize; size].into(), - dense: Vec::with_capacity(size), - } - } - - fn get(&mut self, key: SuffixCacheKey, pc: InstPtr) -> Option { - let hash = self.hash(&key); - let pos = &mut self.sparse[hash]; - if let Some(entry) = self.dense.get(*pos) { - if entry.key == key { - return Some(entry.pc); - } - } - *pos = self.dense.len(); - self.dense.push(SuffixCacheEntry { key, pc }); - None - } - - fn clear(&mut self) { - self.dense.clear(); - } - - fn hash(&self, suffix: &SuffixCacheKey) -> usize { - // Basic FNV-1a hash as described: - // https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function - const FNV_PRIME: u64 = 1_099_511_628_211; - let mut h = 14_695_981_039_346_656_037; - h = (h ^ (suffix.from_inst as u64)).wrapping_mul(FNV_PRIME); - h = (h ^ (suffix.start as u64)).wrapping_mul(FNV_PRIME); - h = (h ^ (suffix.end as u64)).wrapping_mul(FNV_PRIME); - (h as usize) % self.sparse.len() - } -} - -struct ByteClassSet([bool; 256]); - -impl ByteClassSet { - fn new() -> Self { - ByteClassSet([false; 256]) - } - - fn set_range(&mut self, start: u8, end: u8) { - debug_assert!(start <= end); - if start > 0 { - self.0[start as usize - 1] = true; - } - self.0[end as usize] = true; 
- } - - fn set_word_boundary(&mut self) { - // We need to mark all ranges of bytes whose pairs result in - // evaluating \b differently. - let iswb = is_word_byte; - let mut b1: u16 = 0; - let mut b2: u16; - while b1 <= 255 { - b2 = b1 + 1; - while b2 <= 255 && iswb(b1 as u8) == iswb(b2 as u8) { - b2 += 1; - } - self.set_range(b1 as u8, (b2 - 1) as u8); - b1 = b2; - } - } - - fn byte_classes(&self) -> Vec { - // N.B. If you're debugging the DFA, it's useful to simply return - // `(0..256).collect()`, which effectively removes the byte classes - // and makes the transitions easier to read. - // (0usize..256).map(|x| x as u8).collect() - let mut byte_classes = vec![0; 256]; - let mut class = 0u8; - let mut i = 0; - loop { - byte_classes[i] = class as u8; - if i >= 255 { - break; - } - if self.0[i] { - class = class.checked_add(1).unwrap(); - } - i += 1; - } - byte_classes - } -} - -impl fmt::Debug for ByteClassSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("ByteClassSet").field(&&self.0[..]).finish() - } -} - -fn u32_to_usize(n: u32) -> usize { - // In case usize is less than 32 bits, we need to guard against overflow. - // On most platforms this compiles to nothing. - // TODO Use `std::convert::TryFrom` once it's stable. 
- if (n as u64) > (::std::usize::MAX as u64) { - panic!("BUG: {} is too big to be pointer sized", n) - } - n as usize -} - -#[cfg(test)] -mod tests { - use super::ByteClassSet; - - #[test] - fn byte_classes() { - let mut set = ByteClassSet::new(); - set.set_range(b'a', b'z'); - let classes = set.byte_classes(); - assert_eq!(classes[0], 0); - assert_eq!(classes[1], 0); - assert_eq!(classes[2], 0); - assert_eq!(classes[b'a' as usize - 1], 0); - assert_eq!(classes[b'a' as usize], 1); - assert_eq!(classes[b'm' as usize], 1); - assert_eq!(classes[b'z' as usize], 1); - assert_eq!(classes[b'z' as usize + 1], 2); - assert_eq!(classes[254], 2); - assert_eq!(classes[255], 2); - - let mut set = ByteClassSet::new(); - set.set_range(0, 2); - set.set_range(4, 6); - let classes = set.byte_classes(); - assert_eq!(classes[0], 0); - assert_eq!(classes[1], 0); - assert_eq!(classes[2], 0); - assert_eq!(classes[3], 1); - assert_eq!(classes[4], 2); - assert_eq!(classes[5], 2); - assert_eq!(classes[6], 2); - assert_eq!(classes[7], 3); - assert_eq!(classes[255], 3); - } - - #[test] - fn full_byte_classes() { - let mut set = ByteClassSet::new(); - for i in 0..256u16 { - set.set_range(i as u8, i as u8); - } - assert_eq!(set.byte_classes().len(), 256); - } -} diff --git a/src/dfa.rs b/src/dfa.rs deleted file mode 100644 index 78ed71021e..0000000000 --- a/src/dfa.rs +++ /dev/null @@ -1,1945 +0,0 @@ -/*! -The DFA matching engine. - -A DFA provides faster matching because the engine is in exactly one state at -any point in time. In the NFA, there may be multiple active states, and -considerable CPU cycles are spent shuffling them around. In finite automata -speak, the DFA follows epsilon transitions in the regex far less than the NFA. - -A DFA is a classic trade off between time and space. The NFA is slower, but -its memory requirements are typically small and predictable. The DFA is faster, -but given the right regex and the right input, the number of states in the -DFA can grow exponentially. 
To mitigate this space problem, we do two things: - -1. We implement an *online* DFA. That is, the DFA is constructed from the NFA - during a search. When a new state is computed, it is stored in a cache so - that it may be reused. An important consequence of this implementation - is that states that are never reached for a particular input are never - computed. (This is impossible in an "offline" DFA which needs to compute - all possible states up front.) -2. If the cache gets too big, we wipe it and continue matching. - -In pathological cases, a new state can be created for every byte of input. -(e.g., The regex `(a|b)*a(a|b){20}` on a long sequence of a's and b's.) -In this case, performance regresses to slightly slower than the full NFA -simulation, in large part because the cache becomes useless. If the cache -is wiped too frequently, the DFA quits and control falls back to one of the -NFA simulations. - -Because of the "lazy" nature of this DFA, the inner matching loop is -considerably more complex than one might expect out of a DFA. A number of -tricks are employed to make it fast. Tread carefully. - -N.B. While this implementation is heavily commented, Russ Cox's series of -articles on regexes is strongly recommended: -(As is the DFA implementation in RE2, which heavily influenced this -implementation.) -*/ - -use std::collections::HashMap; -use std::fmt; -use std::iter::repeat; -use std::mem; -use std::sync::Arc; - -use crate::exec::ProgramCache; -use crate::prog::{Inst, Program}; -use crate::sparse::SparseSet; - -/// Return true if and only if the given program can be executed by a DFA. -/// -/// Generally, a DFA is always possible. A pathological case where it is not -/// possible is if the number of NFA states exceeds `u32::MAX`, in which case, -/// this function will return false. -/// -/// This function will also return false if the given program has any Unicode -/// instructions (Char or Ranges) since the DFA operates on bytes only. 
-pub fn can_exec(insts: &Program) -> bool { - use crate::prog::Inst::*; - // If for some reason we manage to allocate a regex program with more - // than i32::MAX instructions, then we can't execute the DFA because we - // use 32 bit instruction pointer deltas for memory savings. - // If i32::MAX is the largest positive delta, - // then -i32::MAX == i32::MIN + 1 is the largest negative delta, - // and we are OK to use 32 bits. - if insts.dfa_size_limit == 0 || insts.len() > ::std::i32::MAX as usize { - return false; - } - for inst in insts { - match *inst { - Char(_) | Ranges(_) => return false, - EmptyLook(_) | Match(_) | Save(_) | Split(_) | Bytes(_) => {} - } - } - true -} - -/// A reusable cache of DFA states. -/// -/// This cache is reused between multiple invocations of the same regex -/// program. (It is not shared simultaneously between threads. If there is -/// contention, then new caches are created.) -#[derive(Debug)] -pub struct Cache { - /// Group persistent DFA related cache state together. The sparse sets - /// listed below are used as scratch space while computing uncached states. - inner: CacheInner, - /// qcur and qnext are ordered sets with constant time - /// addition/membership/clearing-whole-set and linear time iteration. They - /// are used to manage the sets of NFA states in DFA states when computing - /// cached DFA states. In particular, the order of the NFA states matters - /// for leftmost-first style matching. Namely, when computing a cached - /// state, the set of NFA states stops growing as soon as the first Match - /// instruction is observed. - qcur: SparseSet, - qnext: SparseSet, -} - -/// `CacheInner` is logically just a part of Cache, but groups together fields -/// that aren't passed as function parameters throughout search. (This split -/// is mostly an artifact of the borrow checker. It is happily paid.) 
-#[derive(Debug)] -struct CacheInner { - /// A cache of pre-compiled DFA states, keyed by the set of NFA states - /// and the set of empty-width flags set at the byte in the input when the - /// state was observed. - /// - /// A StatePtr is effectively a `*State`, but to avoid various inconvenient - /// things, we just pass indexes around manually. The performance impact of - /// this is probably an instruction or two in the inner loop. However, on - /// 64 bit, each StatePtr is half the size of a *State. - compiled: StateMap, - /// The transition table. - /// - /// The transition table is laid out in row-major order, where states are - /// rows and the transitions for each state are columns. At a high level, - /// given state `s` and byte `b`, the next state can be found at index - /// `s * 256 + b`. - /// - /// This is, of course, a lie. A StatePtr is actually a pointer to the - /// *start* of a row in this table. When indexing in the DFA's inner loop, - /// this removes the need to multiply the StatePtr by the stride. Yes, it - /// matters. This reduces the number of states we can store, but: the - /// stride is rarely 256 since we define transitions in terms of - /// *equivalence classes* of bytes. Each class corresponds to a set of - /// bytes that never discriminate a distinct path through the DFA from each - /// other. - trans: Transitions, - /// A set of cached start states, which are limited to the number of - /// permutations of flags set just before the initial byte of input. (The - /// index into this vec is a `EmptyFlags`.) - /// - /// N.B. A start state can be "dead" (i.e., no possible match), so we - /// represent it with a StatePtr. - start_states: Vec, - /// Stack scratch space used to follow epsilon transitions in the NFA. - /// (This permits us to avoid recursion.) - /// - /// The maximum stack size is the number of NFA states. - stack: Vec, - /// The total number of times this cache has been flushed by the DFA - /// because of space constraints. 
- flush_count: u64, - /// The total heap size of the DFA's cache. We use this to determine when - /// we should flush the cache. - size: usize, - /// Scratch space used when building instruction pointer lists for new - /// states. This helps amortize allocation. - insts_scratch_space: Vec, -} - -/// The transition table. -/// -/// It is laid out in row-major order, with states as rows and byte class -/// transitions as columns. -/// -/// The transition table is responsible for producing valid `StatePtrs`. A -/// `StatePtr` points to the start of a particular row in this table. When -/// indexing to find the next state this allows us to avoid a multiplication -/// when computing an index into the table. -#[derive(Clone)] -struct Transitions { - /// The table. - table: Vec, - /// The stride. - num_byte_classes: usize, -} - -/// Fsm encapsulates the actual execution of the DFA. -#[derive(Debug)] -pub struct Fsm<'a> { - /// prog contains the NFA instruction opcodes. DFA execution uses either - /// the `dfa` instructions or the `dfa_reverse` instructions from - /// `exec::ExecReadOnly`. (It never uses `ExecReadOnly.nfa`, which may have - /// Unicode opcodes that cannot be executed by the DFA.) - prog: &'a Program, - /// The start state. We record it here because the pointer may change - /// when the cache is wiped. - start: StatePtr, - /// The current position in the input. - at: usize, - /// Should we quit after seeing the first match? e.g., When the caller - /// uses `is_match` or `shortest_match`. - quit_after_match: bool, - /// The last state that matched. - /// - /// When no match has occurred, this is set to STATE_UNKNOWN. - /// - /// This is only useful when matching regex sets. The last match state - /// is useful because it contains all of the match instructions seen, - /// thereby allowing us to enumerate which regexes in the set matched. - last_match_si: StatePtr, - /// The input position of the last cache flush. 
We use this to determine - /// if we're thrashing in the cache too often. If so, the DFA quits so - /// that we can fall back to the NFA algorithm. - last_cache_flush: usize, - /// All cached DFA information that is persisted between searches. - cache: &'a mut CacheInner, -} - -/// The result of running the DFA. -/// -/// Generally, the result is either a match or not a match, but sometimes the -/// DFA runs too slowly because the cache size is too small. In that case, it -/// gives up with the intent of falling back to the NFA algorithm. -/// -/// The DFA can also give up if it runs out of room to create new states, or if -/// it sees non-ASCII bytes in the presence of a Unicode word boundary. -#[derive(Clone, Debug)] -pub enum Result { - Match(T), - NoMatch(usize), - Quit, -} - -impl Result { - /// Returns true if this result corresponds to a match. - pub fn is_match(&self) -> bool { - match *self { - Result::Match(_) => true, - Result::NoMatch(_) | Result::Quit => false, - } - } - - /// Maps the given function onto T and returns the result. - /// - /// If this isn't a match, then this is a no-op. - #[cfg(feature = "perf-literal")] - pub fn map U>(self, mut f: F) -> Result { - match self { - Result::Match(t) => Result::Match(f(t)), - Result::NoMatch(x) => Result::NoMatch(x), - Result::Quit => Result::Quit, - } - } - - /// Sets the non-match position. - /// - /// If this isn't a non-match, then this is a no-op. - fn set_non_match(self, at: usize) -> Result { - match self { - Result::NoMatch(_) => Result::NoMatch(at), - r => r, - } - } -} - -/// `State` is a DFA state. It contains an ordered set of NFA states (not -/// necessarily complete) and a smattering of flags. -/// -/// The flags are packed into the first byte of data. -/// -/// States don't carry their transitions. Instead, transitions are stored in -/// a single row-major table. -/// -/// Delta encoding is used to store the instruction pointers. 
-/// The first instruction pointer is stored directly starting -/// at data[1], and each following pointer is stored as an offset -/// to the previous one. If a delta is in the range -127..127, -/// it is packed into a single byte; Otherwise the byte 128 (-128 as an i8) -/// is coded as a flag, followed by 4 bytes encoding the delta. -#[derive(Clone, Eq, Hash, PartialEq)] -struct State { - data: Arc<[u8]>, -} - -/// `InstPtr` is a 32 bit pointer into a sequence of opcodes (i.e., it indexes -/// an NFA state). -/// -/// Throughout this library, this is usually set to `usize`, but we force a -/// `u32` here for the DFA to save on space. -type InstPtr = u32; - -/// Adds ip to data using delta encoding with respect to prev. -/// -/// After completion, `data` will contain `ip` and `prev` will be set to `ip`. -fn push_inst_ptr(data: &mut Vec, prev: &mut InstPtr, ip: InstPtr) { - let delta = (ip as i32) - (*prev as i32); - write_vari32(data, delta); - *prev = ip; -} - -struct InstPtrs<'a> { - base: usize, - data: &'a [u8], -} - -impl<'a> Iterator for InstPtrs<'a> { - type Item = usize; - - fn next(&mut self) -> Option { - if self.data.is_empty() { - return None; - } - let (delta, nread) = read_vari32(self.data); - let base = self.base as i32 + delta; - debug_assert!(base >= 0); - debug_assert!(nread > 0); - self.data = &self.data[nread..]; - self.base = base as usize; - Some(self.base) - } -} - -impl State { - fn flags(&self) -> StateFlags { - StateFlags(self.data[0]) - } - - fn inst_ptrs(&self) -> InstPtrs<'_> { - InstPtrs { base: 0, data: &self.data[1..] } - } -} - -/// `StatePtr` is a 32 bit pointer to the start of a row in the transition -/// table. -/// -/// It has many special values. There are two types of special values: -/// sentinels and flags. -/// -/// Sentinels corresponds to special states that carry some kind of -/// significance. There are three such states: unknown, dead and quit states. -/// -/// Unknown states are states that haven't been computed yet. 
They indicate -/// that a transition should be filled in that points to either an existing -/// cached state or a new state altogether. In general, an unknown state means -/// "follow the NFA's epsilon transitions." -/// -/// Dead states are states that can never lead to a match, no matter what -/// subsequent input is observed. This means that the DFA should quit -/// immediately and return the longest match it has found thus far. -/// -/// Quit states are states that imply the DFA is not capable of matching the -/// regex correctly. Currently, this is only used when a Unicode word boundary -/// exists in the regex *and* a non-ASCII byte is observed. -/// -/// The other type of state pointer is a state pointer with special flag bits. -/// There are two flags: a start flag and a match flag. The lower bits of both -/// kinds always contain a "valid" `StatePtr` (indicated by the `STATE_MAX` -/// mask). -/// -/// The start flag means that the state is a start state, and therefore may be -/// subject to special prefix scanning optimizations. -/// -/// The match flag means that the state is a match state, and therefore the -/// current position in the input (while searching) should be recorded. -/// -/// The above exists mostly in the service of making the inner loop fast. -/// In particular, the inner *inner* loop looks something like this: -/// -/// ```ignore -/// while state <= STATE_MAX and i < len(text): -/// state = state.next[i] -/// ``` -/// -/// This is nice because it lets us execute a lazy DFA as if it were an -/// entirely offline DFA (i.e., with very few instructions). The loop will -/// quit only when we need to examine a case that needs special attention. -type StatePtr = u32; - -/// An unknown state means that the state has not been computed yet, and that -/// the only way to progress is to compute it. 
-const STATE_UNKNOWN: StatePtr = 1 << 31; - -/// A dead state means that the state has been computed and it is known that -/// once it is entered, no future match can ever occur. -const STATE_DEAD: StatePtr = STATE_UNKNOWN + 1; - -/// A quit state means that the DFA came across some input that it doesn't -/// know how to process correctly. The DFA should quit and another matching -/// engine should be run in its place. -const STATE_QUIT: StatePtr = STATE_DEAD + 1; - -/// A start state is a state that the DFA can start in. -/// -/// Note that start states have their lower bits set to a state pointer. -const STATE_START: StatePtr = 1 << 30; - -/// A match state means that the regex has successfully matched. -/// -/// Note that match states have their lower bits set to a state pointer. -const STATE_MATCH: StatePtr = 1 << 29; - -/// The maximum state pointer. This is useful to mask out the "valid" state -/// pointer from a state with the "start" or "match" bits set. -/// -/// It doesn't make sense to use this with unknown, dead or quit state -/// pointers, since those pointers are sentinels and never have their lower -/// bits set to anything meaningful. -const STATE_MAX: StatePtr = STATE_MATCH - 1; - -/// Byte is a u8 in spirit, but a u16 in practice so that we can represent the -/// special EOF sentinel value. -#[derive(Copy, Clone, Debug)] -struct Byte(u16); - -/// A set of flags for zero-width assertions. -#[derive(Clone, Copy, Eq, Debug, Default, Hash, PartialEq)] -struct EmptyFlags { - start: bool, - end: bool, - start_line: bool, - end_line: bool, - word_boundary: bool, - not_word_boundary: bool, -} - -/// A set of flags describing various configurations of a DFA state. This is -/// represented by a `u8` so that it is compact. -#[derive(Clone, Copy, Eq, Default, Hash, PartialEq)] -struct StateFlags(u8); - -impl Cache { - /// Create new empty cache for the DFA engine. - pub fn new(prog: &Program) -> Self { - // We add 1 to account for the special EOF byte. 
- let num_byte_classes = (prog.byte_classes[255] as usize + 1) + 1; - let starts = vec![STATE_UNKNOWN; 256]; - let mut cache = Cache { - inner: CacheInner { - compiled: StateMap::new(num_byte_classes), - trans: Transitions::new(num_byte_classes), - start_states: starts, - stack: vec![], - flush_count: 0, - size: 0, - insts_scratch_space: vec![], - }, - qcur: SparseSet::new(prog.insts.len()), - qnext: SparseSet::new(prog.insts.len()), - }; - cache.inner.reset_size(); - cache - } -} - -impl CacheInner { - /// Resets the cache size to account for fixed costs, such as the program - /// and stack sizes. - fn reset_size(&mut self) { - self.size = (self.start_states.len() * mem::size_of::()) - + (self.stack.len() * mem::size_of::()); - } -} - -impl<'a> Fsm<'a> { - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn forward( - prog: &'a Program, - cache: &ProgramCache, - quit_after_match: bool, - text: &[u8], - at: usize, - ) -> Result { - let mut cache = cache.borrow_mut(); - let cache = &mut cache.dfa; - let mut dfa = Fsm { - prog, - start: 0, // filled in below - at, - quit_after_match, - last_match_si: STATE_UNKNOWN, - last_cache_flush: at, - cache: &mut cache.inner, - }; - let (empty_flags, state_flags) = dfa.start_flags(text, at); - dfa.start = - match dfa.start_state(&mut cache.qcur, empty_flags, state_flags) { - None => return Result::Quit, - Some(STATE_DEAD) => return Result::NoMatch(at), - Some(si) => si, - }; - debug_assert!(dfa.start != STATE_UNKNOWN); - dfa.exec_at(&mut cache.qcur, &mut cache.qnext, text) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn reverse( - prog: &'a Program, - cache: &ProgramCache, - quit_after_match: bool, - text: &[u8], - at: usize, - ) -> Result { - let mut cache = cache.borrow_mut(); - let cache = &mut cache.dfa_reverse; - let mut dfa = Fsm { - prog, - start: 0, // filled in below - at, - quit_after_match, - last_match_si: STATE_UNKNOWN, - last_cache_flush: at, - cache: &mut cache.inner, - }; - let 
(empty_flags, state_flags) = dfa.start_flags_reverse(text, at); - dfa.start = - match dfa.start_state(&mut cache.qcur, empty_flags, state_flags) { - None => return Result::Quit, - Some(STATE_DEAD) => return Result::NoMatch(at), - Some(si) => si, - }; - debug_assert!(dfa.start != STATE_UNKNOWN); - dfa.exec_at_reverse(&mut cache.qcur, &mut cache.qnext, text) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn forward_many( - prog: &'a Program, - cache: &ProgramCache, - matches: &mut [bool], - text: &[u8], - at: usize, - ) -> Result { - debug_assert!(matches.len() == prog.matches.len()); - let mut cache = cache.borrow_mut(); - let cache = &mut cache.dfa; - let mut dfa = Fsm { - prog, - start: 0, // filled in below - at, - quit_after_match: false, - last_match_si: STATE_UNKNOWN, - last_cache_flush: at, - cache: &mut cache.inner, - }; - let (empty_flags, state_flags) = dfa.start_flags(text, at); - dfa.start = - match dfa.start_state(&mut cache.qcur, empty_flags, state_flags) { - None => return Result::Quit, - Some(STATE_DEAD) => return Result::NoMatch(at), - Some(si) => si, - }; - debug_assert!(dfa.start != STATE_UNKNOWN); - let result = dfa.exec_at(&mut cache.qcur, &mut cache.qnext, text); - if result.is_match() { - if matches.len() == 1 { - matches[0] = true; - } else { - debug_assert!(dfa.last_match_si != STATE_UNKNOWN); - debug_assert!(dfa.last_match_si != STATE_DEAD); - for ip in dfa.state(dfa.last_match_si).inst_ptrs() { - if let Inst::Match(slot) = dfa.prog[ip] { - matches[slot] = true; - } - } - } - } - result - } - - /// Executes the DFA on a forward NFA. - /// - /// {qcur,qnext} are scratch ordered sets which may be non-empty. 
- #[cfg_attr(feature = "perf-inline", inline(always))] - fn exec_at( - &mut self, - qcur: &mut SparseSet, - qnext: &mut SparseSet, - text: &[u8], - ) -> Result { - // For the most part, the DFA is basically: - // - // last_match = null - // while current_byte != EOF: - // si = current_state.next[current_byte] - // if si is match - // last_match = si - // return last_match - // - // However, we need to deal with a few things: - // - // 1. This is an *online* DFA, so the current state's next list - // may not point to anywhere yet, so we must go out and compute - // them. (They are then cached into the current state's next list - // to avoid re-computation.) - // 2. If we come across a state that is known to be dead (i.e., never - // leads to a match), then we can quit early. - // 3. If the caller just wants to know if a match occurs, then we - // can quit as soon as we know we have a match. (Full leftmost - // first semantics require continuing on.) - // 4. If we're in the start state, then we can use a pre-computed set - // of prefix literals to skip quickly along the input. - // 5. After the input is exhausted, we run the DFA on one symbol - // that stands for EOF. This is useful for handling empty width - // assertions. - // 6. We can't actually do state.next[byte]. Instead, we have to do - // state.next[byte_classes[byte]], which permits us to keep the - // 'next' list very small. - // - // Since there's a bunch of extra stuff we need to consider, we do some - // pretty hairy tricks to get the inner loop to run as fast as - // possible. - debug_assert!(!self.prog.is_reverse); - - // The last match is the currently known ending match position. It is - // reported as an index to the most recent byte that resulted in a - // transition to a match state and is always stored in capture slot `1` - // when searching forwards. Its maximum value is `text.len()`. 
- let mut result = Result::NoMatch(self.at); - let (mut prev_si, mut next_si) = (self.start, self.start); - let mut at = self.at; - while at < text.len() { - // This is the real inner loop. We take advantage of special bits - // set in the state pointer to determine whether a state is in the - // "common" case or not. Specifically, the common case is a - // non-match non-start non-dead state that has already been - // computed. So long as we remain in the common case, this inner - // loop will chew through the input. - // - // We also unroll the loop 4 times to amortize the cost of checking - // whether we've consumed the entire input. We are also careful - // to make sure that `prev_si` always represents the previous state - // and `next_si` always represents the next state after the loop - // exits, even if it isn't always true inside the loop. - while next_si <= STATE_MAX && at < text.len() { - // Argument for safety is in the definition of next_si. - prev_si = unsafe { self.next_si(next_si, text, at) }; - at += 1; - if prev_si > STATE_MAX || at + 2 >= text.len() { - mem::swap(&mut prev_si, &mut next_si); - break; - } - next_si = unsafe { self.next_si(prev_si, text, at) }; - at += 1; - if next_si > STATE_MAX { - break; - } - prev_si = unsafe { self.next_si(next_si, text, at) }; - at += 1; - if prev_si > STATE_MAX { - mem::swap(&mut prev_si, &mut next_si); - break; - } - next_si = unsafe { self.next_si(prev_si, text, at) }; - at += 1; - } - if next_si & STATE_MATCH > 0 { - // A match state is outside of the common case because it needs - // special case analysis. In particular, we need to record the - // last position as having matched and possibly quit the DFA if - // we don't need to keep matching. - next_si &= !STATE_MATCH; - result = Result::Match(at - 1); - if self.quit_after_match { - return result; - } - self.last_match_si = next_si; - prev_si = next_si; - - // This permits short-circuiting when matching a regex set. 
- // In particular, if this DFA state contains only match states, - // then it's impossible to extend the set of matches since - // match states are final. Therefore, we can quit. - if self.prog.matches.len() > 1 { - let state = self.state(next_si); - let just_matches = - state.inst_ptrs().all(|ip| self.prog[ip].is_match()); - if just_matches { - return result; - } - } - - // Another inner loop! If the DFA stays in this particular - // match state, then we can rip through all of the input - // very quickly, and only recording the match location once - // we've left this particular state. - let cur = at; - while (next_si & !STATE_MATCH) == prev_si - && at + 2 < text.len() - { - // Argument for safety is in the definition of next_si. - next_si = unsafe { - self.next_si(next_si & !STATE_MATCH, text, at) - }; - at += 1; - } - if at > cur { - result = Result::Match(at - 2); - } - } else if next_si & STATE_START > 0 { - // A start state isn't in the common case because we may - // want to do quick prefix scanning. If the program doesn't - // have a detected prefix, then start states are actually - // considered common and this case is never reached. - debug_assert!(self.has_prefix()); - next_si &= !STATE_START; - prev_si = next_si; - at = match self.prefix_at(text, at) { - None => return Result::NoMatch(text.len()), - Some(i) => i, - }; - } else if next_si >= STATE_UNKNOWN { - if next_si == STATE_QUIT { - return Result::Quit; - } - // Finally, this corresponds to the case where the transition - // entered a state that can never lead to a match or a state - // that hasn't been computed yet. The latter being the "slow" - // path. - let byte = Byte::byte(text[at - 1]); - // We no longer care about the special bits in the state - // pointer. - prev_si &= STATE_MAX; - // Record where we are. This is used to track progress for - // determining whether we should quit if we've flushed the - // cache too much. 
- self.at = at; - next_si = match self.next_state(qcur, qnext, prev_si, byte) { - None => return Result::Quit, - Some(STATE_DEAD) => return result.set_non_match(at), - Some(si) => si, - }; - debug_assert!(next_si != STATE_UNKNOWN); - if next_si & STATE_MATCH > 0 { - next_si &= !STATE_MATCH; - result = Result::Match(at - 1); - if self.quit_after_match { - return result; - } - self.last_match_si = next_si; - } - prev_si = next_si; - } else { - prev_si = next_si; - } - } - - // Run the DFA once more on the special EOF sentinel value. - // We don't care about the special bits in the state pointer any more, - // so get rid of them. - prev_si &= STATE_MAX; - prev_si = match self.next_state(qcur, qnext, prev_si, Byte::eof()) { - None => return Result::Quit, - Some(STATE_DEAD) => return result.set_non_match(text.len()), - Some(si) => si & !STATE_START, - }; - debug_assert!(prev_si != STATE_UNKNOWN); - if prev_si & STATE_MATCH > 0 { - prev_si &= !STATE_MATCH; - self.last_match_si = prev_si; - result = Result::Match(text.len()); - } - result - } - - /// Executes the DFA on a reverse NFA. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn exec_at_reverse( - &mut self, - qcur: &mut SparseSet, - qnext: &mut SparseSet, - text: &[u8], - ) -> Result { - // The comments in `exec_at` above mostly apply here too. The main - // difference is that we move backwards over the input and we look for - // the longest possible match instead of the leftmost-first match. - // - // N.B. The code duplication here is regrettable. Efforts to improve - // it without sacrificing performance are welcome. ---AG - debug_assert!(self.prog.is_reverse); - let mut result = Result::NoMatch(self.at); - let (mut prev_si, mut next_si) = (self.start, self.start); - let mut at = self.at; - while at > 0 { - while next_si <= STATE_MAX && at > 0 { - // Argument for safety is in the definition of next_si. 
- at -= 1; - prev_si = unsafe { self.next_si(next_si, text, at) }; - if prev_si > STATE_MAX || at <= 4 { - mem::swap(&mut prev_si, &mut next_si); - break; - } - at -= 1; - next_si = unsafe { self.next_si(prev_si, text, at) }; - if next_si > STATE_MAX { - break; - } - at -= 1; - prev_si = unsafe { self.next_si(next_si, text, at) }; - if prev_si > STATE_MAX { - mem::swap(&mut prev_si, &mut next_si); - break; - } - at -= 1; - next_si = unsafe { self.next_si(prev_si, text, at) }; - } - if next_si & STATE_MATCH > 0 { - next_si &= !STATE_MATCH; - result = Result::Match(at + 1); - if self.quit_after_match { - return result; - } - self.last_match_si = next_si; - prev_si = next_si; - let cur = at; - while (next_si & !STATE_MATCH) == prev_si && at >= 2 { - // Argument for safety is in the definition of next_si. - at -= 1; - next_si = unsafe { - self.next_si(next_si & !STATE_MATCH, text, at) - }; - } - if at < cur { - result = Result::Match(at + 2); - } - } else if next_si >= STATE_UNKNOWN { - if next_si == STATE_QUIT { - return Result::Quit; - } - let byte = Byte::byte(text[at]); - prev_si &= STATE_MAX; - self.at = at; - next_si = match self.next_state(qcur, qnext, prev_si, byte) { - None => return Result::Quit, - Some(STATE_DEAD) => return result.set_non_match(at), - Some(si) => si, - }; - debug_assert!(next_si != STATE_UNKNOWN); - if next_si & STATE_MATCH > 0 { - next_si &= !STATE_MATCH; - result = Result::Match(at + 1); - if self.quit_after_match { - return result; - } - self.last_match_si = next_si; - } - prev_si = next_si; - } else { - prev_si = next_si; - } - } - - // Run the DFA once more on the special EOF sentinel value. 
- prev_si = match self.next_state(qcur, qnext, prev_si, Byte::eof()) { - None => return Result::Quit, - Some(STATE_DEAD) => return result.set_non_match(0), - Some(si) => si, - }; - debug_assert!(prev_si != STATE_UNKNOWN); - if prev_si & STATE_MATCH > 0 { - prev_si &= !STATE_MATCH; - self.last_match_si = prev_si; - result = Result::Match(0); - } - result - } - - /// next_si transitions to the next state, where the transition input - /// corresponds to text[i]. - /// - /// This elides bounds checks, and is therefore not safe. - #[cfg_attr(feature = "perf-inline", inline(always))] - unsafe fn next_si(&self, si: StatePtr, text: &[u8], i: usize) -> StatePtr { - // What is the argument for safety here? - // We have three unchecked accesses that could possibly violate safety: - // - // 1. The given byte of input (`text[i]`). - // 2. The class of the byte of input (`classes[text[i]]`). - // 3. The transition for the class (`trans[si + cls]`). - // - // (1) is only safe when calling next_si is guarded by - // `i < text.len()`. - // - // (2) is the easiest case to guarantee since `text[i]` is always a - // `u8` and `self.prog.byte_classes` always has length `u8::MAX`. - // (See `ByteClassSet.byte_classes` in `compile.rs`.) - // - // (3) is only safe if (1)+(2) are safe. Namely, the transitions - // of every state are defined to have length equal to the number of - // byte classes in the program. Therefore, a valid class leads to a - // valid transition. (All possible transitions are valid lookups, even - // if it points to a state that hasn't been computed yet.) (3) also - // relies on `si` being correct, but StatePtrs should only ever be - // retrieved from the transition table, which ensures they are correct. 
- debug_assert!(i < text.len()); - let b = *text.get_unchecked(i); - debug_assert!((b as usize) < self.prog.byte_classes.len()); - let cls = *self.prog.byte_classes.get_unchecked(b as usize); - self.cache.trans.next_unchecked(si, cls as usize) - } - - /// Computes the next state given the current state and the current input - /// byte (which may be EOF). - /// - /// If STATE_DEAD is returned, then there is no valid state transition. - /// This implies that no permutation of future input can lead to a match - /// state. - /// - /// STATE_UNKNOWN can never be returned. - fn exec_byte( - &mut self, - qcur: &mut SparseSet, - qnext: &mut SparseSet, - mut si: StatePtr, - b: Byte, - ) -> Option { - use crate::prog::Inst::*; - - // Initialize a queue with the current DFA state's NFA states. - qcur.clear(); - for ip in self.state(si).inst_ptrs() { - qcur.insert(ip); - } - - // Before inspecting the current byte, we may need to also inspect - // whether the position immediately preceding the current byte - // satisfies the empty assertions found in the current state. - // - // We only need to do this step if there are any empty assertions in - // the current state. - let is_word_last = self.state(si).flags().is_word(); - let is_word = b.is_ascii_word(); - if self.state(si).flags().has_empty() { - // Compute the flags immediately preceding the current byte. - // This means we only care about the "end" or "end line" flags. - // (The "start" flags are computed immediately following the - // current byte and are handled below.) - let mut flags = EmptyFlags::default(); - if b.is_eof() { - flags.end = true; - flags.end_line = true; - } else if b.as_byte().map_or(false, |b| b == b'\n') { - flags.end_line = true; - } - if is_word_last == is_word { - flags.not_word_boundary = true; - } else { - flags.word_boundary = true; - } - // Now follow epsilon transitions from every NFA state, but make - // sure we only follow transitions that satisfy our flags. 
- qnext.clear(); - for &ip in &*qcur { - self.follow_epsilons(usize_to_u32(ip), qnext, flags); - } - mem::swap(qcur, qnext); - } - - // Now we set flags for immediately after the current byte. Since start - // states are processed separately, and are the only states that can - // have the StartText flag set, we therefore only need to worry about - // the StartLine flag here. - // - // We do also keep track of whether this DFA state contains a NFA state - // that is a matching state. This is precisely how we delay the DFA - // matching by one byte in order to process the special EOF sentinel - // byte. Namely, if this DFA state containing a matching NFA state, - // then it is the *next* DFA state that is marked as a match. - let mut empty_flags = EmptyFlags::default(); - let mut state_flags = StateFlags::default(); - empty_flags.start_line = b.as_byte().map_or(false, |b| b == b'\n'); - if b.is_ascii_word() { - state_flags.set_word(); - } - // Now follow all epsilon transitions again, but only after consuming - // the current byte. - qnext.clear(); - for &ip in &*qcur { - match self.prog[ip as usize] { - // These states never happen in a byte-based program. - Char(_) | Ranges(_) => unreachable!(), - // These states are handled when following epsilon transitions. - Save(_) | Split(_) | EmptyLook(_) => {} - Match(_) => { - state_flags.set_match(); - if !self.continue_past_first_match() { - break; - } else if self.prog.matches.len() > 1 - && !qnext.contains(ip as usize) - { - // If we are continuing on to find other matches, - // then keep a record of the match states we've seen. 
- qnext.insert(ip); - } - } - Bytes(ref inst) => { - if b.as_byte().map_or(false, |b| inst.matches(b)) { - self.follow_epsilons( - inst.goto as InstPtr, - qnext, - empty_flags, - ); - } - } - } - } - - let cache = if b.is_eof() && self.prog.matches.len() > 1 { - // If we're processing the last byte of the input and we're - // matching a regex set, then make the next state contain the - // previous states transitions. We do this so that the main - // matching loop can extract all of the match instructions. - mem::swap(qcur, qnext); - // And don't cache this state because it's totally bunk. - false - } else { - true - }; - - // We've now built up the set of NFA states that ought to comprise the - // next DFA state, so try to find it in the cache, and if it doesn't - // exist, cache it. - // - // N.B. We pass `&mut si` here because the cache may clear itself if - // it has gotten too full. When that happens, the location of the - // current state may change. - let mut next = - match self.cached_state(qnext, state_flags, Some(&mut si)) { - None => return None, - Some(next) => next, - }; - if (self.start & !STATE_START) == next { - // Start states can never be match states since all matches are - // delayed by one byte. - debug_assert!(!self.state(next).flags().is_match()); - next = self.start_ptr(next); - } - if next <= STATE_MAX && self.state(next).flags().is_match() { - next |= STATE_MATCH; - } - debug_assert!(next != STATE_UNKNOWN); - // And now store our state in the current state's next list. - if cache { - let cls = self.byte_class(b); - self.cache.trans.set_next(si, cls, next); - } - Some(next) - } - - /// Follows the epsilon transitions starting at (and including) `ip`. The - /// resulting states are inserted into the ordered set `q`. - /// - /// Conditional epsilon transitions (i.e., empty width assertions) are only - /// followed if they are satisfied by the given flags, which should - /// represent the flags set at the current location in the input. 
- /// - /// If the current location corresponds to the empty string, then only the - /// end line and/or end text flags may be set. If the current location - /// corresponds to a real byte in the input, then only the start line - /// and/or start text flags may be set. - /// - /// As an exception to the above, when finding the initial state, any of - /// the above flags may be set: - /// - /// If matching starts at the beginning of the input, then start text and - /// start line should be set. If the input is empty, then end text and end - /// line should also be set. - /// - /// If matching starts after the beginning of the input, then only start - /// line should be set if the preceding byte is `\n`. End line should never - /// be set in this case. (Even if the following byte is a `\n`, it will - /// be handled in a subsequent DFA state.) - fn follow_epsilons( - &mut self, - ip: InstPtr, - q: &mut SparseSet, - flags: EmptyFlags, - ) { - use crate::prog::EmptyLook::*; - use crate::prog::Inst::*; - - // We need to traverse the NFA to follow epsilon transitions, so avoid - // recursion with an explicit stack. - self.cache.stack.push(ip); - while let Some(mut ip) = self.cache.stack.pop() { - // Try to munch through as many states as possible without - // pushes/pops to the stack. - loop { - // Don't visit states we've already added. - if q.contains(ip as usize) { - break; - } - q.insert(ip as usize); - match self.prog[ip as usize] { - Char(_) | Ranges(_) => unreachable!(), - Match(_) | Bytes(_) => { - break; - } - EmptyLook(ref inst) => { - // Only follow empty assertion states if our flags - // satisfy the assertion. 
- match inst.look { - StartLine if flags.start_line => { - ip = inst.goto as InstPtr; - } - EndLine if flags.end_line => { - ip = inst.goto as InstPtr; - } - StartText if flags.start => { - ip = inst.goto as InstPtr; - } - EndText if flags.end => { - ip = inst.goto as InstPtr; - } - WordBoundaryAscii if flags.word_boundary => { - ip = inst.goto as InstPtr; - } - NotWordBoundaryAscii - if flags.not_word_boundary => - { - ip = inst.goto as InstPtr; - } - WordBoundary if flags.word_boundary => { - ip = inst.goto as InstPtr; - } - NotWordBoundary if flags.not_word_boundary => { - ip = inst.goto as InstPtr; - } - StartLine | EndLine | StartText | EndText - | WordBoundaryAscii | NotWordBoundaryAscii - | WordBoundary | NotWordBoundary => { - break; - } - } - } - Save(ref inst) => { - ip = inst.goto as InstPtr; - } - Split(ref inst) => { - self.cache.stack.push(inst.goto2 as InstPtr); - ip = inst.goto1 as InstPtr; - } - } - } - } - } - - /// Find a previously computed state matching the given set of instructions - /// and is_match bool. - /// - /// The given set of instructions should represent a single state in the - /// NFA along with all states reachable without consuming any input. - /// - /// The is_match bool should be true if and only if the preceding DFA state - /// contains an NFA matching state. The cached state produced here will - /// then signify a match. (This enables us to delay a match by one byte, - /// in order to account for the EOF sentinel byte.) - /// - /// If the cache is full, then it is wiped before caching a new state. - /// - /// The current state should be specified if it exists, since it will need - /// to be preserved if the cache clears itself. (Start states are - /// always saved, so they should not be passed here.) It takes a mutable - /// pointer to the index because if the cache is cleared, the state's - /// location may change. 
- fn cached_state( - &mut self, - q: &SparseSet, - mut state_flags: StateFlags, - current_state: Option<&mut StatePtr>, - ) -> Option { - // If we couldn't come up with a non-empty key to represent this state, - // then it is dead and can never lead to a match. - // - // Note that inst_flags represent the set of empty width assertions - // in q. We use this as an optimization in exec_byte to determine when - // we should follow epsilon transitions at the empty string preceding - // the current byte. - let key = match self.cached_state_key(q, &mut state_flags) { - None => return Some(STATE_DEAD), - Some(v) => v, - }; - // In the cache? Cool. Done. - if let Some(si) = self.cache.compiled.get_ptr(&key) { - return Some(si); - } - // If the cache has gotten too big, wipe it. - if self.approximate_size() > self.prog.dfa_size_limit - && !self.clear_cache_and_save(current_state) - { - // Ooops. DFA is giving up. - return None; - } - // Allocate room for our state and add it. - self.add_state(key) - } - - /// Produces a key suitable for describing a state in the DFA cache. - /// - /// The key invariant here is that equivalent keys are produced for any two - /// sets of ordered NFA states (and toggling of whether the previous NFA - /// states contain a match state) that do not discriminate a match for any - /// input. - /// - /// Specifically, q should be an ordered set of NFA states and is_match - /// should be true if and only if the previous NFA states contained a match - /// state. - fn cached_state_key( - &mut self, - q: &SparseSet, - state_flags: &mut StateFlags, - ) -> Option { - use crate::prog::Inst::*; - - // We need to build up enough information to recognize pre-built states - // in the DFA. Generally speaking, this includes every instruction - // except for those which are purely epsilon transitions, e.g., the - // Save and Split instructions. 
- // - // Empty width assertions are also epsilon transitions, but since they - // are conditional, we need to make them part of a state's key in the - // cache. - - let mut insts = - mem::replace(&mut self.cache.insts_scratch_space, vec![]); - insts.clear(); - // Reserve 1 byte for flags. - insts.push(0); - - let mut prev = 0; - for &ip in q { - let ip = usize_to_u32(ip); - match self.prog[ip as usize] { - Char(_) | Ranges(_) => unreachable!(), - Save(_) | Split(_) => {} - Bytes(_) => push_inst_ptr(&mut insts, &mut prev, ip), - EmptyLook(_) => { - state_flags.set_empty(); - push_inst_ptr(&mut insts, &mut prev, ip) - } - Match(_) => { - push_inst_ptr(&mut insts, &mut prev, ip); - if !self.continue_past_first_match() { - break; - } - } - } - } - // If we couldn't transition to any other instructions and we didn't - // see a match when expanding NFA states previously, then this is a - // dead state and no amount of additional input can transition out - // of this state. - let opt_state = if insts.len() == 1 && !state_flags.is_match() { - None - } else { - let StateFlags(f) = *state_flags; - insts[0] = f; - Some(State { data: Arc::from(&*insts) }) - }; - self.cache.insts_scratch_space = insts; - opt_state - } - - /// Clears the cache, but saves and restores current_state if it is not - /// none. - /// - /// The current state must be provided here in case its location in the - /// cache changes. - /// - /// This returns false if the cache is not cleared and the DFA should - /// give up. - fn clear_cache_and_save( - &mut self, - current_state: Option<&mut StatePtr>, - ) -> bool { - if self.cache.compiled.is_empty() { - // Nothing to clear... - return true; - } - match current_state { - None => self.clear_cache(), - Some(si) => { - let cur = self.state(*si).clone(); - if !self.clear_cache() { - return false; - } - // The unwrap is OK because we just cleared the cache and - // therefore know that the next state pointer won't exceed - // STATE_MAX. 
- *si = self.restore_state(cur).unwrap(); - true - } - } - } - - /// Wipes the state cache, but saves and restores the current start state. - /// - /// This returns false if the cache is not cleared and the DFA should - /// give up. - fn clear_cache(&mut self) -> bool { - // Bail out of the DFA if we're moving too "slowly." - // A heuristic from RE2: assume the DFA is too slow if it is processing - // 10 or fewer bytes per state. - // Additionally, we permit the cache to be flushed a few times before - // caling it quits. - let nstates = self.cache.compiled.len(); - if self.cache.flush_count >= 3 - && self.at >= self.last_cache_flush - && (self.at - self.last_cache_flush) <= 10 * nstates - { - return false; - } - // Update statistics tracking cache flushes. - self.last_cache_flush = self.at; - self.cache.flush_count += 1; - - // OK, actually flush the cache. - let start = self.state(self.start & !STATE_START).clone(); - let last_match = if self.last_match_si <= STATE_MAX { - Some(self.state(self.last_match_si).clone()) - } else { - None - }; - self.cache.reset_size(); - self.cache.trans.clear(); - self.cache.compiled.clear(); - for s in &mut self.cache.start_states { - *s = STATE_UNKNOWN; - } - // The unwraps are OK because we just cleared the cache and therefore - // know that the next state pointer won't exceed STATE_MAX. - let start_ptr = self.restore_state(start).unwrap(); - self.start = self.start_ptr(start_ptr); - if let Some(last_match) = last_match { - self.last_match_si = self.restore_state(last_match).unwrap(); - } - true - } - - /// Restores the given state back into the cache, and returns a pointer - /// to it. - fn restore_state(&mut self, state: State) -> Option { - // If we've already stored this state, just return a pointer to it. - // None will be the wiser. - if let Some(si) = self.cache.compiled.get_ptr(&state) { - return Some(si); - } - self.add_state(state) - } - - /// Returns the next state given the current state si and current byte - /// b. 
{qcur,qnext} are used as scratch space for storing ordered NFA - /// states. - /// - /// This tries to fetch the next state from the cache, but if that fails, - /// it computes the next state, caches it and returns a pointer to it. - /// - /// The pointer can be to a real state, or it can be STATE_DEAD. - /// STATE_UNKNOWN cannot be returned. - /// - /// None is returned if a new state could not be allocated (i.e., the DFA - /// ran out of space and thinks it's running too slowly). - fn next_state( - &mut self, - qcur: &mut SparseSet, - qnext: &mut SparseSet, - si: StatePtr, - b: Byte, - ) -> Option { - if si == STATE_DEAD { - return Some(STATE_DEAD); - } - match self.cache.trans.next(si, self.byte_class(b)) { - STATE_UNKNOWN => self.exec_byte(qcur, qnext, si, b), - STATE_QUIT => None, - nsi => Some(nsi), - } - } - - /// Computes and returns the start state, where searching begins at - /// position `at` in `text`. If the state has already been computed, - /// then it is pulled from the cache. If the state hasn't been cached, - /// then it is computed, cached and a pointer to it is returned. - /// - /// This may return STATE_DEAD but never STATE_UNKNOWN. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn start_state( - &mut self, - q: &mut SparseSet, - empty_flags: EmptyFlags, - state_flags: StateFlags, - ) -> Option { - // Compute an index into our cache of start states based on the set - // of empty/state flags set at the current position in the input. We - // don't use every flag since not all flags matter. For example, since - // matches are delayed by one byte, start states can never be match - // states. 
- let flagi = { - (((empty_flags.start as u8) << 0) - | ((empty_flags.end as u8) << 1) - | ((empty_flags.start_line as u8) << 2) - | ((empty_flags.end_line as u8) << 3) - | ((empty_flags.word_boundary as u8) << 4) - | ((empty_flags.not_word_boundary as u8) << 5) - | ((state_flags.is_word() as u8) << 6)) as usize - }; - match self.cache.start_states[flagi] { - STATE_UNKNOWN => {} - si => return Some(si), - } - q.clear(); - let start = usize_to_u32(self.prog.start); - self.follow_epsilons(start, q, empty_flags); - // Start states can never be match states because we delay every match - // by one byte. Given an empty string and an empty match, the match - // won't actually occur until the DFA processes the special EOF - // sentinel byte. - let sp = match self.cached_state(q, state_flags, None) { - None => return None, - Some(sp) => self.start_ptr(sp), - }; - self.cache.start_states[flagi] = sp; - Some(sp) - } - - /// Computes the set of starting flags for the given position in text. - /// - /// This should only be used when executing the DFA forwards over the - /// input. - fn start_flags(&self, text: &[u8], at: usize) -> (EmptyFlags, StateFlags) { - let mut empty_flags = EmptyFlags::default(); - let mut state_flags = StateFlags::default(); - empty_flags.start = at == 0; - empty_flags.end = text.is_empty(); - empty_flags.start_line = at == 0 || text[at - 1] == b'\n'; - empty_flags.end_line = text.is_empty(); - - let is_word_last = at > 0 && Byte::byte(text[at - 1]).is_ascii_word(); - let is_word = at < text.len() && Byte::byte(text[at]).is_ascii_word(); - if is_word_last { - state_flags.set_word(); - } - if is_word == is_word_last { - empty_flags.not_word_boundary = true; - } else { - empty_flags.word_boundary = true; - } - (empty_flags, state_flags) - } - - /// Computes the set of starting flags for the given position in text. - /// - /// This should only be used when executing the DFA in reverse over the - /// input. 
- fn start_flags_reverse( - &self, - text: &[u8], - at: usize, - ) -> (EmptyFlags, StateFlags) { - let mut empty_flags = EmptyFlags::default(); - let mut state_flags = StateFlags::default(); - empty_flags.start = at == text.len(); - empty_flags.end = text.is_empty(); - empty_flags.start_line = at == text.len() || text[at] == b'\n'; - empty_flags.end_line = text.is_empty(); - - let is_word_last = - at < text.len() && Byte::byte(text[at]).is_ascii_word(); - let is_word = at > 0 && Byte::byte(text[at - 1]).is_ascii_word(); - if is_word_last { - state_flags.set_word(); - } - if is_word == is_word_last { - empty_flags.not_word_boundary = true; - } else { - empty_flags.word_boundary = true; - } - (empty_flags, state_flags) - } - - /// Returns a reference to a State given a pointer to it. - fn state(&self, si: StatePtr) -> &State { - self.cache.compiled.get_state(si).unwrap() - } - - /// Adds the given state to the DFA. - /// - /// This allocates room for transitions out of this state in - /// self.cache.trans. The transitions can be set with the returned - /// StatePtr. - /// - /// If None is returned, then the state limit was reached and the DFA - /// should quit. - fn add_state(&mut self, state: State) -> Option { - // This will fail if the next state pointer exceeds STATE_PTR. In - // practice, the cache limit will prevent us from ever getting here, - // but maybe callers will set the cache size to something ridiculous... - let si = match self.cache.trans.add() { - None => return None, - Some(si) => si, - }; - // If the program has a Unicode word boundary, then set any transitions - // for non-ASCII bytes to STATE_QUIT. If the DFA stumbles over such a - // transition, then it will quit and an alternative matching engine - // will take over. 
- if self.prog.has_unicode_word_boundary { - for b in 128..256 { - let cls = self.byte_class(Byte::byte(b as u8)); - self.cache.trans.set_next(si, cls, STATE_QUIT); - } - } - // Finally, put our actual state on to our heap of states and index it - // so we can find it later. - self.cache.size += self.cache.trans.state_heap_size() - + state.data.len() - + (2 * mem::size_of::()) - + mem::size_of::(); - self.cache.compiled.insert(state, si); - // Transition table and set of states and map should all be in sync. - debug_assert!( - self.cache.compiled.len() == self.cache.trans.num_states() - ); - Some(si) - } - - /// Quickly finds the next occurrence of any literal prefixes in the regex. - /// If there are no literal prefixes, then the current position is - /// returned. If there are literal prefixes and one could not be found, - /// then None is returned. - /// - /// This should only be called when the DFA is in a start state. - fn prefix_at(&self, text: &[u8], at: usize) -> Option { - self.prog.prefixes.find(&text[at..]).map(|(s, _)| at + s) - } - - /// Returns the number of byte classes required to discriminate transitions - /// in each state. - /// - /// invariant: num_byte_classes() == len(State.next) - fn num_byte_classes(&self) -> usize { - // We add 1 to account for the special EOF byte. - (self.prog.byte_classes[255] as usize + 1) + 1 - } - - /// Given an input byte or the special EOF sentinel, return its - /// corresponding byte class. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn byte_class(&self, b: Byte) -> usize { - match b.as_byte() { - None => self.num_byte_classes() - 1, - Some(b) => self.u8_class(b), - } - } - - /// Like byte_class, but explicitly for u8s. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn u8_class(&self, b: u8) -> usize { - self.prog.byte_classes[b as usize] as usize - } - - /// Returns true if the DFA should continue searching past the first match. 
- /// - /// Leftmost first semantics in the DFA are preserved by not following NFA - /// transitions after the first match is seen. - /// - /// On occasion, we want to avoid leftmost first semantics to find either - /// the longest match (for reverse search) or all possible matches (for - /// regex sets). - fn continue_past_first_match(&self) -> bool { - self.prog.is_reverse || self.prog.matches.len() > 1 - } - - /// Returns true if there is a prefix we can quickly search for. - fn has_prefix(&self) -> bool { - !self.prog.is_reverse - && !self.prog.prefixes.is_empty() - && !self.prog.is_anchored_start - } - - /// Sets the STATE_START bit in the given state pointer if and only if - /// we have a prefix to scan for. - /// - /// If there's no prefix, then it's a waste to treat the start state - /// specially. - fn start_ptr(&self, si: StatePtr) -> StatePtr { - if self.has_prefix() { - si | STATE_START - } else { - si - } - } - - /// Approximate size returns the approximate heap space currently used by - /// the DFA. It is used to determine whether the DFA's state cache needs to - /// be wiped. Namely, it is possible that for certain regexes on certain - /// inputs, a new state could be created for every byte of input. (This is - /// bad for memory use, so we bound it with a cache.) - fn approximate_size(&self) -> usize { - self.cache.size - } -} - -/// An abstraction for representing a map of states. The map supports two -/// different ways of state lookup. One is fast constant time access via a -/// state pointer. The other is a hashmap lookup based on the DFA's -/// constituent NFA states. -/// -/// A DFA state internally uses an Arc such that we only need to store the -/// set of NFA states on the heap once, even though we support looking up -/// states by two different means. A more natural way to express this might -/// use raw pointers, but an Arc is safe and effectively achieves the same -/// thing. 
-#[derive(Debug)] -struct StateMap { - /// The keys are not actually static but rely on always pointing to a - /// buffer in `states` which will never be moved except when clearing - /// the map or on drop, in which case the keys of this map will be - /// removed before - map: HashMap, - /// Our set of states. Note that `StatePtr / num_byte_classes` indexes - /// this Vec rather than just a `StatePtr`. - states: Vec, - /// The number of byte classes in the DFA. Used to index `states`. - num_byte_classes: usize, -} - -impl StateMap { - fn new(num_byte_classes: usize) -> StateMap { - StateMap { map: HashMap::new(), states: vec![], num_byte_classes } - } - - fn len(&self) -> usize { - self.states.len() - } - - fn is_empty(&self) -> bool { - self.states.is_empty() - } - - fn get_ptr(&self, state: &State) -> Option { - self.map.get(state).cloned() - } - - fn get_state(&self, si: StatePtr) -> Option<&State> { - self.states.get(si as usize / self.num_byte_classes) - } - - fn insert(&mut self, state: State, si: StatePtr) { - self.map.insert(state.clone(), si); - self.states.push(state); - } - - fn clear(&mut self) { - self.map.clear(); - self.states.clear(); - } -} - -impl Transitions { - /// Create a new transition table. - /// - /// The number of byte classes corresponds to the stride. Every state will - /// have `num_byte_classes` slots for transitions. - fn new(num_byte_classes: usize) -> Transitions { - Transitions { table: vec![], num_byte_classes } - } - - /// Returns the total number of states currently in this table. - fn num_states(&self) -> usize { - self.table.len() / self.num_byte_classes - } - - /// Allocates room for one additional state and returns a pointer to it. - /// - /// If there's no more room, None is returned. 
- fn add(&mut self) -> Option { - let si = self.table.len(); - if si > STATE_MAX as usize { - return None; - } - self.table.extend(repeat(STATE_UNKNOWN).take(self.num_byte_classes)); - Some(usize_to_u32(si)) - } - - /// Clears the table of all states. - fn clear(&mut self) { - self.table.clear(); - } - - /// Sets the transition from (si, cls) to next. - fn set_next(&mut self, si: StatePtr, cls: usize, next: StatePtr) { - self.table[si as usize + cls] = next; - } - - /// Returns the transition corresponding to (si, cls). - fn next(&self, si: StatePtr, cls: usize) -> StatePtr { - self.table[si as usize + cls] - } - - /// The heap size, in bytes, of a single state in the transition table. - fn state_heap_size(&self) -> usize { - self.num_byte_classes * mem::size_of::() - } - - /// Like `next`, but uses unchecked access and is therefore not safe. - unsafe fn next_unchecked(&self, si: StatePtr, cls: usize) -> StatePtr { - debug_assert!((si as usize) < self.table.len()); - debug_assert!(cls < self.num_byte_classes); - *self.table.get_unchecked(si as usize + cls) - } -} - -impl StateFlags { - fn is_match(&self) -> bool { - self.0 & 0b0000_0001 > 0 - } - - fn set_match(&mut self) { - self.0 |= 0b0000_0001; - } - - fn is_word(&self) -> bool { - self.0 & 0b0000_0010 > 0 - } - - fn set_word(&mut self) { - self.0 |= 0b0000_0010; - } - - fn has_empty(&self) -> bool { - self.0 & 0b0000_0100 > 0 - } - - fn set_empty(&mut self) { - self.0 |= 0b0000_0100; - } -} - -impl Byte { - fn byte(b: u8) -> Self { - Byte(b as u16) - } - fn eof() -> Self { - Byte(256) - } - fn is_eof(&self) -> bool { - self.0 == 256 - } - - fn is_ascii_word(&self) -> bool { - let b = match self.as_byte() { - None => return false, - Some(b) => b, - }; - match b { - b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'_' => true, - _ => false, - } - } - - fn as_byte(&self) -> Option { - if self.is_eof() { - None - } else { - Some(self.0 as u8) - } - } -} - -impl fmt::Debug for State { - fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { - let ips: Vec = self.inst_ptrs().collect(); - f.debug_struct("State") - .field("flags", &self.flags()) - .field("insts", &ips) - .finish() - } -} - -impl fmt::Debug for Transitions { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut fmtd = f.debug_map(); - for si in 0..self.num_states() { - let s = si * self.num_byte_classes; - let e = s + self.num_byte_classes; - fmtd.entry(&si.to_string(), &TransitionsRow(&self.table[s..e])); - } - fmtd.finish() - } -} - -struct TransitionsRow<'a>(&'a [StatePtr]); - -impl<'a> fmt::Debug for TransitionsRow<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut fmtd = f.debug_map(); - for (b, si) in self.0.iter().enumerate() { - match *si { - STATE_UNKNOWN => {} - STATE_DEAD => { - fmtd.entry(&vb(b as usize), &"DEAD"); - } - si => { - fmtd.entry(&vb(b as usize), &si.to_string()); - } - } - } - fmtd.finish() - } -} - -impl fmt::Debug for StateFlags { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("StateFlags") - .field("is_match", &self.is_match()) - .field("is_word", &self.is_word()) - .field("has_empty", &self.has_empty()) - .finish() - } -} - -/// Helper function for formatting a byte as a nice-to-read escaped string. 
-fn vb(b: usize) -> String { - use std::ascii::escape_default; - - if b > ::std::u8::MAX as usize { - "EOF".to_owned() - } else { - let escaped = escape_default(b as u8).collect::>(); - String::from_utf8_lossy(&escaped).into_owned() - } -} - -fn usize_to_u32(n: usize) -> u32 { - if (n as u64) > (::std::u32::MAX as u64) { - panic!("BUG: {} is too big to fit into u32", n) - } - n as u32 -} - -#[allow(dead_code)] // useful for debugging -fn show_state_ptr(si: StatePtr) -> String { - let mut s = format!("{:?}", si & STATE_MAX); - if si == STATE_UNKNOWN { - s = format!("{} (unknown)", s); - } - if si == STATE_DEAD { - s = format!("{} (dead)", s); - } - if si == STATE_QUIT { - s = format!("{} (quit)", s); - } - if si & STATE_START > 0 { - s = format!("{} (start)", s); - } - if si & STATE_MATCH > 0 { - s = format!("{} (match)", s); - } - s -} - -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn write_vari32(data: &mut Vec, n: i32) { - let mut un = (n as u32) << 1; - if n < 0 { - un = !un; - } - write_varu32(data, un) -} - -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn read_vari32(data: &[u8]) -> (i32, usize) { - let (un, i) = read_varu32(data); - let mut n = (un >> 1) as i32; - if un & 1 != 0 { - n = !n; - } - (n, i) -} - -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn write_varu32(data: &mut Vec, mut n: u32) { - while n >= 0b1000_0000 { - data.push((n as u8) | 0b1000_0000); - n >>= 7; - } - data.push(n as u8); -} - -/// https://developers.google.com/protocol-buffers/docs/encoding#varints -fn read_varu32(data: &[u8]) -> (u32, usize) { - let mut n: u32 = 0; - let mut shift: u32 = 0; - for (i, &b) in data.iter().enumerate() { - if b < 0b1000_0000 { - return (n | ((b as u32) << shift), i + 1); - } - n |= ((b as u32) & 0b0111_1111) << shift; - shift += 7; - } - (0, 0) -} - -#[cfg(test)] -mod tests { - - use super::{ - push_inst_ptr, read_vari32, read_varu32, write_vari32, write_varu32, - 
State, StateFlags, - }; - use quickcheck::{quickcheck, Gen, QuickCheck}; - use std::sync::Arc; - - #[test] - fn prop_state_encode_decode() { - fn p(mut ips: Vec, flags: u8) -> bool { - // It looks like our encoding scheme can't handle instruction - // pointers at or above 2**31. We should fix that, but it seems - // unlikely to occur in real code due to the amount of memory - // required for such a state machine. So for now, we just clamp - // our test data. - for ip in &mut ips { - if *ip >= 1 << 31 { - *ip = (1 << 31) - 1; - } - } - let mut data = vec![flags]; - let mut prev = 0; - for &ip in ips.iter() { - push_inst_ptr(&mut data, &mut prev, ip); - } - let state = State { data: Arc::from(&data[..]) }; - - let expected: Vec = - ips.into_iter().map(|ip| ip as usize).collect(); - let got: Vec = state.inst_ptrs().collect(); - expected == got && state.flags() == StateFlags(flags) - } - QuickCheck::new() - .gen(Gen::new(10_000)) - .quickcheck(p as fn(Vec, u8) -> bool); - } - - #[test] - fn prop_read_write_u32() { - fn p(n: u32) -> bool { - let mut buf = vec![]; - write_varu32(&mut buf, n); - let (got, nread) = read_varu32(&buf); - nread == buf.len() && got == n - } - quickcheck(p as fn(u32) -> bool); - } - - #[test] - fn prop_read_write_i32() { - fn p(n: i32) -> bool { - let mut buf = vec![]; - write_vari32(&mut buf, n); - let (got, nread) = read_vari32(&buf); - nread == buf.len() && got == n - } - quickcheck(p as fn(i32) -> bool); - } -} diff --git a/src/error.rs b/src/error.rs index 6c341f604b..13e32d56d1 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,7 +1,7 @@ -use std::fmt; -use std::iter::repeat; +use regex_automata::meta; /// An error that occurred during parsing or compiling a regular expression. +#[non_exhaustive] #[derive(Clone, PartialEq)] pub enum Error { /// A syntax error. @@ -27,29 +27,43 @@ pub enum Error { /// approaches may be appropriate. Instead, you'll have to determine just /// how big of a regex you want to allow. 
CompiledTooBig(usize), - /// Hints that destructuring should not be exhaustive. - /// - /// This enum may grow additional variants, so this makes sure clients - /// don't count on exhaustive matching. (Otherwise, adding a new variant - /// could break existing code.) - #[doc(hidden)] - __Nonexhaustive, } -impl ::std::error::Error for Error { +impl Error { + pub(crate) fn from_meta_build_error(err: meta::BuildError) -> Error { + if let Some(size_limit) = err.size_limit() { + Error::CompiledTooBig(size_limit) + } else if let Some(ref err) = err.syntax_error() { + Error::Syntax(err.to_string()) + } else { + // This is a little suspect. Technically there are more ways for + // a meta regex to fail to build other than "exceeded size limit" + // and "syntax error." For example, if there are too many states + // or even too many patterns. But in practice this is probably + // good enough. The worst thing that happens is that Error::Syntax + // represents an error that isn't technically a syntax error, but + // the actual message will still be shown. So... it's not too bad. + // + // We really should have made the Error type in the regex crate + // completely opaque. Rookie mistake. + Error::Syntax(err.to_string()) + } + } +} + +impl std::error::Error for Error { // TODO: Remove this method entirely on the next breaking semver release. 
#[allow(deprecated)] fn description(&self) -> &str { match *self { Error::Syntax(ref err) => err, Error::CompiledTooBig(_) => "compiled program too big", - Error::__Nonexhaustive => unreachable!(), } } } -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::Syntax(ref err) => err.fmt(f), Error::CompiledTooBig(limit) => write!( @@ -57,7 +71,6 @@ impl fmt::Display for Error { "Compiled regex exceeds size limit of {} bytes.", limit ), - Error::__Nonexhaustive => unreachable!(), } } } @@ -66,11 +79,11 @@ impl fmt::Display for Error { // errors when people use `Regex::new(...).unwrap()`. It's a little weird, // but the `Syntax` variant is already storing a `String` anyway, so we might // as well format it nicely. -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl std::fmt::Debug for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::Syntax(ref err) => { - let hr: String = repeat('~').take(79).collect(); + let hr: String = core::iter::repeat('~').take(79).collect(); writeln!(f, "Syntax(")?; writeln!(f, "{}", hr)?; writeln!(f, "{}", err)?; @@ -81,9 +94,6 @@ impl fmt::Debug for Error { Error::CompiledTooBig(limit) => { f.debug_tuple("CompiledTooBig").field(&limit).finish() } - Error::__Nonexhaustive => { - f.debug_tuple("__Nonexhaustive").finish() - } } } } diff --git a/src/exec.rs b/src/exec.rs deleted file mode 100644 index c449b38cf3..0000000000 --- a/src/exec.rs +++ /dev/null @@ -1,1748 +0,0 @@ -use std::cell::RefCell; -use std::collections::HashMap; -use std::panic::AssertUnwindSafe; -use std::sync::Arc; - -#[cfg(feature = "perf-literal")] -use aho_corasick::{AhoCorasick, MatchKind}; -use regex_syntax::hir::literal; -use regex_syntax::hir::{Hir, Look}; -use regex_syntax::ParserBuilder; - -use crate::backtrack; 
-use crate::compile::Compiler; -#[cfg(feature = "perf-dfa")] -use crate::dfa; -use crate::error::Error; -use crate::input::{ByteInput, CharInput}; -use crate::literal::LiteralSearcher; -use crate::pikevm; -use crate::pool::{Pool, PoolGuard}; -use crate::prog::Program; -use crate::re_builder::RegexOptions; -use crate::re_bytes; -use crate::re_set; -use crate::re_trait::{Locations, RegularExpression, Slot}; -use crate::re_unicode; -use crate::utf8::next_utf8; - -/// `Exec` manages the execution of a regular expression. -/// -/// In particular, this manages the various compiled forms of a single regular -/// expression and the choice of which matching engine to use to execute a -/// regular expression. -#[derive(Debug)] -pub struct Exec { - /// All read only state. - ro: Arc, - /// A pool of reusable values for the various matching engines. - /// - /// Note that boxing this value is not strictly necessary, but it is an - /// easy way to ensure that T does not bloat the stack sized used by a pool - /// in the case where T is big. And this turns out to be the case at the - /// time of writing for regex's use of this pool. At the time of writing, - /// the size of a Regex on the stack is 856 bytes. Boxing this value - /// reduces that size to 16 bytes. - pool: Box>, -} - -/// `ExecNoSync` is like `Exec`, except it embeds a reference to a cache. This -/// means it is no longer Sync, but we can now avoid the overhead of -/// synchronization to fetch the cache. -#[derive(Debug)] -pub struct ExecNoSync<'c> { - /// All read only state. - ro: &'c Arc, - /// Caches for the various matching engines. - cache: PoolGuard<'c, ProgramCache>, -} - -/// `ExecNoSyncStr` is like `ExecNoSync`, but matches on &str instead of &[u8]. -#[derive(Debug)] -pub struct ExecNoSyncStr<'c>(ExecNoSync<'c>); - -/// `ExecReadOnly` comprises all read only state for a regex. Namely, all such -/// state is determined at compile time and never changes during search. 
-#[derive(Debug)] -struct ExecReadOnly { - /// The original regular expressions given by the caller to compile. - res: Vec, - /// A compiled program that is used in the NFA simulation and backtracking. - /// It can be byte-based or Unicode codepoint based. - /// - /// N.B. It is not possibly to make this byte-based from the public API. - /// It is only used for testing byte based programs in the NFA simulations. - nfa: Program, - /// A compiled byte based program for DFA execution. This is only used - /// if a DFA can be executed. (Currently, only word boundary assertions are - /// not supported.) Note that this program contains an embedded `.*?` - /// preceding the first capture group, unless the regex is anchored at the - /// beginning. - #[allow(dead_code)] - dfa: Program, - /// The same as above, except the program is reversed (and there is no - /// preceding `.*?`). This is used by the DFA to find the starting location - /// of matches. - #[allow(dead_code)] - dfa_reverse: Program, - /// A set of suffix literals extracted from the regex. - /// - /// Prefix literals are stored on the `Program`, since they are used inside - /// the matching engines. - #[allow(dead_code)] - suffixes: LiteralSearcher, - /// An Aho-Corasick automaton with leftmost-first match semantics. - /// - /// This is only set when the entire regex is a simple unanchored - /// alternation of literals. We could probably use it more circumstances, - /// but this is already hacky enough in this architecture. - /// - /// N.B. We use u32 as a state ID representation under the assumption that - /// if we were to exhaust the ID space, we probably would have long - /// surpassed the compilation size limit. - #[cfg(feature = "perf-literal")] - ac: Option, - /// match_type encodes as much upfront knowledge about how we're going to - /// execute a search as possible. 
- match_type: MatchType, -} - -/// Facilitates the construction of an executor by exposing various knobs -/// to control how a regex is executed and what kinds of resources it's -/// permitted to use. -// `ExecBuilder` is only public via the `internal` module, so avoid deriving -// `Debug`. -#[allow(missing_debug_implementations)] -pub struct ExecBuilder { - options: RegexOptions, - match_type: Option, - bytes: bool, - only_utf8: bool, -} - -/// Parsed represents a set of parsed regular expressions and their detected -/// literals. -struct Parsed { - exprs: Vec, - prefixes: literal::Seq, - suffixes: literal::Seq, - bytes: bool, -} - -impl ExecBuilder { - /// Create a regex execution builder. - /// - /// This uses default settings for everything except the regex itself, - /// which must be provided. Further knobs can be set by calling methods, - /// and then finally, `build` to actually create the executor. - pub fn new(re: &str) -> Self { - Self::new_many(&[re]) - } - - /// Like new, but compiles the union of the given regular expressions. - /// - /// Note that when compiling 2 or more regular expressions, capture groups - /// are completely unsupported. (This means both `find` and `captures` - /// won't work.) - pub fn new_many(res: I) -> Self - where - S: AsRef, - I: IntoIterator, - { - let mut opts = RegexOptions::default(); - opts.pats = res.into_iter().map(|s| s.as_ref().to_owned()).collect(); - Self::new_options(opts) - } - - /// Create a regex execution builder. - pub fn new_options(opts: RegexOptions) -> Self { - ExecBuilder { - options: opts, - match_type: None, - bytes: false, - only_utf8: true, - } - } - - /// Set the matching engine to be automatically determined. - /// - /// This is the default state and will apply whatever optimizations are - /// possible, such as running a DFA. - /// - /// This overrides whatever was previously set via the `nfa` or - /// `bounded_backtracking` methods. 
- pub fn automatic(mut self) -> Self { - self.match_type = None; - self - } - - /// Sets the matching engine to use the NFA algorithm no matter what - /// optimizations are possible. - /// - /// This overrides whatever was previously set via the `automatic` or - /// `bounded_backtracking` methods. - pub fn nfa(mut self) -> Self { - self.match_type = Some(MatchType::Nfa(MatchNfaType::PikeVM)); - self - } - - /// Sets the matching engine to use a bounded backtracking engine no - /// matter what optimizations are possible. - /// - /// One must use this with care, since the bounded backtracking engine - /// uses memory proportion to `len(regex) * len(text)`. - /// - /// This overrides whatever was previously set via the `automatic` or - /// `nfa` methods. - pub fn bounded_backtracking(mut self) -> Self { - self.match_type = Some(MatchType::Nfa(MatchNfaType::Backtrack)); - self - } - - /// Compiles byte based programs for use with the NFA matching engines. - /// - /// By default, the NFA engines match on Unicode scalar values. They can - /// be made to use byte based programs instead. In general, the byte based - /// programs are slower because of a less efficient encoding of character - /// classes. - /// - /// Note that this does not impact DFA matching engines, which always - /// execute on bytes. - pub fn bytes(mut self, yes: bool) -> Self { - self.bytes = yes; - self - } - - /// When disabled, the program compiled may match arbitrary bytes. - /// - /// When enabled (the default), all compiled programs exclusively match - /// valid UTF-8 bytes. - pub fn only_utf8(mut self, yes: bool) -> Self { - self.only_utf8 = yes; - self - } - - /// Set the Unicode flag. - pub fn unicode(mut self, yes: bool) -> Self { - self.options.unicode = yes; - self - } - - /// Parse the current set of patterns into their AST and extract literals. 
- fn parse(&self) -> Result { - let mut exprs = Vec::with_capacity(self.options.pats.len()); - let mut prefixes = Some(literal::Seq::empty()); - let mut suffixes = Some(literal::Seq::empty()); - let mut bytes = false; - let is_set = self.options.pats.len() > 1; - // If we're compiling a regex set and that set has any anchored - // expressions, then disable all literal optimizations. - for pat in &self.options.pats { - let mut parser = ParserBuilder::new() - .octal(self.options.octal) - .case_insensitive(self.options.case_insensitive) - .multi_line(self.options.multi_line) - .dot_matches_new_line(self.options.dot_matches_new_line) - .swap_greed(self.options.swap_greed) - .ignore_whitespace(self.options.ignore_whitespace) - .unicode(self.options.unicode) - .utf8(self.only_utf8) - .nest_limit(self.options.nest_limit) - .build(); - let expr = - parser.parse(pat).map_err(|e| Error::Syntax(e.to_string()))?; - let props = expr.properties(); - // This used to just check whether the HIR matched valid UTF-8 - // or not, but in regex-syntax 0.7, we changed our definition of - // "matches valid UTF-8" to exclude zero-width matches. And in - // particular, previously, we considered WordAsciiNegate (that - // is '(?-u:\B)') to be capable of matching invalid UTF-8. Our - // matcher engines were built under this assumption and fixing - // them is not worth it with the imminent plan to switch over to - // regex-automata. So for now, we retain the previous behavior by - // just explicitly treating the presence of a negated ASCII word - // boundary as forcing use to use a byte oriented automaton. - bytes = bytes - || !props.is_utf8() - || props.look_set().contains(Look::WordAsciiNegate); - - if cfg!(feature = "perf-literal") { - if !props.look_set_prefix().contains(Look::Start) - && props.look_set().contains(Look::Start) - { - // Partial anchors unfortunately make it hard to use - // prefixes, so disable them. 
- prefixes = None; - } else if is_set - && props.look_set_prefix_any().contains(Look::Start) - { - // Regex sets with anchors do not go well with literal - // optimizations. - prefixes = None; - } else if props.look_set_prefix_any().contains_word() { - // The new literal extractor ignores look-around while - // the old one refused to extract prefixes from regexes - // that began with a \b. These old creaky regex internals - // can't deal with it, so we drop it. - prefixes = None; - } else if props.look_set_prefix_any().contains(Look::StartLF) { - // Similar to the reasoning for word boundaries, this old - // regex engine can't handle literal prefixes with '(?m:^)' - // at the beginning of a regex. - prefixes = None; - } - - if !props.look_set_suffix().contains(Look::End) - && props.look_set().contains(Look::End) - { - // Partial anchors unfortunately make it hard to use - // suffixes, so disable them. - suffixes = None; - } else if is_set - && props.look_set_suffix_any().contains(Look::End) - { - // Regex sets with anchors do not go well with literal - // optimizations. - suffixes = None; - } else if props.look_set_suffix_any().contains_word() { - // See the prefix case for reasoning here. - suffixes = None; - } else if props.look_set_suffix_any().contains(Look::EndLF) { - // See the prefix case for reasoning here. - suffixes = None; - } - - let (mut pres, mut suffs) = - if prefixes.is_none() && suffixes.is_none() { - (literal::Seq::infinite(), literal::Seq::infinite()) - } else { - literal_analysis(&expr) - }; - // These old creaky regex internals can't handle cases where - // the literal sequences are exact but there are look-around - // assertions. So we make sure the sequences are inexact if - // there are look-around assertions anywhere. This forces the - // regex engines to run instead of assuming that a literal - // match implies an overall match. 
- if !props.look_set().is_empty() { - pres.make_inexact(); - suffs.make_inexact(); - } - prefixes = prefixes.and_then(|mut prefixes| { - prefixes.union(&mut pres); - Some(prefixes) - }); - suffixes = suffixes.and_then(|mut suffixes| { - suffixes.union(&mut suffs); - Some(suffixes) - }); - } - exprs.push(expr); - } - Ok(Parsed { - exprs, - prefixes: prefixes.unwrap_or_else(literal::Seq::empty), - suffixes: suffixes.unwrap_or_else(literal::Seq::empty), - bytes, - }) - } - - /// Build an executor that can run a regular expression. - pub fn build(self) -> Result { - // Special case when we have no patterns to compile. - // This can happen when compiling a regex set. - if self.options.pats.is_empty() { - let ro = Arc::new(ExecReadOnly { - res: vec![], - nfa: Program::new(), - dfa: Program::new(), - dfa_reverse: Program::new(), - suffixes: LiteralSearcher::empty(), - #[cfg(feature = "perf-literal")] - ac: None, - match_type: MatchType::Nothing, - }); - let pool = ExecReadOnly::new_pool(&ro); - return Ok(Exec { ro, pool }); - } - let parsed = self.parse()?; - let mut nfa = Compiler::new() - .size_limit(self.options.size_limit) - .bytes(self.bytes || parsed.bytes) - .only_utf8(self.only_utf8) - .compile(&parsed.exprs)?; - let mut dfa = Compiler::new() - .size_limit(self.options.size_limit) - .dfa(true) - .only_utf8(self.only_utf8) - .compile(&parsed.exprs)?; - let mut dfa_reverse = Compiler::new() - .size_limit(self.options.size_limit) - .dfa(true) - .only_utf8(self.only_utf8) - .reverse(true) - .compile(&parsed.exprs)?; - - #[cfg(feature = "perf-literal")] - let ac = self.build_aho_corasick(&parsed); - nfa.prefixes = LiteralSearcher::prefixes(parsed.prefixes); - dfa.prefixes = nfa.prefixes.clone(); - dfa.dfa_size_limit = self.options.dfa_size_limit; - dfa_reverse.dfa_size_limit = self.options.dfa_size_limit; - - let mut ro = ExecReadOnly { - res: self.options.pats, - nfa, - dfa, - dfa_reverse, - suffixes: LiteralSearcher::suffixes(parsed.suffixes), - #[cfg(feature = 
"perf-literal")] - ac, - match_type: MatchType::Nothing, - }; - ro.match_type = ro.choose_match_type(self.match_type); - - let ro = Arc::new(ro); - let pool = ExecReadOnly::new_pool(&ro); - Ok(Exec { ro, pool }) - } - - #[cfg(feature = "perf-literal")] - fn build_aho_corasick(&self, parsed: &Parsed) -> Option { - if parsed.exprs.len() != 1 { - return None; - } - let lits = match alternation_literals(&parsed.exprs[0]) { - None => return None, - Some(lits) => lits, - }; - // If we have a small number of literals, then let Teddy handle - // things (see literal/mod.rs). - if lits.len() <= 32 { - return None; - } - Some( - AhoCorasick::builder() - .match_kind(MatchKind::LeftmostFirst) - .build(&lits) - // This should never happen because we'd long exceed the - // compilation limit for regexes first. - .expect("AC automaton too big"), - ) - } -} - -impl<'c> RegularExpression for ExecNoSyncStr<'c> { - type Text = str; - - fn slots_len(&self) -> usize { - self.0.slots_len() - } - - fn next_after_empty(&self, text: &str, i: usize) -> usize { - next_utf8(text.as_bytes(), i) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn shortest_match_at(&self, text: &str, start: usize) -> Option { - self.0.shortest_match_at(text.as_bytes(), start) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match_at(&self, text: &str, start: usize) -> bool { - self.0.is_match_at(text.as_bytes(), start) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_at(&self, text: &str, start: usize) -> Option<(usize, usize)> { - self.0.find_at(text.as_bytes(), start) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn captures_read_at( - &self, - locs: &mut Locations, - text: &str, - start: usize, - ) -> Option<(usize, usize)> { - self.0.captures_read_at(locs, text.as_bytes(), start) - } -} - -impl<'c> RegularExpression for ExecNoSync<'c> { - type Text = [u8]; - - /// Returns the number of capture slots in the regular expression. 
(There - /// are two slots for every capture group, corresponding to possibly empty - /// start and end locations of the capture.) - fn slots_len(&self) -> usize { - self.ro.nfa.captures.len() * 2 - } - - fn next_after_empty(&self, _text: &[u8], i: usize) -> usize { - i + 1 - } - - /// Returns the end of a match location, possibly occurring before the - /// end location of the correct leftmost-first match. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn shortest_match_at(&self, text: &[u8], start: usize) -> Option { - if !self.is_anchor_end_match(text) { - return None; - } - match self.ro.match_type { - #[cfg(feature = "perf-literal")] - MatchType::Literal(ty) => { - self.find_literals(ty, text, start).map(|(_, e)| e) - } - #[cfg(feature = "perf-dfa")] - MatchType::Dfa | MatchType::DfaMany => { - match self.shortest_dfa(text, start) { - dfa::Result::Match(end) => Some(end), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => self.shortest_nfa(text, start), - } - } - #[cfg(feature = "perf-dfa")] - MatchType::DfaAnchoredReverse => { - match dfa::Fsm::reverse( - &self.ro.dfa_reverse, - self.cache.value(), - true, - &text[start..], - text.len() - start, - ) { - dfa::Result::Match(_) => Some(text.len()), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => self.shortest_nfa(text, start), - } - } - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - MatchType::DfaSuffix => { - match self.shortest_dfa_reverse_suffix(text, start) { - dfa::Result::Match(e) => Some(e), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => self.shortest_nfa(text, start), - } - } - MatchType::Nfa(ty) => self.shortest_nfa_type(ty, text, start), - MatchType::Nothing => None, - } - } - - /// Returns true if and only if the regex matches text. - /// - /// For single regular expressions, this is equivalent to calling - /// shortest_match(...).is_some(). 
- #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_match_at(&self, text: &[u8], start: usize) -> bool { - if !self.is_anchor_end_match(text) { - return false; - } - // We need to do this dance because shortest_match relies on the NFA - // filling in captures[1], but a RegexSet has no captures. In other - // words, a RegexSet can't (currently) use shortest_match. ---AG - match self.ro.match_type { - #[cfg(feature = "perf-literal")] - MatchType::Literal(ty) => { - self.find_literals(ty, text, start).is_some() - } - #[cfg(feature = "perf-dfa")] - MatchType::Dfa | MatchType::DfaMany => { - match self.shortest_dfa(text, start) { - dfa::Result::Match(_) => true, - dfa::Result::NoMatch(_) => false, - dfa::Result::Quit => self.match_nfa(text, start), - } - } - #[cfg(feature = "perf-dfa")] - MatchType::DfaAnchoredReverse => { - match dfa::Fsm::reverse( - &self.ro.dfa_reverse, - self.cache.value(), - true, - &text[start..], - text.len() - start, - ) { - dfa::Result::Match(_) => true, - dfa::Result::NoMatch(_) => false, - dfa::Result::Quit => self.match_nfa(text, start), - } - } - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - MatchType::DfaSuffix => { - match self.shortest_dfa_reverse_suffix(text, start) { - dfa::Result::Match(_) => true, - dfa::Result::NoMatch(_) => false, - dfa::Result::Quit => self.match_nfa(text, start), - } - } - MatchType::Nfa(ty) => self.match_nfa_type(ty, text, start), - MatchType::Nothing => false, - } - } - - /// Finds the start and end location of the leftmost-first match, starting - /// at the given location. 
- #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_at(&self, text: &[u8], start: usize) -> Option<(usize, usize)> { - if !self.is_anchor_end_match(text) { - return None; - } - match self.ro.match_type { - #[cfg(feature = "perf-literal")] - MatchType::Literal(ty) => self.find_literals(ty, text, start), - #[cfg(feature = "perf-dfa")] - MatchType::Dfa => match self.find_dfa_forward(text, start) { - dfa::Result::Match((s, e)) => Some((s, e)), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => { - self.find_nfa(MatchNfaType::Auto, text, start) - } - }, - #[cfg(feature = "perf-dfa")] - MatchType::DfaAnchoredReverse => { - match self.find_dfa_anchored_reverse(text, start) { - dfa::Result::Match((s, e)) => Some((s, e)), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => { - self.find_nfa(MatchNfaType::Auto, text, start) - } - } - } - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - MatchType::DfaSuffix => { - match self.find_dfa_reverse_suffix(text, start) { - dfa::Result::Match((s, e)) => Some((s, e)), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => { - self.find_nfa(MatchNfaType::Auto, text, start) - } - } - } - MatchType::Nfa(ty) => self.find_nfa(ty, text, start), - MatchType::Nothing => None, - #[cfg(feature = "perf-dfa")] - MatchType::DfaMany => { - unreachable!("BUG: RegexSet cannot be used with find") - } - } - } - - /// Finds the start and end location of the leftmost-first match and also - /// fills in all matching capture groups. - /// - /// The number of capture slots given should be equal to the total number - /// of capture slots in the compiled program. - /// - /// Note that the first two slots always correspond to the start and end - /// locations of the overall match. 
- fn captures_read_at( - &self, - locs: &mut Locations, - text: &[u8], - start: usize, - ) -> Option<(usize, usize)> { - let slots = locs.as_slots(); - for slot in slots.iter_mut() { - *slot = None; - } - // If the caller unnecessarily uses this, then we try to save them - // from themselves. - match slots.len() { - 0 => return self.find_at(text, start), - 2 => { - return self.find_at(text, start).map(|(s, e)| { - slots[0] = Some(s); - slots[1] = Some(e); - (s, e) - }); - } - _ => {} // fallthrough - } - if !self.is_anchor_end_match(text) { - return None; - } - match self.ro.match_type { - #[cfg(feature = "perf-literal")] - MatchType::Literal(ty) => { - self.find_literals(ty, text, start).and_then(|(s, e)| { - self.captures_nfa_type( - MatchNfaType::Auto, - slots, - text, - s, - e, - ) - }) - } - #[cfg(feature = "perf-dfa")] - MatchType::Dfa => { - if self.ro.nfa.is_anchored_start { - self.captures_nfa(slots, text, start) - } else { - match self.find_dfa_forward(text, start) { - dfa::Result::Match((s, e)) => self.captures_nfa_type( - MatchNfaType::Auto, - slots, - text, - s, - e, - ), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => { - self.captures_nfa(slots, text, start) - } - } - } - } - #[cfg(feature = "perf-dfa")] - MatchType::DfaAnchoredReverse => { - match self.find_dfa_anchored_reverse(text, start) { - dfa::Result::Match((s, e)) => self.captures_nfa_type( - MatchNfaType::Auto, - slots, - text, - s, - e, - ), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => self.captures_nfa(slots, text, start), - } - } - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - MatchType::DfaSuffix => { - match self.find_dfa_reverse_suffix(text, start) { - dfa::Result::Match((s, e)) => self.captures_nfa_type( - MatchNfaType::Auto, - slots, - text, - s, - e, - ), - dfa::Result::NoMatch(_) => None, - dfa::Result::Quit => self.captures_nfa(slots, text, start), - } - } - MatchType::Nfa(ty) => { - self.captures_nfa_type(ty, slots, text, start, text.len()) 
- } - MatchType::Nothing => None, - #[cfg(feature = "perf-dfa")] - MatchType::DfaMany => { - unreachable!("BUG: RegexSet cannot be used with captures") - } - } - } -} - -impl<'c> ExecNoSync<'c> { - /// Finds the leftmost-first match using only literal search. - #[cfg(feature = "perf-literal")] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_literals( - &self, - ty: MatchLiteralType, - text: &[u8], - start: usize, - ) -> Option<(usize, usize)> { - use self::MatchLiteralType::*; - match ty { - Unanchored => { - let lits = &self.ro.nfa.prefixes; - lits.find(&text[start..]).map(|(s, e)| (start + s, start + e)) - } - AnchoredStart => { - let lits = &self.ro.nfa.prefixes; - if start == 0 || !self.ro.nfa.is_anchored_start { - lits.find_start(&text[start..]) - .map(|(s, e)| (start + s, start + e)) - } else { - None - } - } - AnchoredEnd => { - let lits = &self.ro.suffixes; - lits.find_end(&text[start..]) - .map(|(s, e)| (start + s, start + e)) - } - AhoCorasick => self - .ro - .ac - .as_ref() - .unwrap() - .find(&text[start..]) - .map(|m| (start + m.start(), start + m.end())), - } - } - - /// Finds the leftmost-first match (start and end) using only the DFA. - /// - /// If the result returned indicates that the DFA quit, then another - /// matching engine should be used. - #[cfg(feature = "perf-dfa")] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_dfa_forward( - &self, - text: &[u8], - start: usize, - ) -> dfa::Result<(usize, usize)> { - use crate::dfa::Result::*; - let end = match dfa::Fsm::forward( - &self.ro.dfa, - self.cache.value(), - false, - text, - start, - ) { - NoMatch(i) => return NoMatch(i), - Quit => return Quit, - Match(end) if start == end => return Match((start, start)), - Match(end) => end, - }; - // Now run the DFA in reverse to find the start of the match. 
- match dfa::Fsm::reverse( - &self.ro.dfa_reverse, - self.cache.value(), - false, - &text[start..], - end - start, - ) { - Match(s) => Match((start + s, end)), - NoMatch(i) => NoMatch(i), - Quit => Quit, - } - } - - /// Finds the leftmost-first match (start and end) using only the DFA, - /// but assumes the regex is anchored at the end and therefore starts at - /// the end of the regex and matches in reverse. - /// - /// If the result returned indicates that the DFA quit, then another - /// matching engine should be used. - #[cfg(feature = "perf-dfa")] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_dfa_anchored_reverse( - &self, - text: &[u8], - start: usize, - ) -> dfa::Result<(usize, usize)> { - use crate::dfa::Result::*; - match dfa::Fsm::reverse( - &self.ro.dfa_reverse, - self.cache.value(), - false, - &text[start..], - text.len() - start, - ) { - Match(s) => Match((start + s, text.len())), - NoMatch(i) => NoMatch(i), - Quit => Quit, - } - } - - /// Finds the end of the shortest match using only the DFA. - #[cfg(feature = "perf-dfa")] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn shortest_dfa(&self, text: &[u8], start: usize) -> dfa::Result { - dfa::Fsm::forward(&self.ro.dfa, self.cache.value(), true, text, start) - } - - /// Finds the end of the shortest match using only the DFA by scanning for - /// suffix literals. - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn shortest_dfa_reverse_suffix( - &self, - text: &[u8], - start: usize, - ) -> dfa::Result { - match self.exec_dfa_reverse_suffix(text, start) { - None => self.shortest_dfa(text, start), - Some(r) => r.map(|(_, end)| end), - } - } - - /// Finds the end of the shortest match using only the DFA by scanning for - /// suffix literals. It also reports the start of the match. - /// - /// Note that if None is returned, then the optimization gave up to avoid - /// worst case quadratic behavior. 
A forward scanning DFA should be tried - /// next. - /// - /// If a match is returned and the full leftmost-first match is desired, - /// then a forward scan starting from the beginning of the match must be - /// done. - /// - /// If the result returned indicates that the DFA quit, then another - /// matching engine should be used. - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn exec_dfa_reverse_suffix( - &self, - text: &[u8], - original_start: usize, - ) -> Option> { - use crate::dfa::Result::*; - - let lcs = self.ro.suffixes.lcs(); - debug_assert!(lcs.len() >= 1); - let mut start = original_start; - let mut end = start; - let mut last_literal = start; - while end <= text.len() { - last_literal += match lcs.find(&text[last_literal..]) { - None => return Some(NoMatch(text.len())), - Some(i) => i, - }; - end = last_literal + lcs.len(); - match dfa::Fsm::reverse( - &self.ro.dfa_reverse, - self.cache.value(), - false, - &text[start..end], - end - start, - ) { - Match(0) | NoMatch(0) => return None, - Match(i) => return Some(Match((start + i, end))), - NoMatch(i) => { - start += i; - last_literal += 1; - continue; - } - Quit => return Some(Quit), - }; - } - Some(NoMatch(text.len())) - } - - /// Finds the leftmost-first match (start and end) using only the DFA - /// by scanning for suffix literals. - /// - /// If the result returned indicates that the DFA quit, then another - /// matching engine should be used. - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find_dfa_reverse_suffix( - &self, - text: &[u8], - start: usize, - ) -> dfa::Result<(usize, usize)> { - use crate::dfa::Result::*; - - let match_start = match self.exec_dfa_reverse_suffix(text, start) { - None => return self.find_dfa_forward(text, start), - Some(Match((start, _))) => start, - Some(r) => return r, - }; - // At this point, we've found a match. 
The only way to quit now - // without a match is if the DFA gives up (seems unlikely). - // - // Now run the DFA forwards to find the proper end of the match. - // (The suffix literal match can only indicate the earliest - // possible end location, which may appear before the end of the - // leftmost-first match.) - match dfa::Fsm::forward( - &self.ro.dfa, - self.cache.value(), - false, - text, - match_start, - ) { - NoMatch(_) => panic!("BUG: reverse match implies forward match"), - Quit => Quit, - Match(e) => Match((match_start, e)), - } - } - - /// Executes the NFA engine to return whether there is a match or not. - /// - /// Ideally, we could use shortest_nfa(...).is_some() and get the same - /// performance characteristics, but regex sets don't have captures, which - /// shortest_nfa depends on. - #[cfg(feature = "perf-dfa")] - fn match_nfa(&self, text: &[u8], start: usize) -> bool { - self.match_nfa_type(MatchNfaType::Auto, text, start) - } - - /// Like match_nfa, but allows specification of the type of NFA engine. - fn match_nfa_type( - &self, - ty: MatchNfaType, - text: &[u8], - start: usize, - ) -> bool { - self.exec_nfa( - ty, - &mut [false], - &mut [], - true, - false, - text, - start, - text.len(), - ) - } - - /// Finds the shortest match using an NFA. - #[cfg(feature = "perf-dfa")] - fn shortest_nfa(&self, text: &[u8], start: usize) -> Option { - self.shortest_nfa_type(MatchNfaType::Auto, text, start) - } - - /// Like shortest_nfa, but allows specification of the type of NFA engine. - fn shortest_nfa_type( - &self, - ty: MatchNfaType, - text: &[u8], - start: usize, - ) -> Option { - let mut slots = [None, None]; - if self.exec_nfa( - ty, - &mut [false], - &mut slots, - true, - true, - text, - start, - text.len(), - ) { - slots[1] - } else { - None - } - } - - /// Like find, but executes an NFA engine. 
- fn find_nfa( - &self, - ty: MatchNfaType, - text: &[u8], - start: usize, - ) -> Option<(usize, usize)> { - let mut slots = [None, None]; - if self.exec_nfa( - ty, - &mut [false], - &mut slots, - false, - false, - text, - start, - text.len(), - ) { - match (slots[0], slots[1]) { - (Some(s), Some(e)) => Some((s, e)), - _ => None, - } - } else { - None - } - } - - /// Like find_nfa, but fills in captures. - /// - /// `slots` should have length equal to `2 * nfa.captures.len()`. - #[cfg(feature = "perf-dfa")] - fn captures_nfa( - &self, - slots: &mut [Slot], - text: &[u8], - start: usize, - ) -> Option<(usize, usize)> { - self.captures_nfa_type( - MatchNfaType::Auto, - slots, - text, - start, - text.len(), - ) - } - - /// Like captures_nfa, but allows specification of type of NFA engine. - fn captures_nfa_type( - &self, - ty: MatchNfaType, - slots: &mut [Slot], - text: &[u8], - start: usize, - end: usize, - ) -> Option<(usize, usize)> { - if self.exec_nfa( - ty, - &mut [false], - slots, - false, - false, - text, - start, - end, - ) { - match (slots[0], slots[1]) { - (Some(s), Some(e)) => Some((s, e)), - _ => None, - } - } else { - None - } - } - - fn exec_nfa( - &self, - mut ty: MatchNfaType, - matches: &mut [bool], - slots: &mut [Slot], - quit_after_match: bool, - quit_after_match_with_pos: bool, - text: &[u8], - start: usize, - end: usize, - ) -> bool { - use self::MatchNfaType::*; - if let Auto = ty { - if backtrack::should_exec(self.ro.nfa.len(), text.len()) { - ty = Backtrack; - } else { - ty = PikeVM; - } - } - // The backtracker can't return the shortest match position as it is - // implemented today. So if someone calls `shortest_match` and we need - // to run an NFA, then use the PikeVM. - if quit_after_match_with_pos || ty == PikeVM { - self.exec_pikevm( - matches, - slots, - quit_after_match, - text, - start, - end, - ) - } else { - self.exec_backtrack(matches, slots, text, start, end) - } - } - - /// Always run the NFA algorithm. 
- fn exec_pikevm( - &self, - matches: &mut [bool], - slots: &mut [Slot], - quit_after_match: bool, - text: &[u8], - start: usize, - end: usize, - ) -> bool { - if self.ro.nfa.uses_bytes() { - pikevm::Fsm::exec( - &self.ro.nfa, - self.cache.value(), - matches, - slots, - quit_after_match, - ByteInput::new(text, self.ro.nfa.only_utf8), - start, - end, - ) - } else { - pikevm::Fsm::exec( - &self.ro.nfa, - self.cache.value(), - matches, - slots, - quit_after_match, - CharInput::new(text), - start, - end, - ) - } - } - - /// Always runs the NFA using bounded backtracking. - fn exec_backtrack( - &self, - matches: &mut [bool], - slots: &mut [Slot], - text: &[u8], - start: usize, - end: usize, - ) -> bool { - if self.ro.nfa.uses_bytes() { - backtrack::Bounded::exec( - &self.ro.nfa, - self.cache.value(), - matches, - slots, - ByteInput::new(text, self.ro.nfa.only_utf8), - start, - end, - ) - } else { - backtrack::Bounded::exec( - &self.ro.nfa, - self.cache.value(), - matches, - slots, - CharInput::new(text), - start, - end, - ) - } - } - - /// Finds which regular expressions match the given text. - /// - /// `matches` should have length equal to the number of regexes being - /// searched. - /// - /// This is only useful when one wants to know which regexes in a set - /// match some text. 
- pub fn many_matches_at( - &self, - matches: &mut [bool], - text: &[u8], - start: usize, - ) -> bool { - use self::MatchType::*; - if !self.is_anchor_end_match(text) { - return false; - } - match self.ro.match_type { - #[cfg(feature = "perf-literal")] - Literal(ty) => { - debug_assert_eq!(matches.len(), 1); - matches[0] = self.find_literals(ty, text, start).is_some(); - matches[0] - } - #[cfg(feature = "perf-dfa")] - Dfa | DfaAnchoredReverse | DfaMany => { - match dfa::Fsm::forward_many( - &self.ro.dfa, - self.cache.value(), - matches, - text, - start, - ) { - dfa::Result::Match(_) => true, - dfa::Result::NoMatch(_) => false, - dfa::Result::Quit => self.exec_nfa( - MatchNfaType::Auto, - matches, - &mut [], - false, - false, - text, - start, - text.len(), - ), - } - } - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - DfaSuffix => { - match dfa::Fsm::forward_many( - &self.ro.dfa, - self.cache.value(), - matches, - text, - start, - ) { - dfa::Result::Match(_) => true, - dfa::Result::NoMatch(_) => false, - dfa::Result::Quit => self.exec_nfa( - MatchNfaType::Auto, - matches, - &mut [], - false, - false, - text, - start, - text.len(), - ), - } - } - Nfa(ty) => self.exec_nfa( - ty, - matches, - &mut [], - false, - false, - text, - start, - text.len(), - ), - Nothing => false, - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn is_anchor_end_match(&self, text: &[u8]) -> bool { - #[cfg(not(feature = "perf-literal"))] - fn imp(_: &ExecReadOnly, _: &[u8]) -> bool { - true - } - - #[cfg(feature = "perf-literal")] - fn imp(ro: &ExecReadOnly, text: &[u8]) -> bool { - // Only do this check if the haystack is big (>1MB). 
- if text.len() > (1 << 20) && ro.nfa.is_anchored_end { - let lcs = ro.suffixes.lcs(); - if lcs.len() >= 1 && !lcs.is_suffix(text) { - return false; - } - } - true - } - - imp(&self.ro, text) - } - - pub fn capture_name_idx(&self) -> &Arc> { - &self.ro.nfa.capture_name_idx - } -} - -impl<'c> ExecNoSyncStr<'c> { - pub fn capture_name_idx(&self) -> &Arc> { - self.0.capture_name_idx() - } -} - -impl Exec { - /// Get a searcher that isn't Sync. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn searcher(&self) -> ExecNoSync<'_> { - ExecNoSync { - ro: &self.ro, // a clone is too expensive here! (and not needed) - cache: self.pool.get(), - } - } - - /// Get a searcher that isn't Sync and can match on &str. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn searcher_str(&self) -> ExecNoSyncStr<'_> { - ExecNoSyncStr(self.searcher()) - } - - /// Build a Regex from this executor. - pub fn into_regex(self) -> re_unicode::Regex { - re_unicode::Regex::from(self) - } - - /// Build a RegexSet from this executor. - pub fn into_regex_set(self) -> re_set::unicode::RegexSet { - re_set::unicode::RegexSet::from(self) - } - - /// Build a Regex from this executor that can match arbitrary bytes. - pub fn into_byte_regex(self) -> re_bytes::Regex { - re_bytes::Regex::from(self) - } - - /// Build a RegexSet from this executor that can match arbitrary bytes. - pub fn into_byte_regex_set(self) -> re_set::bytes::RegexSet { - re_set::bytes::RegexSet::from(self) - } - - /// The original regular expressions given by the caller that were - /// compiled. - pub fn regex_strings(&self) -> &[String] { - &self.ro.res - } - - /// Return a slice of capture names. - /// - /// Any capture that isn't named is None. - pub fn capture_names(&self) -> &[Option] { - &self.ro.nfa.captures - } - - /// Return a reference to named groups mapping (from group name to - /// group position). 
- pub fn capture_name_idx(&self) -> &Arc> { - &self.ro.nfa.capture_name_idx - } - - /// If the number of capture groups in every match is always the same, then - /// return that number. Otherwise return `None`. - pub fn static_captures_len(&self) -> Option { - self.ro.nfa.static_captures_len - } -} - -impl Clone for Exec { - fn clone(&self) -> Exec { - let pool = ExecReadOnly::new_pool(&self.ro); - Exec { ro: self.ro.clone(), pool } - } -} - -impl ExecReadOnly { - fn choose_match_type(&self, hint: Option) -> MatchType { - if let Some(MatchType::Nfa(_)) = hint { - return hint.unwrap(); - } - // If the NFA is empty, then we'll never match anything. - if self.nfa.insts.is_empty() { - return MatchType::Nothing; - } - if let Some(literalty) = self.choose_literal_match_type() { - return literalty; - } - if let Some(dfaty) = self.choose_dfa_match_type() { - return dfaty; - } - // We're so totally hosed. - MatchType::Nfa(MatchNfaType::Auto) - } - - /// If a plain literal scan can be used, then a corresponding literal - /// search type is returned. - fn choose_literal_match_type(&self) -> Option { - #[cfg(not(feature = "perf-literal"))] - fn imp(_: &ExecReadOnly) -> Option { - None - } - - #[cfg(feature = "perf-literal")] - fn imp(ro: &ExecReadOnly) -> Option { - // If our set of prefixes is complete, then we can use it to find - // a match in lieu of a regex engine. This doesn't quite work well - // in the presence of multiple regexes, so only do it when there's - // one. - // - // TODO(burntsushi): Also, don't try to match literals if the regex - // is partially anchored. We could technically do it, but we'd need - // to create two sets of literals: all of them and then the subset - // that aren't anchored. We would then only search for all of them - // when at the beginning of the input and use the subset in all - // other cases. 
- if ro.res.len() != 1 { - return None; - } - if ro.ac.is_some() { - return Some(MatchType::Literal( - MatchLiteralType::AhoCorasick, - )); - } - if ro.nfa.prefixes.complete() { - return if ro.nfa.is_anchored_start { - Some(MatchType::Literal(MatchLiteralType::AnchoredStart)) - } else { - Some(MatchType::Literal(MatchLiteralType::Unanchored)) - }; - } - if ro.suffixes.complete() { - return if ro.nfa.is_anchored_end { - Some(MatchType::Literal(MatchLiteralType::AnchoredEnd)) - } else { - // This case shouldn't happen. When the regex isn't - // anchored, then complete prefixes should imply complete - // suffixes. - Some(MatchType::Literal(MatchLiteralType::Unanchored)) - }; - } - None - } - - imp(self) - } - - /// If a DFA scan can be used, then choose the appropriate DFA strategy. - fn choose_dfa_match_type(&self) -> Option { - #[cfg(not(feature = "perf-dfa"))] - fn imp(_: &ExecReadOnly) -> Option { - None - } - - #[cfg(feature = "perf-dfa")] - fn imp(ro: &ExecReadOnly) -> Option { - if !dfa::can_exec(&ro.dfa) { - return None; - } - // Regex sets require a slightly specialized path. - if ro.res.len() >= 2 { - return Some(MatchType::DfaMany); - } - // If the regex is anchored at the end but not the start, then - // just match in reverse from the end of the haystack. - if !ro.nfa.is_anchored_start && ro.nfa.is_anchored_end { - return Some(MatchType::DfaAnchoredReverse); - } - #[cfg(feature = "perf-literal")] - { - // If there's a longish suffix literal, then it might be faster - // to look for that first. - if ro.should_suffix_scan() { - return Some(MatchType::DfaSuffix); - } - } - // Fall back to your garden variety forward searching lazy DFA. - Some(MatchType::Dfa) - } - - imp(self) - } - - /// Returns true if the program is amenable to suffix scanning. - /// - /// When this is true, as a heuristic, we assume it is OK to quickly scan - /// for suffix literals and then do a *reverse* DFA match from any matches - /// produced by the literal scan. 
(And then followed by a forward DFA - /// search, since the previously found suffix literal maybe not actually be - /// the end of a match.) - /// - /// This is a bit of a specialized optimization, but can result in pretty - /// big performance wins if 1) there are no prefix literals and 2) the - /// suffix literals are pretty rare in the text. (1) is obviously easy to - /// account for but (2) is harder. As a proxy, we assume that longer - /// strings are generally rarer, so we only enable this optimization when - /// we have a meaty suffix. - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - fn should_suffix_scan(&self) -> bool { - if self.suffixes.is_empty() { - return false; - } - let lcs_len = self.suffixes.lcs().char_len(); - lcs_len >= 3 && lcs_len > self.dfa.prefixes.lcp().char_len() - } - - fn new_pool(ro: &Arc) -> Box> { - let ro = ro.clone(); - Box::new(Pool::new(Box::new(move || { - AssertUnwindSafe(RefCell::new(ProgramCacheInner::new(&ro))) - }))) - } -} - -#[derive(Clone, Copy, Debug)] -enum MatchType { - /// A single or multiple literal search. This is only used when the regex - /// can be decomposed into a literal search. - #[cfg(feature = "perf-literal")] - Literal(MatchLiteralType), - /// A normal DFA search. - #[cfg(feature = "perf-dfa")] - Dfa, - /// A reverse DFA search starting from the end of a haystack. - #[cfg(feature = "perf-dfa")] - DfaAnchoredReverse, - /// A reverse DFA search with suffix literal scanning. - #[cfg(all(feature = "perf-dfa", feature = "perf-literal"))] - DfaSuffix, - /// Use the DFA on two or more regular expressions. - #[cfg(feature = "perf-dfa")] - DfaMany, - /// An NFA variant. - Nfa(MatchNfaType), - /// No match is ever possible, so don't ever try to search. - Nothing, -} - -#[derive(Clone, Copy, Debug)] -#[cfg(feature = "perf-literal")] -enum MatchLiteralType { - /// Match literals anywhere in text. - Unanchored, - /// Match literals only at the start of text. 
- AnchoredStart, - /// Match literals only at the end of text. - AnchoredEnd, - /// Use an Aho-Corasick automaton. This requires `ac` to be Some on - /// ExecReadOnly. - AhoCorasick, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -enum MatchNfaType { - /// Choose between Backtrack and PikeVM. - Auto, - /// NFA bounded backtracking. - /// - /// (This is only set by tests, since it never makes sense to always want - /// backtracking.) - Backtrack, - /// The Pike VM. - /// - /// (This is only set by tests, since it never makes sense to always want - /// the Pike VM.) - PikeVM, -} - -/// `ProgramCache` maintains reusable allocations for each matching engine -/// available to a particular program. -/// -/// We declare this as unwind safe since it's a cache that's only used for -/// performance purposes. If a panic occurs, it is (or should be) always safe -/// to continue using the same regex object. -pub type ProgramCache = AssertUnwindSafe>; - -#[derive(Debug)] -pub struct ProgramCacheInner { - pub pikevm: pikevm::Cache, - pub backtrack: backtrack::Cache, - #[cfg(feature = "perf-dfa")] - pub dfa: dfa::Cache, - #[cfg(feature = "perf-dfa")] - pub dfa_reverse: dfa::Cache, -} - -impl ProgramCacheInner { - fn new(ro: &ExecReadOnly) -> Self { - ProgramCacheInner { - pikevm: pikevm::Cache::new(&ro.nfa), - backtrack: backtrack::Cache::new(&ro.nfa), - #[cfg(feature = "perf-dfa")] - dfa: dfa::Cache::new(&ro.dfa), - #[cfg(feature = "perf-dfa")] - dfa_reverse: dfa::Cache::new(&ro.dfa_reverse), - } - } -} - -/// Alternation literals checks if the given HIR is a simple alternation of -/// literals, and if so, returns them. Otherwise, this returns None. -#[cfg(feature = "perf-literal")] -fn alternation_literals(expr: &Hir) -> Option>> { - use regex_syntax::hir::{HirKind, Literal}; - - // This is pretty hacky, but basically, if `is_alternation_literal` is - // true, then we can make several assumptions about the structure of our - // HIR. 
This is what justifies the `unreachable!` statements below. - // - // This code should be refactored once we overhaul this crate's - // optimization pipeline, because this is a terribly inflexible way to go - // about things. - - if !expr.properties().is_alternation_literal() { - return None; - } - let alts = match *expr.kind() { - HirKind::Alternation(ref alts) => alts, - _ => return None, // one literal isn't worth it - }; - - let mut lits = vec![]; - for alt in alts { - let mut lit = vec![]; - match *alt.kind() { - HirKind::Literal(Literal(ref bytes)) => { - lit.extend_from_slice(bytes) - } - HirKind::Concat(ref exprs) => { - for e in exprs { - match *e.kind() { - HirKind::Literal(Literal(ref bytes)) => { - lit.extend_from_slice(bytes); - } - _ => unreachable!("expected literal, got {:?}", e), - } - } - } - _ => unreachable!("expected literal or concat, got {:?}", alt), - } - lits.push(lit); - } - Some(lits) -} - -#[cfg(not(feature = "perf-literal"))] -fn literal_analysis(_: &Hir) -> (literal::Seq, literal::Seq) { - (literal::Seq::infinite(), literal::Seq::infinite()) -} - -#[cfg(feature = "perf-literal")] -fn literal_analysis(expr: &Hir) -> (literal::Seq, literal::Seq) { - const ATTEMPTS: [(usize, usize); 3] = [(5, 50), (4, 30), (3, 20)]; - - let mut prefixes = literal::Extractor::new() - .kind(literal::ExtractKind::Prefix) - .extract(expr); - for (keep, limit) in ATTEMPTS { - let len = match prefixes.len() { - None => break, - Some(len) => len, - }; - if len <= limit { - break; - } - prefixes.keep_first_bytes(keep); - prefixes.minimize_by_preference(); - } - - let mut suffixes = literal::Extractor::new() - .kind(literal::ExtractKind::Suffix) - .extract(expr); - for (keep, limit) in ATTEMPTS { - let len = match suffixes.len() { - None => break, - Some(len) => len, - }; - if len <= limit { - break; - } - suffixes.keep_last_bytes(keep); - suffixes.minimize_by_preference(); - } - - (prefixes, suffixes) -} - -#[cfg(test)] -mod test { - #[test] - fn 
uppercut_s_backtracking_bytes_default_bytes_mismatch() { - use crate::internal::ExecBuilder; - - let backtrack_bytes_re = ExecBuilder::new("^S") - .bounded_backtracking() - .only_utf8(false) - .build() - .map(|exec| exec.into_byte_regex()) - .map_err(|err| format!("{}", err)) - .unwrap(); - - let default_bytes_re = ExecBuilder::new("^S") - .only_utf8(false) - .build() - .map(|exec| exec.into_byte_regex()) - .map_err(|err| format!("{}", err)) - .unwrap(); - - let input = vec![83, 83]; - - let s1 = backtrack_bytes_re.split(&input); - let s2 = default_bytes_re.split(&input); - for (chunk1, chunk2) in s1.zip(s2) { - assert_eq!(chunk1, chunk2); - } - } - - #[test] - fn unicode_lit_star_backtracking_utf8bytes_default_utf8bytes_mismatch() { - use crate::internal::ExecBuilder; - - let backtrack_bytes_re = ExecBuilder::new(r"^(?u:\*)") - .bounded_backtracking() - .bytes(true) - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err)) - .unwrap(); - - let default_bytes_re = ExecBuilder::new(r"^(?u:\*)") - .bytes(true) - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err)) - .unwrap(); - - let input = "**"; - - let s1 = backtrack_bytes_re.split(input); - let s2 = default_bytes_re.split(input); - for (chunk1, chunk2) in s1.zip(s2) { - assert_eq!(chunk1, chunk2); - } - } -} diff --git a/src/expand.rs b/src/expand.rs deleted file mode 100644 index 98fafc949f..0000000000 --- a/src/expand.rs +++ /dev/null @@ -1,247 +0,0 @@ -use std::str; - -use crate::find_byte::find_byte; - -use crate::re_bytes; -use crate::re_unicode; - -pub fn expand_str( - caps: &re_unicode::Captures<'_>, - mut replacement: &str, - dst: &mut String, -) { - while !replacement.is_empty() { - match find_byte(b'$', replacement.as_bytes()) { - None => break, - Some(i) => { - dst.push_str(&replacement[..i]); - replacement = &replacement[i..]; - } - } - if replacement.as_bytes().get(1).map_or(false, |&b| b == b'$') { - dst.push_str("$"); - replacement = 
&replacement[2..]; - continue; - } - debug_assert!(!replacement.is_empty()); - let cap_ref = match find_cap_ref(replacement.as_bytes()) { - Some(cap_ref) => cap_ref, - None => { - dst.push_str("$"); - replacement = &replacement[1..]; - continue; - } - }; - replacement = &replacement[cap_ref.end..]; - match cap_ref.cap { - Ref::Number(i) => { - dst.push_str(caps.get(i).map(|m| m.as_str()).unwrap_or("")); - } - Ref::Named(name) => { - dst.push_str( - caps.name(name).map(|m| m.as_str()).unwrap_or(""), - ); - } - } - } - dst.push_str(replacement); -} - -pub fn expand_bytes( - caps: &re_bytes::Captures<'_>, - mut replacement: &[u8], - dst: &mut Vec, -) { - while !replacement.is_empty() { - match find_byte(b'$', replacement) { - None => break, - Some(i) => { - dst.extend(&replacement[..i]); - replacement = &replacement[i..]; - } - } - if replacement.get(1).map_or(false, |&b| b == b'$') { - dst.push(b'$'); - replacement = &replacement[2..]; - continue; - } - debug_assert!(!replacement.is_empty()); - let cap_ref = match find_cap_ref(replacement) { - Some(cap_ref) => cap_ref, - None => { - dst.push(b'$'); - replacement = &replacement[1..]; - continue; - } - }; - replacement = &replacement[cap_ref.end..]; - match cap_ref.cap { - Ref::Number(i) => { - dst.extend(caps.get(i).map(|m| m.as_bytes()).unwrap_or(b"")); - } - Ref::Named(name) => { - dst.extend( - caps.name(name).map(|m| m.as_bytes()).unwrap_or(b""), - ); - } - } - } - dst.extend(replacement); -} - -/// `CaptureRef` represents a reference to a capture group inside some text. -/// The reference is either a capture group name or a number. -/// -/// It is also tagged with the position in the text following the -/// capture reference. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -struct CaptureRef<'a> { - cap: Ref<'a>, - end: usize, -} - -/// A reference to a capture group in some text. -/// -/// e.g., `$2`, `$foo`, `${foo}`. 
-#[derive(Clone, Copy, Debug, Eq, PartialEq)] -enum Ref<'a> { - Named(&'a str), - Number(usize), -} - -impl<'a> From<&'a str> for Ref<'a> { - fn from(x: &'a str) -> Ref<'a> { - Ref::Named(x) - } -} - -impl From for Ref<'static> { - fn from(x: usize) -> Ref<'static> { - Ref::Number(x) - } -} - -/// Parses a possible reference to a capture group name in the given text, -/// starting at the beginning of `replacement`. -/// -/// If no such valid reference could be found, None is returned. -fn find_cap_ref(replacement: &[u8]) -> Option> { - let mut i = 0; - let rep: &[u8] = replacement; - if rep.len() <= 1 || rep[0] != b'$' { - return None; - } - i += 1; - if rep[i] == b'{' { - return find_cap_ref_braced(rep, i + 1); - } - let mut cap_end = i; - while rep.get(cap_end).copied().map_or(false, is_valid_cap_letter) { - cap_end += 1; - } - if cap_end == i { - return None; - } - // We just verified that the range 0..cap_end is valid ASCII, so it must - // therefore be valid UTF-8. If we really cared, we could avoid this UTF-8 - // check via an unchecked conversion or by parsing the number straight from - // &[u8]. - let cap = - str::from_utf8(&rep[i..cap_end]).expect("valid UTF-8 capture name"); - Some(CaptureRef { - cap: match cap.parse::() { - Ok(i) => Ref::Number(i as usize), - Err(_) => Ref::Named(cap), - }, - end: cap_end, - }) -} - -fn find_cap_ref_braced(rep: &[u8], mut i: usize) -> Option> { - let start = i; - while rep.get(i).map_or(false, |&b| b != b'}') { - i += 1; - } - if !rep.get(i).map_or(false, |&b| b == b'}') { - return None; - } - // When looking at braced names, we don't put any restrictions on the name, - // so it's possible it could be invalid UTF-8. But a capture group name - // can never be invalid UTF-8, so if we have invalid UTF-8, then we can - // safely return None. 
- let cap = match str::from_utf8(&rep[start..i]) { - Err(_) => return None, - Ok(cap) => cap, - }; - Some(CaptureRef { - cap: match cap.parse::() { - Ok(i) => Ref::Number(i as usize), - Err(_) => Ref::Named(cap), - }, - end: i + 1, - }) -} - -/// Returns true if and only if the given byte is allowed in a capture name -/// written in non-brace form. -fn is_valid_cap_letter(b: u8) -> bool { - match b { - b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_' => true, - _ => false, - } -} - -#[cfg(test)] -mod tests { - use super::{find_cap_ref, CaptureRef}; - - macro_rules! find { - ($name:ident, $text:expr) => { - #[test] - fn $name() { - assert_eq!(None, find_cap_ref($text.as_bytes())); - } - }; - ($name:ident, $text:expr, $capref:expr) => { - #[test] - fn $name() { - assert_eq!(Some($capref), find_cap_ref($text.as_bytes())); - } - }; - } - - macro_rules! c { - ($name_or_number:expr, $pos:expr) => { - CaptureRef { cap: $name_or_number.into(), end: $pos } - }; - } - - find!(find_cap_ref1, "$foo", c!("foo", 4)); - find!(find_cap_ref2, "${foo}", c!("foo", 6)); - find!(find_cap_ref3, "$0", c!(0, 2)); - find!(find_cap_ref4, "$5", c!(5, 2)); - find!(find_cap_ref5, "$10", c!(10, 3)); - // See https://github.com/rust-lang/regex/pull/585 - // for more on characters following numbers - find!(find_cap_ref6, "$42a", c!("42a", 4)); - find!(find_cap_ref7, "${42}a", c!(42, 5)); - find!(find_cap_ref8, "${42"); - find!(find_cap_ref9, "${42 "); - find!(find_cap_ref10, " $0 "); - find!(find_cap_ref11, "$"); - find!(find_cap_ref12, " "); - find!(find_cap_ref13, ""); - find!(find_cap_ref14, "$1-$2", c!(1, 2)); - find!(find_cap_ref15, "$1_$2", c!("1_", 3)); - find!(find_cap_ref16, "$x-$y", c!("x", 2)); - find!(find_cap_ref17, "$x_$y", c!("x_", 3)); - find!(find_cap_ref18, "${#}", c!("#", 4)); - find!(find_cap_ref19, "${Z[}", c!("Z[", 5)); - find!(find_cap_ref20, "${¾}", c!("¾", 5)); - find!(find_cap_ref21, "${¾a}", c!("¾a", 6)); - find!(find_cap_ref22, "${a¾}", c!("a¾", 6)); - 
find!(find_cap_ref23, "${☃}", c!("☃", 6)); - find!(find_cap_ref24, "${a☃}", c!("a☃", 7)); - find!(find_cap_ref25, "${☃a}", c!("☃a", 7)); - find!(find_cap_ref26, "${名字}", c!("名字", 9)); -} diff --git a/src/find_byte.rs b/src/find_byte.rs index e95f72afb9..9c6915db40 100644 --- a/src/find_byte.rs +++ b/src/find_byte.rs @@ -2,7 +2,7 @@ /// /// If the perf-literal feature is enabled, then this uses the super optimized /// memchr crate. Otherwise, it uses the naive byte-at-a-time implementation. -pub fn find_byte(needle: u8, haystack: &[u8]) -> Option { +pub(crate) fn find_byte(needle: u8, haystack: &[u8]) -> Option { #[cfg(not(feature = "perf-literal"))] fn imp(needle: u8, haystack: &[u8]) -> Option { haystack.iter().position(|&b| b == needle) @@ -10,8 +10,7 @@ pub fn find_byte(needle: u8, haystack: &[u8]) -> Option { #[cfg(feature = "perf-literal")] fn imp(needle: u8, haystack: &[u8]) -> Option { - use memchr::memchr; - memchr(needle, haystack) + memchr::memchr(needle, haystack) } imp(needle, haystack) diff --git a/src/input.rs b/src/input.rs deleted file mode 100644 index df6c3e0c91..0000000000 --- a/src/input.rs +++ /dev/null @@ -1,432 +0,0 @@ -use std::char; -use std::cmp::Ordering; -use std::fmt; -use std::ops; -use std::u32; - -use crate::literal::LiteralSearcher; -use crate::prog::InstEmptyLook; -use crate::utf8::{decode_last_utf8, decode_utf8}; - -/// Represents a location in the input. -#[derive(Clone, Copy, Debug)] -pub struct InputAt { - pos: usize, - c: Char, - byte: Option, - len: usize, -} - -impl InputAt { - /// Returns true iff this position is at the beginning of the input. - pub fn is_start(&self) -> bool { - self.pos == 0 - } - - /// Returns true iff this position is past the end of the input. - pub fn is_end(&self) -> bool { - self.c.is_none() && self.byte.is_none() - } - - /// Returns the character at this position. - /// - /// If this position is just before or after the input, then an absent - /// character is returned. 
- pub fn char(&self) -> Char { - self.c - } - - /// Returns the byte at this position. - pub fn byte(&self) -> Option { - self.byte - } - - /// Returns the UTF-8 width of the character at this position. - pub fn len(&self) -> usize { - self.len - } - - /// Returns whether the UTF-8 width of the character at this position - /// is zero. - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the byte offset of this position. - pub fn pos(&self) -> usize { - self.pos - } - - /// Returns the byte offset of the next position in the input. - pub fn next_pos(&self) -> usize { - self.pos + self.len - } -} - -/// An abstraction over input used in the matching engines. -pub trait Input: fmt::Debug { - /// Return an encoding of the position at byte offset `i`. - fn at(&self, i: usize) -> InputAt; - - /// Return the Unicode character occurring next to `at`. - /// - /// If no such character could be decoded, then `Char` is absent. - fn next_char(&self, at: InputAt) -> Char; - - /// Return the Unicode character occurring previous to `at`. - /// - /// If no such character could be decoded, then `Char` is absent. - fn previous_char(&self, at: InputAt) -> Char; - - /// Return true if the given empty width instruction matches at the - /// input position given. - fn is_empty_match(&self, at: InputAt, empty: &InstEmptyLook) -> bool; - - /// Scan the input for a matching prefix. - fn prefix_at( - &self, - prefixes: &LiteralSearcher, - at: InputAt, - ) -> Option; - - /// The number of bytes in the input. - fn len(&self) -> usize; - - /// Whether the input is empty. - fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Return the given input as a sequence of bytes. 
- fn as_bytes(&self) -> &[u8]; -} - -impl<'a, T: Input> Input for &'a T { - fn at(&self, i: usize) -> InputAt { - (**self).at(i) - } - - fn next_char(&self, at: InputAt) -> Char { - (**self).next_char(at) - } - - fn previous_char(&self, at: InputAt) -> Char { - (**self).previous_char(at) - } - - fn is_empty_match(&self, at: InputAt, empty: &InstEmptyLook) -> bool { - (**self).is_empty_match(at, empty) - } - - fn prefix_at( - &self, - prefixes: &LiteralSearcher, - at: InputAt, - ) -> Option { - (**self).prefix_at(prefixes, at) - } - - fn len(&self) -> usize { - (**self).len() - } - - fn as_bytes(&self) -> &[u8] { - (**self).as_bytes() - } -} - -/// An input reader over characters. -#[derive(Clone, Copy, Debug)] -pub struct CharInput<'t>(&'t [u8]); - -impl<'t> CharInput<'t> { - /// Return a new character input reader for the given string. - pub fn new(s: &'t [u8]) -> CharInput<'t> { - CharInput(s) - } -} - -impl<'t> ops::Deref for CharInput<'t> { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - self.0 - } -} - -impl<'t> Input for CharInput<'t> { - fn at(&self, i: usize) -> InputAt { - if i >= self.len() { - InputAt { pos: self.len(), c: None.into(), byte: None, len: 0 } - } else { - let c = decode_utf8(&self[i..]).map(|(c, _)| c).into(); - InputAt { pos: i, c, byte: None, len: c.len_utf8() } - } - } - - fn next_char(&self, at: InputAt) -> Char { - at.char() - } - - fn previous_char(&self, at: InputAt) -> Char { - decode_last_utf8(&self[..at.pos()]).map(|(c, _)| c).into() - } - - fn is_empty_match(&self, at: InputAt, empty: &InstEmptyLook) -> bool { - use crate::prog::EmptyLook::*; - match empty.look { - StartLine => { - let c = self.previous_char(at); - at.pos() == 0 || c == '\n' - } - EndLine => { - let c = self.next_char(at); - at.pos() == self.len() || c == '\n' - } - StartText => at.pos() == 0, - EndText => at.pos() == self.len(), - WordBoundary => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - c1.is_word_char() != c2.is_word_char() - } 
- NotWordBoundary => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - c1.is_word_char() == c2.is_word_char() - } - WordBoundaryAscii => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - c1.is_word_byte() != c2.is_word_byte() - } - NotWordBoundaryAscii => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - c1.is_word_byte() == c2.is_word_byte() - } - } - } - - fn prefix_at( - &self, - prefixes: &LiteralSearcher, - at: InputAt, - ) -> Option { - prefixes.find(&self[at.pos()..]).map(|(s, _)| self.at(at.pos() + s)) - } - - fn len(&self) -> usize { - self.0.len() - } - - fn as_bytes(&self) -> &[u8] { - self.0 - } -} - -/// An input reader over bytes. -#[derive(Clone, Copy, Debug)] -pub struct ByteInput<'t> { - text: &'t [u8], - only_utf8: bool, -} - -impl<'t> ByteInput<'t> { - /// Return a new byte-based input reader for the given string. - pub fn new(text: &'t [u8], only_utf8: bool) -> ByteInput<'t> { - ByteInput { text, only_utf8 } - } -} - -impl<'t> ops::Deref for ByteInput<'t> { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - self.text - } -} - -impl<'t> Input for ByteInput<'t> { - fn at(&self, i: usize) -> InputAt { - if i >= self.len() { - InputAt { pos: self.len(), c: None.into(), byte: None, len: 0 } - } else { - InputAt { - pos: i, - c: None.into(), - byte: self.get(i).cloned(), - len: 1, - } - } - } - - fn next_char(&self, at: InputAt) -> Char { - decode_utf8(&self[at.pos()..]).map(|(c, _)| c).into() - } - - fn previous_char(&self, at: InputAt) -> Char { - decode_last_utf8(&self[..at.pos()]).map(|(c, _)| c).into() - } - - fn is_empty_match(&self, at: InputAt, empty: &InstEmptyLook) -> bool { - use crate::prog::EmptyLook::*; - match empty.look { - StartLine => { - let c = self.previous_char(at); - at.pos() == 0 || c == '\n' - } - EndLine => { - let c = self.next_char(at); - at.pos() == self.len() || c == '\n' - } - StartText => at.pos() == 0, - EndText => at.pos() == self.len(), - WordBoundary => { - let 
(c1, c2) = (self.previous_char(at), self.next_char(at)); - c1.is_word_char() != c2.is_word_char() - } - NotWordBoundary => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - c1.is_word_char() == c2.is_word_char() - } - WordBoundaryAscii => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - if self.only_utf8 { - // If we must match UTF-8, then we can't match word - // boundaries at invalid UTF-8. - if c1.is_none() && !at.is_start() { - return false; - } - if c2.is_none() && !at.is_end() { - return false; - } - } - c1.is_word_byte() != c2.is_word_byte() - } - NotWordBoundaryAscii => { - let (c1, c2) = (self.previous_char(at), self.next_char(at)); - if self.only_utf8 { - // If we must match UTF-8, then we can't match word - // boundaries at invalid UTF-8. - if c1.is_none() && !at.is_start() { - return false; - } - if c2.is_none() && !at.is_end() { - return false; - } - } - c1.is_word_byte() == c2.is_word_byte() - } - } - } - - fn prefix_at( - &self, - prefixes: &LiteralSearcher, - at: InputAt, - ) -> Option { - prefixes.find(&self[at.pos()..]).map(|(s, _)| self.at(at.pos() + s)) - } - - fn len(&self) -> usize { - self.text.len() - } - - fn as_bytes(&self) -> &[u8] { - self.text - } -} - -/// An inline representation of `Option`. -/// -/// This eliminates the need to do case analysis on `Option` to determine -/// ordinality with other characters. -/// -/// (The `Option` is not related to encoding. Instead, it is used in the -/// matching engines to represent the beginning and ending boundaries of the -/// search text.) -#[derive(Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct Char(u32); - -impl fmt::Debug for Char { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match char::from_u32(self.0) { - None => write!(f, "Empty"), - Some(c) => write!(f, "{:?}", c), - } - } -} - -impl Char { - /// Returns true iff the character is absent. 
- #[inline] - pub fn is_none(self) -> bool { - self.0 == u32::MAX - } - - /// Returns the length of the character's UTF-8 encoding. - /// - /// If the character is absent, then `1` is returned. - #[inline] - pub fn len_utf8(self) -> usize { - char::from_u32(self.0).map_or(1, |c| c.len_utf8()) - } - - /// Returns true iff the character is a word character. - /// - /// If the character is absent, then false is returned. - pub fn is_word_char(self) -> bool { - // is_word_character can panic if the Unicode data for \w isn't - // available. However, our compiler ensures that if a Unicode word - // boundary is used, then the data must also be available. If it isn't, - // then the compiler returns an error. - char::from_u32(self.0).map_or(false, regex_syntax::is_word_character) - } - - /// Returns true iff the byte is a word byte. - /// - /// If the byte is absent, then false is returned. - pub fn is_word_byte(self) -> bool { - match char::from_u32(self.0) { - Some(c) if c <= '\u{7F}' => regex_syntax::is_word_byte(c as u8), - None | Some(_) => false, - } - } -} - -impl From for Char { - fn from(c: char) -> Char { - Char(c as u32) - } -} - -impl From> for Char { - fn from(c: Option) -> Char { - c.map_or(Char(u32::MAX), |c| c.into()) - } -} - -impl PartialEq for Char { - #[inline] - fn eq(&self, other: &char) -> bool { - self.0 == *other as u32 - } -} - -impl PartialEq for char { - #[inline] - fn eq(&self, other: &Char) -> bool { - *self as u32 == other.0 - } -} - -impl PartialOrd for Char { - #[inline] - fn partial_cmp(&self, other: &char) -> Option { - self.0.partial_cmp(&(*other as u32)) - } -} - -impl PartialOrd for char { - #[inline] - fn partial_cmp(&self, other: &Char) -> Option { - (*self as u32).partial_cmp(&other.0) - } -} diff --git a/src/lib.rs b/src/lib.rs index efe871ee78..7c305eda44 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -761,38 +761,11 @@ pub mod bytes { pub use crate::re_set::bytes::*; } -mod backtrack; -mod compile; -#[cfg(feature = "perf-dfa")] -mod 
dfa; mod error; -mod exec; -mod expand; mod find_byte; -mod input; -mod literal; #[cfg(feature = "pattern")] mod pattern; -mod pikevm; -mod pool; -mod prog; mod re_builder; mod re_bytes; mod re_set; -mod re_trait; mod re_unicode; -mod sparse; -mod utf8; - -/// The `internal` module exists to support suspicious activity, such as -/// testing different matching engines and supporting the `regex-debug` CLI -/// utility. -#[doc(hidden)] -#[cfg(feature = "std")] -pub mod internal { - pub use crate::compile::Compiler; - pub use crate::exec::{Exec, ExecBuilder}; - pub use crate::input::{Char, CharInput, Input, InputAt}; - pub use crate::literal::LiteralSearcher; - pub use crate::prog::{EmptyLook, Inst, InstRanges, Program}; -} diff --git a/src/literal/imp.rs b/src/literal/imp.rs deleted file mode 100644 index 75fa6e37b2..0000000000 --- a/src/literal/imp.rs +++ /dev/null @@ -1,413 +0,0 @@ -use std::mem; - -use aho_corasick::{self, packed, AhoCorasick}; -use memchr::{memchr, memchr2, memchr3, memmem}; -use regex_syntax::hir::literal::{Literal, Seq}; - -/// A prefix extracted from a compiled regular expression. -/// -/// A regex prefix is a set of literal strings that *must* be matched at the -/// beginning of a regex in order for the entire regex to match. Similarly -/// for a regex suffix. -#[derive(Clone, Debug)] -pub struct LiteralSearcher { - complete: bool, - lcp: Memmem, - lcs: Memmem, - matcher: Matcher, -} - -#[derive(Clone, Debug)] -enum Matcher { - /// No literals. (Never advances through the input.) - Empty, - /// A set of four or more single byte literals. - Bytes(SingleByteSet), - /// A single substring, using vector accelerated routines when available. - Memmem(Memmem), - /// An Aho-Corasick automaton. - AC { ac: AhoCorasick, lits: Vec }, - /// A packed multiple substring searcher, using SIMD. 
- /// - /// Note that Aho-Corasick will actually use this packed searcher - /// internally automatically, however, there is some overhead associated - /// with going through the Aho-Corasick machinery. So using the packed - /// searcher directly results in some gains. - Packed { s: packed::Searcher, lits: Vec }, -} - -impl LiteralSearcher { - /// Returns a matcher that never matches and never advances the input. - pub fn empty() -> Self { - Self::new(Seq::infinite(), Matcher::Empty) - } - - /// Returns a matcher for literal prefixes from the given set. - pub fn prefixes(lits: Seq) -> Self { - let matcher = Matcher::prefixes(&lits); - Self::new(lits, matcher) - } - - /// Returns a matcher for literal suffixes from the given set. - pub fn suffixes(lits: Seq) -> Self { - let matcher = Matcher::suffixes(&lits); - Self::new(lits, matcher) - } - - fn new(lits: Seq, matcher: Matcher) -> Self { - LiteralSearcher { - complete: lits.is_exact(), - lcp: Memmem::new(lits.longest_common_prefix().unwrap_or(b"")), - lcs: Memmem::new(lits.longest_common_suffix().unwrap_or(b"")), - matcher, - } - } - - /// Returns true if all matches comprise the entire regular expression. - /// - /// This does not necessarily mean that a literal match implies a match - /// of the regular expression. For example, the regular expression `^a` - /// is comprised of a single complete literal `a`, but the regular - /// expression demands that it only match at the beginning of a string. - pub fn complete(&self) -> bool { - self.complete && !self.is_empty() - } - - /// Find the position of a literal in `haystack` if it exists. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn find(&self, haystack: &[u8]) -> Option<(usize, usize)> { - use self::Matcher::*; - match self.matcher { - Empty => Some((0, 0)), - Bytes(ref sset) => sset.find(haystack).map(|i| (i, i + 1)), - Memmem(ref s) => s.find(haystack).map(|i| (i, i + s.len())), - AC { ref ac, .. 
} => { - ac.find(haystack).map(|m| (m.start(), m.end())) - } - Packed { ref s, .. } => { - s.find(haystack).map(|m| (m.start(), m.end())) - } - } - } - - /// Like find, except matches must start at index `0`. - pub fn find_start(&self, haystack: &[u8]) -> Option<(usize, usize)> { - for lit in self.iter() { - if lit.len() > haystack.len() { - continue; - } - if lit == &haystack[0..lit.len()] { - return Some((0, lit.len())); - } - } - None - } - - /// Like find, except matches must end at index `haystack.len()`. - pub fn find_end(&self, haystack: &[u8]) -> Option<(usize, usize)> { - for lit in self.iter() { - if lit.len() > haystack.len() { - continue; - } - if lit == &haystack[haystack.len() - lit.len()..] { - return Some((haystack.len() - lit.len(), haystack.len())); - } - } - None - } - - /// Returns an iterator over all literals to be matched. - pub fn iter(&self) -> LiteralIter<'_> { - match self.matcher { - Matcher::Empty => LiteralIter::Empty, - Matcher::Bytes(ref sset) => LiteralIter::Bytes(&sset.dense), - Matcher::Memmem(ref s) => LiteralIter::Single(&s.finder.needle()), - Matcher::AC { ref lits, .. } => LiteralIter::AC(lits), - Matcher::Packed { ref lits, .. } => LiteralIter::Packed(lits), - } - } - - /// Returns a matcher for the longest common prefix of this matcher. - pub fn lcp(&self) -> &Memmem { - &self.lcp - } - - /// Returns a matcher for the longest common suffix of this matcher. - pub fn lcs(&self) -> &Memmem { - &self.lcs - } - - /// Returns true iff this prefix is empty. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the number of prefixes in this machine. - pub fn len(&self) -> usize { - use self::Matcher::*; - match self.matcher { - Empty => 0, - Bytes(ref sset) => sset.dense.len(), - Memmem(_) => 1, - AC { ref ac, .. } => ac.patterns_len(), - Packed { ref lits, .. } => lits.len(), - } - } - - /// Return the approximate heap usage of literals in bytes. 
- pub fn approximate_size(&self) -> usize { - use self::Matcher::*; - match self.matcher { - Empty => 0, - Bytes(ref sset) => sset.approximate_size(), - Memmem(ref single) => single.approximate_size(), - AC { ref ac, .. } => ac.memory_usage(), - Packed { ref s, .. } => s.memory_usage(), - } - } -} - -impl Matcher { - fn prefixes(lits: &Seq) -> Self { - let sset = SingleByteSet::prefixes(lits); - Matcher::new(lits, sset) - } - - fn suffixes(lits: &Seq) -> Self { - let sset = SingleByteSet::suffixes(lits); - Matcher::new(lits, sset) - } - - fn new(lits: &Seq, sset: SingleByteSet) -> Self { - if lits.is_empty() || lits.min_literal_len() == Some(0) { - return Matcher::Empty; - } - let lits = match lits.literals() { - None => return Matcher::Empty, - Some(members) => members, - }; - if sset.dense.len() >= 26 { - // Avoid trying to match a large number of single bytes. - // This is *very* sensitive to a frequency analysis comparison - // between the bytes in sset and the composition of the haystack. - // No matter the size of sset, if its members all are rare in the - // haystack, then it'd be worth using it. How to tune this... IDK. 
- // ---AG - return Matcher::Empty; - } - if sset.complete { - return Matcher::Bytes(sset); - } - if lits.len() == 1 { - return Matcher::Memmem(Memmem::new(lits[0].as_bytes())); - } - - let pats: Vec<&[u8]> = lits.iter().map(|lit| lit.as_bytes()).collect(); - let is_aho_corasick_fast = sset.dense.len() <= 1 && sset.all_ascii; - if lits.len() <= 100 && !is_aho_corasick_fast { - let mut builder = packed::Config::new() - .match_kind(packed::MatchKind::LeftmostFirst) - .builder(); - if let Some(s) = builder.extend(&pats).build() { - return Matcher::Packed { s, lits: lits.to_owned() }; - } - } - let ac = AhoCorasick::builder() - .match_kind(aho_corasick::MatchKind::LeftmostFirst) - .kind(Some(aho_corasick::AhoCorasickKind::DFA)) - .build(&pats) - .unwrap(); - Matcher::AC { ac, lits: lits.to_owned() } - } -} - -#[derive(Debug)] -pub enum LiteralIter<'a> { - Empty, - Bytes(&'a [u8]), - Single(&'a [u8]), - AC(&'a [Literal]), - Packed(&'a [Literal]), -} - -impl<'a> Iterator for LiteralIter<'a> { - type Item = &'a [u8]; - - fn next(&mut self) -> Option { - match *self { - LiteralIter::Empty => None, - LiteralIter::Bytes(ref mut many) => { - if many.is_empty() { - None - } else { - let next = &many[0..1]; - *many = &many[1..]; - Some(next) - } - } - LiteralIter::Single(ref mut one) => { - if one.is_empty() { - None - } else { - let next = &one[..]; - *one = &[]; - Some(next) - } - } - LiteralIter::AC(ref mut lits) => { - if lits.is_empty() { - None - } else { - let next = &lits[0]; - *lits = &lits[1..]; - Some(next.as_bytes()) - } - } - LiteralIter::Packed(ref mut lits) => { - if lits.is_empty() { - None - } else { - let next = &lits[0]; - *lits = &lits[1..]; - Some(next.as_bytes()) - } - } - } - } -} - -#[derive(Clone, Debug)] -struct SingleByteSet { - sparse: Vec, - dense: Vec, - complete: bool, - all_ascii: bool, -} - -impl SingleByteSet { - fn new() -> SingleByteSet { - SingleByteSet { - sparse: vec![false; 256], - dense: vec![], - complete: true, - all_ascii: true, - } - 
} - - fn prefixes(lits: &Seq) -> SingleByteSet { - let mut sset = SingleByteSet::new(); - let lits = match lits.literals() { - None => return sset, - Some(lits) => lits, - }; - for lit in lits.iter() { - sset.complete = sset.complete && lit.len() == 1; - if let Some(&b) = lit.as_bytes().get(0) { - if !sset.sparse[b as usize] { - if b > 0x7F { - sset.all_ascii = false; - } - sset.dense.push(b); - sset.sparse[b as usize] = true; - } - } - } - sset - } - - fn suffixes(lits: &Seq) -> SingleByteSet { - let mut sset = SingleByteSet::new(); - let lits = match lits.literals() { - None => return sset, - Some(lits) => lits, - }; - for lit in lits.iter() { - sset.complete = sset.complete && lit.len() == 1; - if let Some(&b) = lit.as_bytes().last() { - if !sset.sparse[b as usize] { - if b > 0x7F { - sset.all_ascii = false; - } - sset.dense.push(b); - sset.sparse[b as usize] = true; - } - } - } - sset - } - - /// Faster find that special cases certain sizes to use memchr. - #[cfg_attr(feature = "perf-inline", inline(always))] - fn find(&self, text: &[u8]) -> Option { - match self.dense.len() { - 0 => None, - 1 => memchr(self.dense[0], text), - 2 => memchr2(self.dense[0], self.dense[1], text), - 3 => memchr3(self.dense[0], self.dense[1], self.dense[2], text), - _ => self._find(text), - } - } - - /// Generic find that works on any sized set. - fn _find(&self, haystack: &[u8]) -> Option { - for (i, &b) in haystack.iter().enumerate() { - if self.sparse[b as usize] { - return Some(i); - } - } - None - } - - fn approximate_size(&self) -> usize { - (self.dense.len() * mem::size_of::()) - + (self.sparse.len() * mem::size_of::()) - } -} - -/// A simple wrapper around the memchr crate's memmem implementation. -/// -/// The API this exposes mirrors the API of previous substring searchers that -/// this supplanted. 
-#[derive(Clone, Debug)] -pub struct Memmem { - finder: memmem::Finder<'static>, - char_len: usize, -} - -impl Memmem { - fn new(pat: &[u8]) -> Memmem { - Memmem { - finder: memmem::Finder::new(pat).into_owned(), - char_len: char_len_lossy(pat), - } - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn find(&self, haystack: &[u8]) -> Option { - self.finder.find(haystack) - } - - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn is_suffix(&self, text: &[u8]) -> bool { - if text.len() < self.len() { - return false; - } - &text[text.len() - self.len()..] == self.finder.needle() - } - - pub fn len(&self) -> usize { - self.finder.needle().len() - } - - pub fn char_len(&self) -> usize { - self.char_len - } - - fn approximate_size(&self) -> usize { - self.finder.needle().len() * mem::size_of::() - } -} - -fn char_len_lossy(bytes: &[u8]) -> usize { - String::from_utf8_lossy(bytes).chars().count() -} diff --git a/src/literal/mod.rs b/src/literal/mod.rs deleted file mode 100644 index b9fb77aed9..0000000000 --- a/src/literal/mod.rs +++ /dev/null @@ -1,55 +0,0 @@ -pub use self::imp::*; - -#[cfg(feature = "perf-literal")] -mod imp; - -#[allow(missing_docs)] -#[cfg(not(feature = "perf-literal"))] -mod imp { - use regex_syntax::hir::literal::Seq; - - #[derive(Clone, Debug)] - pub struct LiteralSearcher(()); - - impl LiteralSearcher { - pub fn empty() -> Self { - LiteralSearcher(()) - } - - pub fn prefixes(_: Seq) -> Self { - LiteralSearcher(()) - } - - pub fn suffixes(_: Seq) -> Self { - LiteralSearcher(()) - } - - pub fn complete(&self) -> bool { - false - } - - pub fn find(&self, _: &[u8]) -> Option<(usize, usize)> { - unreachable!() - } - - pub fn find_start(&self, _: &[u8]) -> Option<(usize, usize)> { - unreachable!() - } - - pub fn find_end(&self, _: &[u8]) -> Option<(usize, usize)> { - unreachable!() - } - - pub fn is_empty(&self) -> bool { - true - } - - pub fn len(&self) -> usize { - 0 - } - - pub fn approximate_size(&self) -> usize { - 0 - } - 
} -} diff --git a/src/pikevm.rs b/src/pikevm.rs deleted file mode 100644 index 8c9eac2d39..0000000000 --- a/src/pikevm.rs +++ /dev/null @@ -1,360 +0,0 @@ -// This module implements the Pike VM. That is, it guarantees linear time -// search of a regex on any text with memory use proportional to the size of -// the regex. -// -// It is equal in power to the backtracking engine in this crate, except the -// backtracking engine is typically faster on small regexes/texts at the -// expense of a bigger memory footprint. -// -// It can do more than the DFA can (specifically, record capture locations -// and execute Unicode word boundary assertions), but at a slower speed. -// Specifically, the Pike VM executes a DFA implicitly by repeatedly expanding -// epsilon transitions. That is, the Pike VM engine can be in multiple states -// at once where as the DFA is only ever in one state at a time. -// -// Therefore, the Pike VM is generally treated as the fallback when the other -// matching engines either aren't feasible to run or are insufficient. - -use std::mem; - -use crate::exec::ProgramCache; -use crate::input::{Input, InputAt}; -use crate::prog::{InstPtr, Program}; -use crate::re_trait::Slot; -use crate::sparse::SparseSet; - -/// An NFA simulation matching engine. -#[derive(Debug)] -pub struct Fsm<'r, I> { - /// The sequence of opcodes (among other things) that is actually executed. - /// - /// The program may be byte oriented or Unicode codepoint oriented. - prog: &'r Program, - /// An explicit stack used for following epsilon transitions. (This is - /// borrowed from the cache.) - stack: &'r mut Vec, - /// The input to search. - input: I, -} - -/// A cached allocation that can be reused on each execution. -#[derive(Clone, Debug)] -pub struct Cache { - /// A pair of ordered sets for tracking NFA states. - clist: Threads, - nlist: Threads, - /// An explicit stack used for following epsilon transitions. 
- stack: Vec, -} - -/// An ordered set of NFA states and their captures. -#[derive(Clone, Debug)] -struct Threads { - /// An ordered set of opcodes (each opcode is an NFA state). - set: SparseSet, - /// Captures for every NFA state. - /// - /// It is stored in row-major order, where the columns are the capture - /// slots and the rows are the states. - caps: Vec, - /// The number of capture slots stored per thread. (Every capture has - /// two slots.) - slots_per_thread: usize, -} - -/// A representation of an explicit stack frame when following epsilon -/// transitions. This is used to avoid recursion. -#[derive(Clone, Debug)] -enum FollowEpsilon { - /// Follow transitions at the given instruction pointer. - IP(InstPtr), - /// Restore the capture slot with the given position in the input. - Capture { slot: usize, pos: Slot }, -} - -impl Cache { - /// Create a new allocation used by the NFA machine to record execution - /// and captures. - pub fn new(_prog: &Program) -> Self { - Cache { clist: Threads::new(), nlist: Threads::new(), stack: vec![] } - } -} - -impl<'r, I: Input> Fsm<'r, I> { - /// Execute the NFA matching engine. - /// - /// If there's a match, `exec` returns `true` and populates the given - /// captures accordingly. 
- pub fn exec( - prog: &'r Program, - cache: &ProgramCache, - matches: &mut [bool], - slots: &mut [Slot], - quit_after_match: bool, - input: I, - start: usize, - end: usize, - ) -> bool { - let mut cache = cache.borrow_mut(); - let cache = &mut cache.pikevm; - cache.clist.resize(prog.len(), prog.captures.len()); - cache.nlist.resize(prog.len(), prog.captures.len()); - let at = input.at(start); - Fsm { prog, stack: &mut cache.stack, input }.exec_( - &mut cache.clist, - &mut cache.nlist, - matches, - slots, - quit_after_match, - at, - end, - ) - } - - fn exec_( - &mut self, - mut clist: &mut Threads, - mut nlist: &mut Threads, - matches: &mut [bool], - slots: &mut [Slot], - quit_after_match: bool, - mut at: InputAt, - end: usize, - ) -> bool { - let mut matched = false; - let mut all_matched = false; - clist.set.clear(); - nlist.set.clear(); - 'LOOP: loop { - if clist.set.is_empty() { - // Three ways to bail out when our current set of threads is - // empty. - // - // 1. We have a match---so we're done exploring any possible - // alternatives. Time to quit. (We can't do this if we're - // looking for matches for multiple regexes, unless we know - // they all matched.) - // - // 2. If the expression starts with a '^' we can terminate as - // soon as the last thread dies. - if (matched && matches.len() <= 1) - || all_matched - || (!at.is_start() && self.prog.is_anchored_start) - { - break; - } - - // 3. If there's a literal prefix for the program, try to - // jump ahead quickly. If it can't be found, then we can - // bail out early. - if !self.prog.prefixes.is_empty() { - at = match self.input.prefix_at(&self.prog.prefixes, at) { - None => break, - Some(at) => at, - }; - } - } - - // This simulates a preceding '.*?' for every regex by adding - // a state starting at the current position in the input for the - // beginning of the program only if we don't already have a match. 
- if clist.set.is_empty() - || (!self.prog.is_anchored_start && !all_matched) - { - self.add(&mut clist, slots, 0, at); - } - // The previous call to "add" actually inspects the position just - // before the current character. For stepping through the machine, - // we can to look at the current character, so we advance the - // input. - let at_next = self.input.at(at.next_pos()); - for i in 0..clist.set.len() { - let ip = clist.set[i]; - if self.step( - &mut nlist, - matches, - slots, - clist.caps(ip), - ip, - at, - at_next, - ) { - matched = true; - all_matched = all_matched || matches.iter().all(|&b| b); - if quit_after_match { - // If we only care if a match occurs (not its - // position), then we can quit right now. - break 'LOOP; - } - if self.prog.matches.len() == 1 { - // We don't need to check the rest of the threads - // in this set because we've matched something - // ("leftmost-first"). However, we still need to check - // threads in the next set to support things like - // greedy matching. - // - // This is only true on normal regexes. For regex sets, - // we need to mush on to observe other matches. - break; - } - } - } - if at.pos() >= end { - break; - } - at = at_next; - mem::swap(clist, nlist); - nlist.set.clear(); - } - matched - } - - /// Step through the input, one token (byte or codepoint) at a time. - /// - /// nlist is the set of states that will be processed on the next token - /// in the input. - /// - /// caps is the set of captures passed by the caller of the NFA. They are - /// written to only when a match state is visited. - /// - /// thread_caps is the set of captures set for the current NFA state, ip. - /// - /// at and at_next are the current and next positions in the input. at or - /// at_next may be EOF. 
- fn step( - &mut self, - nlist: &mut Threads, - matches: &mut [bool], - slots: &mut [Slot], - thread_caps: &mut [Option], - ip: usize, - at: InputAt, - at_next: InputAt, - ) -> bool { - use crate::prog::Inst::*; - match self.prog[ip] { - Match(match_slot) => { - if match_slot < matches.len() { - matches[match_slot] = true; - } - for (slot, val) in slots.iter_mut().zip(thread_caps.iter()) { - *slot = *val; - } - true - } - Char(ref inst) => { - if inst.c == at.char() { - self.add(nlist, thread_caps, inst.goto, at_next); - } - false - } - Ranges(ref inst) => { - if inst.matches(at.char()) { - self.add(nlist, thread_caps, inst.goto, at_next); - } - false - } - Bytes(ref inst) => { - if let Some(b) = at.byte() { - if inst.matches(b) { - self.add(nlist, thread_caps, inst.goto, at_next); - } - } - false - } - EmptyLook(_) | Save(_) | Split(_) => false, - } - } - - /// Follows epsilon transitions and adds them for processing to nlist, - /// starting at and including ip. - fn add( - &mut self, - nlist: &mut Threads, - thread_caps: &mut [Option], - ip: usize, - at: InputAt, - ) { - self.stack.push(FollowEpsilon::IP(ip)); - while let Some(frame) = self.stack.pop() { - match frame { - FollowEpsilon::IP(ip) => { - self.add_step(nlist, thread_caps, ip, at); - } - FollowEpsilon::Capture { slot, pos } => { - thread_caps[slot] = pos; - } - } - } - } - - /// A helper function for add that avoids excessive pushing to the stack. - fn add_step( - &mut self, - nlist: &mut Threads, - thread_caps: &mut [Option], - mut ip: usize, - at: InputAt, - ) { - // Instead of pushing and popping to the stack, we mutate ip as we - // traverse the set of states. We only push to the stack when we - // absolutely need recursion (restoring captures or following a - // branch). - use crate::prog::Inst::*; - loop { - // Don't visit states we've already added. 
- if nlist.set.contains(ip) { - return; - } - nlist.set.insert(ip); - match self.prog[ip] { - EmptyLook(ref inst) => { - if self.input.is_empty_match(at, inst) { - ip = inst.goto; - } - } - Save(ref inst) => { - if inst.slot < thread_caps.len() { - self.stack.push(FollowEpsilon::Capture { - slot: inst.slot, - pos: thread_caps[inst.slot], - }); - thread_caps[inst.slot] = Some(at.pos()); - } - ip = inst.goto; - } - Split(ref inst) => { - self.stack.push(FollowEpsilon::IP(inst.goto2)); - ip = inst.goto1; - } - Match(_) | Char(_) | Ranges(_) | Bytes(_) => { - let t = &mut nlist.caps(ip); - for (slot, val) in t.iter_mut().zip(thread_caps.iter()) { - *slot = *val; - } - return; - } - } - } - } -} - -impl Threads { - fn new() -> Self { - Threads { set: SparseSet::new(0), caps: vec![], slots_per_thread: 0 } - } - - fn resize(&mut self, num_insts: usize, ncaps: usize) { - if num_insts == self.set.capacity() { - return; - } - self.slots_per_thread = ncaps * 2; - self.set = SparseSet::new(num_insts); - self.caps = vec![None; self.slots_per_thread * num_insts]; - } - - fn caps(&mut self, pc: usize) -> &mut [Option] { - let i = pc * self.slots_per_thread; - &mut self.caps[i..i + self.slots_per_thread] - } -} diff --git a/src/pool.rs b/src/pool.rs deleted file mode 100644 index 6a6f15b194..0000000000 --- a/src/pool.rs +++ /dev/null @@ -1,333 +0,0 @@ -// This module provides a relatively simple thread-safe pool of reusable -// objects. For the most part, it's implemented by a stack represented by a -// Mutex>. It has one small trick: because unlocking a mutex is somewhat -// costly, in the case where a pool is accessed by the first thread that tried -// to get a value, we bypass the mutex. Here are some benchmarks showing the -// difference. 
-// -// 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) -// 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) -// 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) -// 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) -// -// (1) represents our baseline: the master branch at the time of writing when -// using the 'thread_local' crate to implement the pool below. -// -// (2) represents a naive pool implemented completely via Mutex>. There -// is no special trick for bypassing the mutex. -// -// (3) is the same as (2), except it uses Mutex>>. It is twice as -// fast because a Box is much smaller than the T we use with a Pool in this -// crate. So pushing and popping a Box from a Vec is quite a bit faster -// than for T. -// -// (4) is the same as (3), but with the trick for bypassing the mutex in the -// case of the first-to-get thread. -// -// Why move off of thread_local? Even though (4) is a hair faster than (1) -// above, this was not the main goal. The main goal was to move off of -// thread_local and find a way to *simply* re-capture some of its speed for -// regex's specific case. So again, why move off of it? The *primary* reason is -// because of memory leaks. See https://github.com/rust-lang/regex/issues/362 -// for example. (Why do I want it to be simple? Well, I suppose what I mean is, -// "use as much safe code as possible to minimize risk and be as sure as I can -// be that it is correct.") -// -// My guess is that the thread_local design is probably not appropriate for -// regex since its memory usage scales to the number of active threads that -// have used a regex, where as the pool below scales to the number of threads -// that simultaneously use a regex. While neither case permits contraction, -// since we own the pool data structure below, we can add contraction if a -// clear use case pops up in the wild. 
More pressingly though, it seems that -// there are at least some use case patterns where one might have many threads -// sitting around that might have used a regex at one point. While thread_local -// does try to reuse space previously used by a thread that has since stopped, -// its maximal memory usage still scales with the total number of active -// threads. In contrast, the pool below scales with the total number of threads -// *simultaneously* using the pool. The hope is that this uses less memory -// overall. And if it doesn't, we can hopefully tune it somehow. -// -// It seems that these sort of conditions happen frequently -// in FFI inside of other more "managed" languages. This was -// mentioned in the issue linked above, and also mentioned here: -// https://github.com/BurntSushi/rure-go/issues/3. And in particular, users -// confirm that disabling the use of thread_local resolves the leak. -// -// There were other weaker reasons for moving off of thread_local as well. -// Namely, at the time, I was looking to reduce dependencies. And for something -// like regex, maintenance can be simpler when we own the full dependency tree. - -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Mutex; - -/// An atomic counter used to allocate thread IDs. -static COUNTER: AtomicUsize = AtomicUsize::new(1); - -thread_local!( - /// A thread local used to assign an ID to a thread. - static THREAD_ID: usize = { - let next = COUNTER.fetch_add(1, Ordering::Relaxed); - // SAFETY: We cannot permit the reuse of thread IDs since reusing a - // thread ID might result in more than one thread "owning" a pool, - // and thus, permit accessing a mutable value from multiple threads - // simultaneously without synchronization. The intent of this panic is - // to be a sanity check. It is not expected that the thread ID space - // will actually be exhausted in practice. 
- // - // This checks that the counter never wraps around, since atomic - // addition wraps around on overflow. - if next == 0 { - panic!("regex: thread ID allocation space exhausted"); - } - next - }; -); - -/// The type of the function used to create values in a pool when the pool is -/// empty and the caller requests one. -type CreateFn = - Box T + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>; - -/// A simple thread safe pool for reusing values. -/// -/// Getting a value out comes with a guard. When that guard is dropped, the -/// value is automatically put back in the pool. -/// -/// A Pool impls Sync when T is Send (even if it's not Sync). This means -/// that T can use interior mutability. This is possible because a pool is -/// guaranteed to provide a value to exactly one thread at any time. -/// -/// Currently, a pool never contracts in size. Its size is proportional to the -/// number of simultaneous uses. -pub struct Pool { - /// A stack of T values to hand out. These are used when a Pool is - /// accessed by a thread that didn't create it. - stack: Mutex>>, - /// A function to create more T values when stack is empty and a caller - /// has requested a T. - create: CreateFn, - /// The ID of the thread that owns this pool. The owner is the thread - /// that makes the first call to 'get'. When the owner calls 'get', it - /// gets 'owner_val' directly instead of returning a T from 'stack'. - /// See comments elsewhere for details, but this is intended to be an - /// optimization for the common case that makes getting a T faster. - /// - /// It is initialized to a value of zero (an impossible thread ID) as a - /// sentinel to indicate that it is unowned. - owner: AtomicUsize, - /// A value to return when the caller is in the same thread that created - /// the Pool. - owner_val: T, -} - -// SAFETY: Since we want to use a Pool from multiple threads simultaneously -// behind an Arc, we need for it to be Sync. 
In cases where T is sync, Pool -// would be Sync. However, since we use a Pool to store mutable scratch space, -// we wind up using a T that has interior mutability and is thus itself not -// Sync. So what we *really* want is for our Pool to by Sync even when T is -// not Sync (but is at least Send). -// -// The only non-sync aspect of a Pool is its 'owner_val' field, which is used -// to implement faster access to a pool value in the common case of a pool -// being accessed in the same thread in which it was created. The 'stack' field -// is also shared, but a Mutex where T: Send is already Sync. So we only -// need to worry about 'owner_val'. -// -// The key is to guarantee that 'owner_val' can only ever be accessed from one -// thread. In our implementation below, we guarantee this by only returning the -// 'owner_val' when the ID of the current thread matches the ID of the thread -// that created the Pool. Since this can only ever be one thread, it follows -// that only one thread can access 'owner_val' at any point in time. Thus, it -// is safe to declare that Pool is Sync when T is Send. -// -// NOTE: It would also be possible to make the owning thread be the *first* -// thread that tries to get a value out of a Pool. However, the current -// implementation is a little simpler and it's not clear if making the first -// thread (rather than the creating thread) is meaningfully better. -// -// If there is a way to achieve our performance goals using safe code, then -// I would very much welcome a patch. As it stands, the implementation below -// tries to balance safety with performance. The case where a Regex is used -// from multiple threads simultaneously will suffer a bit since getting a cache -// will require unlocking a mutex. 
-unsafe impl Sync for Pool {} - -impl ::std::fmt::Debug for Pool { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - f.debug_struct("Pool") - .field("stack", &self.stack) - .field("owner", &self.owner) - .field("owner_val", &self.owner_val) - .finish() - } -} - -/// A guard that is returned when a caller requests a value from the pool. -/// -/// The purpose of the guard is to use RAII to automatically put the value back -/// in the pool once it's dropped. -#[derive(Debug)] -pub struct PoolGuard<'a, T: Send> { - /// The pool that this guard is attached to. - pool: &'a Pool, - /// This is None when the guard represents the special "owned" value. In - /// which case, the value is retrieved from 'pool.owner_val'. - value: Option>, -} - -impl Pool { - /// Create a new pool. The given closure is used to create values in the - /// pool when necessary. - pub fn new(create: CreateFn) -> Pool { - let owner = AtomicUsize::new(0); - let owner_val = create(); - Pool { stack: Mutex::new(vec![]), create, owner, owner_val } - } - - /// Get a value from the pool. The caller is guaranteed to have exclusive - /// access to the given value. - /// - /// Note that there is no guarantee provided about which value in the - /// pool is returned. That is, calling get, dropping the guard (causing - /// the value to go back into the pool) and then calling get again is NOT - /// guaranteed to return the same value received in the first get call. - #[cfg_attr(feature = "perf-inline", inline(always))] - pub fn get(&self) -> PoolGuard<'_, T> { - // Our fast path checks if the caller is the thread that "owns" this - // pool. Or stated differently, whether it is the first thread that - // tried to extract a value from the pool. If it is, then we can return - // a T to the caller without going through a mutex. - // - // SAFETY: We must guarantee that only one thread gets access to this - // value. 
Since a thread is uniquely identified by the THREAD_ID thread - // local, it follows that is the caller's thread ID is equal to the - // owner, then only one thread may receive this value. - let caller = THREAD_ID.with(|id| *id); - let owner = self.owner.load(Ordering::Relaxed); - if caller == owner { - return self.guard_owned(); - } - self.get_slow(caller, owner) - } - - /// This is the "slow" version that goes through a mutex to pop an - /// allocated value off a stack to return to the caller. (Or, if the stack - /// is empty, a new value is created.) - /// - /// If the pool has no owner, then this will set the owner. - #[cold] - fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { - use std::sync::atomic::Ordering::Relaxed; - - if owner == 0 { - // The sentinel 0 value means this pool is not yet owned. We - // try to atomically set the owner. If we do, then this thread - // becomes the owner and we can return a guard that represents - // the special T for the owner. - let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); - if res.is_ok() { - return self.guard_owned(); - } - } - let mut stack = self.stack.lock().unwrap(); - let value = match stack.pop() { - None => Box::new((self.create)()), - Some(value) => value, - }; - self.guard_stack(value) - } - - /// Puts a value back into the pool. Callers don't need to call this. Once - /// the guard that's returned by 'get' is dropped, it is put back into the - /// pool automatically. - fn put(&self, value: Box) { - let mut stack = self.stack.lock().unwrap(); - stack.push(value); - } - - /// Create a guard that represents the special owned T. - fn guard_owned(&self) -> PoolGuard<'_, T> { - PoolGuard { pool: self, value: None } - } - - /// Create a guard that contains a value from the pool's stack. - fn guard_stack(&self, value: Box) -> PoolGuard<'_, T> { - PoolGuard { pool: self, value: Some(value) } - } -} - -impl<'a, T: Send> PoolGuard<'a, T> { - /// Return the underlying value. 
- pub fn value(&self) -> &T { - match self.value { - None => &self.pool.owner_val, - Some(ref v) => &**v, - } - } -} - -impl<'a, T: Send> Drop for PoolGuard<'a, T> { - #[cfg_attr(feature = "perf-inline", inline(always))] - fn drop(&mut self) { - if let Some(value) = self.value.take() { - self.pool.put(value); - } - } -} - -#[cfg(test)] -mod tests { - use std::panic::{RefUnwindSafe, UnwindSafe}; - - use super::*; - - #[test] - fn oibits() { - use crate::exec::ProgramCache; - - fn has_oibits() {} - has_oibits::>(); - } - - // Tests that Pool implements the "single owner" optimization. That is, the - // thread that first accesses the pool gets its own copy, while all other - // threads get distinct copies. - #[test] - fn thread_owner_optimization() { - use std::cell::RefCell; - use std::sync::Arc; - - let pool: Arc>>> = - Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); - pool.get().value().borrow_mut().push('x'); - - let pool1 = pool.clone(); - let t1 = std::thread::spawn(move || { - let guard = pool1.get(); - let v = guard.value(); - v.borrow_mut().push('y'); - }); - - let pool2 = pool.clone(); - let t2 = std::thread::spawn(move || { - let guard = pool2.get(); - let v = guard.value(); - v.borrow_mut().push('z'); - }); - - t1.join().unwrap(); - t2.join().unwrap(); - - // If we didn't implement the single owner optimization, then one of - // the threads above is likely to have mutated the [a, x] vec that - // we stuffed in the pool before spawning the threads. But since - // neither thread was first to access the pool, and because of the - // optimization, we should be guaranteed that neither thread mutates - // the special owned pool value. - // - // (Technically this is an implementation detail and not a contract of - // Pool's API.) 
- assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); - } -} diff --git a/src/prog.rs b/src/prog.rs deleted file mode 100644 index 100862cf1b..0000000000 --- a/src/prog.rs +++ /dev/null @@ -1,451 +0,0 @@ -use std::cmp::Ordering; -use std::collections::HashMap; -use std::fmt; -use std::mem; -use std::ops::Deref; -use std::slice; -use std::sync::Arc; - -use crate::input::Char; -use crate::literal::LiteralSearcher; - -/// `InstPtr` represents the index of an instruction in a regex program. -pub type InstPtr = usize; - -/// Program is a sequence of instructions and various facts about thos -/// instructions. -#[derive(Clone)] -pub struct Program { - /// A sequence of instructions that represents an NFA. - pub insts: Vec, - /// Pointers to each Match instruction in the sequence. - /// - /// This is always length 1 unless this program represents a regex set. - pub matches: Vec, - /// The ordered sequence of all capture groups extracted from the AST. - /// Unnamed groups are `None`. - pub captures: Vec>, - /// Pointers to all named capture groups into `captures`. - pub capture_name_idx: Arc>, - /// If the number of capture groups is the same for all possible matches, - /// then this is that number. - pub static_captures_len: Option, - /// A pointer to the start instruction. This can vary depending on how - /// the program was compiled. For example, programs for use with the DFA - /// engine have a `.*?` inserted at the beginning of unanchored regular - /// expressions. The actual starting point of the program is after the - /// `.*?`. - pub start: InstPtr, - /// A set of equivalence classes for discriminating bytes in the compiled - /// program. - pub byte_classes: Vec, - /// When true, this program can only match valid UTF-8. - pub only_utf8: bool, - /// When true, this program uses byte range instructions instead of Unicode - /// range instructions. - pub is_bytes: bool, - /// When true, the program is compiled for DFA matching. 
For example, this - /// implies `is_bytes` and also inserts a preceding `.*?` for unanchored - /// regexes. - pub is_dfa: bool, - /// When true, the program matches text in reverse (for use only in the - /// DFA). - pub is_reverse: bool, - /// Whether the regex must match from the start of the input. - pub is_anchored_start: bool, - /// Whether the regex must match at the end of the input. - pub is_anchored_end: bool, - /// Whether this program contains a Unicode word boundary instruction. - pub has_unicode_word_boundary: bool, - /// A possibly empty machine for very quickly matching prefix literals. - pub prefixes: LiteralSearcher, - /// A limit on the size of the cache that the DFA is allowed to use while - /// matching. - /// - /// The cache limit specifies approximately how much space we're willing to - /// give to the state cache. Once the state cache exceeds the size, it is - /// wiped and all states must be re-computed. - /// - /// Note that this value does not impact correctness. It can be set to 0 - /// and the DFA will run just fine. (It will only ever store exactly one - /// state in the cache, and will likely run very slowly, but it will work.) - /// - /// Also note that this limit is *per thread of execution*. That is, - /// if the same regex is used to search text across multiple threads - /// simultaneously, then the DFA cache is not shared. Instead, copies are - /// made. - pub dfa_size_limit: usize, -} - -impl Program { - /// Creates an empty instruction sequence. Fields are given default - /// values. 
- pub fn new() -> Self { - Program { - insts: vec![], - matches: vec![], - captures: vec![], - capture_name_idx: Arc::new(HashMap::new()), - static_captures_len: None, - start: 0, - byte_classes: vec![0; 256], - only_utf8: true, - is_bytes: false, - is_dfa: false, - is_reverse: false, - is_anchored_start: false, - is_anchored_end: false, - has_unicode_word_boundary: false, - prefixes: LiteralSearcher::empty(), - dfa_size_limit: 2 * (1 << 20), - } - } - - /// If pc is an index to a no-op instruction (like Save), then return the - /// next pc that is not a no-op instruction. - pub fn skip(&self, mut pc: usize) -> usize { - loop { - match self[pc] { - Inst::Save(ref i) => pc = i.goto, - _ => return pc, - } - } - } - - /// Return true if and only if an execution engine at instruction `pc` will - /// always lead to a match. - pub fn leads_to_match(&self, pc: usize) -> bool { - if self.matches.len() > 1 { - // If we have a regex set, then we have more than one ending - // state, so leading to one of those states is generally - // meaningless. - return false; - } - match self[self.skip(pc)] { - Inst::Match(_) => true, - _ => false, - } - } - - /// Returns true if the current configuration demands that an implicit - /// `.*?` be prepended to the instruction sequence. - pub fn needs_dotstar(&self) -> bool { - self.is_dfa && !self.is_reverse && !self.is_anchored_start - } - - /// Returns true if this program uses Byte instructions instead of - /// Char/Range instructions. - pub fn uses_bytes(&self) -> bool { - self.is_bytes || self.is_dfa - } - - /// Returns true if this program exclusively matches valid UTF-8 bytes. - /// - /// That is, if an invalid UTF-8 byte is seen, then no match is possible. - pub fn only_utf8(&self) -> bool { - self.only_utf8 - } - - /// Return the approximate heap usage of this instruction sequence in - /// bytes. 
- pub fn approximate_size(&self) -> usize { - // The only instruction that uses heap space is Ranges (for - // Unicode codepoint programs) to store non-overlapping codepoint - // ranges. To keep this operation constant time, we ignore them. - (self.len() * mem::size_of::()) - + (self.matches.len() * mem::size_of::()) - + (self.captures.len() * mem::size_of::>()) - + (self.capture_name_idx.len() - * (mem::size_of::() + mem::size_of::())) - + (self.byte_classes.len() * mem::size_of::()) - + self.prefixes.approximate_size() - } -} - -impl Deref for Program { - type Target = [Inst]; - - #[cfg_attr(feature = "perf-inline", inline(always))] - fn deref(&self) -> &Self::Target { - &*self.insts - } -} - -impl fmt::Debug for Program { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use self::Inst::*; - - fn with_goto(cur: usize, goto: usize, fmtd: String) -> String { - if goto == cur + 1 { - fmtd - } else { - format!("{} (goto: {})", fmtd, goto) - } - } - - fn visible_byte(b: u8) -> String { - use std::ascii::escape_default; - let escaped = escape_default(b).collect::>(); - String::from_utf8_lossy(&escaped).into_owned() - } - - for (pc, inst) in self.iter().enumerate() { - match *inst { - Match(slot) => write!(f, "{:04} Match({:?})", pc, slot)?, - Save(ref inst) => { - let s = format!("{:04} Save({})", pc, inst.slot); - write!(f, "{}", with_goto(pc, inst.goto, s))?; - } - Split(ref inst) => { - write!( - f, - "{:04} Split({}, {})", - pc, inst.goto1, inst.goto2 - )?; - } - EmptyLook(ref inst) => { - let s = format!("{:?}", inst.look); - write!(f, "{:04} {}", pc, with_goto(pc, inst.goto, s))?; - } - Char(ref inst) => { - let s = format!("{:?}", inst.c); - write!(f, "{:04} {}", pc, with_goto(pc, inst.goto, s))?; - } - Ranges(ref inst) => { - let ranges = inst - .ranges - .iter() - .map(|r| format!("{:?}-{:?}", r.0, r.1)) - .collect::>() - .join(", "); - write!( - f, - "{:04} {}", - pc, - with_goto(pc, inst.goto, ranges) - )?; - } - Bytes(ref inst) => { - let s = 
format!( - "Bytes({}, {})", - visible_byte(inst.start), - visible_byte(inst.end) - ); - write!(f, "{:04} {}", pc, with_goto(pc, inst.goto, s))?; - } - } - if pc == self.start { - write!(f, " (start)")?; - } - writeln!(f)?; - } - Ok(()) - } -} - -impl<'a> IntoIterator for &'a Program { - type Item = &'a Inst; - type IntoIter = slice::Iter<'a, Inst>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -/// Inst is an instruction code in a Regex program. -/// -/// Regrettably, a regex program either contains Unicode codepoint -/// instructions (Char and Ranges) or it contains byte instructions (Bytes). -/// A regex program can never contain both. -/// -/// It would be worth investigating splitting this into two distinct types and -/// then figuring out how to make the matching engines polymorphic over those -/// types without sacrificing performance. -/// -/// Other than the benefit of moving invariants into the type system, another -/// benefit is the decreased size. If we remove the `Char` and `Ranges` -/// instructions from the `Inst` enum, then its size shrinks from 32 bytes to -/// 24 bytes. (This is because of the removal of a `Box<[]>` in the `Ranges` -/// variant.) Given that byte based machines are typically much bigger than -/// their Unicode analogues (because they can decode UTF-8 directly), this ends -/// up being a pretty significant savings. -#[derive(Clone, Debug)] -pub enum Inst { - /// Match indicates that the program has reached a match state. - /// - /// The number in the match corresponds to the Nth logical regular - /// expression in this program. This index is always 0 for normal regex - /// programs. Values greater than 0 appear when compiling regex sets, and - /// each match instruction gets its own unique value. The value corresponds - /// to the Nth regex in the set. - Match(usize), - /// Save causes the program to save the current location of the input in - /// the slot indicated by InstSave. 
- Save(InstSave), - /// Split causes the program to diverge to one of two paths in the - /// program, preferring goto1 in InstSplit. - Split(InstSplit), - /// EmptyLook represents a zero-width assertion in a regex program. A - /// zero-width assertion does not consume any of the input text. - EmptyLook(InstEmptyLook), - /// Char requires the regex program to match the character in InstChar at - /// the current position in the input. - Char(InstChar), - /// Ranges requires the regex program to match the character at the current - /// position in the input with one of the ranges specified in InstRanges. - Ranges(InstRanges), - /// Bytes is like Ranges, except it expresses a single byte range. It is - /// used in conjunction with Split instructions to implement multi-byte - /// character classes. - Bytes(InstBytes), -} - -impl Inst { - /// Returns true if and only if this is a match instruction. - pub fn is_match(&self) -> bool { - match *self { - Inst::Match(_) => true, - _ => false, - } - } -} - -/// Representation of the Save instruction. -#[derive(Clone, Debug)] -pub struct InstSave { - /// The next location to execute in the program. - pub goto: InstPtr, - /// The capture slot (there are two slots for every capture in a regex, - /// including the zeroth capture for the entire match). - pub slot: usize, -} - -/// Representation of the Split instruction. -#[derive(Clone, Debug)] -pub struct InstSplit { - /// The first instruction to try. A match resulting from following goto1 - /// has precedence over a match resulting from following goto2. - pub goto1: InstPtr, - /// The second instruction to try. A match resulting from following goto1 - /// has precedence over a match resulting from following goto2. - pub goto2: InstPtr, -} - -/// Representation of the `EmptyLook` instruction. -#[derive(Clone, Debug)] -pub struct InstEmptyLook { - /// The next location to execute in the program if this instruction - /// succeeds. 
- pub goto: InstPtr, - /// The type of zero-width assertion to check. - pub look: EmptyLook, -} - -/// The set of zero-width match instructions. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum EmptyLook { - /// Start of line or input. - StartLine, - /// End of line or input. - EndLine, - /// Start of input. - StartText, - /// End of input. - EndText, - /// Word character on one side and non-word character on other. - WordBoundary, - /// Word character on both sides or non-word character on both sides. - NotWordBoundary, - /// ASCII word boundary. - WordBoundaryAscii, - /// Not ASCII word boundary. - NotWordBoundaryAscii, -} - -/// Representation of the Char instruction. -#[derive(Clone, Debug)] -pub struct InstChar { - /// The next location to execute in the program if this instruction - /// succeeds. - pub goto: InstPtr, - /// The character to test. - pub c: char, -} - -/// Representation of the Ranges instruction. -#[derive(Clone, Debug)] -pub struct InstRanges { - /// The next location to execute in the program if this instruction - /// succeeds. - pub goto: InstPtr, - /// The set of Unicode scalar value ranges to test. - pub ranges: Box<[(char, char)]>, -} - -impl InstRanges { - /// Tests whether the given input character matches this instruction. - pub fn matches(&self, c: Char) -> bool { - // This speeds up the `match_class_unicode` benchmark by checking - // some common cases quickly without binary search. e.g., Matching - // a Unicode class on predominantly ASCII text. - for r in self.ranges.iter().take(4) { - if c < r.0 { - return false; - } - if c <= r.1 { - return true; - } - } - self.ranges - .binary_search_by(|r| { - if r.1 < c { - Ordering::Less - } else if r.0 > c { - Ordering::Greater - } else { - Ordering::Equal - } - }) - .is_ok() - } - - /// Return the number of distinct characters represented by all of the - /// ranges. 
- pub fn num_chars(&self) -> usize { - self.ranges - .iter() - .map(|&(s, e)| 1 + (e as u32) - (s as u32)) - .sum::() as usize - } -} - -/// Representation of the Bytes instruction. -#[derive(Clone, Debug)] -pub struct InstBytes { - /// The next location to execute in the program if this instruction - /// succeeds. - pub goto: InstPtr, - /// The start (inclusive) of this byte range. - pub start: u8, - /// The end (inclusive) of this byte range. - pub end: u8, -} - -impl InstBytes { - /// Returns true if and only if the given byte is in this range. - pub fn matches(&self, byte: u8) -> bool { - self.start <= byte && byte <= self.end - } -} - -#[cfg(test)] -mod test { - #[test] - #[cfg(target_pointer_width = "64")] - fn test_size_of_inst() { - use std::mem::size_of; - - use super::Inst; - - assert_eq!(32, size_of::()); - } -} diff --git a/src/re_builder.rs b/src/re_builder.rs index ee6383690d..5259ab0b0a 100644 --- a/src/re_builder.rs +++ b/src/re_builder.rs @@ -1,18 +1,14 @@ +use regex_automata::util::syntax; + /// The set of user configurable options for compiling zero or more regexes. +/// This is shared among all top-level regex APIs. 
#[derive(Clone, Debug)] #[allow(missing_docs)] -pub struct RegexOptions { - pub pats: Vec, - pub size_limit: usize, - pub dfa_size_limit: usize, - pub nest_limit: u32, - pub case_insensitive: bool, - pub multi_line: bool, - pub dot_matches_new_line: bool, - pub swap_greed: bool, - pub ignore_whitespace: bool, - pub unicode: bool, - pub octal: bool, +struct RegexOptions { + pats: Vec, + size_limit: usize, + dfa_size_limit: usize, + syntax: syntax::Config, } impl Default for RegexOptions { @@ -21,26 +17,21 @@ impl Default for RegexOptions { pats: vec![], size_limit: 10 * (1 << 20), dfa_size_limit: 2 * (1 << 20), - nest_limit: 250, - case_insensitive: false, - multi_line: false, - dot_matches_new_line: false, - swap_greed: false, - ignore_whitespace: false, - unicode: true, - octal: false, + syntax: syntax::Config::default(), } } } macro_rules! define_builder { - ($name:ident, $regex_mod:ident, $only_utf8:expr) => { + ($name:ident, $regex_mod:ident, $utf8:expr) => { pub mod $name { - use super::RegexOptions; - use crate::error::Error; - use crate::exec::ExecBuilder; + use std::sync::Arc; + + use regex_automata::meta; - use crate::$regex_mod::Regex; + use crate::{error::Error, $regex_mod::Regex}; + + use super::RegexOptions; /// A configurable builder for a regular expression. /// @@ -67,10 +58,20 @@ macro_rules! define_builder { /// pattern given to `new` verbatim. Notably, it will not incorporate any /// of the flags set on this builder. 
pub fn build(&self) -> Result { - ExecBuilder::new_options(self.0.clone()) - .only_utf8($only_utf8) - .build() - .map(Regex::from) + let config = meta::Config::new() + .match_kind(regex_automata::MatchKind::LeftmostFirst) + .utf8_empty($utf8) + .nfa_size_limit(Some(self.0.size_limit)) + .hybrid_cache_capacity(self.0.dfa_size_limit); + meta::Builder::new() + .configure(config) + .syntax(self.0.syntax.clone().utf8($utf8)) + .build(&self.0.pats[0]) + .map(|meta| Regex { + meta, + pattern: Arc::from(self.0.pats[0].as_str()), + }) + .map_err(Error::from_meta_build_error) } /// Set the value for the case insensitive (`i`) flag. @@ -81,7 +82,7 @@ macro_rules! define_builder { &mut self, yes: bool, ) -> &mut RegexBuilder { - self.0.case_insensitive = yes; + self.0.syntax = self.0.syntax.case_insensitive(yes); self } @@ -92,7 +93,7 @@ macro_rules! define_builder { /// /// By default, they match beginning/end of the input. pub fn multi_line(&mut self, yes: bool) -> &mut RegexBuilder { - self.0.multi_line = yes; + self.0.syntax = self.0.syntax.multi_line(yes); self } @@ -107,7 +108,7 @@ macro_rules! define_builder { &mut self, yes: bool, ) -> &mut RegexBuilder { - self.0.dot_matches_new_line = yes; + self.0.syntax = self.0.syntax.dot_matches_new_line(yes); self } @@ -118,7 +119,7 @@ macro_rules! define_builder { /// /// By default, `a*` is greedy and `a*?` is lazy. pub fn swap_greed(&mut self, yes: bool) -> &mut RegexBuilder { - self.0.swap_greed = yes; + self.0.syntax = self.0.syntax.swap_greed(yes); self } @@ -131,7 +132,7 @@ macro_rules! define_builder { &mut self, yes: bool, ) -> &mut RegexBuilder { - self.0.ignore_whitespace = yes; + self.0.syntax = self.0.syntax.ignore_whitespace(yes); self } @@ -140,7 +141,7 @@ macro_rules! define_builder { /// Enabled by default. When disabled, character classes such as `\w` only /// match ASCII word characters instead of all Unicode word characters. 
pub fn unicode(&mut self, yes: bool) -> &mut RegexBuilder { - self.0.unicode = yes; + self.0.syntax = self.0.syntax.unicode(yes); self } @@ -160,7 +161,7 @@ macro_rules! define_builder { /// /// Octal syntax is disabled by default. pub fn octal(&mut self, yes: bool) -> &mut RegexBuilder { - self.0.octal = yes; + self.0.syntax = self.0.syntax.octal(yes); self } @@ -220,7 +221,7 @@ macro_rules! define_builder { /// in an obvious way in the concrete syntax, therefore, it should not be /// used in a granular way. pub fn nest_limit(&mut self, limit: u32) -> &mut RegexBuilder { - self.0.nest_limit = limit; + self.0.syntax.nest_limit(limit); self } } @@ -232,13 +233,15 @@ define_builder!(bytes, re_bytes, false); define_builder!(unicode, re_unicode, true); macro_rules! define_set_builder { - ($name:ident, $regex_mod:ident, $only_utf8:expr) => { + ($name:ident, $regex_mod:ident, $utf8:expr) => { pub mod $name { - use super::RegexOptions; - use crate::error::Error; - use crate::exec::ExecBuilder; + use std::sync::Arc; + + use regex_automata::meta; - use crate::re_set::$regex_mod::RegexSet; + use crate::{error::Error, re_set::$regex_mod::RegexSet}; + + use super::RegexOptions; /// A configurable builder for a set of regular expressions. /// @@ -267,10 +270,20 @@ macro_rules! define_set_builder { /// Consume the builder and compile the regular expressions into a set. pub fn build(&self) -> Result { - ExecBuilder::new_options(self.0.clone()) - .only_utf8($only_utf8) - .build() - .map(RegexSet::from) + let config = meta::Config::new() + .match_kind(regex_automata::MatchKind::All) + .utf8_empty($utf8) + .nfa_size_limit(Some(self.0.size_limit)) + .hybrid_cache_capacity(self.0.dfa_size_limit); + meta::Builder::new() + .configure(config) + .syntax(self.0.syntax.clone().utf8($utf8)) + .build_many(&self.0.pats) + .map(|meta| RegexSet { + meta, + patterns: Arc::from(&*self.0.pats), + }) + .map_err(Error::from_meta_build_error) } /// Set the value for the case insensitive (`i`) flag. 
@@ -278,7 +291,7 @@ macro_rules! define_set_builder { &mut self, yes: bool, ) -> &mut RegexSetBuilder { - self.0.case_insensitive = yes; + self.0.syntax = self.0.syntax.case_insensitive(yes); self } @@ -287,7 +300,7 @@ macro_rules! define_set_builder { &mut self, yes: bool, ) -> &mut RegexSetBuilder { - self.0.multi_line = yes; + self.0.syntax = self.0.syntax.multi_line(yes); self } @@ -302,7 +315,7 @@ macro_rules! define_set_builder { &mut self, yes: bool, ) -> &mut RegexSetBuilder { - self.0.dot_matches_new_line = yes; + self.0.syntax = self.0.syntax.dot_matches_new_line(yes); self } @@ -311,7 +324,7 @@ macro_rules! define_set_builder { &mut self, yes: bool, ) -> &mut RegexSetBuilder { - self.0.swap_greed = yes; + self.0.syntax = self.0.syntax.swap_greed(yes); self } @@ -320,13 +333,13 @@ macro_rules! define_set_builder { &mut self, yes: bool, ) -> &mut RegexSetBuilder { - self.0.ignore_whitespace = yes; + self.0.syntax = self.0.syntax.ignore_whitespace(yes); self } /// Set the value for the Unicode (`u`) flag. pub fn unicode(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.0.unicode = yes; + self.0.syntax = self.0.syntax.unicode(yes); self } @@ -346,7 +359,7 @@ macro_rules! define_set_builder { /// /// Octal syntax is disabled by default. pub fn octal(&mut self, yes: bool) -> &mut RegexSetBuilder { - self.0.octal = yes; + self.0.syntax = self.0.syntax.octal(yes); self } @@ -409,7 +422,7 @@ macro_rules! 
define_set_builder { &mut self, limit: u32, ) -> &mut RegexSetBuilder { - self.0.nest_limit = limit; + self.0.syntax.nest_limit(limit); self } } diff --git a/src/re_bytes.rs b/src/re_bytes.rs index e3a3b019b5..38a6664100 100644 --- a/src/re_bytes.rs +++ b/src/re_bytes.rs @@ -1,18 +1,17 @@ -use std::borrow::Cow; -use std::collections::HashMap; -use std::fmt; -use std::iter::FusedIterator; -use std::ops::{Index, Range}; -use std::str::FromStr; -use std::sync::Arc; - -use crate::find_byte::find_byte; - -use crate::error::Error; -use crate::exec::{Exec, ExecNoSync}; -use crate::expand::expand_bytes; -use crate::re_builder::bytes::RegexBuilder; -use crate::re_trait::{self, RegularExpression, SubCapturesPosIter}; +use std::{ + borrow::Cow, + fmt, + iter::FusedIterator, + ops::{Index, Range}, + str::FromStr, + sync::Arc, +}; + +use regex_automata::{meta, util::captures, Input, PatternID}; + +use crate::{ + error::Error, find_byte::find_byte, re_builder::bytes::RegexBuilder, +}; /// Match represents a single match of a regex in a haystack. /// @@ -105,7 +104,10 @@ impl<'t> From> for Range { /// these byte offsets may not correspond to UTF-8 sequence boundaries since /// the regexes in this module can match arbitrary bytes. #[derive(Clone)] -pub struct Regex(Exec); +pub struct Regex { + pub(crate) meta: meta::Regex, + pub(crate) pattern: Arc, +} impl fmt::Display for Regex { /// Shows the original regular expression. @@ -121,16 +123,6 @@ impl fmt::Debug for Regex { } } -/// A constructor for Regex from an Exec. -/// -/// This is hidden because Exec isn't actually part of the public API. 
-#[doc(hidden)] -impl From for Regex { - fn from(exec: Exec) -> Regex { - Regex(exec) - } -} - impl FromStr for Regex { type Err = Error; @@ -169,6 +161,7 @@ impl Regex { /// assert!(Regex::new(r"\b\w{13}\b").unwrap().is_match(text)); /// # } /// ``` + #[inline] pub fn is_match(&self, text: &[u8]) -> bool { self.is_match_at(text, 0) } @@ -193,6 +186,7 @@ impl Regex { /// assert_eq!((mat.start(), mat.end()), (2, 15)); /// # } /// ``` + #[inline] pub fn find<'t>(&self, text: &'t [u8]) -> Option> { self.find_at(text, 0) } @@ -215,8 +209,9 @@ impl Regex { /// } /// # } /// ``` + #[inline] pub fn find_iter<'r, 't>(&'r self, text: &'t [u8]) -> Matches<'r, 't> { - Matches(self.0.searcher().find_iter(text)) + Matches { text, it: self.meta.find_iter(text) } } /// Returns the capture groups corresponding to the leftmost-first @@ -282,6 +277,7 @@ impl Regex { /// /// The `0`th capture group is always unnamed, so it must always be /// accessed with `get(0)` or `[0]`. + #[inline] pub fn captures<'t>(&self, text: &'t [u8]) -> Option> { self.captures_at(text, 0) } @@ -312,11 +308,12 @@ impl Regex { /// // Movie: M, Released: 1931 /// # } /// ``` + #[inline] pub fn captures_iter<'r, 't>( &'r self, text: &'t [u8], ) -> CaptureMatches<'r, 't> { - CaptureMatches(self.0.searcher().captures_iter(text)) + CaptureMatches { text, it: self.meta.captures_iter(text) } } /// Returns an iterator of substrings of `text` delimited by a match of the @@ -339,8 +336,9 @@ impl Regex { /// ]); /// # } /// ``` + #[inline] pub fn split<'r, 't>(&'r self, text: &'t [u8]) -> Split<'r, 't> { - Split { finder: self.find_iter(text), last: 0 } + Split { text, it: self.meta.split(text) } } /// Returns an iterator of at most `limit` substrings of `text` delimited @@ -363,12 +361,13 @@ impl Regex { /// assert_eq!(fields, vec![&b"Hey"[..], &b"How"[..], &b"are you?"[..]]); /// # } /// ``` + #[inline] pub fn splitn<'r, 't>( &'r self, text: &'t [u8], limit: usize, ) -> SplitN<'r, 't> { - SplitN { splits: 
self.split(text), n: limit } + SplitN { text, it: self.meta.splitn(text, limit) } } /// Replaces the leftmost-first match with the replacement provided. The @@ -479,6 +478,7 @@ impl Regex { /// assert_eq!(result, &b"$2 $last"[..]); /// # } /// ``` + #[inline] pub fn replace<'t, R: Replacer>( &self, text: &'t [u8], @@ -493,6 +493,7 @@ impl Regex { /// /// See the documentation for `replace` for details on how to access /// capturing group matches in the replacement text. + #[inline] pub fn replace_all<'t, R: Replacer>( &self, text: &'t [u8], @@ -585,6 +586,7 @@ impl Regex { /// assert_eq!(pos, Some(1)); /// # } /// ``` + #[inline] pub fn shortest_match(&self, text: &[u8]) -> Option { self.shortest_match_at(text, 0) } @@ -595,12 +597,15 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. + #[inline] pub fn shortest_match_at( &self, text: &[u8], start: usize, ) -> Option { - self.0.searcher().shortest_match_at(text, start) + let mut input = Input::new(text).earliest(true); + input.set_start(start); + self.meta.search_half(&input).map(|hm| hm.offset()) } /// Returns the same as is_match, but starts the search at the given @@ -609,8 +614,11 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. + #[inline] pub fn is_match_at(&self, text: &[u8], start: usize) -> bool { - self.0.searcher().is_match_at(text, start) + let mut input = Input::new(text); + input.set_start(start); + self.meta.is_match(input) } /// Returns the same as find, but starts the search at the given @@ -619,15 +627,15 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. 
+ #[inline] pub fn find_at<'t>( &self, text: &'t [u8], start: usize, ) -> Option> { - self.0 - .searcher() - .find_at(text, start) - .map(|(s, e)| Match::new(text, s, e)) + let mut input = Input::new(text); + input.set_start(start); + self.meta.find(input).map(|m| Match::new(text, m.start(), m.end())) } /// Returns the same as [`Regex::captures`], but starts the search at the @@ -636,17 +644,21 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. + #[inline] pub fn captures_at<'t>( &self, text: &'t [u8], start: usize, ) -> Option> { - let mut locs = self.capture_locations(); - self.captures_read_at(&mut locs, text, start).map(move |_| Captures { - text, - locs: locs.0, - named_groups: self.0.capture_name_idx().clone(), - }) + let mut caps = self.meta.create_captures(); + let mut input = Input::new(text); + input.set_start(start); + self.meta.captures(input, &mut caps); + if caps.is_match() { + Some(Captures { text, caps }) + } else { + None + } } /// This is like `captures`, but uses @@ -659,6 +671,7 @@ impl Regex { /// /// This returns the overall match if this was successful, which is always /// equivalence to the `0`th capture group. + #[inline] pub fn captures_read<'t>( &self, locs: &mut CaptureLocations, @@ -673,16 +686,17 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. 
+ #[inline] pub fn captures_read_at<'t>( &self, locs: &mut CaptureLocations, text: &'t [u8], start: usize, ) -> Option> { - self.0 - .searcher() - .captures_read_at(&mut locs.0, text, start) - .map(|(s, e)| Match::new(text, s, e)) + let mut input = Input::new(text); + input.set_start(start); + self.meta.captures(input, &mut locs.0); + locs.0.get_match().map(|m| Match::new(text, m.start(), m.end())) } /// An undocumented alias for `captures_read_at`. @@ -691,6 +705,7 @@ impl Regex { /// breaking that crate, we continue to provide the name as an undocumented /// alias. #[doc(hidden)] + #[inline] pub fn read_captures_at<'t>( &self, locs: &mut CaptureLocations, @@ -704,18 +719,21 @@ impl Regex { /// Auxiliary methods. impl Regex { /// Returns the original string of this regex. + #[inline] pub fn as_str(&self) -> &str { - &self.0.regex_strings()[0] + &self.pattern } /// Returns an iterator over the capture names. + #[inline] pub fn capture_names(&self) -> CaptureNames<'_> { - CaptureNames(self.0.capture_names().iter()) + CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO)) } /// Returns the number of captures. + #[inline] pub fn captures_len(&self) -> usize { - self.0.capture_names().len() + self.meta.group_info().group_len(PatternID::ZERO) } /// Returns the total number of capturing groups that appear in every @@ -755,13 +773,14 @@ impl Regex { /// ``` #[inline] pub fn static_captures_len(&self) -> Option { - self.0.static_captures_len().map(|len| len.saturating_add(1)) + self.meta.static_captures_len() } /// Returns an empty set of capture locations that can be reused in /// multiple calls to `captures_read` or `captures_read_at`. + #[inline] pub fn capture_locations(&self) -> CaptureLocations { - CaptureLocations(self.0.searcher().locations()) + CaptureLocations(self.meta.create_captures()) } /// An alias for `capture_locations` to preserve backward compatibility. 
@@ -769,8 +788,9 @@ impl Regex { /// The `regex-capi` crate uses this method, so to avoid breaking that /// crate, we continue to export it as an undocumented API. #[doc(hidden)] + #[inline] pub fn locations(&self) -> CaptureLocations { - CaptureLocations(self.0.searcher().locations()) + self.capture_locations() } } @@ -783,14 +803,22 @@ impl Regex { /// `'r` is the lifetime of the compiled regular expression and `'t` is the /// lifetime of the matched byte string. #[derive(Debug)] -pub struct Matches<'r, 't>(re_trait::Matches<'t, ExecNoSync<'r>>); +pub struct Matches<'r, 't> { + text: &'t [u8], + it: meta::FindMatches<'r, 't>, +} impl<'r, 't> Iterator for Matches<'r, 't> { type Item = Match<'t>; + #[inline] fn next(&mut self) -> Option> { - let text = self.0.text(); - self.0.next().map(|(s, e)| Match::new(text, s, e)) + self.it.next().map(|sp| Match::new(self.text, sp.start(), sp.end())) + } + + #[inline] + fn count(self) -> usize { + self.it.count() } } @@ -804,19 +832,22 @@ impl<'r, 't> FusedIterator for Matches<'r, 't> {} /// `'r` is the lifetime of the compiled regular expression and `'t` is the /// lifetime of the matched byte string. #[derive(Debug)] -pub struct CaptureMatches<'r, 't>( - re_trait::CaptureMatches<'t, ExecNoSync<'r>>, -); +pub struct CaptureMatches<'r, 't> { + text: &'t [u8], + it: meta::CapturesMatches<'r, 't>, +} impl<'r, 't> Iterator for CaptureMatches<'r, 't> { type Item = Captures<'t>; + #[inline] fn next(&mut self) -> Option> { - self.0.next().map(|locs| Captures { - text: self.0.text(), - locs, - named_groups: self.0.regex().capture_name_idx().clone(), - }) + self.it.next().map(|caps| Captures { text: self.text, caps }) + } + + #[inline] + fn count(self) -> usize { + self.it.count() } } @@ -828,31 +859,16 @@ impl<'r, 't> FusedIterator for CaptureMatches<'r, 't> {} /// lifetime of the byte string being split. 
#[derive(Debug)] pub struct Split<'r, 't> { - finder: Matches<'r, 't>, - last: usize, + text: &'t [u8], + it: meta::Split<'r, 't>, } impl<'r, 't> Iterator for Split<'r, 't> { type Item = &'t [u8]; + #[inline] fn next(&mut self) -> Option<&'t [u8]> { - let text = self.finder.0.text(); - match self.finder.next() { - None => { - if self.last > text.len() { - None - } else { - let s = &text[self.last..]; - self.last = text.len() + 1; // Next call will return None - Some(s) - } - } - Some(m) => { - let matched = &text[self.last..m.start()]; - self.last = m.end(); - Some(matched) - } - } + self.it.next().map(|span| &self.text[span]) } } @@ -866,35 +882,21 @@ impl<'r, 't> FusedIterator for Split<'r, 't> {} /// lifetime of the byte string being split. #[derive(Debug)] pub struct SplitN<'r, 't> { - splits: Split<'r, 't>, - n: usize, + text: &'t [u8], + it: meta::SplitN<'r, 't>, } impl<'r, 't> Iterator for SplitN<'r, 't> { type Item = &'t [u8]; + #[inline] fn next(&mut self) -> Option<&'t [u8]> { - if self.n == 0 { - return None; - } - - self.n -= 1; - if self.n > 0 { - return self.splits.next(); - } - - let text = self.splits.finder.0.text(); - if self.splits.last > text.len() { - // We've already returned all substrings. - None - } else { - // self.n == 0, so future calls will return None immediately - Some(&text[self.splits.last..]) - } + self.it.next().map(|span| &self.text[span]) } + #[inline] fn size_hint(&self) -> (usize, Option) { - (0, Some(self.n)) + self.it.size_hint() } } @@ -907,22 +909,22 @@ impl<'r, 't> FusedIterator for SplitN<'r, 't> {} /// /// `'r` is the lifetime of the compiled regular expression. 
#[derive(Clone, Debug)] -pub struct CaptureNames<'r>(::std::slice::Iter<'r, Option>); +pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>); impl<'r> Iterator for CaptureNames<'r> { type Item = Option<&'r str>; + #[inline] fn next(&mut self) -> Option> { - self.0 - .next() - .as_ref() - .map(|slot| slot.as_ref().map(|name| name.as_ref())) + self.0.next() } + #[inline] fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } + #[inline] fn count(self) -> usize { self.0.count() } @@ -969,7 +971,7 @@ impl<'r> FusedIterator for CaptureNames<'r> {} /// assert_eq!(None, locs.get(9944060567225171988)); /// ``` #[derive(Clone, Debug)] -pub struct CaptureLocations(re_trait::Locations); +pub struct CaptureLocations(captures::Captures); /// A type alias for `CaptureLocations` for backwards compatibility. /// @@ -986,7 +988,7 @@ impl CaptureLocations { /// with respect to the original string matched. #[inline] pub fn get(&self, i: usize) -> Option<(usize, usize)> { - self.0.pos(i) + self.0.get_group(i).map(|sp| (sp.start, sp.end)) } /// Returns the total number of capture groups (even if they didn't match). @@ -995,7 +997,7 @@ impl CaptureLocations { /// capturing group that corresponds to the entire match. #[inline] pub fn len(&self) -> usize { - self.0.len() + self.0.group_len() } /// An alias for the `get` method for backwards compatibility. @@ -1023,8 +1025,7 @@ impl CaptureLocations { /// `'t` is the lifetime of the matched text. pub struct Captures<'t> { text: &'t [u8], - locs: re_trait::Locations, - named_groups: Arc>, + caps: captures::Captures, } impl<'t> Captures<'t> { @@ -1047,14 +1048,20 @@ impl<'t> Captures<'t> { /// assert_eq!(text1, &b"123"[..]); /// assert_eq!(text2, &b""[..]); /// ``` + #[inline] pub fn get(&self, i: usize) -> Option> { - self.locs.pos(i).map(|(s, e)| Match::new(self.text, s, e)) + self.caps + .get_group(i) + .map(|sp| Match::new(self.text, sp.start, sp.end)) } /// Returns the match for the capture group named `name`. 
If `name` isn't a /// valid capture group or didn't match anything, then `None` is returned. + #[inline] pub fn name(&self, name: &str) -> Option> { - self.named_groups.get(name).and_then(|&i| self.get(i)) + self.caps + .get_group_by_name(name) + .map(|sp| Match::new(self.text, sp.start, sp.end)) } /// An iterator that yields all capturing matches in the order in which @@ -1062,8 +1069,9 @@ impl<'t> Captures<'t> { /// participate in the match, then `None` is yielded for that capture. /// /// The first match always corresponds to the overall match of the regex. + #[inline] pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 't> { - SubCaptureMatches { caps: self, it: self.locs.iter() } + SubCaptureMatches { text: self.text, it: self.caps.iter() } } /// Expands all instances of `$name` in `replacement` to the corresponding @@ -1087,8 +1095,9 @@ impl<'t> Captures<'t> { /// regex, then it is replaced with an empty string. /// /// To write a literal `$` use `$$`. + #[inline] pub fn expand(&self, replacement: &[u8], dst: &mut Vec) { - expand_bytes(self, replacement, dst) + self.caps.interpolate_bytes_into(self.text, replacement, dst); } /// Returns the total number of capture groups (even if they didn't match). @@ -1097,16 +1106,17 @@ impl<'t> Captures<'t> { /// group that corresponds to the full match. #[inline] pub fn len(&self) -> usize { - self.locs.len() + self.caps.group_len() } } impl<'t> fmt::Debug for Captures<'t> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Captures").field(&CapturesDebug(self)).finish() + f.debug_tuple("Captures").field(&self.caps).finish() } } +/* struct CapturesDebug<'c, 't>(&'c Captures<'t>); impl<'c, 't> fmt::Debug for CapturesDebug<'c, 't> { @@ -1142,6 +1152,7 @@ impl<'c, 't> fmt::Debug for CapturesDebug<'c, 't> { map.finish() } } +*/ /// Get a group by index. /// @@ -1197,17 +1208,28 @@ impl<'t, 'i> Index<&'i str> for Captures<'t> { /// the lifetime `'t` corresponds to the originally matched text. 
#[derive(Clone, Debug)] pub struct SubCaptureMatches<'c, 't> { - caps: &'c Captures<'t>, - it: SubCapturesPosIter<'c>, + text: &'t [u8], + it: captures::CapturesPatternIter<'c>, } impl<'c, 't> Iterator for SubCaptureMatches<'c, 't> { type Item = Option>; + #[inline] fn next(&mut self) -> Option>> { - self.it - .next() - .map(|cap| cap.map(|(s, e)| Match::new(self.caps.text, s, e))) + self.it.next().map(|group| { + group.map(|sp| Match::new(self.text, sp.start, sp.end)) + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn count(self) -> usize { + self.it.count() } } diff --git a/src/re_set.rs b/src/re_set.rs index 7c8253f0ca..837be4d832 100644 --- a/src/re_set.rs +++ b/src/re_set.rs @@ -2,15 +2,17 @@ macro_rules! define_set { ($name:ident, $builder_mod:ident, $text_ty:ty, $as_bytes:expr, $(#[$doc_regexset_example:meta])* ) => { pub mod $name { - use std::fmt; - use std::iter; - use std::slice; - use std::vec; + use std::{fmt, iter, sync::Arc}; - use crate::error::Error; - use crate::exec::Exec; - use crate::re_builder::$builder_mod::RegexSetBuilder; - use crate::re_trait::RegularExpression; + use regex_automata::{ + meta, + Input, PatternID, PatternSet, PatternSetIter, + }; + + use crate::{ + error::Error, + re_builder::$builder_mod::RegexSetBuilder, + }; /// Match multiple (possibly overlapping) regular expressions in a single scan. /// @@ -105,7 +107,10 @@ $(#[$doc_regexset_example])* /// search takes `O(mn)` time, where `m` is proportional to the size of the /// regex set and `n` is proportional to the length of the search text. #[derive(Clone)] -pub struct RegexSet(Exec); +pub struct RegexSet { + pub(crate) meta: meta::Regex, + pub(crate) patterns: Arc<[String]>, +} impl RegexSet { /// Create a new regex set with the given regular expressions. 
@@ -138,7 +143,8 @@ impl RegexSet { /// assert!(set.is_empty()); /// ``` pub fn empty() -> RegexSet { - RegexSetBuilder::new(&[""; 0]).build().unwrap() + let empty: [&str; 0] = []; + RegexSetBuilder::new(empty).build().unwrap() } /// Returns true if and only if one of the regexes in this set matches @@ -165,6 +171,7 @@ impl RegexSet { /// assert!(set.is_match("foo")); /// assert!(!set.is_match("☃")); /// ``` + #[inline] pub fn is_match(&self, text: $text_ty) -> bool { self.is_match_at(text, 0) } @@ -176,8 +183,11 @@ impl RegexSet { /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. #[doc(hidden)] + #[inline] pub fn is_match_at(&self, text: $text_ty, start: usize) -> bool { - self.0.searcher().is_match_at($as_bytes(text), start) + let mut input = Input::new(text); + input.set_start(start); + self.meta.is_match(input) } /// Returns the set of regular expressions that match in the given text. @@ -217,12 +227,10 @@ impl RegexSet { /// assert!(matches.matched(6)); /// ``` pub fn matches(&self, text: $text_ty) -> SetMatches { - let mut matches = vec![false; self.0.regex_strings().len()]; - let any = self.read_matches_at(&mut matches, text, 0); - SetMatches { - matched_any: any, - matches: matches, - } + let mut patset = PatternSet::new(self.meta.pattern_len()); + let input = Input::new(text); + self.meta.which_overlapping_matches(&input, &mut patset); + SetMatches(patset) } /// Returns the same as matches, but starts the search at the given @@ -244,17 +252,31 @@ impl RegexSet { text: $text_ty, start: usize, ) -> bool { - self.0.searcher().many_matches_at(matches, $as_bytes(text), start) + // This is pretty dumb. We should try to fix this, but the + // regex-automata API doesn't provide a way to store matches in an + // arbitrary &mut [bool]. Thankfully, this API is is doc(hidden) and + // thus not public... But regex-capi currently uses it. We should + // fix regex-capi to use a PatternSet, maybe? Not sure... 
PatternSet + // is in regex-automata, not regex. So maybe we should just accept a + // 'SetMatches', which is basically just a newtype around PatternSet. + let mut patset = PatternSet::new(self.meta.pattern_len()); + let mut input = Input::new(text); + input.set_start(start); + self.meta.which_overlapping_matches(&input, &mut patset); + for pid in patset.iter() { + matches[pid] = true; + } + !patset.is_empty() } /// Returns the total number of regular expressions in this set. pub fn len(&self) -> usize { - self.0.regex_strings().len() + self.meta.pattern_len() } /// Returns `true` if this set contains no regular expressions. pub fn is_empty(&self) -> bool { - self.0.regex_strings().is_empty() + self.meta.pattern_len() == 0 } /// Returns the patterns that this set will match on. @@ -285,7 +307,7 @@ impl RegexSet { /// assert_eq!(matches, vec![r"\w+", r"\pL+", r"foo", r"bar", r"foobar"]); /// ``` pub fn patterns(&self) -> &[String] { - self.0.regex_strings() + &self.patterns } } @@ -297,15 +319,12 @@ impl Default for RegexSet { /// A set of matches returned by a regex set. #[derive(Clone, Debug)] -pub struct SetMatches { - matched_any: bool, - matches: Vec, -} +pub struct SetMatches(PatternSet); impl SetMatches { /// Whether this set contains any matches. pub fn matched_any(&self) -> bool { - self.matched_any + !self.0.is_empty() } /// Whether the regex at the given index matched. @@ -317,7 +336,7 @@ impl SetMatches { /// /// If `regex_index` is greater than or equal to `self.len()`. pub fn matched(&self, regex_index: usize) -> bool { - self.matches[regex_index] + self.0.contains(PatternID::new_unchecked(regex_index)) } /// The total number of regexes in the set that created these matches. @@ -327,7 +346,7 @@ impl SetMatches { /// [`SetMatches::iter`]. The only way to determine the total number of /// matched regexes is to iterate over them. 
pub fn len(&self) -> usize { - self.matches.len() + self.0.capacity() } /// Returns an iterator over indexes in the regex that matched. @@ -336,7 +355,7 @@ impl SetMatches { /// the index corresponds to the index of the regex that matched with /// respect to its position when initially building the set. pub fn iter(&self) -> SetMatchesIter<'_> { - SetMatchesIter((&*self.matches).into_iter().enumerate()) + SetMatchesIter(self.0.iter()) } } @@ -345,7 +364,10 @@ impl IntoIterator for SetMatches { type Item = usize; fn into_iter(self) -> Self::IntoIter { - SetMatchesIntoIter(self.matches.into_iter().enumerate()) + let it = 0..self.0.capacity(); + SetMatchesIntoIter { + patset: self.0, it + } } } @@ -364,33 +386,34 @@ impl<'a> IntoIterator for &'a SetMatches { /// index corresponds to the index of the regex that matched with respect to /// its position when initially building the set. #[derive(Debug)] -pub struct SetMatchesIntoIter(iter::Enumerate>); +pub struct SetMatchesIntoIter { + patset: PatternSet, + it: std::ops::Range, +} impl Iterator for SetMatchesIntoIter { type Item = usize; fn next(&mut self) -> Option { loop { - match self.0.next() { - None => return None, - Some((_, false)) => {} - Some((i, true)) => return Some(i), + let id = self.it.next()?; + if self.patset.contains(PatternID::new_unchecked(id)) { + return Some(id); } } } fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() + self.it.size_hint() } } impl DoubleEndedIterator for SetMatchesIntoIter { fn next_back(&mut self) -> Option { loop { - match self.0.next_back() { - None => return None, - Some((_, false)) => {} - Some((i, true)) => return Some(i), + let id = self.it.next_back()?; + if self.patset.contains(PatternID::new_unchecked(id)) { + return Some(id); } } } @@ -406,19 +429,13 @@ impl iter::FusedIterator for SetMatchesIntoIter {} /// index corresponds to the index of the regex that matched with respect to /// its position when initially building the set. 
#[derive(Clone, Debug)] -pub struct SetMatchesIter<'a>(iter::Enumerate>); +pub struct SetMatchesIter<'a>(PatternSetIter<'a>); impl<'a> Iterator for SetMatchesIter<'a> { type Item = usize; fn next(&mut self) -> Option { - loop { - match self.0.next() { - None => return None, - Some((_, &false)) => {} - Some((i, &true)) => return Some(i), - } - } + self.0.next().map(|pid| pid.as_usize()) } fn size_hint(&self) -> (usize, Option) { @@ -428,33 +445,17 @@ impl<'a> Iterator for SetMatchesIter<'a> { impl<'a> DoubleEndedIterator for SetMatchesIter<'a> { fn next_back(&mut self) -> Option { - loop { - match self.0.next_back() { - None => return None, - Some((_, &false)) => {} - Some((i, &true)) => return Some(i), - } - } + self.0.next_back().map(|pid| pid.as_usize()) } } impl<'a> iter::FusedIterator for SetMatchesIter<'a> {} -#[doc(hidden)] -impl From for RegexSet { - fn from(exec: Exec) -> Self { - RegexSet(exec) - } -} - impl fmt::Debug for RegexSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "RegexSet({:?})", self.0.regex_strings()) + write!(f, "RegexSet({:?})", self.patterns()) } } - -#[allow(dead_code)] fn as_bytes_str(text: &str) -> &[u8] { text.as_bytes() } -#[allow(dead_code)] fn as_bytes_bytes(text: &[u8]) -> &[u8] { text } } } } diff --git a/src/re_trait.rs b/src/re_trait.rs deleted file mode 100644 index 505810c848..0000000000 --- a/src/re_trait.rs +++ /dev/null @@ -1,294 +0,0 @@ -use std::fmt; -use std::iter::FusedIterator; - -/// Slot is a single saved capture location. Note that there are two slots for -/// every capture in a regular expression (one slot each for the start and end -/// of the capture). -pub type Slot = Option; - -/// Locations represents the offsets of each capturing group in a regex for -/// a single match. -/// -/// Unlike `Captures`, a `Locations` value only stores offsets. 
-#[doc(hidden)] -#[derive(Clone, Debug)] -pub struct Locations(Vec); - -impl Locations { - /// Returns the start and end positions of the Nth capture group. Returns - /// `None` if `i` is not a valid capture group or if the capture group did - /// not match anything. The positions returned are *always* byte indices - /// with respect to the original string matched. - pub fn pos(&self, i: usize) -> Option<(usize, usize)> { - let (s, e) = (i.checked_mul(2)?, i.checked_mul(2)?.checked_add(1)?); - match (self.0.get(s), self.0.get(e)) { - (Some(&Some(s)), Some(&Some(e))) => Some((s, e)), - _ => None, - } - } - - /// Creates an iterator of all the capture group positions in order of - /// appearance in the regular expression. Positions are byte indices - /// in terms of the original string matched. - pub fn iter(&self) -> SubCapturesPosIter<'_> { - SubCapturesPosIter { idx: 0, locs: self } - } - - /// Returns the total number of capturing groups. - /// - /// This is always at least `1` since every regex has at least `1` - /// capturing group that corresponds to the entire match. - pub fn len(&self) -> usize { - self.0.len() / 2 - } - - /// Return the individual slots as a slice. - pub(crate) fn as_slots(&mut self) -> &mut [Slot] { - &mut self.0 - } -} - -/// An iterator over capture group positions for a particular match of a -/// regular expression. -/// -/// Positions are byte indices in terms of the original string matched. -/// -/// `'c` is the lifetime of the captures. 
-#[derive(Clone, Debug)] -pub struct SubCapturesPosIter<'c> { - idx: usize, - locs: &'c Locations, -} - -impl<'c> Iterator for SubCapturesPosIter<'c> { - type Item = Option<(usize, usize)>; - - fn next(&mut self) -> Option> { - if self.idx >= self.locs.len() { - return None; - } - let x = match self.locs.pos(self.idx) { - None => Some(None), - Some((s, e)) => Some(Some((s, e))), - }; - self.idx += 1; - x - } - - fn size_hint(&self) -> (usize, Option) { - let len = self.locs.len() - self.idx; - (len, Some(len)) - } - - fn count(self) -> usize { - self.len() - } -} - -impl<'c> ExactSizeIterator for SubCapturesPosIter<'c> {} - -impl<'c> FusedIterator for SubCapturesPosIter<'c> {} - -/// `RegularExpression` describes types that can implement regex searching. -/// -/// This trait is my attempt at reducing code duplication and to standardize -/// the internal API. Specific duplication that is avoided are the `find` -/// and `capture` iterators, which are slightly tricky. -/// -/// It's not clear whether this trait is worth it, and it also isn't -/// clear whether it's useful as a public trait or not. Methods like -/// `next_after_empty` reak of bad design, but the rest of the methods seem -/// somewhat reasonable. One particular thing this trait would expose would be -/// the ability to start the search of a regex anywhere in a haystack, which -/// isn't possible in the current public API. -pub trait RegularExpression: Sized + fmt::Debug { - /// The type of the haystack. - type Text: ?Sized + fmt::Debug; - - /// The number of capture slots in the compiled regular expression. This is - /// always two times the number of capture groups (two slots per group). - fn slots_len(&self) -> usize; - - /// Allocates fresh space for all capturing groups in this regex. - fn locations(&self) -> Locations { - Locations(vec![None; self.slots_len()]) - } - - /// Returns the position of the next character after `i`. 
- /// - /// For example, a haystack with type `&[u8]` probably returns `i+1`, - /// whereas a haystack with type `&str` probably returns `i` plus the - /// length of the next UTF-8 sequence. - fn next_after_empty(&self, text: &Self::Text, i: usize) -> usize; - - /// Returns the location of the shortest match. - fn shortest_match_at( - &self, - text: &Self::Text, - start: usize, - ) -> Option; - - /// Returns whether the regex matches the text given. - fn is_match_at(&self, text: &Self::Text, start: usize) -> bool; - - /// Returns the leftmost-first match location if one exists. - fn find_at( - &self, - text: &Self::Text, - start: usize, - ) -> Option<(usize, usize)>; - - /// Returns the leftmost-first match location if one exists, and also - /// fills in any matching capture slot locations. - fn captures_read_at( - &self, - locs: &mut Locations, - text: &Self::Text, - start: usize, - ) -> Option<(usize, usize)>; - - /// Returns an iterator over all non-overlapping successive leftmost-first - /// matches. - fn find_iter(self, text: &Self::Text) -> Matches<'_, Self> { - Matches { re: self, text, last_end: 0, last_match: None } - } - - /// Returns an iterator over all non-overlapping successive leftmost-first - /// matches with captures. - fn captures_iter(self, text: &Self::Text) -> CaptureMatches<'_, Self> { - CaptureMatches(self.find_iter(text)) - } -} - -/// An iterator over all non-overlapping successive leftmost-first matches. -#[derive(Debug)] -pub struct Matches<'t, R> -where - R: RegularExpression, - R::Text: 't, -{ - re: R, - text: &'t R::Text, - last_end: usize, - last_match: Option, -} - -impl<'t, R> Matches<'t, R> -where - R: RegularExpression, - R::Text: 't, -{ - /// Return the text being searched. - pub fn text(&self) -> &'t R::Text { - self.text - } - - /// Return the underlying regex. 
- pub fn regex(&self) -> &R { - &self.re - } -} - -impl<'t, R> Iterator for Matches<'t, R> -where - R: RegularExpression, - R::Text: 't + AsRef<[u8]>, -{ - type Item = (usize, usize); - - fn next(&mut self) -> Option<(usize, usize)> { - if self.last_end > self.text.as_ref().len() { - return None; - } - let (s, e) = match self.re.find_at(self.text, self.last_end) { - None => return None, - Some((s, e)) => (s, e), - }; - if s == e { - // This is an empty match. To ensure we make progress, start - // the next search at the smallest possible starting position - // of the next match following this one. - self.last_end = self.re.next_after_empty(self.text, e); - // Don't accept empty matches immediately following a match. - // Just move on to the next match. - if Some(e) == self.last_match { - return self.next(); - } - } else { - self.last_end = e; - } - self.last_match = Some(e); - Some((s, e)) - } -} - -impl<'t, R> FusedIterator for Matches<'t, R> -where - R: RegularExpression, - R::Text: 't + AsRef<[u8]>, -{ -} - -/// An iterator over all non-overlapping successive leftmost-first matches with -/// captures. -#[derive(Debug)] -pub struct CaptureMatches<'t, R>(Matches<'t, R>) -where - R: RegularExpression, - R::Text: 't; - -impl<'t, R> CaptureMatches<'t, R> -where - R: RegularExpression, - R::Text: 't, -{ - /// Return the text being searched. - pub fn text(&self) -> &'t R::Text { - self.0.text() - } - - /// Return the underlying regex. 
- pub fn regex(&self) -> &R { - self.0.regex() - } -} - -impl<'t, R> Iterator for CaptureMatches<'t, R> -where - R: RegularExpression, - R::Text: 't + AsRef<[u8]>, -{ - type Item = Locations; - - fn next(&mut self) -> Option { - if self.0.last_end > self.0.text.as_ref().len() { - return None; - } - let mut locs = self.0.re.locations(); - let (s, e) = match self.0.re.captures_read_at( - &mut locs, - self.0.text, - self.0.last_end, - ) { - None => return None, - Some((s, e)) => (s, e), - }; - if s == e { - self.0.last_end = self.0.re.next_after_empty(self.0.text, e); - if Some(e) == self.0.last_match { - return self.next(); - } - } else { - self.0.last_end = e; - } - self.0.last_match = Some(e); - Some(locs) - } -} - -impl<'t, R> FusedIterator for CaptureMatches<'t, R> -where - R: RegularExpression, - R::Text: 't + AsRef<[u8]>, -{ -} diff --git a/src/re_unicode.rs b/src/re_unicode.rs index 8286646cc2..581e50a5b0 100644 --- a/src/re_unicode.rs +++ b/src/re_unicode.rs @@ -1,18 +1,17 @@ -use std::borrow::Cow; -use std::collections::HashMap; -use std::fmt; -use std::iter::FusedIterator; -use std::ops::{Index, Range}; -use std::str::FromStr; -use std::sync::Arc; - -use crate::find_byte::find_byte; - -use crate::error::Error; -use crate::exec::{Exec, ExecNoSyncStr}; -use crate::expand::expand_str; -use crate::re_builder::unicode::RegexBuilder; -use crate::re_trait::{self, RegularExpression, SubCapturesPosIter}; +use std::{ + borrow::Cow, + fmt, + iter::FusedIterator, + ops::{Index, Range}, + str::FromStr, + sync::Arc, +}; + +use regex_automata::{meta, util::captures, Input, PatternID}; + +use crate::{ + error::Error, find_byte::find_byte, re_builder::unicode::RegexBuilder, +}; /// Escapes all regular expression meta characters in `text`. 
/// @@ -155,7 +154,10 @@ impl<'t> From> for Range { /// assert_eq!(haystack.split(&re).collect::>(), vec!["a", "b", "c"]); /// ``` #[derive(Clone)] -pub struct Regex(Exec); +pub struct Regex { + pub(crate) meta: meta::Regex, + pub(crate) pattern: Arc, +} impl fmt::Display for Regex { /// Shows the original regular expression. @@ -171,13 +173,6 @@ impl fmt::Debug for Regex { } } -#[doc(hidden)] -impl From for Regex { - fn from(exec: Exec) -> Regex { - Regex(exec) - } -} - impl FromStr for Regex { type Err = Error; @@ -216,6 +211,7 @@ impl Regex { /// assert!(Regex::new(r"\b\w{13}\b").unwrap().is_match(text)); /// # } /// ``` + #[inline] pub fn is_match(&self, text: &str) -> bool { self.is_match_at(text, 0) } @@ -241,6 +237,7 @@ impl Regex { /// assert_eq!(mat.end(), 15); /// # } /// ``` + #[inline] pub fn find<'t>(&self, text: &'t str) -> Option> { self.find_at(text, 0) } @@ -263,8 +260,9 @@ impl Regex { /// } /// # } /// ``` + #[inline] pub fn find_iter<'r, 't>(&'r self, text: &'t str) -> Matches<'r, 't> { - Matches(self.0.searcher_str().find_iter(text)) + Matches { text, it: self.meta.find_iter(text) } } /// Returns the capture groups corresponding to the leftmost-first @@ -330,6 +328,7 @@ impl Regex { /// /// The `0`th capture group is always unnamed, so it must always be /// accessed with `get(0)` or `[0]`. 
+ #[inline] pub fn captures<'t>(&self, text: &'t str) -> Option> { self.captures_at(text, 0) } @@ -359,11 +358,12 @@ impl Regex { /// // Movie: M, Released: 1931 /// # } /// ``` + #[inline] pub fn captures_iter<'r, 't>( &'r self, text: &'t str, ) -> CaptureMatches<'r, 't> { - CaptureMatches(self.0.searcher_str().captures_iter(text)) + CaptureMatches { text, it: self.meta.captures_iter(text) } } /// Returns an iterator of substrings of `text` delimited by a match of the @@ -384,8 +384,9 @@ impl Regex { /// assert_eq!(fields, vec!["a", "b", "c", "d", "e"]); /// # } /// ``` + #[inline] pub fn split<'r, 't>(&'r self, text: &'t str) -> Split<'r, 't> { - Split { finder: self.find_iter(text), last: 0 } + Split { text, it: self.meta.split(text) } } /// Returns an iterator of at most `limit` substrings of `text` delimited @@ -408,12 +409,13 @@ impl Regex { /// assert_eq!(fields, vec!("Hey", "How", "are you?")); /// # } /// ``` + #[inline] pub fn splitn<'r, 't>( &'r self, text: &'t str, limit: usize, ) -> SplitN<'r, 't> { - SplitN { splits: self.split(text), n: limit } + SplitN { text, it: self.meta.splitn(text, limit) } } /// Replaces the leftmost-first match with the replacement provided. @@ -520,6 +522,7 @@ impl Regex { /// assert_eq!(result, "$2 $last"); /// # } /// ``` + #[inline] pub fn replace<'t, R: Replacer>( &self, text: &'t str, @@ -534,6 +537,7 @@ impl Regex { /// /// See the documentation for `replace` for details on how to access /// capturing group matches in the replacement string. + #[inline] pub fn replace_all<'t, R: Replacer>( &self, text: &'t str, @@ -635,6 +639,7 @@ impl Regex { /// assert_eq!(pos, Some(1)); /// # } /// ``` + #[inline] pub fn shortest_match(&self, text: &str) -> Option { self.shortest_match_at(text, 0) } @@ -645,12 +650,15 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only match /// when `start == 0`. 
+ #[inline] pub fn shortest_match_at( &self, text: &str, start: usize, ) -> Option { - self.0.searcher_str().shortest_match_at(text, start) + let mut input = Input::new(text).earliest(true); + input.set_start(start); + self.meta.search_half(&input).map(|hm| hm.offset()) } /// Returns the same as is_match, but starts the search at the given @@ -659,8 +667,11 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. + #[inline] pub fn is_match_at(&self, text: &str, start: usize) -> bool { - self.0.searcher_str().is_match_at(text, start) + let mut input = Input::new(text); + input.set_start(start); + self.meta.is_match(input) } /// Returns the same as find, but starts the search at the given @@ -669,15 +680,15 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. + #[inline] pub fn find_at<'t>( &self, text: &'t str, start: usize, ) -> Option> { - self.0 - .searcher_str() - .find_at(text, start) - .map(|(s, e)| Match::new(text, s, e)) + let mut input = Input::new(text); + input.set_start(start); + self.meta.find(input).map(|m| Match::new(text, m.start(), m.end())) } /// Returns the same as [`Regex::captures`], but starts the search at the @@ -686,17 +697,21 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. 
+ #[inline] pub fn captures_at<'t>( &self, text: &'t str, start: usize, ) -> Option> { - let mut locs = self.capture_locations(); - self.captures_read_at(&mut locs, text, start).map(move |_| Captures { - text, - locs: locs.0, - named_groups: self.0.capture_name_idx().clone(), - }) + let mut caps = self.meta.create_captures(); + let mut input = Input::new(text); + input.set_start(start); + self.meta.captures(input, &mut caps); + if caps.is_match() { + Some(Captures { text, caps }) + } else { + None + } } /// This is like `captures`, but uses @@ -709,6 +724,7 @@ impl Regex { /// /// This returns the overall match if this was successful, which is always /// equivalence to the `0`th capture group. + #[inline] pub fn captures_read<'t>( &self, locs: &mut CaptureLocations, @@ -723,16 +739,17 @@ impl Regex { /// The significance of the starting point is that it takes the surrounding /// context into consideration. For example, the `\A` anchor can only /// match when `start == 0`. + #[inline] pub fn captures_read_at<'t>( &self, locs: &mut CaptureLocations, text: &'t str, start: usize, ) -> Option> { - self.0 - .searcher_str() - .captures_read_at(&mut locs.0, text, start) - .map(|(s, e)| Match::new(text, s, e)) + let mut input = Input::new(text); + input.set_start(start); + self.meta.captures(input, &mut locs.0); + locs.0.get_match().map(|m| Match::new(text, m.start(), m.end())) } /// An undocumented alias for `captures_read_at`. @@ -741,6 +758,7 @@ impl Regex { /// breaking that crate, we continue to provide the name as an undocumented /// alias. #[doc(hidden)] + #[inline] pub fn read_captures_at<'t>( &self, locs: &mut CaptureLocations, @@ -754,18 +772,19 @@ impl Regex { /// Auxiliary methods. impl Regex { /// Returns the original string of this regex. + #[inline] pub fn as_str(&self) -> &str { - &self.0.regex_strings()[0] + &self.pattern } /// Returns an iterator over the capture names. 
pub fn capture_names(&self) -> CaptureNames<'_> { - CaptureNames(self.0.capture_names().iter()) + CaptureNames(self.meta.group_info().pattern_names(PatternID::ZERO)) } /// Returns the number of captures. pub fn captures_len(&self) -> usize { - self.0.capture_names().len() + self.meta.group_info().group_len(PatternID::ZERO) } /// Returns the total number of capturing groups that appear in every @@ -805,13 +824,14 @@ impl Regex { /// ``` #[inline] pub fn static_captures_len(&self) -> Option { - self.0.static_captures_len().map(|len| len.saturating_add(1)) + self.meta.static_captures_len() } /// Returns an empty set of capture locations that can be reused in /// multiple calls to `captures_read` or `captures_read_at`. + #[inline] pub fn capture_locations(&self) -> CaptureLocations { - CaptureLocations(self.0.searcher_str().locations()) + CaptureLocations(self.meta.create_captures()) } /// An alias for `capture_locations` to preserve backward compatibility. @@ -819,8 +839,9 @@ impl Regex { /// The `regex-capi` crate uses this method, so to avoid breaking that /// crate, we continue to export it as an undocumented API. #[doc(hidden)] + #[inline] pub fn locations(&self) -> CaptureLocations { - CaptureLocations(self.0.searcher_str().locations()) + self.capture_locations() } } @@ -831,22 +852,22 @@ impl Regex { /// /// `'r` is the lifetime of the compiled regular expression. #[derive(Clone, Debug)] -pub struct CaptureNames<'r>(::std::slice::Iter<'r, Option>); +pub struct CaptureNames<'r>(captures::GroupInfoPatternNames<'r>); impl<'r> Iterator for CaptureNames<'r> { type Item = Option<&'r str>; + #[inline] fn next(&mut self) -> Option> { - self.0 - .next() - .as_ref() - .map(|slot| slot.as_ref().map(|name| name.as_ref())) + self.0.next() } + #[inline] fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } + #[inline] fn count(self) -> usize { self.0.count() } @@ -862,31 +883,16 @@ impl<'r> FusedIterator for CaptureNames<'r> {} /// lifetime of the string being split. 
#[derive(Debug)] pub struct Split<'r, 't> { - finder: Matches<'r, 't>, - last: usize, + text: &'t str, + it: meta::Split<'r, 't>, } impl<'r, 't> Iterator for Split<'r, 't> { type Item = &'t str; + #[inline] fn next(&mut self) -> Option<&'t str> { - let text = self.finder.0.text(); - match self.finder.next() { - None => { - if self.last > text.len() { - None - } else { - let s = &text[self.last..]; - self.last = text.len() + 1; // Next call will return None - Some(s) - } - } - Some(m) => { - let matched = &text[self.last..m.start()]; - self.last = m.end(); - Some(matched) - } - } + self.it.next().map(|span| &self.text[span]) } } @@ -900,35 +906,21 @@ impl<'r, 't> FusedIterator for Split<'r, 't> {} /// lifetime of the string being split. #[derive(Debug)] pub struct SplitN<'r, 't> { - splits: Split<'r, 't>, - n: usize, + text: &'t str, + it: meta::SplitN<'r, 't>, } impl<'r, 't> Iterator for SplitN<'r, 't> { type Item = &'t str; + #[inline] fn next(&mut self) -> Option<&'t str> { - if self.n == 0 { - return None; - } - - self.n -= 1; - if self.n > 0 { - return self.splits.next(); - } - - let text = self.splits.finder.0.text(); - if self.splits.last > text.len() { - // We've already returned all substrings. - None - } else { - // self.n == 0, so future calls will return None immediately - Some(&text[self.splits.last..]) - } + self.it.next().map(|span| &self.text[span]) } + #[inline] fn size_hint(&self) -> (usize, Option) { - (0, Some(self.n)) + self.it.size_hint() } } @@ -970,7 +962,7 @@ impl<'r, 't> FusedIterator for SplitN<'r, 't> {} /// assert_eq!(None, locs.get(9944060567225171988)); /// ``` #[derive(Clone, Debug)] -pub struct CaptureLocations(re_trait::Locations); +pub struct CaptureLocations(captures::Captures); /// A type alias for `CaptureLocations` for backwards compatibility. /// @@ -987,7 +979,7 @@ impl CaptureLocations { /// with respect to the original string matched. 
#[inline] pub fn get(&self, i: usize) -> Option<(usize, usize)> { - self.0.pos(i) + self.0.get_group(i).map(|sp| (sp.start, sp.end)) } /// Returns the total number of capture groups (even if they didn't match). @@ -996,7 +988,7 @@ impl CaptureLocations { /// capturing group that corresponds to the entire match. #[inline] pub fn len(&self) -> usize { - self.0.len() + self.0.group_len() } /// An alias for the `get` method for backwards compatibility. @@ -1024,8 +1016,7 @@ impl CaptureLocations { /// `'t` is the lifetime of the matched text. pub struct Captures<'t> { text: &'t str, - locs: re_trait::Locations, - named_groups: Arc>, + caps: captures::Captures, } impl<'t> Captures<'t> { @@ -1048,14 +1039,20 @@ impl<'t> Captures<'t> { /// assert_eq!(text1, "123"); /// assert_eq!(text2, ""); /// ``` + #[inline] pub fn get(&self, i: usize) -> Option> { - self.locs.pos(i).map(|(s, e)| Match::new(self.text, s, e)) + self.caps + .get_group(i) + .map(|sp| Match::new(self.text, sp.start, sp.end)) } /// Returns the match for the capture group named `name`. If `name` isn't a /// valid capture group or didn't match anything, then `None` is returned. + #[inline] pub fn name(&self, name: &str) -> Option> { - self.named_groups.get(name).and_then(|&i| self.get(i)) + self.caps + .get_group_by_name(name) + .map(|sp| Match::new(self.text, sp.start, sp.end)) } /// An iterator that yields all capturing matches in the order in which @@ -1063,8 +1060,9 @@ impl<'t> Captures<'t> { /// participate in the match, then `None` is yielded for that capture. /// /// The first match always corresponds to the overall match of the regex. + #[inline] pub fn iter<'c>(&'c self) -> SubCaptureMatches<'c, 't> { - SubCaptureMatches { caps: self, it: self.locs.iter() } + SubCaptureMatches { text: self.text, it: self.caps.iter() } } /// Expands all instances of `$name` in `replacement` to the corresponding @@ -1088,8 +1086,9 @@ impl<'t> Captures<'t> { /// it is replaced with an empty string. 
/// /// To write a literal `$` use `$$`. + #[inline] pub fn expand(&self, replacement: &str, dst: &mut String) { - expand_str(self, replacement, dst) + self.caps.interpolate_string_into(self.text, replacement, dst); } /// Returns the total number of capture groups (even if they didn't match). @@ -1098,34 +1097,13 @@ impl<'t> Captures<'t> { /// group that corresponds to the full match. #[inline] pub fn len(&self) -> usize { - self.locs.len() + self.caps.group_len() } } impl<'t> fmt::Debug for Captures<'t> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Captures").field(&CapturesDebug(self)).finish() - } -} - -struct CapturesDebug<'c, 't>(&'c Captures<'t>); - -impl<'c, 't> fmt::Debug for CapturesDebug<'c, 't> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // We'd like to show something nice here, even if it means an - // allocation to build a reverse index. - let slot_to_name: HashMap<&usize, &String> = - self.0.named_groups.iter().map(|(a, b)| (b, a)).collect(); - let mut map = f.debug_map(); - for (slot, m) in self.0.locs.iter().enumerate() { - let m = m.map(|(s, e)| &self.0.text[s..e]); - if let Some(name) = slot_to_name.get(&slot) { - map.entry(&name, &m); - } else { - map.entry(&slot, &m); - } - } - map.finish() + f.debug_tuple("Captures").field(&self.caps).finish() } } @@ -1183,23 +1161,26 @@ impl<'t, 'i> Index<&'i str> for Captures<'t> { /// the lifetime `'t` corresponds to the originally matched text. 
#[derive(Clone, Debug)] pub struct SubCaptureMatches<'c, 't> { - caps: &'c Captures<'t>, - it: SubCapturesPosIter<'c>, + text: &'t str, + it: captures::CapturesPatternIter<'c>, } impl<'c, 't> Iterator for SubCaptureMatches<'c, 't> { type Item = Option>; + #[inline] fn next(&mut self) -> Option>> { - self.it - .next() - .map(|cap| cap.map(|(s, e)| Match::new(self.caps.text, s, e))) + self.it.next().map(|group| { + group.map(|sp| Match::new(self.text, sp.start, sp.end)) + }) } + #[inline] fn size_hint(&self) -> (usize, Option) { self.it.size_hint() } + #[inline] fn count(self) -> usize { self.it.count() } @@ -1217,19 +1198,22 @@ impl<'c, 't> FusedIterator for SubCaptureMatches<'c, 't> {} /// `'r` is the lifetime of the compiled regular expression and `'t` is the /// lifetime of the matched string. #[derive(Debug)] -pub struct CaptureMatches<'r, 't>( - re_trait::CaptureMatches<'t, ExecNoSyncStr<'r>>, -); +pub struct CaptureMatches<'r, 't> { + text: &'t str, + it: meta::CapturesMatches<'r, 't>, +} impl<'r, 't> Iterator for CaptureMatches<'r, 't> { type Item = Captures<'t>; + #[inline] fn next(&mut self) -> Option> { - self.0.next().map(|locs| Captures { - text: self.0.text(), - locs, - named_groups: self.0.regex().capture_name_idx().clone(), - }) + self.it.next().map(|caps| Captures { text: self.text, caps }) + } + + #[inline] + fn count(self) -> usize { + self.it.count() } } @@ -1243,14 +1227,22 @@ impl<'r, 't> FusedIterator for CaptureMatches<'r, 't> {} /// `'r` is the lifetime of the compiled regular expression and `'t` is the /// lifetime of the matched string. 
#[derive(Debug)] -pub struct Matches<'r, 't>(re_trait::Matches<'t, ExecNoSyncStr<'r>>); +pub struct Matches<'r, 't> { + text: &'t str, + it: meta::FindMatches<'r, 't>, +} impl<'r, 't> Iterator for Matches<'r, 't> { type Item = Match<'t>; + #[inline] fn next(&mut self) -> Option> { - let text = self.0.text(); - self.0.next().map(|(s, e)| Match::new(text, s, e)) + self.it.next().map(|sp| Match::new(self.text, sp.start(), sp.end())) + } + + #[inline] + fn count(self) -> usize { + self.it.count() } } diff --git a/src/sparse.rs b/src/sparse.rs deleted file mode 100644 index 98b726613d..0000000000 --- a/src/sparse.rs +++ /dev/null @@ -1,84 +0,0 @@ -use std::fmt; -use std::ops::Deref; -use std::slice; - -/// A sparse set used for representing ordered NFA states. -/// -/// This supports constant time addition and membership testing. Clearing an -/// entire set can also be done in constant time. Iteration yields elements -/// in the order in which they were inserted. -/// -/// The data structure is based on: https://research.swtch.com/sparse -/// Note though that we don't actually use uninitialized memory. We generally -/// reuse allocations, so the initial allocation cost is bareable. However, -/// its other properties listed above are extremely useful. -#[derive(Clone)] -pub struct SparseSet { - /// Dense contains the instruction pointers in the order in which they - /// were inserted. - dense: Vec, - /// Sparse maps instruction pointers to their location in dense. - /// - /// An instruction pointer is in the set if and only if - /// sparse[ip] < dense.len() && ip == dense[sparse[ip]]. 
- sparse: Box<[usize]>, -} - -impl SparseSet { - pub fn new(size: usize) -> SparseSet { - SparseSet { - dense: Vec::with_capacity(size), - sparse: vec![0; size].into_boxed_slice(), - } - } - - pub fn len(&self) -> usize { - self.dense.len() - } - - pub fn is_empty(&self) -> bool { - self.dense.is_empty() - } - - pub fn capacity(&self) -> usize { - self.dense.capacity() - } - - pub fn insert(&mut self, value: usize) { - let i = self.len(); - assert!(i < self.capacity()); - self.dense.push(value); - self.sparse[value] = i; - } - - pub fn contains(&self, value: usize) -> bool { - let i = self.sparse[value]; - self.dense.get(i) == Some(&value) - } - - pub fn clear(&mut self) { - self.dense.clear(); - } -} - -impl fmt::Debug for SparseSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "SparseSet({:?})", self.dense) - } -} - -impl Deref for SparseSet { - type Target = [usize]; - - fn deref(&self) -> &Self::Target { - &self.dense - } -} - -impl<'a> IntoIterator for &'a SparseSet { - type Item = &'a usize; - type IntoIter = slice::Iter<'a, usize>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} diff --git a/src/utf8.rs b/src/utf8.rs deleted file mode 100644 index 2dfd2c0d1d..0000000000 --- a/src/utf8.rs +++ /dev/null @@ -1,264 +0,0 @@ -/// A few elementary UTF-8 encoding and decoding functions used by the matching -/// engines. -/// -/// In an ideal world, the matching engines operate on `&str` and we can just -/// lean on the standard library for all our UTF-8 needs. However, to support -/// byte based regexes (that can match on arbitrary bytes which may contain -/// UTF-8), we need to be capable of searching and decoding UTF-8 on a `&[u8]`. -/// The standard library doesn't really recognize this use case, so we have -/// to build it out ourselves. -/// -/// Should this be factored out into a separate crate? It seems independently -/// useful. 
There are other crates that already exist (e.g., `utf-8`) that have -/// overlapping use cases. Not sure what to do. -use std::char; - -const TAG_CONT: u8 = 0b1000_0000; -const TAG_TWO: u8 = 0b1100_0000; -const TAG_THREE: u8 = 0b1110_0000; -const TAG_FOUR: u8 = 0b1111_0000; - -/// Returns the smallest possible index of the next valid UTF-8 sequence -/// starting after `i`. -pub fn next_utf8(text: &[u8], i: usize) -> usize { - let b = match text.get(i) { - None => return i + 1, - Some(&b) => b, - }; - let inc = if b <= 0x7F { - 1 - } else if b <= 0b110_11111 { - 2 - } else if b <= 0b1110_1111 { - 3 - } else { - 4 - }; - i + inc -} - -/// Decode a single UTF-8 sequence into a single Unicode codepoint from `src`. -/// -/// If no valid UTF-8 sequence could be found, then `None` is returned. -/// Otherwise, the decoded codepoint and the number of bytes read is returned. -/// The number of bytes read (for a valid UTF-8 sequence) is guaranteed to be -/// 1, 2, 3 or 4. -/// -/// Note that a UTF-8 sequence is invalid if it is incorrect UTF-8, encodes a -/// codepoint that is out of range (surrogate codepoints are out of range) or -/// is not the shortest possible UTF-8 sequence for that codepoint. 
-#[inline] -pub fn decode_utf8(src: &[u8]) -> Option<(char, usize)> { - let b0 = match src.get(0) { - None => return None, - Some(&b) if b <= 0x7F => return Some((b as char, 1)), - Some(&b) => b, - }; - match b0 { - 0b110_00000..=0b110_11111 => { - if src.len() < 2 { - return None; - } - let b1 = src[1]; - if 0b11_000000 & b1 != TAG_CONT { - return None; - } - let cp = ((b0 & !TAG_TWO) as u32) << 6 | ((b1 & !TAG_CONT) as u32); - match cp { - 0x80..=0x7FF => char::from_u32(cp).map(|cp| (cp, 2)), - _ => None, - } - } - 0b1110_0000..=0b1110_1111 => { - if src.len() < 3 { - return None; - } - let (b1, b2) = (src[1], src[2]); - if 0b11_000000 & b1 != TAG_CONT { - return None; - } - if 0b11_000000 & b2 != TAG_CONT { - return None; - } - let cp = ((b0 & !TAG_THREE) as u32) << 12 - | ((b1 & !TAG_CONT) as u32) << 6 - | ((b2 & !TAG_CONT) as u32); - match cp { - // char::from_u32 will disallow surrogate codepoints. - 0x800..=0xFFFF => char::from_u32(cp).map(|cp| (cp, 3)), - _ => None, - } - } - 0b11110_000..=0b11110_111 => { - if src.len() < 4 { - return None; - } - let (b1, b2, b3) = (src[1], src[2], src[3]); - if 0b11_000000 & b1 != TAG_CONT { - return None; - } - if 0b11_000000 & b2 != TAG_CONT { - return None; - } - if 0b11_000000 & b3 != TAG_CONT { - return None; - } - let cp = ((b0 & !TAG_FOUR) as u32) << 18 - | ((b1 & !TAG_CONT) as u32) << 12 - | ((b2 & !TAG_CONT) as u32) << 6 - | ((b3 & !TAG_CONT) as u32); - match cp { - 0x10000..=0x0010_FFFF => char::from_u32(cp).map(|cp| (cp, 4)), - _ => None, - } - } - _ => None, - } -} - -/// Like `decode_utf8`, but decodes the last UTF-8 sequence in `src` instead -/// of the first. 
-pub fn decode_last_utf8(src: &[u8]) -> Option<(char, usize)> { - if src.is_empty() { - return None; - } - let mut start = src.len() - 1; - if src[start] <= 0x7F { - return Some((src[start] as char, 1)); - } - while start > src.len().saturating_sub(4) { - start -= 1; - if is_start_byte(src[start]) { - break; - } - } - match decode_utf8(&src[start..]) { - None => None, - Some((_, n)) if n < src.len() - start => None, - Some((cp, n)) => Some((cp, n)), - } -} - -fn is_start_byte(b: u8) -> bool { - b & 0b11_000000 != 0b1_0000000 -} - -#[cfg(test)] -mod tests { - use std::str; - - use quickcheck::quickcheck; - - use super::{ - decode_last_utf8, decode_utf8, TAG_CONT, TAG_FOUR, TAG_THREE, TAG_TWO, - }; - - #[test] - fn prop_roundtrip() { - fn p(given_cp: char) -> bool { - let mut tmp = [0; 4]; - let encoded_len = given_cp.encode_utf8(&mut tmp).len(); - let (got_cp, got_len) = decode_utf8(&tmp[..encoded_len]).unwrap(); - encoded_len == got_len && given_cp == got_cp - } - quickcheck(p as fn(char) -> bool) - } - - #[test] - fn prop_roundtrip_last() { - fn p(given_cp: char) -> bool { - let mut tmp = [0; 4]; - let encoded_len = given_cp.encode_utf8(&mut tmp).len(); - let (got_cp, got_len) = - decode_last_utf8(&tmp[..encoded_len]).unwrap(); - encoded_len == got_len && given_cp == got_cp - } - quickcheck(p as fn(char) -> bool) - } - - #[test] - fn prop_encode_matches_std() { - fn p(cp: char) -> bool { - let mut got = [0; 4]; - let n = cp.encode_utf8(&mut got).len(); - let expected = cp.to_string(); - &got[..n] == expected.as_bytes() - } - quickcheck(p as fn(char) -> bool) - } - - #[test] - fn prop_decode_matches_std() { - fn p(given_cp: char) -> bool { - let mut tmp = [0; 4]; - let n = given_cp.encode_utf8(&mut tmp).len(); - let (got_cp, _) = decode_utf8(&tmp[..n]).unwrap(); - let expected_cp = - str::from_utf8(&tmp[..n]).unwrap().chars().next().unwrap(); - got_cp == expected_cp - } - quickcheck(p as fn(char) -> bool) - } - - #[test] - fn prop_decode_last_matches_std() { - fn 
p(given_cp: char) -> bool { - let mut tmp = [0; 4]; - let n = given_cp.encode_utf8(&mut tmp).len(); - let (got_cp, _) = decode_last_utf8(&tmp[..n]).unwrap(); - let expected_cp = str::from_utf8(&tmp[..n]) - .unwrap() - .chars() - .rev() - .next() - .unwrap(); - got_cp == expected_cp - } - quickcheck(p as fn(char) -> bool) - } - - #[test] - fn reject_invalid() { - // Invalid start byte - assert_eq!(decode_utf8(&[0xFF]), None); - // Surrogate pair - assert_eq!(decode_utf8(&[0xED, 0xA0, 0x81]), None); - // Invalid continuation byte. - assert_eq!(decode_utf8(&[0xD4, 0xC2]), None); - // Bad lengths - assert_eq!(decode_utf8(&[0xC3]), None); // 2 bytes - assert_eq!(decode_utf8(&[0xEF, 0xBF]), None); // 3 bytes - assert_eq!(decode_utf8(&[0xF4, 0x8F, 0xBF]), None); // 4 bytes - // Not a minimal UTF-8 sequence - assert_eq!(decode_utf8(&[TAG_TWO, TAG_CONT | b'a']), None); - assert_eq!(decode_utf8(&[TAG_THREE, TAG_CONT, TAG_CONT | b'a']), None); - assert_eq!( - decode_utf8(&[TAG_FOUR, TAG_CONT, TAG_CONT, TAG_CONT | b'a',]), - None - ); - } - - #[test] - fn reject_invalid_last() { - // Invalid start byte - assert_eq!(decode_last_utf8(&[0xFF]), None); - // Surrogate pair - assert_eq!(decode_last_utf8(&[0xED, 0xA0, 0x81]), None); - // Bad lengths - assert_eq!(decode_last_utf8(&[0xC3]), None); // 2 bytes - assert_eq!(decode_last_utf8(&[0xEF, 0xBF]), None); // 3 bytes - assert_eq!(decode_last_utf8(&[0xF4, 0x8F, 0xBF]), None); // 4 bytes - // Not a minimal UTF-8 sequence - assert_eq!(decode_last_utf8(&[TAG_TWO, TAG_CONT | b'a']), None); - assert_eq!( - decode_last_utf8(&[TAG_THREE, TAG_CONT, TAG_CONT | b'a',]), - None - ); - assert_eq!( - decode_last_utf8( - &[TAG_FOUR, TAG_CONT, TAG_CONT, TAG_CONT | b'a',] - ), - None - ); - } -} diff --git a/test b/test index 13a26b78be..1f86b971fb 100755 --- a/test +++ b/test @@ -10,10 +10,10 @@ cd "$(dirname "$0")" # features. We don't test the complete space, since the complete space is quite # large. 
Hopefully once we migrate the test suite to better infrastructure # (like regex-automata), we'll be able to test more of the space. -echo "===== DEFAULT FEATURES ===" +echo "===== DEFAULT FEATURES =====" cargo test -echo "===== DOC TESTS ===" +echo "===== DOC TESTS =====" cargo test --doc features=( @@ -27,10 +27,8 @@ features=( "std perf-literal" ) for f in "${features[@]}"; do - echo "===== FEATURE: $f (default) ===" - cargo test --test default --no-default-features --features "$f" - echo "===== FEATURE: $f (default-bytes) ===" - cargo test --test default-bytes --no-default-features --features "$f" + echo "===== FEATURE: $f =====" + cargo test --test integration --no-default-features --features "$f" done # And test the probably-forever-nightly-only 'pattern' feature... diff --git a/testdata/regression.toml b/testdata/regression.toml index d5f9e30ece..96497bd532 100644 --- a/testdata/regression.toml +++ b/testdata/regression.toml @@ -635,3 +635,30 @@ haystack = "ababcd" bounds = [4, 6] search-kind = "earliest" matches = [[4, 6]] + +# I found this during the regex-automata migration. This is the fowler basic +# 154 test, but without anchored = true and without a match limit. +# +# This test caught a subtle bug in the hybrid reverse DFA search, where it +# would skip over the termination condition if it entered a start state. This +# was a double bug. Firstly, the reverse DFA shouldn't have had start states +# specialized in the first place, and thus it shouldn't have been possible to +# detect that the DFA had entered a start state. The second bug was that the +# start state handling was incorrect by jumping over the termination condition.
+[[test]] +name = "fowler-basic154-unanchored" +regex = '''a([bc]*)c*''' +haystack = '''abc''' +matches = [[[0, 3], [1, 3]]] + +# From: https://github.com/rust-lang/regex/issues/981 +# +# This was never really a problem in the new architecture because the +# regex-automata engines are far more principled about how they deal with +# look-around. (This was one of the many reasons I wanted to re-work the +# original regex crate engines.) +[[test]] +name = "word-boundary-interact-poorly-with-literal-optimizations" +regex = '(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))' +haystack = 'ubi-Darwin-x86_64.tar.gz' +matches = [] diff --git a/tests/bytes.rs b/tests/bytes.rs index d05f138edf..6cc822eb97 100644 --- a/tests/bytes.rs +++ b/tests/bytes.rs @@ -85,7 +85,7 @@ matiter!( // See https://github.com/rust-lang/regex/issues/303 #[test] fn negated_full_byte_range() { - assert!(::regex::bytes::Regex::new(r#"(?-u)[^\x00-\xff]"#).is_err()); + assert!(::regex::bytes::Regex::new(r#"(?-u)[^\x00-\xff]"#).is_ok()); } matiter!(word_boundary_ascii1, r"(?-u:\B)x(?-u:\B)", "áxβ"); diff --git a/tests/consistent.rs b/tests/consistent.rs deleted file mode 100644 index 722f2a51a0..0000000000 --- a/tests/consistent.rs +++ /dev/null @@ -1,238 +0,0 @@ -use regex::internal::ExecBuilder; - -/// Given a regex, check if all of the backends produce the same -/// results on a number of different inputs. -/// -/// For now this just throws quickcheck at the problem, which -/// is not very good because it only really tests half of the -/// problem space. It is pretty unlikely that a random string -/// will match any given regex, so this will probably just -/// be checking that the different backends fail in the same -/// way. This is still worthwhile to test, but is definitely not -/// the whole story. -/// -/// TODO(ethan): In order to cover the other half of the problem -/// space, we should generate a random matching string by inspecting -/// the AST of the input regex. 
The right way to do this probably -/// involves adding a custom Arbitrary instance around a couple -/// of newtypes. That way we can respect the quickcheck size hinting -/// and shrinking and whatnot. -pub fn backends_are_consistent(re: &str) -> Result { - let standard_backends = vec![ - ( - "bounded_backtracking_re", - ExecBuilder::new(re) - .bounded_backtracking() - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err))?, - ), - ( - "pikevm_re", - ExecBuilder::new(re) - .nfa() - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err))?, - ), - ( - "default_re", - ExecBuilder::new(re) - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err))?, - ), - ]; - - let utf8bytes_backends = vec![ - ( - "bounded_backtracking_utf8bytes_re", - ExecBuilder::new(re) - .bounded_backtracking() - .bytes(true) - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err))?, - ), - ( - "pikevm_utf8bytes_re", - ExecBuilder::new(re) - .nfa() - .bytes(true) - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err))?, - ), - ( - "default_utf8bytes_re", - ExecBuilder::new(re) - .bytes(true) - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err))?, - ), - ]; - - let bytes_backends = vec![ - ( - "bounded_backtracking_bytes_re", - ExecBuilder::new(re) - .bounded_backtracking() - .only_utf8(false) - .build() - .map(|exec| exec.into_byte_regex()) - .map_err(|err| format!("{}", err))?, - ), - ( - "pikevm_bytes_re", - ExecBuilder::new(re) - .nfa() - .only_utf8(false) - .build() - .map(|exec| exec.into_byte_regex()) - .map_err(|err| format!("{}", err))?, - ), - ( - "default_bytes_re", - ExecBuilder::new(re) - .only_utf8(false) - .build() - .map(|exec| exec.into_byte_regex()) - .map_err(|err| format!("{}", err))?, - ), - ]; - - Ok(string_checker::check_backends(&standard_backends)? - + string_checker::check_backends(&utf8bytes_backends)? 
- + bytes_checker::check_backends(&bytes_backends)?) -} - -// -// A consistency checker parameterized by the input type (&str or &[u8]). -// - -macro_rules! checker { - ($module_name:ident, $regex_type:path, $mk_input:expr) => { - mod $module_name { - use quickcheck; - use quickcheck::{Arbitrary, TestResult}; - - pub fn check_backends( - backends: &[(&str, $regex_type)], - ) -> Result { - let mut total_passed = 0; - for regex in backends[1..].iter() { - total_passed += quickcheck_regex_eq(&backends[0], regex)?; - } - - Ok(total_passed) - } - - fn quickcheck_regex_eq( - &(name1, ref re1): &(&str, $regex_type), - &(name2, ref re2): &(&str, $regex_type), - ) -> Result { - quickcheck::QuickCheck::new() - .quicktest(RegexEqualityTest::new( - re1.clone(), - re2.clone(), - )) - .map_err(|err| { - format!( - "{}(/{}/) and {}(/{}/) are inconsistent.\ - QuickCheck Err: {:?}", - name1, re1, name2, re2, err - ) - }) - } - - struct RegexEqualityTest { - re1: $regex_type, - re2: $regex_type, - } - impl RegexEqualityTest { - fn new(re1: $regex_type, re2: $regex_type) -> Self { - RegexEqualityTest { re1: re1, re2: re2 } - } - } - - impl quickcheck::Testable for RegexEqualityTest { - fn result(&self, gen: &mut quickcheck::Gen) -> TestResult { - let input = $mk_input(gen); - let input = &input; - - if self.re1.find(&input) != self.re2.find(input) { - return TestResult::error(format!( - "find mismatch input={:?}", - input - )); - } - - let cap1 = self.re1.captures(input); - let cap2 = self.re2.captures(input); - match (cap1, cap2) { - (None, None) => {} - (Some(cap1), Some(cap2)) => { - for (c1, c2) in cap1.iter().zip(cap2.iter()) { - if c1 != c2 { - return TestResult::error(format!( - "captures mismatch input={:?}", - input - )); - } - } - } - _ => { - return TestResult::error(format!( - "captures mismatch input={:?}", - input - )) - } - } - - let fi1 = self.re1.find_iter(input); - let fi2 = self.re2.find_iter(input); - for (m1, m2) in fi1.zip(fi2) { - if m1 != m2 { - return 
TestResult::error(format!( - "find_iter mismatch input={:?}", - input - )); - } - } - - let ci1 = self.re1.captures_iter(input); - let ci2 = self.re2.captures_iter(input); - for (cap1, cap2) in ci1.zip(ci2) { - for (c1, c2) in cap1.iter().zip(cap2.iter()) { - if c1 != c2 { - return TestResult::error(format!( - "captures_iter mismatch input={:?}", - input - )); - } - } - } - - let s1 = self.re1.split(input); - let s2 = self.re2.split(input); - for (chunk1, chunk2) in s1.zip(s2) { - if chunk1 != chunk2 { - return TestResult::error(format!( - "split mismatch input={:?}", - input - )); - } - } - - TestResult::from_bool(true) - } - } - } // mod - }; // rule case -} // macro_rules! - -checker!(string_checker, ::regex::Regex, |gen| String::arbitrary(gen)); -checker!(bytes_checker, ::regex::bytes::Regex, |gen| Vec::::arbitrary( - gen -)); diff --git a/tests/crates_regex.rs b/tests/crates_regex.rs deleted file mode 100644 index 200ec27b2d..0000000000 --- a/tests/crates_regex.rs +++ /dev/null @@ -1,3287 +0,0 @@ -// DO NOT EDIT. Automatically generated by 'scripts/scrape_crates_io.py' -// on 2018-06-20 09:56:32.820354. 
- -// autoshutdown-0.1.0: r"\s*(\d+)(\w)\s*" -consistent!(autoshutdown_0, r"\s*(\d+)(\w)\s*"); - -// epub-1.1.1: r"/" -consistent!(epub_0, r"/"); - -// rpi-info-0.2.0: "^Revision\t+: ([0-9a-fA-F]+)" -consistent!(rpi_info_0, "^Revision\t+: ([0-9a-fA-F]+)"); - -// rpi-info-0.2.0: "Serial\t+: ([0-9a-fA-F]+)" -consistent!(rpi_info_1, "Serial\t+: ([0-9a-fA-F]+)"); - -// pnet_macros-0.21.0: r"^u([0-9]+)(be|le|he)?$" -consistent!(pnet_macros_0, r"^u([0-9]+)(be|le|he)?$"); - -// iban_validate-1.0.3: r"^[A-Z]{2}\d{2}[A-Z\d]{1,30}$" -consistent!(iban_validate_0, r"^[A-Z]{2}\d{2}[A-Z\d]{1,30}$"); - -// markifier-0.1.0: r".*\[(?P.+)%.*\].*" -consistent!(markifier_0, r".*\[(?P.+)%.*\].*"); - -// mallumo-0.3.0: r"(#include) (\S*)(.*)" -consistent!(mallumo_0, r"(#include) (\S*)(.*)"); - -// mallumo-0.3.0: r"(ERROR: \d+:)(\d+)(: )(.+)" -consistent!(mallumo_1, r"(ERROR: \d+:)(\d+)(: )(.+)"); - -// mallumo-0.3.0: r"(\d+\()(\d+)(?:\) : )(.+)" -consistent!(mallumo_2, r"(\d+\()(\d+)(?:\) : )(.+)"); - -// magnet_more-0.0.1: r"(.+?)(\[.*?\])?" 
-consistent!(magnet_more_0, r"(.+?)(\[.*?\])?"); - -// magnet_app-0.0.1: r":(?P[a-zA-Z_]+)" -consistent!(magnet_app_0, r":(?P[a-zA-Z_]+)"); - -// yubibomb-0.2.0: r"^\d{6}(?:\s*,\s*\d{6})*$" -consistent!(yubibomb_0, r"^\d{6}(?:\s*,\s*\d{6})*$"); - -// multirust-rs-0.0.4: r"[\\/]([^\\/?]+)(\?.*)?$" -consistent!(multirust_rs_0, r"[\\/]([^\\/?]+)(\?.*)?$"); - -// hueclient-0.3.2: "\"[a-z]*\":null" -consistent!(hueclient_0, "\"[a-z]*\":null"); - -// hueclient-0.3.2: ",+" -consistent!(hueclient_1, ",+"); - -// hueclient-0.3.2: ",\\}" -consistent!(hueclient_2, ",\\}"); - -// hueclient-0.3.2: "\\{," -consistent!(hueclient_3, "\\{,"); - -// aerial-0.1.0: r"[a-zA-Z_\$][a-zA-Z_0-9]*" -consistent!(aerial_0, r"[a-zA-Z_\$][a-zA-Z_0-9]*"); - -// aerial-0.1.0: r"thi[sng]+" -consistent!(aerial_1, r"thi[sng]+"); - -// rvue-0.1.0: r"(.+)\s+\((.+?)\)" -consistent!(rvue_0, r"(.+)\s+\((.+?)\)"); - -// rvue-0.1.0: r"([\d\.]+)\s*out\s*of\s*([\d\.]+)" -consistent!(rvue_1, r"([\d\.]+)\s*out\s*of\s*([\d\.]+)"); - -// rvue-0.1.0: r"^([\d\.]+)\s*(?:\(\))?$" -consistent!(rvue_2, r"^([\d\.]+)\s*(?:\(\))?$"); - -// rvue-0.1.0: r"([\d\.]+)\s*Points\s*Possible" -consistent!(rvue_3, r"([\d\.]+)\s*Points\s*Possible"); - -// rvue-0.1.0: r"([\d\.]+)\s*/\s*([\d\.]+)" -consistent!(rvue_4, r"([\d\.]+)\s*/\s*([\d\.]+)"); - -// rvsim-0.1.0: r"_?([_a-z0-9]+)\s*:\s*([_a-z0-9]+)\s*[,)]" -consistent!(rvsim_0, r"_?([_a-z0-9]+)\s*:\s*([_a-z0-9]+)\s*[,)]"); - -// nereon-0.1.4: "(.*[^\\\\])\\{\\}(.*)" -consistent!(nereon_0, "(.*[^\\\\])\\{\\}(.*)"); - -// next_episode-0.3.0: r"((?i)^(.+).s(\d+)e(\d+).*)$" -consistent!(next_episode_0, r"((?i)^(.+).s(\d+)e(\d+).*)$"); - -// migrant_lib-0.19.2: r"[^a-z0-9-]+" -consistent!(migrant_lib_0, r"[^a-z0-9-]+"); - -// migrant_lib-0.19.2: r"[0-9]{14}_[a-z0-9-]+" -consistent!(migrant_lib_1, r"[0-9]{14}_[a-z0-9-]+"); - -// migrant_lib-0.19.2: r"([0-9]{14}_)?[a-z0-9-]+" -consistent!(migrant_lib_2, r"([0-9]{14}_)?[a-z0-9-]+"); - -// minipre-0.2.0: "$_" -consistent!(minipre_0, 
"$_"); - -// minifier-0.0.13: r">\s+<" -consistent!(minifier_0, r">\s+<"); - -// minifier-0.0.13: r"\s{2,}|[\r\n]" -consistent!(minifier_1, r"\s{2,}|[\r\n]"); - -// minifier-0.0.13: r"<(style|script)[\w|\s].*?>" -consistent!(minifier_2, r"<(style|script)[\w|\s].*?>"); - -// minifier-0.0.13: "" -consistent!(minifier_3, ""); - -// minifier-0.0.13: r"<\w.*?>" -consistent!(minifier_4, r"<\w.*?>"); - -// minifier-0.0.13: r" \s+|\s +" -consistent!(minifier_5, r" \s+|\s +"); - -// minifier-0.0.13: r"\w\s+\w" -consistent!(minifier_6, r"\w\s+\w"); - -// minifier-0.0.13: r"'\s+>" -consistent!(minifier_7, r"'\s+>"); - -// minifier-0.0.13: r"\d\s+>" -consistent!(minifier_8, r"\d\s+>"); - -// ggp-rs-0.1.2: r"(?P\([^)]+\))|(?P[a-zA-Z0-9_]+)" -consistent!(ggp_rs_0, r"(?P\([^)]+\))|(?P[a-zA-Z0-9_]+)"); - -// ggp-rs-0.1.2: r"\((.*)\)." -consistent!(ggp_rs_1, r"\((.*)\)."); - -// poe-superfilter-0.2.0: "[A-Za-z0-9_]" -consistent!(poe_superfilter_0, "[A-Za-z0-9_]"); - -// poke-a-mango-0.5.0: r"(\d+)x(\d+)" -consistent!(poke_a_mango_0, r"(\d+)x(\d+)"); - -// pop3-rs-0.1.0: r"(?P\d+) (?P\d+)" -consistent!(pop3_rs_0, r"(?P\d+) (?P\d+)"); - -// pop3-rs-0.1.0: r"(?P\d+) (?P[\x21-\x7E]{1,70})" -consistent!(pop3_rs_1, r"(?P\d+) (?P[\x21-\x7E]{1,70})"); - -// pop3-rs-0.1.0: r"(<.*>)\r\n$" -consistent!(pop3_rs_2, r"(<.*>)\r\n$"); - -// pop3-rs-0.1.0: r"^(?P\+OK|-ERR) (?P.*)" -consistent!(pop3_rs_3, r"^(?P\+OK|-ERR) (?P.*)"); - -// pop3-1.0.6: r"^\.\r\n$" -consistent!(pop3_0, r"^\.\r\n$"); - -// pop3-1.0.6: r"\+OK(.*)" -consistent!(pop3_1, r"\+OK(.*)"); - -// pop3-1.0.6: r"-ERR(.*)" -consistent!(pop3_2, r"-ERR(.*)"); - -// pop3-1.0.6: r"\+OK (\d+) (\d+)\r\n" -consistent!(pop3_3, r"\+OK (\d+) (\d+)\r\n"); - -// pop3-1.0.6: r"(\d+) ([\x21-\x7e]+)\r\n" -consistent!(pop3_4, r"(\d+) ([\x21-\x7e]+)\r\n"); - -// pop3-1.0.6: r"\+OK (\d+) ([\x21-\x7e]+)\r\n" -consistent!(pop3_5, r"\+OK (\d+) ([\x21-\x7e]+)\r\n"); - -// pop3-1.0.6: r"(\d+) (\d+)\r\n" -consistent!(pop3_6, r"(\d+) (\d+)\r\n"); - -// 
pop3-1.0.6: r"\+OK (\d+) (\d+)\r\n" -consistent!(pop3_7, r"\+OK (\d+) (\d+)\r\n"); - -// polk-1.1.3: "github:(\\w+)/?(\\w+)?" -consistent!(polk_0, "github:(\\w+)/?(\\w+)?"); - -// geochunk-0.1.5: "^[0-9]{5}" -consistent!(geochunk_0, "^[0-9]{5}"); - -// generic-dns-update-1.1.4: r"((?:(?:0|1[\d]{0,2}|2(?:[0-4]\d?|5[0-5]?|[6-9])?|[3-9]\d?)\.){3}(?:0|1[\d]{0,2}|2(?:[0-4]\d?|5[0-5]?|[6-9])?|[3-9]\d?))" -consistent!(generic_dns_update_0, r"((?:(?:0|1[\d]{0,2}|2(?:[0-4]\d?|5[0-5]?|[6-9])?|[3-9]\d?)\.){3}(?:0|1[\d]{0,2}|2(?:[0-4]\d?|5[0-5]?|[6-9])?|[3-9]\d?))"); - -// generic-dns-update-1.1.4: r"((([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){6}:[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){5}:([0-9A-Fa-f]{1,4}:)?[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){4}:([0-9A-Fa-f]{1,4}:){0,2}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){3}:([0-9A-Fa-f]{1,4}:){0,3}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){2}:([0-9A-Fa-f]{1,4}:){0,4}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){6}((\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d)\.){3}(\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d))|(([0-9A-Fa-f]{1,4}:){0,5}:((\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d)\.){3}(\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d))|(::([0-9A-Fa-f]{1,4}:){0,5}((\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d)\.){3}(\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d))|([0-9A-Fa-f]{1,4}::([0-9A-Fa-f]{1,4}:){0,5}[0-9A-Fa-f]{1,4})|(::([0-9A-Fa-f]{1,4}:){0,6}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){1,7}:))" -consistent!(generic_dns_update_1, 
r"((([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){6}:[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){5}:([0-9A-Fa-f]{1,4}:)?[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){4}:([0-9A-Fa-f]{1,4}:){0,2}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){3}:([0-9A-Fa-f]{1,4}:){0,3}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){2}:([0-9A-Fa-f]{1,4}:){0,4}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){6}((\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d)\.){3}(\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d))|(([0-9A-Fa-f]{1,4}:){0,5}:((\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d)\.){3}(\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d))|(::([0-9A-Fa-f]{1,4}:){0,5}((\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d)\.){3}(\d((25[0-5])|(1\d{2})|(2[0-4]\d)|(\d{1,2}))\d))|([0-9A-Fa-f]{1,4}::([0-9A-Fa-f]{1,4}:){0,5}[0-9A-Fa-f]{1,4})|(::([0-9A-Fa-f]{1,4}:){0,6}[0-9A-Fa-f]{1,4})|(([0-9A-Fa-f]{1,4}:){1,7}:))"); - -// generic-dns-update-1.1.4: r"([0-9.]*)" -consistent!( - generic_dns_update_2, - r"([0-9.]*)" -); - -// generic-dns-update-1.1.4: r"([0-9]+)" -consistent!(generic_dns_update_3, r"([0-9]+)"); - -// generic-dns-update-1.1.4: r"([0-9]+)" -consistent!(generic_dns_update_4, r"([0-9]+)"); - -// generic-dns-update-1.1.4: r"([0-1]*)" -consistent!(generic_dns_update_5, r"([0-1]*)"); - -// generate-nix-pkg-0.3.0: r"(\d*)\.(\d*)\.(\d*)(-(\S*))?" -consistent!(generate_nix_pkg_0, r"(\d*)\.(\d*)\.(\d*)(-(\S*))?"); - -// generate-nix-pkg-0.3.0: r"^(\S*) (\d*)\.(\d*)\.(\d*)(-(\S*))?" 
-consistent!(generate_nix_pkg_1, r"^(\S*) (\d*)\.(\d*)\.(\d*)(-(\S*))?"); - -// genact-0.6.0: r"arch/([a-z0-9_])+/" -consistent!(genact_0, r"arch/([a-z0-9_])+/"); - -// genact-0.6.0: r"arch/([a-z0-9_])+/" -consistent!(genact_1, r"arch/([a-z0-9_])+/"); - -// cron_rs-0.1.6: r"^\s*((\*(/\d+)?)|[0-9-,/]+)(\s+((\*(/\d+)?)|[0-9-,/]+)){4,5}\s*$" -consistent!( - cron_rs_0, - r"^\s*((\*(/\d+)?)|[0-9-,/]+)(\s+((\*(/\d+)?)|[0-9-,/]+)){4,5}\s*$" -); - -// systemfd-0.3.0: r"^([a-zA-Z]+)::(.+)$" -consistent!(systemfd_0, r"^([a-zA-Z]+)::(.+)$"); - -// symbolic-debuginfo-5.0.2: "__?hidden#\\d+_" -consistent!(symbolic_debuginfo_0, "__?hidden#\\d+_"); - -// symbolic-minidump-5.0.2: r"^Linux ([^ ]+) (.*) \w+(?: GNU/Linux)?$" -consistent!(symbolic_minidump_0, r"^Linux ([^ ]+) (.*) \w+(?: GNU/Linux)?$"); - -// graphql-idl-parser-0.1.1: "^(?u:\\#)(?u:[\t-\r - \u{85}-\u{85}\u{a0}-\u{a0}\u{1680}-\u{1680}\u{2000}-\u{200a}\u{2028}-\u{2029}\u{202f}-\u{202f}\u{205f}-\u{205f}\u{3000}-\u{3000}])*(?u:.)+" -consistent!(graphql_idl_parser_0, "^(?u:\\#)(?u:[\t-\r - \u{85}-\u{85}\u{a0}-\u{a0}\u{1680}-\u{1680}\u{2000}-\u{200a}\u{2028}-\u{2029}\u{202f}-\u{202f}\u{205f}-\u{205f}\u{3000}-\u{3000}])*(?u:.)+"); - -// graphql-idl-parser-0.1.1: "^(?u:=)(?u:[\t-\r - \u{85}-\u{85}\u{a0}-\u{a0}\u{1680}-\u{1680}\u{2000}-\u{200a}\u{2028}-\u{2029}\u{202f}-\u{202f}\u{205f}-\u{205f}\u{3000}-\u{3000}])*(?u:.)+" -consistent!(graphql_idl_parser_1, "^(?u:=)(?u:[\t-\r - \u{85}-\u{85}\u{a0}-\u{a0}\u{1680}-\u{1680}\u{2000}-\u{200a}\u{2028}-\u{2029}\u{202f}-\u{202f}\u{205f}-\u{205f}\u{3000}-\u{3000}])*(?u:.)+"); - -// graphql-idl-parser-0.1.1: "^(?u:[A-Z_-_a-z])(?u:[0-9A-Z_-_a-z])*" -consistent!(graphql_idl_parser_2, "^(?u:[A-Z_-_a-z])(?u:[0-9A-Z_-_a-z])*"); - -// graphql-idl-parser-0.1.1: "^(?u:!)" -consistent!(graphql_idl_parser_3, "^(?u:!)"); - -// graphql-idl-parser-0.1.1: "^(?u:\\()" -consistent!(graphql_idl_parser_4, "^(?u:\\()"); - -// graphql-idl-parser-0.1.1: "^(?u:\\))" -consistent!(graphql_idl_parser_5, 
"^(?u:\\))"); - -// graphql-idl-parser-0.1.1: "^(?u:,)" -consistent!(graphql_idl_parser_6, "^(?u:,)"); - -// graphql-idl-parser-0.1.1: "^(?u::)" -consistent!(graphql_idl_parser_7, "^(?u::)"); - -// graphql-idl-parser-0.1.1: "^(?u:@)" -consistent!(graphql_idl_parser_8, "^(?u:@)"); - -// graphql-idl-parser-0.1.1: "^(?u:\\[)" -consistent!(graphql_idl_parser_9, "^(?u:\\[)"); - -// graphql-idl-parser-0.1.1: "^(?u:\\])" -consistent!(graphql_idl_parser_10, "^(?u:\\])"); - -// graphql-idl-parser-0.1.1: "^(?u:enum)" -consistent!(graphql_idl_parser_11, "^(?u:enum)"); - -// graphql-idl-parser-0.1.1: "^(?u:implements)" -consistent!(graphql_idl_parser_12, "^(?u:implements)"); - -// graphql-idl-parser-0.1.1: "^(?u:input)" -consistent!(graphql_idl_parser_13, "^(?u:input)"); - -// graphql-idl-parser-0.1.1: "^(?u:interface)" -consistent!(graphql_idl_parser_14, "^(?u:interface)"); - -// graphql-idl-parser-0.1.1: "^(?u:scalar)" -consistent!(graphql_idl_parser_15, "^(?u:scalar)"); - -// graphql-idl-parser-0.1.1: "^(?u:type)" -consistent!(graphql_idl_parser_16, "^(?u:type)"); - -// graphql-idl-parser-0.1.1: "^(?u:union)" -consistent!(graphql_idl_parser_17, "^(?u:union)"); - -// graphql-idl-parser-0.1.1: "^(?u:\\{)" -consistent!(graphql_idl_parser_18, "^(?u:\\{)"); - -// graphql-idl-parser-0.1.1: "^(?u:\\})" -consistent!(graphql_idl_parser_19, "^(?u:\\})"); - -// grimoire-0.1.0: r"(?s)/\*(?P.*?)\*/" -consistent!(grimoire_0, r"(?s)/\*(?P.*?)\*/"); - -// phonenumber-0.2.0+8.9.0: r"[\d]+(?:[~\x{2053}\x{223C}\x{FF5E}][\d]+)?" 
-consistent!(phonenumber_0, r"[\d]+(?:[~\x{2053}\x{223C}\x{FF5E}][\d]+)?"); - -// phonenumber-0.2.0+8.9.0: r"[, \[\]]" -consistent!(phonenumber_1, r"[, \[\]]"); - -// phonenumber-0.2.0+8.9.0: r"[\\/] *x" -consistent!(phonenumber_2, r"[\\/] *x"); - -// phonenumber-0.2.0+8.9.0: r"[[\P{N}&&\P{L}]&&[^#]]+$" -consistent!(phonenumber_3, r"[[\P{N}&&\P{L}]&&[^#]]+$"); - -// phonenumber-0.2.0+8.9.0: r"(?:.*?[A-Za-z]){3}.*" -consistent!(phonenumber_4, r"(?:.*?[A-Za-z]){3}.*"); - -// phonenumber-0.2.0+8.9.0: r"(\D+)" -consistent!(phonenumber_5, r"(\D+)"); - -// phonenumber-0.2.0+8.9.0: r"(\$\d)" -consistent!(phonenumber_6, r"(\$\d)"); - -// phonenumber-0.2.0+8.9.0: r"\(?\$1\)?" -consistent!(phonenumber_7, r"\(?\$1\)?"); - -// phone_number-0.1.0: r"\D" -consistent!(phone_number_0, r"\D"); - -// phone_number-0.1.0: r"^0+" -consistent!(phone_number_1, r"^0+"); - -// phone_number-0.1.0: r"^89" -consistent!(phone_number_2, r"^89"); - -// phone_number-0.1.0: r"^8+" -consistent!(phone_number_3, r"^8+"); - -// phile-0.1.4: r"^ *(\^_*\^) *$" -consistent!(phile_0, r"^ *(\^_*\^) *$"); - -// phile-0.1.4: r"^[_\p{XID_Start}]$" -consistent!(phile_1, r"^[_\p{XID_Start}]$"); - -// phile-0.1.4: r"^\p{XID_Continue}$" -consistent!(phile_2, r"^\p{XID_Continue}$"); - -// uritemplate-0.1.2: "%25(?P[0-9a-fA-F][0-9a-fA-F])" -consistent!(uritemplate_0, "%25(?P[0-9a-fA-F][0-9a-fA-F])"); - -// urdf-rs-0.4.2: "^package://(\\w+)/" -consistent!(urdf_rs_0, "^package://(\\w+)/"); - -// url-match-0.1.7: r"(?P[?&.])" -consistent!(url_match_0, r"(?P[?&.])"); - -// url-match-0.1.7: r":(?P[a-zA-Z0-9_-]+)" -consistent!(url_match_1, r":(?P[a-zA-Z0-9_-]+)"); - -// tsm-sys-0.1.0: r"hello world" -consistent!(tsm_sys_0, r"hello world"); - -// deb-version-0.1.0: "^(?:(?:(?:\\d+:).+)|(?:[^:]+))$" -consistent!(deb_version_0, "^(?:(?:(?:\\d+:).+)|(?:[^:]+))$"); - -// debcargo-2.1.0: r"^(?i)(a|an|the)\s+" -consistent!(debcargo_0, r"^(?i)(a|an|the)\s+"); - -// debcargo-2.1.0: 
r"^(?i)(rust\s+)?(implementation|library|tool|crate)\s+(of|to|for)\s+" -consistent!( - debcargo_1, - r"^(?i)(rust\s+)?(implementation|library|tool|crate)\s+(of|to|for)\s+" -); - -// feaders-0.2.0: r"^.*\.h$" -consistent!(feaders_0, r"^.*\.h$"); - -// feaders-0.2.0: r"^.*\.c$" -consistent!(feaders_1, r"^.*\.c$"); - -// feaders-0.2.0: r"^.*\.hpp$" -consistent!(feaders_2, r"^.*\.hpp$"); - -// feaders-0.2.0: r"^.*\.cc$" -consistent!(feaders_3, r"^.*\.cc$"); - -// feaders-0.2.0: r"^.*\.cpp$" -consistent!(feaders_4, r"^.*\.cpp$"); - -// hyperscan-0.1.6: r"CPtr\(\w+\)" -consistent!(hyperscan_0, r"CPtr\(\w+\)"); - -// hyperscan-0.1.6: r"^Version:\s(\d\.\d\.\d)\sFeatures:\s+(\w+)?\sMode:\s(\w+)$" -consistent!( - hyperscan_1, - r"^Version:\s(\d\.\d\.\d)\sFeatures:\s+(\w+)?\sMode:\s(\w+)$" -); - -// hyperscan-0.1.6: r"RawDatabase\{db: \w+\}" -consistent!(hyperscan_2, r"RawDatabase\{db: \w+\}"); - -// hyperscan-0.1.6: r"RawSerializedDatabase\{p: \w+, len: \d+\}" -consistent!(hyperscan_3, r"RawSerializedDatabase\{p: \w+, len: \d+\}"); - -// ucd-parse-0.1.1: r"[0-9A-F]+" -consistent!(ucd_parse_0, r"[0-9A-F]+"); - -// afsort-0.2.0: r".*" -consistent!(afsort_0, r".*"); - -// afsort-0.2.0: r".*" -consistent!(afsort_1, r".*"); - -// afsort-0.2.0: r".*" -consistent!(afsort_2, r".*"); - -// afsort-0.2.0: r".*" -consistent!(afsort_3, r".*"); - -// afsort-0.2.0: r".*" -consistent!(afsort_4, r".*"); - -// afsort-0.2.0: r".*" -consistent!(afsort_5, r".*"); - -// afsort-0.2.0: r"^[a-z]+$" -consistent!(afsort_6, r"^[a-z]+$"); - -// afsort-0.2.0: r"^[a-z]+$" -consistent!(afsort_7, r"^[a-z]+$"); - -// tin-summer-1.21.4: r"(\.git|\.pijul|_darcs|\.hg)$" -consistent!(tin_summer_0, r"(\.git|\.pijul|_darcs|\.hg)$"); - -// tin-drummer-1.0.1: r".*?\.(a|la|lo|o|ll|keter|bc|dyn_o|d|rlib|crate|min\.js|hi|dyn_hi|S|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$" -consistent!(tin_drummer_0, 
r".*?\.(a|la|lo|o|ll|keter|bc|dyn_o|d|rlib|crate|min\.js|hi|dyn_hi|S|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$"); - -// tin-drummer-1.0.1: r".*?\.(stats|conf|h|out|cache.*|dat|pc|info|\.js)$" -consistent!( - tin_drummer_1, - r".*?\.(stats|conf|h|out|cache.*|dat|pc|info|\.js)$" -); - -// tin-drummer-1.0.1: r".*?\.(exe|a|la|o|ll|keter|bc|dyn_o|d|rlib|crate|min\.js|hi|dyn_hi|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$" -consistent!(tin_drummer_2, r".*?\.(exe|a|la|o|ll|keter|bc|dyn_o|d|rlib|crate|min\.js|hi|dyn_hi|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$"); - -// tin-drummer-1.0.1: r".*?\.(stats|conf|h|out|cache.*|\.js)$" -consistent!(tin_drummer_3, r".*?\.(stats|conf|h|out|cache.*|\.js)$"); - -// tin-drummer-1.0.1: r"(\.git|\.pijul|_darcs|\.hg)$" -consistent!(tin_drummer_4, r"(\.git|\.pijul|_darcs|\.hg)$"); - -// tin-drummer-1.0.1: r".*?\.(dyn_o|out|d|hi|dyn_hi|dump-.*|p_hi|p_o|prof|tix)$" -consistent!( - tin_drummer_5, - r".*?\.(dyn_o|out|d|hi|dyn_hi|dump-.*|p_hi|p_o|prof|tix)$" -); - -// tin-drummer-1.0.1: r".*?\.(ibc)$" -consistent!(tin_drummer_6, r".*?\.(ibc)$"); - -// tin-drummer-1.0.1: r"\.stack-work|dist-newstyle" -consistent!(tin_drummer_7, r"\.stack-work|dist-newstyle"); - -// timmy-0.3.0: r"_NET_WM_PID\(CARDINAL\) = (\d+)" -consistent!(timmy_0, r"_NET_WM_PID\(CARDINAL\) = (\d+)"); - -// timmy-0.3.0: r"today|yesterday|now" -consistent!(timmy_1, r"today|yesterday|now"); - -// timmy-0.3.0: r"(?P\d{1,2})/(?P\d{1,2})(/(?P\d{4}|\d{2}))?" -consistent!( - timmy_2, - r"(?P\d{1,2})/(?P\d{1,2})(/(?P\d{4}|\d{2}))?" 
-); - -// timmy-0.3.0: r"(?P\d+) (days?|ds?)(?P( ago)?)" -consistent!(timmy_3, r"(?P\d+) (days?|ds?)(?P( ago)?)"); - -// timmy-0.3.0: r"(?P
\d{2}):(?P\d{2})" -consistent!(timmy_4, r"(?P
\d{2}):(?P\d{2})"); - -// tinfo-0.5.0: r"^(\d+): \d+ windows \(.*\) \[\d+x\d+\]( \(attached\))?" -consistent!( - tinfo_0, - r"^(\d+): \d+ windows \(.*\) \[\d+x\d+\]( \(attached\))?" -); - -// tinfo-0.5.0: r"^(\d+):(\d+): (.*) \((\d+) panes\) \[(\d+)x(\d+)\]" -consistent!(tinfo_1, r"^(\d+):(\d+): (.*) \((\d+) panes\) \[(\d+)x(\d+)\]"); - -// timespan-0.0.4: r"(?:\\\{start\\\}|\\\{end\\\})" -consistent!(timespan_0, r"(?:\\\{start\\\}|\\\{end\\\})"); - -// timespan-0.0.4: r"(.*)\s+-\s+(.*)" -consistent!(timespan_1, r"(.*)\s+-\s+(.*)"); - -// timespan-0.0.4: r"(.*)\s+(\w+)$" -consistent!(timespan_2, r"(.*)\s+(\w+)$"); - -// timespan-0.0.4: r"(.*)\s+(\w+)$" -consistent!(timespan_3, r"(.*)\s+(\w+)$"); - -// timespan-0.0.4: r"(.*)\s+-\s+(.*)" -consistent!(timespan_4, r"(.*)\s+-\s+(.*)"); - -// titlecase-0.10.0: r"[[:lower:]]" -consistent!(titlecase_0, r"[[:lower:]]"); - -// tight-0.1.3: r"^\d+ (day|week|month|year)s?$" -consistent!(tight_0, r"^\d+ (day|week|month|year)s?$"); - -// tight-0.1.3: r"^\d+ (day|week|month|year)s?$" -consistent!(tight_1, r"^\d+ (day|week|month|year)s?$"); - -// yaml-0.2.1: r"^[-+]?(0|[1-9][0-9_]*)$" -consistent!(yaml_0, r"^[-+]?(0|[1-9][0-9_]*)$"); - -// yaml-0.2.1: r"^([-+]?)0o?([0-7_]+)$" -consistent!(yaml_1, r"^([-+]?)0o?([0-7_]+)$"); - -// yaml-0.2.1: r"^([-+]?)0x([0-9a-fA-F_]+)$" -consistent!(yaml_2, r"^([-+]?)0x([0-9a-fA-F_]+)$"); - -// yaml-0.2.1: r"^([-+]?)0b([0-1_]+)$" -consistent!(yaml_3, r"^([-+]?)0b([0-1_]+)$"); - -// yaml-0.2.1: r"^([-+]?)(\.[0-9]+|[0-9]+(\.[0-9]*)?([eE][-+]?[0-9]+)?)$" -consistent!( - yaml_4, - r"^([-+]?)(\.[0-9]+|[0-9]+(\.[0-9]*)?([eE][-+]?[0-9]+)?)$" -); - -// yaml-0.2.1: r"^[+]?(\.inf|\.Inf|\.INF)$" -consistent!(yaml_5, r"^[+]?(\.inf|\.Inf|\.INF)$"); - -// yaml-0.2.1: r"^-(\.inf|\.Inf|\.INF)$" -consistent!(yaml_6, r"^-(\.inf|\.Inf|\.INF)$"); - -// yaml-0.2.1: r"^(\.nan|\.NaN|\.NAN)$" -consistent!(yaml_7, r"^(\.nan|\.NaN|\.NAN)$"); - -// yaml-0.2.1: r"^(null|Null|NULL|~)$" -consistent!(yaml_8, 
r"^(null|Null|NULL|~)$"); - -// yaml-0.2.1: r"^(true|True|TRUE|yes|Yes|YES)$" -consistent!(yaml_9, r"^(true|True|TRUE|yes|Yes|YES)$"); - -// yaml-0.2.1: r"^(false|False|FALSE|no|No|NO)$" -consistent!(yaml_10, r"^(false|False|FALSE|no|No|NO)$"); - -// kefia-0.1.0: r"(?m)^(\S+)/(\S+) (\S+)(?: \((.*)\))?$" -consistent!(kefia_0, r"(?m)^(\S+)/(\S+) (\S+)(?: \((.*)\))?$"); - -// risp-0.7.0: "^(\\s+|;.*?(\n|$))+" -consistent!(risp_0, "^(\\s+|;.*?(\n|$))+"); - -// risp-0.7.0: "^\".*?\"" -consistent!(risp_1, "^\".*?\""); - -// risp-0.7.0: r"^[^\s\{\}()\[\]]+" -consistent!(risp_2, r"^[^\s\{\}()\[\]]+"); - -// risp-0.7.0: r"^-?\d+" -consistent!(risp_3, r"^-?\d+"); - -// ripgrep-0.8.1: "^([0-9]+)([KMG])?$" -consistent!(ripgrep_0, "^([0-9]+)([KMG])?$"); - -// riquid-0.0.1: r"^\w+" -consistent!(riquid_0, r"^\w+"); - -// riquid-0.0.1: r"^\d+" -consistent!(riquid_1, r"^\d+"); - -// recursive_disassembler-2.1.2: r"\A(0x)?([a-fA-F0-9]+)\z" -consistent!(recursive_disassembler_0, r"\A(0x)?([a-fA-F0-9]+)\z"); - -// remake-0.1.0: r"^[a-zA-Z_][a-zA-Z0-9_]*" -consistent!(remake_0, r"^[a-zA-Z_][a-zA-Z0-9_]*"); - -// regex-decode-0.1.0: r"'(?P[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_0, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_1, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_2, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_3, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_4, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_5, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: 
r"'(?P<title>[^']+)'\s+\((?P<year>\d{2})\)" -consistent!(regex_decode_6, r"'(?P<title>[^']+)'\s+\((?P<year>\d{2})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_7, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)" -consistent!(regex_decode_8, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)" -consistent!(regex_decode_9, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)" -consistent!(regex_decode_10, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)" -consistent!(regex_decode_11, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)" -consistent!(regex_decode_12, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)"); - -// regex-decode-0.1.0: r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)" -consistent!(regex_decode_13, r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})?\)"); - -// regex-cache-0.2.0: "[0-9]{3}-[0-9]{3}-[0-9]{4}" -consistent!(regex_cache_0, "[0-9]{3}-[0-9]{3}-[0-9]{4}"); - -// regex-cache-0.2.0: r"^\d+$" -consistent!(regex_cache_1, r"^\d+$"); - -// regex-cache-0.2.0: r"^[a-z]+$" -consistent!(regex_cache_2, r"^[a-z]+$"); - -// regex-cache-0.2.0: r"^\d+$" -consistent!(regex_cache_3, r"^\d+$"); - -// regex-cache-0.2.0: r"^\d+$" -consistent!(regex_cache_4, r"^\d+$"); - -// regex_dfa-0.5.0: r"\d{4}-\d{2}-\d{2}" -consistent!(regex_dfa_0, r"\d{4}-\d{2}-\d{2}"); - -// reaper-2.0.0: r"^[0-9\p{L} _\\.]{3,16}$" -consistent!(reaper_0, r"^[0-9\p{L} _\\.]{3,16}$"); - -// retdec-0.1.0: r"^attachment; filename=(.+)$" -consistent!(retdec_0, r"^attachment; filename=(.+)$"); - -// renvsubst-0.1.2: r"(\\)(?P<head>\$[0-9A-Za-z_{])" -consistent!(renvsubst_0, r"(\\)(?P<head>\$[0-9A-Za-z_{])"); - -// renvsubst-0.1.2: 
r"\$([[:word:]]+)" -consistent!(renvsubst_1, r"\$([[:word:]]+)"); - -// renvsubst-0.1.2: r"\$\{([[:word:]]+)\}" -consistent!(renvsubst_2, r"\$\{([[:word:]]+)\}"); - -// rexpect-0.3.0: r"'[a-z]+'" -consistent!(rexpect_0, r"'[a-z]+'"); - -// rexpect-0.3.0: r"^\d{4}-\d{2}-\d{2}$" -consistent!(rexpect_1, r"^\d{4}-\d{2}-\d{2}$"); - -// rexpect-0.3.0: r"-\d{2}-" -consistent!(rexpect_2, r"-\d{2}-"); - -// luther-0.1.0: "^a(b|c)c*$" -consistent!(luther_0, "^a(b|c)c*$"); - -// little_boxes-1.6.0: r"(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]" -consistent!(little_boxes_0, r"(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]"); - -// libimagentrytag-0.8.0: "^[a-zA-Z]([a-zA-Z0-9_-]*)$" -consistent!(libimagentrytag_0, "^[a-zA-Z]([a-zA-Z0-9_-]*)$"); - -// libimaginteraction-0.8.0: r"^[Yy](\n?)$" -consistent!(libimaginteraction_0, r"^[Yy](\n?)$"); - -// libimaginteraction-0.8.0: r"^[Nn](\n?)$" -consistent!(libimaginteraction_1, r"^[Nn](\n?)$"); - -// libimagutil-0.8.0: "^(?P<KEY>([^=]*))=(.*)$" -consistent!(libimagutil_0, "^(?P<KEY>([^=]*))=(.*)$"); - -// libimagutil-0.8.0: "(.*)=(\"(?P<QVALUE>([^\"]*))\"|(?P<VALUE>(.*)))$" -consistent!(libimagutil_1, "(.*)=(\"(?P<QVALUE>([^\"]*))\"|(?P<VALUE>(.*)))$"); - -// linux_ip-0.1.0: r"\s+" -consistent!(linux_ip_0, r"\s+"); - -// linux_ip-0.1.0: r"\s*[\n\r]+\s*" -consistent!(linux_ip_1, r"\s*[\n\r]+\s*"); - -// linux_ip-0.1.0: r"^([0-9a-fA-F\.:/]+)\s+dev\s+([a-z0-9\.]+)\s*(.*)$" -consistent!(linux_ip_2, r"^([0-9a-fA-F\.:/]+)\s+dev\s+([a-z0-9\.]+)\s*(.*)$"); - -// linux_ip-0.1.0: r"^([0-9a-fA-F\.:/]+|default)\s+via\s+([a-z0-9\.:]+)\s+dev\s+([a-z0-9\.]+)\s*(.*)$" -consistent!(linux_ip_3, r"^([0-9a-fA-F\.:/]+|default)\s+via\s+([a-z0-9\.:]+)\s+dev\s+([a-z0-9\.]+)\s*(.*)$"); - -// linux_ip-0.1.0: r"^(blackhole)\s+([0-9a-fA-F\.:/]+)$" -consistent!(linux_ip_4, r"^(blackhole)\s+([0-9a-fA-F\.:/]+)$"); - -// linux_ip-0.1.0: r"^(unreachable)\s+([0-9a-fA-F\.:/]+)\s+dev\s+([a-z0-9\.]+)\s+(.*)$" -consistent!( - linux_ip_5, - 
r"^(unreachable)\s+([0-9a-fA-F\.:/]+)\s+dev\s+([a-z0-9\.]+)\s+(.*)$" -); - -// linux_ip-0.1.0: r"\s*[\n\r]+\s*" -consistent!(linux_ip_6, r"\s*[\n\r]+\s*"); - -// linux_ip-0.1.0: r"^\d+:\s+([a-zA-Z0-9\.-]+)(@\S+)*:\s+(.*)$" -consistent!(linux_ip_7, r"^\d+:\s+([a-zA-Z0-9\.-]+)(@\S+)*:\s+(.*)$"); - -// linux_ip-0.1.0: r"\s*link/ether\s+([a-f0-9:]+)\s+.*" -consistent!(linux_ip_8, r"\s*link/ether\s+([a-f0-9:]+)\s+.*"); - -// linux_ip-0.1.0: r"\s*inet[6]*\s+([0-9a-f:\./]+)\s+.*" -consistent!(linux_ip_9, r"\s*inet[6]*\s+([0-9a-f:\./]+)\s+.*"); - -// linky-0.1.4: r"[^\w -]" -consistent!(linky_0, r"[^\w -]"); - -// linky-0.1.4: r"^(.*):(\d+): [^ ]* ([^ ]*)$" -consistent!(linky_1, r"^(.*):(\d+): [^ ]* ([^ ]*)$"); - -// limonite-0.2.1: r"^(\d{4}-\d{2}-\d{2})-(\d{3})-(.+)$" -consistent!(limonite_0, r"^(\d{4}-\d{2}-\d{2})-(\d{3})-(.+)$"); - -// process-queue-0.1.1: r"^[a-zA-Z]+$" -consistent!(process_queue_0, r"^[a-zA-Z]+$"); - -// pronghorn-0.1.2: r"^\{([a-zA-Z_]+)\}$" -consistent!(pronghorn_0, r"^\{([a-zA-Z_]+)\}$"); - -// protocol-ftp-client-0.1.1: "(?m:^(\\d{3}) (.+)\r$)" -consistent!(protocol_ftp_client_0, "(?m:^(\\d{3}) (.+)\r$)"); - -// protocol-ftp-client-0.1.1: "\"(.+)\"" -consistent!(protocol_ftp_client_1, "\"(.+)\""); - -// protocol-ftp-client-0.1.1: "(\\w+) [Tt]ype: (\\w+)" -consistent!(protocol_ftp_client_2, "(\\w+) [Tt]ype: (\\w+)"); - -// protocol-ftp-client-0.1.1: "(?m:^(\\d{3})-.+\r$)" -consistent!(protocol_ftp_client_3, "(?m:^(\\d{3})-.+\r$)"); - -// protocol-ftp-client-0.1.1: "Entering Passive Mode \\((\\d+),(\\d+),(\\d+),(\\d+),(\\d+),(\\d+)\\)" -consistent!( - protocol_ftp_client_4, - "Entering Passive Mode \\((\\d+),(\\d+),(\\d+),(\\d+),(\\d+),(\\d+)\\)" -); - -// protocol-ftp-client-0.1.1: "(?m:^(.+)\r$)" -consistent!(protocol_ftp_client_5, "(?m:^(.+)\r$)"); - -// protocol-ftp-client-0.1.1: "^([d-])(?:[rwx-]{3}){3} +\\d+ +\\w+ +\\w+ +(\\d+) +(.+) +(.+)$" -consistent!( - protocol_ftp_client_6, - "^([d-])(?:[rwx-]{3}){3} +\\d+ +\\w+ +\\w+ +(\\d+) +(.+) 
+(.+)$" -); - -// article-date-extractor-0.1.1: r"([\./\-_]{0,1}(19|20)\d{2})[\./\-_]{0,1}(([0-3]{0,1}[0-9][\./\-_])|(\w{3,5}[\./\-_]))([0-3]{0,1}[0-9][\./\-]{0,1})" -consistent!(article_date_extractor_0, r"([\./\-_]{0,1}(19|20)\d{2})[\./\-_]{0,1}(([0-3]{0,1}[0-9][\./\-_])|(\w{3,5}[\./\-_]))([0-3]{0,1}[0-9][\./\-]{0,1})"); - -// article-date-extractor-0.1.1: r"(?i)publishdate|pubdate|timestamp|article_date|articledate|date" -consistent!( - article_date_extractor_1, - r"(?i)publishdate|pubdate|timestamp|article_date|articledate|date" -); - -// arthas_plugin-0.1.1: r"type\((.*)\)" -consistent!(arthas_plugin_0, r"type\((.*)\)"); - -// arthas_plugin-0.1.1: r"Vec<(.*)>" -consistent!(arthas_plugin_1, r"Vec<(.*)>"); - -// arthas_plugin-0.1.1: r"Option<(.*)>" -consistent!(arthas_plugin_2, r"Option<(.*)>"); - -// arthas_plugin-0.1.1: r"HashMap<[a-z0-9A-Z]+, *(.*)>" -consistent!(arthas_plugin_3, r"HashMap<[a-z0-9A-Z]+, *(.*)>"); - -// arthas_derive-0.1.0: "Vec *< *(.*) *>" -consistent!(arthas_derive_0, "Vec *< *(.*) *>"); - -// arthas_derive-0.1.0: r"Option *< *(.*) *>" -consistent!(arthas_derive_1, r"Option *< *(.*) *>"); - -// arthas_derive-0.1.0: r"HashMap *< *[a-z0-9A-Z]+ *, *(.*) *>" -consistent!(arthas_derive_2, r"HashMap *< *[a-z0-9A-Z]+ *, *(.*) *>"); - -// arpabet-0.2.0: r"^([\w\-\(\)\.']+)\s+([^\s].*)\s*$" -consistent!(arpabet_0, r"^([\w\-\(\)\.']+)\s+([^\s].*)\s*$"); - -// arpabet-0.2.0: r"^;;;\s+" -consistent!(arpabet_1, r"^;;;\s+"); - -// glossy_codegen-0.2.0: r"/\*.*?\*/|//.*" -consistent!(glossy_codegen_0, r"/\*.*?\*/|//.*"); - -// glossy_codegen-0.2.0: "^\\s*#\\s*include\\s+<([:print:]+)>\\s*$" -consistent!(glossy_codegen_1, "^\\s*#\\s*include\\s+<([:print:]+)>\\s*$"); - -// glossy_codegen-0.2.0: "^\\s*#\\s*include\\s+\"([:print:]+)\"\\s*$" -consistent!(glossy_codegen_2, "^\\s*#\\s*include\\s+\"([:print:]+)\"\\s*$"); - -// glossy_codegen-0.2.0: r"^\s*#\s*version\s+(\d+)" -consistent!(glossy_codegen_3, r"^\s*#\s*version\s+(\d+)"); - -// glossy_codegen-0.2.0: 
r"^\s*$" -consistent!(glossy_codegen_4, r"^\s*$"); - -// gluster-1.0.1: r"(?P<addr>via \S+)" -consistent!(gluster_0, r"(?P<addr>via \S+)"); - -// gluster-1.0.1: r"(?P<src>src \S+)" -consistent!(gluster_1, r"(?P<src>src \S+)"); - -// gl_helpers-0.1.7: r"(.*)\[\d+\]" -consistent!(gl_helpers_0, r"(.*)\[\d+\]"); - -// gl_helpers-0.1.7: r"(\d+).(\d+)" -consistent!(gl_helpers_1, r"(\d+).(\d+)"); - -// glr-parser-0.0.1: r"(?P<c>[\\\.\+\*\?\(\)\|\[\]\{\}\^\$])" -consistent!(glr_parser_0, r"(?P<c>[\\\.\+\*\?\(\)\|\[\]\{\}\^\$])"); - -// glr-parser-0.0.1: r"^\w+$" -consistent!(glr_parser_1, r"^\w+$"); - -// glr-parser-0.0.1: "'[^']+'" -consistent!(glr_parser_2, "'[^']+'"); - -// hoodlum-0.5.0: r"(?m)//.*" -consistent!(hoodlum_0, r"(?m)//.*"); - -// form-checker-0.2.2: r"^1\d{10}$" -consistent!(form_checker_0, r"^1\d{10}$"); - -// form-checker-0.2.2: r"(?i)^[\w.%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,4}$" -consistent!(form_checker_1, r"(?i)^[\w.%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,4}$"); - -// wikibase-0.2.0: r"(?P<user_agent>[a-zA-Z0-9-_]+/[0-9\.]+)" -consistent!(wikibase_0, r"(?P<user_agent>[a-zA-Z0-9-_]+/[0-9\.]+)"); - -// wifiscanner-0.3.6: r"Cell [0-9]{2,} - Address:" -consistent!(wifiscanner_0, r"Cell [0-9]{2,} - Address:"); - -// wifiscanner-0.3.6: r"([0-9a-zA-Z]{1}[0-9a-zA-Z]{1}[:]{1}){5}[0-9a-zA-Z]{1}[0-9a-zA-Z]{1}" -consistent!( - wifiscanner_1, - r"([0-9a-zA-Z]{1}[0-9a-zA-Z]{1}[:]{1}){5}[0-9a-zA-Z]{1}[0-9a-zA-Z]{1}" -); - -// wifiscanner-0.3.6: r"Signal level=(\d+)/100" -consistent!(wifiscanner_2, r"Signal level=(\d+)/100"); - -// bbcode-1.0.2: r"(?s)\[b\](.*?)\[/b\]" -consistent!(bbcode_0, r"(?s)\[b\](.*?)\[/b\]"); - -// bbcode-1.0.2: r"(?s)\[i\](.*?)\[/i\]" -consistent!(bbcode_1, r"(?s)\[i\](.*?)\[/i\]"); - -// bbcode-1.0.2: r"(?s)\[u\](.*?)\[/u\]" -consistent!(bbcode_2, r"(?s)\[u\](.*?)\[/u\]"); - -// bbcode-1.0.2: r"(?s)\[s\](.*?)\[/s\]" -consistent!(bbcode_3, r"(?s)\[s\](.*?)\[/s\]"); - -// bbcode-1.0.2: r"(?s)\[size=(\d+)](.*?)\[/size\]" -consistent!(bbcode_4, 
r"(?s)\[size=(\d+)](.*?)\[/size\]"); - -// bbcode-1.0.2: r"(?s)\[color=(.+)](.*?)\[/color\]" -consistent!(bbcode_5, r"(?s)\[color=(.+)](.*?)\[/color\]"); - -// bbcode-1.0.2: r"(?s)\[center\](.*?)\[/center\]" -consistent!(bbcode_6, r"(?s)\[center\](.*?)\[/center\]"); - -// bbcode-1.0.2: r"(?s)\[left\](.*?)\[/left\]" -consistent!(bbcode_7, r"(?s)\[left\](.*?)\[/left\]"); - -// bbcode-1.0.2: r"(?s)\[right\](.*?)\[/right\]" -consistent!(bbcode_8, r"(?s)\[right\](.*?)\[/right\]"); - -// bbcode-1.0.2: r"(?s)\[table\](.*?)\[/table\]" -consistent!(bbcode_9, r"(?s)\[table\](.*?)\[/table\]"); - -// bbcode-1.0.2: r"(?s)\[td\](.*?)\[/td\]" -consistent!(bbcode_10, r"(?s)\[td\](.*?)\[/td\]"); - -// bbcode-1.0.2: r"(?s)\[tr\](.*?)\[/tr\]" -consistent!(bbcode_11, r"(?s)\[tr\](.*?)\[/tr\]"); - -// bbcode-1.0.2: r"(?s)\[th\](.*?)\[/th\]" -consistent!(bbcode_12, r"(?s)\[th\](.*?)\[/th\]"); - -// bbcode-1.0.2: r"(?s)\[url\](.*?)\[/url\]" -consistent!(bbcode_13, r"(?s)\[url\](.*?)\[/url\]"); - -// bbcode-1.0.2: r"(?s)\[url=(.+)\](.*?)\[/url\]" -consistent!(bbcode_14, r"(?s)\[url=(.+)\](.*?)\[/url\]"); - -// bbcode-1.0.2: r"(?s)\[quote\](.*?)\[/quote\]" -consistent!(bbcode_15, r"(?s)\[quote\](.*?)\[/quote\]"); - -// bbcode-1.0.2: r"(?s)\[quote=(.+)\](.*?)\[/quote\]" -consistent!(bbcode_16, r"(?s)\[quote=(.+)\](.*?)\[/quote\]"); - -// bbcode-1.0.2: r"(?s)\[img=(\d+)x(\d+)(\b.*)?\](.*?)\[/img\]" -consistent!(bbcode_17, r"(?s)\[img=(\d+)x(\d+)(\b.*)?\](.*?)\[/img\]"); - -// bbcode-1.0.2: r"(?s)\[img=(.+)(\b.*)?\](.*?)\[/img\]" -consistent!(bbcode_18, r"(?s)\[img=(.+)(\b.*)?\](.*?)\[/img\]"); - -// bbcode-1.0.2: r"(?s)\[img(\b.*)?\](.*?)\[/img\]" -consistent!(bbcode_19, r"(?s)\[img(\b.*)?\](.*?)\[/img\]"); - -// bbcode-1.0.2: r"(?s)\[ol\](.*?)\[/ol\]" -consistent!(bbcode_20, r"(?s)\[ol\](.*?)\[/ol\]"); - -// bbcode-1.0.2: r"(?s)\[ul\](.*?)\[/ul\]" -consistent!(bbcode_21, r"(?s)\[ul\](.*?)\[/ul\]"); - -// bbcode-1.0.2: r"(?s)\[list\](.*?)\[/list\]" -consistent!(bbcode_22, 
r"(?s)\[list\](.*?)\[/list\]"); - -// bbcode-1.0.2: r"(?s)\[youtube\](.*?)\[/youtube\]" -consistent!(bbcode_23, r"(?s)\[youtube\](.*?)\[/youtube\]"); - -// bbcode-1.0.2: r"(?s)\[youtube=(\d+)x(\d+)\](.*?)\[/youtube\]" -consistent!(bbcode_24, r"(?s)\[youtube=(\d+)x(\d+)\](.*?)\[/youtube\]"); - -// bbcode-1.0.2: r"(?s)\[li\](.*?)\[/li\]" -consistent!(bbcode_25, r"(?s)\[li\](.*?)\[/li\]"); - -// block-utils-0.5.0: r"loop\d+" -consistent!(block_utils_0, r"loop\d+"); - -// block-utils-0.5.0: r"ram\d+" -consistent!(block_utils_1, r"ram\d+"); - -// block-utils-0.5.0: r"md\d+" -consistent!(block_utils_2, r"md\d+"); - -// kvvliveapi-0.1.0: r"^([1-9]) min$" -consistent!(kvvliveapi_0, r"^([1-9]) min$"); - -// rfc822_sanitizer-0.3.3: r"(\d{2}):(\d{2}):(\d{2})" -consistent!(rfc822_sanitizer_0, r"(\d{2}):(\d{2}):(\d{2})"); - -// rfc822_sanitizer-0.3.3: r"(\d{1,2}):(\d{1,2}):(\d{1,2})" -consistent!(rfc822_sanitizer_1, r"(\d{1,2}):(\d{1,2}):(\d{1,2})"); - -// faker-0.0.4: r"[2-9]" -consistent!(faker_0, r"[2-9]"); - -// faker-0.0.4: r"[1-9]" -consistent!(faker_1, r"[1-9]"); - -// faker-0.0.4: r"[0-9]" -consistent!(faker_2, r"[0-9]"); - -// faker-0.0.4: r"\d{10}" -consistent!(faker_3, r"\d{10}"); - -// faker-0.0.4: r"\d{1}" -consistent!(faker_4, r"\d{1}"); - -// faker-0.0.4: r"^\w+" -consistent!(faker_5, r"^\w+"); - -// faker-0.0.4: r"^\w+" -consistent!(faker_6, r"^\w+"); - -// faker-0.0.4: r"^(\w+\.? ?){2,3}$" -consistent!(faker_7, r"^(\w+\.? 
?){2,3}$"); - -// faker-0.0.4: r"^[A-Z][a-z]+\.?$" -consistent!(faker_8, r"^[A-Z][a-z]+\.?$"); - -// faker-0.0.4: r"^[A-Z][A-Za-z]*\.?$" -consistent!(faker_9, r"^[A-Z][A-Za-z]*\.?$"); - -// faker-0.0.4: r"http://lorempixel.com/100/100/\w+" -consistent!(faker_10, r"http://lorempixel.com/100/100/\w+"); - -// faker-0.0.4: r"http://lorempixel.com/100/100/cats" -consistent!(faker_11, r"http://lorempixel.com/100/100/cats"); - -// fancy-regex-0.1.0: "(?i:ß)" -consistent!(fancy_regex_0, "(?i:ß)"); - -// fancy-regex-0.1.0: "(?i:\\x{0587})" -consistent!(fancy_regex_1, "(?i:\\x{0587})"); - -// fancy-regex-0.1.0: "^\\\\([!-/:-@\\[-`\\{-~aftnrv]|[0-7]{1,3}|x[0-9a-fA-F]{2}|x\\{[0-9a-fA-F]{1,6}\\})" -consistent!(fancy_regex_2, "^\\\\([!-/:-@\\[-`\\{-~aftnrv]|[0-7]{1,3}|x[0-9a-fA-F]{2}|x\\{[0-9a-fA-F]{1,6}\\})"); - -// fancy-prompt-0.1.5: r"/([^/])[^/]+/" -consistent!(fancy_prompt_0, r"/([^/])[^/]+/"); - -// fancy-prompt-0.1.5: r"^([^:]+):.*?(?::([^:]+))?$" -consistent!(fancy_prompt_1, r"^([^:]+):.*?(?::([^:]+))?$"); - -// fanta-0.2.0: r"^(/?__\w+__)/(.*)" -consistent!(fanta_0, r"^(/?__\w+__)/(.*)"); - -// fanta-cli-0.1.1: r"(.)([A-Z])" -consistent!(fanta_cli_0, r"(.)([A-Z])"); - -// fanta-cli-0.1.1: "\\{:[^\\s]+\\}" -consistent!(fanta_cli_1, "\\{:[^\\s]+\\}"); - -// amethyst_tools-0.7.1: "(?P<last>[^\r])\n" -consistent!(amethyst_tools_0, "(?P<last>[^\r])\n"); - -// amigo-0.3.1: r"^-?\d+(\.\d)?" -consistent!(amigo_0, r"^-?\d+(\.\d)?"); - -// amigo-0.3.1: r"^[a-zA-Z_]+[\w-]*[!?_]?" 
-consistent!(amigo_1, r"^[a-zA-Z_]+[\w-]*[!?_]?"); - -// amigo-0.3.1: r"^\(" -consistent!(amigo_2, r"^\("); - -// amigo-0.3.1: r"^\)" -consistent!(amigo_3, r"^\)"); - -// amigo-0.3.1: r"^\s+" -consistent!(amigo_4, r"^\s+"); - -// ethcore-logger-1.12.0: "\x1b\\[[^m]+m" -consistent!(ethcore_logger_0, "\x1b\\[[^m]+m"); - -// dash2html-1.0.1: r"__.*?__" -consistent!(dash2html_0, r"__.*?__"); - -// dash2html-1.0.1: r"(?i)@(?:time|clipboard|cursor|date)" -consistent!(dash2html_1, r"(?i)@(?:time|clipboard|cursor|date)"); - -// os_type-2.0.0: r"^Microsoft Windows \[Version\s(\d+\.\d+\.\d+)\]$" -consistent!(os_type_0, r"^Microsoft Windows \[Version\s(\d+\.\d+\.\d+)\]$"); - -// os_type-2.0.0: r"ProductName:\s([\w\s]+)\n" -consistent!(os_type_1, r"ProductName:\s([\w\s]+)\n"); - -// os_type-2.0.0: r"ProductVersion:\s(\w+\.\w+\.\w+)" -consistent!(os_type_2, r"ProductVersion:\s(\w+\.\w+\.\w+)"); - -// os_type-2.0.0: r"BuildVersion:\s(\w+)" -consistent!(os_type_3, r"BuildVersion:\s(\w+)"); - -// os_type-2.0.0: r"(\w+) Linux release" -consistent!(os_type_4, r"(\w+) Linux release"); - -// os_type-2.0.0: r"release\s([\w\.]+)" -consistent!(os_type_5, r"release\s([\w\.]+)"); - -// os_type-2.0.0: r"Distributor ID:\s(\w+)" -consistent!(os_type_6, r"Distributor ID:\s(\w+)"); - -// os_type-2.0.0: r"Release:\s([\w\.]+)" -consistent!(os_type_7, r"Release:\s([\w\.]+)"); - -// bindgen-0.37.0: r"typename type\-parameter\-\d+\-\d+::.+" -consistent!(bindgen_0, r"typename type\-parameter\-\d+\-\d+::.+"); - -// imap-0.8.1: "^+(.*)\r\n" -consistent!(imap_0, "^+(.*)\r\n"); - -// image-base64-0.1.0: r"^ffd8ffe0" -consistent!(image_base64_0, r"^ffd8ffe0"); - -// image-base64-0.1.0: r"^89504e47" -consistent!(image_base64_1, r"^89504e47"); - -// image-base64-0.1.0: r"^47494638" -consistent!(image_base64_2, r"^47494638"); - -// json-pointer-0.3.2: "^(/([^/~]|~[01])*)*$" -consistent!(json_pointer_0, "^(/([^/~]|~[01])*)*$"); - -// json-pointer-0.3.2: "^#(/([^/~%]|~[01]|%[0-9a-fA-F]{2})*)*$" 
-consistent!(json_pointer_1, "^#(/([^/~%]|~[01]|%[0-9a-fA-F]{2})*)*$"); - -// mysql_common-0.7.0: r"^5.5.5-(\d{1,2})\.(\d{1,2})\.(\d{1,3})-MariaDB" -consistent!(mysql_common_0, r"^5.5.5-(\d{1,2})\.(\d{1,2})\.(\d{1,3})-MariaDB"); - -// mysql_common-0.7.0: r"^(\d{1,2})\.(\d{1,2})\.(\d{1,3})(.*)" -consistent!(mysql_common_1, r"^(\d{1,2})\.(\d{1,2})\.(\d{1,3})(.*)"); - -// government_id-0.1.0: r"^[0-9]{4}[0-9A-Z]{2}[0-9]{3}$" -consistent!(government_id_0, r"^[0-9]{4}[0-9A-Z]{2}[0-9]{3}$"); - -// ohmers-0.1.1: r"UniqueIndexViolation: (\w+)" -consistent!(ohmers_0, r"UniqueIndexViolation: (\w+)"); - -// eliza-1.0.0: r"(.*) you are (.*)" -consistent!(eliza_0, r"(.*) you are (.*)"); - -// eliza-1.0.0: r"(.*) you are (.*)" -consistent!(eliza_1, r"(.*) you are (.*)"); - -// eliza-1.0.0: r"(.*) you are (.*)" -consistent!(eliza_2, r"(.*) you are (.*)"); - -// chema-0.0.5: "^\\s*\\*" -consistent!(chema_0, "^\\s*\\*"); - -// chema-0.0.5: "^\\s*@(\\w+)\\s+(.*)" -consistent!(chema_1, "^\\s*@(\\w+)\\s+(.*)"); - -// chord3-0.3.0: r"^\s*#" -consistent!(chord3_0, r"^\s*#"); - -// chord3-0.3.0: r"\{(?P<cmd>\w+)(?::?\s*(?P<arg>.*))?\}" -consistent!(chord3_1, r"\{(?P<cmd>\w+)(?::?\s*(?P<arg>.*))?\}"); - -// chord3-0.3.0: r"\{(eot|end_of_tab):?\s*" -consistent!(chord3_2, r"\{(eot|end_of_tab):?\s*"); - -// chord3-0.3.0: r"([^\[]*)(?:\[([^\]]*)\])?" 
-consistent!(chord3_3, r"([^\[]*)(?:\[([^\]]*)\])?"); - -// checkmail-0.1.1: "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$" -consistent!(checkmail_0, "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"); - -// cntk-0.2.1: r"\b\w\w+\b" -consistent!(cntk_0, r"\b\w\w+\b"); - -// cntk-0.2.1: r"\b\w\w+\b" -consistent!(cntk_1, r"\b\w\w+\b"); - -// cniguru-0.1.0: r"\(id: (\d+)\)" -consistent!(cniguru_0, r"\(id: (\d+)\)"); - -// upm_lib-0.3.0: r"^(\d+)\.(\d+)\.(\d+)(?:-([\dA-Za-z-]+(?:\.[\dA-Za-z-]+)*))?(?:\+([\dA-Za-z-]+(?:\.[\dA-Za-z-]+)*))?$" -consistent!(upm_lib_0, r"^(\d+)\.(\d+)\.(\d+)(?:-([\dA-Za-z-]+(?:\.[\dA-Za-z-]+)*))?(?:\+([\dA-Za-z-]+(?:\.[\dA-Za-z-]+)*))?$"); - -// avro-0.2.1: r"^\s*(\*+(\s+))?" -consistent!(avro_0, r"^\s*(\*+(\s+))?"); - -// avro-0.2.1: r"^\s*(\*+)?" -consistent!(avro_1, r"^\s*(\*+)?"); - -// nomi-0.0.2: "[0-9]+" -consistent!(nomi_0, "[0-9]+"); - -// nodes-0.1.0: "([0-9]+)@(?:nodes|n)?:([^@]+)?" -consistent!(nodes_0, "([0-9]+)@(?:nodes|n)?:([^@]+)?"); - -// not-stakkr-1.0.0: r"(?i)in (\d+) (second|minute|hour|day|week)s?" 
-consistent!(not_stakkr_0, r"(?i)in (\d+) (second|minute|hour|day|week)s?"); - -// notetxt-0.0.1: "^([A-Za-z0-9 -_:]+)\n-+\n" -consistent!(notetxt_0, "^([A-Za-z0-9 -_:]+)\n-+\n"); - -// nail-0.1.0-pre.0: r"^-?[0-9]+(\.[0-9]+)?([eE]-?[0-9]+)?$" -consistent!(nail_0, r"^-?[0-9]+(\.[0-9]+)?([eE]-?[0-9]+)?$"); - -// nail-0.1.0-pre.0: r"^-?[0-9]+$" -consistent!(nail_1, r"^-?[0-9]+$"); - -// askalono-0.2.0: r"[^\w\s\pP]+" -consistent!(askalono_0, r"[^\w\s\pP]+"); - -// askalono-0.2.0: r"(?x)[ \t\p{Zs} \\ / \| \x2044 ]+" -consistent!(askalono_1, r"(?x)[ \t\p{Zs} \\ / \| \x2044 ]+"); - -// askalono-0.2.0: r"\p{Pd}+" -consistent!(askalono_2, r"\p{Pd}+"); - -// askalono-0.2.0: r"\p{Ps}+" -consistent!(askalono_3, r"\p{Ps}+"); - -// askalono-0.2.0: r"\p{Pe}+" -consistent!(askalono_4, r"\p{Pe}+"); - -// askalono-0.2.0: r"\p{Pc}+" -consistent!(askalono_5, r"\p{Pc}+"); - -// askalono-0.2.0: r"[©Ⓒⓒ]" -consistent!(askalono_6, r"[©Ⓒⓒ]"); - -// askalono-0.2.0: r"[\r\n\v\f]" -consistent!(askalono_7, r"[\r\n\v\f]"); - -// askalono-0.2.0: r"\n{3,}" -consistent!(askalono_8, r"\n{3,}"); - -// askalono-0.2.0: r"[^\w\s]+" -consistent!(askalono_9, r"[^\w\s]+"); - -// askalono-0.2.0: r"\s+" -consistent!(askalono_10, r"\s+"); - -// assembunny_plus-0.0.3: r"[^0-9a-zA-Z_]" -consistent!(assembunny_plus_0, r"[^0-9a-zA-Z_]"); - -// assembunny_plus-0.0.3: r"[0-9]" -consistent!(assembunny_plus_1, r"[0-9]"); - -// salt-compressor-0.4.0: r"(?m)^Minion (\S*) did not respond\. No job will be sent\.$" -consistent!( - salt_compressor_0, - r"(?m)^Minion (\S*) did not respond\. 
No job will be sent\.$" -); - -// sabisabi-0.4.1: r"</?[^>]+?>" -consistent!(sabisabi_0, r"</?[^>]+?>"); - -// sabisabi-0.4.1: r"\([^)]*\)" -consistent!(sabisabi_1, r"\([^)]*\)"); - -// sassers-0.13.5-h28: "@import \"([^\"]*)\";" -consistent!(sassers_0, "@import \"([^\"]*)\";"); - -// shadowsocks-0.6.2: r"[A-Za-z\d-]{1,63}$" -consistent!(shadowsocks_0, r"[A-Za-z\d-]{1,63}$"); - -// shkeleton-0.1.5: "[abc]+" -consistent!(shkeleton_0, "[abc]+"); - -// shellwords-0.1.0: r"([^A-Za-z0-9_\-.,:/@\n])" -consistent!(shellwords_0, r"([^A-Za-z0-9_\-.,:/@\n])"); - -// shellwords-0.1.0: r"\n" -consistent!(shellwords_1, r"\n"); - -// shush-0.1.5: "(?P<num>[0-9]+)(?P<units>[dhms])" -consistent!(shush_0, "(?P<num>[0-9]+)(?P<units>[dhms])"); - -// woothee-0.8.0: r"(?:Chrome|CrMo|CriOS)/([.0-9]+)" -consistent!(woothee_0, r"(?:Chrome|CrMo|CriOS)/([.0-9]+)"); - -// woothee-0.8.0: r"Vivaldi/([.0-9]+)" -consistent!(woothee_1, r"Vivaldi/([.0-9]+)"); - -// woothee-0.8.0: r"Firefox/([.0-9]+)" -consistent!(woothee_2, r"Firefox/([.0-9]+)"); - -// woothee-0.8.0: r"^Mozilla/[.0-9]+ \((?:Mobile|Tablet);(?:.*;)? rv:([.0-9]+)\) Gecko/[.0-9]+ Firefox/[.0-9]+$" -consistent!(woothee_3, r"^Mozilla/[.0-9]+ \((?:Mobile|Tablet);(?:.*;)? 
rv:([.0-9]+)\) Gecko/[.0-9]+ Firefox/[.0-9]+$"); - -// woothee-0.8.0: r"FxiOS/([.0-9]+)" -consistent!(woothee_4, r"FxiOS/([.0-9]+)"); - -// woothee-0.8.0: r"\(([^;)]+);FOMA;" -consistent!(woothee_5, r"\(([^;)]+);FOMA;"); - -// woothee-0.8.0: r"jig browser[^;]+; ([^);]+)" -consistent!(woothee_6, r"jig browser[^;]+; ([^);]+)"); - -// woothee-0.8.0: r"(?i)rss(?:reader|bar|[-_ /;()]|[ +]*/)" -consistent!(woothee_7, r"(?i)rss(?:reader|bar|[-_ /;()]|[ +]*/)"); - -// woothee-0.8.0: r"(?i)(?:bot|crawler|spider)(?:[-_ ./;@()]|$)" -consistent!(woothee_8, r"(?i)(?:bot|crawler|spider)(?:[-_ ./;@()]|$)"); - -// woothee-0.8.0: r"(?i)(?:feed|web) ?parser" -consistent!(woothee_9, r"(?i)(?:feed|web) ?parser"); - -// woothee-0.8.0: r"(?i)watch ?dog" -consistent!(woothee_10, r"(?i)watch ?dog"); - -// woothee-0.8.0: r"Edge/([.0-9]+)" -consistent!(woothee_11, r"Edge/([.0-9]+)"); - -// woothee-0.8.0: r"MSIE ([.0-9]+);" -consistent!(woothee_12, r"MSIE ([.0-9]+);"); - -// woothee-0.8.0: r"Version/([.0-9]+)" -consistent!(woothee_13, r"Version/([.0-9]+)"); - -// woothee-0.8.0: r"Opera[/ ]([.0-9]+)" -consistent!(woothee_14, r"Opera[/ ]([.0-9]+)"); - -// woothee-0.8.0: r"OPR/([.0-9]+)" -consistent!(woothee_15, r"OPR/([.0-9]+)"); - -// woothee-0.8.0: r"Version/([.0-9]+)" -consistent!(woothee_16, r"Version/([.0-9]+)"); - -// woothee-0.8.0: r"(?:SoftBank|Vodafone|J-PHONE)/[.0-9]+/([^ /;()]+)" -consistent!(woothee_17, r"(?:SoftBank|Vodafone|J-PHONE)/[.0-9]+/([^ /;()]+)"); - -// woothee-0.8.0: r"Trident/([.0-9]+);" -consistent!(woothee_18, r"Trident/([.0-9]+);"); - -// woothee-0.8.0: r" rv:([.0-9]+)" -consistent!(woothee_19, r" rv:([.0-9]+)"); - -// woothee-0.8.0: r"IEMobile/([.0-9]+);" -consistent!(woothee_20, r"IEMobile/([.0-9]+);"); - -// woothee-0.8.0: r"(?:WILLCOM|DDIPOCKET);[^/]+/([^ /;()]+)" -consistent!(woothee_21, r"(?:WILLCOM|DDIPOCKET);[^/]+/([^ /;()]+)"); - -// woothee-0.8.0: r"Windows ([ .a-zA-Z0-9]+)[;\\)]" -consistent!(woothee_22, r"Windows ([ .a-zA-Z0-9]+)[;\\)]"); - -// 
woothee-0.8.0: r"^Phone(?: OS)? ([.0-9]+)" -consistent!(woothee_23, r"^Phone(?: OS)? ([.0-9]+)"); - -// woothee-0.8.0: r"iP(hone;|ad;|od) .*like Mac OS X" -consistent!(woothee_24, r"iP(hone;|ad;|od) .*like Mac OS X"); - -// woothee-0.8.0: r"Version/([.0-9]+)" -consistent!(woothee_25, r"Version/([.0-9]+)"); - -// woothee-0.8.0: r"rv:(\d+\.\d+\.\d+)" -consistent!(woothee_26, r"rv:(\d+\.\d+\.\d+)"); - -// woothee-0.8.0: r"FreeBSD ([^;\)]+);" -consistent!(woothee_27, r"FreeBSD ([^;\)]+);"); - -// woothee-0.8.0: r"CrOS ([^\)]+)\)" -consistent!(woothee_28, r"CrOS ([^\)]+)\)"); - -// woothee-0.8.0: r"Android[- ](\d+\.\d+(?:\.\d+)?)" -consistent!(woothee_29, r"Android[- ](\d+\.\d+(?:\.\d+)?)"); - -// woothee-0.8.0: r"PSP \(PlayStation Portable\); ([.0-9]+)\)" -consistent!(woothee_30, r"PSP \(PlayStation Portable\); ([.0-9]+)\)"); - -// woothee-0.8.0: r"PLAYSTATION 3;? ([.0-9]+)\)" -consistent!(woothee_31, r"PLAYSTATION 3;? ([.0-9]+)\)"); - -// woothee-0.8.0: r"PlayStation Vita ([.0-9]+)\)" -consistent!(woothee_32, r"PlayStation Vita ([.0-9]+)\)"); - -// woothee-0.8.0: r"PlayStation 4 ([.0-9]+)\)" -consistent!(woothee_33, r"PlayStation 4 ([.0-9]+)\)"); - -// woothee-0.8.0: r"BB10(?:.+)Version/([.0-9]+) " -consistent!(woothee_34, r"BB10(?:.+)Version/([.0-9]+) "); - -// woothee-0.8.0: r"BlackBerry(?:\d+)/([.0-9]+) " -consistent!(woothee_35, r"BlackBerry(?:\d+)/([.0-9]+) "); - -// woothee-0.8.0: r"; CPU(?: iPhone)? OS (\d+_\d+(?:_\d+)?) like Mac OS X" -consistent!( - woothee_36, - r"; CPU(?: iPhone)? OS (\d+_\d+(?:_\d+)?) 
like Mac OS X" -); - -// woothee-0.8.0: r"Mac OS X (10[._]\d+(?:[._]\d+)?)(?:\)|;)" -consistent!(woothee_37, r"Mac OS X (10[._]\d+(?:[._]\d+)?)(?:\)|;)"); - -// woothee-0.8.0: r"^(?:Apache-HttpClient/|Jakarta Commons-HttpClient/|Java/)" -consistent!( - woothee_38, - r"^(?:Apache-HttpClient/|Jakarta Commons-HttpClient/|Java/)" -); - -// woothee-0.8.0: r"[- ]HttpClient(/|$)" -consistent!(woothee_39, r"[- ]HttpClient(/|$)"); - -// woothee-0.8.0: r"^(?:PHP|WordPress|CakePHP|PukiWiki|PECL::HTTP)(?:/| |$)" -consistent!( - woothee_40, - r"^(?:PHP|WordPress|CakePHP|PukiWiki|PECL::HTTP)(?:/| |$)" -); - -// woothee-0.8.0: r"(?:PEAR HTTP_Request|HTTP_Request)(?: class|2)" -consistent!(woothee_41, r"(?:PEAR HTTP_Request|HTTP_Request)(?: class|2)"); - -// woothee-0.8.0: r"(?:Rome Client |UnwindFetchor/|ia_archiver |Summify |PostRank/)" -consistent!( - woothee_42, - r"(?:Rome Client |UnwindFetchor/|ia_archiver |Summify |PostRank/)" -); - -// woothee-0.8.0: r"Sleipnir/([.0-9]+)" -consistent!(woothee_43, r"Sleipnir/([.0-9]+)"); - -// word_replace-0.0.3: r"@@[a-z|A-Z|\d]+@@" -consistent!(word_replace_0, r"@@[a-z|A-Z|\d]+@@"); - -// wordcount-0.1.0: r"\w+" -consistent!(wordcount_0, r"\w+"); - -// just-0.3.12: "^([^=]+)=(.*)$" -consistent!(just_0, "^([^=]+)=(.*)$"); - -// emote-0.1.0: r":[a-zA-Z_]+?:" -consistent!(emote_0, r":[a-zA-Z_]+?:"); - -// emojicons-1.0.1: r":([a-zA-Z0-9_+-]+):" -consistent!(emojicons_0, r":([a-zA-Z0-9_+-]+):"); - -// git2_codecommit-0.1.2: r"git-codecommit\.([a-z0-9-]+)\.amazonaws\.com" -consistent!( - git2_codecommit_0, - r"git-codecommit\.([a-z0-9-]+)\.amazonaws\.com" -); - -// git-workarea-3.1.2: r"^submodule\.(?P<name>.*)\.(?P<key>[^=]*)=(?P<value>.*)$" -consistent!( - git_workarea_0, - r"^submodule\.(?P<name>.*)\.(?P<key>[^=]*)=(?P<value>.*)$" -); - -// git-shell-enforce-directory-1.0.0: r"^(?P<command>git-(?:receive|upload)-pack) '(?P<path>.+)'$" -consistent!( - git_shell_enforce_directory_0, - r"^(?P<command>git-(?:receive|upload)-pack) 
'(?P<path>.+)'$" -); - -// git-journal-1.6.3: r"[ \n]:(.*?):" -consistent!(git_journal_0, r"[ \n]:(.*?):"); - -// git-find-0.3.2: r"^git@(?P<host>[[:alnum:]\._-]+):(?P<path>[[:alnum:]\._\-/]+).git$" -consistent!( - git_find_0, - r"^git@(?P<host>[[:alnum:]\._-]+):(?P<path>[[:alnum:]\._\-/]+).git$" -); - -// gitlab-api-0.6.0: r"private_token=\w{20}" -consistent!(gitlab_api_0, r"private_token=\w{20}"); - -// td-client-0.7.0: "^(http://|https://)" -consistent!(td_client_0, "^(http://|https://)"); - -// karaconv-0.3.0: r"--(?P<type>[a-zA-Z]+)-- (?P<contents>.*)" -consistent!(karaconv_0, r"--(?P<type>[a-zA-Z]+)-- (?P<contents>.*)"); - -// katana-1.0.2: r"(?P<comp>et al\.)(?:\.)" -consistent!(katana_0, r"(?P<comp>et al\.)(?:\.)"); - -// katana-1.0.2: r"\.{3}" -consistent!(katana_1, r"\.{3}"); - -// katana-1.0.2: r"(?P<number>[0-9]+)\.(?P<decimal>[0-9]+)" -consistent!(katana_2, r"(?P<number>[0-9]+)\.(?P<decimal>[0-9]+)"); - -// katana-1.0.2: r"\s\.(?P<nums>[0-9]+)" -consistent!(katana_3, r"\s\.(?P<nums>[0-9]+)"); - -// katana-1.0.2: r"(?:[A-Za-z]\.){2,}" -consistent!(katana_4, r"(?:[A-Za-z]\.){2,}"); - -// katana-1.0.2: r"(?P<init>[A-Z])(?P<point>\.)" -consistent!(katana_5, r"(?P<init>[A-Z])(?P<point>\.)"); - -// katana-1.0.2: r"(?P<title>[A-Z][a-z]{1,3})(\.)" -consistent!(katana_6, r"(?P<title>[A-Z][a-z]{1,3})(\.)"); - -// katana-1.0.2: r"&==&(?P<p>[.!?])" -consistent!(katana_7, r"&==&(?P<p>[.!?])"); - -// katana-1.0.2: r"&\^&(?P<p>[.!?])" -consistent!(katana_8, r"&\^&(?P<p>[.!?])"); - -// katana-1.0.2: r"&\*\*&(?P<p>[.!?])" -consistent!(katana_9, r"&\*\*&(?P<p>[.!?])"); - -// katana-1.0.2: r"&=&(?P<p>[.!?])" -consistent!(katana_10, r"&=&(?P<p>[.!?])"); - -// katana-1.0.2: r"&##&(?P<p>[.!?])" -consistent!(katana_11, r"&##&(?P<p>[.!?])"); - -// katana-1.0.2: r"&\$&(?P<p>[.!?])" -consistent!(katana_12, r"&\$&(?P<p>[.!?])"); - -// kailua_syntax-1.1.0: r"@(?:_|\d+(?:/\d+(?:-\d+)?)?)" -consistent!(kailua_syntax_0, r"@(?:_|\d+(?:/\d+(?:-\d+)?)?)"); - -// kailua_syntax-1.1.0: 
r"<(\d+)>" -consistent!(kailua_syntax_1, r"<(\d+)>"); - -// ftp-3.0.1: r"\((\d+),(\d+),(\d+),(\d+),(\d+),(\d+)\)" -consistent!(ftp_0, r"\((\d+),(\d+),(\d+),(\d+),(\d+),(\d+)\)"); - -// ftp-3.0.1: r"\b(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})\b" -consistent!(ftp_1, r"\b(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})\b"); - -// ftp-3.0.1: r"\s+(\d+)\s*$" -consistent!(ftp_2, r"\s+(\d+)\s*$"); - -// vat-0.1.0: r"<countryCode>(.*?)</countryCode>" -consistent!(vat_0, r"<countryCode>(.*?)</countryCode>"); - -// vat-0.1.0: r"<vatNumber>(.*?)</vatNumber>" -consistent!(vat_1, r"<vatNumber>(.*?)</vatNumber>"); - -// vat-0.1.0: r"<name>(.*?)</name>" -consistent!(vat_2, r"<name>(.*?)</name>"); - -// vat-0.1.0: r"<address>(?s)(.*?)(?-s)</address>" -consistent!(vat_3, r"<address>(?s)(.*?)(?-s)</address>"); - -// vat-0.1.0: r"<valid>(true|false)</valid>" -consistent!(vat_4, r"<valid>(true|false)</valid>"); - -// vat-0.1.0: r"^ATU\d{8}$" -consistent!(vat_5, r"^ATU\d{8}$"); - -// vat-0.1.0: r"^BE0?\d{9, 10}$" -consistent!(vat_6, r"^BE0?\d{9, 10}$"); - -// vat-0.1.0: r"^BG\d{9,10}$" -consistent!(vat_7, r"^BG\d{9,10}$"); - -// vat-0.1.0: r"^HR\d{11}$" -consistent!(vat_8, r"^HR\d{11}$"); - -// vat-0.1.0: r"^CY\d{8}[A-Z]$" -consistent!(vat_9, r"^CY\d{8}[A-Z]$"); - -// vat-0.1.0: r"^CZ\d{8,10}$" -consistent!(vat_10, r"^CZ\d{8,10}$"); - -// vat-0.1.0: r"^DK\d{8}$" -consistent!(vat_11, r"^DK\d{8}$"); - -// vat-0.1.0: r"^EE\d{9}$" -consistent!(vat_12, r"^EE\d{9}$"); - -// vat-0.1.0: r"^FI\d{8}$" -consistent!(vat_13, r"^FI\d{8}$"); - -// vat-0.1.0: r"^FR[A-HJ-NP-Z0-9][A-HJ-NP-Z0-9]\d{9}$" -consistent!(vat_14, r"^FR[A-HJ-NP-Z0-9][A-HJ-NP-Z0-9]\d{9}$"); - -// vat-0.1.0: r"^DE\d{9}$" -consistent!(vat_15, r"^DE\d{9}$"); - -// vat-0.1.0: r"^EL\d{9}$" -consistent!(vat_16, r"^EL\d{9}$"); - -// vat-0.1.0: r"^HU\d{8}$" -consistent!(vat_17, r"^HU\d{8}$"); - -// vat-0.1.0: r"^IE\d[A-Z0-9\+\*]\d{5}[A-Z]{1,2}$" -consistent!(vat_18, r"^IE\d[A-Z0-9\+\*]\d{5}[A-Z]{1,2}$"); - -// vat-0.1.0: r"^IT\d{11}$" 
-consistent!(vat_19, r"^IT\d{11}$"); - -// vat-0.1.0: r"^LV\d{11}$" -consistent!(vat_20, r"^LV\d{11}$"); - -// vat-0.1.0: r"^LT(\d{9}|\d{12})$" -consistent!(vat_21, r"^LT(\d{9}|\d{12})$"); - -// vat-0.1.0: r"^LU\d{8}$" -consistent!(vat_22, r"^LU\d{8}$"); - -// vat-0.1.0: r"^MT\d{8}$" -consistent!(vat_23, r"^MT\d{8}$"); - -// vat-0.1.0: r"^NL\d{9}B\d{2}$" -consistent!(vat_24, r"^NL\d{9}B\d{2}$"); - -// vat-0.1.0: r"^PL\d{10}$" -consistent!(vat_25, r"^PL\d{10}$"); - -// vat-0.1.0: r"^PT\d{9}$" -consistent!(vat_26, r"^PT\d{9}$"); - -// vat-0.1.0: r"^RO\d{2,10}$" -consistent!(vat_27, r"^RO\d{2,10}$"); - -// vat-0.1.0: r"^SK\d{10}$" -consistent!(vat_28, r"^SK\d{10}$"); - -// vat-0.1.0: r"^SI\d{8}$" -consistent!(vat_29, r"^SI\d{8}$"); - -// vat-0.1.0: r"^ES[A-Z0-9]\d{7}[A-Z0-9]$" -consistent!(vat_30, r"^ES[A-Z0-9]\d{7}[A-Z0-9]$"); - -// vat-0.1.0: r"^SE\d{10}01$" -consistent!(vat_31, r"^SE\d{10}01$"); - -// vat-0.1.0: r"^(GB(GD|HA)\d{3}|GB\d{9}|GB\d{12})$" -consistent!(vat_32, r"^(GB(GD|HA)\d{3}|GB\d{9}|GB\d{12})$"); - -// eve-0.1.1: r"\{\{(.*)\}\}" -consistent!(eve_0, r"\{\{(.*)\}\}"); - -// egc-0.1.2: "^mio" -consistent!(egc_0, "^mio"); - -// pew-0.2.3: "" -consistent!(pew_0, ""); - -// pew-0.2.3: "" -consistent!(pew_1, ""); - -// mob-0.4.3: "y" -consistent!(mob_0, "y"); - -// lit-0.2.8: "@([a-z]+)" -consistent!(lit_0, "@([a-z]+)"); - -// lit-0.2.8: "([A-Z-]+):(.*)" -consistent!(lit_1, "([A-Z-]+):(.*)"); - -// lit-0.2.8: "^[a-zA-Z_][a-zA-Z0-9_]*$" -consistent!(lit_2, "^[a-zA-Z_][a-zA-Z0-9_]*$"); - -// avm-1.0.1: r"\d+\.\d+\.\d+" -consistent!(avm_0, r"\d+\.\d+\.\d+"); - -// avm-1.0.1: r"\d+\.\d+\.\d+" -consistent!(avm_1, r"\d+\.\d+\.\d+"); - -// orm-0.2.0: r"^Vec<(.+)>$" -consistent!(orm_0, r"^Vec<(.+)>$"); - -// sgf-0.1.5: r"\\(\r\n|\n\r|\n|\r)" -consistent!(sgf_0, r"\\(\r\n|\n\r|\n|\r)"); - -// sgf-0.1.5: r"\\(.)" -consistent!(sgf_1, r"\\(.)"); - -// sgf-0.1.5: r"\r\n|\n\r|\n|\r" -consistent!(sgf_2, r"\r\n|\n\r|\n|\r"); - -// sgf-0.1.5: r"([\]\\:])" 
-consistent!(sgf_3, r"([\]\\:])"); - -// dok-0.2.0: "^Bearer realm=\"(.+?)\",service=\"(.+?)\",scope=\"(.+?)\"$" -consistent!( - dok_0, - "^Bearer realm=\"(.+?)\",service=\"(.+?)\",scope=\"(.+?)\"$" -); - -// d20-0.1.0: r"([+-]?\s*\d+[dD]\d+|[+-]?\s*\d+)" -consistent!(d20_0, r"([+-]?\s*\d+[dD]\d+|[+-]?\s*\d+)"); - -// dvb-0.3.0: "E" -consistent!(dvb_0, "E"); - -// dvb-0.3.0: "^F" -consistent!(dvb_1, "^F"); - -// dvb-0.3.0: "^S" -consistent!(dvb_2, "^S"); - -// ger-0.2.0: r"Change-Id: (I[a-f0-9]{40})$" -consistent!(ger_0, r"Change-Id: (I[a-f0-9]{40})$"); - -// ger-0.2.0: r"(refs|ref|fix|fixes|close|closes)\s+([A-Z]{2,5}-[0-9]{1,5})$" -consistent!( - ger_1, - r"(refs|ref|fix|fixes|close|closes)\s+([A-Z]{2,5}-[0-9]{1,5})$" -); - -// n5-0.2.1: r"(\d+)(\.(\d+))?(\.(\d+))?(.*)" -consistent!(n5_0, r"(\d+)(\.(\d+))?(\.(\d+))?(.*)"); - -// po-0.1.4: r"[A-Za-z0-9]" -consistent!(po_0, r"[A-Za-z0-9]"); - -// carnix-0.8.5: "path is (‘|')?([^’'\n]*)(’|')?" -consistent!(carnix_0, "path is (‘|')?([^’'\n]*)(’|')?"); - -// carnix-0.8.5: r"^(\S*) (\d*)\.(\d*)\.(\d*)(-(\S*))?(.*)?" -consistent!(carnix_1, r"^(\S*) (\d*)\.(\d*)\.(\d*)(-(\S*))?(.*)?"); - -// carnix-0.8.5: r"(\d*)\.(\d*)\.(\d*)(-(\S*))?" -consistent!(carnix_2, r"(\d*)\.(\d*)\.(\d*)(-(\S*))?"); - -// carnix-0.8.5: r"(\S*)-(\d*)\.(\d*)\.(\d*)(-(\S*))?" 
-consistent!(carnix_3, r"(\S*)-(\d*)\.(\d*)\.(\d*)(-(\S*))?"); - -// caseless-0.2.1: r"^# CaseFolding-(\d+)\.(\d+)\.(\d+).txt$" -consistent!(caseless_0, r"^# CaseFolding-(\d+)\.(\d+)\.(\d+).txt$"); - -// caseless-0.2.1: r"^([0-9A-F]+); [CF]; ([0-9A-F ]+);" -consistent!(caseless_1, r"^([0-9A-F]+); [CF]; ([0-9A-F ]+);"); - -// cabot-0.2.0: "\r?\n\r?\n" -consistent!(cabot_0, "\r?\n\r?\n"); - -// cabot-0.2.0: "\r?\n" -consistent!(cabot_1, "\r?\n"); - -// card-validate-2.2.1: r"^600" -consistent!(card_validate_0, r"^600"); - -// card-validate-2.2.1: r"^5019" -consistent!(card_validate_1, r"^5019"); - -// card-validate-2.2.1: r"^4" -consistent!(card_validate_2, r"^4"); - -// card-validate-2.2.1: r"^(5[1-5]|2[2-7])" -consistent!(card_validate_3, r"^(5[1-5]|2[2-7])"); - -// card-validate-2.2.1: r"^3[47]" -consistent!(card_validate_4, r"^3[47]"); - -// card-validate-2.2.1: r"^3[0689]" -consistent!(card_validate_5, r"^3[0689]"); - -// card-validate-2.2.1: r"^6([045]|22)" -consistent!(card_validate_6, r"^6([045]|22)"); - -// card-validate-2.2.1: r"^(62|88)" -consistent!(card_validate_7, r"^(62|88)"); - -// card-validate-2.2.1: r"^35" -consistent!(card_validate_8, r"^35"); - -// card-validate-2.2.1: r"^[0-9]+$" -consistent!(card_validate_9, r"^[0-9]+$"); - -// cargo-testify-0.3.0: r"\d{1,} passed.*filtered out" -consistent!(cargo_testify_0, r"\d{1,} passed.*filtered out"); - -// cargo-testify-0.3.0: r"error(:|\[).*" -consistent!(cargo_testify_1, r"error(:|\[).*"); - -// cargo-wix-0.0.5: r"<(.*?)>" -consistent!(cargo_wix_0, r"<(.*?)>"); - -// cargo-wix-0.0.5: r"<(.*?)>" -consistent!(cargo_wix_1, r"<(.*?)>"); - -// cargo-wix-0.0.5: r"<(.*?)>" -consistent!(cargo_wix_2, r"<(.*?)>"); - -// cargo-wix-0.0.5: r"<(.*?)>" -consistent!(cargo_wix_3, r"<(.*?)>"); - -// cargo-incremental-0.1.23: r"(?m)^incremental: re-using (\d+) out of (\d+) modules$" -consistent!( - cargo_incremental_0, - r"(?m)^incremental: re-using (\d+) out of (\d+) modules$" -); - -// cargo-incremental-0.1.23: 
"(?m)(warning|error): (.*)\n --> ([^:]:\\d+:\\d+)$" -consistent!( - cargo_incremental_1, - "(?m)(warning|error): (.*)\n --> ([^:]:\\d+:\\d+)$" -); - -// cargo-incremental-0.1.23: r"(?m)^test (.*) \.\.\. (\w+)" -consistent!(cargo_incremental_2, r"(?m)^test (.*) \.\.\. (\w+)"); - -// cargo-incremental-0.1.23: r"(?m)(\d+) passed; (\d+) failed; (\d+) ignored; \d+ measured" -consistent!( - cargo_incremental_3, - r"(?m)(\d+) passed; (\d+) failed; (\d+) ignored; \d+ measured" -); - -// cargo-testjs-0.1.2: r"^[^-]+-[0-9a-f]+\.js$" -consistent!(cargo_testjs_0, r"^[^-]+-[0-9a-f]+\.js$"); - -// cargo-tarpaulin-0.6.2: r"\s*//" -consistent!(cargo_tarpaulin_0, r"\s*//"); - -// cargo-tarpaulin-0.6.2: r"/\*" -consistent!(cargo_tarpaulin_1, r"/\*"); - -// cargo-tarpaulin-0.6.2: r"\*/" -consistent!(cargo_tarpaulin_2, r"\*/"); - -// cargo-culture-kit-0.1.0: r"^fo" -consistent!(cargo_culture_kit_0, r"^fo"); - -// cargo-screeps-0.1.3: "\\s+" -consistent!(cargo_screeps_0, "\\s+"); - -// cargo-brew-0.1.4: r"`(\S+) v([0-9.]+)" -consistent!(cargo_brew_0, r"`(\S+) v([0-9.]+)"); - -// cargo-release-0.10.2: "^\\[.+\\]" -consistent!(cargo_release_0, "^\\[.+\\]"); - -// cargo-release-0.10.2: "^\\[\\[.+\\]\\]" -consistent!(cargo_release_1, "^\\[\\[.+\\]\\]"); - -// cargo-edit-0.3.0-beta.1: r"^https://github.com/([-_0-9a-zA-Z]+)/([-_0-9a-zA-Z]+)(/|.git)?$" -consistent!( - cargo_edit_0, - r"^https://github.com/([-_0-9a-zA-Z]+)/([-_0-9a-zA-Z]+)(/|.git)?$" -); - -// cargo-edit-0.3.0-beta.1: r"^https://gitlab.com/([-_0-9a-zA-Z]+)/([-_0-9a-zA-Z]+)(/|.git)?$" -consistent!( - cargo_edit_1, - r"^https://gitlab.com/([-_0-9a-zA-Z]+)/([-_0-9a-zA-Z]+)(/|.git)?$" -); - -// cargo-disassemble-0.1.1: ".*" -consistent!(cargo_disassemble_0, ".*"); - -// cargo-demangle-0.1.2: r"(?m)(?P<symbol>_ZN[0-9]+.*E)" -consistent!(cargo_demangle_0, r"(?m)(?P<symbol>_ZN[0-9]+.*E)"); - -// cargo-coverage-annotations-0.1.5: r"^\s*\}(?:\)*;?|\s*else\s*\{)$" -consistent!(cargo_coverage_annotations_0, 
r"^\s*\}(?:\)*;?|\s*else\s*\{)$"); - -// cargo-urlcrate-1.0.1: "[\u{001b}\u{009b}][\\[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-PRZcf-nqry=><]" -consistent!(cargo_urlcrate_0, "[\u{001b}\u{009b}][\\[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-PRZcf-nqry=><]"); - -// cargo-script-0.2.8: r"^\s*\*( |$)" -consistent!(cargo_script_0, r"^\s*\*( |$)"); - -// cargo-script-0.2.8: r"^(\s+)" -consistent!(cargo_script_1, r"^(\s+)"); - -// cargo-script-0.2.8: r"/\*|\*/" -consistent!(cargo_script_2, r"/\*|\*/"); - -// cargo-script-0.2.8: r"^\s*//!" -consistent!(cargo_script_3, r"^\s*//!"); - -// cargo-script-0.2.8: r"^#![^\[].*?(\r\n|\n)" -consistent!(cargo_script_4, r"^#![^\[].*?(\r\n|\n)"); - -// cargo-update-1.5.2: r"cargo-install-update\.exe-v.+" -consistent!(cargo_update_0, r"cargo-install-update\.exe-v.+"); - -// canteen-0.4.1: r"^<(?:(int|uint|str|float|path):)?([\w_][a-zA-Z0-9_]*)>$" -consistent!( - canteen_0, - r"^<(?:(int|uint|str|float|path):)?([\w_][a-zA-Z0-9_]*)>$" -); - -// thruster-cli-0.1.3: r"(.)([A-Z])" -consistent!(thruster_cli_0, r"(.)([A-Z])"); - -// thieves-cant-0.1.0: "([Z]+)$" -consistent!(thieves_cant_0, "([Z]+)$"); - -// codeowners-0.1.3: r"^@\S+/\S+" -consistent!(codeowners_0, r"^@\S+/\S+"); - -// codeowners-0.1.3: r"^@\S+" -consistent!(codeowners_1, r"^@\S+"); - -// codeowners-0.1.3: r"^\S+@\S+" -consistent!(codeowners_2, r"^\S+@\S+"); - -// conserve-0.4.2: r"^b0000 {21} complete 20[-0-9T:+]+\s +\d+s\n$" -consistent!(conserve_0, r"^b0000 {21} complete 20[-0-9T:+]+\s +\d+s\n$"); - -// commodore-0.3.0: r"(?P<greeting>\S+?) (?P<name>\S+?)$" -consistent!(commodore_0, r"(?P<greeting>\S+?) 
(?P<name>\S+?)$"); - -// corollary-0.3.0: r"([ \t]*)```haskell([\s\S]*?)```" -consistent!(corollary_0, r"([ \t]*)```haskell([\s\S]*?)```"); - -// corollary-0.3.0: r"\b((?:a|b|t)\d*)\b" -consistent!(corollary_1, r"\b((?:a|b|t)\d*)\b"); - -// colorizex-0.1.3: "NB" -consistent!(colorizex_0, "NB"); - -// colorstring-0.0.1: r"(?i)\[[a-z0-9_-]+\]" -consistent!(colorstring_0, r"(?i)\[[a-z0-9_-]+\]"); - -// colorstring-0.0.1: r"^(?i)(\[[a-z0-9_-]+\])+" -consistent!(colorstring_1, r"^(?i)(\[[a-z0-9_-]+\])+"); - -// cosmogony-0.3.0: "name:(.+)" -consistent!(cosmogony_0, "name:(.+)"); - -// cobalt-bin-0.12.1: r"(?m:^ {0,3}\[[^\]]+\]:.+$)" -consistent!(cobalt_bin_0, r"(?m:^ {0,3}\[[^\]]+\]:.+$)"); - -// comrak-0.2.12: r"[^\p{L}\p{M}\p{N}\p{Pc} -]" -consistent!(comrak_0, r"[^\p{L}\p{M}\p{N}\p{Pc} -]"); - -// content-blocker-0.2.3: "" -consistent!(content_blocker_0, ""); - -// content-blocker-0.2.3: "(?i)hi" -consistent!(content_blocker_1, "(?i)hi"); - -// content-blocker-0.2.3: "http[s]?://domain.org" -consistent!(content_blocker_2, "http[s]?://domain.org"); - -// content-blocker-0.2.3: "(?i)http[s]?://domain.org" -consistent!(content_blocker_3, "(?i)http[s]?://domain.org"); - -// content-blocker-0.2.3: "http://domain.org" -consistent!(content_blocker_4, "http://domain.org"); - -// content-blocker-0.2.3: "http://domain.org" -consistent!(content_blocker_5, "http://domain.org"); - -// content-blocker-0.2.3: "ad.html" -consistent!(content_blocker_6, "ad.html"); - -// content-blocker-0.2.3: "ad.html" -consistent!(content_blocker_7, "ad.html"); - -// content-blocker-0.2.3: "http://domain.org" -consistent!(content_blocker_8, "http://domain.org"); - -// content-blocker-0.2.3: "http://domain.org/nocookies.sjs" -consistent!(content_blocker_9, "http://domain.org/nocookies.sjs"); - -// content-blocker-0.2.3: "http://domain.org/nocookies.sjs" -consistent!(content_blocker_10, "http://domain.org/nocookies.sjs"); - -// content-blocker-0.2.3: "http://domain.org/hideme.jpg" 
-consistent!(content_blocker_11, "http://domain.org/hideme.jpg"); - -// content-blocker-0.2.3: "http://domain.org/ok.html" -consistent!(content_blocker_12, "http://domain.org/ok.html"); - -// content-blocker-0.2.3: "http://domain.org/ok.html\\?except_this=1" -consistent!(content_blocker_13, "http://domain.org/ok.html\\?except_this=1"); - -// victoria-dom-0.1.2: "[A-Za-z0-9=]" -consistent!(victoria_dom_0, "[A-Za-z0-9=]"); - -// numbat-1.0.0: r"^nsq://" -consistent!(numbat_0, r"^nsq://"); - -// airkorea-0.1.2: r"[\s\t\r\n]" -consistent!(airkorea_0, r"[\s\t\r\n]"); - -// airkorea-0.1.2: r"([\{\[,])|([\}\]])" -consistent!(airkorea_1, r"([\{\[,])|([\}\]])"); - -// airkorea-0.1.2: r"[^.\d]+$" -consistent!(airkorea_2, r"[^.\d]+$"); - -// rofl-0.0.1: r"\b" -// consistent!(rofl_0, r"\b"); - -// rogcat-0.2.15: r"--------- beginning of.*" -consistent!(rogcat_0, r"--------- beginning of.*"); - -// rogcat-0.2.15: r"a|e|i|o|u" -consistent!(rogcat_1, r"a|e|i|o|u"); - -// rogcat-0.2.15: r"^(\d+)([kMG])$" -consistent!(rogcat_2, r"^(\d+)([kMG])$"); - -// media_filename-0.1.4: "\\.([A-Za-z0-9]{2,4})$" -consistent!(media_filename_0, "\\.([A-Za-z0-9]{2,4})$"); - -// media_filename-0.1.4: "([0-9]{3,4}p|[0-9]{3,4}x[0-9]{3,4})" -consistent!(media_filename_1, "([0-9]{3,4}p|[0-9]{3,4}x[0-9]{3,4})"); - -// media_filename-0.1.4: "(?:^\\[([^]]+)\\]|- ?([^-]+)$)" -consistent!(media_filename_2, "(?:^\\[([^]]+)\\]|- ?([^-]+)$)"); - -// media_filename-0.1.4: "(?:[eE]([0-9]{2,3})|[^0-9A-Za-z]([0-9]{2,3})(?:v[0-9])?[^0-9A-Za-z])" -consistent!( - media_filename_3, - "(?:[eE]([0-9]{2,3})|[^0-9A-Za-z]([0-9]{2,3})(?:v[0-9])?[^0-9A-Za-z])" -); - -// media_filename-0.1.4: "[sS]([0-9]{1,2})" -consistent!(media_filename_4, "[sS]([0-9]{1,2})"); - -// media_filename-0.1.4: "((?i)(?:PPV.)?[HP]DTV|(?:HD)?CAM|BRRIP|[^a-z]TS[^a-z]|(?:PPV )?WEB.?DL(?: DVDRip)?|HDRip|DVDRip|CamRip|W[EB]BRip|BluRay|BD|DVD|DvDScr|hdtv)" -consistent!(media_filename_5, "((?i)(?:PPV.)?[HP]DTV|(?:HD)?CAM|BRRIP|[^a-z]TS[^a-z]|(?:PPV 
)?WEB.?DL(?: DVDRip)?|HDRip|DVDRip|CamRip|W[EB]BRip|BluRay|BD|DVD|DvDScr|hdtv)"); - -// media_filename-0.1.4: "((19[0-9]|20[01])[0-9])" -consistent!(media_filename_6, "((19[0-9]|20[01])[0-9])"); - -// media_filename-0.1.4: "((?i)xvid|x264|h\\.?264)" -consistent!(media_filename_7, "((?i)xvid|x264|h\\.?264)"); - -// media_filename-0.1.4: "((?i)MP3|DD5\\.?1|Dual[- ]Audio|LiNE|DTS|AAC(?:\\.?2\\.0)?|AC3(?:\\.5\\.1)?)" -consistent!(media_filename_8, "((?i)MP3|DD5\\.?1|Dual[- ]Audio|LiNE|DTS|AAC(?:\\.?2\\.0)?|AC3(?:\\.5\\.1)?)"); - -// media_filename-0.1.4: "\\[([0-9A-F]{8})\\]" -consistent!(media_filename_9, "\\[([0-9A-F]{8})\\]"); - -// termimage-0.3.2: r"(\d+)[xX](\d+)" -consistent!(termimage_0, r"(\d+)[xX](\d+)"); - -// teensy-0.1.0: r".*(\d{4}-\d{2}-\d{2}).*" -consistent!(teensy_0, r".*(\d{4}-\d{2}-\d{2}).*"); - -// telescreen-0.1.3: r"<@(.+)>" -consistent!(telescreen_0, r"<@(.+)>"); - -// tempus_fugit-0.4.4: r"^(\d+)" -consistent!(tempus_fugit_0, r"^(\d+)"); - -// fselect-0.4.1: "(\\?|\\.|\\*|\\[|\\]|\\(|\\)|\\^|\\$)" -consistent!(fselect_0, "(\\?|\\.|\\*|\\[|\\]|\\(|\\)|\\^|\\$)"); - -// fselect-0.4.1: "(%|_|\\?|\\.|\\*|\\[|\\]|\\(|\\)|\\^|\\$)" -consistent!(fselect_1, "(%|_|\\?|\\.|\\*|\\[|\\]|\\(|\\)|\\^|\\$)"); - -// fs_eventbridge-0.1.0: r"^([A-Z]+)(?:\s(.+))?\s*" -consistent!(fs_eventbridge_0, r"^([A-Z]+)(?:\s(.+))?\s*"); - -// joseki-0.0.1: r"(\w{1,2})\[(.+?)\]" -consistent!(joseki_0, r"(\w{1,2})\[(.+?)\]"); - -// tweetr-0.2.1: r"(?i)in (\d+) (second|minute|hour|day|week)s?" 
-consistent!(tweetr_0, r"(?i)in (\d+) (second|minute|hour|day|week)s?"); - -// bullet_core-0.1.1: "^(?u:[0-9])+" -consistent!(bullet_core_0, "^(?u:[0-9])+"); - -// bullet_core-0.1.1: "^(?u:[0-9])+(?u:\\.)(?u:[0-9])+" -consistent!(bullet_core_1, "^(?u:[0-9])+(?u:\\.)(?u:[0-9])+"); - -// bullet_core-0.1.1: "^(?u:[A-Za-zª-ªµ-µº-ºÀ-ÖØ-öø-ˁˆ-ˑˠ-ˤˬ-ˬˮ-ˮͰ-ʹͶ-ͷͺ-ͽͿ-ͿΆ-ΆΈ-ΊΌ-ΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-Ֆՙ-ՙա-ևא-תװ-ײؠ-يٮ-ٯٱ-ۓە-ەۥ-ۦۮ-ۯۺ-ۼۿ-ۿܐ-ܐܒ-ܯݍ-ޥޱ-ޱߊ-ߪߴ-ߵߺ-ߺࠀ-ࠕࠚ-ࠚࠤ-ࠤࠨ-ࠨࡀ-ࡘࢠ-ࢴऄ-हऽ-ऽॐ-ॐक़-ॡॱ-ঀঅ-ঌএ-ঐও-নপ-রল-লশ-হঽ-ঽৎ-ৎড়-ঢ়য়-ৡৰ-ৱਅ-ਊਏ-ਐਓ-ਨਪ-ਰਲ-ਲ਼ਵ-ਸ਼ਸ-ਹਖ਼-ੜਫ਼-ਫ਼ੲ-ੴઅ-ઍએ-ઑઓ-નપ-રલ-ળવ-હઽ-ઽૐ-ૐૠ-ૡૹ-ૹଅ-ଌଏ-ଐଓ-ନପ-ରଲ-ଳଵ-ହଽ-ଽଡ଼-ଢ଼ୟ-ୡୱ-ୱஃ-ஃஅ-ஊஎ-ஐஒ-கங-சஜ-ஜஞ-டண-தந-பம-ஹௐ-ௐఅ-ఌఎ-ఐఒ-నప-హఽ-ఽౘ-ౚౠ-ౡಅ-ಌಎ-ಐಒ-ನಪ-ಳವ-ಹಽ-ಽೞ-ೞೠ-ೡೱ-ೲഅ-ഌഎ-ഐഒ-ഺഽ-ഽൎ-ൎൟ-ൡൺ-ൿඅ-ඖක-නඳ-රල-ලව-ෆก-ะา-ำเ-ๆກ-ຂຄ-ຄງ-ຈຊ-ຊຍ-ຍດ-ທນ-ຟມ-ຣລ-ລວ-ວສ-ຫອ-ະາ-ຳຽ-ຽເ-ໄໆ-ໆໜ-ໟༀ-ༀཀ-ཇཉ-ཬྈ-ྌက-ဪဿ-ဿၐ-ၕၚ-ၝၡ-ၡၥ-ၦၮ-ၰၵ-ႁႎ-ႎႠ-ჅჇ-ჇჍ-Ⴭა-ჺჼ-ቈቊ-ቍቐ-ቖቘ-ቘቚ-ቝበ-ኈኊ-ኍነ-ኰኲ-ኵኸ-ኾዀ-ዀዂ-ዅወ-ዖዘ-ጐጒ-ጕጘ-ፚᎀ-ᎏᎠ-Ᏽᏸ-ᏽᐁ-ᙬᙯ-ᙿᚁ-ᚚᚠ-ᛪᛱ-ᛸᜀ-ᜌᜎ-ᜑᜠ-ᜱᝀ-ᝑᝠ-ᝬᝮ-ᝰក-ឳៗ-ៗៜ-ៜᠠ-ᡷᢀ-ᢨᢪ-ᢪᢰ-ᣵᤀ-ᤞᥐ-ᥭᥰ-ᥴᦀ-ᦫᦰ-ᧉᨀ-ᨖᨠ-ᩔᪧ-ᪧᬅ-ᬳᭅ-ᭋᮃ-ᮠᮮ-ᮯᮺ-ᯥᰀ-ᰣᱍ-ᱏᱚ-ᱽᳩ-ᳬᳮ-ᳱᳵ-ᳶᴀ-ᶿḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙ-ὙὛ-ὛὝ-ὝὟ-ώᾀ-ᾴᾶ-ᾼι-ιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼⁱ-ⁱⁿ-ⁿₐ-ₜℂ-ℂℇ-ℇℊ-ℓℕ-ℕℙ-ℝℤ-ℤΩ-Ωℨ-ℨK-ℭℯ-ℹℼ-ℿⅅ-ⅉⅎ-ⅎↃ-ↄⰀ-Ⱞⰰ-ⱞⱠ-ⳤⳫ-ⳮⳲ-ⳳⴀ-ⴥⴧ-ⴧⴭ-ⴭⴰ-ⵧⵯ-ⵯⶀ-ⶖⶠ-ⶦⶨ-ⶮⶰ-ⶶⶸ-ⶾⷀ-ⷆⷈ-ⷎⷐ-ⷖⷘ-ⷞⸯ-ⸯ々-〆〱-〵〻-〼ぁ-ゖゝ-ゟァ-ヺー-ヿㄅ-ㄭㄱ-ㆎㆠ-ㆺㇰ-ㇿ㐀-䶵一-鿕ꀀ-ꒌꓐ-ꓽꔀ-ꘌꘐ-ꘟꘪ-ꘫꙀ-ꙮꙿ-ꚝꚠ-ꛥꜗ-ꜟꜢ-ꞈꞋ-ꞭꞰ-ꞷꟷ-ꠁꠃ-ꠅꠇ-ꠊꠌ-ꠢꡀ-ꡳꢂ-ꢳꣲ-ꣷꣻ-ꣻꣽ-ꣽꤊ-ꤥꤰ-ꥆꥠ-ꥼꦄ-ꦲꧏ-ꧏꧠ-ꧤꧦ-ꧯꧺ-ꧾꨀ-ꨨꩀ-ꩂꩄ-ꩋꩠ-ꩶꩺ-ꩺꩾ-ꪯꪱ-ꪱꪵ-ꪶꪹ-ꪽꫀ-ꫀꫂ-ꫂꫛ-ꫝꫠ-ꫪꫲ-ꫴꬁ-ꬆꬉ-ꬎꬑ-ꬖꬠ-ꬦꬨ-ꬮꬰ-ꭚꭜ-ꭥꭰ-ꯢ가-힣ힰ-ퟆퟋ-ퟻ豈-舘並-龎ff-stﬓ-ﬗיִ-יִײַ-ﬨשׁ-זּטּ-לּמּ-מּנּ-סּףּ-פּצּ-ﮱﯓ-ﴽﵐ-ﶏﶒ-ﷇﷰ-ﷻﹰ-ﹴﹶ-ﻼA-Za-zヲ-하-ᅦᅧ-ᅬᅭ-ᅲᅳ-ᅵ𐀀-𐀋𐀍-𐀦𐀨-𐀺𐀼-𐀽𐀿-𐁍𐁐-𐁝𐂀-𐃺𐊀-𐊜𐊠-𐋐𐌀-𐌟𐌰-𐍀𐍂-𐍉𐍐-𐍵𐎀-𐎝𐎠-𐏃𐏈-𐏏𐐀-𐒝𐔀-𐔧𐔰-𐕣𐘀-𐜶𐝀-𐝕𐝠-𐝧𐠀-𐠅𐠈-𐠈𐠊-𐠵𐠷-𐠸𐠼-𐠼𐠿-𐡕𐡠-𐡶𐢀-𐢞𐣠-𐣲𐣴-𐣵𐤀-𐤕𐤠-𐤹𐦀-𐦷𐦾-𐦿𐨀-𐨀𐨐-𐨓𐨕-𐨗𐨙-𐨳𐩠-𐩼𐪀-𐪜𐫀-𐫇𐫉-𐫤𐬀-𐬵𐭀-𐭕𐭠-𐭲𐮀-𐮑𐰀-𐱈𐲀-𐲲𐳀-𐳲𑀃-𑀷𑂃-𑂯𑃐-𑃨𑄃-𑄦𑅐-𑅲𑅶-𑅶𑆃-𑆲𑇁-𑇄𑇚-𑇚𑇜-𑇜𑈀-𑈑𑈓-𑈫𑊀-𑊆𑊈-𑊈𑊊-𑊍𑊏-𑊝𑊟-𑊨𑊰-𑋞𑌅-𑌌𑌏-𑌐𑌓-𑌨𑌪-𑌰𑌲-𑌳𑌵-𑌹𑌽-𑌽𑍐-𑍐𑍝-𑍡𑒀-𑒯𑓄-𑓅𑓇-𑓇𑖀-𑖮𑗘-𑗛𑘀-𑘯𑙄-𑙄𑚀-𑚪𑜀-𑜙𑢠-𑣟𑣿-𑣿𑫀-𑫸𒀀-𒎙𒒀-𒕃𓀀-𓐮𔐀-𔙆𖠀-𖨸𖩀-𖩞𖫐-𖫭𖬀-𖬯𖭀-𖭃𖭣-𖭷𖭽-𖮏𖼀-𖽄𖽐-𖽐𖾓-𖾟𛀀-𛀁𛰀-𛱪𛱰-𛱼𛲀-𛲈𛲐-𛲙𝐀-𝑔𝑖-𝒜𝒞-𝒟𝒢-𝒢𝒥-𝒦𝒩-𝒬𝒮-𝒹𝒻-𝒻𝒽-𝓃𝓅-𝔅𝔇-𝔊𝔍-𝔔𝔖-𝔜𝔞-𝔹𝔻-𝔾𝕀-𝕄𝕆-𝕆𝕊-𝕐𝕒-𝚥𝚨-𝛀𝛂-𝛚𝛜-𝛺𝛼-𝜔𝜖-𝜴𝜶-𝝎𝝐-𝝮𝝰-𝞈𝞊-𝞨𝞪-𝟂𝟄-𝟋𞠀-𞣄𞸀-𞸃𞸅-𞸟𞸡-𞸢𞸤-𞸤𞸧-𞸧𞸩-𞸲𞸴-𞸷𞸹-𞸹𞸻-𞸻𞹂-𞹂𞹇-𞹇𞹉-𞹉𞹋-𞹋𞹍-𞹏𞹑-𞹒𞹔-𞹔𞹗-𞹗𞹙-𞹙𞹛-𞹛𞹝-𞹝𞹟-𞹟𞹡-𞹢𞹤-𞹤𞹧-𞹪𞹬-𞹲𞹴-𞹷𞹹-𞹼𞹾-𞹾𞺀-𞺉𞺋-𞺛𞺡-𞺣𞺥-𞺩𞺫-𞺻𠀀-𪛖𪜀-𫜴𫝀-𫠝𫠠-𬺡丽-
𪘀])+" -consistent!(bullet_core_2, "^(?u:[A-Za-zª-ªµ-µº-ºÀ-ÖØ-öø-ˁˆ-ˑˠ-ˤˬ-ˬˮ-ˮͰ-ʹͶ-ͷͺ-ͽͿ-ͿΆ-ΆΈ-ΊΌ-ΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-Ֆՙ-ՙա-ևא-תװ-ײؠ-يٮ-ٯٱ-ۓە-ەۥ-ۦۮ-ۯۺ-ۼۿ-ۿܐ-ܐܒ-ܯݍ-ޥޱ-ޱߊ-ߪߴ-ߵߺ-ߺࠀ-ࠕࠚ-ࠚࠤ-ࠤࠨ-ࠨࡀ-ࡘࢠ-ࢴऄ-हऽ-ऽॐ-ॐक़-ॡॱ-ঀঅ-ঌএ-ঐও-নপ-রল-লশ-হঽ-ঽৎ-ৎড়-ঢ়য়-ৡৰ-ৱਅ-ਊਏ-ਐਓ-ਨਪ-ਰਲ-ਲ਼ਵ-ਸ਼ਸ-ਹਖ਼-ੜਫ਼-ਫ਼ੲ-ੴઅ-ઍએ-ઑઓ-નપ-રલ-ળવ-હઽ-ઽૐ-ૐૠ-ૡૹ-ૹଅ-ଌଏ-ଐଓ-ନପ-ରଲ-ଳଵ-ହଽ-ଽଡ଼-ଢ଼ୟ-ୡୱ-ୱஃ-ஃஅ-ஊஎ-ஐஒ-கங-சஜ-ஜஞ-டண-தந-பம-ஹௐ-ௐఅ-ఌఎ-ఐఒ-నప-హఽ-ఽౘ-ౚౠ-ౡಅ-ಌಎ-ಐಒ-ನಪ-ಳವ-ಹಽ-ಽೞ-ೞೠ-ೡೱ-ೲഅ-ഌഎ-ഐഒ-ഺഽ-ഽൎ-ൎൟ-ൡൺ-ൿඅ-ඖක-නඳ-රල-ලව-ෆก-ะา-ำเ-ๆກ-ຂຄ-ຄງ-ຈຊ-ຊຍ-ຍດ-ທນ-ຟມ-ຣລ-ລວ-ວສ-ຫອ-ະາ-ຳຽ-ຽເ-ໄໆ-ໆໜ-ໟༀ-ༀཀ-ཇཉ-ཬྈ-ྌက-ဪဿ-ဿၐ-ၕၚ-ၝၡ-ၡၥ-ၦၮ-ၰၵ-ႁႎ-ႎႠ-ჅჇ-ჇჍ-Ⴭა-ჺჼ-ቈቊ-ቍቐ-ቖቘ-ቘቚ-ቝበ-ኈኊ-ኍነ-ኰኲ-ኵኸ-ኾዀ-ዀዂ-ዅወ-ዖዘ-ጐጒ-ጕጘ-ፚᎀ-ᎏᎠ-Ᏽᏸ-ᏽᐁ-ᙬᙯ-ᙿᚁ-ᚚᚠ-ᛪᛱ-ᛸᜀ-ᜌᜎ-ᜑᜠ-ᜱᝀ-ᝑᝠ-ᝬᝮ-ᝰក-ឳៗ-ៗៜ-ៜᠠ-ᡷᢀ-ᢨᢪ-ᢪᢰ-ᣵᤀ-ᤞᥐ-ᥭᥰ-ᥴᦀ-ᦫᦰ-ᧉᨀ-ᨖᨠ-ᩔᪧ-ᪧᬅ-ᬳᭅ-ᭋᮃ-ᮠᮮ-ᮯᮺ-ᯥᰀ-ᰣᱍ-ᱏᱚ-ᱽᳩ-ᳬᳮ-ᳱᳵ-ᳶᴀ-ᶿḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙ-ὙὛ-ὛὝ-ὝὟ-ώᾀ-ᾴᾶ-ᾼι-ιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼⁱ-ⁱⁿ-ⁿₐ-ₜℂ-ℂℇ-ℇℊ-ℓℕ-ℕℙ-ℝℤ-ℤΩ-Ωℨ-ℨK-ℭℯ-ℹℼ-ℿⅅ-ⅉⅎ-ⅎↃ-ↄⰀ-Ⱞⰰ-ⱞⱠ-ⳤⳫ-ⳮⳲ-ⳳⴀ-ⴥⴧ-ⴧⴭ-ⴭⴰ-ⵧⵯ-ⵯⶀ-ⶖⶠ-ⶦⶨ-ⶮⶰ-ⶶⶸ-ⶾⷀ-ⷆⷈ-ⷎⷐ-ⷖⷘ-ⷞⸯ-ⸯ々-〆〱-〵〻-〼ぁ-ゖゝ-ゟァ-ヺー-ヿㄅ-ㄭㄱ-ㆎㆠ-ㆺㇰ-ㇿ㐀-䶵一-鿕ꀀ-ꒌꓐ-ꓽꔀ-ꘌꘐ-ꘟꘪ-ꘫꙀ-ꙮꙿ-ꚝꚠ-ꛥꜗ-ꜟꜢ-ꞈꞋ-ꞭꞰ-ꞷꟷ-ꠁꠃ-ꠅꠇ-ꠊꠌ-ꠢꡀ-ꡳꢂ-ꢳꣲ-ꣷꣻ-ꣻꣽ-ꣽꤊ-ꤥꤰ-ꥆꥠ-ꥼꦄ-ꦲꧏ-ꧏꧠ-ꧤꧦ-ꧯꧺ-ꧾꨀ-ꨨꩀ-ꩂꩄ-ꩋꩠ-ꩶꩺ-ꩺꩾ-ꪯꪱ-ꪱꪵ-ꪶꪹ-ꪽꫀ-ꫀꫂ-ꫂꫛ-ꫝꫠ-ꫪꫲ-ꫴꬁ-ꬆꬉ-ꬎꬑ-ꬖꬠ-ꬦꬨ-ꬮꬰ-ꭚꭜ-ꭥꭰ-ꯢ가-힣ힰ-ퟆퟋ-ퟻ豈-舘並-龎ff-stﬓ-ﬗיִ-יִײַ-ﬨשׁ-זּטּ-לּמּ-מּנּ-סּףּ-פּצּ-ﮱﯓ-ﴽﵐ-ﶏﶒ-ﷇﷰ-ﷻﹰ-ﹴﹶ-ﻼA-Za-zヲ-하-ᅦᅧ-ᅬᅭ-ᅲᅳ-ᅵ𐀀-𐀋𐀍-𐀦𐀨-𐀺𐀼-𐀽𐀿-𐁍𐁐-𐁝𐂀-𐃺𐊀-𐊜𐊠-𐋐𐌀-𐌟𐌰-𐍀𐍂-𐍉𐍐-𐍵𐎀-𐎝𐎠-𐏃𐏈-𐏏𐐀-𐒝𐔀-𐔧𐔰-𐕣𐘀-𐜶𐝀-𐝕𐝠-𐝧𐠀-𐠅𐠈-𐠈𐠊-𐠵𐠷-𐠸𐠼-𐠼𐠿-𐡕𐡠-𐡶𐢀-𐢞𐣠-𐣲𐣴-𐣵𐤀-𐤕𐤠-𐤹𐦀-𐦷𐦾-𐦿𐨀-𐨀𐨐-𐨓𐨕-𐨗𐨙-𐨳𐩠-𐩼𐪀-𐪜𐫀-𐫇𐫉-𐫤𐬀-𐬵𐭀-𐭕𐭠-𐭲𐮀-𐮑𐰀-𐱈𐲀-𐲲𐳀-𐳲𑀃-𑀷𑂃-𑂯𑃐-𑃨𑄃-𑄦𑅐-𑅲𑅶-𑅶𑆃-𑆲𑇁-𑇄𑇚-𑇚𑇜-𑇜𑈀-𑈑𑈓-𑈫𑊀-𑊆𑊈-𑊈𑊊-𑊍𑊏-𑊝𑊟-𑊨𑊰-𑋞𑌅-𑌌𑌏-𑌐𑌓-𑌨𑌪-𑌰𑌲-𑌳𑌵-𑌹𑌽-𑌽𑍐-𑍐𑍝-𑍡𑒀-𑒯𑓄-𑓅𑓇-𑓇𑖀-𑖮𑗘-𑗛𑘀-𑘯𑙄-𑙄𑚀-𑚪𑜀-𑜙𑢠-𑣟𑣿-𑣿𑫀-𑫸𒀀-𒎙𒒀-𒕃𓀀-𓐮𔐀-𔙆𖠀-𖨸𖩀-𖩞𖫐-𖫭𖬀-𖬯𖭀-𖭃𖭣-𖭷𖭽-𖮏𖼀-𖽄𖽐-𖽐𖾓-𖾟𛀀-𛀁𛰀-𛱪𛱰-𛱼𛲀-𛲈𛲐-𛲙𝐀-𝑔𝑖-𝒜𝒞-𝒟𝒢-𝒢𝒥-𝒦𝒩-𝒬𝒮-𝒹𝒻-𝒻𝒽-𝓃𝓅-𝔅𝔇-𝔊𝔍-𝔔𝔖-𝔜𝔞-𝔹𝔻-𝔾𝕀-𝕄𝕆-𝕆𝕊-𝕐𝕒-𝚥𝚨-𝛀𝛂-𝛚𝛜-𝛺𝛼-𝜔𝜖-𝜴𝜶-𝝎𝝐-𝝮𝝰-𝞈𝞊-𝞨𝞪-𝟂𝟄-𝟋𞠀-𞣄𞸀-𞸃𞸅-𞸟𞸡-𞸢𞸤-𞸤𞸧-𞸧𞸩-𞸲𞸴-𞸷𞸹-𞸹𞸻-𞸻𞹂-𞹂𞹇-𞹇𞹉-𞹉𞹋-𞹋𞹍-𞹏𞹑-𞹒𞹔-𞹔𞹗-𞹗𞹙-𞹙𞹛-𞹛𞹝-𞹝𞹟-𞹟𞹡-𞹢𞹤-𞹤𞹧-𞹪𞹬-𞹲𞹴-𞹷𞹹-𞹼𞹾-𞹾𞺀-𞺉𞺋-𞺛𞺡-𞺣𞺥-𞺩𞺫-𞺻𠀀-𪛖𪜀-𫜴𫝀-𫠝𫠠-𬺡丽-𪘀])+"); - -// bullet_core-0.1.1: 
"^(?u:d/d)((?u:[A-Za-zª-ªµ-µº-ºÀ-ÖØ-öø-ˁˆ-ˑˠ-ˤˬ-ˬˮ-ˮͰ-ʹͶ-ͷͺ-ͽͿ-ͿΆ-ΆΈ-ΊΌ-ΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-Ֆՙ-ՙա-ևא-תװ-ײؠ-يٮ-ٯٱ-ۓە-ەۥ-ۦۮ-ۯۺ-ۼۿ-ۿܐ-ܐܒ-ܯݍ-ޥޱ-ޱߊ-ߪߴ-ߵߺ-ߺࠀ-ࠕࠚ-ࠚࠤ-ࠤࠨ-ࠨࡀ-ࡘࢠ-ࢴऄ-हऽ-ऽॐ-ॐक़-ॡॱ-ঀঅ-ঌএ-ঐও-নপ-রল-লশ-হঽ-ঽৎ-ৎড়-ঢ়য়-ৡৰ-ৱਅ-ਊਏ-ਐਓ-ਨਪ-ਰਲ-ਲ਼ਵ-ਸ਼ਸ-ਹਖ਼-ੜਫ਼-ਫ਼ੲ-ੴઅ-ઍએ-ઑઓ-નપ-રલ-ળવ-હઽ-ઽૐ-ૐૠ-ૡૹ-ૹଅ-ଌଏ-ଐଓ-ନପ-ରଲ-ଳଵ-ହଽ-ଽଡ଼-ଢ଼ୟ-ୡୱ-ୱஃ-ஃஅ-ஊஎ-ஐஒ-கங-சஜ-ஜஞ-டண-தந-பம-ஹௐ-ௐఅ-ఌఎ-ఐఒ-నప-హఽ-ఽౘ-ౚౠ-ౡಅ-ಌಎ-ಐಒ-ನಪ-ಳವ-ಹಽ-ಽೞ-ೞೠ-ೡೱ-ೲഅ-ഌഎ-ഐഒ-ഺഽ-ഽൎ-ൎൟ-ൡൺ-ൿඅ-ඖක-නඳ-රල-ලව-ෆก-ะา-ำเ-ๆກ-ຂຄ-ຄງ-ຈຊ-ຊຍ-ຍດ-ທນ-ຟມ-ຣລ-ລວ-ວສ-ຫອ-ະາ-ຳຽ-ຽເ-ໄໆ-ໆໜ-ໟༀ-ༀཀ-ཇཉ-ཬྈ-ྌက-ဪဿ-ဿၐ-ၕၚ-ၝၡ-ၡၥ-ၦၮ-ၰၵ-ႁႎ-ႎႠ-ჅჇ-ჇჍ-Ⴭა-ჺჼ-ቈቊ-ቍቐ-ቖቘ-ቘቚ-ቝበ-ኈኊ-ኍነ-ኰኲ-ኵኸ-ኾዀ-ዀዂ-ዅወ-ዖዘ-ጐጒ-ጕጘ-ፚᎀ-ᎏᎠ-Ᏽᏸ-ᏽᐁ-ᙬᙯ-ᙿᚁ-ᚚᚠ-ᛪᛱ-ᛸᜀ-ᜌᜎ-ᜑᜠ-ᜱᝀ-ᝑᝠ-ᝬᝮ-ᝰក-ឳៗ-ៗៜ-ៜᠠ-ᡷᢀ-ᢨᢪ-ᢪᢰ-ᣵᤀ-ᤞᥐ-ᥭᥰ-ᥴᦀ-ᦫᦰ-ᧉᨀ-ᨖᨠ-ᩔᪧ-ᪧᬅ-ᬳᭅ-ᭋᮃ-ᮠᮮ-ᮯᮺ-ᯥᰀ-ᰣᱍ-ᱏᱚ-ᱽᳩ-ᳬᳮ-ᳱᳵ-ᳶᴀ-ᶿḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙ-ὙὛ-ὛὝ-ὝὟ-ώᾀ-ᾴᾶ-ᾼι-ιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼⁱ-ⁱⁿ-ⁿₐ-ₜℂ-ℂℇ-ℇℊ-ℓℕ-ℕℙ-ℝℤ-ℤΩ-Ωℨ-ℨK-ℭℯ-ℹℼ-ℿⅅ-ⅉⅎ-ⅎↃ-ↄⰀ-Ⱞⰰ-ⱞⱠ-ⳤⳫ-ⳮⳲ-ⳳⴀ-ⴥⴧ-ⴧⴭ-ⴭⴰ-ⵧⵯ-ⵯⶀ-ⶖⶠ-ⶦⶨ-ⶮⶰ-ⶶⶸ-ⶾⷀ-ⷆⷈ-ⷎⷐ-ⷖⷘ-ⷞⸯ-ⸯ々-〆〱-〵〻-〼ぁ-ゖゝ-ゟァ-ヺー-ヿㄅ-ㄭㄱ-ㆎㆠ-ㆺㇰ-ㇿ㐀-䶵一-鿕ꀀ-ꒌꓐ-ꓽꔀ-ꘌꘐ-ꘟꘪ-ꘫꙀ-ꙮꙿ-ꚝꚠ-ꛥꜗ-ꜟꜢ-ꞈꞋ-ꞭꞰ-ꞷꟷ-ꠁꠃ-ꠅꠇ-ꠊꠌ-ꠢꡀ-ꡳꢂ-ꢳꣲ-ꣷꣻ-ꣻꣽ-ꣽꤊ-ꤥꤰ-ꥆꥠ-ꥼꦄ-ꦲꧏ-ꧏꧠ-ꧤꧦ-ꧯꧺ-ꧾꨀ-ꨨꩀ-ꩂꩄ-ꩋꩠ-ꩶꩺ-ꩺꩾ-ꪯꪱ-ꪱꪵ-ꪶꪹ-ꪽꫀ-ꫀꫂ-ꫂꫛ-ꫝꫠ-ꫪꫲ-ꫴꬁ-ꬆꬉ-ꬎꬑ-ꬖꬠ-ꬦꬨ-ꬮꬰ-ꭚꭜ-ꭥꭰ-ꯢ가-힣ힰ-ퟆퟋ-ퟻ豈-舘並-龎ff-stﬓ-ﬗיִ-יִײַ-ﬨשׁ-זּטּ-לּמּ-מּנּ-סּףּ-פּצּ-ﮱﯓ-ﴽﵐ-ﶏﶒ-ﷇﷰ-ﷻﹰ-ﹴﹶ-ﻼA-Za-zヲ-하-ᅦᅧ-ᅬᅭ-ᅲᅳ-ᅵ𐀀-𐀋𐀍-𐀦𐀨-𐀺𐀼-𐀽𐀿-𐁍𐁐-𐁝𐂀-𐃺𐊀-𐊜𐊠-𐋐𐌀-𐌟𐌰-𐍀𐍂-𐍉𐍐-𐍵𐎀-𐎝𐎠-𐏃𐏈-𐏏𐐀-𐒝𐔀-𐔧𐔰-𐕣𐘀-𐜶𐝀-𐝕𐝠-𐝧𐠀-𐠅𐠈-𐠈𐠊-𐠵𐠷-𐠸𐠼-𐠼𐠿-𐡕𐡠-𐡶𐢀-𐢞𐣠-𐣲𐣴-𐣵𐤀-𐤕𐤠-𐤹𐦀-𐦷𐦾-𐦿𐨀-𐨀𐨐-𐨓𐨕-𐨗𐨙-𐨳𐩠-𐩼𐪀-𐪜𐫀-𐫇𐫉-𐫤𐬀-𐬵𐭀-𐭕𐭠-𐭲𐮀-𐮑𐰀-𐱈𐲀-𐲲𐳀-𐳲𑀃-𑀷𑂃-𑂯𑃐-𑃨𑄃-𑄦𑅐-𑅲𑅶-𑅶𑆃-𑆲𑇁-𑇄𑇚-𑇚𑇜-𑇜𑈀-𑈑𑈓-𑈫𑊀-𑊆𑊈-𑊈𑊊-𑊍𑊏-𑊝𑊟-𑊨𑊰-𑋞𑌅-𑌌𑌏-𑌐𑌓-𑌨𑌪-𑌰𑌲-𑌳𑌵-𑌹𑌽-𑌽𑍐-𑍐𑍝-𑍡𑒀-𑒯𑓄-𑓅𑓇-𑓇𑖀-𑖮𑗘-𑗛𑘀-𑘯𑙄-𑙄𑚀-𑚪𑜀-𑜙𑢠-𑣟𑣿-𑣿𑫀-𑫸𒀀-𒎙𒒀-𒕃𓀀-𓐮𔐀-𔙆𖠀-𖨸𖩀-𖩞𖫐-𖫭𖬀-𖬯𖭀-𖭃𖭣-𖭷𖭽-𖮏𖼀-𖽄𖽐-𖽐𖾓-𖾟𛀀-𛀁𛰀-𛱪𛱰-𛱼𛲀-𛲈𛲐-𛲙𝐀-𝑔𝑖-𝒜𝒞-𝒟𝒢-𝒢𝒥-𝒦𝒩-𝒬𝒮-𝒹𝒻-𝒻𝒽-𝓃𝓅-𝔅𝔇-𝔊𝔍-𝔔𝔖-𝔜𝔞-𝔹𝔻-𝔾𝕀-𝕄𝕆-𝕆𝕊-𝕐𝕒-𝚥𝚨-𝛀𝛂-𝛚𝛜-𝛺𝛼-𝜔𝜖-𝜴𝜶-𝝎𝝐-𝝮𝝰-𝞈𝞊-𝞨𝞪-𝟂𝟄-𝟋𞠀-𞣄𞸀-𞸃𞸅-𞸟𞸡-𞸢𞸤-𞸤𞸧-𞸧𞸩-𞸲𞸴-𞸷𞸹-𞸹𞸻-𞸻𞹂-𞹂𞹇-𞹇𞹉-𞹉𞹋-𞹋𞹍-𞹏𞹑-𞹒𞹔-𞹔𞹗-𞹗𞹙-𞹙𞹛-𞹛𞹝-𞹝𞹟-𞹟𞹡-𞹢𞹤-𞹤𞹧-𞹪𞹬-𞹲𞹴-𞹷𞹹-𞹼𞹾-𞹾𞺀-𞺉𞺋-𞺛𞺡-𞺣𞺥-𞺩𞺫-𞺻𠀀-𪛖𪜀-𫜴𫝀-𫠝𫠠-𬺡丽-𪘀])+)" -consistent!(bullet_core_3, 
"^(?u:d/d)((?u:[A-Za-zª-ªµ-µº-ºÀ-ÖØ-öø-ˁˆ-ˑˠ-ˤˬ-ˬˮ-ˮͰ-ʹͶ-ͷͺ-ͽͿ-ͿΆ-ΆΈ-ΊΌ-ΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-Ֆՙ-ՙա-ևא-תװ-ײؠ-يٮ-ٯٱ-ۓە-ەۥ-ۦۮ-ۯۺ-ۼۿ-ۿܐ-ܐܒ-ܯݍ-ޥޱ-ޱߊ-ߪߴ-ߵߺ-ߺࠀ-ࠕࠚ-ࠚࠤ-ࠤࠨ-ࠨࡀ-ࡘࢠ-ࢴऄ-हऽ-ऽॐ-ॐक़-ॡॱ-ঀঅ-ঌএ-ঐও-নপ-রল-লশ-হঽ-ঽৎ-ৎড়-ঢ়য়-ৡৰ-ৱਅ-ਊਏ-ਐਓ-ਨਪ-ਰਲ-ਲ਼ਵ-ਸ਼ਸ-ਹਖ਼-ੜਫ਼-ਫ਼ੲ-ੴઅ-ઍએ-ઑઓ-નપ-રલ-ળવ-હઽ-ઽૐ-ૐૠ-ૡૹ-ૹଅ-ଌଏ-ଐଓ-ନପ-ରଲ-ଳଵ-ହଽ-ଽଡ଼-ଢ଼ୟ-ୡୱ-ୱஃ-ஃஅ-ஊஎ-ஐஒ-கங-சஜ-ஜஞ-டண-தந-பம-ஹௐ-ௐఅ-ఌఎ-ఐఒ-నప-హఽ-ఽౘ-ౚౠ-ౡಅ-ಌಎ-ಐಒ-ನಪ-ಳವ-ಹಽ-ಽೞ-ೞೠ-ೡೱ-ೲഅ-ഌഎ-ഐഒ-ഺഽ-ഽൎ-ൎൟ-ൡൺ-ൿඅ-ඖක-නඳ-රල-ලව-ෆก-ะา-ำเ-ๆກ-ຂຄ-ຄງ-ຈຊ-ຊຍ-ຍດ-ທນ-ຟມ-ຣລ-ລວ-ວສ-ຫອ-ະາ-ຳຽ-ຽເ-ໄໆ-ໆໜ-ໟༀ-ༀཀ-ཇཉ-ཬྈ-ྌက-ဪဿ-ဿၐ-ၕၚ-ၝၡ-ၡၥ-ၦၮ-ၰၵ-ႁႎ-ႎႠ-ჅჇ-ჇჍ-Ⴭა-ჺჼ-ቈቊ-ቍቐ-ቖቘ-ቘቚ-ቝበ-ኈኊ-ኍነ-ኰኲ-ኵኸ-ኾዀ-ዀዂ-ዅወ-ዖዘ-ጐጒ-ጕጘ-ፚᎀ-ᎏᎠ-Ᏽᏸ-ᏽᐁ-ᙬᙯ-ᙿᚁ-ᚚᚠ-ᛪᛱ-ᛸᜀ-ᜌᜎ-ᜑᜠ-ᜱᝀ-ᝑᝠ-ᝬᝮ-ᝰក-ឳៗ-ៗៜ-ៜᠠ-ᡷᢀ-ᢨᢪ-ᢪᢰ-ᣵᤀ-ᤞᥐ-ᥭᥰ-ᥴᦀ-ᦫᦰ-ᧉᨀ-ᨖᨠ-ᩔᪧ-ᪧᬅ-ᬳᭅ-ᭋᮃ-ᮠᮮ-ᮯᮺ-ᯥᰀ-ᰣᱍ-ᱏᱚ-ᱽᳩ-ᳬᳮ-ᳱᳵ-ᳶᴀ-ᶿḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙ-ὙὛ-ὛὝ-ὝὟ-ώᾀ-ᾴᾶ-ᾼι-ιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼⁱ-ⁱⁿ-ⁿₐ-ₜℂ-ℂℇ-ℇℊ-ℓℕ-ℕℙ-ℝℤ-ℤΩ-Ωℨ-ℨK-ℭℯ-ℹℼ-ℿⅅ-ⅉⅎ-ⅎↃ-ↄⰀ-Ⱞⰰ-ⱞⱠ-ⳤⳫ-ⳮⳲ-ⳳⴀ-ⴥⴧ-ⴧⴭ-ⴭⴰ-ⵧⵯ-ⵯⶀ-ⶖⶠ-ⶦⶨ-ⶮⶰ-ⶶⶸ-ⶾⷀ-ⷆⷈ-ⷎⷐ-ⷖⷘ-ⷞⸯ-ⸯ々-〆〱-〵〻-〼ぁ-ゖゝ-ゟァ-ヺー-ヿㄅ-ㄭㄱ-ㆎㆠ-ㆺㇰ-ㇿ㐀-䶵一-鿕ꀀ-ꒌꓐ-ꓽꔀ-ꘌꘐ-ꘟꘪ-ꘫꙀ-ꙮꙿ-ꚝꚠ-ꛥꜗ-ꜟꜢ-ꞈꞋ-ꞭꞰ-ꞷꟷ-ꠁꠃ-ꠅꠇ-ꠊꠌ-ꠢꡀ-ꡳꢂ-ꢳꣲ-ꣷꣻ-ꣻꣽ-ꣽꤊ-ꤥꤰ-ꥆꥠ-ꥼꦄ-ꦲꧏ-ꧏꧠ-ꧤꧦ-ꧯꧺ-ꧾꨀ-ꨨꩀ-ꩂꩄ-ꩋꩠ-ꩶꩺ-ꩺꩾ-ꪯꪱ-ꪱꪵ-ꪶꪹ-ꪽꫀ-ꫀꫂ-ꫂꫛ-ꫝꫠ-ꫪꫲ-ꫴꬁ-ꬆꬉ-ꬎꬑ-ꬖꬠ-ꬦꬨ-ꬮꬰ-ꭚꭜ-ꭥꭰ-ꯢ가-힣ힰ-ퟆퟋ-ퟻ豈-舘並-龎ff-stﬓ-ﬗיִ-יִײַ-ﬨשׁ-זּטּ-לּמּ-מּנּ-סּףּ-פּצּ-ﮱﯓ-ﴽﵐ-ﶏﶒ-ﷇﷰ-ﷻﹰ-ﹴﹶ-ﻼA-Za-zヲ-하-ᅦᅧ-ᅬᅭ-ᅲᅳ-ᅵ𐀀-𐀋𐀍-𐀦𐀨-𐀺𐀼-𐀽𐀿-𐁍𐁐-𐁝𐂀-𐃺𐊀-𐊜𐊠-𐋐𐌀-𐌟𐌰-𐍀𐍂-𐍉𐍐-𐍵𐎀-𐎝𐎠-𐏃𐏈-𐏏𐐀-𐒝𐔀-𐔧𐔰-𐕣𐘀-𐜶𐝀-𐝕𐝠-𐝧𐠀-𐠅𐠈-𐠈𐠊-𐠵𐠷-𐠸𐠼-𐠼𐠿-𐡕𐡠-𐡶𐢀-𐢞𐣠-𐣲𐣴-𐣵𐤀-𐤕𐤠-𐤹𐦀-𐦷𐦾-𐦿𐨀-𐨀𐨐-𐨓𐨕-𐨗𐨙-𐨳𐩠-𐩼𐪀-𐪜𐫀-𐫇𐫉-𐫤𐬀-𐬵𐭀-𐭕𐭠-𐭲𐮀-𐮑𐰀-𐱈𐲀-𐲲𐳀-𐳲𑀃-𑀷𑂃-𑂯𑃐-𑃨𑄃-𑄦𑅐-𑅲𑅶-𑅶𑆃-𑆲𑇁-𑇄𑇚-𑇚𑇜-𑇜𑈀-𑈑𑈓-𑈫𑊀-𑊆𑊈-𑊈𑊊-𑊍𑊏-𑊝𑊟-𑊨𑊰-𑋞𑌅-𑌌𑌏-𑌐𑌓-𑌨𑌪-𑌰𑌲-𑌳𑌵-𑌹𑌽-𑌽𑍐-𑍐𑍝-𑍡𑒀-𑒯𑓄-𑓅𑓇-𑓇𑖀-𑖮𑗘-𑗛𑘀-𑘯𑙄-𑙄𑚀-𑚪𑜀-𑜙𑢠-𑣟𑣿-𑣿𑫀-𑫸𒀀-𒎙𒒀-𒕃𓀀-𓐮𔐀-𔙆𖠀-𖨸𖩀-𖩞𖫐-𖫭𖬀-𖬯𖭀-𖭃𖭣-𖭷𖭽-𖮏𖼀-𖽄𖽐-𖽐𖾓-𖾟𛀀-𛀁𛰀-𛱪𛱰-𛱼𛲀-𛲈𛲐-𛲙𝐀-𝑔𝑖-𝒜𝒞-𝒟𝒢-𝒢𝒥-𝒦𝒩-𝒬𝒮-𝒹𝒻-𝒻𝒽-𝓃𝓅-𝔅𝔇-𝔊𝔍-𝔔𝔖-𝔜𝔞-𝔹𝔻-𝔾𝕀-𝕄𝕆-𝕆𝕊-𝕐𝕒-𝚥𝚨-𝛀𝛂-𝛚𝛜-𝛺𝛼-𝜔𝜖-𝜴𝜶-𝝎𝝐-𝝮𝝰-𝞈𝞊-𝞨𝞪-𝟂𝟄-𝟋𞠀-𞣄𞸀-𞸃𞸅-𞸟𞸡-𞸢𞸤-𞸤𞸧-𞸧𞸩-𞸲𞸴-𞸷𞸹-𞸹𞸻-𞸻𞹂-𞹂𞹇-𞹇𞹉-𞹉𞹋-𞹋𞹍-𞹏𞹑-𞹒𞹔-𞹔𞹗-𞹗𞹙-𞹙𞹛-𞹛𞹝-𞹝𞹟-𞹟𞹡-𞹢𞹤-𞹤𞹧-𞹪𞹬-𞹲𞹴-𞹷𞹹-𞹼𞹾-𞹾𞺀-𞺉𞺋-𞺛𞺡-𞺣𞺥-𞺩𞺫-𞺻𠀀-𪛖𪜀-𫜴𫝀-𫠝𫠠-𬺡丽-𪘀])+)"); - -// bullet_core-0.1.1: "^(?u:\\()" -consistent!(bullet_core_4, "^(?u:\\()"); - -// bullet_core-0.1.1: "^(?u:\\))" -consistent!(bullet_core_5, "^(?u:\\))"); - -// bullet_core-0.1.1: "^(?u:\\*)" -consistent!(bullet_core_6, "^(?u:\\*)"); - -// bullet_core-0.1.1: "^(?u:\\+)" 
-consistent!(bullet_core_7, "^(?u:\\+)"); - -// bullet_core-0.1.1: "^(?u:,)" -consistent!(bullet_core_8, "^(?u:,)"); - -// bullet_core-0.1.1: "^(?u:\\-)" -consistent!(bullet_core_9, "^(?u:\\-)"); - -// bullet_core-0.1.1: "^(?u:/)" -consistent!(bullet_core_10, "^(?u:/)"); - -// bullet_core-0.1.1: "^(?u:\\[)" -consistent!(bullet_core_11, "^(?u:\\[)"); - -// bullet_core-0.1.1: "^(?u:\\])" -consistent!(bullet_core_12, "^(?u:\\])"); - -// bullet_core-0.1.1: "^(?u:\\^)" -consistent!(bullet_core_13, "^(?u:\\^)"); - -// bullet_core-0.1.1: "^(?u:·)" -consistent!(bullet_core_14, "^(?u:·)"); - -// actix-web-0.6.13: "//+" -consistent!(actix_web_0, "//+"); - -// actix-web-0.6.13: "//+" -consistent!(actix_web_1, "//+"); - -// althea_kernel_interface-0.1.0: r"(\S*) .* (\S*) (REACHABLE|STALE|DELAY)" -consistent!( - althea_kernel_interface_0, - r"(\S*) .* (\S*) (REACHABLE|STALE|DELAY)" -); - -// althea_kernel_interface-0.1.0: r"-s (.*) --ip6-dst (.*)/.* bcnt = (.*)" -consistent!( - althea_kernel_interface_1, - r"-s (.*) --ip6-dst (.*)/.* bcnt = (.*)" -); - -// alcibiades-0.3.0: r"\buci(?:\s|$)" -consistent!(alcibiades_0, r"\buci(?:\s|$)"); - -// ruma-identifiers-0.11.0: r"\A[a-z0-9._=-]+\z" -consistent!(ruma_identifiers_0, r"\A[a-z0-9._=-]+\z"); - -// rusqbin-0.2.3: r"/rusqbins/((?i)[A-F0-9]{8}\-[A-F0-9]{4}\-4[A-F0-9]{3}\-[89AB][A-F0-9]{3}\-[A-F0-9]{12})$" -consistent!(rusqbin_0, r"/rusqbins/((?i)[A-F0-9]{8}\-[A-F0-9]{4}\-4[A-F0-9]{3}\-[89AB][A-F0-9]{3}\-[A-F0-9]{12})$"); - -// rusqbin-0.2.3: r"/rusqbins/((?i)[A-F0-9]{8}\-[A-F0-9]{4}\-4[A-F0-9]{3}\-[89AB][A-F0-9]{3}\-[A-F0-9]{12})/requests/?$" -consistent!(rusqbin_1, r"/rusqbins/((?i)[A-F0-9]{8}\-[A-F0-9]{4}\-4[A-F0-9]{3}\-[89AB][A-F0-9]{3}\-[A-F0-9]{12})/requests/?$"); - -// rust-install-0.0.4: r"^(nightly|beta|stable)(?:-(\d{4}-\d{2}-\d{2}))?$" -consistent!( - rust_install_0, - r"^(nightly|beta|stable)(?:-(\d{4}-\d{2}-\d{2}))?$" -); - -// rust_inbox-0.0.5: "^+(.*)\r\n" -consistent!(rust_inbox_0, "^+(.*)\r\n"); - -// 
rust_inbox-0.0.5: r"^\* CAPABILITY (.*)\r\n" -consistent!(rust_inbox_1, r"^\* CAPABILITY (.*)\r\n"); - -// rust_inbox-0.0.5: r"^([a-zA-Z0-9]+) (OK|NO|BAD)(.*)" -consistent!(rust_inbox_2, r"^([a-zA-Z0-9]+) (OK|NO|BAD)(.*)"); - -// rust_inbox-0.0.5: r"^\* (\d+) EXISTS\r\n" -consistent!(rust_inbox_3, r"^\* (\d+) EXISTS\r\n"); - -// rust_inbox-0.0.5: r"^\* (\d+) RECENT\r\n" -consistent!(rust_inbox_4, r"^\* (\d+) RECENT\r\n"); - -// rust_inbox-0.0.5: r"^\* FLAGS (.+)\r\n" -consistent!(rust_inbox_5, r"^\* FLAGS (.+)\r\n"); - -// rust_inbox-0.0.5: r"^\* OK \[UNSEEN (\d+)\](.*)\r\n" -consistent!(rust_inbox_6, r"^\* OK \[UNSEEN (\d+)\](.*)\r\n"); - -// rust_inbox-0.0.5: r"^\* OK \[UIDVALIDITY (\d+)\](.*)\r\n" -consistent!(rust_inbox_7, r"^\* OK \[UIDVALIDITY (\d+)\](.*)\r\n"); - -// rust_inbox-0.0.5: r"^\* OK \[UIDNEXT (\d+)\](.*)\r\n" -consistent!(rust_inbox_8, r"^\* OK \[UIDNEXT (\d+)\](.*)\r\n"); - -// rust_inbox-0.0.5: r"^\* OK \[PERMANENTFLAGS (.+)\](.*)\r\n" -consistent!(rust_inbox_9, r"^\* OK \[PERMANENTFLAGS (.+)\](.*)\r\n"); - -// rustml-0.0.7: r"^[a-z]+ (\d+)$" -consistent!(rustml_0, r"^[a-z]+ (\d+)$"); - -// rustml-0.0.7: r"^[a-z]+ (\d+)$" -consistent!(rustml_1, r"^[a-z]+ (\d+)$"); - -// rustml-0.0.7: r"^[a-z]+ (\d+)$" -consistent!(rustml_2, r"^[a-z]+ (\d+)$"); - -// rustfmt-0.10.0: r"([^\\](\\\\)*)\\[\n\r][[:space:]]*" -consistent!(rustfmt_0, r"([^\\](\\\\)*)\\[\n\r][[:space:]]*"); - -// rustfmt-core-0.4.0: r"(^\s*$)|(^\s*//\s*rustfmt-[^:]+:\s*\S+)" -consistent!(rustfmt_core_0, r"(^\s*$)|(^\s*//\s*rustfmt-[^:]+:\s*\S+)"); - -// rustfmt-core-0.4.0: r"^## `([^`]+)`" -consistent!(rustfmt_core_1, r"^## `([^`]+)`"); - -// rustfmt-core-0.4.0: r"([^\\](\\\\)*)\\[\n\r][[:space:]]*" -consistent!(rustfmt_core_2, r"([^\\](\\\\)*)\\[\n\r][[:space:]]*"); - -// rustfmt-core-0.4.0: r"\s;" -consistent!(rustfmt_core_3, r"\s;"); - -// rust-enum-derive-0.4.0: r"^(0x)?([:digit:]+)$" -consistent!(rust_enum_derive_0, r"^(0x)?([:digit:]+)$"); - -// rust-enum-derive-0.4.0: 
r"^([:digit:]+)[:space:]*<<[:space:]*([:digit:]+)$" -consistent!( - rust_enum_derive_1, - r"^([:digit:]+)[:space:]*<<[:space:]*([:digit:]+)$" -); - -// rust-enum-derive-0.4.0: r"^[:space:]*([[:alnum:]_]+)([:space:]*=[:space:]*([:graph:]+))?[:space:]*," -consistent!(rust_enum_derive_2, r"^[:space:]*([[:alnum:]_]+)([:space:]*=[:space:]*([:graph:]+))?[:space:]*,"); - -// rust-enum-derive-0.4.0: r"^#define[:space:]+([:graph:]+)[:space:]+([:graph:]+)" -consistent!( - rust_enum_derive_3, - r"^#define[:space:]+([:graph:]+)[:space:]+([:graph:]+)" -); - -// rustsourcebundler-0.2.0: r"^\s*pub mod (.+);$" -consistent!(rustsourcebundler_0, r"^\s*pub mod (.+);$"); - -// rustsourcebundler-0.2.0: r"^\s*pub mod (.+);$" -consistent!(rustsourcebundler_1, r"^\s*pub mod (.+);$"); - -// rustfmt-nightly-0.8.2: r"([^\\](\\\\)*)\\[\n\r][[:space:]]*" -consistent!(rustfmt_nightly_0, r"([^\\](\\\\)*)\\[\n\r][[:space:]]*"); - -// rustfmt-nightly-0.8.2: r"\s;" -consistent!(rustfmt_nightly_1, r"\s;"); - -// rustache-0.1.0: r"(?s)(.*?)([ \t\r\n]*)(\{\{(\{?\S?\s*?[\w\.\s]*.*?\s*?\}?)\}\})([ \t\r\n]*)" -consistent!(rustache_0, r"(?s)(.*?)([ \t\r\n]*)(\{\{(\{?\S?\s*?[\w\.\s]*.*?\s*?\}?)\}\})([ \t\r\n]*)"); - -// rustfilt-0.2.0: r"_ZN[\$\._[:alnum:]]*" -consistent!(rustfilt_0, r"_ZN[\$\._[:alnum:]]*"); - -// rustache-lists-0.1.2: r"(?s)(.*?)([ \t\r\n]*)(\{\{(\{?\S?\s*?[\w\.\s]*.*?\s*?\}?)\}\})([ \t\r\n]*)" -consistent!(rustache_lists_0, r"(?s)(.*?)([ \t\r\n]*)(\{\{(\{?\S?\s*?[\w\.\s]*.*?\s*?\}?)\}\})([ \t\r\n]*)"); - -// rural-0.7.3: "(.+)=(.+)" -consistent!(rural_0, "(.+)=(.+)"); - -// rural-0.7.3: "(.*):(.+)" -consistent!(rural_1, "(.*):(.+)"); - -// rural-0.7.3: "(.+):=(.+)" -consistent!(rural_2, "(.+):=(.+)"); - -// rural-0.7.3: "(.*)==(.+)" -consistent!(rural_3, "(.*)==(.+)"); - -// rusoto_credential-0.11.0: r"^\[([^\]]+)\]$" -consistent!(rusoto_credential_0, r"^\[([^\]]+)\]$"); - -// rumblebars-0.3.0: "([:blank:]*)$" -consistent!(rumblebars_0, "([:blank:]*)$"); - -// rumblebars-0.3.0: 
"(\r?\n)[:blank:]*(\\{\\{~?[#!/](?:\\}?[^}])*\\}\\})[:blank:]*(:?\r?\n)?\\z" -consistent!(rumblebars_1, "(\r?\n)[:blank:]*(\\{\\{~?[#!/](?:\\}?[^}])*\\}\\})[:blank:]*(:?\r?\n)?\\z"); - -// rumblebars-0.3.0: "(\r?\n[:blank:]*)(\\{\\{~?>(?:\\}?[^}])*\\}\\})[:blank:]*(:?\r?\n)?\\z" -consistent!( - rumblebars_2, - "(\r?\n[:blank:]*)(\\{\\{~?>(?:\\}?[^}])*\\}\\})[:blank:]*(:?\r?\n)?\\z" -); - -// rumblebars-0.3.0: "((?:[:blank:]|\r?\n)*)(\r?\n)[:blank:]*$" -consistent!(rumblebars_3, "((?:[:blank:]|\r?\n)*)(\r?\n)[:blank:]*$"); - -// rumblebars-0.3.0: "^([:blank:]*\r?\n)(.*)" -consistent!(rumblebars_4, "^([:blank:]*\r?\n)(.*)"); - -// diesel_cli-1.3.1: r"(?P<stamp>[\d-]*)_hello" -consistent!(diesel_cli_0, r"(?P<stamp>[\d-]*)_hello"); - -// dishub-0.1.1: r"(\d+)s" -consistent!(dishub_0, r"(\d+)s"); - -// spreadsheet_textconv-0.1.0: r"\n" -consistent!(spreadsheet_textconv_0, r"\n"); - -// spreadsheet_textconv-0.1.0: r"\r" -consistent!(spreadsheet_textconv_1, r"\r"); - -// spreadsheet_textconv-0.1.0: r"\t" -consistent!(spreadsheet_textconv_2, r"\t"); - -// split_aud-0.1.0: r"DELAY (-?\d+)ms" -consistent!(split_aud_0, r"DELAY (-?\d+)ms"); - -// split_aud-0.1.0: r"Trim\((\d+), ?(\d+)\)" -consistent!(split_aud_1, r"Trim\((\d+), ?(\d+)\)"); - -// spotrust-0.0.5: r"spotify:[a-z]+:[a-zA-Z0-9]+" -consistent!(spotrust_0, r"spotify:[a-z]+:[a-zA-Z0-9]+"); - -// spaceslugs-0.1.0: r"[^\x00-\x7F]" -consistent!(spaceslugs_0, r"[^\x00-\x7F]"); - -// spaceslugs-0.1.0: r"[']+" -consistent!(spaceslugs_1, r"[']+"); - -// spaceslugs-0.1.0: r"\W+" -consistent!(spaceslugs_2, r"\W+"); - -// spaceslugs-0.1.0: r"[ ]+" -consistent!(spaceslugs_3, r"[ ]+"); - -// space_email_api-0.1.1: "PHPSESSID=([0-9a-f]+)" -consistent!(space_email_api_0, "PHPSESSID=([0-9a-f]+)"); - -// lorikeet-0.7.0: "[^0-9.,]" -consistent!(lorikeet_0, "[^0-9.,]"); - -// claude-0.3.0: r"^(?:\b|(-)?)(\p{Currency_Symbol})?((?:(?:\d{1,3}[\.,])+\d{3})|\d+)(?:[\.,](\d{2}))?\b$" -consistent!(claude_0, 
r"^(?:\b|(-)?)(\p{Currency_Symbol})?((?:(?:\d{1,3}[\.,])+\d{3})|\d+)(?:[\.,](\d{2}))?\b$"); - -// clam-0.1.6: r"<%=\s*(.+?)\s*%>" -consistent!(clam_0, r"<%=\s*(.+?)\s*%>"); - -// classifier-0.0.3: r"(\s)" -consistent!(classifier_0, r"(\s)"); - -// click-0.3.2: r"(-----BEGIN .*-----\n)((?:(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)*\n)+)(-----END .*-----)" -consistent!(click_0, r"(-----BEGIN .*-----\n)((?:(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)*\n)+)(-----END .*-----)"); - -// click-0.3.2: r"-----BEGIN PRIVATE KEY-----" -consistent!(click_1, r"-----BEGIN PRIVATE KEY-----"); - -// ultrastar-txt-0.1.2: r"#([A-Z3a-z]*):(.*)" -consistent!(ultrastar_txt_0, r"#([A-Z3a-z]*):(.*)"); - -// ultrastar-txt-0.1.2: "^-\\s?(-?[0-9]+)\\s*$" -consistent!(ultrastar_txt_1, "^-\\s?(-?[0-9]+)\\s*$"); - -// ultrastar-txt-0.1.2: "^-\\s?(-?[0-9]+)\\s+(-?[0-9]+)" -consistent!(ultrastar_txt_2, "^-\\s?(-?[0-9]+)\\s+(-?[0-9]+)"); - -// ultrastar-txt-0.1.2: "^(.)\\s*(-?[0-9]+)\\s+(-?[0-9]+)\\s+(-?[0-9]+)\\s?(.*)" -consistent!( - ultrastar_txt_3, - "^(.)\\s*(-?[0-9]+)\\s+(-?[0-9]+)\\s+(-?[0-9]+)\\s?(.*)" -); - -// ultrastar-txt-0.1.2: "^P\\s?(-?[0-9]+)" -consistent!(ultrastar_txt_4, "^P\\s?(-?[0-9]+)"); - -// db-accelerate-2.0.0: r"^template\.add($|\..+$)" -consistent!(db_accelerate_0, r"^template\.add($|\..+$)"); - -// db-accelerate-2.0.0: r"^template\.sub($|\..+$)" -consistent!(db_accelerate_1, r"^template\.sub($|\..+$)"); - -// sterling-0.3.0: r"(\d+)([cegps])" -consistent!(sterling_0, r"(\d+)([cegps])"); - -// stache-0.2.0: r"[^\w]" -consistent!(stache_0, r"[^\w]"); - -// strukt-0.1.0: "\"([<>]?)([xcbB\\?hHiIlLqQfdspP]*)\"" -consistent!(strukt_0, "\"([<>]?)([xcbB\\?hHiIlLqQfdspP]*)\""); - -// steamid-ng-0.3.1: r"^STEAM_([0-4]):([0-1]):([0-9]{1,10})$" -consistent!(steamid_ng_0, r"^STEAM_([0-4]):([0-1]):([0-9]{1,10})$"); - -// steamid-ng-0.3.1: r"^\[([AGMPCgcLTIUai]):([0-4]):([0-9]{1,10})(:([0-9]+))?\]$" -consistent!( - steamid_ng_1, - 
r"^\[([AGMPCgcLTIUai]):([0-4]):([0-9]{1,10})(:([0-9]+))?\]$" -); - -// strscan-0.1.1: r"^\w+" -consistent!(strscan_0, r"^\w+"); - -// strscan-0.1.1: r"^\s+" -consistent!(strscan_1, r"^\s+"); - -// strscan-0.1.1: r"^\w+" -consistent!(strscan_2, r"^\w+"); - -// strscan-0.1.1: r"^\s+" -consistent!(strscan_3, r"^\s+"); - -// strscan-0.1.1: r"^(\w+)\s+" -consistent!(strscan_4, r"^(\w+)\s+"); - -// tk-carbon-0.2.0: r"^([a-zA-Z0-9\.-]+)(?:\s+(\d+))$" -consistent!(tk_carbon_0, r"^([a-zA-Z0-9\.-]+)(?:\s+(\d+))$"); - -// tk-carbon-0.2.0: r"^([a-zA-Z0-9\.-]+)(?:\s+(\d+))$" -consistent!(tk_carbon_1, r"^([a-zA-Z0-9\.-]+)(?:\s+(\d+))$"); - -// evalrs-0.0.10: r"extern\s+crate\s+([a-z0-9_]+)\s*;(\s*//(.+))?" -consistent!(evalrs_0, r"extern\s+crate\s+([a-z0-9_]+)\s*;(\s*//(.+))?"); - -// evalrs-0.0.10: r"(?m)^# " -consistent!(evalrs_1, r"(?m)^# "); - -// evalrs-0.0.10: r"(?m)^\s*fn +main *\( *\)" -consistent!(evalrs_2, r"(?m)^\s*fn +main *\( *\)"); - -// evalrs-0.0.10: r"(extern\s+crate\s+[a-z0-9_]+\s*;)" -consistent!(evalrs_3, r"(extern\s+crate\s+[a-z0-9_]+\s*;)"); - -// gate_build-0.5.0: "(.*)_t([0-9]+)" -consistent!(gate_build_0, "(.*)_t([0-9]+)"); - -// rake-0.1.1: r"[^\P{P}-]|\s+-\s+" -consistent!(rake_0, r"[^\P{P}-]|\s+-\s+"); - -// rafy-0.2.1: r"^.*(?:(?:youtu\.be/|v/|vi/|u/w/|embed/)|(?:(?:watch)?\?v(?:i)?=|\&v(?:i)?=))([^#\&\?]*).*" -consistent!(rafy_0, r"^.*(?:(?:youtu\.be/|v/|vi/|u/w/|embed/)|(?:(?:watch)?\?v(?:i)?=|\&v(?:i)?=))([^#\&\?]*).*"); - -// raven-0.2.1: r"^(?P<protocol>.*?)://(?P<public_key>.*?):(?P<secret_key>.*?)@(?P<host>.*?)/(?P<path>.*/)?(?P<project_id>.*)$" -consistent!(raven_0, r"^(?P<protocol>.*?)://(?P<public_key>.*?):(?P<secret_key>.*?)@(?P<host>.*?)/(?P<path>.*/)?(?P<project_id>.*)$"); - -// rargs-0.2.0: r"\{[[:space:]]*[^{}]*[[:space:]]*\}" -consistent!(rargs_0, r"\{[[:space:]]*[^{}]*[[:space:]]*\}"); - -// rargs-0.2.0: r"^\{[[:space:]]*(?P<name>[[:word:]]*)[[:space:]]*\}$" -consistent!(rargs_1, 
r"^\{[[:space:]]*(?P<name>[[:word:]]*)[[:space:]]*\}$"); - -// rargs-0.2.0: r"^\{[[:space:]]*(?P<num>-?\d+)[[:space:]]*\}$" -consistent!(rargs_2, r"^\{[[:space:]]*(?P<num>-?\d+)[[:space:]]*\}$"); - -// rargs-0.2.0: r"^\{(?P<left>-?\d*)?\.\.(?P<right>-?\d*)?(?::(?P<sep>.*))?\}$" -consistent!( - rargs_3, - r"^\{(?P<left>-?\d*)?\.\.(?P<right>-?\d*)?(?::(?P<sep>.*))?\}$" -); - -// rargs-0.2.0: r"(.*?)[[:space:]]+|(.*?)$" -consistent!(rargs_4, r"(.*?)[[:space:]]+|(.*?)$"); - -// indradb-lib-0.15.0: r"[a-zA-Z0-9]{8}" -consistent!(indradb_lib_0, r"[a-zA-Z0-9]{8}"); - -// fungi-lang-0.1.50: r"::" -consistent!(fungi_lang_0, r"::"); - -// nickel-0.10.1: "/hello/(?P<name>[a-zA-Z]+)" -consistent!(nickel_0, "/hello/(?P<name>[a-zA-Z]+)"); - -// nickel-0.10.1: "/hello/(?P<name>[a-zA-Z]+)" -consistent!(nickel_1, "/hello/(?P<name>[a-zA-Z]+)"); - -// pact_verifier-0.4.0: r"\{(\w+)\}" -consistent!(pact_verifier_0, r"\{(\w+)\}"); - -// pact_matching-0.4.1: "application/.*json" -consistent!(pact_matching_0, "application/.*json"); - -// pact_matching-0.4.1: "application/json.*" -consistent!(pact_matching_1, "application/json.*"); - -// pact_matching-0.4.1: "application/.*xml" -consistent!(pact_matching_2, "application/.*xml"); - -// pangu-0.2.0: "([\"'\\(\\[\\{{<\u{201c}])(\\s*)(.+?)(\\s*)([\"'\\)\\]\\}}>\u{201d}])" -consistent!( - pangu_0, - "([\"'\\(\\[\\{{<\u{201c}])(\\s*)(.+?)(\\s*)([\"'\\)\\]\\}}>\u{201d}])" -); - -// pangu-0.2.0: "([\\(\\[\\{{<\u{201c}]+)(\\s*)(.+?)(\\s*)([\\)\\]\\}}>\u{201d}]+)" -consistent!( - pangu_1, - "([\\(\\[\\{{<\u{201c}]+)(\\s*)(.+?)(\\s*)([\\)\\]\\}}>\u{201d}]+)" -); - -// parser-haskell-0.2.0: r"\{-[\s\S]*?-\}" -consistent!(parser_haskell_0, r"\{-[\s\S]*?-\}"); - -// parser-haskell-0.2.0: r"(?m);+\s*$" -consistent!(parser_haskell_1, r"(?m);+\s*$"); - -// parser-haskell-0.2.0: r"(?m)^#(if|ifn?def|endif|else|include|elif).*" -consistent!(parser_haskell_2, r"(?m)^#(if|ifn?def|endif|else|include|elif).*"); - -// parser-haskell-0.2.0: 
r"'([^'\\]|\\[A-Z]{1,3}|\\.)'" -consistent!(parser_haskell_3, r"'([^'\\]|\\[A-Z]{1,3}|\\.)'"); - -// parser-haskell-0.2.0: r"forall\s+(.*?)\." -consistent!(parser_haskell_4, r"forall\s+(.*?)\."); - -// html2md-0.2.1: "\\s{2,}" -consistent!(html2md_0, "\\s{2,}"); - -// html2md-0.2.1: "\\n{2,}" -consistent!(html2md_1, "\\n{2,}"); - -// html2md-0.2.1: "(?m)(\\S) $" -consistent!(html2md_2, "(?m)(\\S) $"); - -// html2md-0.2.1: "(?m)^[-*] " -consistent!(html2md_3, "(?m)^[-*] "); - -// ovpnfile-0.1.2: r"#.*$" -consistent!(ovpnfile_0, r"#.*$"); - -// ovpnfile-0.1.2: r"^<(\S+)>" -consistent!(ovpnfile_1, r"^<(\S+)>"); - -// ovpnfile-0.1.2: r"^</(\S+)>" -consistent!(ovpnfile_2, r"^</(\S+)>"); - -// screenruster-saver-fractal-0.1.1: r"#([:xdigit:]{2})([:xdigit:]{2})([:xdigit:]{2})" -consistent!( - screenruster_saver_fractal_0, - r"#([:xdigit:]{2})([:xdigit:]{2})([:xdigit:]{2})" -); - -// scarlet-0.2.2: r"rgb\((?: *(\d{1,3}),)(?: *(\d{1,3}),)(?: *(\d{1,3}))\)" -consistent!( - scarlet_0, - r"rgb\((?: *(\d{1,3}),)(?: *(\d{1,3}),)(?: *(\d{1,3}))\)" -); - -// cpp_to_rust_generator-0.2.0: r"^([\w:]+)<(.+)>$" -consistent!(cpp_to_rust_generator_0, r"^([\w:]+)<(.+)>$"); - -// cpp_to_rust_generator-0.2.0: r"^type-parameter-(\d+)-(\d+)$" -consistent!(cpp_to_rust_generator_1, r"^type-parameter-(\d+)-(\d+)$"); - -// cpp_to_rust_generator-0.2.0: r"^([\w~]+)<[^<>]+>$" -consistent!(cpp_to_rust_generator_2, r"^([\w~]+)<[^<>]+>$"); - -// cpp_to_rust_generator-0.2.0: r"(signals|Q_SIGNALS)\s*:" -consistent!(cpp_to_rust_generator_3, r"(signals|Q_SIGNALS)\s*:"); - -// cpp_to_rust_generator-0.2.0: r"(slots|Q_SLOTS)\s*:" -consistent!(cpp_to_rust_generator_4, r"(slots|Q_SLOTS)\s*:"); - -// cpp_to_rust_generator-0.2.0: r"(public|protected|private)\s*:" -consistent!(cpp_to_rust_generator_5, r"(public|protected|private)\s*:"); - -// cpp_to_rust-0.5.3: r"^([\w:]+)<(.+)>$" -consistent!(cpp_to_rust_0, r"^([\w:]+)<(.+)>$"); - -// cpp_to_rust-0.5.3: r"^type-parameter-(\d+)-(\d+)$" -consistent!(cpp_to_rust_1, 
r"^type-parameter-(\d+)-(\d+)$"); - -// cpp_to_rust-0.5.3: r"^([\w~]+)<[^<>]+>$" -consistent!(cpp_to_rust_2, r"^([\w~]+)<[^<>]+>$"); - -// cpp_to_rust-0.5.3: r"(signals|Q_SIGNALS)\s*:" -consistent!(cpp_to_rust_3, r"(signals|Q_SIGNALS)\s*:"); - -// cpp_to_rust-0.5.3: r"(slots|Q_SLOTS)\s*:" -consistent!(cpp_to_rust_4, r"(slots|Q_SLOTS)\s*:"); - -// cpp_to_rust-0.5.3: r"(public|protected|private)\s*:" -consistent!(cpp_to_rust_5, r"(public|protected|private)\s*:"); - -// fritzbox_logs-0.2.0: "(\\d{2}\\.\\d{2}\\.\\d{2}) (\\d{2}:\\d{2}:\\d{2}) (.*)" -consistent!( - fritzbox_logs_0, - "(\\d{2}\\.\\d{2}\\.\\d{2}) (\\d{2}:\\d{2}:\\d{2}) (.*)" -); - -// fractal-matrix-api-3.29.0: r"mxc://(?P<server>[^/]+)/(?P<media>.+)" -consistent!(fractal_matrix_api_0, r"mxc://(?P<server>[^/]+)/(?P<media>.+)"); - -// smtp2go-0.1.4: r"^api-[a-zA-Z0-9]{32}$" -consistent!(smtp2go_0, r"^api-[a-zA-Z0-9]{32}$"); - -// pusher-0.3.1: r"^[-a-zA-Z0-9_=@,.;]+$" -consistent!(pusher_0, r"^[-a-zA-Z0-9_=@,.;]+$"); - -// pusher-0.3.1: r"\A\d+\.\d+\z" -consistent!(pusher_1, r"\A\d+\.\d+\z"); - -// bakervm-0.9.0: r"^\.(.+?) +?(.+)$" -consistent!(bakervm_0, r"^\.(.+?) +?(.+)$"); - -// bakervm-0.9.0: r"^\.([^\s]+)$" -consistent!(bakervm_1, r"^\.([^\s]+)$"); - -// bakervm-0.9.0: r"^include! +([^\s]+)$" -consistent!(bakervm_2, r"^include! 
+([^\s]+)$"); - -// bakervm-0.9.0: r"^@(\d+)$" -consistent!(bakervm_3, r"^@(\d+)$"); - -// bakervm-0.9.0: r"^true|false$" -consistent!(bakervm_4, r"^true|false$"); - -// bakervm-0.9.0: r"^(-?\d+)?\.[0-9]+$" -consistent!(bakervm_5, r"^(-?\d+)?\.[0-9]+$"); - -// bakervm-0.9.0: r"^(-?\d+)?$" -consistent!(bakervm_6, r"^(-?\d+)?$"); - -// bakervm-0.9.0: r"^#([0-9abcdefABCDEF]{6})$" -consistent!(bakervm_7, r"^#([0-9abcdefABCDEF]{6})$"); - -// bakervm-0.9.0: r"^'(.)'$" -consistent!(bakervm_8, r"^'(.)'$"); - -// bakervm-0.9.0: r"^\$vi\((\d+)\)$" -consistent!(bakervm_9, r"^\$vi\((\d+)\)$"); - -// bakervm-0.9.0: r"^\$key\((\d+)\)$" -consistent!(bakervm_10, r"^\$key\((\d+)\)$"); - -// banana-0.0.2: "(?P<type>[A-Z^']+) (?P<route>[^']+) HTTP/(?P<http>[^']+)" -consistent!( - banana_0, - "(?P<type>[A-Z^']+) (?P<route>[^']+) HTTP/(?P<http>[^']+)" -); - -// serial-key-2.0.0: r"[A-F0-9]{8}" -consistent!(serial_key_0, r"[A-F0-9]{8}"); - -// serde-hjson-0.8.1: "[\\\\\"\x00-\x1f\x7f-\u{9f}\u{00ad}\u{0600}-\u{0604}\u{070f}\u{17b4}\u{17b5}\u{200c}-\u{200f}\u{2028}-\u{202f}\u{2060}-\u{206f}\u{feff}\u{fff0}-\u{ffff}]" -consistent!(serde_hjson_0, "[\\\\\"\x00-\x1f\x7f-\u{9f}\u{00ad}\u{0600}-\u{0604}\u{070f}\u{17b4}\u{17b5}\u{200c}-\u{200f}\u{2028}-\u{202f}\u{2060}-\u{206f}\u{feff}\u{fff0}-\u{ffff}]"); - -// serde-hjson-0.8.1: "[\x00-\x1f\x7f-\u{9f}\u{00ad}\u{0600}-\u{0604}\u{070f}\u{17b4}\u{17b5}\u{200c}-\u{200f}\u{2028}-\u{202f}\u{2060}-\u{206f}\u{feff}\u{fff0}-\u{ffff}]" -consistent!(serde_hjson_1, "[\x00-\x1f\x7f-\u{9f}\u{00ad}\u{0600}-\u{0604}\u{070f}\u{17b4}\u{17b5}\u{200c}-\u{200f}\u{2028}-\u{202f}\u{2060}-\u{206f}\u{feff}\u{fff0}-\u{ffff}]"); - -// serde-hjson-0.8.1: "'''|[\x00-\x09\x0b\x0c\x0e-\x1f\x7f-\u{9f}\u{00ad}\u{0600}-\u{0604}\u{070f}\u{17b4}\u{17b5}\u{200c}-\u{200f}\u{2028}-\u{202f}\u{2060}-\u{206f}\u{feff}\u{fff0}-\u{ffff}]" -consistent!(serde_hjson_2, 
"'''|[\x00-\x09\x0b\x0c\x0e-\x1f\x7f-\u{9f}\u{00ad}\u{0600}-\u{0604}\u{070f}\u{17b4}\u{17b5}\u{200c}-\u{200f}\u{2028}-\u{202f}\u{2060}-\u{206f}\u{feff}\u{fff0}-\u{ffff}]"); - -// serde-odbc-0.1.0: r"/todos/(?P<id>\d+)" -consistent!(serde_odbc_0, r"/todos/(?P<id>\d+)"); - -// sentry-0.6.0: r"^(?:_<)?([a-zA-Z0-9_]+?)(?:\.\.|::)" -consistent!(sentry_0, r"^(?:_<)?([a-zA-Z0-9_]+?)(?:\.\.|::)"); - -// sentiment-0.1.1: r"[^a-zA-Z0 -]+" -consistent!(sentiment_0, r"[^a-zA-Z0 -]+"); - -// sentiment-0.1.1: r" {2,}" -consistent!(sentiment_1, r" {2,}"); - -// verilog-0.0.1: r"(?m)//.*" -consistent!(verilog_0, r"(?m)//.*"); - -// verex-0.2.2: "(?P<robot>C3PO)" -consistent!(verex_0, "(?P<robot>C3PO)"); - -// handlebars-0.32.4: ">|<|\"|&" -consistent!(handlebars_0, ">|<|\"|&"); - -// haikunator-0.1.2: r"^\w+-\w+-[0123456789]{4}$" -consistent!(haikunator_0, r"^\w+-\w+-[0123456789]{4}$"); - -// haikunator-0.1.2: r"^\w+@\w+@[0123456789]{4}$" -consistent!(haikunator_1, r"^\w+@\w+@[0123456789]{4}$"); - -// haikunator-0.1.2: r"^\w+-\w+-[0123456789abcdef]{4}$" -consistent!(haikunator_2, r"^\w+-\w+-[0123456789abcdef]{4}$"); - -// haikunator-0.1.2: r"^\w+-\w+-[0123456789忠犬ハチ公]{10}$" -consistent!(haikunator_3, r"^\w+-\w+-[0123456789忠犬ハチ公]{10}$"); - -// haikunator-0.1.2: r"^\w+-\w+$" -consistent!(haikunator_4, r"^\w+-\w+$"); - -// haikunator-0.1.2: r"^\w+-\w+-[foo]{4}$" -consistent!(haikunator_5, r"^\w+-\w+-[foo]{4}$"); - -// haikunator-0.1.2: r"^\w+-\w+-[0123456789忠犬ハチ公]{5}$" -consistent!(haikunator_6, r"^\w+-\w+-[0123456789忠犬ハチ公]{5}$"); - -// bobbin-cli-0.8.3: r"(.*)" -consistent!(bobbin_cli_0, r"(.*)"); - -// bobbin-cli-0.8.3: r"rustc (.*)" -consistent!(bobbin_cli_1, r"rustc (.*)"); - -// bobbin-cli-0.8.3: r"cargo (.*)" -consistent!(bobbin_cli_2, r"cargo (.*)"); - -// bobbin-cli-0.8.3: r"xargo (.*)\n" -consistent!(bobbin_cli_3, r"xargo (.*)\n"); - -// bobbin-cli-0.8.3: r"Open On-Chip Debugger (.*)" -consistent!(bobbin_cli_4, r"Open On-Chip Debugger (.*)"); - -// bobbin-cli-0.8.3: 
r"arm-none-eabi-gcc \(GNU Tools for ARM Embedded Processors[^\)]*\) (.*)" -consistent!( - bobbin_cli_5, - r"arm-none-eabi-gcc \(GNU Tools for ARM Embedded Processors[^\)]*\) (.*)" -); - -// bobbin-cli-0.8.3: r"(?m).*\nBasic Open Source SAM-BA Application \(BOSSA\) Version (.*)\n" -consistent!( - bobbin_cli_6, - r"(?m).*\nBasic Open Source SAM-BA Application \(BOSSA\) Version (.*)\n" -); - -// bobbin-cli-0.8.3: r"(?m)SEGGER J-Link Commander (.*)\n" -consistent!(bobbin_cli_7, r"(?m)SEGGER J-Link Commander (.*)\n"); - -// bobbin-cli-0.8.3: r"(?m)Teensy Loader, Command Line, Version (.*)\n" -consistent!(bobbin_cli_8, r"(?m)Teensy Loader, Command Line, Version (.*)\n"); - -// bobbin-cli-0.8.3: r"dfu-util (.*)\n" -consistent!(bobbin_cli_9, r"dfu-util (.*)\n"); - -// borsholder-0.9.1: r"^/static/[\w.]+$" -consistent!(borsholder_0, r"^/static/[\w.]+$"); - -// borsholder-0.9.1: r"^/timeline/([0-9]+)$" -consistent!(borsholder_1, r"^/timeline/([0-9]+)$"); - -// fblog-1.0.1: "\u{001B}\\[[\\d;]*[^\\d;]" -consistent!(fblog_0, "\u{001B}\\[[\\d;]*[^\\d;]"); - -// fblog-1.0.1: "\u{001B}\\[[\\d;]*[^\\d;]" -consistent!(fblog_1, "\u{001B}\\[[\\d;]*[^\\d;]"); - -// toml-query-0.6.0: r"^\[\d+\]$" -consistent!(toml_query_0, r"^\[\d+\]$"); - -// todo-txt-1.1.0: r" (?P<key>[^\s]+):(?P<value>[^\s^/]+)" -consistent!(todo_txt_0, r" (?P<key>[^\s]+):(?P<value>[^\s^/]+)"); - -// findr-0.1.5: r"\band\b" -consistent!(findr_0, r"\band\b"); - -// findr-0.1.5: r"\bor\b" -consistent!(findr_1, r"\bor\b"); - -// findr-0.1.5: r"\bnot\b" -consistent!(findr_2, r"\bnot\b"); - -// file-sniffer-3.0.1: r".*?\.(a|la|lo|o|ll|keter|bc|dyn_o|out|d|rlib|crate|min\.js|hi|dyn_hi|S|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$" -consistent!(file_sniffer_0, 
r".*?\.(a|la|lo|o|ll|keter|bc|dyn_o|out|d|rlib|crate|min\.js|hi|dyn_hi|S|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$"); - -// file-sniffer-3.0.1: r".*?\.(stats|conf|h|cache.*|dat|pc|info)$" -consistent!(file_sniffer_1, r".*?\.(stats|conf|h|cache.*|dat|pc|info)$"); - -// file-sniffer-3.0.1: r".*?\.(exe|a|la|o|ll|keter|bc|dyn_o|out|d|rlib|crate|min\.js|hi|dyn_hi|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$" -consistent!(file_sniffer_2, r".*?\.(exe|a|la|o|ll|keter|bc|dyn_o|out|d|rlib|crate|min\.js|hi|dyn_hi|jsexe|webapp|js\.externs|ibc|toc|aux|fdb_latexmk|fls|egg-info|whl|js_a|js_hi|jld|ji|js_o|so.*|dump-.*|vmb|crx|orig|elmo|elmi|pyc|mod|p_hi|p_o|prof|tix)$"); - -// file-sniffer-3.0.1: r".*?\.(stats|conf|h|cache.*)$" -consistent!(file_sniffer_3, r".*?\.(stats|conf|h|cache.*)$"); - -// file-sniffer-3.0.1: r"(\.git|\.pijul|_darcs|\.hg)$" -consistent!(file_sniffer_4, r"(\.git|\.pijul|_darcs|\.hg)$"); - -// file_logger-0.1.0: "test" -consistent!(file_logger_0, "test"); - -// file_scanner-0.2.0: r"foo" -consistent!(file_scanner_0, r"foo"); - -// file_scanner-0.2.0: r"a+b" -consistent!(file_scanner_1, r"a+b"); - -// file_scanner-0.2.0: r"a[ab]*b" -consistent!(file_scanner_2, r"a[ab]*b"); - -// file_scanner-0.2.0: r"\s+" -consistent!(file_scanner_3, r"\s+"); - -// file_scanner-0.2.0: r"\s+" -consistent!(file_scanner_4, r"\s+"); - -// cellsplit-0.2.1: r"^\s*([^\s]+) %cellsplit<\d+>$" -consistent!(cellsplit_0, r"^\s*([^\s]+) %cellsplit<\d+>$"); - -// cellsplit-0.2.1: r"^\s*([^\s]+) %cellsplit<\d+>$" -consistent!(cellsplit_1, r"^\s*([^\s]+) %cellsplit<\d+>$"); - -// aterm-0.20.0: r"^[+\-]?[0-9]+" -consistent!(aterm_0, r"^[+\-]?[0-9]+"); - -// aterm-0.20.0: r"^[+\-]?[0-9]+\.[0-9]*([eE][+\-]?[0-9]+)?" 
-consistent!(aterm_1, r"^[+\-]?[0-9]+\.[0-9]*([eE][+\-]?[0-9]+)?"); - -// atarashii_imap-0.3.0: r"^[*] OK" -consistent!(atarashii_imap_0, r"^[*] OK"); - -// atarashii_imap-0.3.0: r"FLAGS\s\((.+)\)" -consistent!(atarashii_imap_1, r"FLAGS\s\((.+)\)"); - -// atarashii_imap-0.3.0: r"\[PERMANENTFLAGS\s\((.+)\)\]" -consistent!(atarashii_imap_2, r"\[PERMANENTFLAGS\s\((.+)\)\]"); - -// atarashii_imap-0.3.0: r"\[UIDVALIDITY\s(\d+)\]" -consistent!(atarashii_imap_3, r"\[UIDVALIDITY\s(\d+)\]"); - -// atarashii_imap-0.3.0: r"(\d+)\sEXISTS" -consistent!(atarashii_imap_4, r"(\d+)\sEXISTS"); - -// atarashii_imap-0.3.0: r"(\d+)\sRECENT" -consistent!(atarashii_imap_5, r"(\d+)\sRECENT"); - -// atarashii_imap-0.3.0: r"\[UNSEEN\s(\d+)\]" -consistent!(atarashii_imap_6, r"\[UNSEEN\s(\d+)\]"); - -// atarashii_imap-0.3.0: r"\[UIDNEXT\s(\d+)\]" -consistent!(atarashii_imap_7, r"\[UIDNEXT\s(\d+)\]"); - -// editorconfig-1.0.0: r"\\(\{|\})" -consistent!(editorconfig_0, r"\\(\{|\})"); - -// editorconfig-1.0.0: r"(^|[^\\])\\\|" -consistent!(editorconfig_1, r"(^|[^\\])\\\|"); - -// editorconfig-1.0.0: r"\[([^\]]*)$" -consistent!(editorconfig_2, r"\[([^\]]*)$"); - -// editorconfig-1.0.0: r"\[(.*/.*)\]" -consistent!(editorconfig_3, r"\[(.*/.*)\]"); - -// editorconfig-1.0.0: r"\{(-?\d+\\\.\\\.-?\d+)\}" -consistent!(editorconfig_4, r"\{(-?\d+\\\.\\\.-?\d+)\}"); - -// editorconfig-1.0.0: r"\{([^,]+)\}" -consistent!(editorconfig_5, r"\{([^,]+)\}"); - -// editorconfig-1.0.0: r"\{(([^\}].*)?(,|\|)(.*[^\\])?)\}" -consistent!(editorconfig_6, r"\{(([^\}].*)?(,|\|)(.*[^\\])?)\}"); - -// editorconfig-1.0.0: r"^/" -consistent!(editorconfig_7, r"^/"); - -// editorconfig-1.0.0: r"(^|[^\\])(\{|\})" -consistent!(editorconfig_8, r"(^|[^\\])(\{|\})"); - -// edmunge-1.0.0: "^#!.*\n" -consistent!(edmunge_0, "^#!.*\n"); - -// unicode_names2_macros-0.2.0: r"\\N\{(.*?)(?:\}|$)" -consistent!(unicode_names2_macros_0, r"\\N\{(.*?)(?:\}|$)"); - -// unidiff-0.2.1: r"^--- (?P<filename>[^\t\n]+)(?:\t(?P<timestamp>[^\n]+))?" 
-consistent!( - unidiff_0, - r"^--- (?P<filename>[^\t\n]+)(?:\t(?P<timestamp>[^\n]+))?" -); - -// unidiff-0.2.1: r"^\+\+\+ (?P<filename>[^\t\n]+)(?:\t(?P<timestamp>[^\n]+))?" -consistent!( - unidiff_1, - r"^\+\+\+ (?P<filename>[^\t\n]+)(?:\t(?P<timestamp>[^\n]+))?" -); - -// unidiff-0.2.1: r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)" -consistent!(unidiff_2, r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@[ ]?(.*)"); - -// unidiff-0.2.1: r"^(?P<line_type>[- \n\+\\]?)(?P<value>.*)" -consistent!(unidiff_3, r"^(?P<line_type>[- \n\+\\]?)(?P<value>.*)"); - -// slippy-map-tiles-0.13.1: "/?(?P<zoom>[0-9]?[0-9])/(?P<x>[0-9]{1,10})/(?P<y>[0-9]{1,10})(\\.[a-zA-Z]{3,4})?$" -consistent!(slippy_map_tiles_0, "/?(?P<zoom>[0-9]?[0-9])/(?P<x>[0-9]{1,10})/(?P<y>[0-9]{1,10})(\\.[a-zA-Z]{3,4})?$"); - -// slippy-map-tiles-0.13.1: r"^(?P<minlon>-?[0-9]{1,3}(\.[0-9]{1,10})?) (?P<minlat>-?[0-9]{1,3}(\.[0-9]{1,10})?) (?P<maxlon>-?[0-9]{1,3}(\.[0-9]{1,10})?) (?P<maxlat>-?[0-9]{1,3}(\.[0-9]{1,10})?)$" -consistent!(slippy_map_tiles_1, r"^(?P<minlon>-?[0-9]{1,3}(\.[0-9]{1,10})?) (?P<minlat>-?[0-9]{1,3}(\.[0-9]{1,10})?) (?P<maxlon>-?[0-9]{1,3}(\.[0-9]{1,10})?) 
(?P<maxlat>-?[0-9]{1,3}(\.[0-9]{1,10})?)$"); - -// slippy-map-tiles-0.13.1: r"^(?P<minlon>-?[0-9]{1,3}(\.[0-9]{1,10})?),(?P<minlat>-?[0-9]{1,3}(\.[0-9]{1,10})?),(?P<maxlon>-?[0-9]{1,3}(\.[0-9]{1,10})?),(?P<maxlat>-?[0-9]{1,3}(\.[0-9]{1,10})?)$" -consistent!(slippy_map_tiles_2, r"^(?P<minlon>-?[0-9]{1,3}(\.[0-9]{1,10})?),(?P<minlat>-?[0-9]{1,3}(\.[0-9]{1,10})?),(?P<maxlon>-?[0-9]{1,3}(\.[0-9]{1,10})?),(?P<maxlat>-?[0-9]{1,3}(\.[0-9]{1,10})?)$"); - -// sonos-0.1.2: r"^https?://(.+?):1400/xml" -consistent!(sonos_0, r"^https?://(.+?):1400/xml"); - -// validator_derive-0.7.0: r"^[a-z]{2}$" -consistent!(validator_derive_0, r"^[a-z]{2}$"); - -// validator_derive-0.7.0: r"[a-z]{2}" -consistent!(validator_derive_1, r"[a-z]{2}"); - -// validator_derive-0.7.0: r"[a-z]{2}" -consistent!(validator_derive_2, r"[a-z]{2}"); - -// nginx-config-0.8.0: r"one of \d+ options" -consistent!(nginx_config_0, r"one of \d+ options"); - -// waltz-0.4.0: r"[\s,]" -consistent!(waltz_0, r"[\s,]"); - -// warheadhateus-0.2.1: r"^aws_access_key_id = (.*)" -consistent!(warheadhateus_0, r"^aws_access_key_id = (.*)"); - -// warheadhateus-0.2.1: r"^aws_secret_access_key = (.*)" -consistent!(warheadhateus_1, r"^aws_secret_access_key = (.*)"); - -// warheadhateus-0.2.1: r"^aws_access_key_id = (.*)" -consistent!(warheadhateus_2, r"^aws_access_key_id = (.*)"); - -// warheadhateus-0.2.1: r"^aws_secret_access_key = (.*)" -consistent!(warheadhateus_3, r"^aws_secret_access_key = (.*)"); - -// jieba-rs-0.2.2: r"([\u{4E00}-\u{9FD5}a-zA-Z0-9+#&\._%]+)" -consistent!(jieba_rs_0, r"([\u{4E00}-\u{9FD5}a-zA-Z0-9+#&\._%]+)"); - -// jieba-rs-0.2.2: r"(\r\n|\s)" -consistent!(jieba_rs_1, r"(\r\n|\s)"); - -// jieba-rs-0.2.2: "([\u{4E00}-\u{9FD5}]+)" -consistent!(jieba_rs_2, "([\u{4E00}-\u{9FD5}]+)"); - -// jieba-rs-0.2.2: r"[^a-zA-Z0-9+#\n]" -consistent!(jieba_rs_3, r"[^a-zA-Z0-9+#\n]"); - -// jieba-rs-0.2.2: r"([\u{4E00}-\u{9FD5}]+)" -consistent!(jieba_rs_4, r"([\u{4E00}-\u{9FD5}]+)"); - -// jieba-rs-0.2.2: 
r"([a-zA-Z0-9]+(?:.\d+)?%?)" -consistent!(jieba_rs_5, r"([a-zA-Z0-9]+(?:.\d+)?%?)"); - -// lalrpop-0.15.2: r"Span\([0-9 ,]*\)" -consistent!(lalrpop_0, r"Span\([0-9 ,]*\)"); - -// lalrpop-snap-0.15.2: r"Span\([0-9 ,]*\)" -consistent!(lalrpop_snap_0, r"Span\([0-9 ,]*\)"); - -// nlp-tokenize-0.1.0: r"[\S]+" -consistent!(nlp_tokenize_0, r"[\S]+"); - -// kbgpg-0.1.2: "[[:xdigit:]][70]" -consistent!(kbgpg_0, "[[:xdigit:]][70]"); - -// cdbd-0.1.1: r"^((?P<address>.*):)?(?P<port>\d+)$" -consistent!(cdbd_0, r"^((?P<address>.*):)?(?P<port>\d+)$"); - -// mbutiles-0.1.1: r"[\w\s=+-/]+\((\{(.|\n)*\})\);?" -consistent!(mbutiles_0, r"[\w\s=+-/]+\((\{(.|\n)*\})\);?"); - -// extrahop-0.2.5: r"^-\d+(?:ms|s|m|h|d|w|y)?$" -consistent!(extrahop_0, r"^-\d+(?:ms|s|m|h|d|w|y)?$"); - -// pippin-0.1.0: "^((?:.*)-)?ss(0|[1-9][0-9]*)\\.pip$" -consistent!(pippin_0, "^((?:.*)-)?ss(0|[1-9][0-9]*)\\.pip$"); - -// pippin-0.1.0: "^((?:.*)-)?ss(0|[1-9][0-9]*)-cl(0|[1-9][0-9]*)\\.piplog$" -consistent!( - pippin_1, - "^((?:.*)-)?ss(0|[1-9][0-9]*)-cl(0|[1-9][0-9]*)\\.piplog$" -); - -// pippin-0.1.0: "^((?:.*)-)?ss(0|[1-9][0-9]*)\\.pip$" -consistent!(pippin_2, "^((?:.*)-)?ss(0|[1-9][0-9]*)\\.pip$"); - -// pippin-0.1.0: "^((?:.*)-)?ss(0|[1-9][0-9]*)-cl(0|[1-9][0-9]*)\\.piplog$" -consistent!( - pippin_3, - "^((?:.*)-)?ss(0|[1-9][0-9]*)-cl(0|[1-9][0-9]*)\\.piplog$" -); - -// pippin-0.1.0: "^.*pn(0|[1-9][0-9]*)(-ss(0|[1-9][0-9]*)(\\.pip|-cl(0|[1-9][0-9]*)\\.piplog))?$" -consistent!(pippin_4, "^.*pn(0|[1-9][0-9]*)(-ss(0|[1-9][0-9]*)(\\.pip|-cl(0|[1-9][0-9]*)\\.piplog))?$"); - -// pippin-0.1.0: "^(.*)-ss(?:0|[1-9][0-9]*)(?:\\.pip|-cl(?:0|[1-9][0-9]*)\\.piplog)$" -consistent!( - pippin_5, - "^(.*)-ss(?:0|[1-9][0-9]*)(?:\\.pip|-cl(?:0|[1-9][0-9]*)\\.piplog)$" -); - -// pinyin-0.3.0: r"(?i)[āáǎàēéěèōóǒòīíǐìūúǔùüǘǚǜńň]" -consistent!( - pinyin_0, - r"(?i)[āáǎàēéěèōóǒòīíǐìūúǔùüǘǚǜńň]" -); - -// pinyin-0.3.0: r"([aeoiuvnm])([0-4])$" -consistent!(pinyin_1, r"([aeoiuvnm])([0-4])$"); - -// duration-parser-0.2.0: 
r"(?P<value>\d+)(?P<units>[a-z])" -consistent!(duration_parser_0, r"(?P<value>\d+)(?P<units>[a-z])"); - -// dutree-0.2.7: r"^\d+\D?$" -consistent!(dutree_0, r"^\d+\D?$"); - -// djangohashers-0.3.0: r"^[A-Za-z0-9]*$" -consistent!(djangohashers_0, r"^[A-Za-z0-9]*$"); - -// rtag-0.3.5: r"^[A-Z][A-Z0-9]{2,}$" -consistent!(rtag_0, r"^[A-Z][A-Z0-9]{2,}$"); - -// rtag-0.3.5: r"^http://www\.emusic\.com" -consistent!(rtag_1, r"^http://www\.emusic\.com"); - -// rtag-0.3.5: r"^[A-Z][A-Z0-9]{2,}" -consistent!(rtag_2, r"^[A-Z][A-Z0-9]{2,}"); - -// rtag-0.3.5: r"(^[\x{0}|\x{feff}|\x{fffe}]*|[\x{0}|\x{feff}|\x{fffe}]*$)" -consistent!( - rtag_3, - r"(^[\x{0}|\x{feff}|\x{fffe}]*|[\x{0}|\x{feff}|\x{fffe}]*$)" -); - -// rtow-0.1.0: r"(\d+)[xX](\d+)" -consistent!(rtow_0, r"(\d+)[xX](\d+)"); - -// pleingres-sql-plugin-0.1.0: r"\$([a-zA-Z0-9_]+)" -consistent!(pleingres_sql_plugin_0, r"\$([a-zA-Z0-9_]+)"); - -// dono-2.0.0: "[\\n]+" -consistent!(dono_0, "[\\n]+"); - -// dono-2.0.0: "(?m)^\\n" -consistent!(dono_1, "(?m)^\\n"); - -// dono-2.0.0: "(?m)^\\n" -consistent!(dono_2, "(?m)^\\n"); - -// ssb-common-0.3.0: r"^[0-9A-Za-z\+/]{43}=\.ed25519$" -consistent!(ssb_common_0, r"^[0-9A-Za-z\+/]{43}=\.ed25519$"); - -// ssb-common-0.3.0: r"^[0-9A-Za-z\+/]{86}==\.ed25519$" -consistent!(ssb_common_1, r"^[0-9A-Za-z\+/]{86}==\.ed25519$"); - -// ssb-common-0.3.0: r"^[0-9A-Za-z\+/]{43}=\.sha256$" -consistent!(ssb_common_2, r"^[0-9A-Za-z\+/]{43}=\.sha256$"); - -// mozversion-0.1.3: r"^(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<patch>\d+))?(?:(?P<pre0>[a-z]+)(?P<pre1>\d*))?$" -consistent!(mozversion_0, r"^(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<patch>\d+))?(?:(?P<pre0>[a-z]+)(?P<pre1>\d*))?$"); - -// monger-0.5.6: r"^(\d+)\.(\d+)$" -consistent!(monger_0, r"^(\d+)\.(\d+)$"); - -// mongo_rub-0.0.2: r"^[rv]2\.6" -consistent!(mongo_rub_0, r"^[rv]2\.6"); - -// flow-0.3.5: "body value" -consistent!(flow_0, "body value"); - -// flow-0.3.5: "start marker" -consistent!(flow_1, "start marker"); - -// flow-0.3.5: "end 
marker" -consistent!(flow_2, "end marker"); - -// flow-0.3.5: "body value" -consistent!(flow_3, "body value"); - -// vobsub-0.2.3: "^([A-Za-z/ ]+): (.*)" -consistent!(vobsub_0, "^([A-Za-z/ ]+): (.*)"); - -// voidmap-1.1.2: r"#([^\s=]+)*" -consistent!(voidmap_0, r"#([^\s=]+)*"); - -// voidmap-1.1.2: r"#(\S+)*" -consistent!(voidmap_1, r"#(\S+)*"); - -// voidmap-1.1.2: r"#prio=(\d+)" -consistent!(voidmap_2, r"#prio=(\d+)"); - -// voidmap-1.1.2: r"\[(\S+)\]" -consistent!(voidmap_3, r"\[(\S+)\]"); - -// voidmap-1.1.2: r"#limit=(\d+)" -consistent!(voidmap_4, r"#limit=(\d+)"); - -// voidmap-1.1.2: r"#tagged=(\S+)" -consistent!(voidmap_5, r"#tagged=(\S+)"); - -// voidmap-1.1.2: r"#rev\b" -consistent!(voidmap_6, r"#rev\b"); - -// voidmap-1.1.2: r"#done\b" -consistent!(voidmap_7, r"#done\b"); - -// voidmap-1.1.2: r"#open\b" -consistent!(voidmap_8, r"#open\b"); - -// voidmap-1.1.2: r"#since=(\S+)" -consistent!(voidmap_9, r"#since=(\S+)"); - -// voidmap-1.1.2: r"#until=(\S+)" -consistent!(voidmap_10, r"#until=(\S+)"); - -// voidmap-1.1.2: r"#plot=(\S+)" -consistent!(voidmap_11, r"#plot=(\S+)"); - -// voidmap-1.1.2: r"#n=(\d+)" -consistent!(voidmap_12, r"#n=(\d+)"); - -// voidmap-1.1.2: r"(\S+)" -consistent!(voidmap_13, r"(\S+)"); - -// voidmap-1.1.2: r"(?P<y>\d+)y" -consistent!(voidmap_14, r"(?P<y>\d+)y"); - -// voidmap-1.1.2: r"(?P<m>\d+)m" -consistent!(voidmap_15, r"(?P<m>\d+)m"); - -// voidmap-1.1.2: r"(?P<w>\d+)w" -consistent!(voidmap_16, r"(?P<w>\d+)w"); - -// voidmap-1.1.2: r"(?P<d>\d+)d" -consistent!(voidmap_17, r"(?P<d>\d+)d"); - -// voidmap-1.1.2: r"(?P<h>\d+)h" -consistent!(voidmap_18, r"(?P<h>\d+)h"); - -// voidmap-1.1.2: r"C-(.)" -consistent!(voidmap_19, r"C-(.)"); - -// qt_generator-0.2.0: r"^\.\./qt[^/]+/" -consistent!(qt_generator_0, r"^\.\./qt[^/]+/"); - -// qt_generator-0.2.0: "(href|src)=\"([^\"]*)\"" -consistent!(qt_generator_1, "(href|src)=\"([^\"]*)\""); - -// kryptos-0.6.1: r"[01]{5}" -consistent!(kryptos_0, r"[01]{5}"); - -// cifar_10_loader-0.2.0: 
"data_batch_[1-5].bin" -consistent!(cifar_10_loader_0, "data_batch_[1-5].bin"); - -// cifar_10_loader-0.2.0: "test_batch.bin" -consistent!(cifar_10_loader_1, "test_batch.bin"); - -// circadian-0.6.0: r"^\d+.\d+s$" -consistent!(circadian_0, r"^\d+.\d+s$"); - -// circadian-0.6.0: r"^\d+:\d+$" -consistent!(circadian_1, r"^\d+:\d+$"); - -// circadian-0.6.0: r"^\d+:\d+m$" -consistent!(circadian_2, r"^\d+:\d+m$"); - -// cicada-0.8.1: r"!!" -consistent!(cicada_0, r"!!"); - -// cicada-0.8.1: r"^([^`]*)`([^`]+)`(.*)$" -consistent!(cicada_1, r"^([^`]*)`([^`]+)`(.*)$"); - -// cicada-0.8.1: r"\*+" -consistent!(cicada_2, r"\*+"); - -// cicada-0.8.1: r"([^\$]*)\$\{?([A-Za-z0-9\?\$_]+)\}?(.*)" -consistent!(cicada_3, r"([^\$]*)\$\{?([A-Za-z0-9\?\$_]+)\}?(.*)"); - -// cicada-0.8.1: r"^ *alias +([a-zA-Z0-9_\.-]+)=(.*)$" -consistent!(cicada_4, r"^ *alias +([a-zA-Z0-9_\.-]+)=(.*)$"); - -// vterm-sys-0.1.0: r"hi" -consistent!(vterm_sys_0, r"hi"); - -// skim-0.5.0: r".*?\t" -consistent!(skim_0, r".*?\t"); - -// skim-0.5.0: r".*?[\t ]" -consistent!(skim_1, r".*?[\t ]"); - -// skim-0.5.0: r"(\{-?[0-9.,q]*?})" -consistent!(skim_2, r"(\{-?[0-9.,q]*?})"); - -// skim-0.5.0: r"[ \t\n]+" -consistent!(skim_3, r"[ \t\n]+"); - -// skim-0.5.0: r"[ \t\n]+" -consistent!(skim_4, r"[ \t\n]+"); - -// skim-0.5.0: r"([^ |]+( +\| +[^ |]*)+)|( +)" -consistent!(skim_5, r"([^ |]+( +\| +[^ |]*)+)|( +)"); - -// skim-0.5.0: r" +\| +" -consistent!(skim_6, r" +\| +"); - -// skim-0.5.0: r"^(?P<left>-?\d+)?(?P<sep>\.\.)?(?P<right>-?\d+)?$" -consistent!(skim_7, r"^(?P<left>-?\d+)?(?P<sep>\.\.)?(?P<right>-?\d+)?$"); - -// skim-0.5.0: "," -consistent!(skim_8, ","); - -// skim-0.5.0: ".*?," -consistent!(skim_9, ".*?,"); - -// skim-0.5.0: ".*?," -consistent!(skim_10, ".*?,"); - -// skim-0.5.0: "," -consistent!(skim_11, ","); - -// skim-0.5.0: r"\x1B\[(?:([0-9]+;[0-9]+[Hf])|([0-9]+[ABCD])|(s|u|2J|K)|([0-9;]*m)|(=[0-9]+[hI]))" -consistent!(skim_12, 
r"\x1B\[(?:([0-9]+;[0-9]+[Hf])|([0-9]+[ABCD])|(s|u|2J|K)|([0-9;]*m)|(=[0-9]+[hI]))"); - -// egg-mode-text-1.14.7: r"[-_./]\z" -consistent!(egg_mode_text_0, r"[-_./]\z"); - -// java-properties-1.1.1: "^[ \t\r\n\x0c]*[#!]" -consistent!(java_properties_0, "^[ \t\r\n\x0c]*[#!]"); - -// java-properties-1.1.1: r"^[ \t\x0c]*[#!][^\r\n]*$" -consistent!(java_properties_1, r"^[ \t\x0c]*[#!][^\r\n]*$"); - -// java-properties-1.1.1: r"^([ \t\x0c]*[:=][ \t\x0c]*|[ \t\x0c]+)$" -consistent!(java_properties_2, r"^([ \t\x0c]*[:=][ \t\x0c]*|[ \t\x0c]+)$"); - -// ipaddress-0.1.2: r":.+\." -consistent!(ipaddress_0, r":.+\."); - -// ipaddress-0.1.2: r"\." -consistent!(ipaddress_1, r"\."); - -// ipaddress-0.1.2: r":" -consistent!(ipaddress_2, r":"); - -// iptables-0.2.2: r"v(\d+)\.(\d+)\.(\d+)" -consistent!(iptables_0, r"v(\d+)\.(\d+)\.(\d+)"); - -// rsure-0.8.1: r"^([^-]+)-(.*)\.dat\.gz$" -consistent!(rsure_0, r"^([^-]+)-(.*)\.dat\.gz$"); - -// rs-jsonpath-0.1.0: "^(.*?)(<=|<|==|>=|>)(.*?)$" -consistent!(rs_jsonpath_0, "^(.*?)(<=|<|==|>=|>)(.*?)$"); - -// oatie-0.3.0: r"(\n|^)(\w+):([\n\w\W]+?)(\n(?:\w)|(\n\]))" -consistent!(oatie_0, r"(\n|^)(\w+):([\n\w\W]+?)(\n(?:\w)|(\n\]))"); - -// weld-0.2.0: "#.*$" -consistent!(weld_0, "#.*$"); - -// weld-0.2.0: r"^[A-Za-z$_][A-Za-z0-9$_]*$" -consistent!(weld_1, r"^[A-Za-z$_][A-Za-z0-9$_]*$"); - -// weld-0.2.0: r"^[0-9]+[cC]$" -consistent!(weld_2, r"^[0-9]+[cC]$"); - -// weld-0.2.0: r"^0b[0-1]+[cC]$" -consistent!(weld_3, r"^0b[0-1]+[cC]$"); - -// weld-0.2.0: r"^0x[0-9a-fA-F]+[cC]$" -consistent!(weld_4, r"^0x[0-9a-fA-F]+[cC]$"); - -// weld-0.2.0: r"^[0-9]+$" -consistent!(weld_5, r"^[0-9]+$"); - -// weld-0.2.0: r"^0b[0-1]+$" -consistent!(weld_6, r"^0b[0-1]+$"); - -// weld-0.2.0: r"^0x[0-9a-fA-F]+$" -consistent!(weld_7, r"^0x[0-9a-fA-F]+$"); - -// weld-0.2.0: r"^[0-9]+[lL]$" -consistent!(weld_8, r"^[0-9]+[lL]$"); - -// weld-0.2.0: r"^0b[0-1]+[lL]$" -consistent!(weld_9, r"^0b[0-1]+[lL]$"); - -// weld-0.2.0: r"^0x[0-9a-fA-F]+[lL]$" 
-consistent!(weld_10, r"^0x[0-9a-fA-F]+[lL]$"); - -// webgl_generator-0.1.0: "([(, ])enum\\b" -consistent!(webgl_generator_0, "([(, ])enum\\b"); - -// webgl_generator-0.1.0: "\\bAcquireResourcesCallback\\b" -consistent!(webgl_generator_1, "\\bAcquireResourcesCallback\\b"); - -// weave-0.2.0: r"^(\d+)(,(\d+))?([acd]).*$" -consistent!(weave_0, r"^(\d+)(,(\d+))?([acd]).*$"); - -// wemo-0.0.12: r"<BinaryState>(\d)(\|-?\d+)*</BinaryState>" -consistent!(wemo_0, r"<BinaryState>(\d)(\|-?\d+)*</BinaryState>"); - -// webscale-0.9.4: r"(http[s]?://[^\s]+)" -consistent!(webscale_0, r"(http[s]?://[^\s]+)"); - -// svgrep-1.1.0: r"^\d+.*$" -consistent!(svgrep_0, r"^\d+.*$"); - -// ignore-0.4.2: r"^[\pL\pN]+$" -consistent!(ignore_0, r"^[\pL\pN]+$"); - -// ommui_string_patterns-0.1.2: r"^([A-Za-z][0-9A-Za-z_]*)?$" -consistent!(ommui_string_patterns_0, r"^([A-Za-z][0-9A-Za-z_]*)?$"); - -// ommui_string_patterns-0.1.2: r"^(\S+(?:.*\S)?)?$" -consistent!(ommui_string_patterns_1, r"^(\S+(?:.*\S)?)?$"); - -// opcua-types-0.3.0: "^(?P<min>[0-9]{1,10})(:(?P<max>[0-9]{1,10}))?$" -consistent!(opcua_types_0, "^(?P<min>[0-9]{1,10})(:(?P<max>[0-9]{1,10}))?$"); - -// opcua-types-0.3.0: r"^(ns=(?P<ns>[0-9]+);)?(?P<t>[isgb])=(?P<v>.+)$" -consistent!(opcua_types_1, r"^(ns=(?P<ns>[0-9]+);)?(?P<t>[isgb])=(?P<v>.+)$"); - -// open_read_later-1.1.1: r"^(.+?)\s*:\s*(.+)$" -consistent!(open_read_later_0, r"^(.+?)\s*:\s*(.+)$"); - -// youtube-downloader-0.1.0: r"^.*(?:(?:youtu\.be/|v/|vi/|u/w/|embed/)|(?:(?:watch)?\?v(?:i)?=|\&v(?:i)?=))([^#\&\?]*).*" -consistent!(youtube_downloader_0, r"^.*(?:(?:youtu\.be/|v/|vi/|u/w/|embed/)|(?:(?:watch)?\?v(?:i)?=|\&v(?:i)?=))([^#\&\?]*).*"); - -// yobot-0.1.1: "." -consistent!(yobot_0, "."); - -// yobot-0.1.1: r"." -consistent!(yobot_1, r"."); - -// yobot-0.1.1: r".+" -consistent!(yobot_2, r".+"); - -// yobot-0.1.1: r"." 
-consistent!(yobot_3, r"."); - -// ubiquity-0.1.5: r"foo" -consistent!(ubiquity_0, r"foo"); - -// ubiquity-0.1.5: r"/target/" -consistent!(ubiquity_1, r"/target/"); - -// ubiquity-0.1.5: r".DS_Store" -consistent!(ubiquity_2, r".DS_Store"); - -// qasm-1.0.0: r"//.*" -consistent!(qasm_0, r"//.*"); - -// drill-0.3.5: r"\{\{ *([a-z\._]+) *\}\}" -consistent!(drill_0, r"\{\{ *([a-z\._]+) *\}\}"); - -// queryst-2.0.0: r"^([^\]\[]+)" -consistent!(queryst_0, r"^([^\]\[]+)"); - -// queryst-2.0.0: r"(\[[^\]\[]*\])" -consistent!(queryst_1, r"(\[[^\]\[]*\])"); - -// qui-vive-0.1.0: r"^/(\w+)$" -consistent!(qui_vive_0, r"^/(\w+)$"); - -// qui-vive-0.1.0: r"^/key$" -consistent!(qui_vive_1, r"^/key$"); - -// qui-vive-0.1.0: r"^/key/(\w+)$" -consistent!(qui_vive_2, r"^/key/(\w+)$"); - -// qui-vive-0.1.0: r"^/url$" -consistent!(qui_vive_3, r"^/url$"); - -// qui-vive-0.1.0: r"^/url/(\w+)$" -consistent!(qui_vive_4, r"^/url/(\w+)$"); - -// qui-vive-0.1.0: r"^/inv$" -consistent!(qui_vive_5, r"^/inv$"); - -// qui-vive-0.1.0: r"^/inv/(\w+)$" -consistent!(qui_vive_6, r"^/inv/(\w+)$"); - -// subdiff-0.1.0: r"\b" -// consistent!(subdiff_0, r"\b"); - -// substudy-0.4.5: r"^(\d+)/(\d+)$" -consistent!(substudy_0, r"^(\d+)/(\d+)$"); - -// substudy-0.4.5: r"\s+" -consistent!(substudy_1, r"\s+"); - -// substudy-0.4.5: r"<[a-z/][^>]*>" -consistent!(substudy_2, r"<[a-z/][^>]*>"); - -// substudy-0.4.5: r"(\([^)]*\)|♪[^♪]*♪|[A-Z]{2,} ?:)" -consistent!(substudy_3, r"(\([^)]*\)|♪[^♪]*♪|[A-Z]{2,} ?:)"); - -// substudy-0.4.5: r"\s+" -consistent!(substudy_4, r"\s+"); - -// isbnid-0.1.3: r"^(\d(-| )?){9}(x|X|\d|(\d(-| )?){3}\d)$" -consistent!(isbnid_0, r"^(\d(-| )?){9}(x|X|\d|(\d(-| )?){3}\d)$"); - -// isbnid-0.1.3: r"[^0-9X]" -consistent!(isbnid_1, r"[^0-9X]"); - -// ispc-0.3.5: r"Intel\(r\) SPMD Program Compiler \(ispc\), (\d+\.\d+\.\d+)" -consistent!( - ispc_0, - r"Intel\(r\) SPMD Program Compiler \(ispc\), (\d+\.\d+\.\d+)" -); diff --git a/tests/fowler.rs b/tests/fowler.rs index 7f56a758d3..9fa4a3e910 
100644 --- a/tests/fowler.rs +++ b/tests/fowler.rs @@ -305,20 +305,27 @@ mat!( ); mat!(match_basic_125, r"a+b+c", r"aabbabc", Some((4, 7))); mat!(match_basic_126, r"a*", r"aaa", Some((0, 3))); -mat!(match_basic_128, r"(a*)*", r"-", Some((0, 0)), None); +mat!(match_basic_128, r"(a*)*", r"-", Some((0, 0)), Some((0, 0))); mat!(match_basic_129, r"(a*)+", r"-", Some((0, 0)), Some((0, 0))); -mat!(match_basic_131, r"(a*|b)*", r"-", Some((0, 0)), None); +mat!(match_basic_131, r"(a*|b)*", r"-", Some((0, 0)), Some((0, 0))); mat!(match_basic_132, r"(a+|b)*", r"ab", Some((0, 2)), Some((1, 2))); mat!(match_basic_133, r"(a+|b)+", r"ab", Some((0, 2)), Some((1, 2))); mat!(match_basic_134, r"(a+|b)?", r"ab", Some((0, 1)), Some((0, 1))); mat!(match_basic_135, r"[^ab]*", r"cde", Some((0, 3))); -mat!(match_basic_137, r"(^)*", r"-", Some((0, 0)), None); +mat!(match_basic_137, r"(^)*", r"-", Some((0, 0)), Some((0, 0))); mat!(match_basic_138, r"a*", r"", Some((0, 0))); mat!(match_basic_139, r"([abc])*d", r"abbbcd", Some((0, 6)), Some((4, 5))); mat!(match_basic_140, r"([abc])*bcd", r"abcd", Some((0, 4)), Some((0, 1))); mat!(match_basic_141, r"a|b|c|d|e", r"e", Some((0, 1))); mat!(match_basic_142, r"(a|b|c|d|e)f", r"ef", Some((0, 2)), Some((0, 1))); -mat!(match_basic_144, r"((a*|b))*", r"-", Some((0, 0)), None, None); +mat!( + match_basic_144, + r"((a*|b))*", + r"-", + Some((0, 0)), + Some((0, 0)), + Some((0, 0)) +); mat!(match_basic_145, r"abcd*efg", r"abcdefg", Some((0, 7))); mat!(match_basic_146, r"ab*", r"xabyabbbz", Some((1, 3))); mat!(match_basic_147, r"ab*", r"xayabbbz", Some((1, 2))); @@ -782,7 +789,7 @@ mat!(match_basic_221, r"\\000", r"\000", Some((0, 4))); // Tests from nullsubexpr.dat mat!(match_nullsubexpr_3, r"(a*)*", r"a", Some((0, 1)), Some((0, 1))); -mat!(match_nullsubexpr_5, r"(a*)*", r"x", Some((0, 0)), None); +mat!(match_nullsubexpr_5, r"(a*)*", r"x", Some((0, 0)), Some((0, 0))); mat!(match_nullsubexpr_6, r"(a*)*", r"aaaaaa", Some((0, 6)), Some((0, 6))); 
mat!(match_nullsubexpr_7, r"(a*)*", r"aaaaaax", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_8, r"(a*)+", r"a", Some((0, 1)), Some((0, 1))); @@ -798,7 +805,7 @@ mat!(match_nullsubexpr_17, r"(a+)+", r"x", None); mat!(match_nullsubexpr_18, r"(a+)+", r"aaaaaa", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_19, r"(a+)+", r"aaaaaax", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_21, r"([a]*)*", r"a", Some((0, 1)), Some((0, 1))); -mat!(match_nullsubexpr_23, r"([a]*)*", r"x", Some((0, 0)), None); +mat!(match_nullsubexpr_23, r"([a]*)*", r"x", Some((0, 0)), Some((0, 0))); mat!(match_nullsubexpr_24, r"([a]*)*", r"aaaaaa", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_25, r"([a]*)*", r"aaaaaax", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_26, r"([a]*)+", r"a", Some((0, 1)), Some((0, 1))); @@ -806,7 +813,7 @@ mat!(match_nullsubexpr_27, r"([a]*)+", r"x", Some((0, 0)), Some((0, 0))); mat!(match_nullsubexpr_28, r"([a]*)+", r"aaaaaa", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_29, r"([a]*)+", r"aaaaaax", Some((0, 6)), Some((0, 6))); mat!(match_nullsubexpr_30, r"([^b]*)*", r"a", Some((0, 1)), Some((0, 1))); -mat!(match_nullsubexpr_32, r"([^b]*)*", r"b", Some((0, 0)), None); +mat!(match_nullsubexpr_32, r"([^b]*)*", r"b", Some((0, 0)), Some((0, 0))); mat!(match_nullsubexpr_33, r"([^b]*)*", r"aaaaaa", Some((0, 6)), Some((0, 6))); mat!( match_nullsubexpr_34, @@ -830,7 +837,7 @@ mat!( ); mat!(match_nullsubexpr_42, r"([^a]*)*", r"b", Some((0, 1)), Some((0, 1))); mat!(match_nullsubexpr_43, r"([^a]*)*", r"bbbbbb", Some((0, 6)), Some((0, 6))); -mat!(match_nullsubexpr_45, r"([^a]*)*", r"aaaaaa", Some((0, 0)), None); +mat!(match_nullsubexpr_45, r"([^a]*)*", r"aaaaaa", Some((0, 0)), Some((0, 0))); mat!( match_nullsubexpr_46, r"([^ab]*)*", @@ -838,7 +845,13 @@ mat!( Some((0, 6)), Some((0, 6)) ); -mat!(match_nullsubexpr_48, r"([^ab]*)*", r"ababab", Some((0, 0)), None); +mat!( + match_nullsubexpr_48, + r"([^ab]*)*", + r"ababab", + Some((0, 0)), + Some((0, 
0)) +); mat!( match_nullsubexpr_50, r"((z)+|a)*", @@ -851,7 +864,7 @@ mat!( r"(a*)*(x)", r"x", Some((0, 1)), - None, + Some((0, 0)), Some((0, 1)) ); mat!( diff --git a/tests/multiline.rs b/tests/multiline.rs index 62ee47b62b..dbce622ec8 100644 --- a/tests/multiline.rs +++ b/tests/multiline.rs @@ -53,8 +53,10 @@ matiter!( match_multi_rep_3, r"(?m)(?:^|a)*", "a\naaa\n", - (0, 1), - (2, 5), + (0, 0), + (1, 1), + (2, 2), + (3, 5), (6, 6) ); matiter!( diff --git a/tests/regression_fuzz.rs b/tests/regression_fuzz.rs index 5f49530a72..31e67c78bb 100644 --- a/tests/regression_fuzz.rs +++ b/tests/regression_fuzz.rs @@ -14,8 +14,9 @@ fn fuzz1() { // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26505 // See: https://github.com/rust-lang/regex/issues/722 #[test] +#[cfg(feature = "unicode")] fn empty_any_errors_no_panic() { - assert!(regex_new!(r"\P{any}").is_err()); + assert!(regex_new!(r"\P{any}").is_ok()); } // This tests that a very large regex errors during compilation instead of diff --git a/tests/test_backtrack.rs b/tests/test_backtrack.rs deleted file mode 100644 index fb934e2d8f..0000000000 --- a/tests/test_backtrack.rs +++ /dev/null @@ -1,56 +0,0 @@ -#![cfg_attr(feature = "pattern", feature(pattern))] - -macro_rules! regex_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re) - .bounded_backtracking() - .build() - .map(|e| e.into_regex()) - }}; -} - -macro_rules! regex { - ($re:expr) => { - regex_new!($re).unwrap() - }; -} - -macro_rules! regex_set_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new_many($re) - .bounded_backtracking() - .build() - .map(|e| e.into_regex_set()) - }}; -} - -macro_rules! regex_set { - ($res:expr) => { - regex_set_new!($res).unwrap() - }; -} - -// Must come before other module definitions. 
-include!("macros_str.rs"); -include!("macros.rs"); - -mod api; -mod api_str; -mod crazy; -mod flags; -mod fowler; -mod multiline; -mod noparse; -mod regression; -mod replace; -mod searcher; -mod set; -mod suffix_reverse; -#[cfg(feature = "unicode")] -mod unicode; -#[cfg(feature = "unicode-perl")] -mod word_boundary; -#[cfg(feature = "unicode-perl")] -mod word_boundary_unicode; diff --git a/tests/test_backtrack_bytes.rs b/tests/test_backtrack_bytes.rs deleted file mode 100644 index a59426c949..0000000000 --- a/tests/test_backtrack_bytes.rs +++ /dev/null @@ -1,55 +0,0 @@ -macro_rules! regex_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re) - .bounded_backtracking() - .only_utf8(false) - .build() - .map(|e| e.into_byte_regex()) - }}; -} - -macro_rules! regex { - ($re:expr) => { - regex_new!($re).unwrap() - }; -} - -macro_rules! regex_set_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new_many($re) - .bounded_backtracking() - .only_utf8(false) - .build() - .map(|e| e.into_byte_regex_set()) - }}; -} - -macro_rules! regex_set { - ($res:expr) => { - regex_set_new!($res).unwrap() - }; -} - -// Must come before other module definitions. -include!("macros_bytes.rs"); -include!("macros.rs"); - -mod api; -mod bytes; -mod crazy; -mod flags; -mod fowler; -mod multiline; -mod noparse; -mod regression; -mod replace; -mod set; -mod suffix_reverse; -#[cfg(feature = "unicode")] -mod unicode; -#[cfg(feature = "unicode-perl")] -mod word_boundary; -#[cfg(feature = "unicode-perl")] -mod word_boundary_ascii; diff --git a/tests/test_backtrack_utf8bytes.rs b/tests/test_backtrack_utf8bytes.rs deleted file mode 100644 index 6d308e9e1c..0000000000 --- a/tests/test_backtrack_utf8bytes.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![cfg_attr(feature = "pattern", feature(pattern))] - -macro_rules! 
regex_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re) - .bounded_backtracking() - .bytes(true) - .build() - .map(|e| e.into_regex()) - }}; -} - -macro_rules! regex { - ($re:expr) => { - regex_new!($re).unwrap() - }; -} - -macro_rules! regex_set_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new_many($re) - .bounded_backtracking() - .bytes(true) - .build() - .map(|e| e.into_regex_set()) - }}; -} - -macro_rules! regex_set { - ($res:expr) => { - regex_set_new!($res).unwrap() - }; -} - -// Must come before other module definitions. -include!("macros_str.rs"); -include!("macros.rs"); - -mod api; -mod api_str; -mod crazy; -mod flags; -mod fowler; -mod multiline; -mod noparse; -mod regression; -mod replace; -mod searcher; -mod set; -mod suffix_reverse; -#[cfg(feature = "unicode")] -mod unicode; -#[cfg(feature = "unicode-perl")] -mod word_boundary; -#[cfg(feature = "unicode-perl")] -mod word_boundary_unicode; diff --git a/tests/test_crates_regex.rs b/tests/test_crates_regex.rs deleted file mode 100644 index a681604727..0000000000 --- a/tests/test_crates_regex.rs +++ /dev/null @@ -1,54 +0,0 @@ -/* - * This test is a minimal version of <rofl_0> and <subdiff_0> - * - * Once this bug gets fixed, uncomment rofl_0 and subdiff_0 - * (in `tests/crates_regex.rs`). -#[test] -fn word_boundary_backtracking_default_mismatch() { - use regex::internal::ExecBuilder; - - let backtrack_re = ExecBuilder::new(r"\b") - .bounded_backtracking() - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err)) - .unwrap(); - - let default_re = ExecBuilder::new(r"\b") - .build() - .map(|exec| exec.into_regex()) - .map_err(|err| format!("{}", err)) - .unwrap(); - - let input = "䅅\\u{a0}"; - - let fi1 = backtrack_re.find_iter(input); - let fi2 = default_re.find_iter(input); - for (m1, m2) in fi1.zip(fi2) { - assert_eq!(m1, m2); - } -} -*/ - -mod consistent; - -mod crates_regex { - - macro_rules! 
consistent { - ($test_name:ident, $regex_src:expr) => { - #[test] - fn $test_name() { - use super::consistent::backends_are_consistent; - - if option_env!("RUST_REGEX_RANDOM_TEST").is_some() { - match backends_are_consistent($regex_src) { - Ok(_) => {} - Err(err) => panic!("{}", err), - } - } - } - }; - } - - include!("crates_regex.rs"); -} diff --git a/tests/test_default.rs b/tests/test_default.rs index 19a319af11..5a455efb61 100644 --- a/tests/test_default.rs +++ b/tests/test_default.rs @@ -145,10 +145,10 @@ fn regex_is_reasonably_small() { use regex::bytes; use regex::{Regex, RegexSet}; - assert_eq!(16, size_of::<Regex>()); - assert_eq!(16, size_of::<RegexSet>()); - assert_eq!(16, size_of::<bytes::Regex>()); - assert_eq!(16, size_of::<bytes::RegexSet>()); + assert_eq!(32, size_of::<Regex>()); + assert_eq!(32, size_of::<RegexSet>()); + assert_eq!(32, size_of::<bytes::Regex>()); + assert_eq!(32, size_of::<bytes::RegexSet>()); } // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8 diff --git a/tests/test_nfa.rs b/tests/test_nfa.rs deleted file mode 100644 index e5a67d180a..0000000000 --- a/tests/test_nfa.rs +++ /dev/null @@ -1,50 +0,0 @@ -#![cfg_attr(feature = "pattern", feature(pattern))] - -macro_rules! regex_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re).nfa().build().map(|e| e.into_regex()) - }}; -} - -macro_rules! regex { - ($re:expr) => { - regex_new!($re).unwrap() - }; -} - -macro_rules! regex_set_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new_many($re).nfa().build().map(|e| e.into_regex_set()) - }}; -} - -macro_rules! regex_set { - ($res:expr) => { - regex_set_new!($res).unwrap() - }; -} - -// Must come before other module definitions. 
-include!("macros_str.rs"); -include!("macros.rs"); - -mod api; -mod api_str; -mod crazy; -mod flags; -mod fowler; -mod multiline; -mod noparse; -mod regression; -mod replace; -mod searcher; -mod set; -mod suffix_reverse; -#[cfg(feature = "unicode")] -mod unicode; -#[cfg(feature = "unicode-perl")] -mod word_boundary; -#[cfg(feature = "unicode-perl")] -mod word_boundary_unicode; diff --git a/tests/test_nfa_bytes.rs b/tests/test_nfa_bytes.rs deleted file mode 100644 index 0a10e032a2..0000000000 --- a/tests/test_nfa_bytes.rs +++ /dev/null @@ -1,55 +0,0 @@ -macro_rules! regex_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re) - .nfa() - .only_utf8(false) - .build() - .map(|e| e.into_byte_regex()) - }}; -} - -macro_rules! regex { - ($re:expr) => { - regex_new!($re).unwrap() - }; -} - -macro_rules! regex_set_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new_many($re) - .nfa() - .only_utf8(false) - .build() - .map(|e| e.into_byte_regex_set()) - }}; -} - -macro_rules! regex_set { - ($res:expr) => { - regex_set_new!($res).unwrap() - }; -} - -// Must come before other module definitions. -include!("macros_bytes.rs"); -include!("macros.rs"); - -mod api; -mod bytes; -mod crazy; -mod flags; -mod fowler; -mod multiline; -mod noparse; -mod regression; -mod replace; -mod set; -mod suffix_reverse; -#[cfg(feature = "unicode")] -mod unicode; -#[cfg(feature = "unicode-perl")] -mod word_boundary; -#[cfg(feature = "unicode-perl")] -mod word_boundary_unicode; diff --git a/tests/test_nfa_utf8bytes.rs b/tests/test_nfa_utf8bytes.rs deleted file mode 100644 index 36a572b5fc..0000000000 --- a/tests/test_nfa_utf8bytes.rs +++ /dev/null @@ -1,54 +0,0 @@ -#![cfg_attr(feature = "pattern", feature(pattern))] - -macro_rules! regex_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new($re).nfa().bytes(true).build().map(|e| e.into_regex()) - }}; -} - -macro_rules! 
regex { - ($re:expr) => { - regex_new!($re).unwrap() - }; -} - -macro_rules! regex_set_new { - ($re:expr) => {{ - use regex::internal::ExecBuilder; - ExecBuilder::new_many($re) - .nfa() - .bytes(true) - .build() - .map(|e| e.into_regex_set()) - }}; -} - -macro_rules! regex_set { - ($res:expr) => { - regex_set_new!($res).unwrap() - }; -} - -// Must come before other module definitions. -include!("macros_str.rs"); -include!("macros.rs"); - -mod api; -mod api_str; -mod crazy; -mod flags; -mod fowler; -mod multiline; -mod noparse; -mod regression; -mod replace; -mod searcher; -mod set; -mod suffix_reverse; -#[cfg(feature = "unicode")] -mod unicode; -#[cfg(feature = "unicode-perl")] -mod word_boundary; -#[cfg(feature = "unicode-perl")] -mod word_boundary_unicode;