From 8e4708dfdfb09cdb4a8d07d31c3ed5637e423dfa Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Tue, 23 Jul 2019 20:39:09 -0600
Subject: [PATCH 1/8] refactor(parser): Split out into struct

---
 benches/tokenize.rs |  26 ++++++----
 src/lib.rs          |   5 +-
 src/tokens.rs       | 118 ++++++++++++++++++++++----------------------
 3 files changed, 77 insertions(+), 72 deletions(-)

diff --git a/benches/tokenize.rs b/benches/tokenize.rs
index 75b1b9c..dea743a 100644
--- a/benches/tokenize.rs
+++ b/benches/tokenize.rs
@@ -6,60 +6,66 @@ mod data;
 
 #[bench]
 fn symbol_parse_empty(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::EMPTY.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::EMPTY.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_no_tokens(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::NO_TOKENS.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::NO_TOKENS.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_single_token(b: &mut test::Bencher) {
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
-        typos::tokens::Identifier::parse_bytes(data::SINGLE_TOKEN.as_bytes()).last();
+        parser.parse_bytes(data::SINGLE_TOKEN.as_bytes()).last();
     });
 }
 
 #[bench]
 fn symbol_parse_sherlock(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::SHERLOCK.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::SHERLOCK.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_code(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::CODE.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::CODE.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_corpus(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::CORPUS.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::CORPUS.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_split_lowercase_short(b: &mut test::Bencher) {
     let input = "abcabcabcabc";
-    let symbol = typos::tokens::Identifier::new(input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(input, 0);
     b.iter(|| symbol.split().last());
 }
 
 #[bench]
 fn symbol_split_lowercase_long(b: &mut test::Bencher) {
     let input = "abcabcabcabc".repeat(90);
-    let symbol = typos::tokens::Identifier::new(&input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(&input, 0);
     b.iter(|| symbol.split().last());
 }
 
 #[bench]
 fn symbol_split_mixed_short(b: &mut test::Bencher) {
     let input = "abcABCAbc123";
-    let symbol = typos::tokens::Identifier::new(input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(input, 0);
     b.iter(|| symbol.split().last());
 }
 
 #[bench]
 fn symbol_split_mixed_long(b: &mut test::Bencher) {
     let input = "abcABCAbc123".repeat(90);
-    let symbol = typos::tokens::Identifier::new(&input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(&input, 0);
     b.iter(|| symbol.split().last());
 }
diff --git a/src/lib.rs b/src/lib.rs
index 7ef5ea5..cc0a65f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -23,11 +23,12 @@ pub fn process_file(
     binary: bool,
     report: report::Report,
 ) -> Result<bool, failure::Error> {
+    let parser = tokens::Parser::new();
     let mut typos_found = false;
 
     if check_filenames {
         for part in path.components().filter_map(|c| c.as_os_str().to_str()) {
-            for ident in tokens::Identifier::parse(part) {
+            for ident in parser.parse(part) {
                 if !ignore_hex && is_hex(ident.token()) {
                     continue;
                 }
@@ -71,7 +72,7 @@ pub fn process_file(
 
     for (line_idx, line) in buffer.lines().enumerate() {
         let line_num = line_idx + 1;
-        for ident in tokens::Identifier::parse_bytes(line) {
+        for ident in parser.parse_bytes(line) {
             if !ignore_hex && is_hex(ident.token()) {
                 continue;
             }
diff --git a/src/tokens.rs b/src/tokens.rs
index 2d8c09a..46ce93e 100644
--- a/src/tokens.rs
+++ b/src/tokens.rs
@@ -6,6 +6,37 @@ pub enum Case {
     None,
 }
 
+#[derive(Debug, Clone)]
+pub struct Parser {
+    words_str: regex::Regex,
+    words_bytes: regex::bytes::Regex,
+}
+
+impl Parser {
+    pub fn new() -> Self {
+        let pattern = r#"\b(\p{Alphabetic}|\d|_|')+\b"#;
+        let words_str = regex::Regex::new(pattern).unwrap();
+        let words_bytes = regex::bytes::Regex::new(pattern).unwrap();
+        Self {
+            words_str,
+            words_bytes,
+        }
+    }
+
+    pub fn parse<'c>(&'c self, content: &'c str) -> impl Iterator<Item = Identifier<'c>> {
+        self.words_str
+            .find_iter(content)
+            .map(|m| Identifier::new_unchecked(m.as_str(), m.start()))
+    }
+
+    pub fn parse_bytes<'c>(&'c self, content: &'c [u8]) -> impl Iterator<Item = Identifier<'c>> {
+        self.words_bytes.find_iter(content).filter_map(|m| {
+            let s = std::str::from_utf8(m.as_bytes()).ok();
+            s.map(|s| Identifier::new_unchecked(s, m.start()))
+        })
+    }
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct Identifier<'t> {
     token: &'t str,
@@ -13,54 +44,10 @@ pub struct Identifier<'t> {
 }
 
 impl<'t> Identifier<'t> {
-    pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
-        let mut itr = Self::parse_bytes(token.as_bytes());
-        let mut item = itr
-            .next()
-            .ok_or_else(|| failure::format_err!("Invalid ident (none found): {:?}", token))?;
-        if item.offset != 0 {
-            return Err(failure::format_err!(
-                "Invalid ident (padding found): {:?}",
-                token
-            ));
-        }
-        item.offset += offset;
-        if itr.next().is_some() {
-            return Err(failure::format_err!(
-                "Invalid ident (contains more than one): {:?}",
-                token
-            ));
-        }
-        Ok(item)
-    }
-
-    pub(crate) fn new_unchecked(token: &'t str, offset: usize) -> Self {
+    pub fn new_unchecked(token: &'t str, offset: usize) -> Self {
         Self { token, offset }
     }
 
-    pub fn parse(content: &str) -> impl Iterator<Item = Identifier<'_>> {
-        lazy_static::lazy_static! {
-            // Getting false positives for this lint
-            #[allow(clippy::invalid_regex)]
-            static ref SPLIT: regex::Regex = regex::Regex::new(r#"\b(\p{Alphabetic}|\d|_|')+\b"#).unwrap();
-        }
-        SPLIT
-            .find_iter(content)
-            .map(|m| Identifier::new_unchecked(m.as_str(), m.start()))
-    }
-
-    pub fn parse_bytes(content: &[u8]) -> impl Iterator<Item = Identifier<'_>> {
-        lazy_static::lazy_static! {
-            // Getting false positives for this lint
-            #[allow(clippy::invalid_regex)]
-            static ref SPLIT: regex::bytes::Regex = regex::bytes::Regex::new(r#"\b(\p{Alphabetic}|\d|_|')+\b"#).unwrap();
-        }
-        SPLIT.find_iter(content).filter_map(|m| {
-            let s = std::str::from_utf8(m.as_bytes()).ok();
-            s.map(|s| Identifier::new_unchecked(s, m.start()))
-        })
-    }
-
     pub fn token(&self) -> &str {
         self.token
     }
@@ -87,7 +74,6 @@ pub struct Word<'t> {
 
 impl<'t> Word<'t> {
     pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
-        Identifier::new(token, offset)?;
         let mut itr = split_ident(token, 0);
         let mut item = itr
             .next()
@@ -108,7 +94,7 @@ impl<'t> Word<'t> {
         Ok(item)
     }
 
-    pub(crate) fn new_unchecked(token: &'t str, case: Case, offset: usize) -> Self {
+    pub fn new_unchecked(token: &'t str, case: Case, offset: usize) -> Self {
         Self {
             token,
             case,
@@ -251,70 +237,82 @@ mod test {
 
     #[test]
     fn tokenize_empty_is_empty() {
+        let parser = Parser::new();
+
         let input = "";
         let expected: Vec<Identifier> = vec![];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }
 
     #[test]
     fn tokenize_word_is_word() {
+        let parser = Parser::new();
+
         let input = "word";
         let expected: Vec<Identifier> = vec![Identifier::new_unchecked("word", 0)];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }
 
     #[test]
     fn tokenize_space_separated_words() {
+        let parser = Parser::new();
+
         let input = "A B";
         let expected: Vec<Identifier> = vec![
             Identifier::new_unchecked("A", 0),
             Identifier::new_unchecked("B", 2),
         ];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }
 
     #[test]
     fn tokenize_dot_separated_words() {
+        let parser = Parser::new();
+
         let input = "A.B";
         let expected: Vec<Identifier> = vec![
             Identifier::new_unchecked("A", 0),
             Identifier::new_unchecked("B", 2),
         ];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }
 
     #[test]
     fn tokenize_namespace_separated_words() {
+        let parser = Parser::new();
+
         let input = "A::B";
         let expected: Vec<Identifier> = vec![
             Identifier::new_unchecked("A", 0),
             Identifier::new_unchecked("B", 3),
         ];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }
 
     #[test]
     fn tokenize_underscore_doesnt_separate() {
+        let parser = Parser::new();
+
         let input = "A_B";
         let expected: Vec<Identifier> = vec![Identifier::new_unchecked("A_B", 0)];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }
 
@@ -375,7 +373,7 @@ mod test {
             ),
         ];
         for (input, expected) in cases.iter() {
-            let ident = Identifier::new(input, 0).unwrap();
+            let ident = Identifier::new_unchecked(input, 0);
            let result: Vec<_> = ident.split().map(|w| (w.token, w.case, w.offset)).collect();
            assert_eq!(&result, expected);
        }
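
This patch replaces the static `Identifier::parse*` functions with methods on a `Parser` instance. A minimal sketch of how a caller drives the new API (hypothetical usage code, not part of the patch; the input string is illustrative):

    let parser = typos::tokens::Parser::new();
    for ident in parser.parse("A::B") {
        // prints "A" at 0, then "B" at 3, matching the tests above
        println!("{} at byte offset {}", ident.token(), ident.offset());
    }
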
From d0b9979c36861b7a7625e6ea2079fef32f4f2150 Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Tue, 23 Jul 2019 20:42:20 -0600
Subject: [PATCH 2/8] refactor(parser): Split out parser creation

---
 src/tokens.rs | 33 ++++++++++++++++++++++++++-------
 1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/src/tokens.rs b/src/tokens.rs
index 46ce93e..23d1d1d 100644
--- a/src/tokens.rs
+++ b/src/tokens.rs
@@ -6,6 +6,25 @@ pub enum Case {
     None,
 }
 
+#[derive(Debug, Clone, Default)]
+pub struct ParserBuilder {}
+
+impl ParserBuilder {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn build(self) -> Parser {
+        let pattern = r#"\b(\p{Alphabetic}|\d|_|')+\b"#;
+        let words_str = regex::Regex::new(pattern).unwrap();
+        let words_bytes = regex::bytes::Regex::new(pattern).unwrap();
+        Parser {
+            words_str,
+            words_bytes,
+        }
+    }
+}
+
 #[derive(Debug, Clone)]
 pub struct Parser {
     words_str: regex::Regex,
     words_bytes: regex::bytes::Regex,
@@ -14,13 +33,7 @@ pub struct Parser {
 
 impl Parser {
     pub fn new() -> Self {
-        let pattern = r#"\b(\p{Alphabetic}|\d|_|')+\b"#;
-        let words_str = regex::Regex::new(pattern).unwrap();
-        let words_bytes = regex::bytes::Regex::new(pattern).unwrap();
-        Self {
-            words_str,
-            words_bytes,
-        }
+        ParserBuilder::default().build()
     }
 
     pub fn parse<'c>(&'c self, content: &'c str) -> impl Iterator<Item = Identifier<'c>> {
@@ -37,6 +50,12 @@ impl Parser {
     }
 }
 
+impl Default for Parser {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct Identifier<'t> {
     token: &'t str,
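
With construction split out, `Parser::new()` is now just a convenience wrapper over the builder. A sketch of the two equivalent call paths (hypothetical usage, not part of the patch):

    // these build identical parsers
    let via_builder = typos::tokens::ParserBuilder::new().build();
    let via_new = typos::tokens::Parser::new();

At this point in the series `build(self)` consumes the builder, so each builder can be used only once; patch 4 below revisits that.
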
From 3cf9d8672c8fabe9b93f4453d4dbfa8cece8b1a3 Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Wed, 24 Jul 2019 06:47:50 -0600
Subject: [PATCH 3/8] refactor(parser): Move hex handling to parser

---
 src/lib.rs    | 17 +-------------
 src/tokens.rs | 65 +++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 61 insertions(+), 21 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index cc0a65f..f357182 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -23,15 +23,12 @@ pub fn process_file(
     binary: bool,
     report: report::Report,
 ) -> Result<bool, failure::Error> {
-    let parser = tokens::Parser::new();
+    let parser = tokens::ParserBuilder::new().ignore_hex(ignore_hex).build();
     let mut typos_found = false;
 
     if check_filenames {
         for part in path.components().filter_map(|c| c.as_os_str().to_str()) {
             for ident in parser.parse(part) {
-                if !ignore_hex && is_hex(ident.token()) {
-                    continue;
-                }
                 if let Some(correction) = dictionary.correct_ident(ident) {
                     let msg = report::FilenameCorrection {
                         path,
@@ -73,9 +70,6 @@ pub fn process_file(
     for (line_idx, line) in buffer.lines().enumerate() {
         let line_num = line_idx + 1;
         for ident in parser.parse_bytes(line) {
-            if !ignore_hex && is_hex(ident.token()) {
-                continue;
-            }
             if let Some(correction) = dictionary.correct_ident(ident) {
                 let col_num = ident.offset();
                 let msg = report::Correction {
@@ -112,12 +106,3 @@ pub fn process_file(
 
     Ok(typos_found)
 }
-
-fn is_hex(ident: &str) -> bool {
-    lazy_static::lazy_static! {
-        // `_`: number literal separator in Rust and other languages
-        // `'`: number literal separator in C++
-        static ref HEX: regex::Regex = regex::Regex::new(r#"^0[xX][0-9a-fA-F_']+$"#).unwrap();
-    }
-    HEX.is_match(ident)
-}
diff --git a/src/tokens.rs b/src/tokens.rs
index 23d1d1d..c0071ac 100644
--- a/src/tokens.rs
+++ b/src/tokens.rs
@@ -7,13 +7,20 @@ pub enum Case {
 }
 
 #[derive(Debug, Clone, Default)]
-pub struct ParserBuilder {}
+pub struct ParserBuilder {
+    ignore_hex: bool,
+}
 
 impl ParserBuilder {
     pub fn new() -> Self {
         Default::default()
     }
 
+    pub fn ignore_hex(mut self, yes: bool) -> Self {
+        self.ignore_hex = yes;
+        self
+    }
+
     pub fn build(self) -> Parser {
         let pattern = r#"\b(\p{Alphabetic}|\d|_|')+\b"#;
         let words_str = regex::Regex::new(pattern).unwrap();
@@ -21,6 +28,7 @@ impl ParserBuilder {
         Parser {
             words_str,
             words_bytes,
+            ignore_hex: self.ignore_hex,
         }
     }
 }
@@ -29,6 +37,7 @@ impl ParserBuilder {
 pub struct Parser {
     words_str: regex::Regex,
     words_bytes: regex::bytes::Regex,
+    ignore_hex: bool,
 }
 
 impl Parser {
@@ -37,16 +46,22 @@ impl Parser {
     }
 
     pub fn parse<'c>(&'c self, content: &'c str) -> impl Iterator<Item = Identifier<'c>> {
+        let ignore_hex = self.ignore_hex;
         self.words_str
             .find_iter(content)
+            .filter(move |m| !ignore_hex || !is_hex(m.as_str().as_bytes()))
             .map(|m| Identifier::new_unchecked(m.as_str(), m.start()))
     }
 
     pub fn parse_bytes<'c>(&'c self, content: &'c [u8]) -> impl Iterator<Item = Identifier<'c>> {
-        self.words_bytes.find_iter(content).filter_map(|m| {
-            let s = std::str::from_utf8(m.as_bytes()).ok();
-            s.map(|s| Identifier::new_unchecked(s, m.start()))
-        })
+        let ignore_hex = self.ignore_hex;
+        self.words_bytes
+            .find_iter(content)
+            .filter(move |m| !ignore_hex || !is_hex(m.as_bytes()))
+            .filter_map(|m| {
+                let s = std::str::from_utf8(m.as_bytes()).ok();
+                s.map(|s| Identifier::new_unchecked(s, m.start()))
+            })
     }
 }
 
@@ -56,6 +71,15 @@ impl Default for Parser {
     }
 }
 
+fn is_hex(ident: &[u8]) -> bool {
+    lazy_static::lazy_static! {
+        // `_`: number literal separator in Rust and other languages
+        // `'`: number literal separator in C++
+        static ref HEX: regex::bytes::Regex = regex::bytes::Regex::new(r#"^0[xX][0-9a-fA-F_']+$"#).unwrap();
+    }
+    HEX.is_match(ident)
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct Identifier<'t> {
     token: &'t str,
@@ -335,6 +359,37 @@ mod test {
         assert_eq!(expected, actual);
     }
 
+    #[test]
+    fn tokenize_ignore_hex_enabled() {
+        let parser = ParserBuilder::new().ignore_hex(true).build();
+
+        let input = "Hello 0xDEADBEEF World";
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("Hello", 0),
+            Identifier::new_unchecked("World", 17),
+        ];
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
+        assert_eq!(expected, actual);
+        let actual: Vec<_> = parser.parse(input).collect();
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn tokenize_ignore_hex_disabled() {
+        let parser = ParserBuilder::new().ignore_hex(false).build();
+
+        let input = "Hello 0xDEADBEEF World";
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("Hello", 0),
+            Identifier::new_unchecked("0xDEADBEEF", 6),
+            Identifier::new_unchecked("World", 17),
+        ];
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
+        assert_eq!(expected, actual);
+        let actual: Vec<_> = parser.parse(input).collect();
+        assert_eq!(expected, actual);
+    }
+
     #[test]
     fn split_ident() {
         let cases = [
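
Note that the filter is anchored: `is_hex` only matches whole tokens of the form `0x…`/`0X…`, so a bare `DEADBEEF` still reaches the dictionary. A sketch of that distinction (hypothetical usage, not part of the patch; the input string is illustrative):

    let parser = typos::tokens::ParserBuilder::new().ignore_hex(true).build();
    // "0xDEADBEEF" is skipped, but "DEADBEEF" without the 0x prefix is kept
    let tokens: Vec<_> = parser.parse("0xDEADBEEF DEADBEEF").collect();
    assert_eq!(tokens.len(), 1);
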
From 039664339d5440f0140f29df1ad2b9fdfb8ce2c2 Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Wed, 24 Jul 2019 06:48:44 -0600
Subject: [PATCH 4/8] refactor(parser): Switch to by-ref builder

Since nothing is being moved into `Parser`, we don't get any
performance benefit with a moving builder, so switching to a by-ref
builder.
---
 src/tokens.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/tokens.rs b/src/tokens.rs
index c0071ac..594ba5d 100644
--- a/src/tokens.rs
+++ b/src/tokens.rs
@@ -16,12 +16,12 @@ impl ParserBuilder {
         Default::default()
     }
 
-    pub fn ignore_hex(mut self, yes: bool) -> Self {
+    pub fn ignore_hex(&mut self, yes: bool) -> &mut Self {
         self.ignore_hex = yes;
         self
     }
 
-    pub fn build(self) -> Parser {
+    pub fn build(&self) -> Parser {
         let pattern = r#"\b(\p{Alphabetic}|\d|_|')+\b"#;
         let words_str = regex::Regex::new(pattern).unwrap();
         let words_bytes = regex::bytes::Regex::new(pattern).unwrap();
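
With the by-ref builder, one configured builder can stamp out several parsers, and the chained style still works because `ignore_hex` returns `&mut Self` while `build` takes `&self`. A sketch (hypothetical usage, not part of the patch):

    let mut builder = typos::tokens::ParserBuilder::new();
    builder.ignore_hex(true);
    let first = builder.build();
    let second = builder.build(); // the builder is not consumed
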
From 36fefc166e25aa5065c5cfb52d0d596e6cf4b480 Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Wed, 24 Jul 2019 06:49:51 -0600
Subject: [PATCH 5/8] refactor(parser): Add more traits to builder

---
 src/tokens.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tokens.rs b/src/tokens.rs
index 594ba5d..e91824a 100644
--- a/src/tokens.rs
+++ b/src/tokens.rs
@@ -6,7 +6,7 @@ pub enum Case {
     None,
 }
 
-#[derive(Debug, Clone, Default)]
+#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
 pub struct ParserBuilder {
     ignore_hex: bool,
 }

From 3e678cca1e361d1392b80283caebbe3a57e190ac Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Thu, 25 Jul 2019 07:45:16 -0600
Subject: [PATCH 6/8] refactor(parser): Share a parser across calls

---
 benches/file.rs | 18 ++++++++++++------
 src/lib.rs      |  3 +--
 src/main.rs     |  6 +++++-
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/benches/file.rs b/benches/file.rs
index 5f69285..0cfea12 100644
--- a/benches/file.rs
+++ b/benches/file.rs
@@ -13,13 +13,14 @@ fn process_empty(b: &mut test::Bencher) {
     sample_path.write_str(data::EMPTY).unwrap();
 
     let corrections = typos::Dictionary::new();
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
         typos::process_file(
             sample_path.path(),
             &corrections,
             true,
             true,
-            true,
+            &parser,
             false,
             typos::report::print_silent,
         )
@@ -35,13 +36,14 @@ fn process_no_tokens(b: &mut test::Bencher) {
     sample_path.write_str(data::NO_TOKENS).unwrap();
 
     let corrections = typos::Dictionary::new();
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
         typos::process_file(
             sample_path.path(),
             &corrections,
             true,
             true,
-            true,
+            &parser,
             false,
             typos::report::print_silent,
         )
@@ -57,13 +59,14 @@ fn process_single_token(b: &mut test::Bencher) {
     sample_path.write_str(data::SINGLE_TOKEN).unwrap();
 
     let corrections = typos::Dictionary::new();
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
         typos::process_file(
             sample_path.path(),
             &corrections,
             true,
             true,
-            true,
+            &parser,
             false,
             typos::report::print_silent,
         )
@@ -79,13 +82,14 @@ fn process_sherlock(b: &mut test::Bencher) {
     sample_path.write_str(data::SHERLOCK).unwrap();
 
     let corrections = typos::Dictionary::new();
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
         typos::process_file(
             sample_path.path(),
             &corrections,
             true,
             true,
-            true,
+            &parser,
             false,
             typos::report::print_silent,
         )
@@ -101,13 +105,14 @@ fn process_code(b: &mut test::Bencher) {
     sample_path.write_str(data::CODE).unwrap();
 
     let corrections = typos::Dictionary::new();
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
         typos::process_file(
             sample_path.path(),
             &corrections,
             true,
             true,
-            true,
+            &parser,
             false,
             typos::report::print_silent,
         )
@@ -123,13 +128,14 @@ fn process_corpus(b: &mut test::Bencher) {
     sample_path.write_str(data::CORPUS).unwrap();
 
     let corrections = typos::Dictionary::new();
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
         typos::process_file(
             sample_path.path(),
             &corrections,
             true,
             true,
-            true,
+            &parser,
             false,
             typos::report::print_silent,
         )
diff --git a/src/lib.rs b/src/lib.rs
index f357182..fb61469 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -19,11 +19,10 @@ pub fn process_file(
     dictionary: &Dictionary,
     check_filenames: bool,
     check_files: bool,
-    ignore_hex: bool,
+    parser: &tokens::Parser,
     binary: bool,
     report: report::Report,
 ) -> Result<bool, failure::Error> {
-    let parser = tokens::ParserBuilder::new().ignore_hex(ignore_hex).build();
     let mut typos_found = false;
 
     if check_filenames {
diff --git a/src/main.rs b/src/main.rs
index 2e662a7..a512db9 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -261,6 +261,10 @@ fn run() -> Result<i32, failure::Error> {
     let ignore_hex = options.ignore_hex().unwrap_or(true);
     let binary = options.binary().unwrap_or(false);
 
+    let parser = typos::tokens::ParserBuilder::new()
+        .ignore_hex(ignore_hex)
+        .build();
+
     let first_path = &options
         .path
         .get(0)
@@ -284,7 +288,7 @@ fn run() -> Result<i32, failure::Error> {
                     &dictionary,
                     check_filenames,
                     check_files,
-                    ignore_hex,
+                    &parser,
                     binary,
                     options.format.report(),
                 )? {
From 834b9f77f2a2490697479d6a1de4481b6b41fdec Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Sat, 27 Jul 2019 19:20:02 -0600
Subject: [PATCH 7/8] refactor(checks): Separate out the logic

---
 benches/file.rs |  78 ++++------------------
 src/checks.rs   | 171 ++++++++++++++++++++++++++++++++++++++++++++++++
 src/lib.rs      |  98 +--------------------------
 src/main.rs     |  22 ++++---
 4 files changed, 196 insertions(+), 173 deletions(-)
 create mode 100644 src/checks.rs

diff --git a/benches/file.rs b/benches/file.rs
index 0cfea12..d25c7d1 100644
--- a/benches/file.rs
+++ b/benches/file.rs
@@ -14,17 +14,8 @@ fn process_empty(b: &mut test::Bencher) {
 
     let corrections = typos::Dictionary::new();
     let parser = typos::tokens::Parser::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            &parser,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
 
     temp.close().unwrap();
 }
@@ -37,17 +28,8 @@ fn process_no_tokens(b: &mut test::Bencher) {
 
     let corrections = typos::Dictionary::new();
     let parser = typos::tokens::Parser::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            &parser,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
 
     temp.close().unwrap();
 }
@@ -60,17 +42,8 @@ fn process_single_token(b: &mut test::Bencher) {
 
     let corrections = typos::Dictionary::new();
     let parser = typos::tokens::Parser::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            &parser,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
 
     temp.close().unwrap();
 }
@@ -83,17 +56,8 @@ fn process_sherlock(b: &mut test::Bencher) {
 
     let corrections = typos::Dictionary::new();
     let parser = typos::tokens::Parser::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            &parser,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
 
     temp.close().unwrap();
 }
@@ -106,17 +70,8 @@ fn process_code(b: &mut test::Bencher) {
 
     let corrections = typos::Dictionary::new();
     let parser = typos::tokens::Parser::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            &parser,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
 
     temp.close().unwrap();
 }
@@ -129,17 +84,8 @@ fn process_corpus(b: &mut test::Bencher) {
 
     let corrections = typos::Dictionary::new();
     let parser = typos::tokens::Parser::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            &parser,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
 
     temp.close().unwrap();
 }
diff --git a/src/checks.rs b/src/checks.rs
new file mode 100644
index 0000000..308d724
--- /dev/null
+++ b/src/checks.rs
@@ -0,0 +1,171 @@
+use std::fs::File;
+use std::io::Read;
+
+use bstr::ByteSlice;
+
+use crate::report;
+use crate::tokens;
+use crate::Dictionary;
+
+pub struct CheckSettings {
+    check_filenames: bool,
+    check_files: bool,
+    binary: bool,
+}
+
+impl CheckSettings {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn check_filenames(&mut self, yes: bool) -> &mut Self {
+        self.check_filenames = yes;
+        self
+    }
+
+    pub fn check_files(&mut self, yes: bool) -> &mut Self {
+        self.check_files = yes;
+        self
+    }
+
+    pub fn binary(&mut self, yes: bool) -> &mut Self {
+        self.binary = yes;
+        self
+    }
+
+    pub fn build<'d, 'p>(
+        &self,
+        dictionary: &'d Dictionary,
+        parser: &'p tokens::Parser,
+    ) -> Checks<'d, 'p> {
+        Checks {
+            dictionary,
+            parser,
+            check_filenames: self.check_filenames,
+            check_files: self.check_files,
+            binary: self.binary,
+        }
+    }
+}
+
+impl Default for CheckSettings {
+    fn default() -> Self {
+        Self {
+            check_filenames: true,
+            check_files: true,
+            binary: false,
+        }
+    }
+}
+
+pub struct Checks<'d, 'p> {
+    dictionary: &'d Dictionary,
+    parser: &'p tokens::Parser,
+    check_filenames: bool,
+    check_files: bool,
+    binary: bool,
+}
+
+impl<'d, 'p> Checks<'d, 'p> {
+    pub fn check_filename(
+        &self,
+        path: &std::path::Path,
+        report: report::Report,
+    ) -> Result<bool, failure::Error> {
+        let mut typos_found = false;
+
+        if !self.check_filenames {
+            return Ok(typos_found);
+        }
+
+        for part in path.components().filter_map(|c| c.as_os_str().to_str()) {
+            for ident in self.parser.parse(part) {
+                if let Some(correction) = self.dictionary.correct_ident(ident) {
+                    let msg = report::FilenameCorrection {
+                        path,
+                        typo: ident.token(),
+                        correction,
+                        non_exhaustive: (),
+                    };
+                    report(msg.into());
+                    typos_found = true;
+                }
+                for word in ident.split() {
+                    if let Some(correction) = self.dictionary.correct_word(word) {
+                        let msg = report::FilenameCorrection {
+                            path,
+                            typo: word.token(),
+                            correction,
+                            non_exhaustive: (),
+                        };
+                        report(msg.into());
+                        typos_found = true;
+                    }
+                }
+            }
+        }
+
+        Ok(typos_found)
+    }
+
+    pub fn check_file(
+        &self,
+        path: &std::path::Path,
+        report: report::Report,
+    ) -> Result<bool, failure::Error> {
+        let mut typos_found = false;
+
+        if !self.check_files {
+            return Ok(typos_found);
+        }
+
+        let mut buffer = Vec::new();
+        File::open(path)?.read_to_end(&mut buffer)?;
+        if !self.binary && buffer.find_byte(b'\0').is_some() {
+            let msg = report::BinaryFile {
+                path,
+                non_exhaustive: (),
+            };
+            report(msg.into());
+            return Ok(typos_found);
+        }
+
+        for (line_idx, line) in buffer.lines().enumerate() {
+            let line_num = line_idx + 1;
+            for ident in self.parser.parse_bytes(line) {
+                if let Some(correction) = self.dictionary.correct_ident(ident) {
+                    let col_num = ident.offset();
+                    let msg = report::Correction {
+                        path,
+                        line,
+                        line_num,
+                        col_num,
+                        typo: ident.token(),
+                        correction,
+                        non_exhaustive: (),
+                    };
+                    typos_found = true;
+                    report(msg.into());
+                }
+                for word in ident.split() {
+                    if let Some(correction) = self.dictionary.correct_word(word) {
+                        let col_num = word.offset();
+                        let msg = report::Correction {
+                            path,
+                            line,
+                            line_num,
+                            col_num,
+                            typo: word.token(),
+                            correction,
+                            non_exhaustive: (),
+                        };
+                        typos_found = true;
+                        report(msg.into());
+                    }
+                }
+            }
+        }
+
+        Ok(typos_found)
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index fb61469..b5201df 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,104 +4,8 @@
 mod dict;
 mod dict_codegen;
 
+pub mod checks;
 pub mod report;
 pub mod tokens;
 
 pub use crate::dict::*;
-
-use std::fs::File;
-use std::io::Read;
-
-use bstr::ByteSlice;
-
-pub fn process_file(
-    path: &std::path::Path,
-    dictionary: &Dictionary,
-    check_filenames: bool,
-    check_files: bool,
-    parser: &tokens::Parser,
-    binary: bool,
-    report: report::Report,
-) -> Result<bool, failure::Error> {
-    let mut typos_found = false;
-
-    if check_filenames {
-        for part in path.components().filter_map(|c| c.as_os_str().to_str()) {
-            for ident in parser.parse(part) {
-                if let Some(correction) = dictionary.correct_ident(ident) {
-                    let msg = report::FilenameCorrection {
-                        path,
-                        typo: ident.token(),
-                        correction,
-                        non_exhaustive: (),
-                    };
-                    report(msg.into());
-                    typos_found = true;
-                }
-                for word in ident.split() {
-                    if let Some(correction) = dictionary.correct_word(word) {
-                        let msg = report::FilenameCorrection {
-                            path,
-                            typo: word.token(),
-                            correction,
-                            non_exhaustive: (),
-                        };
-                        report(msg.into());
-                        typos_found = true;
-                    }
-                }
-            }
-        }
-    }
-
-    if check_files {
-        let mut buffer = Vec::new();
-        File::open(path)?.read_to_end(&mut buffer)?;
-        if !binary && buffer.find_byte(b'\0').is_some() {
-            let msg = report::BinaryFile {
-                path,
-                non_exhaustive: (),
-            };
-            report(msg.into());
-            return Ok(typos_found);
-        }
-
-        for (line_idx, line) in buffer.lines().enumerate() {
-            let line_num = line_idx + 1;
-            for ident in parser.parse_bytes(line) {
-                if let Some(correction) = dictionary.correct_ident(ident) {
-                    let col_num = ident.offset();
-                    let msg = report::Correction {
-                        path,
-                        line,
-                        line_num,
-                        col_num,
-                        typo: ident.token(),
-                        correction,
-                        non_exhaustive: (),
-                    };
-                    typos_found = true;
-                    report(msg.into());
-                }
-                for word in ident.split() {
-                    if let Some(correction) = dictionary.correct_word(word) {
-                        let col_num = word.offset();
-                        let msg = report::Correction {
-                            path,
-                            line,
-                            line_num,
-                            col_num,
-                            typo: word.token(),
-                            correction,
-                            non_exhaustive: (),
-                        };
-                        typos_found = true;
-                        report(msg.into());
-                    }
-                }
-            }
-        }
-    }
-
-    Ok(typos_found)
-}
diff --git a/src/main.rs b/src/main.rs
index a512db9..efe688e 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -255,16 +255,23 @@ fn run() -> Result<i32, failure::Error> {
     let mut builder = get_logging(options.verbose.log_level());
     builder.init();
 
-    let dictionary = typos::Dictionary::new();
     let check_filenames = options.check_filenames().unwrap_or(true);
     let check_files = options.check_files().unwrap_or(true);
     let ignore_hex = options.ignore_hex().unwrap_or(true);
     let binary = options.binary().unwrap_or(false);
 
+    let dictionary = typos::Dictionary::new();
+
     let parser = typos::tokens::ParserBuilder::new()
         .ignore_hex(ignore_hex)
         .build();
 
+    let checks = typos::checks::CheckSettings::new()
+        .check_filenames(check_filenames)
+        .check_files(check_files)
+        .binary(binary)
+        .build(&dictionary, &parser);
+
     let first_path = &options
         .path
         .get(0)
@@ -283,15 +290,10 @@ fn run() -> Result<i32, failure::Error> {
     for entry in walk.build() {
         let entry = entry?;
         if entry.file_type().map(|t| t.is_file()).unwrap_or(true) {
-            if typos::process_file(
-                entry.path(),
-                &dictionary,
-                check_filenames,
-                check_files,
-                &parser,
-                binary,
-                options.format.report(),
-            )? {
+            if checks.check_filename(entry.path(), options.format.report())? {
+                typos_found = true;
+            }
+            if checks.check_file(entry.path(), options.format.report())? {
                 typos_found = true;
             }
         }
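
With `process_file` gone, callers configure `CheckSettings` once and reuse the borrowed `Checks` across a whole walk, as `src/main.rs` does above. A condensed sketch (hypothetical usage, not part of the patch; the literal `path` stands in for a walked directory entry):

    use std::path::Path;

    let dictionary = typos::Dictionary::new();
    let parser = typos::tokens::Parser::new();
    let checks = typos::checks::CheckSettings::new()
        .check_filenames(true)
        .check_files(true)
        .binary(false)
        .build(&dictionary, &parser);
    // `path` would come from the directory walk in main.rs
    let path = Path::new("src/lib.rs");
    let found_in_name = checks.check_filename(path, typos::report::print_silent).unwrap();
    let found_in_file = checks.check_file(path, typos::report::print_silent).unwrap();
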
From adcbe68621714e45dff488f8f2d94317e4e3aebf Mon Sep 17 00:00:00 2001
From: Ed Page <eopage@gmail.com>
Date: Sat, 27 Jul 2019 19:42:45 -0600
Subject: [PATCH 8/8] refactor(dict): Split out a trait

---
 benches/corrections.rs |  6 +++---
 benches/file.rs        | 12 ++++++------
 src/dict.rs            | 30 ++++++++++++++++++++++++++----
 src/main.rs            |  2 +-
 4 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/benches/corrections.rs b/benches/corrections.rs
index b849986..8e0d751 100644
--- a/benches/corrections.rs
+++ b/benches/corrections.rs
@@ -4,12 +4,12 @@ extern crate test;
 
 #[bench]
 fn load_corrections(b: &mut test::Bencher) {
-    b.iter(|| typos::Dictionary::new());
+    b.iter(|| typos::BuiltIn::new());
 }
 
 #[bench]
 fn correct_word_hit(b: &mut test::Bencher) {
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let input = typos::tokens::Word::new("successs", 0).unwrap();
     assert_eq!(
         corrections.correct_word(input),
@@ -20,7 +20,7 @@
 
 #[bench]
 fn correct_word_miss(b: &mut test::Bencher) {
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let input = typos::tokens::Word::new("success", 0).unwrap();
     assert_eq!(corrections.correct_word(input), None);
     b.iter(|| corrections.correct_word(input));
diff --git a/benches/file.rs b/benches/file.rs
index d25c7d1..0783e86 100644
--- a/benches/file.rs
+++ b/benches/file.rs
@@ -12,7 +12,7 @@ fn process_empty(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::EMPTY).unwrap();
 
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let parser = typos::tokens::Parser::new();
     let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
     b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
@@ -26,7 +26,7 @@ fn process_no_tokens(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::NO_TOKENS).unwrap();
 
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let parser = typos::tokens::Parser::new();
     let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
     b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
@@ -40,7 +40,7 @@ fn process_single_token(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::SINGLE_TOKEN).unwrap();
 
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let parser = typos::tokens::Parser::new();
     let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
     b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
@@ -54,7 +54,7 @@ fn process_sherlock(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::SHERLOCK).unwrap();
 
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let parser = typos::tokens::Parser::new();
     let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
     b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
@@ -68,7 +68,7 @@ fn process_code(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::CODE).unwrap();
 
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let parser = typos::tokens::Parser::new();
     let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
     b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
@@ -82,7 +82,7 @@ fn process_corpus(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::CORPUS).unwrap();
 
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let parser = typos::tokens::Parser::new();
     let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
     b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));
diff --git a/src/dict.rs b/src/dict.rs
index 1861757..1ff7359 100644
--- a/src/dict.rs
+++ b/src/dict.rs
@@ -4,12 +4,21 @@ use unicase::UniCase;
 
 use crate::tokens::Case;
 
-#[derive(Default)]
-pub struct Dictionary {}
+pub trait Dictionary {
+    fn correct_ident<'s, 'w>(
+        &'s self,
+        _ident: crate::tokens::Identifier<'w>,
+    ) -> Option<Cow<'s, str>>;
 
-impl Dictionary {
+    fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<Cow<'s, str>>;
+}
+
+#[derive(Default)]
+pub struct BuiltIn {}
+
+impl BuiltIn {
     pub fn new() -> Self {
-        Dictionary {}
+        Self {}
     }
 
     pub fn correct_ident<'s, 'w>(
@@ -25,6 +34,19 @@ impl Dictionary {
     }
 }
 
+impl Dictionary for BuiltIn {
+    fn correct_ident<'s, 'w>(
+        &'s self,
+        ident: crate::tokens::Identifier<'w>,
+    ) -> Option<Cow<'s, str>> {
+        BuiltIn::correct_ident(self, ident)
+    }
+
+    fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<Cow<'s, str>> {
+        BuiltIn::correct_word(self, word)
+    }
+}
+
 fn map_lookup(
     map: &'static phf::Map<UniCase<&'static str>, &'static str>,
     key: &str,
diff --git a/src/main.rs b/src/main.rs
index efe688e..8673f7e 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -260,7 +260,7 @@ fn run() -> Result<i32, failure::Error> {
     let ignore_hex = options.ignore_hex().unwrap_or(true);
     let binary = options.binary().unwrap_or(false);
 
-    let dictionary = typos::Dictionary::new();
+    let dictionary = typos::BuiltIn::new();
 
     let parser = typos::tokens::ParserBuilder::new()
         .ignore_hex(ignore_hex)
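
The `Dictionary` trait introduced in this final patch makes the dictionary pluggable behind the same `correct_ident`/`correct_word` calls. A minimal sketch of a custom implementation (hypothetical code, not part of the series; it assumes the `Option<Cow<'s, str>>` return type shown in the diff above):

    use std::borrow::Cow;

    // a dictionary that never reports corrections
    struct Ignore;

    impl typos::Dictionary for Ignore {
        fn correct_ident<'s, 'w>(
            &'s self,
            _ident: typos::tokens::Identifier<'w>,
        ) -> Option<Cow<'s, str>> {
            None
        }

        fn correct_word<'s, 'w>(&'s self, _word: typos::tokens::Word<'w>) -> Option<Cow<'s, str>> {
            None
        }
    }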