From a082207283c01f067d371df4bc366ef1d55d8da1 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Sat, 22 Jun 2019 09:12:54 -0600 Subject: [PATCH 1/8] perf(report): Reduce grabbing of locks --- src/report.rs | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/report.rs b/src/report.rs index a2fbbed..d3cc8e3 100644 --- a/src/report.rs +++ b/src/report.rs @@ -1,3 +1,5 @@ +use std::io::{self, Write}; + #[derive(Copy, Clone, Debug, Serialize)] pub struct Message<'m> { pub path: &'m std::path::Path, @@ -33,21 +35,33 @@ pub fn print_long(msg: Message) { let hl_indent: String = itertools::repeat_n(" ", msg.col_num).collect(); let hl: String = itertools::repeat_n("^", msg.word.len()).collect(); - println!("error: `{}` should be `{}`", msg.word, msg.correction); - println!( + let stdout = io::stdout(); + let mut handle = stdout.lock(); + + writeln!( + handle, + "error: `{}` should be `{}`", + msg.word, msg.correction + ) + .unwrap(); + writeln!( + handle, " --> {}:{}:{}", msg.path.display(), msg.line_num, msg.col_num - ); - println!("{} |", line_indent); - println!( + ) + .unwrap(); + writeln!(handle, "{} |", line_indent).unwrap(); + writeln!( + handle, "{} | {}", msg.line_num, String::from_utf8_lossy(msg.line).trim_end() - ); - println!("{} | {}{}", line_indent, hl_indent, hl); - println!("{} |", line_indent); + ) + .unwrap(); + writeln!(handle, "{} | {}{}", line_indent, hl_indent, hl).unwrap(); + writeln!(handle, "{} |", line_indent).unwrap(); } pub fn print_json(msg: Message) { From 80aeed1b43a9ccd1a36267163d6a4d27a99badba Mon Sep 17 00:00:00 2001 From: Ed Page Date: Sat, 22 Jun 2019 09:18:03 -0600 Subject: [PATCH 2/8] fix(report): Align text when tabs are used Ideally we would provide for more than a space per tab but this at least gets us better alignment. 
Fixes #11 --- src/report.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/report.rs b/src/report.rs index d3cc8e3..f501f64 100644 --- a/src/report.rs +++ b/src/report.rs @@ -35,6 +35,9 @@ pub fn print_long(msg: Message) { let hl_indent: String = itertools::repeat_n(" ", msg.col_num).collect(); let hl: String = itertools::repeat_n("^", msg.word.len()).collect(); + let line = String::from_utf8_lossy(msg.line); + let line = line.replace("\t", " "); + let stdout = io::stdout(); let mut handle = stdout.lock(); @@ -53,13 +56,7 @@ pub fn print_long(msg: Message) { ) .unwrap(); writeln!(handle, "{} |", line_indent).unwrap(); - writeln!( - handle, - "{} | {}", - msg.line_num, - String::from_utf8_lossy(msg.line).trim_end() - ) - .unwrap(); + writeln!(handle, "{} | {}", msg.line_num, line.trim_end()).unwrap(); writeln!(handle, "{} | {}{}", line_indent, hl_indent, hl).unwrap(); writeln!(handle, "{} |", line_indent).unwrap(); } From 881fce5114e9d16517be911445682b14cc4eb837 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Sat, 22 Jun 2019 11:47:51 -0600 Subject: [PATCH 3/8] feat(parse): Track the case of each word --- src/tokens.rs | 139 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 117 insertions(+), 22 deletions(-) diff --git a/src/tokens.rs b/src/tokens.rs index 0da2b07..d9c0f72 100644 --- a/src/tokens.rs +++ b/src/tokens.rs @@ -1,3 +1,11 @@ +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Case { + Title, + Lower, + Scream, + None, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Symbol<'t> { token: &'t str, @@ -46,6 +54,10 @@ impl<'t> Symbol<'t> { self.token } + pub fn case(&self) -> Case { + Case::None + } + pub fn offset(&self) -> usize { self.offset } @@ -58,6 +70,7 @@ impl<'t> Symbol<'t> { #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Word<'t> { token: &'t str, + case: Case, offset: usize, } @@ -84,14 +97,22 @@ impl<'t> Word<'t> { Ok(item) } - pub(crate) fn new_unchecked(token: &'t str, 
offset: usize) -> Self { - Self { token, offset } + pub(crate) fn new_unchecked(token: &'t str, case: Case, offset: usize) -> Self { + Self { + token, + case, + offset, + } } pub fn token(&self) -> &str { self.token } + pub fn case(&self) -> Case { + self.case + } + pub fn offset(&self) -> usize { self.offset } @@ -127,6 +148,22 @@ impl WordMode { WordMode::Boundary } } + + fn case(self, last: WordMode) -> Case { + match (self, last) { + (WordMode::Uppercase, WordMode::Uppercase) => Case::Scream, + (WordMode::Uppercase, WordMode::Lowercase) => Case::Title, + (WordMode::Lowercase, WordMode::Lowercase) => Case::Lower, + (WordMode::Number, WordMode::Number) => Case::None, + (WordMode::Number, _) + | (_, WordMode::Number) + | (WordMode::Boundary, _) + | (_, WordMode::Boundary) + | (WordMode::Lowercase, WordMode::Uppercase) => { + unreachable!("Invalid case combination: ({:?}, {:?})", self, last) + } + } + } } fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { @@ -135,6 +172,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { let mut char_indices = symbol.char_indices().peekable(); let mut start = 0; let mut start_mode = WordMode::Boundary; + let mut last_mode = WordMode::Boundary; while let Some((i, c)) = char_indices.next() { let cur_mode = WordMode::classify(c); if cur_mode == WordMode::Boundary { @@ -143,13 +181,16 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { } continue; } + if start_mode == WordMode::Boundary { + start_mode = cur_mode; + } if let Some(&(next_i, next)) = char_indices.peek() { // The mode including the current character, assuming the current character does // not result in a word boundary. 
let next_mode = WordMode::classify(next); - match (start_mode, cur_mode, next_mode) { + match (last_mode, cur_mode, next_mode) { // cur_mode is last of current word (_, _, WordMode::Boundary) | (_, WordMode::Lowercase, WordMode::Number) @@ -157,24 +198,36 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { | (_, WordMode::Number, WordMode::Lowercase) | (_, WordMode::Number, WordMode::Uppercase) | (_, WordMode::Lowercase, WordMode::Uppercase) => { - result.push(Word::new_unchecked(&symbol[start..next_i], start + offset)); + let case = start_mode.case(cur_mode); + result.push(Word::new_unchecked( + &symbol[start..next_i], + case, + start + offset, + )); start = next_i; start_mode = WordMode::Boundary; + last_mode = WordMode::Boundary; } // cur_mode is start of next word (WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => { - result.push(Word::new_unchecked(&symbol[start..i], start + offset)); + result.push(Word::new_unchecked( + &symbol[start..i], + Case::Scream, + start + offset, + )); start = i; - start_mode = WordMode::Boundary; + start_mode = cur_mode; + last_mode = WordMode::Boundary; } // No word boundary (_, _, _) => { - start_mode = cur_mode; + last_mode = cur_mode; } } } else { // Collect trailing characters as a word - result.push(Word::new_unchecked(&symbol[start..], start + offset)); + let case = start_mode.case(cur_mode); + result.push(Word::new_unchecked(&symbol[start..], case, start + offset)); break; } } @@ -240,23 +293,65 @@ mod test { #[test] fn split_symbol() { let cases = [ - ("lowercase", &["lowercase"] as &[&str]), - ("Class", &["Class"]), - ("MyClass", &["My", "Class"]), - ("MyC", &["My", "C"]), - ("HTML", &["HTML"]), - ("PDFLoader", &["PDF", "Loader"]), - ("AString", &["A", "String"]), - ("SimpleXMLParser", &["Simple", "XML", "Parser"]), - ("vimRPCPlugin", &["vim", "RPC", "Plugin"]), - ("GL11Version", &["GL", "11", "Version"]), - ("99Bottles", &["99", "Bottles"]), - ("May5", &["May", "5"]), - ("BFG9000", &["BFG", 
"9000"]), + ( + "lowercase", + &[("lowercase", Case::Lower, 0usize)] as &[(&str, Case, usize)], + ), + ("Class", &[("Class", Case::Title, 0)]), + ( + "MyClass", + &[("My", Case::Title, 0), ("Class", Case::Title, 2)], + ), + ("MyC", &[("My", Case::Title, 0), ("C", Case::Scream, 2)]), + ("HTML", &[("HTML", Case::Scream, 0)]), + ( + "PDFLoader", + &[("PDF", Case::Scream, 0), ("Loader", Case::Title, 3)], + ), + ( + "AString", + &[("A", Case::Scream, 0), ("String", Case::Title, 1)], + ), + ( + "SimpleXMLParser", + &[ + ("Simple", Case::Title, 0), + ("XML", Case::Scream, 6), + ("Parser", Case::Title, 9), + ], + ), + ( + "vimRPCPlugin", + &[ + ("vim", Case::Lower, 0), + ("RPC", Case::Scream, 3), + ("Plugin", Case::Title, 6), + ], + ), + ( + "GL11Version", + &[ + ("GL", Case::Scream, 0), + ("11", Case::None, 2), + ("Version", Case::Title, 4), + ], + ), + ( + "99Bottles", + &[("99", Case::None, 0), ("Bottles", Case::Title, 2)], + ), + ("May5", &[("May", Case::Title, 0), ("5", Case::None, 3)]), + ( + "BFG9000", + &[("BFG", Case::Scream, 0), ("9000", Case::None, 3)], + ), ]; for (input, expected) in cases.iter() { let symbol = Symbol::new(input, 0).unwrap(); - let result: Vec<_> = symbol.split().map(|w| w.token).collect(); + let result: Vec<_> = symbol + .split() + .map(|w| (w.token, w.case, w.offset)) + .collect(); assert_eq!(&result, expected); } } From 5bbd6f530ad8f4763b7002481ecf1518ce25a5c1 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Sat, 22 Jun 2019 11:51:15 -0600 Subject: [PATCH 4/8] chore: Fix typo --- src/tokens.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tokens.rs b/src/tokens.rs index d9c0f72..26e01e6 100644 --- a/src/tokens.rs +++ b/src/tokens.rs @@ -123,7 +123,7 @@ impl<'t> Word<'t> { /// The mode is a tri-state which tracks the case of the last cased character of the current /// word. If there is no cased character (either lowercase or uppercase) since the previous /// word boundary, than the mode is `Boundary`. 
If the last cased character is lowercase, then -/// the mode is `Lowercase`. Othertherwise, the mode is `Uppercase`. +/// the mode is `Lowercase`. Otherwise, the mode is `Uppercase`. #[derive(Clone, Copy, PartialEq, Debug)] enum WordMode { /// There have been no lowercase or uppercase characters in the current word. From 859769b835df4f7f1af8e9445f196cc1a501d8c4 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Sat, 22 Jun 2019 11:57:23 -0600 Subject: [PATCH 5/8] refactor: Rename Symbol to Identifier This is more descriptive --- benches/tokenize.rs | 20 ++++++------ src/dict.rs | 5 ++- src/lib.rs | 10 +++--- src/tokens.rs | 75 +++++++++++++++++++++++---------------------- 4 files changed, 58 insertions(+), 52 deletions(-) diff --git a/benches/tokenize.rs b/benches/tokenize.rs index df2f2a6..3f685e7 100644 --- a/benches/tokenize.rs +++ b/benches/tokenize.rs @@ -6,60 +6,60 @@ mod data; #[bench] fn symbol_parse_empty(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::EMPTY.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::EMPTY.as_bytes()).last()); } #[bench] fn symbol_parse_no_tokens(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::NO_TOKENS.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::NO_TOKENS.as_bytes()).last()); } #[bench] fn symbol_parse_single_token(b: &mut test::Bencher) { b.iter(|| { - defenestrate::tokens::Symbol::parse(data::SINGLE_TOKEN.as_bytes()).last(); + defenestrate::tokens::Identifier::parse(data::SINGLE_TOKEN.as_bytes()).last(); }); } #[bench] fn symbol_parse_sherlock(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::SHERLOCK.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::SHERLOCK.as_bytes()).last()); } #[bench] fn symbol_parse_code(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::CODE.as_bytes()).last()); + b.iter(|| 
defenestrate::tokens::Identifier::parse(data::CODE.as_bytes()).last()); } #[bench] fn symbol_parse_corpus(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::CORPUS.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::CORPUS.as_bytes()).last()); } #[bench] fn symbol_split_lowercase_short(b: &mut test::Bencher) { let input = "abcabcabcabc"; - let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap(); b.iter(|| symbol.split().last()); } #[bench] fn symbol_split_lowercase_long(b: &mut test::Bencher) { let input = "abcabcabcabc".repeat(90); - let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap(); b.iter(|| symbol.split().last()); } #[bench] fn symbol_split_mixed_short(b: &mut test::Bencher) { let input = "abcABCAbc123"; - let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap(); b.iter(|| symbol.split().last()); } #[bench] fn symbol_split_mixed_long(b: &mut test::Bencher) { let input = "abcABCAbc123".repeat(90); - let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap(); b.iter(|| symbol.split().last()); } diff --git a/src/dict.rs b/src/dict.rs index 25467d4..7294924 100644 --- a/src/dict.rs +++ b/src/dict.rs @@ -8,7 +8,10 @@ impl Dictionary { Dictionary {} } - pub fn correct_symbol<'s, 'w>(&'s self, _sym: crate::tokens::Symbol<'w>) -> Option<&'s str> { + pub fn correct_ident<'s, 'w>( + &'s self, + _ident: crate::tokens::Identifier<'w>, + ) -> Option<&'s str> { None } diff --git a/src/lib.rs b/src/lib.rs index ed84cdb..df4561f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,21 +21,21 @@ pub fn process_file( File::open(path)?.read_to_end(&mut buffer)?; for (line_idx, line) in 
grep_searcher::LineIter::new(b'\n', &buffer).enumerate() { let line_num = line_idx + 1; - for symbol in tokens::Symbol::parse(line) { - if let Some(correction) = dictionary.correct_symbol(symbol) { - let col_num = symbol.offset(); + for ident in tokens::Identifier::parse(line) { + if let Some(correction) = dictionary.correct_ident(ident) { + let col_num = ident.offset(); let msg = report::Message { path, line, line_num, col_num, - word: symbol.token(), + word: ident.token(), correction, non_exhaustive: (), }; report(msg); } - for word in symbol.split() { + for word in ident.split() { if let Some(correction) = dictionary.correct_word(word) { let col_num = word.offset(); let msg = report::Message { diff --git a/src/tokens.rs b/src/tokens.rs index 26e01e6..5d7503e 100644 --- a/src/tokens.rs +++ b/src/tokens.rs @@ -7,27 +7,27 @@ pub enum Case { } #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Symbol<'t> { +pub struct Identifier<'t> { token: &'t str, offset: usize, } -impl<'t> Symbol<'t> { +impl<'t> Identifier<'t> { pub fn new(token: &'t str, offset: usize) -> Result { let mut itr = Self::parse(token.as_bytes()); let mut item = itr .next() - .ok_or_else(|| failure::format_err!("Invalid symbol (none found): {:?}", token))?; + .ok_or_else(|| failure::format_err!("Invalid ident (none found): {:?}", token))?; if item.offset != 0 { return Err(failure::format_err!( - "Invalid symbol (padding found): {:?}", + "Invalid ident (padding found): {:?}", token )); } item.offset += offset; if itr.next().is_some() { return Err(failure::format_err!( - "Invalid symbol (contains more than one): {:?}", + "Invalid ident (contains more than one): {:?}", token )); } @@ -38,7 +38,7 @@ impl<'t> Symbol<'t> { Self { token, offset } } - pub fn parse(content: &[u8]) -> impl Iterator> { + pub fn parse(content: &[u8]) -> impl Iterator> { lazy_static::lazy_static! 
{ // Getting false positives for this lint #[allow(clippy::invalid_regex)] @@ -46,7 +46,7 @@ impl<'t> Symbol<'t> { } SPLIT.find_iter(content).filter_map(|m| { let s = std::str::from_utf8(m.as_bytes()).ok(); - s.map(|s| Symbol::new_unchecked(s, m.start())) + s.map(|s| Identifier::new_unchecked(s, m.start())) }) } @@ -63,7 +63,7 @@ impl<'t> Symbol<'t> { } pub fn split(&self) -> impl Iterator> { - split_symbol(self.token, self.offset) + split_ident(self.token, self.offset) } } @@ -76,8 +76,8 @@ pub struct Word<'t> { impl<'t> Word<'t> { pub fn new(token: &'t str, offset: usize) -> Result { - Symbol::new(token, offset)?; - let mut itr = split_symbol(token, 0); + Identifier::new(token, offset)?; + let mut itr = split_ident(token, 0); let mut item = itr .next() .ok_or_else(|| failure::format_err!("Invalid word (none found): {:?}", token))?; @@ -166,10 +166,10 @@ impl WordMode { } } -fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { +fn split_ident(ident: &str, offset: usize) -> impl Iterator> { let mut result = vec![]; - let mut char_indices = symbol.char_indices().peekable(); + let mut char_indices = ident.char_indices().peekable(); let mut start = 0; let mut start_mode = WordMode::Boundary; let mut last_mode = WordMode::Boundary; @@ -200,7 +200,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { | (_, WordMode::Lowercase, WordMode::Uppercase) => { let case = start_mode.case(cur_mode); result.push(Word::new_unchecked( - &symbol[start..next_i], + &ident[start..next_i], case, start + offset, )); @@ -211,7 +211,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { // cur_mode is start of next word (WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => { result.push(Word::new_unchecked( - &symbol[start..i], + &ident[start..i], Case::Scream, start + offset, )); @@ -227,7 +227,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator> { } else { // Collect trailing characters as a word let case = 
start_mode.case(cur_mode); - result.push(Word::new_unchecked(&symbol[start..], case, start + offset)); + result.push(Word::new_unchecked(&ident[start..], case, start + offset)); break; } } @@ -242,56 +242,62 @@ mod test { #[test] fn tokenize_empty_is_empty() { let input = b""; - let expected: Vec = vec![]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec = vec![]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_word_is_word() { let input = b"word"; - let expected: Vec = vec![Symbol::new_unchecked("word", 0)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec = vec![Identifier::new_unchecked("word", 0)]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_space_separated_words() { let input = b"A B"; - let expected: Vec = - vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec = vec![ + Identifier::new_unchecked("A", 0), + Identifier::new_unchecked("B", 2), + ]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_dot_separated_words() { let input = b"A.B"; - let expected: Vec = - vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec = vec![ + Identifier::new_unchecked("A", 0), + Identifier::new_unchecked("B", 2), + ]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_namespace_separated_words() { let input = b"A::B"; - let expected: Vec = - vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 3)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec = vec![ + Identifier::new_unchecked("A", 0), + Identifier::new_unchecked("B", 3), + ]; + let actual: Vec<_> = 
Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_underscore_doesnt_separate() { let input = b"A_B"; - let expected: Vec = vec![Symbol::new_unchecked("A_B", 0)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec = vec![Identifier::new_unchecked("A_B", 0)]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] - fn split_symbol() { + fn split_ident() { let cases = [ ( "lowercase", @@ -347,11 +353,8 @@ mod test { ), ]; for (input, expected) in cases.iter() { - let symbol = Symbol::new(input, 0).unwrap(); - let result: Vec<_> = symbol - .split() - .map(|w| (w.token, w.case, w.offset)) - .collect(); + let ident = Identifier::new(input, 0).unwrap(); + let result: Vec<_> = ident.split().map(|w| (w.token, w.case, w.offset)).collect(); assert_eq!(&result, expected); } } From b12e90c141ed9a27fb79496d5ea15a73260b3801 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Sat, 22 Jun 2019 22:01:27 -0600 Subject: [PATCH 6/8] refactor(report): Rename source field --- src/lib.rs | 4 ++-- src/report.rs | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index df4561f..2c9f1f9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,7 +29,7 @@ pub fn process_file( line, line_num, col_num, - word: ident.token(), + typo: ident.token(), correction, non_exhaustive: (), }; @@ -43,7 +43,7 @@ pub fn process_file( line, line_num, col_num, - word: word.token(), + typo: word.token(), correction, non_exhaustive: (), }; diff --git a/src/report.rs b/src/report.rs index f501f64..09e9c07 100644 --- a/src/report.rs +++ b/src/report.rs @@ -7,7 +7,7 @@ pub struct Message<'m> { pub line: &'m [u8], pub line_num: usize, pub col_num: usize, - pub word: &'m str, + pub typo: &'m str, pub correction: &'m str, #[serde(skip)] pub(crate) non_exhaustive: (), @@ -23,7 +23,7 @@ pub fn print_brief(msg: Message) { msg.path.display(), msg.line_num, msg.col_num, - msg.word, + 
msg.typo, msg.correction ); } @@ -33,7 +33,7 @@ pub fn print_long(msg: Message) { let line_indent: String = itertools::repeat_n(" ", line_num.len()).collect(); let hl_indent: String = itertools::repeat_n(" ", msg.col_num).collect(); - let hl: String = itertools::repeat_n("^", msg.word.len()).collect(); + let hl: String = itertools::repeat_n("^", msg.typo.len()).collect(); let line = String::from_utf8_lossy(msg.line); let line = line.replace("\t", " "); @@ -44,7 +44,7 @@ pub fn print_long(msg: Message) { writeln!( handle, "error: `{}` should be `{}`", - msg.word, msg.correction + msg.typo, msg.correction ) .unwrap(); writeln!( From a5b8636bdbd3556d7a9162e0d0faac050d0ddc79 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Mon, 24 Jun 2019 21:45:30 -0600 Subject: [PATCH 7/8] refactor(dict): Allow for owned corrections --- benches/corrections.rs | 5 ++++- src/dict.rs | 8 +++++--- src/report.rs | 5 +++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/benches/corrections.rs b/benches/corrections.rs index 4c16bcc..71c911a 100644 --- a/benches/corrections.rs +++ b/benches/corrections.rs @@ -11,7 +11,10 @@ fn load_corrections(b: &mut test::Bencher) { fn correct_word_hit(b: &mut test::Bencher) { let corrections = defenestrate::Dictionary::new(); let input = defenestrate::tokens::Word::new("successs", 0).unwrap(); - assert_eq!(corrections.correct_word(input), Some("successes")); + assert_eq!( + corrections.correct_word(input), + Some(std::borrow::Cow::Borrowed("successes")) + ); b.iter(|| corrections.correct_word(input)); } diff --git a/src/dict.rs b/src/dict.rs index 7294924..c5160d7 100644 --- a/src/dict.rs +++ b/src/dict.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use unicase::UniCase; #[derive(Default)] @@ -11,12 +13,12 @@ impl Dictionary { pub fn correct_ident<'s, 'w>( &'s self, _ident: crate::tokens::Identifier<'w>, - ) -> Option<&'s str> { + ) -> Option> { None } - pub fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<&'s str> { - 
map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token()) + pub fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option> { + map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token()).map(|s| s.into()) } } diff --git a/src/report.rs b/src/report.rs index 09e9c07..129755a 100644 --- a/src/report.rs +++ b/src/report.rs @@ -1,6 +1,7 @@ +use std::borrow::Cow; use std::io::{self, Write}; -#[derive(Copy, Clone, Debug, Serialize)] +#[derive(Clone, Debug, Serialize)] pub struct Message<'m> { pub path: &'m std::path::Path, #[serde(skip)] @@ -8,7 +9,7 @@ pub struct Message<'m> { pub line_num: usize, pub col_num: usize, pub typo: &'m str, - pub correction: &'m str, + pub correction: Cow<'m, str>, #[serde(skip)] pub(crate) non_exhaustive: (), } From 953064e7d1b12a06e76547f58a834a11de205f84 Mon Sep 17 00:00:00 2001 From: Ed Page Date: Tue, 25 Jun 2019 22:03:10 -0600 Subject: [PATCH 8/8] fix(dict): Fix should match typo's case Fixes #10 --- src/dict.rs | 47 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/src/dict.rs b/src/dict.rs index c5160d7..1861757 100644 --- a/src/dict.rs +++ b/src/dict.rs @@ -2,6 +2,8 @@ use std::borrow::Cow; use unicase::UniCase; +use crate::tokens::Case; + #[derive(Default)] pub struct Dictionary {} @@ -18,7 +20,8 @@ impl Dictionary { } pub fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option> { - map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token()).map(|s| s.into()) + map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token()) + .map(|s| case_correct(s, word.case())) } } @@ -37,3 +40,45 @@ fn map_lookup( map.get(&UniCase(key)).cloned() } } + +fn case_correct(correction: &str, case: Case) -> Cow<'_, str> { + match case { + Case::Lower | Case::None => correction.into(), + Case::Title => { + let mut title = String::with_capacity(correction.as_bytes().len()); + let mut char_indices = correction.char_indices(); + if let Some((_, c)) 
= char_indices.next() { + title.extend(c.to_uppercase()); + if let Some((i, _)) = char_indices.next() { + title.push_str(&correction[i..]); + } + } + title.into() + } + Case::Scream => correction + .chars() + .flat_map(|c| c.to_uppercase()) + .collect::() + .into(), + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_case_correct() { + let cases = [ + ("foo", Case::Lower, "foo"), + ("foo", Case::None, "foo"), + ("foo", Case::Title, "Foo"), + ("foo", Case::Scream, "FOO"), + ("fOo", Case::None, "fOo"), + ]; + for (correction, case, expected) in cases.iter() { + let actual = case_correct(correction, *case); + assert_eq!(*expected, actual); + } + } +}