diff --git a/benches/tokenize.rs b/benches/tokenize.rs index df2f2a6..3f685e7 100644 --- a/benches/tokenize.rs +++ b/benches/tokenize.rs @@ -6,60 +6,60 @@ mod data; #[bench] fn symbol_parse_empty(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::EMPTY.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::EMPTY.as_bytes()).last()); } #[bench] fn symbol_parse_no_tokens(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::NO_TOKENS.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::NO_TOKENS.as_bytes()).last()); } #[bench] fn symbol_parse_single_token(b: &mut test::Bencher) { b.iter(|| { - defenestrate::tokens::Symbol::parse(data::SINGLE_TOKEN.as_bytes()).last(); + defenestrate::tokens::Identifier::parse(data::SINGLE_TOKEN.as_bytes()).last(); }); } #[bench] fn symbol_parse_sherlock(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::SHERLOCK.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::SHERLOCK.as_bytes()).last()); } #[bench] fn symbol_parse_code(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::CODE.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::CODE.as_bytes()).last()); } #[bench] fn symbol_parse_corpus(b: &mut test::Bencher) { - b.iter(|| defenestrate::tokens::Symbol::parse(data::CORPUS.as_bytes()).last()); + b.iter(|| defenestrate::tokens::Identifier::parse(data::CORPUS.as_bytes()).last()); } #[bench] fn symbol_split_lowercase_short(b: &mut test::Bencher) { let input = "abcabcabcabc"; - let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap(); b.iter(|| symbol.split().last()); } #[bench] fn symbol_split_lowercase_long(b: &mut test::Bencher) { let input = "abcabcabcabc".repeat(90); - let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap(); + let 
symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap(); b.iter(|| symbol.split().last()); } #[bench] fn symbol_split_mixed_short(b: &mut test::Bencher) { let input = "abcABCAbc123"; - let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap(); b.iter(|| symbol.split().last()); } #[bench] fn symbol_split_mixed_long(b: &mut test::Bencher) { let input = "abcABCAbc123".repeat(90); - let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap(); + let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap(); b.iter(|| symbol.split().last()); } diff --git a/src/dict.rs b/src/dict.rs index 25467d4..7294924 100644 --- a/src/dict.rs +++ b/src/dict.rs @@ -8,7 +8,10 @@ impl Dictionary { Dictionary {} } - pub fn correct_symbol<'s, 'w>(&'s self, _sym: crate::tokens::Symbol<'w>) -> Option<&'s str> { + pub fn correct_ident<'s, 'w>( + &'s self, + _ident: crate::tokens::Identifier<'w>, + ) -> Option<&'s str> { None } diff --git a/src/lib.rs b/src/lib.rs index ed84cdb..df4561f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,21 +21,21 @@ pub fn process_file( File::open(path)?.read_to_end(&mut buffer)?; for (line_idx, line) in grep_searcher::LineIter::new(b'\n', &buffer).enumerate() { let line_num = line_idx + 1; - for symbol in tokens::Symbol::parse(line) { - if let Some(correction) = dictionary.correct_symbol(symbol) { - let col_num = symbol.offset(); + for ident in tokens::Identifier::parse(line) { + if let Some(correction) = dictionary.correct_ident(ident) { + let col_num = ident.offset(); let msg = report::Message { path, line, line_num, col_num, - word: symbol.token(), + word: ident.token(), correction, non_exhaustive: (), }; report(msg); } - for word in symbol.split() { + for word in ident.split() { if let Some(correction) = dictionary.correct_word(word) { let col_num = word.offset(); let msg = report::Message { diff --git a/src/tokens.rs b/src/tokens.rs index 
26e01e6..5d7503e 100644 --- a/src/tokens.rs +++ b/src/tokens.rs @@ -7,27 +7,27 @@ pub enum Case { } #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Symbol<'t> { +pub struct Identifier<'t> { token: &'t str, offset: usize, } -impl<'t> Symbol<'t> { +impl<'t> Identifier<'t> { pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> { let mut itr = Self::parse(token.as_bytes()); let mut item = itr .next() - .ok_or_else(|| failure::format_err!("Invalid symbol (none found): {:?}", token))?; + .ok_or_else(|| failure::format_err!("Invalid ident (none found): {:?}", token))?; if item.offset != 0 { return Err(failure::format_err!( - "Invalid symbol (padding found): {:?}", + "Invalid ident (padding found): {:?}", token )); } item.offset += offset; if itr.next().is_some() { return Err(failure::format_err!( - "Invalid symbol (contains more than one): {:?}", + "Invalid ident (contains more than one): {:?}", token )); } @@ -38,7 +38,7 @@ impl<'t> Symbol<'t> { Self { token, offset } } - pub fn parse(content: &[u8]) -> impl Iterator<Item = Symbol<'_>> { + pub fn parse(content: &[u8]) -> impl Iterator<Item = Identifier<'_>> { lazy_static::lazy_static!
{ // Getting false positives for this lint #[allow(clippy::invalid_regex)] @@ -46,7 +46,7 @@ impl<'t> Symbol<'t> { } SPLIT.find_iter(content).filter_map(|m| { let s = std::str::from_utf8(m.as_bytes()).ok(); - s.map(|s| Symbol::new_unchecked(s, m.start())) + s.map(|s| Identifier::new_unchecked(s, m.start())) }) } @@ -63,7 +63,7 @@ impl<'t> Symbol<'t> { } pub fn split(&self) -> impl Iterator<Item = Word<'t>> { - split_symbol(self.token, self.offset) + split_ident(self.token, self.offset) } } @@ -76,8 +76,8 @@ pub struct Word<'t> { impl<'t> Word<'t> { pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> { - Symbol::new(token, offset)?; - let mut itr = split_symbol(token, 0); + Identifier::new(token, offset)?; + let mut itr = split_ident(token, 0); let mut item = itr .next() .ok_or_else(|| failure::format_err!("Invalid word (none found): {:?}", token))?; @@ -166,10 +166,10 @@ impl WordMode { } } -fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> { +fn split_ident(ident: &str, offset: usize) -> impl Iterator<Item = Word<'_>> { let mut result = vec![]; - let mut char_indices = symbol.char_indices().peekable(); + let mut char_indices = ident.char_indices().peekable(); let mut start = 0; let mut start_mode = WordMode::Boundary; let mut last_mode = WordMode::Boundary; @@ -200,7 +200,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> { | (_, WordMode::Lowercase, WordMode::Uppercase) => { let case = start_mode.case(cur_mode); result.push(Word::new_unchecked( - &symbol[start..next_i], + &ident[start..next_i], case, start + offset, )); @@ -211,7 +211,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> { // cur_mode is start of next word (WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => { result.push(Word::new_unchecked( - &symbol[start..i], + &ident[start..i], Case::Scream, start + offset, )); @@ -227,7 +227,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> { } else { // Collect trailing characters as a word let case =
start_mode.case(cur_mode); - result.push(Word::new_unchecked(&symbol[start..], case, start + offset)); + result.push(Word::new_unchecked(&ident[start..], case, start + offset)); break; } } @@ -242,56 +242,62 @@ mod test { #[test] fn tokenize_empty_is_empty() { let input = b""; - let expected: Vec<Symbol> = vec![]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec<Identifier> = vec![]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_word_is_word() { let input = b"word"; - let expected: Vec<Symbol> = vec![Symbol::new_unchecked("word", 0)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec<Identifier> = vec![Identifier::new_unchecked("word", 0)]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_space_separated_words() { let input = b"A B"; - let expected: Vec<Symbol> = - vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec<Identifier> = vec![ + Identifier::new_unchecked("A", 0), + Identifier::new_unchecked("B", 2), + ]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_dot_separated_words() { let input = b"A.B"; - let expected: Vec<Symbol> = - vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec<Identifier> = vec![ + Identifier::new_unchecked("A", 0), + Identifier::new_unchecked("B", 2), + ]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_namespace_separated_words() { let input = b"A::B"; - let expected: Vec<Symbol> = - vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 3)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec<Identifier> = vec![ + Identifier::new_unchecked("A", 0), + Identifier::new_unchecked("B", 3), + ]; + let actual: Vec<_> =
Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] fn tokenize_underscore_doesnt_separate() { let input = b"A_B"; - let expected: Vec<Symbol> = vec![Symbol::new_unchecked("A_B", 0)]; - let actual: Vec<_> = Symbol::parse(input).collect(); + let expected: Vec<Identifier> = vec![Identifier::new_unchecked("A_B", 0)]; + let actual: Vec<_> = Identifier::parse(input).collect(); assert_eq!(expected, actual); } #[test] - fn split_symbol() { + fn split_ident() { let cases = [ ( "lowercase", @@ -347,11 +353,8 @@ mod test { ), ]; for (input, expected) in cases.iter() { - let symbol = Symbol::new(input, 0).unwrap(); - let result: Vec<_> = symbol - .split() - .map(|w| (w.token, w.case, w.offset)) - .collect(); + let ident = Identifier::new(input, 0).unwrap(); + let result: Vec<_> = ident.split().map(|w| (w.token, w.case, w.offset)).collect(); assert_eq!(&result, expected); } }