refactor: Rename Symbol to Identifier

The name Identifier is more descriptive of what this type represents.
Ed Page 2019-06-22 11:57:23 -06:00
parent 5bbd6f530a
commit 859769b835
4 changed files with 58 additions and 52 deletions
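
The rename is purely mechanical; call sites keep their shape. A minimal before/after sketch ("hello_world" is an arbitrary example input; the defenestrate paths are those used in the benchmarks below):

    // Before this commit:
    let sym = defenestrate::tokens::Symbol::new("hello_world", 0).unwrap();
    let words: Vec<_> = sym.split().collect();

    // After this commit:
    let ident = defenestrate::tokens::Identifier::new("hello_world", 0).unwrap();
    let words: Vec<_> = ident.split().collect();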


@@ -6,60 +6,60 @@ mod data;
 #[bench]
 fn symbol_parse_empty(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::EMPTY.as_bytes()).last());
+    b.iter(|| defenestrate::tokens::Identifier::parse(data::EMPTY.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_no_tokens(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::NO_TOKENS.as_bytes()).last());
+    b.iter(|| defenestrate::tokens::Identifier::parse(data::NO_TOKENS.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_single_token(b: &mut test::Bencher) {
     b.iter(|| {
-        defenestrate::tokens::Symbol::parse(data::SINGLE_TOKEN.as_bytes()).last();
+        defenestrate::tokens::Identifier::parse(data::SINGLE_TOKEN.as_bytes()).last();
     });
 }

 #[bench]
 fn symbol_parse_sherlock(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::SHERLOCK.as_bytes()).last());
+    b.iter(|| defenestrate::tokens::Identifier::parse(data::SHERLOCK.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_code(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::CODE.as_bytes()).last());
+    b.iter(|| defenestrate::tokens::Identifier::parse(data::CODE.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_corpus(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::CORPUS.as_bytes()).last());
+    b.iter(|| defenestrate::tokens::Identifier::parse(data::CORPUS.as_bytes()).last());
 }

 #[bench]
 fn symbol_split_lowercase_short(b: &mut test::Bencher) {
     let input = "abcabcabcabc";
-    let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap();
+    let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap();
     b.iter(|| symbol.split().last());
 }

 #[bench]
 fn symbol_split_lowercase_long(b: &mut test::Bencher) {
     let input = "abcabcabcabc".repeat(90);
-    let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap();
+    let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap();
     b.iter(|| symbol.split().last());
 }

 #[bench]
 fn symbol_split_mixed_short(b: &mut test::Bencher) {
     let input = "abcABCAbc123";
-    let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap();
+    let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap();
     b.iter(|| symbol.split().last());
 }

 #[bench]
 fn symbol_split_mixed_long(b: &mut test::Bencher) {
     let input = "abcABCAbc123".repeat(90);
-    let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap();
+    let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap();
     b.iter(|| symbol.split().last());
 }


@@ -8,7 +8,10 @@ impl Dictionary {
         Dictionary {}
     }

-    pub fn correct_symbol<'s, 'w>(&'s self, _sym: crate::tokens::Symbol<'w>) -> Option<&'s str> {
+    pub fn correct_ident<'s, 'w>(
+        &'s self,
+        _ident: crate::tokens::Identifier<'w>,
+    ) -> Option<&'s str> {
         None
     }


@@ -21,21 +21,21 @@ pub fn process_file(
     File::open(path)?.read_to_end(&mut buffer)?;
     for (line_idx, line) in grep_searcher::LineIter::new(b'\n', &buffer).enumerate() {
         let line_num = line_idx + 1;
-        for symbol in tokens::Symbol::parse(line) {
-            if let Some(correction) = dictionary.correct_symbol(symbol) {
-                let col_num = symbol.offset();
+        for ident in tokens::Identifier::parse(line) {
+            if let Some(correction) = dictionary.correct_ident(ident) {
+                let col_num = ident.offset();
                 let msg = report::Message {
                     path,
                     line,
                     line_num,
                     col_num,
-                    word: symbol.token(),
+                    word: ident.token(),
                     correction,
                     non_exhaustive: (),
                 };
                 report(msg);
             }
-            for word in symbol.split() {
+            for word in ident.split() {
                 if let Some(correction) = dictionary.correct_word(word) {
                     let col_num = word.offset();
                     let msg = report::Message {
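
The hunk above checks each token at two granularities: the whole identifier via correct_ident, then each case-split Word via correct_word. A condensed sketch of that flow; check_line is a hypothetical helper name, while all other names are taken from the diff:

    fn check_line(dictionary: &Dictionary, line: &[u8]) {
        for ident in tokens::Identifier::parse(line) {
            // Whole-identifier lookup (e.g. "helloworld" as one token).
            if let Some(correction) = dictionary.correct_ident(ident) {
                println!("{}: {} -> {}", ident.offset(), ident.token(), correction);
            }
            // Case-split lookup (e.g. "helloWorld" splits into "hello" and "World").
            for word in ident.split() {
                if let Some(correction) = dictionary.correct_word(word) {
                    println!("{}: {}", word.offset(), correction);
                }
            }
        }
    }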


@@ -7,27 +7,27 @@ pub enum Case {
 }

 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub struct Symbol<'t> {
+pub struct Identifier<'t> {
     token: &'t str,
     offset: usize,
 }

-impl<'t> Symbol<'t> {
+impl<'t> Identifier<'t> {
     pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
         let mut itr = Self::parse(token.as_bytes());
         let mut item = itr
             .next()
-            .ok_or_else(|| failure::format_err!("Invalid symbol (none found): {:?}", token))?;
+            .ok_or_else(|| failure::format_err!("Invalid ident (none found): {:?}", token))?;
         if item.offset != 0 {
             return Err(failure::format_err!(
-                "Invalid symbol (padding found): {:?}",
+                "Invalid ident (padding found): {:?}",
                 token
             ));
         }
         item.offset += offset;
         if itr.next().is_some() {
             return Err(failure::format_err!(
-                "Invalid symbol (contains more than one): {:?}",
+                "Invalid ident (contains more than one): {:?}",
                 token
             ));
         }
@@ -38,7 +38,7 @@ impl<'t> Symbol<'t> {
         Self { token, offset }
     }

-    pub fn parse(content: &[u8]) -> impl Iterator<Item = Symbol<'_>> {
+    pub fn parse(content: &[u8]) -> impl Iterator<Item = Identifier<'_>> {
         lazy_static::lazy_static! {
             // Getting false positives for this lint
             #[allow(clippy::invalid_regex)]
@@ -46,7 +46,7 @@ impl<'t> Symbol<'t> {
         }
         SPLIT.find_iter(content).filter_map(|m| {
             let s = std::str::from_utf8(m.as_bytes()).ok();
-            s.map(|s| Symbol::new_unchecked(s, m.start()))
+            s.map(|s| Identifier::new_unchecked(s, m.start()))
         })
     }
@@ -63,7 +63,7 @@ impl<'t> Symbol<'t> {
     }

     pub fn split(&self) -> impl Iterator<Item = Word<'_>> {
-        split_symbol(self.token, self.offset)
+        split_ident(self.token, self.offset)
     }
 }
@@ -76,8 +76,8 @@ pub struct Word<'t> {
 impl<'t> Word<'t> {
     pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
-        Symbol::new(token, offset)?;
-        let mut itr = split_symbol(token, 0);
+        Identifier::new(token, offset)?;
+        let mut itr = split_ident(token, 0);
         let mut item = itr
             .next()
             .ok_or_else(|| failure::format_err!("Invalid word (none found): {:?}", token))?;
@@ -166,10 +166,10 @@ impl WordMode {
     }
 }

-fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
+fn split_ident(ident: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
     let mut result = vec![];
-    let mut char_indices = symbol.char_indices().peekable();
+    let mut char_indices = ident.char_indices().peekable();
     let mut start = 0;
     let mut start_mode = WordMode::Boundary;
     let mut last_mode = WordMode::Boundary;
@@ -200,7 +200,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
                 | (_, WordMode::Lowercase, WordMode::Uppercase) => {
                     let case = start_mode.case(cur_mode);
                     result.push(Word::new_unchecked(
-                        &symbol[start..next_i],
+                        &ident[start..next_i],
                         case,
                         start + offset,
                     ));
@@ -211,7 +211,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
                 // cur_mode is start of next word
                 (WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => {
                     result.push(Word::new_unchecked(
-                        &symbol[start..i],
+                        &ident[start..i],
                         Case::Scream,
                         start + offset,
                     ));
@@ -227,7 +227,7 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
             } else {
                 // Collect trailing characters as a word
                 let case = start_mode.case(cur_mode);
-                result.push(Word::new_unchecked(&symbol[start..], case, start + offset));
+                result.push(Word::new_unchecked(&ident[start..], case, start + offset));
                 break;
             }
         }
@@ -242,56 +242,62 @@ mod test {
     #[test]
     fn tokenize_empty_is_empty() {
         let input = b"";
-        let expected: Vec<Symbol> = vec![];
-        let actual: Vec<_> = Symbol::parse(input).collect();
+        let expected: Vec<Identifier> = vec![];
+        let actual: Vec<_> = Identifier::parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_word_is_word() {
         let input = b"word";
-        let expected: Vec<Symbol> = vec![Symbol::new_unchecked("word", 0)];
-        let actual: Vec<_> = Symbol::parse(input).collect();
+        let expected: Vec<Identifier> = vec![Identifier::new_unchecked("word", 0)];
+        let actual: Vec<_> = Identifier::parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_space_separated_words() {
         let input = b"A B";
-        let expected: Vec<Symbol> =
-            vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)];
-        let actual: Vec<_> = Symbol::parse(input).collect();
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("A", 0),
+            Identifier::new_unchecked("B", 2),
+        ];
+        let actual: Vec<_> = Identifier::parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_dot_separated_words() {
         let input = b"A.B";
-        let expected: Vec<Symbol> =
-            vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)];
-        let actual: Vec<_> = Symbol::parse(input).collect();
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("A", 0),
+            Identifier::new_unchecked("B", 2),
+        ];
+        let actual: Vec<_> = Identifier::parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_namespace_separated_words() {
         let input = b"A::B";
-        let expected: Vec<Symbol> =
-            vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 3)];
-        let actual: Vec<_> = Symbol::parse(input).collect();
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("A", 0),
+            Identifier::new_unchecked("B", 3),
+        ];
+        let actual: Vec<_> = Identifier::parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_underscore_doesnt_separate() {
         let input = b"A_B";
-        let expected: Vec<Symbol> = vec![Symbol::new_unchecked("A_B", 0)];
-        let actual: Vec<_> = Symbol::parse(input).collect();
+        let expected: Vec<Identifier> = vec![Identifier::new_unchecked("A_B", 0)];
+        let actual: Vec<_> = Identifier::parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
-    fn split_symbol() {
+    fn split_ident() {
         let cases = [
             (
                 "lowercase",
@@ -347,11 +353,8 @@ mod test {
             ),
         ];
         for (input, expected) in cases.iter() {
-            let symbol = Symbol::new(input, 0).unwrap();
-            let result: Vec<_> = symbol
-                .split()
-                .map(|w| (w.token, w.case, w.offset))
-                .collect();
+            let ident = Identifier::new(input, 0).unwrap();
+            let result: Vec<_> = ident.split().map(|w| (w.token, w.case, w.offset)).collect();
             assert_eq!(&result, expected);
         }
     }
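
For reference, the tests above pin down the tokenizer semantics that the rename leaves untouched: Identifier::parse splits on whitespace and punctuation such as "." and "::" but not on underscores, and split() further breaks an identifier into case-delimited Words. A small sketch of that behavior, with offsets as asserted in the tests:

    // "A::B" yields two identifiers, "A" at byte offset 0 and "B" at offset 3.
    let idents: Vec<_> = Identifier::parse(b"A::B").collect();

    // Underscores do not separate: "A_B" is a single identifier...
    let ident = Identifier::new("A_B", 0).unwrap();

    // ...which split() then breaks into case-delimited Words carrying
    // (token, case, offset), as exercised by the split_ident test.
    let words: Vec<_> = ident.split().collect();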