Merge pull request #13 from epage/fix

fix(dict): Fix should match typo's case
This commit is contained in:
Ed Page 2019-06-26 07:40:38 -06:00 committed by GitHub
commit 5ef815390f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 250 additions and 87 deletions

View file

@ -11,7 +11,10 @@ fn load_corrections(b: &mut test::Bencher) {
fn correct_word_hit(b: &mut test::Bencher) { fn correct_word_hit(b: &mut test::Bencher) {
let corrections = defenestrate::Dictionary::new(); let corrections = defenestrate::Dictionary::new();
let input = defenestrate::tokens::Word::new("successs", 0).unwrap(); let input = defenestrate::tokens::Word::new("successs", 0).unwrap();
assert_eq!(corrections.correct_word(input), Some("successes")); assert_eq!(
corrections.correct_word(input),
Some(std::borrow::Cow::Borrowed("successes"))
);
b.iter(|| corrections.correct_word(input)); b.iter(|| corrections.correct_word(input));
} }

View file

@ -6,60 +6,60 @@ mod data;
#[bench] #[bench]
fn symbol_parse_empty(b: &mut test::Bencher) { fn symbol_parse_empty(b: &mut test::Bencher) {
b.iter(|| defenestrate::tokens::Symbol::parse(data::EMPTY.as_bytes()).last()); b.iter(|| defenestrate::tokens::Identifier::parse(data::EMPTY.as_bytes()).last());
} }
#[bench] #[bench]
fn symbol_parse_no_tokens(b: &mut test::Bencher) { fn symbol_parse_no_tokens(b: &mut test::Bencher) {
b.iter(|| defenestrate::tokens::Symbol::parse(data::NO_TOKENS.as_bytes()).last()); b.iter(|| defenestrate::tokens::Identifier::parse(data::NO_TOKENS.as_bytes()).last());
} }
#[bench] #[bench]
fn symbol_parse_single_token(b: &mut test::Bencher) { fn symbol_parse_single_token(b: &mut test::Bencher) {
b.iter(|| { b.iter(|| {
defenestrate::tokens::Symbol::parse(data::SINGLE_TOKEN.as_bytes()).last(); defenestrate::tokens::Identifier::parse(data::SINGLE_TOKEN.as_bytes()).last();
}); });
} }
#[bench] #[bench]
fn symbol_parse_sherlock(b: &mut test::Bencher) { fn symbol_parse_sherlock(b: &mut test::Bencher) {
b.iter(|| defenestrate::tokens::Symbol::parse(data::SHERLOCK.as_bytes()).last()); b.iter(|| defenestrate::tokens::Identifier::parse(data::SHERLOCK.as_bytes()).last());
} }
#[bench] #[bench]
fn symbol_parse_code(b: &mut test::Bencher) { fn symbol_parse_code(b: &mut test::Bencher) {
b.iter(|| defenestrate::tokens::Symbol::parse(data::CODE.as_bytes()).last()); b.iter(|| defenestrate::tokens::Identifier::parse(data::CODE.as_bytes()).last());
} }
#[bench] #[bench]
fn symbol_parse_corpus(b: &mut test::Bencher) { fn symbol_parse_corpus(b: &mut test::Bencher) {
b.iter(|| defenestrate::tokens::Symbol::parse(data::CORPUS.as_bytes()).last()); b.iter(|| defenestrate::tokens::Identifier::parse(data::CORPUS.as_bytes()).last());
} }
#[bench] #[bench]
fn symbol_split_lowercase_short(b: &mut test::Bencher) { fn symbol_split_lowercase_short(b: &mut test::Bencher) {
let input = "abcabcabcabc"; let input = "abcabcabcabc";
let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap(); let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap();
b.iter(|| symbol.split().last()); b.iter(|| symbol.split().last());
} }
#[bench] #[bench]
fn symbol_split_lowercase_long(b: &mut test::Bencher) { fn symbol_split_lowercase_long(b: &mut test::Bencher) {
let input = "abcabcabcabc".repeat(90); let input = "abcabcabcabc".repeat(90);
let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap(); let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap();
b.iter(|| symbol.split().last()); b.iter(|| symbol.split().last());
} }
#[bench] #[bench]
fn symbol_split_mixed_short(b: &mut test::Bencher) { fn symbol_split_mixed_short(b: &mut test::Bencher) {
let input = "abcABCAbc123"; let input = "abcABCAbc123";
let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap(); let symbol = defenestrate::tokens::Identifier::new(input, 0).unwrap();
b.iter(|| symbol.split().last()); b.iter(|| symbol.split().last());
} }
#[bench] #[bench]
fn symbol_split_mixed_long(b: &mut test::Bencher) { fn symbol_split_mixed_long(b: &mut test::Bencher) {
let input = "abcABCAbc123".repeat(90); let input = "abcABCAbc123".repeat(90);
let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap(); let symbol = defenestrate::tokens::Identifier::new(&input, 0).unwrap();
b.iter(|| symbol.split().last()); b.iter(|| symbol.split().last());
} }

View file

@ -1,5 +1,9 @@
use std::borrow::Cow;
use unicase::UniCase; use unicase::UniCase;
use crate::tokens::Case;
#[derive(Default)] #[derive(Default)]
pub struct Dictionary {} pub struct Dictionary {}
@ -8,12 +12,16 @@ impl Dictionary {
Dictionary {} Dictionary {}
} }
pub fn correct_symbol<'s, 'w>(&'s self, _sym: crate::tokens::Symbol<'w>) -> Option<&'s str> { pub fn correct_ident<'s, 'w>(
&'s self,
_ident: crate::tokens::Identifier<'w>,
) -> Option<Cow<'s, str>> {
None None
} }
pub fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<&'s str> { pub fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<Cow<'s, str>> {
map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token()) map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token())
.map(|s| case_correct(s, word.case()))
} }
} }
@ -32,3 +40,45 @@ fn map_lookup(
map.get(&UniCase(key)).cloned() map.get(&UniCase(key)).cloned()
} }
} }
fn case_correct(correction: &str, case: Case) -> Cow<'_, str> {
match case {
Case::Lower | Case::None => correction.into(),
Case::Title => {
let mut title = String::with_capacity(correction.as_bytes().len());
let mut char_indices = correction.char_indices();
if let Some((_, c)) = char_indices.next() {
title.extend(c.to_uppercase());
if let Some((i, _)) = char_indices.next() {
title.push_str(&correction[i..]);
}
}
title.into()
}
Case::Scream => correction
.chars()
.flat_map(|c| c.to_uppercase())
.collect::<String>()
.into(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_case_correct() {
let cases = [
("foo", Case::Lower, "foo"),
("foo", Case::None, "foo"),
("foo", Case::Title, "Foo"),
("foo", Case::Scream, "FOO"),
("fOo", Case::None, "fOo"),
];
for (correction, case, expected) in cases.iter() {
let actual = case_correct(correction, *case);
assert_eq!(*expected, actual);
}
}
}

View file

@ -21,21 +21,21 @@ pub fn process_file(
File::open(path)?.read_to_end(&mut buffer)?; File::open(path)?.read_to_end(&mut buffer)?;
for (line_idx, line) in grep_searcher::LineIter::new(b'\n', &buffer).enumerate() { for (line_idx, line) in grep_searcher::LineIter::new(b'\n', &buffer).enumerate() {
let line_num = line_idx + 1; let line_num = line_idx + 1;
for symbol in tokens::Symbol::parse(line) { for ident in tokens::Identifier::parse(line) {
if let Some(correction) = dictionary.correct_symbol(symbol) { if let Some(correction) = dictionary.correct_ident(ident) {
let col_num = symbol.offset(); let col_num = ident.offset();
let msg = report::Message { let msg = report::Message {
path, path,
line, line,
line_num, line_num,
col_num, col_num,
word: symbol.token(), typo: ident.token(),
correction, correction,
non_exhaustive: (), non_exhaustive: (),
}; };
report(msg); report(msg);
} }
for word in symbol.split() { for word in ident.split() {
if let Some(correction) = dictionary.correct_word(word) { if let Some(correction) = dictionary.correct_word(word) {
let col_num = word.offset(); let col_num = word.offset();
let msg = report::Message { let msg = report::Message {
@ -43,7 +43,7 @@ pub fn process_file(
line, line,
line_num, line_num,
col_num, col_num,
word: word.token(), typo: word.token(),
correction, correction,
non_exhaustive: (), non_exhaustive: (),
}; };

View file

@ -1,12 +1,15 @@
#[derive(Copy, Clone, Debug, Serialize)] use std::borrow::Cow;
use std::io::{self, Write};
#[derive(Clone, Debug, Serialize)]
pub struct Message<'m> { pub struct Message<'m> {
pub path: &'m std::path::Path, pub path: &'m std::path::Path,
#[serde(skip)] #[serde(skip)]
pub line: &'m [u8], pub line: &'m [u8],
pub line_num: usize, pub line_num: usize,
pub col_num: usize, pub col_num: usize,
pub word: &'m str, pub typo: &'m str,
pub correction: &'m str, pub correction: Cow<'m, str>,
#[serde(skip)] #[serde(skip)]
pub(crate) non_exhaustive: (), pub(crate) non_exhaustive: (),
} }
@ -21,7 +24,7 @@ pub fn print_brief(msg: Message) {
msg.path.display(), msg.path.display(),
msg.line_num, msg.line_num,
msg.col_num, msg.col_num,
msg.word, msg.typo,
msg.correction msg.correction
); );
} }
@ -31,23 +34,32 @@ pub fn print_long(msg: Message) {
let line_indent: String = itertools::repeat_n(" ", line_num.len()).collect(); let line_indent: String = itertools::repeat_n(" ", line_num.len()).collect();
let hl_indent: String = itertools::repeat_n(" ", msg.col_num).collect(); let hl_indent: String = itertools::repeat_n(" ", msg.col_num).collect();
let hl: String = itertools::repeat_n("^", msg.word.len()).collect(); let hl: String = itertools::repeat_n("^", msg.typo.len()).collect();
println!("error: `{}` should be `{}`", msg.word, msg.correction); let line = String::from_utf8_lossy(msg.line);
println!( let line = line.replace("\t", " ");
let stdout = io::stdout();
let mut handle = stdout.lock();
writeln!(
handle,
"error: `{}` should be `{}`",
msg.typo, msg.correction
)
.unwrap();
writeln!(
handle,
" --> {}:{}:{}", " --> {}:{}:{}",
msg.path.display(), msg.path.display(),
msg.line_num, msg.line_num,
msg.col_num msg.col_num
); )
println!("{} |", line_indent); .unwrap();
println!( writeln!(handle, "{} |", line_indent).unwrap();
"{} | {}", writeln!(handle, "{} | {}", msg.line_num, line.trim_end()).unwrap();
msg.line_num, writeln!(handle, "{} | {}{}", line_indent, hl_indent, hl).unwrap();
String::from_utf8_lossy(msg.line).trim_end() writeln!(handle, "{} |", line_indent).unwrap();
);
println!("{} | {}{}", line_indent, hl_indent, hl);
println!("{} |", line_indent);
} }
pub fn print_json(msg: Message) { pub fn print_json(msg: Message) {

View file

@ -1,25 +1,33 @@
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Symbol<'t> { pub enum Case {
Title,
Lower,
Scream,
None,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Identifier<'t> {
token: &'t str, token: &'t str,
offset: usize, offset: usize,
} }
impl<'t> Symbol<'t> { impl<'t> Identifier<'t> {
pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> { pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
let mut itr = Self::parse(token.as_bytes()); let mut itr = Self::parse(token.as_bytes());
let mut item = itr let mut item = itr
.next() .next()
.ok_or_else(|| failure::format_err!("Invalid symbol (none found): {:?}", token))?; .ok_or_else(|| failure::format_err!("Invalid ident (none found): {:?}", token))?;
if item.offset != 0 { if item.offset != 0 {
return Err(failure::format_err!( return Err(failure::format_err!(
"Invalid symbol (padding found): {:?}", "Invalid ident (padding found): {:?}",
token token
)); ));
} }
item.offset += offset; item.offset += offset;
if itr.next().is_some() { if itr.next().is_some() {
return Err(failure::format_err!( return Err(failure::format_err!(
"Invalid symbol (contains more than one): {:?}", "Invalid ident (contains more than one): {:?}",
token token
)); ));
} }
@ -30,7 +38,7 @@ impl<'t> Symbol<'t> {
Self { token, offset } Self { token, offset }
} }
pub fn parse(content: &[u8]) -> impl Iterator<Item = Symbol<'_>> { pub fn parse(content: &[u8]) -> impl Iterator<Item = Identifier<'_>> {
lazy_static::lazy_static! { lazy_static::lazy_static! {
// Getting false positives for this lint // Getting false positives for this lint
#[allow(clippy::invalid_regex)] #[allow(clippy::invalid_regex)]
@ -38,7 +46,7 @@ impl<'t> Symbol<'t> {
} }
SPLIT.find_iter(content).filter_map(|m| { SPLIT.find_iter(content).filter_map(|m| {
let s = std::str::from_utf8(m.as_bytes()).ok(); let s = std::str::from_utf8(m.as_bytes()).ok();
s.map(|s| Symbol::new_unchecked(s, m.start())) s.map(|s| Identifier::new_unchecked(s, m.start()))
}) })
} }
@ -46,25 +54,30 @@ impl<'t> Symbol<'t> {
self.token self.token
} }
pub fn case(&self) -> Case {
Case::None
}
pub fn offset(&self) -> usize { pub fn offset(&self) -> usize {
self.offset self.offset
} }
pub fn split(&self) -> impl Iterator<Item = Word<'_>> { pub fn split(&self) -> impl Iterator<Item = Word<'_>> {
split_symbol(self.token, self.offset) split_ident(self.token, self.offset)
} }
} }
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Word<'t> { pub struct Word<'t> {
token: &'t str, token: &'t str,
case: Case,
offset: usize, offset: usize,
} }
impl<'t> Word<'t> { impl<'t> Word<'t> {
pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> { pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
Symbol::new(token, offset)?; Identifier::new(token, offset)?;
let mut itr = split_symbol(token, 0); let mut itr = split_ident(token, 0);
let mut item = itr let mut item = itr
.next() .next()
.ok_or_else(|| failure::format_err!("Invalid word (none found): {:?}", token))?; .ok_or_else(|| failure::format_err!("Invalid word (none found): {:?}", token))?;
@ -84,14 +97,22 @@ impl<'t> Word<'t> {
Ok(item) Ok(item)
} }
pub(crate) fn new_unchecked(token: &'t str, offset: usize) -> Self { pub(crate) fn new_unchecked(token: &'t str, case: Case, offset: usize) -> Self {
Self { token, offset } Self {
token,
case,
offset,
}
} }
pub fn token(&self) -> &str { pub fn token(&self) -> &str {
self.token self.token
} }
pub fn case(&self) -> Case {
self.case
}
pub fn offset(&self) -> usize { pub fn offset(&self) -> usize {
self.offset self.offset
} }
@ -102,7 +123,7 @@ impl<'t> Word<'t> {
/// The mode is a tri-state which tracks the case of the last cased character of the current /// The mode is a tri-state which tracks the case of the last cased character of the current
/// word. If there is no cased character (either lowercase or uppercase) since the previous /// word. If there is no cased character (either lowercase or uppercase) since the previous
/// word boundary, then the mode is `Boundary`. If the last cased character is lowercase, then /// word boundary, then the mode is `Boundary`. If the last cased character is lowercase, then
/// the mode is `Lowercase`. Otherwise, the mode is `Uppercase`. /// the mode is `Lowercase`. Otherwise, the mode is `Uppercase`.
#[derive(Clone, Copy, PartialEq, Debug)] #[derive(Clone, Copy, PartialEq, Debug)]
enum WordMode { enum WordMode {
/// There have been no lowercase or uppercase characters in the current word. /// There have been no lowercase or uppercase characters in the current word.
@ -127,14 +148,31 @@ impl WordMode {
WordMode::Boundary WordMode::Boundary
} }
} }
fn case(self, last: WordMode) -> Case {
match (self, last) {
(WordMode::Uppercase, WordMode::Uppercase) => Case::Scream,
(WordMode::Uppercase, WordMode::Lowercase) => Case::Title,
(WordMode::Lowercase, WordMode::Lowercase) => Case::Lower,
(WordMode::Number, WordMode::Number) => Case::None,
(WordMode::Number, _)
| (_, WordMode::Number)
| (WordMode::Boundary, _)
| (_, WordMode::Boundary)
| (WordMode::Lowercase, WordMode::Uppercase) => {
unreachable!("Invalid case combination: ({:?}, {:?})", self, last)
}
}
}
} }
fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> { fn split_ident(ident: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
let mut result = vec![]; let mut result = vec![];
let mut char_indices = symbol.char_indices().peekable(); let mut char_indices = ident.char_indices().peekable();
let mut start = 0; let mut start = 0;
let mut start_mode = WordMode::Boundary; let mut start_mode = WordMode::Boundary;
let mut last_mode = WordMode::Boundary;
while let Some((i, c)) = char_indices.next() { while let Some((i, c)) = char_indices.next() {
let cur_mode = WordMode::classify(c); let cur_mode = WordMode::classify(c);
if cur_mode == WordMode::Boundary { if cur_mode == WordMode::Boundary {
@ -143,13 +181,16 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
} }
continue; continue;
} }
if start_mode == WordMode::Boundary {
start_mode = cur_mode;
}
if let Some(&(next_i, next)) = char_indices.peek() { if let Some(&(next_i, next)) = char_indices.peek() {
// The mode including the current character, assuming the current character does // The mode including the current character, assuming the current character does
// not result in a word boundary. // not result in a word boundary.
let next_mode = WordMode::classify(next); let next_mode = WordMode::classify(next);
match (start_mode, cur_mode, next_mode) { match (last_mode, cur_mode, next_mode) {
// cur_mode is last of current word // cur_mode is last of current word
(_, _, WordMode::Boundary) (_, _, WordMode::Boundary)
| (_, WordMode::Lowercase, WordMode::Number) | (_, WordMode::Lowercase, WordMode::Number)
@ -157,24 +198,36 @@ fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
| (_, WordMode::Number, WordMode::Lowercase) | (_, WordMode::Number, WordMode::Lowercase)
| (_, WordMode::Number, WordMode::Uppercase) | (_, WordMode::Number, WordMode::Uppercase)
| (_, WordMode::Lowercase, WordMode::Uppercase) => { | (_, WordMode::Lowercase, WordMode::Uppercase) => {
result.push(Word::new_unchecked(&symbol[start..next_i], start + offset)); let case = start_mode.case(cur_mode);
result.push(Word::new_unchecked(
&ident[start..next_i],
case,
start + offset,
));
start = next_i; start = next_i;
start_mode = WordMode::Boundary; start_mode = WordMode::Boundary;
last_mode = WordMode::Boundary;
} }
// cur_mode is start of next word // cur_mode is start of next word
(WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => { (WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => {
result.push(Word::new_unchecked(&symbol[start..i], start + offset)); result.push(Word::new_unchecked(
&ident[start..i],
Case::Scream,
start + offset,
));
start = i; start = i;
start_mode = WordMode::Boundary; start_mode = cur_mode;
last_mode = WordMode::Boundary;
} }
// No word boundary // No word boundary
(_, _, _) => { (_, _, _) => {
start_mode = cur_mode; last_mode = cur_mode;
} }
} }
} else { } else {
// Collect trailing characters as a word // Collect trailing characters as a word
result.push(Word::new_unchecked(&symbol[start..], start + offset)); let case = start_mode.case(cur_mode);
result.push(Word::new_unchecked(&ident[start..], case, start + offset));
break; break;
} }
} }
@ -189,74 +242,119 @@ mod test {
#[test] #[test]
fn tokenize_empty_is_empty() { fn tokenize_empty_is_empty() {
let input = b""; let input = b"";
let expected: Vec<Symbol> = vec![]; let expected: Vec<Identifier> = vec![];
let actual: Vec<_> = Symbol::parse(input).collect(); let actual: Vec<_> = Identifier::parse(input).collect();
assert_eq!(expected, actual); assert_eq!(expected, actual);
} }
#[test] #[test]
fn tokenize_word_is_word() { fn tokenize_word_is_word() {
let input = b"word"; let input = b"word";
let expected: Vec<Symbol> = vec![Symbol::new_unchecked("word", 0)]; let expected: Vec<Identifier> = vec![Identifier::new_unchecked("word", 0)];
let actual: Vec<_> = Symbol::parse(input).collect(); let actual: Vec<_> = Identifier::parse(input).collect();
assert_eq!(expected, actual); assert_eq!(expected, actual);
} }
#[test] #[test]
fn tokenize_space_separated_words() { fn tokenize_space_separated_words() {
let input = b"A B"; let input = b"A B";
let expected: Vec<Symbol> = let expected: Vec<Identifier> = vec![
vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)]; Identifier::new_unchecked("A", 0),
let actual: Vec<_> = Symbol::parse(input).collect(); Identifier::new_unchecked("B", 2),
];
let actual: Vec<_> = Identifier::parse(input).collect();
assert_eq!(expected, actual); assert_eq!(expected, actual);
} }
#[test] #[test]
fn tokenize_dot_separated_words() { fn tokenize_dot_separated_words() {
let input = b"A.B"; let input = b"A.B";
let expected: Vec<Symbol> = let expected: Vec<Identifier> = vec![
vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)]; Identifier::new_unchecked("A", 0),
let actual: Vec<_> = Symbol::parse(input).collect(); Identifier::new_unchecked("B", 2),
];
let actual: Vec<_> = Identifier::parse(input).collect();
assert_eq!(expected, actual); assert_eq!(expected, actual);
} }
#[test] #[test]
fn tokenize_namespace_separated_words() { fn tokenize_namespace_separated_words() {
let input = b"A::B"; let input = b"A::B";
let expected: Vec<Symbol> = let expected: Vec<Identifier> = vec![
vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 3)]; Identifier::new_unchecked("A", 0),
let actual: Vec<_> = Symbol::parse(input).collect(); Identifier::new_unchecked("B", 3),
];
let actual: Vec<_> = Identifier::parse(input).collect();
assert_eq!(expected, actual); assert_eq!(expected, actual);
} }
#[test] #[test]
fn tokenize_underscore_doesnt_separate() { fn tokenize_underscore_doesnt_separate() {
let input = b"A_B"; let input = b"A_B";
let expected: Vec<Symbol> = vec![Symbol::new_unchecked("A_B", 0)]; let expected: Vec<Identifier> = vec![Identifier::new_unchecked("A_B", 0)];
let actual: Vec<_> = Symbol::parse(input).collect(); let actual: Vec<_> = Identifier::parse(input).collect();
assert_eq!(expected, actual); assert_eq!(expected, actual);
} }
#[test] #[test]
fn split_symbol() { fn split_ident() {
let cases = [ let cases = [
("lowercase", &["lowercase"] as &[&str]), (
("Class", &["Class"]), "lowercase",
("MyClass", &["My", "Class"]), &[("lowercase", Case::Lower, 0usize)] as &[(&str, Case, usize)],
("MyC", &["My", "C"]), ),
("HTML", &["HTML"]), ("Class", &[("Class", Case::Title, 0)]),
("PDFLoader", &["PDF", "Loader"]), (
("AString", &["A", "String"]), "MyClass",
("SimpleXMLParser", &["Simple", "XML", "Parser"]), &[("My", Case::Title, 0), ("Class", Case::Title, 2)],
("vimRPCPlugin", &["vim", "RPC", "Plugin"]), ),
("GL11Version", &["GL", "11", "Version"]), ("MyC", &[("My", Case::Title, 0), ("C", Case::Scream, 2)]),
("99Bottles", &["99", "Bottles"]), ("HTML", &[("HTML", Case::Scream, 0)]),
("May5", &["May", "5"]), (
("BFG9000", &["BFG", "9000"]), "PDFLoader",
&[("PDF", Case::Scream, 0), ("Loader", Case::Title, 3)],
),
(
"AString",
&[("A", Case::Scream, 0), ("String", Case::Title, 1)],
),
(
"SimpleXMLParser",
&[
("Simple", Case::Title, 0),
("XML", Case::Scream, 6),
("Parser", Case::Title, 9),
],
),
(
"vimRPCPlugin",
&[
("vim", Case::Lower, 0),
("RPC", Case::Scream, 3),
("Plugin", Case::Title, 6),
],
),
(
"GL11Version",
&[
("GL", Case::Scream, 0),
("11", Case::None, 2),
("Version", Case::Title, 4),
],
),
(
"99Bottles",
&[("99", Case::None, 0), ("Bottles", Case::Title, 2)],
),
("May5", &[("May", Case::Title, 0), ("5", Case::None, 3)]),
(
"BFG9000",
&[("BFG", Case::Scream, 0), ("9000", Case::None, 3)],
),
]; ];
for (input, expected) in cases.iter() { for (input, expected) in cases.iter() {
let symbol = Symbol::new(input, 0).unwrap(); let ident = Identifier::new(input, 0).unwrap();
let result: Vec<_> = symbol.split().map(|w| w.token).collect(); let result: Vec<_> = ident.split().map(|w| (w.token, w.case, w.offset)).collect();
assert_eq!(&result, expected); assert_eq!(&result, expected);
} }
} }