Mirror of https://github.com/crate-ci/typos.git
feat(parse): Process words composing symbols
parent 63a84863be
commit 3d1fb3b1ae
6 changed files with 264 additions and 33 deletions
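In short: instead of only checking each parsed token against the dictionary as-is, the parser now exposes the words composing a symbol (`MyClass` yields `My` and `Class`), and both levels are checked. A minimal sketch of the resulting flow, using the crate and API names from the diffs below (`successsStore` is a hypothetical input):

```rust
fn main() {
    let dict = defenestrate::Dictionary::new();
    for symbol in defenestrate::tokens::Symbol::parse(b"successsStore") {
        // Whole-symbol lookup (currently a stub that always returns None).
        assert!(dict.correct_symbol(symbol).is_none());
        // Word-level lookup catches the misspelled component.
        for word in symbol.split() {
            if let Some(fix) = dict.correct_word(word) {
                println!("{} -> {}", word.token(), fix); // successs -> successes
            }
        }
    }
}
```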
```diff
@@ -8,15 +8,17 @@ fn load_corrections(b: &mut test::Bencher) {
 }
 
 #[bench]
-fn correction(b: &mut test::Bencher) {
+fn correct_word_hit(b: &mut test::Bencher) {
     let corrections = defenestrate::Dictionary::new();
-    assert_eq!(corrections.correct_str("successs"), Some("successes"));
-    b.iter(|| corrections.correct_str("successs"));
+    let input = defenestrate::tokens::Word::new("successs", 0).unwrap();
+    assert_eq!(corrections.correct_word(input), Some("successes"));
+    b.iter(|| corrections.correct_word(input));
 }
 
 #[bench]
-fn no_correction(b: &mut test::Bencher) {
+fn correct_word_miss(b: &mut test::Bencher) {
     let corrections = defenestrate::Dictionary::new();
-    assert_eq!(corrections.correct_str("success"), None);
-    b.iter(|| corrections.correct_str("success"));
+    let input = defenestrate::tokens::Word::new("success", 0).unwrap();
+    assert_eq!(corrections.correct_word(input), None);
+    b.iter(|| corrections.correct_word(input));
 }
```
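The dictionary benchmarks are renamed for what they actually measure (hit vs. miss) and move from the string-based `correct_str` to the typed `correct_word`, constructing the validated `Word` once outside the timed closure. Per the `src/tokens.rs` diff below, `Word::new` fails unless its input is exactly one word; a hedged sketch:

```rust
fn main() {
    // Assumes the defenestrate crate as of this commit.
    assert!(defenestrate::tokens::Word::new("successs", 0).is_ok());
    // Camel case splits into two words, so it is rejected as a single Word.
    assert!(defenestrate::tokens::Word::new("successsStore", 0).is_err());
}
```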
```diff
@@ -6,32 +6,60 @@ mod data;
 
 #[bench]
 fn symbol_parse_empty(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::EMPTY.as_bytes()).collect::<Vec<_>>());
+    b.iter(|| defenestrate::tokens::Symbol::parse(data::EMPTY.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_no_tokens(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::NO_TOKENS.as_bytes()).collect::<Vec<_>>());
+    b.iter(|| defenestrate::tokens::Symbol::parse(data::NO_TOKENS.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_single_token(b: &mut test::Bencher) {
     b.iter(|| {
-        defenestrate::tokens::Symbol::parse(data::SINGLE_TOKEN.as_bytes()).collect::<Vec<_>>()
+        defenestrate::tokens::Symbol::parse(data::SINGLE_TOKEN.as_bytes()).last();
     });
 }
 
 #[bench]
 fn symbol_parse_sherlock(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::SHERLOCK.as_bytes()).collect::<Vec<_>>());
+    b.iter(|| defenestrate::tokens::Symbol::parse(data::SHERLOCK.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_code(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::CODE.as_bytes()).collect::<Vec<_>>());
+    b.iter(|| defenestrate::tokens::Symbol::parse(data::CODE.as_bytes()).last());
 }
 
 #[bench]
 fn symbol_parse_corpus(b: &mut test::Bencher) {
-    b.iter(|| defenestrate::tokens::Symbol::parse(data::CORPUS.as_bytes()).collect::<Vec<_>>());
+    b.iter(|| defenestrate::tokens::Symbol::parse(data::CORPUS.as_bytes()).last());
 }
+
+#[bench]
+fn symbol_split_lowercase_short(b: &mut test::Bencher) {
+    let input = "abcabcabcabc";
+    let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap();
+    b.iter(|| symbol.split().last());
+}
+
+#[bench]
+fn symbol_split_lowercase_long(b: &mut test::Bencher) {
+    let input = "abcabcabcabc".repeat(90);
+    let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap();
+    b.iter(|| symbol.split().last());
+}
+
+#[bench]
+fn symbol_split_mixed_short(b: &mut test::Bencher) {
+    let input = "abcABCAbc123";
+    let symbol = defenestrate::tokens::Symbol::new(input, 0).unwrap();
+    b.iter(|| symbol.split().last());
+}
+
+#[bench]
+fn symbol_split_mixed_long(b: &mut test::Bencher) {
+    let input = "abcABCAbc123".repeat(90);
+    let symbol = defenestrate::tokens::Symbol::new(&input, 0).unwrap();
+    b.iter(|| symbol.split().last());
+}
```
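Each parse benchmark also swaps `.collect::<Vec<_>>()` for `.last()`. Both force the lazy iterator to run to completion, but `last()` avoids allocating and growing a `Vec` on every iteration, so the measurement tracks parsing cost rather than allocation. A generic illustration of the difference:

```rust
fn main() {
    let make = || (0..1_000_000u64).map(|x| x * 2);
    let v: Vec<u64> = make().collect(); // drives the iterator and allocates
    let l = make().last();              // drives the iterator, no allocation
    assert_eq!(v.last().copied(), l);
}
```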
build.rs | 3

```diff
@@ -11,9 +11,10 @@ fn main() {
     println!("rerun-if-changed=./assets/words.csv");
+    write!(&mut file, "use unicase::UniCase;").unwrap();
 
     write!(
         &mut file,
-        "pub(crate) static DICTIONARY: phf::Map<unicase::UniCase<&'static str>, &'static str> = "
+        "pub(crate) static WORD_DICTIONARY: phf::Map<unicase::UniCase<&'static str>, &'static str> = "
     )
     .unwrap();
     let mut builder = phf_codegen::Map::new();
 
```
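The generated table is renamed from `DICTIONARY` to `WORD_DICTIONARY` to match the new word-level lookup, and the generated file now imports `UniCase`. Keying the `phf` map by `unicase::UniCase` is what makes lookups case-insensitive; a minimal sketch of that property (assuming a recent `unicase` as a dependency):

```rust
use unicase::UniCase;

fn main() {
    // Two casings of the same word compare (and hash) equal under UniCase,
    // so "Successs" and "successs" resolve to the same dictionary entry.
    assert_eq!(UniCase::new("Successs"), UniCase::new("successs"));
}
```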
src/dict.rs | 11

```diff
@@ -8,14 +8,12 @@ impl Dictionary {
         Dictionary {}
     }
 
-    pub fn correct_str<'s, 'w>(&'s self, word: &'w str) -> Option<&'s str> {
-        map_lookup(&crate::dict_codegen::DICTIONARY, word)
+    pub fn correct_symbol<'s, 'w>(&'s self, _sym: crate::tokens::Symbol<'w>) -> Option<&'s str> {
+        None
     }
 
-    pub fn correct_bytes<'s, 'w>(&'s self, word: &'w [u8]) -> Option<&'s str> {
-        std::str::from_utf8(word)
-            .ok()
-            .and_then(|word| self.correct_str(word))
+    pub fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<&'s str> {
+        map_lookup(&crate::dict_codegen::WORD_DICTIONARY, word.token())
     }
 }
 
@@ -27,6 +25,7 @@ fn map_lookup(
     // the expanded lifetime. This is due to `Borrow` being overly strict and
     // can't have an impl for `&'static str` to `Borrow<&'a str>`.
     //
+    //
     // See https://github.com/rust-lang/rust/issues/28853#issuecomment-158735548
     unsafe {
        let key = ::std::mem::transmute::<_, &'static str>(key);
```
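`correct_str` and `correct_bytes` are replaced by a typed pair: `correct_symbol` is a placeholder that never corrects (whole symbols such as `successsStore` are not dictionary entries yet), while `correct_word` performs the real lookup against `WORD_DICTIONARY`. The expected behavior, reconstructed from the benchmarks above:

```rust
fn main() -> Result<(), failure::Error> {
    let dict = defenestrate::Dictionary::new();
    let hit = defenestrate::tokens::Word::new("successs", 0)?;
    assert_eq!(dict.correct_word(hit), Some("successes"));
    let miss = defenestrate::tokens::Word::new("success", 0)?;
    assert_eq!(dict.correct_word(miss), None);
    Ok(())
}
```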
src/lib.rs | 22

```diff
@@ -22,20 +22,34 @@ pub fn process_file(
     for (line_idx, line) in grep_searcher::LineIter::new(b'\n', &buffer).enumerate() {
         let line_num = line_idx + 1;
         for symbol in tokens::Symbol::parse(line) {
-            // Correct tokens as-is
-            if let Some(correction) = dictionary.correct_str(symbol.token) {
-                let col_num = symbol.offset;
+            if let Some(correction) = dictionary.correct_symbol(symbol) {
+                let col_num = symbol.offset();
                 let msg = report::Message {
                     path,
                     line,
                     line_num,
                     col_num,
-                    word: symbol.token,
+                    word: symbol.token(),
                     correction,
                     non_exhaustive: (),
                 };
                 report(msg);
             }
+            for word in symbol.split() {
+                if let Some(correction) = dictionary.correct_word(word) {
+                    let col_num = word.offset();
+                    let msg = report::Message {
+                        path,
+                        line,
+                        line_num,
+                        col_num,
+                        word: word.token(),
+                        correction,
+                        non_exhaustive: (),
+                    };
+                    report(msg);
+                }
+            }
         }
     }
 
```
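Both report paths emit the same `report::Message` shape; the only difference is where the token and column come from. Note that word offsets compose with the symbol's own offset (`split_symbol(self.token, self.offset)` in the next diff), so `col_num` stays line-relative at both levels. A sketch of that invariant, with a hypothetical symbol starting at column 4:

```rust
fn main() {
    let sym = defenestrate::tokens::Symbol::new("successsStore", 4).unwrap();
    let words: Vec<_> = sym.split().map(|w| (w.token(), w.offset())).collect();
    // "successs" is 8 bytes long, so "Store" starts at 4 + 8 = 12.
    assert_eq!(words, vec![("successs", 4), ("Store", 12)]);
}
```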
src/tokens.rs | 207

```diff
@@ -1,11 +1,32 @@
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct Symbol<'t> {
-    pub token: &'t str,
-    pub offset: usize,
+    token: &'t str,
+    offset: usize,
 }
 
 impl<'t> Symbol<'t> {
-    pub fn new(token: &'t str, offset: usize) -> Self {
+    pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
+        let mut itr = Self::parse(token.as_bytes());
+        let mut item = itr
+            .next()
+            .ok_or_else(|| failure::format_err!("Invalid symbol (none found): {:?}", token))?;
+        if item.offset != 0 {
+            return Err(failure::format_err!(
+                "Invalid symbol (padding found): {:?}",
+                token
+            ));
+        }
+        item.offset += offset;
+        if itr.next().is_some() {
+            return Err(failure::format_err!(
+                "Invalid symbol (contains more than one): {:?}",
+                token
+            ));
+        }
+        Ok(item)
+    }
+
+    pub(crate) fn new_unchecked(token: &'t str, offset: usize) -> Self {
         Self { token, offset }
     }
 
```
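`Symbol` now hides its fields behind accessors, derives `Copy`, and gains a validating constructor: `new` re-parses its input and fails unless it finds exactly one symbol starting at offset zero, while the crate-private `new_unchecked` keeps the old infallible behavior for the parser's internal use. Roughly:

```rust
fn main() {
    // Exactly one symbol, no padding: Ok.
    assert!(defenestrate::tokens::Symbol::new("word", 10).is_ok());
    // Leading padding means the symbol does not start at offset 0: Err.
    assert!(defenestrate::tokens::Symbol::new(" word", 0).is_err());
    // More than one symbol in the input: Err.
    assert!(defenestrate::tokens::Symbol::new("two words", 0).is_err());
}
```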
```diff
@@ -17,9 +38,148 @@ impl<'t> Symbol<'t> {
         }
         SPLIT.find_iter(content).filter_map(|m| {
             let s = std::str::from_utf8(m.as_bytes()).ok();
-            s.map(|s| Symbol::new(s, m.start()))
+            s.map(|s| Symbol::new_unchecked(s, m.start()))
         })
     }
+
+    pub fn token(&self) -> &str {
+        self.token
+    }
+
+    pub fn offset(&self) -> usize {
+        self.offset
+    }
+
+    pub fn split(&self) -> impl Iterator<Item = Word<'_>> {
+        split_symbol(self.token, self.offset)
+    }
 }
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct Word<'t> {
+    token: &'t str,
+    offset: usize,
+}
+
+impl<'t> Word<'t> {
+    pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
+        Symbol::new(token, offset)?;
+        let mut itr = split_symbol(token, 0);
+        let mut item = itr
+            .next()
+            .ok_or_else(|| failure::format_err!("Invalid word (none found): {:?}", token))?;
+        if item.offset != 0 {
+            return Err(failure::format_err!(
+                "Invalid word (padding found): {:?}",
+                token
+            ));
+        }
+        item.offset += offset;
+        if itr.next().is_some() {
+            return Err(failure::format_err!(
+                "Invalid word (contains more than one): {:?}",
+                token
+            ));
+        }
+        Ok(item)
+    }
+
+    pub(crate) fn new_unchecked(token: &'t str, offset: usize) -> Self {
+        Self { token, offset }
+    }
+
+    pub fn token(&self) -> &str {
+        self.token
+    }
+
+    pub fn offset(&self) -> usize {
+        self.offset
+    }
+}
+
+/// Tracks the current 'mode' of the transformation algorithm as it scans the input string.
+///
+/// The mode is a tri-state which tracks the case of the last cased character of the current
+/// word. If there is no cased character (either lowercase or uppercase) since the previous
+/// word boundary, then the mode is `Boundary`. If the last cased character is lowercase, then
+/// the mode is `Lowercase`. Otherwise, the mode is `Uppercase`.
+#[derive(Clone, Copy, PartialEq, Debug)]
+enum WordMode {
+    /// There have been no lowercase or uppercase characters in the current word.
+    Boundary,
+    /// The previous cased character in the current word is lowercase.
+    Lowercase,
+    /// The previous cased character in the current word is uppercase.
+    Uppercase,
+    Number,
+}
+
+impl WordMode {
+    fn classify(c: char) -> Self {
+        if c.is_lowercase() {
+            WordMode::Lowercase
+        } else if c.is_uppercase() {
+            WordMode::Uppercase
+        } else if c.is_ascii_digit() {
+            WordMode::Number
+        } else {
+            // This assumes all characters are either lower or upper case.
+            WordMode::Boundary
+        }
+    }
+}
+
+fn split_symbol(symbol: &str, offset: usize) -> impl Iterator<Item = Word<'_>> {
+    let mut result = vec![];
+
+    let mut char_indices = symbol.char_indices().peekable();
+    let mut start = 0;
+    let mut start_mode = WordMode::Boundary;
+    while let Some((i, c)) = char_indices.next() {
+        let cur_mode = WordMode::classify(c);
+        if cur_mode == WordMode::Boundary {
+            if start == i {
+                start += 1;
+            }
+            continue;
+        }
+
+        if let Some(&(next_i, next)) = char_indices.peek() {
+            // The mode including the current character, assuming the current character does
+            // not result in a word boundary.
+            let next_mode = WordMode::classify(next);
+
+            match (start_mode, cur_mode, next_mode) {
+                // cur_mode is last of current word
+                (_, _, WordMode::Boundary)
+                | (_, WordMode::Lowercase, WordMode::Number)
+                | (_, WordMode::Uppercase, WordMode::Number)
+                | (_, WordMode::Number, WordMode::Lowercase)
+                | (_, WordMode::Number, WordMode::Uppercase)
+                | (_, WordMode::Lowercase, WordMode::Uppercase) => {
+                    result.push(Word::new_unchecked(&symbol[start..next_i], start + offset));
+                    start = next_i;
+                    start_mode = WordMode::Boundary;
+                }
+                // cur_mode is start of next word
+                (WordMode::Uppercase, WordMode::Uppercase, WordMode::Lowercase) => {
+                    result.push(Word::new_unchecked(&symbol[start..i], start + offset));
+                    start = i;
+                    start_mode = WordMode::Boundary;
+                }
+                // No word boundary
+                (_, _, _) => {
+                    start_mode = cur_mode;
+                }
+            }
+        } else {
+            // Collect trailing characters as a word
+            result.push(Word::new_unchecked(&symbol[start..], start + offset));
+            break;
+        }
+    }
+
+    result.into_iter()
+}
 
 #[cfg(test)]
```
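`split_symbol` is the heart of the change: a one-character-lookahead scan that classifies each character as `Lowercase`, `Uppercase`, `Number`, or `Boundary` and cuts a word wherever the `(start, current, next)` mode triple signals a case or digit transition. Acronyms stay intact because an upper-to-lower transition starts the new word at the last uppercase character rather than inside the acronym. For example (expected values taken from the `split_symbol` test below):

```rust
fn main() {
    let sym = defenestrate::tokens::Symbol::new("GL11Version", 0).unwrap();
    let words: Vec<_> = sym.split().map(|w| w.token()).collect();
    // Digit runs form their own words; "GL" survives as an acronym.
    assert_eq!(words, vec!["GL", "11", "Version"]);
}
```

The test updates that follow are largely mechanical: with `Symbol::new` now returning a `Result`, expected values are built with the infallible `new_unchecked` instead.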
```diff
@@ -37,7 +197,7 @@ mod test {
     #[test]
     fn tokenize_word_is_word() {
         let input = b"word";
-        let expected: Vec<Symbol> = vec![Symbol::new("word", 0)];
+        let expected: Vec<Symbol> = vec![Symbol::new_unchecked("word", 0)];
         let actual: Vec<_> = Symbol::parse(input).collect();
         assert_eq!(expected, actual);
     }
@@ -45,7 +205,8 @@ mod test {
     #[test]
     fn tokenize_space_separated_words() {
         let input = b"A B";
-        let expected: Vec<Symbol> = vec![Symbol::new("A", 0), Symbol::new("B", 2)];
+        let expected: Vec<Symbol> =
+            vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)];
         let actual: Vec<_> = Symbol::parse(input).collect();
         assert_eq!(expected, actual);
     }
@@ -53,7 +214,8 @@ mod test {
     #[test]
     fn tokenize_dot_separated_words() {
         let input = b"A.B";
-        let expected: Vec<Symbol> = vec![Symbol::new("A", 0), Symbol::new("B", 2)];
+        let expected: Vec<Symbol> =
+            vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 2)];
         let actual: Vec<_> = Symbol::parse(input).collect();
         assert_eq!(expected, actual);
     }
@@ -61,7 +223,8 @@ mod test {
     #[test]
     fn tokenize_namespace_separated_words() {
         let input = b"A::B";
-        let expected: Vec<Symbol> = vec![Symbol::new("A", 0), Symbol::new("B", 3)];
+        let expected: Vec<Symbol> =
+            vec![Symbol::new_unchecked("A", 0), Symbol::new_unchecked("B", 3)];
         let actual: Vec<_> = Symbol::parse(input).collect();
         assert_eq!(expected, actual);
     }
@@ -69,8 +232,32 @@ mod test {
     #[test]
     fn tokenize_underscore_doesnt_separate() {
         let input = b"A_B";
-        let expected: Vec<Symbol> = vec![Symbol::new("A_B", 0)];
+        let expected: Vec<Symbol> = vec![Symbol::new_unchecked("A_B", 0)];
         let actual: Vec<_> = Symbol::parse(input).collect();
         assert_eq!(expected, actual);
     }
+
+    #[test]
+    fn split_symbol() {
+        let cases = [
+            ("lowercase", &["lowercase"] as &[&str]),
+            ("Class", &["Class"]),
+            ("MyClass", &["My", "Class"]),
+            ("MyC", &["My", "C"]),
+            ("HTML", &["HTML"]),
+            ("PDFLoader", &["PDF", "Loader"]),
+            ("AString", &["A", "String"]),
+            ("SimpleXMLParser", &["Simple", "XML", "Parser"]),
+            ("vimRPCPlugin", &["vim", "RPC", "Plugin"]),
+            ("GL11Version", &["GL", "11", "Version"]),
+            ("99Bottles", &["99", "Bottles"]),
+            ("May5", &["May", "5"]),
+            ("BFG9000", &["BFG", "9000"]),
+        ];
+        for (input, expected) in cases.iter() {
+            let symbol = Symbol::new(input, 0).unwrap();
+            let result: Vec<_> = symbol.split().map(|w| w.token).collect();
+            assert_eq!(&result, expected);
+        }
+    }
 }
```