mirror of https://github.com/crate-ci/typos.git
synced 2024-11-28 20:11:05 -05:00

Merge pull request #47 from epage/refactor

Try to make the code more approachable

commit 6142941cfc
8 changed files with 389 additions and 271 deletions
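In short, the refactor replaces the free function typos::process_file (one call carrying six positional arguments) with three composable pieces: a BuiltIn dictionary behind a new Dictionary trait, a reusable tokens::Parser built via ParserBuilder, and a checks::Checks object built via checks::CheckSettings. A minimal sketch of the new call site, pieced together from the diffs below (the path and the print_silent reporter are illustrative, and the `?` assumes a function returning Result<_, failure::Error>):

    // Sketch assembled from this PR's diffs; path and report callback are
    // illustrative, not part of the change itself.
    let dictionary = typos::BuiltIn::new(); // was typos::Dictionary::new()
    let parser = typos::tokens::ParserBuilder::new()
        .ignore_hex(true) // hex filtering moved from process_file into the parser
        .build();
    let checks = typos::checks::CheckSettings::new()
        .check_filenames(true)
        .check_files(true)
        .binary(false)
        .build(&dictionary, &parser);
    let path = std::path::Path::new("src/lib.rs");
    checks.check_filename(path, typos::report::print_silent)?;
    checks.check_file(path, typos::report::print_silent)?;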
@@ -4,12 +4,12 @@ extern crate test;
 #[bench]
 fn load_corrections(b: &mut test::Bencher) {
-    b.iter(|| typos::Dictionary::new());
+    b.iter(|| typos::BuiltIn::new());
 }

 #[bench]
 fn correct_word_hit(b: &mut test::Bencher) {
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let input = typos::tokens::Word::new("successs", 0).unwrap();
     assert_eq!(
         corrections.correct_word(input),
@@ -20,7 +20,7 @@ fn correct_word_hit(b: &mut test::Bencher) {

 #[bench]
 fn correct_word_miss(b: &mut test::Bencher) {
-    let corrections = typos::Dictionary::new();
+    let corrections = typos::BuiltIn::new();
     let input = typos::tokens::Word::new("success", 0).unwrap();
     assert_eq!(corrections.correct_word(input), None);
     b.iter(|| corrections.correct_word(input));
@@ -12,18 +12,10 @@ fn process_empty(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::EMPTY).unwrap();

-    let corrections = typos::Dictionary::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            true,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let corrections = typos::BuiltIn::new();
+    let parser = typos::tokens::Parser::new();
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));

     temp.close().unwrap();
 }
@@ -34,18 +26,10 @@ fn process_no_tokens(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::NO_TOKENS).unwrap();

-    let corrections = typos::Dictionary::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            true,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let corrections = typos::BuiltIn::new();
+    let parser = typos::tokens::Parser::new();
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));

     temp.close().unwrap();
 }
@@ -56,18 +40,10 @@ fn process_single_token(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::SINGLE_TOKEN).unwrap();

-    let corrections = typos::Dictionary::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            true,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let corrections = typos::BuiltIn::new();
+    let parser = typos::tokens::Parser::new();
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));

     temp.close().unwrap();
 }
@@ -78,18 +54,10 @@ fn process_sherlock(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::SHERLOCK).unwrap();

-    let corrections = typos::Dictionary::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            true,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let corrections = typos::BuiltIn::new();
+    let parser = typos::tokens::Parser::new();
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));

     temp.close().unwrap();
 }
@@ -100,18 +68,10 @@ fn process_code(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::CODE).unwrap();

-    let corrections = typos::Dictionary::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            true,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let corrections = typos::BuiltIn::new();
+    let parser = typos::tokens::Parser::new();
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));

     temp.close().unwrap();
 }
@@ -122,18 +82,10 @@ fn process_corpus(b: &mut test::Bencher) {
     let sample_path = temp.child("sample");
     sample_path.write_str(data::CORPUS).unwrap();

-    let corrections = typos::Dictionary::new();
-    b.iter(|| {
-        typos::process_file(
-            sample_path.path(),
-            &corrections,
-            true,
-            true,
-            true,
-            false,
-            typos::report::print_silent,
-        )
-    });
+    let corrections = typos::BuiltIn::new();
+    let parser = typos::tokens::Parser::new();
+    let checks = typos::checks::CheckSettings::new().build(&corrections, &parser);
+    b.iter(|| checks.check_file(sample_path.path(), typos::report::print_silent));

     temp.close().unwrap();
 }
@@ -6,60 +6,66 @@ mod data;

 #[bench]
 fn symbol_parse_empty(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::EMPTY.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::EMPTY.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_no_tokens(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::NO_TOKENS.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::NO_TOKENS.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_single_token(b: &mut test::Bencher) {
+    let parser = typos::tokens::Parser::new();
     b.iter(|| {
-        typos::tokens::Identifier::parse_bytes(data::SINGLE_TOKEN.as_bytes()).last();
+        parser.parse_bytes(data::SINGLE_TOKEN.as_bytes()).last();
     });
 }

 #[bench]
 fn symbol_parse_sherlock(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::SHERLOCK.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::SHERLOCK.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_code(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::CODE.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::CODE.as_bytes()).last());
 }

 #[bench]
 fn symbol_parse_corpus(b: &mut test::Bencher) {
-    b.iter(|| typos::tokens::Identifier::parse_bytes(data::CORPUS.as_bytes()).last());
+    let parser = typos::tokens::Parser::new();
+    b.iter(|| parser.parse_bytes(data::CORPUS.as_bytes()).last());
 }

 #[bench]
 fn symbol_split_lowercase_short(b: &mut test::Bencher) {
     let input = "abcabcabcabc";
-    let symbol = typos::tokens::Identifier::new(input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(input, 0);
     b.iter(|| symbol.split().last());
 }

 #[bench]
 fn symbol_split_lowercase_long(b: &mut test::Bencher) {
     let input = "abcabcabcabc".repeat(90);
-    let symbol = typos::tokens::Identifier::new(&input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(&input, 0);
     b.iter(|| symbol.split().last());
 }

 #[bench]
 fn symbol_split_mixed_short(b: &mut test::Bencher) {
     let input = "abcABCAbc123";
-    let symbol = typos::tokens::Identifier::new(input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(input, 0);
     b.iter(|| symbol.split().last());
 }

 #[bench]
 fn symbol_split_mixed_long(b: &mut test::Bencher) {
     let input = "abcABCAbc123".repeat(90);
-    let symbol = typos::tokens::Identifier::new(&input, 0).unwrap();
+    let symbol = typos::tokens::Identifier::new_unchecked(&input, 0);
     b.iter(|| symbol.split().last());
 }
171 src/checks.rs Normal file
@@ -0,0 +1,171 @@
+use std::fs::File;
+use std::io::Read;
+
+use bstr::ByteSlice;
+
+use crate::report;
+use crate::tokens;
+use crate::Dictionary;
+
+pub struct CheckSettings {
+    check_filenames: bool,
+    check_files: bool,
+    binary: bool,
+}
+
+impl CheckSettings {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn check_filenames(&mut self, yes: bool) -> &mut Self {
+        self.check_filenames = yes;
+        self
+    }
+
+    pub fn check_files(&mut self, yes: bool) -> &mut Self {
+        self.check_files = yes;
+        self
+    }
+
+    pub fn binary(&mut self, yes: bool) -> &mut Self {
+        self.binary = yes;
+        self
+    }
+
+    pub fn build<'d, 'p>(
+        &self,
+        dictionary: &'d Dictionary,
+        parser: &'p tokens::Parser,
+    ) -> Checks<'d, 'p> {
+        Checks {
+            dictionary,
+            parser,
+            check_filenames: self.check_filenames,
+            check_files: self.check_files,
+            binary: self.binary,
+        }
+    }
+}
+
+impl Default for CheckSettings {
+    fn default() -> Self {
+        Self {
+            check_filenames: true,
+            check_files: true,
+            binary: false,
+        }
+    }
+}
+
+pub struct Checks<'d, 'p> {
+    dictionary: &'d Dictionary,
+    parser: &'p tokens::Parser,
+    check_filenames: bool,
+    check_files: bool,
+    binary: bool,
+}
+
+impl<'d, 'p> Checks<'d, 'p> {
+    pub fn check_filename(
+        &self,
+        path: &std::path::Path,
+        report: report::Report,
+    ) -> Result<bool, failure::Error> {
+        let mut typos_found = false;
+
+        if !self.check_filenames {
+            return Ok(typos_found);
+        }
+
+        for part in path.components().filter_map(|c| c.as_os_str().to_str()) {
+            for ident in self.parser.parse(part) {
+                if let Some(correction) = self.dictionary.correct_ident(ident) {
+                    let msg = report::FilenameCorrection {
+                        path,
+                        typo: ident.token(),
+                        correction,
+                        non_exhaustive: (),
+                    };
+                    report(msg.into());
+                    typos_found = true;
+                }
+                for word in ident.split() {
+                    if let Some(correction) = self.dictionary.correct_word(word) {
+                        let msg = report::FilenameCorrection {
+                            path,
+                            typo: word.token(),
+                            correction,
+                            non_exhaustive: (),
+                        };
+                        report(msg.into());
+                        typos_found = true;
+                    }
+                }
+            }
+        }
+
+        Ok(typos_found)
+    }
+
+    pub fn check_file(
+        &self,
+        path: &std::path::Path,
+        report: report::Report,
+    ) -> Result<bool, failure::Error> {
+        let mut typos_found = false;
+
+        if !self.check_files {
+            return Ok(typos_found);
+        }
+
+        let mut buffer = Vec::new();
+        File::open(path)?.read_to_end(&mut buffer)?;
+        if !self.binary && buffer.find_byte(b'\0').is_some() {
+            let msg = report::BinaryFile {
+                path,
+                non_exhaustive: (),
+            };
+            report(msg.into());
+            return Ok(typos_found);
+        }
+
+        for (line_idx, line) in buffer.lines().enumerate() {
+            let line_num = line_idx + 1;
+            for ident in self.parser.parse_bytes(line) {
+                if let Some(correction) = self.dictionary.correct_ident(ident) {
+                    let col_num = ident.offset();
+                    let msg = report::Correction {
+                        path,
+                        line,
+                        line_num,
+                        col_num,
+                        typo: ident.token(),
+                        correction,
+                        non_exhaustive: (),
+                    };
+                    typos_found = true;
+                    report(msg.into());
+                }
+                for word in ident.split() {
+                    if let Some(correction) = self.dictionary.correct_word(word) {
+                        let col_num = word.offset();
+                        let msg = report::Correction {
+                            path,
+                            line,
+                            line_num,
+                            col_num,
+                            typo: word.token(),
+                            correction,
+                            non_exhaustive: (),
+                        };
+                        typos_found = true;
+                        report(msg.into());
+                    }
+                }
+            }
+        }
+
+        Ok(typos_found)
+    }
+}
30 src/dict.rs
@@ -4,12 +4,21 @@ use unicase::UniCase;
 use crate::tokens::Case;

-#[derive(Default)]
-pub struct Dictionary {}
+pub trait Dictionary {
+    fn correct_ident<'s, 'w>(
+        &'s self,
+        _ident: crate::tokens::Identifier<'w>,
+    ) -> Option<Cow<'s, str>>;
+
+    fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<Cow<'s, str>>;
+}
+
+#[derive(Default)]
+pub struct BuiltIn {}

-impl Dictionary {
+impl BuiltIn {
     pub fn new() -> Self {
-        Dictionary {}
+        Self {}
     }

     pub fn correct_ident<'s, 'w>(
@@ -25,6 +34,19 @@ impl Dictionary {
     }
 }

+impl Dictionary for BuiltIn {
+    fn correct_ident<'s, 'w>(
+        &'s self,
+        ident: crate::tokens::Identifier<'w>,
+    ) -> Option<Cow<'s, str>> {
+        BuiltIn::correct_ident(self, ident)
+    }
+
+    fn correct_word<'s, 'w>(&'s self, word: crate::tokens::Word<'w>) -> Option<Cow<'s, str>> {
+        BuiltIn::correct_word(self, word)
+    }
+}
+
 fn map_lookup(
     map: &'static phf::Map<UniCase<&'static str>, &'static str>,
     key: &str,
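Turning Dictionary into a trait makes the dictionary pluggable: anything providing correct_ident and correct_word can back the checks. A hypothetical single-entry dictionary, purely to show the shape of an implementation (not part of this PR):

    // Hypothetical example, not in this PR: a dictionary with exactly one
    // correction, implementing the new trait.
    use std::borrow::Cow;

    struct OneWord;

    impl typos::Dictionary for OneWord {
        fn correct_ident<'s, 'w>(
            &'s self,
            _ident: typos::tokens::Identifier<'w>,
        ) -> Option<Cow<'s, str>> {
            None // no whole-identifier corrections
        }

        fn correct_word<'s, 'w>(&'s self, word: typos::tokens::Word<'w>) -> Option<Cow<'s, str>> {
            if word.token() == "successs" {
                Some(Cow::Borrowed("success"))
            } else {
                None
            }
        }
    }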
113 src/lib.rs
@@ -4,119 +4,8 @@ extern crate serde_derive;
 mod dict;
 mod dict_codegen;

+pub mod checks;
 pub mod report;
 pub mod tokens;

 pub use crate::dict::*;
-
-use std::fs::File;
-use std::io::Read;
-
-use bstr::ByteSlice;
-
-pub fn process_file(
-    path: &std::path::Path,
-    dictionary: &Dictionary,
-    check_filenames: bool,
-    check_files: bool,
-    ignore_hex: bool,
-    binary: bool,
-    report: report::Report,
-) -> Result<bool, failure::Error> {
-    let mut typos_found = false;
-
-    if check_filenames {
-        for part in path.components().filter_map(|c| c.as_os_str().to_str()) {
-            for ident in tokens::Identifier::parse(part) {
-                if !ignore_hex && is_hex(ident.token()) {
-                    continue;
-                }
-                if let Some(correction) = dictionary.correct_ident(ident) {
-                    let msg = report::FilenameCorrection {
-                        path,
-                        typo: ident.token(),
-                        correction,
-                        non_exhaustive: (),
-                    };
-                    report(msg.into());
-                    typos_found = true;
-                }
-                for word in ident.split() {
-                    if let Some(correction) = dictionary.correct_word(word) {
-                        let msg = report::FilenameCorrection {
-                            path,
-                            typo: word.token(),
-                            correction,
-                            non_exhaustive: (),
-                        };
-                        report(msg.into());
-                        typos_found = true;
-                    }
-                }
-            }
-        }
-    }
-
-    if check_files {
-        let mut buffer = Vec::new();
-        File::open(path)?.read_to_end(&mut buffer)?;
-        if !binary && buffer.find_byte(b'\0').is_some() {
-            let msg = report::BinaryFile {
-                path,
-                non_exhaustive: (),
-            };
-            report(msg.into());
-            return Ok(typos_found);
-        }
-
-        for (line_idx, line) in buffer.lines().enumerate() {
-            let line_num = line_idx + 1;
-            for ident in tokens::Identifier::parse_bytes(line) {
-                if !ignore_hex && is_hex(ident.token()) {
-                    continue;
-                }
-                if let Some(correction) = dictionary.correct_ident(ident) {
-                    let col_num = ident.offset();
-                    let msg = report::Correction {
-                        path,
-                        line,
-                        line_num,
-                        col_num,
-                        typo: ident.token(),
-                        correction,
-                        non_exhaustive: (),
-                    };
-                    typos_found = true;
-                    report(msg.into());
-                }
-                for word in ident.split() {
-                    if let Some(correction) = dictionary.correct_word(word) {
-                        let col_num = word.offset();
-                        let msg = report::Correction {
-                            path,
-                            line,
-                            line_num,
-                            col_num,
-                            typo: word.token(),
-                            correction,
-                            non_exhaustive: (),
-                        };
-                        typos_found = true;
-                        report(msg.into());
-                    }
-                }
-            }
-        }
-    }
-
-    Ok(typos_found)
-}
-
-fn is_hex(ident: &str) -> bool {
-    lazy_static::lazy_static! {
-        // `_`: number literal separator in Rust and other languages
-        // `'`: number literal separator in C++
-        static ref HEX: regex::Regex = regex::Regex::new(r#"^0[xX][0-9a-fA-F_']+$"#).unwrap();
-    }
-    HEX.is_match(ident)
-}
26 src/main.rs
@@ -255,12 +255,23 @@ fn run() -> Result<i32, failure::Error> {
     let mut builder = get_logging(options.verbose.log_level());
     builder.init();

-    let dictionary = typos::Dictionary::new();
     let check_filenames = options.check_filenames().unwrap_or(true);
     let check_files = options.check_files().unwrap_or(true);
     let ignore_hex = options.ignore_hex().unwrap_or(true);
     let binary = options.binary().unwrap_or(false);

+    let dictionary = typos::BuiltIn::new();
+
+    let parser = typos::tokens::ParserBuilder::new()
+        .ignore_hex(ignore_hex)
+        .build();
+
+    let checks = typos::checks::CheckSettings::new()
+        .check_filenames(check_filenames)
+        .check_files(check_files)
+        .binary(binary)
+        .build(&dictionary, &parser);
+
     let first_path = &options
         .path
         .get(0)
@@ -279,15 +290,10 @@ fn run() -> Result<i32, failure::Error> {
     for entry in walk.build() {
         let entry = entry?;
         if entry.file_type().map(|t| t.is_file()).unwrap_or(true) {
-            if typos::process_file(
-                entry.path(),
-                &dictionary,
-                check_filenames,
-                check_files,
-                ignore_hex,
-                binary,
-                options.format.report(),
-            )? {
+            if checks.check_filename(entry.path(), options.format.report())? {
+                typos_found = true;
+            }
+            if checks.check_file(entry.path(), options.format.report())? {
                 typos_found = true;
             }
         }
192 src/tokens.rs
@@ -6,6 +6,80 @@ pub enum Case {
     None,
 }

+#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
+pub struct ParserBuilder {
+    ignore_hex: bool,
+}
+
+impl ParserBuilder {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn ignore_hex(&mut self, yes: bool) -> &mut Self {
+        self.ignore_hex = yes;
+        self
+    }
+
+    pub fn build(&self) -> Parser {
+        let pattern = r#"\b(\p{Alphabetic}|\d|_|')+\b"#;
+        let words_str = regex::Regex::new(pattern).unwrap();
+        let words_bytes = regex::bytes::Regex::new(pattern).unwrap();
+        Parser {
+            words_str,
+            words_bytes,
+            ignore_hex: self.ignore_hex,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct Parser {
+    words_str: regex::Regex,
+    words_bytes: regex::bytes::Regex,
+    ignore_hex: bool,
+}
+
+impl Parser {
+    pub fn new() -> Self {
+        ParserBuilder::default().build()
+    }
+
+    pub fn parse<'c>(&'c self, content: &'c str) -> impl Iterator<Item = Identifier<'c>> {
+        let ignore_hex = self.ignore_hex;
+        self.words_str
+            .find_iter(content)
+            .filter(move |m| !ignore_hex || !is_hex(m.as_str().as_bytes()))
+            .map(|m| Identifier::new_unchecked(m.as_str(), m.start()))
+    }
+
+    pub fn parse_bytes<'c>(&'c self, content: &'c [u8]) -> impl Iterator<Item = Identifier<'c>> {
+        let ignore_hex = self.ignore_hex;
+        self.words_bytes
+            .find_iter(content)
+            .filter(move |m| !ignore_hex || !is_hex(m.as_bytes()))
+            .filter_map(|m| {
+                let s = std::str::from_utf8(m.as_bytes()).ok();
+                s.map(|s| Identifier::new_unchecked(s, m.start()))
+            })
+    }
+}
+
+impl Default for Parser {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+fn is_hex(ident: &[u8]) -> bool {
+    lazy_static::lazy_static! {
+        // `_`: number literal separator in Rust and other languages
+        // `'`: number literal separator in C++
+        static ref HEX: regex::bytes::Regex = regex::bytes::Regex::new(r#"^0[xX][0-9a-fA-F_']+$"#).unwrap();
+    }
+    HEX.is_match(ident)
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct Identifier<'t> {
     token: &'t str,
@@ -13,54 +87,10 @@ pub struct Identifier<'t> {
 }

 impl<'t> Identifier<'t> {
-    pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
-        let mut itr = Self::parse_bytes(token.as_bytes());
-        let mut item = itr
-            .next()
-            .ok_or_else(|| failure::format_err!("Invalid ident (none found): {:?}", token))?;
-        if item.offset != 0 {
-            return Err(failure::format_err!(
-                "Invalid ident (padding found): {:?}",
-                token
-            ));
-        }
-        item.offset += offset;
-        if itr.next().is_some() {
-            return Err(failure::format_err!(
-                "Invalid ident (contains more than one): {:?}",
-                token
-            ));
-        }
-        Ok(item)
-    }
-
-    pub(crate) fn new_unchecked(token: &'t str, offset: usize) -> Self {
+    pub fn new_unchecked(token: &'t str, offset: usize) -> Self {
         Self { token, offset }
     }

-    pub fn parse(content: &str) -> impl Iterator<Item = Identifier<'_>> {
-        lazy_static::lazy_static! {
-            // Getting false positives for this lint
-            #[allow(clippy::invalid_regex)]
-            static ref SPLIT: regex::Regex = regex::Regex::new(r#"\b(\p{Alphabetic}|\d|_|')+\b"#).unwrap();
-        }
-        SPLIT
-            .find_iter(content)
-            .map(|m| Identifier::new_unchecked(m.as_str(), m.start()))
-    }
-
-    pub fn parse_bytes(content: &[u8]) -> impl Iterator<Item = Identifier<'_>> {
-        lazy_static::lazy_static! {
-            // Getting false positives for this lint
-            #[allow(clippy::invalid_regex)]
-            static ref SPLIT: regex::bytes::Regex = regex::bytes::Regex::new(r#"\b(\p{Alphabetic}|\d|_|')+\b"#).unwrap();
-        }
-        SPLIT.find_iter(content).filter_map(|m| {
-            let s = std::str::from_utf8(m.as_bytes()).ok();
-            s.map(|s| Identifier::new_unchecked(s, m.start()))
-        })
-    }
-
     pub fn token(&self) -> &str {
         self.token
     }
@@ -87,7 +117,6 @@ pub struct Word<'t> {

 impl<'t> Word<'t> {
     pub fn new(token: &'t str, offset: usize) -> Result<Self, failure::Error> {
-        Identifier::new(token, offset)?;
         let mut itr = split_ident(token, 0);
         let mut item = itr
             .next()
@@ -108,7 +137,7 @@ impl<'t> Word<'t> {
         Ok(item)
     }

-    pub(crate) fn new_unchecked(token: &'t str, case: Case, offset: usize) -> Self {
+    pub fn new_unchecked(token: &'t str, case: Case, offset: usize) -> Self {
         Self {
             token,
             case,
@@ -251,70 +280,113 @@ mod test {

     #[test]
     fn tokenize_empty_is_empty() {
+        let parser = Parser::new();
+
         let input = "";
         let expected: Vec<Identifier> = vec![];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_word_is_word() {
+        let parser = Parser::new();
+
         let input = "word";
         let expected: Vec<Identifier> = vec![Identifier::new_unchecked("word", 0)];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_space_separated_words() {
+        let parser = Parser::new();
+
         let input = "A B";
         let expected: Vec<Identifier> = vec![
             Identifier::new_unchecked("A", 0),
             Identifier::new_unchecked("B", 2),
         ];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_dot_separated_words() {
+        let parser = Parser::new();
+
         let input = "A.B";
         let expected: Vec<Identifier> = vec![
             Identifier::new_unchecked("A", 0),
             Identifier::new_unchecked("B", 2),
         ];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_namespace_separated_words() {
+        let parser = Parser::new();
+
         let input = "A::B";
         let expected: Vec<Identifier> = vec![
             Identifier::new_unchecked("A", 0),
             Identifier::new_unchecked("B", 3),
         ];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }

     #[test]
     fn tokenize_underscore_doesnt_separate() {
+        let parser = Parser::new();
+
         let input = "A_B";
         let expected: Vec<Identifier> = vec![Identifier::new_unchecked("A_B", 0)];
-        let actual: Vec<_> = Identifier::parse_bytes(input.as_bytes()).collect();
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
         assert_eq!(expected, actual);
-        let actual: Vec<_> = Identifier::parse(input).collect();
+        let actual: Vec<_> = parser.parse(input).collect();
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn tokenize_ignore_hex_enabled() {
+        let parser = ParserBuilder::new().ignore_hex(true).build();
+
+        let input = "Hello 0xDEADBEEF World";
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("Hello", 0),
+            Identifier::new_unchecked("World", 17),
+        ];
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
+        assert_eq!(expected, actual);
+        let actual: Vec<_> = parser.parse(input).collect();
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn tokenize_ignore_hex_disabled() {
+        let parser = ParserBuilder::new().ignore_hex(false).build();
+
+        let input = "Hello 0xDEADBEEF World";
+        let expected: Vec<Identifier> = vec![
+            Identifier::new_unchecked("Hello", 0),
+            Identifier::new_unchecked("0xDEADBEEF", 6),
+            Identifier::new_unchecked("World", 17),
+        ];
+        let actual: Vec<_> = parser.parse_bytes(input.as_bytes()).collect();
+        assert_eq!(expected, actual);
+        let actual: Vec<_> = parser.parse(input).collect();
         assert_eq!(expected, actual);
     }

@@ -375,7 +447,7 @@ mod test {
             ),
         ];
         for (input, expected) in cases.iter() {
-            let ident = Identifier::new(input, 0).unwrap();
+            let ident = Identifier::new_unchecked(input, 0);
             let result: Vec<_> = ident.split().map(|w| (w.token, w.case, w.offset)).collect();
             assert_eq!(&result, expected);
         }
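As a quick illustration of the new parser API (input and expected matches taken from the tokenize_ignore_hex_enabled test above):

    // Illustration only, mirroring the test above.
    let parser = typos::tokens::ParserBuilder::new().ignore_hex(true).build();
    for ident in parser.parse("Hello 0xDEADBEEF World") {
        // Hex filtering now happens inside the parser, so "0xDEADBEEF"
        // never reaches callers.
        println!("{} @ {}", ident.token(), ident.offset());
    }
    // prints: Hello @ 0, then World @ 17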