mirror of https://github.com/crate-ci/typos.git
synced 2024-11-25 10:31:02 -05:00

Merge pull request #1066 from epage/template

chore: Update from _rust/main template
commit ebbe2a1b19

25 changed files with 226 additions and 122 deletions
.github/workflows/ci.yml (vendored): 2 changes
@@ -35,7 +35,7 @@ jobs:
     name: Test
     strategy:
       matrix:
-        os: ["ubuntu-latest", "windows-latest", "macos-14"]
+        os: ["ubuntu-latest", "windows-latest", "macos-latest"]
         rust: ["stable"]
     continue-on-error: ${{ matrix.rust != 'stable' }}
     runs-on: ${{ matrix.os }}
.github/workflows/rust-next.yml (vendored): 2 changes
@@ -22,7 +22,7 @@ jobs:
     name: Test
     strategy:
       matrix:
-        os: ["ubuntu-latest", "windows-latest", "macos-latest", "macos-14"]
+        os: ["ubuntu-latest", "windows-latest", "macos-latest"]
         rust: ["stable", "beta"]
         include:
           - os: ubuntu-latest
@@ -62,25 +62,42 @@ Auto-cleans up your change according to some rules we have like:

 ### Process

-When you first post a PR, we request that the commit history get cleaned
-up. We recommend avoiding this during the PR to make it easier to review how
-feedback was handled. Once the commit is ready, we'll ask you to clean up the
-commit history. Once you let us know this is done, we can move forward with
-merging! If you are uncomfortable with these parts of git, let us know and we
-can help.
-
-We ask that all new files have the copyright header. Please update the
-copyright year for files you are modifying.
-
 As a heads up, we'll be running your PR through the following gauntlet:
 - warnings turned to compile errors
 - `cargo test`
 - `rustfmt`
 - `clippy`
 - `rustdoc`
-- [`committed`](https://github.com/crate-ci/committed) to enforce [conventional commits](https://www.conventionalcommits.org)
+- [`committed`](https://github.com/crate-ci/committed) as we use [Conventional](https://www.conventionalcommits.org) commit style
+- [`typos`](https://github.com/crate-ci/typos) to check spelling

-Check out our [CI][CI] for more information.
+Not everything can be checked automatically though.
+
+We request that the commit history gets cleaned up.
+We ask that commits are atomic, meaning they are complete and have a single responsibility.
+PRs should tell a cohesive story, with test and refactor commits that keep the
+fix or feature commits simple and clear.
+
+Specifically, we would encourage
+- File renames be isolated into their own commit
+- Add tests in a commit before their feature or fix, showing the current behavior.
+  The diff for the feature/fix commit will then show how the behavior changed,
+  making it clearer to reviewers and the community and showing people that the
+  test is verifying the expected state.
+  - e.g. [clap#5520](https://github.com/clap-rs/clap/pull/5520)
+
+Note that we are talking about ideals.
+We understand having a clean history requires more advanced git skills;
+feel free to ask us for help!
+We might even suggest where it would work to be lax.
+We also understand that editing some early commits may cause a lot of churn
+with merge conflicts which can make it not worth editing all of the history.
+
+For code organization, we recommend
+- Grouping `impl` blocks next to their type (or trait)
+- Grouping private items after the `pub` item that uses them.
+  - The intent is to help people quickly find the "relevant" details, allowing them to "dig deeper" as needed. Or put another way, the `pub` items serve as a table-of-contents.
+  - The exact order is fuzzy; do what makes sense

 ## Releasing
@@ -6,8 +6,8 @@ default-members = ["crates/typos-cli"]
 resolver = "2"

 [workspace.package]
-license = "MIT OR Apache-2.0"
 repository = "https://github.com/crate-ci/typos"
+license = "MIT OR Apache-2.0"
 edition = "2021"
 rust-version = "1.75" # MSRV
 include = [

@@ -22,7 +22,7 @@ include = [
 ]

 [workspace.lints.rust]
-rust_2018_idioms = "warn"
+rust_2018_idioms = { level = "warn", priority = -1 }
 unreachable_pub = "warn"
 unsafe_op_in_unsafe_fn = "warn"
 unused_lifetimes = "warn"

@@ -63,7 +63,6 @@ let_and_return = "allow" # sometimes good to name what you are returning
 linkedlist = "warn"
 lossy_float_literal = "warn"
 macro_use_imports = "warn"
-match_wildcard_for_single_variants = "warn"
 mem_forget = "warn"
 mutex_integer = "warn"
 needless_for_each = "warn"

@@ -77,7 +76,6 @@ rest_pat_in_fully_bound_structs = "warn"
 same_functions_in_if_condition = "warn"
 self_named_module_files = "warn"
 semicolon_if_nothing_returned = "warn"
-single_match_else = "warn"
 str_to_string = "warn"
 string_add = "warn"
 string_add_assign = "warn"

@@ -85,6 +83,7 @@ string_lit_as_bytes = "warn"
 string_to_string = "warn"
 todo = "warn"
 trait_duplication_in_bounds = "warn"
+uninlined_format_args = "warn"
 verbose_file_reads = "warn"
 wildcard_imports = "warn"
 zero_sized_map_values = "warn"
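
The newly added `uninlined_format_args` lint is what drives most of the source changes in the rest of this diff: clippy now asks for variables to be captured directly inside format strings instead of being passed as trailing positional arguments. A minimal sketch of the before/after pattern (the names here are illustrative, not code from this PR):

    fn main() {
        let crate_name = "typos";
        let count = 3;

        // Flagged by clippy::uninlined_format_args:
        println!("{} found {} corrections", crate_name, count);

        // Preferred: inline format args, stable since Rust 1.58:
        println!("{crate_name} found {count} corrections");
    }

The `priority = -1` on `rust_2018_idioms` makes the lint-group level yield to the individual lint levels set alongside it, which newer cargo versions warn about when the priority is left implicit.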
@@ -4,8 +4,8 @@ version = "0.2.8"
 description = "Compile-time case-insensitive map"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling", "no_std"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -13,8 +13,7 @@ pub fn generate_map<'d, W: std::io::Write, V: std::fmt::Display>(

     writeln!(
         file,
-        "pub static {}: dictgen::DictTable<{}> = dictgen::DictTable {{",
-        name, value_type
+        "pub static {name}: dictgen::DictTable<{value_type}> = dictgen::DictTable {{"
     )?;
     writeln!(file, "    keys: &[")?;
     for (key, _value) in data.iter() {

@@ -22,12 +21,12 @@ pub fn generate_map<'d, W: std::io::Write, V: std::fmt::Display>(
         largest = std::cmp::max(largest, key.len());

         let key = if key.is_ascii() {
-            format!("dictgen::InsensitiveStr::Ascii({:?})", key)
+            format!("dictgen::InsensitiveStr::Ascii({key:?})")
         } else {
-            format!("dictgen::InsensitiveStr::Unicode({:?})", key)
+            format!("dictgen::InsensitiveStr::Unicode({key:?})")
         };

-        writeln!(file, "        {},", key)?;
+        writeln!(file, "        {key},")?;
     }
     if largest == 0 {
         smallest = 0;

@@ -35,10 +34,10 @@ pub fn generate_map<'d, W: std::io::Write, V: std::fmt::Display>(
     writeln!(file, "    ],")?;
     writeln!(file, "    values: &[")?;
     for (_key, value) in data.iter() {
-        writeln!(file, "        {},", value)?;
+        writeln!(file, "        {value},")?;
     }
     writeln!(file, "    ],")?;
-    writeln!(file, "    range: {}..={},", smallest, largest)?;
+    writeln!(file, "    range: {smallest}..={largest},")?;
     writeln!(file, "}};")?;

     Ok(())
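
For context, the code this function writes out looks roughly like the following. This is a hypothetical two-entry dictionary; the static name and the entries are invented for illustration, and the literal indentation follows the writeln! calls above:

    pub static SAMPLE_WORD_DICT: dictgen::DictTable<&'static str> = dictgen::DictTable {
        keys: &[
            dictgen::InsensitiveStr::Ascii("teh"),
            dictgen::InsensitiveStr::Ascii("wierd"),
        ],
        values: &[
            "the",
            "weird",
        ],
        range: 3..=5,
    };

The `range` is the span of key lengths (here 3 to 5 bytes), which lets lookups bail out early on words that cannot possibly be in the table.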
@@ -13,8 +13,7 @@ pub fn generate_table<'d, W: std::io::Write, V: std::fmt::Display>(

     writeln!(
         file,
-        "pub static {}: dictgen::DictTable<{}> = dictgen::DictTable {{",
-        name, value_type
+        "pub static {name}: dictgen::DictTable<{value_type}> = dictgen::DictTable {{"
     )?;
     writeln!(file, "    keys: &[")?;
     for (key, _value) in data.iter() {

@@ -22,12 +21,12 @@ pub fn generate_table<'d, W: std::io::Write, V: std::fmt::Display>(
         largest = std::cmp::max(largest, key.len());

         let key = if key.is_ascii() {
-            format!("dictgen::InsensitiveStr::Ascii({:?})", key)
+            format!("dictgen::InsensitiveStr::Ascii({key:?})")
         } else {
-            format!("dictgen::InsensitiveStr::Unicode({:?})", key)
+            format!("dictgen::InsensitiveStr::Unicode({key:?})")
         };

-        writeln!(file, "        {},", key)?;
+        writeln!(file, "        {key},")?;
     }
     if largest == 0 {
         smallest = 0;

@@ -35,10 +34,10 @@ pub fn generate_table<'d, W: std::io::Write, V: std::fmt::Display>(
     writeln!(file, "    ],")?;
     writeln!(file, "    values: &[")?;
     for (_key, value) in data.iter() {
-        writeln!(file, "        {},", value)?;
+        writeln!(file, "        {value},")?;
     }
     writeln!(file, "    ],")?;
-    writeln!(file, "    range: {}..={},", smallest, largest)?;
+    writeln!(file, "    range: {smallest}..={largest},")?;
     writeln!(file, "}};")?;

     Ok(())
@@ -86,12 +86,11 @@ mod codegen {
         let mut root = DynRoot::new(data);
         root.burst(limit);

-        let unicode_table_name = format!("{}_UNICODE_TABLE", prefix);
+        let unicode_table_name = format!("{prefix}_UNICODE_TABLE");

         writeln!(
             file,
-            "pub static {}_TRIE: dictgen::DictTrie<{}> = dictgen::DictTrie {{",
-            prefix, value_type
+            "pub static {prefix}_TRIE: dictgen::DictTrie<{value_type}> = dictgen::DictTrie {{"
         )?;
         writeln!(file, "    root: &{},", gen_node_name(prefix, ""))?;
         writeln!(file, "    unicode: &{},", &unicode_table_name)?;

@@ -118,8 +117,7 @@ mod codegen {
             let children_name = gen_children_name(prefix, &start);
             writeln!(
                 file,
-                "static {}: dictgen::DictTrieNode<{}> = dictgen::DictTrieNode {{",
-                node_name, value_type
+                "static {node_name}: dictgen::DictTrieNode<{value_type}> = dictgen::DictTrieNode {{"
             )?;
             writeln!(
                 file,

@@ -128,7 +126,7 @@ mod codegen {
                 children_name
             )?;
             if let Some(value) = node.value.as_ref() {
-                writeln!(file, "    value: Some({}),", value)?;
+                writeln!(file, "    value: Some({value}),")?;
             } else {
                 writeln!(file, "    value: None,")?;
             }

@@ -139,13 +137,12 @@ mod codegen {
                 DynChild::Nested(n) => {
                     writeln!(
                         file,
-                        "static {}: [Option<&dictgen::DictTrieNode<{}>>; 26] = [",
-                        children_name, value_type,
+                        "static {children_name}: [Option<&dictgen::DictTrieNode<{value_type}>>; 26] = [",
                     )?;
                     for b in b'a'..=b'z' {
                         if let Some(child) = n.get(&b) {
                             let c = b as char;
-                            let next_start = format!("{}{}", start, c);
+                            let next_start = format!("{start}{c}");
                             writeln!(file, "    Some(&{}),", gen_node_name(prefix, &next_start))?;
                             nodes.push((next_start, child));
                         } else {

@@ -171,21 +168,21 @@ mod codegen {

     fn gen_node_name(prefix: &str, start: &str) -> String {
         if start.is_empty() {
-            format!("{}_NODE", prefix)
+            format!("{prefix}_NODE")
         } else {
             let mut start = start.to_owned();
             start.make_ascii_uppercase();
-            format!("{}_{}_NODE", prefix, start)
+            format!("{prefix}_{start}_NODE")
         }
     }

     fn gen_children_name(prefix: &str, start: &str) -> String {
         if start.is_empty() {
-            format!("{}_CHILDREN", prefix)
+            format!("{prefix}_CHILDREN")
         } else {
             let mut start = start.to_owned();
             start.make_ascii_uppercase();
-            format!("{}_{}_CHILDREN", prefix, start)
+            format!("{prefix}_{start}_CHILDREN")
         }
     }

@@ -212,7 +209,7 @@ mod codegen {
         let mut empty = None;
         for (key, value) in data {
             if existing.contains(key) {
-                panic!("Duplicate present: {}", key);
+                panic!("Duplicate present: {key}");
             }
             existing.insert(key);

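The naming helpers above derive a static's name from the codegen prefix plus the traversal path so far. A standalone sketch of the mapping they produce (the logic mirrors `gen_node_name` from the diff above; the prefix and paths are example inputs, not values from this PR):

    fn gen_node_name(prefix: &str, start: &str) -> String {
        if start.is_empty() {
            format!("{prefix}_NODE")
        } else {
            let mut start = start.to_owned();
            start.make_ascii_uppercase();
            format!("{prefix}_{start}_NODE")
        }
    }

    fn main() {
        assert_eq!(gen_node_name("WORD", ""), "WORD_NODE");
        assert_eq!(gen_node_name("WORD", "ab"), "WORD_AB_NODE");
    }
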
@@ -5,8 +5,8 @@ description = "Source Code Spelling Correction"
 readme = "README.md"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -89,13 +89,13 @@ fn parse_dict(raw: &str) -> Words<'_> {
                     vec![captures.get(2).unwrap().as_str()],
                 );
             } else {
-                eprintln!("Unknown line: {}", line);
+                eprintln!("Unknown line: {line}");
             }
         }
     }

     if !bad.is_empty() {
-        panic!("Failed parsing; found extra words: {:#?}", bad);
+        panic!("Failed parsing; found extra words: {bad:#?}");
     }

     Words {
@@ -5,8 +5,8 @@ description = "Source Code Spelling Correction"
 readme = "../../README.md"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -244,7 +244,7 @@ fn run_checks(args: &args::Args) -> proc_exit::ExitResult {
     let mut overrides = ignore::overrides::OverrideBuilder::new(".");
     for pattern in walk_policy.extend_exclude.iter() {
         overrides
-            .add(&format!("!{}", pattern))
+            .add(&format!("!{pattern}"))
             .with_code(proc_exit::sysexits::CONFIG_ERR)?;
     }
     let overrides = overrides
@@ -153,7 +153,7 @@ fn print_brief_correction(msg: &Typo<'_>) -> Result<(), std::io::Error> {
         context_display(&msg.context),
         msg.typo,
         itertools::join(
-            corrections.iter().map(|s| format!("`{good}{}{reset}`", s)),
+            corrections.iter().map(|s| format!("`{good}{s}{reset}`")),
             ", "
         )
     )?;

@@ -192,7 +192,7 @@ fn print_long_correction(msg: &Typo<'_>) -> Result<(), std::io::Error> {
         "{error}error{reset}: `{error}{}{reset}` should be {}",
         msg.typo,
         itertools::join(
-            corrections.iter().map(|s| format!("`{good}{}{reset}`", s)),
+            corrections.iter().map(|s| format!("`{good}{s}{reset}`")),
             ", "
         )
     )?;

@@ -305,7 +305,7 @@ mod tests {
         ];
         for (i, ch) in latin_cyrillic_chars.iter().enumerate() {
             let width = calculate_visible_column_width(ch);
-            assert_eq!(1, width, "latin_cyrillic[{}]: {}", i, ch,);
+            assert_eq!(1, width, "latin_cyrillic[{i}]: {ch}",);
         }
     }

@@ -319,7 +319,7 @@ mod tests {
         ];
         for (i, ch) in cjk_chars.iter().enumerate() {
             let width = calculate_visible_column_width(ch);
-            assert_eq!(2, width, "cjk[{}]: {}", i, ch);
+            assert_eq!(2, width, "cjk[{i}]: {ch}");
         }
     }

@@ -340,7 +340,7 @@ mod tests {
         ];
         for (i, ch) in simple_emojis.iter().enumerate() {
             let width = calculate_visible_column_width(ch);
-            assert_eq!(2, width, "emoji[{}]: {}", i, ch);
+            assert_eq!(2, width, "emoji[{i}]: {ch}");
         }
     }

@@ -352,7 +352,7 @@ mod tests {
         ];
         for (i, ch) in zwj_sequences.iter().enumerate() {
             let width = calculate_visible_column_width(ch);
-            assert_eq!(2, width, "zwj[{}]: {}", i, ch);
+            assert_eq!(2, width, "zwj[{i}]: {ch}");
         }
     }
 }
@@ -237,7 +237,7 @@ impl FileChecker for DiffTypos {
             let stdout = std::io::stdout();
             let mut handle = stdout.lock();
             for line in diff {
-                write!(handle, "{}", line)?;
+                write!(handle, "{line}")?;
             }
         }
@@ -5,8 +5,8 @@ description = "Source Code Spelling Correction"
 readme = "../../README.md"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -36,7 +36,7 @@ fn generate<W: std::io::Write>(file: &mut W, prefix: &str, dict: &[u8]) {
             let key = record_fields.next().unwrap();
             let value = format!(
                 "&[{}]",
-                itertools::join(record_fields.map(|field| format!(r#""{}""#, field)), ", ")
+                itertools::join(record_fields.map(|field| format!(r#""{field}""#)), ", ")
             );
             (key, value)
         }),
@@ -93,13 +93,13 @@ fn process<S: Into<String>>(
         .filter(|(typo, _)| {
             let is_disallowed = varcon_words.contains(&UniCase::new(typo));
             if is_disallowed {
-                eprintln!("{:?} is disallowed; in varcon", typo);
+                eprintln!("{typo:?} is disallowed; in varcon");
             }
             !is_disallowed
         })
         .filter(|(typo, _)| {
             if let Some(reason) = allowed_words.get(typo.as_ref()) {
-                eprintln!("{:?} is disallowed; {}", typo, reason);
+                eprintln!("{typo:?} is disallowed; {reason}");
                 false
             } else {
                 true
@@ -5,8 +5,8 @@ description = "Source Code Spelling Correction"
 readme = "../../README.md"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -52,9 +52,7 @@ fn generate_variations<W: Write>(file: &mut W) {
         file,
         "        {}",
         itertools::join(
-            CATEGORIES
-                .iter()
-                .map(|c| format!("crate::Category::{:?}", c)),
+            CATEGORIES.iter().map(|c| format!("crate::Category::{c:?}")),
             " | "
         )
     )

@@ -71,8 +69,7 @@ fn generate_variations<W: Write>(file: &mut W) {
     for (index, category) in CATEGORIES.iter().enumerate() {
         writeln!(
             file,
-            "        crate::Category::{:?} => options[{}],",
-            category, index
+            "        crate::Category::{category:?} => options[{index}],"
         )
         .unwrap();
     }

@@ -108,7 +105,7 @@ fn generate_variations<W: Write>(file: &mut W) {

     let no_invalid = entry_sets.values().all(|data| !is_always_invalid(data));
     writeln!(file).unwrap();
-    writeln!(file, "pub const NO_INVALID: bool = {:?};", no_invalid,).unwrap();
+    writeln!(file, "pub const NO_INVALID: bool = {no_invalid:?};",).unwrap();

     writeln!(file).unwrap();
     for (symbol, entry) in entries.iter() {

@@ -120,14 +117,14 @@ fn generate_variations<W: Write>(file: &mut W) {
 }

 fn generate_entry(file: &mut impl Write, symbol: &str, entry: &varcon_core::Entry) {
-    writeln!(file, "pub(crate) static {}: VariantsMap = [", symbol).unwrap();
+    writeln!(file, "pub(crate) static {symbol}: VariantsMap = [").unwrap();
     for category in &CATEGORIES {
         let corrections = collect_correct(entry, *category);
         let mut corrections: Vec<_> = corrections.iter().collect();
         corrections.sort_unstable();
         writeln!(file, "    &[").unwrap();
         for correction in &corrections {
-            writeln!(file, "        {:?},", correction).unwrap();
+            writeln!(file, "        {correction:?},").unwrap();
         }
         writeln!(file, "    ],").unwrap();
     }
@@ -5,8 +5,8 @@ description = "Source Code Spelling Correction"
 readme = "../../README.md"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -773,20 +773,20 @@ impl<'t> Word<'t> {
         let mut item = itr.next().ok_or_else(|| {
             std::io::Error::new(
                 std::io::ErrorKind::InvalidInput,
-                format!("{:?} is nothing", token),
+                format!("{token:?} is nothing"),
             )
         })?;
         if item.offset != 0 {
             return Err(std::io::Error::new(
                 std::io::ErrorKind::InvalidInput,
-                format!("{:?} has padding", token),
+                format!("{token:?} has padding"),
             ));
         }
         item.offset += offset;
         if itr.next().is_some() {
             return Err(std::io::Error::new(
                 std::io::ErrorKind::InvalidInput,
-                format!("{:?} is multiple words", token),
+                format!("{token:?} is multiple words"),
             ));
         }
         Ok(item)

@@ -1407,7 +1407,7 @@ mod test {
             // A 31-character hexadecimal string: too short to be a hash.
             ("D41D8CD98F00B204E9800998ECF8427", false),
         ] {
            let input = format!("Hello {hashlike} World");
             let mut expected: Vec<Identifier<'_>> = vec![
                 Identifier::new_unchecked("Hello", Case::None, 0),
                 Identifier::new_unchecked("World", Case::None, 7+hashlike.len()),
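
The expected offset in the test above follows from simple byte accounting, as this small sketch verifies (the value is one of the test's own hash-like inputs; the layout mirrors the test):

    fn main() {
        let hashlike = "D41D8CD98F00B204E9800998ECF8427";
        let input = format!("Hello {hashlike} World");
        // "Hello " is 6 bytes, the hash-like token is hashlike.len() bytes,
        // and one space follows it, so "World" begins at 7 + hashlike.len().
        assert_eq!(input.find("World").unwrap(), 7 + hashlike.len());
    }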
@@ -4,8 +4,8 @@ version = "4.0.9"
 description = "Varcon-relevant data structures"
 readme = "../../README.md"
 categories = ["text-processing"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -5,8 +5,8 @@ description = "Source Code Spelling Correction"
 readme = "../../README.md"
 categories = ["development-tools", "text-processing"]
 keywords = ["development", "spelling"]
-license.workspace = true
 repository.workspace = true
+license.workspace = true
 edition.workspace = true
 rust-version.workspace = true
 include.workspace = true
@@ -48,7 +48,7 @@ fn generate<W: std::io::Write>(file: &mut W) {
         write!(file, "    Type {{").unwrap();
         write!(file, "category: Category::{:?}, ", t.category).unwrap();
         if let Some(tag) = t.tag {
-            write!(file, "tag: Some(Tag::{:?}), ", tag).unwrap();
+            write!(file, "tag: Some(Tag::{tag:?}), ").unwrap();
         } else {
             write!(file, "tag: {:?}, ", t.tag).unwrap();
         }

@@ -60,7 +60,7 @@ fn generate<W: std::io::Write>(file: &mut W) {
         }
         writeln!(file, "    ],").unwrap();
         if let Some(pos) = entry.pos {
-            write!(file, "    pos: Some(Pos::{:?}),", pos).unwrap();
+            write!(file, "    pos: Some(Pos::{pos:?}),").unwrap();
         } else {
             write!(file, "    pos: {:?},", entry.pos).unwrap();
         }

@@ -77,7 +77,7 @@ fn generate<W: std::io::Write>(file: &mut W) {
         writeln!(file, "    ],").unwrap();
         writeln!(file, "    notes: &[").unwrap();
         for note in &cluster.notes {
-            writeln!(file, "        {:?},", note).unwrap();
+            writeln!(file, "        {note:?},").unwrap();
         }
         writeln!(file, "    ],").unwrap();
         writeln!(file, "    }},").unwrap();
deny.toml: 176 changes
@ -4,32 +4,82 @@
|
||||||
# * allow - No warning or error will be produced, though in some cases a note
|
# * allow - No warning or error will be produced, though in some cases a note
|
||||||
# will be
|
# will be
|
||||||
|
|
||||||
|
# Root options
|
||||||
|
|
||||||
|
# The graph table configures how the dependency graph is constructed and thus
|
||||||
|
# which crates the checks are performed against
|
||||||
|
[graph]
|
||||||
|
# If 1 or more target triples (and optionally, target_features) are specified,
|
||||||
|
# only the specified targets will be checked when running `cargo deny check`.
|
||||||
|
# This means, if a particular package is only ever used as a target specific
|
||||||
|
# dependency, such as, for example, the `nix` crate only being used via the
|
||||||
|
# `target_family = "unix"` configuration, that only having windows targets in
|
||||||
|
# this list would mean the nix crate, as well as any of its exclusive
|
||||||
|
# dependencies not shared by any other crates, would be ignored, as the target
|
||||||
|
# list here is effectively saying which targets you are building for.
|
||||||
|
targets = [
|
||||||
|
# The triple can be any string, but only the target triples built in to
|
||||||
|
# rustc (as of 1.40) can be checked against actual config expressions
|
||||||
|
#"x86_64-unknown-linux-musl",
|
||||||
|
# You can also specify which target_features you promise are enabled for a
|
||||||
|
# particular target. target_features are currently not validated against
|
||||||
|
# the actual valid features supported by the target architecture.
|
||||||
|
#{ triple = "wasm32-unknown-unknown", features = ["atomics"] },
|
||||||
|
]
|
||||||
|
# When creating the dependency graph used as the source of truth when checks are
|
||||||
|
# executed, this field can be used to prune crates from the graph, removing them
|
||||||
|
# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate
|
||||||
|
# is pruned from the graph, all of its dependencies will also be pruned unless
|
||||||
|
# they are connected to another crate in the graph that hasn't been pruned,
|
||||||
|
# so it should be used with care. The identifiers are [Package ID Specifications]
|
||||||
|
# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html)
|
||||||
|
#exclude = []
|
||||||
|
# If true, metadata will be collected with `--all-features`. Note that this can't
|
||||||
|
# be toggled off if true, if you want to conditionally enable `--all-features` it
|
||||||
|
# is recommended to pass `--all-features` on the cmd line instead
|
||||||
|
all-features = false
|
||||||
|
# If true, metadata will be collected with `--no-default-features`. The same
|
||||||
|
# caveat with `all-features` applies
|
||||||
|
no-default-features = false
|
||||||
|
# If set, these feature will be enabled when collecting metadata. If `--features`
|
||||||
|
# is specified on the cmd line they will take precedence over this option.
|
||||||
|
#features = []
|
||||||
|
|
||||||
|
# The output table provides options for how/if diagnostics are outputted
|
||||||
|
[output]
|
||||||
|
# When outputting inclusion graphs in diagnostics that include features, this
|
||||||
|
# option can be used to specify the depth at which feature edges will be added.
|
||||||
|
# This option is included since the graphs can be quite large and the addition
|
||||||
|
# of features from the crate(s) to all of the graph roots can be far too verbose.
|
||||||
|
# This option can be overridden via `--feature-depth` on the cmd line
|
||||||
|
feature-depth = 1
|
||||||
|
|
||||||
# This section is considered when running `cargo deny check advisories`
|
# This section is considered when running `cargo deny check advisories`
|
||||||
# More documentation for the advisories section can be found here:
|
# More documentation for the advisories section can be found here:
|
||||||
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
|
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
|
||||||
[advisories]
|
[advisories]
|
||||||
# The lint level for security vulnerabilities
|
# The path where the advisory databases are cloned/fetched into
|
||||||
vulnerability = "deny"
|
#db-path = "$CARGO_HOME/advisory-dbs"
|
||||||
# The lint level for unmaintained crates
|
# The url(s) of the advisory databases to use
|
||||||
unmaintained = "warn"
|
#db-urls = ["https://github.com/rustsec/advisory-db"]
|
||||||
# The lint level for crates that have been yanked from their source registry
|
|
||||||
yanked = "warn"
|
|
||||||
# The lint level for crates with security notices. Note that as of
|
|
||||||
# 2019-12-17 there are no security notice advisories in
|
|
||||||
# https://github.com/rustsec/advisory-db
|
|
||||||
notice = "warn"
|
|
||||||
# A list of advisory IDs to ignore. Note that ignored advisories will still
|
# A list of advisory IDs to ignore. Note that ignored advisories will still
|
||||||
# output a note when they are encountered.
|
# output a note when they are encountered.
|
||||||
#
|
|
||||||
# e.g. "RUSTSEC-0000-0000",
|
|
||||||
ignore = [
|
ignore = [
|
||||||
|
#"RUSTSEC-0000-0000",
|
||||||
|
#{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" },
|
||||||
|
#"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish
|
||||||
|
#{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },
|
||||||
]
|
]
|
||||||
|
# If this is true, then cargo deny will use the git executable to fetch advisory database.
|
||||||
|
# If this is false, then it uses a built-in git library.
|
||||||
|
# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support.
|
||||||
|
# See Git Authentication for more information about setting up git authentication.
|
||||||
|
#git-fetch-with-cli = true
|
||||||
|
|
||||||
# This section is considered when running `cargo deny check licenses`
|
# This section is considered when running `cargo deny check licenses`
|
||||||
# More documentation for the licenses section can be found here:
|
# More documentation for the licenses section can be found here:
|
||||||
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
|
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
|
||||||
[licenses]
|
[licenses]
|
||||||
unlicensed = "deny"
|
|
||||||
# List of explicitly allowed licenses
|
# List of explicitly allowed licenses
|
||||||
# See https://spdx.org/licenses/ for list of possible licenses
|
# See https://spdx.org/licenses/ for list of possible licenses
|
||||||
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
|
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
|
||||||
|
@ -42,26 +92,8 @@ allow = [
|
||||||
"Unicode-DFS-2016",
|
"Unicode-DFS-2016",
|
||||||
"CC0-1.0",
|
"CC0-1.0",
|
||||||
"ISC",
|
"ISC",
|
||||||
|
"OpenSSL",
|
||||||
]
|
]
|
||||||
# List of explicitly disallowed licenses
|
|
||||||
# See https://spdx.org/licenses/ for list of possible licenses
|
|
||||||
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
|
|
||||||
deny = [
|
|
||||||
]
|
|
||||||
# Lint level for licenses considered copyleft
|
|
||||||
copyleft = "deny"
|
|
||||||
# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses
|
|
||||||
# * both - The license will be approved if it is both OSI-approved *AND* FSF
|
|
||||||
# * either - The license will be approved if it is either OSI-approved *OR* FSF
|
|
||||||
# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF
|
|
||||||
# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved
|
|
||||||
# * neither - This predicate is ignored and the default lint level is used
|
|
||||||
allow-osi-fsf-free = "neither"
|
|
||||||
# Lint level used when no other predicates are matched
|
|
||||||
# 1. License isn't in the allow or deny lists
|
|
||||||
# 2. License isn't copyleft
|
|
||||||
# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither"
|
|
||||||
default = "deny"
|
|
||||||
# The confidence threshold for detecting a license from license text.
|
# The confidence threshold for detecting a license from license text.
|
||||||
# The higher the value, the more closely the license text must be to the
|
# The higher the value, the more closely the license text must be to the
|
||||||
# canonical license text of a valid SPDX license file.
|
# canonical license text of a valid SPDX license file.
|
||||||
|
@ -72,7 +104,25 @@ confidence-threshold = 0.8
|
||||||
exceptions = [
|
exceptions = [
|
||||||
# Each entry is the crate and version constraint, and its specific allow
|
# Each entry is the crate and version constraint, and its specific allow
|
||||||
# list
|
# list
|
||||||
#{ allow = ["Zlib"], name = "adler32", version = "*" },
|
#{ allow = ["Zlib"], crate = "adler32" },
|
||||||
|
]
|
||||||
|
|
||||||
|
# Some crates don't have (easily) machine readable licensing information,
|
||||||
|
# adding a clarification entry for it allows you to manually specify the
|
||||||
|
# licensing information
|
||||||
|
[[licenses.clarify]]
|
||||||
|
# The package spec the clarification applies to
|
||||||
|
crate = "ring"
|
||||||
|
# The SPDX expression for the license requirements of the crate
|
||||||
|
expression = "MIT AND ISC AND OpenSSL"
|
||||||
|
# One or more files in the crate's source used as the "source of truth" for
|
||||||
|
# the license expression. If the contents match, the clarification will be used
|
||||||
|
# when running the license check, otherwise the clarification will be ignored
|
||||||
|
# and the crate will be checked normally, which may produce warnings or errors
|
||||||
|
# depending on the rest of your configuration
|
||||||
|
license-files = [
|
||||||
|
# Each entry is a crate relative path, and the (opaque) hash of its contents
|
||||||
|
{ path = "LICENSE", hash = 0xbd0eed23 }
|
||||||
]
|
]
|
||||||
|
|
||||||
[licenses.private]
|
[licenses.private]
|
||||||
|
@ -81,6 +131,12 @@ exceptions = [
|
||||||
# To see how to mark a crate as unpublished (to the official registry),
|
# To see how to mark a crate as unpublished (to the official registry),
|
||||||
# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field.
|
# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field.
|
||||||
ignore = true
|
ignore = true
|
||||||
|
# One or more private registries that you might publish crates to, if a crate
|
||||||
|
# is only published to private registries, and ignore is true, the crate will
|
||||||
|
# not have its license(s) checked
|
||||||
|
registries = [
|
||||||
|
#"https://sekretz.com/registry
|
||||||
|
]
|
||||||
|
|
||||||
# This section is considered when running `cargo deny check bans`.
|
# This section is considered when running `cargo deny check bans`.
|
||||||
# More documentation about the 'bans' section can be found here:
|
# More documentation about the 'bans' section can be found here:
|
||||||
|
@ -89,7 +145,7 @@ ignore = true
|
||||||
# Lint level for when multiple versions of the same crate are detected
|
# Lint level for when multiple versions of the same crate are detected
|
||||||
multiple-versions = "warn"
|
multiple-versions = "warn"
|
||||||
# Lint level for when a crate version requirement is `*`
|
# Lint level for when a crate version requirement is `*`
|
||||||
wildcards = "warn"
|
wildcards = "allow"
|
||||||
# The graph highlighting used when creating dotgraphs for crates
|
# The graph highlighting used when creating dotgraphs for crates
|
||||||
# with multiple versions
|
# with multiple versions
|
||||||
# * lowest-version - The path to the lowest versioned duplicate is highlighted
|
# * lowest-version - The path to the lowest versioned duplicate is highlighted
|
||||||
|
@ -106,17 +162,53 @@ workspace-default-features = "allow"
|
||||||
external-default-features = "allow"
|
external-default-features = "allow"
|
||||||
# List of crates that are allowed. Use with care!
|
# List of crates that are allowed. Use with care!
|
||||||
allow = [
|
allow = [
|
||||||
#{ name = "ansi_term", version = "=0.11.0" },
|
#"ansi_term@0.11.0",
|
||||||
|
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" },
|
||||||
]
|
]
|
||||||
# List of crates to deny
|
# List of crates to deny
|
||||||
deny = [
|
deny = [
|
||||||
# Each entry the name of a crate and a version range. If version is
|
#"ansi_term@0.11.0",
|
||||||
# not specified, all versions will be matched.
|
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" },
|
||||||
#{ name = "ansi_term", version = "=0.11.0" },
|
|
||||||
#
|
|
||||||
# Wrapper crates can optionally be specified to allow the crate when it
|
# Wrapper crates can optionally be specified to allow the crate when it
|
||||||
# is a direct dependency of the otherwise banned crate
|
# is a direct dependency of the otherwise banned crate
|
||||||
#{ name = "ansi_term", version = "=0.11.0", wrappers = [] },
|
#{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] },
|
||||||
|
]
|
||||||
|
|
||||||
|
# List of features to allow/deny
|
||||||
|
# Each entry the name of a crate and a version range. If version is
|
||||||
|
# not specified, all versions will be matched.
|
||||||
|
#[[bans.features]]
|
||||||
|
#crate = "reqwest"
|
||||||
|
# Features to not allow
|
||||||
|
#deny = ["json"]
|
||||||
|
# Features to allow
|
||||||
|
#allow = [
|
||||||
|
# "rustls",
|
||||||
|
# "__rustls",
|
||||||
|
# "__tls",
|
||||||
|
# "hyper-rustls",
|
||||||
|
# "rustls",
|
||||||
|
# "rustls-pemfile",
|
||||||
|
# "rustls-tls-webpki-roots",
|
||||||
|
# "tokio-rustls",
|
||||||
|
# "webpki-roots",
|
||||||
|
#]
|
||||||
|
# If true, the allowed features must exactly match the enabled feature set. If
|
||||||
|
# this is set there is no point setting `deny`
|
||||||
|
#exact = true
|
||||||
|
|
||||||
|
# Certain crates/versions that will be skipped when doing duplicate detection.
|
||||||
|
skip = [
|
||||||
|
#"ansi_term@0.11.0",
|
||||||
|
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" },
|
||||||
|
]
|
||||||
|
# Similarly to `skip` allows you to skip certain crates during duplicate
|
||||||
|
# detection. Unlike skip, it also includes the entire tree of transitive
|
||||||
|
# dependencies starting at the specified crate, up to a certain depth, which is
|
||||||
|
# by default infinite.
|
||||||
|
skip-tree = [
|
||||||
|
#"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies
|
||||||
|
#{ crate = "ansi_term@0.11.0", depth = 20 },
|
||||||
]
|
]
|
||||||
|
|
||||||
# This section is considered when running `cargo deny check sources`.
|
# This section is considered when running `cargo deny check sources`.
|
||||||
|
@ -138,3 +230,7 @@ allow-git = []
|
||||||
[sources.allow-org]
|
[sources.allow-org]
|
||||||
# 1 or more github.com organizations to allow git sources for
|
# 1 or more github.com organizations to allow git sources for
|
||||||
github = []
|
github = []
|
||||||
|
# 1 or more gitlab.com organizations to allow git sources for
|
||||||
|
gitlab = []
|
||||||
|
# 1 or more bitbucket.org organizations to allow git sources for
|
||||||
|
bitbucket = []
|
||||||
|
|