diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml
new file mode 100644
index 00000000..f43a2e54
--- /dev/null
+++ b/.github/workflows/precommit.yml
@@ -0,0 +1,24 @@
+name: Precommit
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+
+env:
+ CARGO_TERM_COLOR: always
+
+jobs:
+ precommit:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v4
+ - name: Install Rust Nightly
+ run: rustup toolchain install nightly && rustup component add rustfmt --toolchain nightly
+ - name: Install `wasm-pack`
+ run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
+ - name: Precommit
+ run: ./precommit.sh
diff --git a/.github/workflows/rust-tests.yml b/.github/workflows/rust-tests.yml
deleted file mode 100644
index 56904f82..00000000
--- a/.github/workflows/rust-tests.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: Rust Tests
-
-on:
- push:
- branches: [ "master" ]
- pull_request:
- branches: [ "master" ]
-
-env:
- CARGO_TERM_COLOR: always
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v3
- - name: Build
- run: cargo build --verbose
- - name: Build Release
- run: cargo build --verbose --release
- - name: Run Tests
- run: cargo test --verbose
- - name: Run Tests in Release
- run: cargo test --verbose --release
diff --git a/README.md b/README.md
index 69af2533..0d73946f 100644
--- a/README.md
+++ b/README.md
@@ -3,8 +3,9 @@
Harper
-[![Build](https://github.com/chilipepperhott/harper/actions/workflows/build.yml/badge.svg)](https://github.com/chilipepperhott/harper/actions/workflows/build.yml)
-[![Rust Tests](https://github.com/chilipepperhott/harper/actions/workflows/rust-tests.yml/badge.svg)](https://github.com/chilipepperhott/harper/actions/workflows/rust-tests.yml)
+[![Harper LS](https://github.com/chilipepperhott/harper/actions/workflows/build_harper_ls.yml/badge.svg)](https://github.com/chilipepperhott/harper/actions/workflows/build_harper_ls.yml)
+[![Web](https://github.com/chilipepperhott/harper/actions/workflows/build_web.yml/badge.svg)](https://github.com/chilipepperhott/harper/actions/workflows/build_web.yml)
+[![Precommit](https://github.com/chilipepperhott/harper/actions/workflows/precommit.yml/badge.svg)](https://github.com/chilipepperhott/harper/actions/workflows/precommit.yml)
Harper is a an English grammar checker designed to be _just right._
I created it after years of dealing with the shortcomings of the competition.
diff --git a/harper-core/src/document.rs b/harper-core/src/document.rs
index 99b3b55d..9abbd146 100644
--- a/harper-core/src/document.rs
+++ b/harper-core/src/document.rs
@@ -2,20 +2,16 @@ use std::fmt::Display;
use itertools::Itertools;
+use crate::linting::Suggestion;
use crate::parsers::{Markdown, Parser, PlainEnglish};
-use crate::TokenStringExt;
-use crate::{
- linting::Suggestion,
- span::Span,
- FatToken,
- Punctuation::{self},
- Token, TokenKind,
-};
+use crate::span::Span;
+use crate::Punctuation::{self};
+use crate::{FatToken, Token, TokenKind, TokenStringExt};
pub struct Document {
source: Vec,
tokens: Vec,
- parser: Box,
+ parser: Box
}
impl Default for Document {
@@ -33,7 +29,7 @@ impl Document {
let mut doc = Self {
source,
tokens: Vec::new(),
- parser,
+ parser
};
doc.parse();
@@ -137,7 +133,7 @@ impl Document {
pub fn get_full_string(&self) -> String {
self.get_span_content_str(Span {
start: 0,
- end: self.source.len(),
+ end: self.source.len()
})
}
@@ -165,8 +161,9 @@ impl Document {
self.parse();
}
- /// Searches for quotation marks and fills the [`Punctuation::Quote::twin_loc`] field.
- /// This is on a best effort basis.
+    /// Searches for quotation marks and fills the
+    /// [`Punctuation::Quote::twin_loc`] field. This is done on a
+    /// best-effort basis.
///
/// Current algorithm is very basic and could use some work.
fn match_quotes(&mut self) {
@@ -188,7 +185,8 @@ impl Document {
}
}
- /// Searches for contractions and condenses them down into single self.tokens
+    /// Searches for contractions and condenses them down into single
+    /// tokens within `self.tokens`
fn condense_contractions(&mut self) {
if self.tokens.len() < 3 {
return;
@@ -233,7 +231,7 @@ impl Document {
&old[0..replace_starts
.first()
.copied()
- .unwrap_or(replace_starts.len())],
+ .unwrap_or(replace_starts.len())]
);
let mut iter = replace_starts.iter().peekable();
@@ -251,7 +249,7 @@ impl Document {
&old[replace_starts
.last()
.map(|v| v + 3)
- .unwrap_or(replace_starts.len())..],
+ .unwrap_or(replace_starts.len())..]
)
}
}
@@ -321,21 +319,19 @@ fn is_sentence_terminator(token: &TokenKind) -> bool {
TokenKind::Punctuation(punct) => [
Punctuation::Period,
Punctuation::Bang,
- Punctuation::Question,
+ Punctuation::Question
]
.contains(punct),
TokenKind::Newline(_) => true,
- _ => false,
+ _ => false
}
}
#[cfg(test)]
mod tests {
use super::Document;
- use crate::{
- parsers::{Markdown, PlainEnglish},
- token::TokenStringExt,
- };
+ use crate::parsers::{Markdown, PlainEnglish};
+ use crate::token::TokenStringExt;
#[test]
fn parses_sentences_correctly() {
diff --git a/harper-core/src/lexing/email_address.rs b/harper-core/src/lexing/email_address.rs
index ca4d4f21..c50860e2 100644
--- a/harper-core/src/lexing/email_address.rs
+++ b/harper-core/src/lexing/email_address.rs
@@ -1,8 +1,7 @@
use itertools::Itertools;
-use crate::TokenKind;
-
use super::FoundToken;
+use crate::TokenKind;
pub fn lex_email_address(source: &[char]) -> Option {
// Location of the @ sign
@@ -31,7 +30,7 @@ pub fn lex_email_address(source: &[char]) -> Option {
Some(FoundToken {
next_index: at_loc + 1 + domain_part_len,
- token: TokenKind::EmailAddress,
+ token: TokenKind::EmailAddress
})
}
@@ -103,7 +102,7 @@ fn valid_unquoted_character(c: char) -> bool {
let others = [
'!', '#', '$', '%', '&', '\'', '*', '+', '-', '/', '=', '?', '^', '_', '`', '{', '|', '}',
- '~', '.',
+ '~', '.'
];
if others.contains(&c) {
@@ -136,9 +135,8 @@ fn validate_hostname(source: &[char]) -> bool {
#[cfg(test)]
mod tests {
- use crate::lexing::email_address::validate_hostname;
-
use super::{lex_email_address, validate_local_part};
+ use crate::lexing::email_address::validate_hostname;
fn example_local_parts() -> impl Iterator- > {
[
@@ -158,7 +156,7 @@ mod tests {
r#"user-"#,
r#"postmaster"#,
r#"postmaster"#,
- r#"_test"#,
+ r#"_test"#
]
.into_iter()
.map(|s| s.chars().collect())
@@ -179,12 +177,11 @@ mod tests {
r#"example.org"#,
r#"strange.example.com"#,
r#"example.org"#,
- r#"example.org"#,
- // The existing parser intentionally doesn't support IP addresses
- // It simply isn't worth the effort at the moment.
- // r#"[123.123.123.123]"#,
- // r#"[IPv6:2001:0db8:85a3:0000:0000:8a2e:0370:7334]"#,
- // r#"[IPv6:2001:0db8:85a3:0000:0000:8a2e:0370:7334]"#,
+ r#"example.org"# /* The existing parser intentionally doesn't support IP addresses
+ * It simply isn't worth the effort at the moment.
+ * r#"[123.123.123.123]"#,
+ * r#"[IPv6:2001:0db8:85a3:0000:0000:8a2e:0370:7334]"#,
+ * r#"[IPv6:2001:0db8:85a3:0000:0000:8a2e:0370:7334]"#, */
]
.into_iter()
.map(|s| s.chars().collect())
diff --git a/harper-core/src/lexing/mod.rs b/harper-core/src/lexing/mod.rs
index 017625e4..895b6511 100644
--- a/harper-core/src/lexing/mod.rs
+++ b/harper-core/src/lexing/mod.rs
@@ -1,17 +1,14 @@
mod email_address;
-use crate::token::Quote;
-
-use crate::token::{Punctuation, TokenKind};
-
use self::email_address::lex_email_address;
+use crate::token::{Punctuation, Quote, TokenKind};
#[derive(Debug)]
pub struct FoundToken {
/// The index of the character __after__ the lexed token
pub next_index: usize,
/// Token lexed
- pub token: TokenKind,
+ pub token: TokenKind
}
pub fn lex_token(source: &[char]) -> Option {
@@ -21,7 +18,7 @@ pub fn lex_token(source: &[char]) -> Option {
lex_newlines,
lex_number,
lex_email_address,
- lex_word,
+ lex_word
];
for lexer in lexers {
@@ -46,7 +43,7 @@ fn lex_word(source: &[char]) -> Option {
} else {
return Some(FoundToken {
next_index: end + 1,
- token: TokenKind::Word,
+ token: TokenKind::Word
});
}
}
@@ -78,7 +75,7 @@ pub fn lex_number(source: &[char]) -> Option {
if let Ok(n) = s.parse::() {
return Some(FoundToken {
token: TokenKind::Number(n),
- next_index: end + 1,
+ next_index: end + 1
});
}
}
@@ -92,7 +89,7 @@ fn lex_newlines(source: &[char]) -> Option {
if count > 0 {
Some(FoundToken {
token: TokenKind::Newline(count),
- next_index: count,
+ next_index: count
})
} else {
None
@@ -105,7 +102,7 @@ fn lex_spaces(source: &[char]) -> Option {
if count > 0 {
Some(FoundToken {
token: TokenKind::Space(count),
- next_index: count,
+ next_index: count
})
} else {
None
@@ -156,12 +153,12 @@ fn lex_punctuation(source: &[char]) -> Option {
'$' => Dollar,
'|' => Pipe,
'_' => Underscore,
- _ => return None,
+ _ => return None
};
Some(FoundToken {
next_index: 1,
- token: TokenKind::Punctuation(punct),
+ token: TokenKind::Punctuation(punct)
})
}
@@ -171,7 +168,7 @@ fn lex_quote(source: &[char]) -> Option {
if c == '\"' || c == '“' || c == '”' {
Some(FoundToken {
next_index: 1,
- token: TokenKind::Punctuation(Punctuation::Quote(Quote { twin_loc: None })),
+ token: TokenKind::Punctuation(Punctuation::Quote(Quote { twin_loc: None }))
})
} else {
None
diff --git a/harper-core/src/lib.rs b/harper-core/src/lib.rs
index d22c705f..5db04dd6 100644
--- a/harper-core/src/lib.rs
+++ b/harper-core/src/lib.rs
@@ -9,8 +9,7 @@ mod spell;
mod token;
pub use document::Document;
-pub use linting::LintSet;
-pub use linting::{Lint, LintKind, Linter, Suggestion};
+pub use linting::{Lint, LintKind, LintSet, Linter, Suggestion};
pub use span::Span;
pub use spell::{Dictionary, FullDictionary, MergedDictionary};
pub use token::{FatToken, Punctuation, Token, TokenKind, TokenStringExt};
diff --git a/harper-core/src/linting/lint.rs b/harper-core/src/linting/lint.rs
index fc6399ae..7f095cee 100644
--- a/harper-core/src/linting/lint.rs
+++ b/harper-core/src/linting/lint.rs
@@ -13,7 +13,7 @@ pub struct Lint {
pub message: String,
/// A numerical value for the importance of a lint.
/// Lower = more important.
- pub priority: u8,
+ pub priority: u8
}
impl Default for Lint {
@@ -23,7 +23,7 @@ impl Default for Lint {
lint_kind: Default::default(),
suggestions: Default::default(),
message: Default::default(),
- priority: 127,
+ priority: 127
}
}
}
@@ -36,12 +36,12 @@ pub enum LintKind {
Repetition,
Readability,
#[default]
- Miscellaneous,
+ Miscellaneous
}
#[derive(Debug, Clone, Serialize, Deserialize, Is)]
pub enum Suggestion {
- ReplaceWith(Vec),
+ ReplaceWith(Vec)
}
impl Display for Suggestion {
diff --git a/harper-core/src/linting/lint_set.rs b/harper-core/src/linting/lint_set.rs
index e3c38f3a..8f7e3e0c 100644
--- a/harper-core/src/linting/lint_set.rs
+++ b/harper-core/src/linting/lint_set.rs
@@ -1,16 +1,18 @@
-use crate::{Dictionary, Document, Lint};
-
-use super::{spaces::Spaces, Linter};
use paste::paste;
-use super::{
- long_sentences::LongSentences, matcher::Matcher, repeated_words::RepeatedWords,
- sentence_capitalization::SentenceCapitalization, spell_check::SpellCheck,
- unclosed_quotes::UnclosedQuotes, wrong_quotes::WrongQuotes,
-};
+use super::long_sentences::LongSentences;
+use super::matcher::Matcher;
+use super::repeated_words::RepeatedWords;
+use super::sentence_capitalization::SentenceCapitalization;
+use super::spaces::Spaces;
+use super::spell_check::SpellCheck;
+use super::unclosed_quotes::UnclosedQuotes;
+use super::wrong_quotes::WrongQuotes;
+use super::Linter;
+use crate::{Dictionary, Document, Lint};
pub struct LintSet {
- pub(super) linters: Vec>,
+ pub(super) linters: Vec>
}
impl Linter for LintSet {
@@ -30,7 +32,7 @@ impl Linter for LintSet {
impl LintSet {
pub fn new() -> Self {
Self {
- linters: Vec::new(),
+ linters: Vec::new()
}
}
diff --git a/harper-core/src/linting/long_sentences.rs b/harper-core/src/linting/long_sentences.rs
index 0cdd115c..c6ba801f 100644
--- a/harper-core/src/linting/long_sentences.rs
+++ b/harper-core/src/linting/long_sentences.rs
@@ -1,5 +1,6 @@
use super::{Lint, LintKind, Linter};
-use crate::{token::TokenStringExt, Document, Span};
+use crate::token::TokenStringExt;
+use crate::{Document, Span};
/// Detect and warn that the sentence is too long.
#[derive(Debug, Clone, Copy, Default)]
diff --git a/harper-core/src/linting/matcher.rs b/harper-core/src/linting/matcher.rs
index c98b3a4e..41d0187f 100644
--- a/harper-core/src/linting/matcher.rs
+++ b/harper-core/src/linting/matcher.rs
@@ -1,12 +1,10 @@
-use crate::{
- spell::DictWord, Document, Lint, LintKind, Linter, Punctuation, Span, Suggestion, Token,
- TokenKind,
-};
+use crate::spell::DictWord;
+use crate::{Document, Lint, LintKind, Linter, Punctuation, Span, Suggestion, Token, TokenKind};
#[derive(Debug, PartialEq, PartialOrd, Clone)]
struct PatternToken {
kind: TokenKind,
- content: Option,
+ content: Option
}
impl PatternToken {
@@ -14,12 +12,12 @@ impl PatternToken {
if token.kind.is_word() {
Self {
kind: token.kind,
- content: Some(document.get_span_content(token.span).into()),
+ content: Some(document.get_span_content(token.span).into())
}
} else {
Self {
kind: token.kind,
- content: None,
+ content: None
}
}
}
@@ -87,19 +85,19 @@ macro_rules! pt {
struct Rule {
pattern: Vec,
- replace_with: Vec,
+ replace_with: Vec
}
-/// A linter that uses a variety of curated pattern matches to find and fix common
-/// grammatical issues.
+/// A linter that uses a variety of curated pattern matches to find and fix
+/// common grammatical issues.
pub struct Matcher {
- triggers: Vec,
+ triggers: Vec
}
impl Matcher {
pub fn new() -> Self {
- // This match list needs to be automatically expanded instead of explicitly defined
- // like it is now.
+ // This match list needs to be automatically expanded instead of explicitly
+ // defined like it is now.
let mut triggers = pt! {
"my","self" => "myself",
"human","live" => "human life",
@@ -195,24 +193,24 @@ impl Matcher {
// We need to be more explicit that we are replacing with an Em dash
triggers.push(Rule {
pattern: vec![pt!(Hyphen), pt!(Hyphen), pt!(Hyphen)],
- replace_with: vecword!("—"),
+ replace_with: vecword!("—")
});
// Same goes for this En dash
triggers.push(Rule {
pattern: vec![pt!(Hyphen), pt!(Hyphen)],
- replace_with: vecword!("–"),
+ replace_with: vecword!("–")
});
// And this ellipsis
triggers.push(Rule {
pattern: vec![pt!(Period), pt!(Period), pt!(Period)],
- replace_with: vecword!("…"),
+ replace_with: vecword!("…")
});
triggers.push(Rule {
pattern: vec![pt!("L"), pt!(Period), pt!("L"), pt!(Period), pt!("M")],
- replace_with: vecword!("large language model"),
+ replace_with: vecword!("large language model")
});
triggers.push(Rule {
@@ -224,7 +222,7 @@ impl Matcher {
pt!("M"),
pt!(Period),
],
- replace_with: vecword!("large language model"),
+ replace_with: vecword!("large language model")
});
Self { triggers }
@@ -264,7 +262,7 @@ impl Linter for Matcher {
if match_tokens.len() == trigger.pattern.len() && !match_tokens.is_empty() {
let span = Span::new(
match_tokens.first().unwrap().span.start,
- match_tokens.last().unwrap().span.end,
+ match_tokens.last().unwrap().span.end
);
lints.push(Lint {
@@ -275,7 +273,7 @@ impl Linter for Matcher {
"Did you mean “{}”?",
trigger.replace_with.iter().collect::()
),
- priority: 15,
+ priority: 15
})
}
}
@@ -287,9 +285,8 @@ impl Linter for Matcher {
#[cfg(test)]
mod tests {
- use crate::{Document, Linter};
-
use super::Matcher;
+ use crate::{Document, Linter};
#[test]
fn matches_therefore() {
diff --git a/harper-core/src/linting/mod.rs b/harper-core/src/linting/mod.rs
index 5234f214..3ac7d956 100644
--- a/harper-core/src/linting/mod.rs
+++ b/harper-core/src/linting/mod.rs
@@ -20,7 +20,8 @@ pub trait Linter: Send + Sync {
#[cfg(test)]
mod tests {
- use crate::{parsers::Markdown, Document, Linter};
+ use crate::parsers::Markdown;
+ use crate::{Document, Linter};
pub fn assert_lint_count(text: &str, mut linter: impl Linter, count: usize) {
let test = Document::new(text, Box::new(Markdown));
diff --git a/harper-core/src/linting/repeated_words.rs b/harper-core/src/linting/repeated_words.rs
index eef65056..75b1e5a1 100644
--- a/harper-core/src/linting/repeated_words.rs
+++ b/harper-core/src/linting/repeated_words.rs
@@ -1,17 +1,14 @@
use hashbrown::HashSet;
-use crate::{
- spell::DictWord,
- token::{Token, TokenKind, TokenStringExt},
- Document, Span, Suggestion,
-};
-
use super::{Lint, LintKind, Linter};
+use crate::spell::DictWord;
+use crate::token::{Token, TokenKind, TokenStringExt};
+use crate::{Document, Span, Suggestion};
#[derive(Debug, Clone)]
pub struct RepeatedWords {
/// The set of words that can be considered for repetition checking.
- set: HashSet,
+ set: HashSet
}
impl RepeatedWords {
@@ -77,7 +74,7 @@ impl Linter for RepeatedWords {
let remove_start = if let Some(Token {
span,
- kind: TokenKind::Space(_),
+ kind: TokenKind::Space(_)
}) = intervening_tokens.last()
{
span.start
diff --git a/harper-core/src/linting/sentence_capitalization.rs b/harper-core/src/linting/sentence_capitalization.rs
index f08891ab..602d0dd9 100644
--- a/harper-core/src/linting/sentence_capitalization.rs
+++ b/harper-core/src/linting/sentence_capitalization.rs
@@ -1,15 +1,16 @@
use itertools::Itertools;
-use crate::{document::Document, TokenStringExt};
-
use super::lint::Suggestion;
use super::{Lint, LintKind, Linter};
+use crate::document::Document;
+use crate::TokenStringExt;
#[derive(Debug, Clone, Copy, Default)]
pub struct SentenceCapitalization;
impl Linter for SentenceCapitalization {
- /// A linter that checks to make sure the first word of each sentence is capitalized.
+ /// A linter that checks to make sure the first word of each sentence is
+ /// capitalized.
fn lint(&mut self, document: &Document) -> Vec {
let mut lints = Vec::new();
@@ -27,11 +28,11 @@ impl Linter for SentenceCapitalization {
span: first_word.span.with_len(1),
lint_kind: LintKind::Capitalization,
suggestions: vec![Suggestion::ReplaceWith(
- first_letter.to_uppercase().collect_vec(),
+ first_letter.to_uppercase().collect_vec()
)],
priority: 31,
message: "This sentence does not start with a capital letter"
- .to_string(),
+ .to_string()
})
}
}
@@ -62,7 +63,7 @@ mod tests {
assert_lint_count(
"i have complete conviction. she is guilty",
SentenceCapitalization,
- 2,
+ 2
)
}
@@ -71,7 +72,7 @@ mod tests {
assert_lint_count(
"53 is the length of the longest word.",
SentenceCapitalization,
- 0,
+ 0
);
}
@@ -80,7 +81,7 @@ mod tests {
assert_lint_count(
"[`misspelled_word`] is assumed to be quite small (n < 100). ",
SentenceCapitalization,
- 0,
+ 0
)
}
@@ -89,7 +90,7 @@ mod tests {
assert_lint_count(
"the linter should not be affected by `this` unlintable.",
SentenceCapitalization,
- 1,
+ 1
)
}
}
diff --git a/harper-core/src/linting/spaces.rs b/harper-core/src/linting/spaces.rs
index aade8169..59831943 100644
--- a/harper-core/src/linting/spaces.rs
+++ b/harper-core/src/linting/spaces.rs
@@ -1,5 +1,6 @@
use super::{Lint, Linter};
-use crate::{token::TokenStringExt, Document, LintKind, Suggestion, TokenKind};
+use crate::token::TokenStringExt;
+use crate::{Document, LintKind, Suggestion, TokenKind};
#[derive(Debug, Default)]
pub struct Spaces;
@@ -23,7 +24,7 @@ impl Linter for Spaces {
"There are {} spaces where there should be only one.",
count
),
- priority: 15,
+ priority: 15
})
}
}
diff --git a/harper-core/src/linting/spell_check.rs b/harper-core/src/linting/spell_check.rs
index a72e05c2..e0578587 100644
--- a/harper-core/src/linting/spell_check.rs
+++ b/harper-core/src/linting/spell_check.rs
@@ -1,23 +1,24 @@
use hashbrown::HashMap;
-use super::{Lint, LintKind, Linter};
-use crate::{document::Document, spell::suggest_correct_spelling, Dictionary};
-
use super::lint::Suggestion;
+use super::{Lint, LintKind, Linter};
+use crate::document::Document;
+use crate::spell::suggest_correct_spelling;
+use crate::Dictionary;
pub struct SpellCheck
where
- T: Dictionary,
+ T: Dictionary
{
dictionary: T,
- word_cache: HashMap, Vec>>,
+ word_cache: HashMap, Vec>>
}
impl SpellCheck {
pub fn new(dictionary: T) -> Self {
Self {
dictionary,
- word_cache: HashMap::new(),
+ word_cache: HashMap::new()
}
}
}
@@ -85,7 +86,7 @@ impl Linter for SpellCheck {
"Did you mean to spell “{}” this way?",
document.get_span_content_str(word.span)
),
- priority: 63,
+ priority: 63
})
}
diff --git a/harper-core/src/linting/unclosed_quotes.rs b/harper-core/src/linting/unclosed_quotes.rs
index 1d0cdf73..a7083c49 100644
--- a/harper-core/src/linting/unclosed_quotes.rs
+++ b/harper-core/src/linting/unclosed_quotes.rs
@@ -1,6 +1,7 @@
-use crate::{document::Document, token::Quote, Punctuation, TokenKind};
-
use super::{Lint, LintKind, Linter};
+use crate::document::Document;
+use crate::token::Quote;
+use crate::{Punctuation, TokenKind};
#[derive(Debug, Clone, Copy, Default)]
pub struct UnclosedQuotes;
@@ -18,7 +19,7 @@ impl Linter for UnclosedQuotes {
lint_kind: LintKind::Formatting,
suggestions: vec![],
message: "This quote has no termination.".to_string(),
- priority: 255,
+ priority: 255
})
}
}
diff --git a/harper-core/src/linting/wrong_quotes.rs b/harper-core/src/linting/wrong_quotes.rs
index 3e195d98..4b9f9fd9 100644
--- a/harper-core/src/linting/wrong_quotes.rs
+++ b/harper-core/src/linting/wrong_quotes.rs
@@ -1,6 +1,6 @@
-use crate::{document::Document, Suggestion, Token, TokenStringExt};
-
use super::{Lint, Linter};
+use crate::document::Document;
+use crate::{Suggestion, Token, TokenStringExt};
#[derive(Debug, Clone, Copy, Default)]
pub struct WrongQuotes;
diff --git a/harper-core/src/parsers/markdown.rs b/harper-core/src/parsers/markdown.rs
index 9fb39a09..b6a9abd7 100644
--- a/harper-core/src/parsers/markdown.rs
+++ b/harper-core/src/parsers/markdown.rs
@@ -1,7 +1,8 @@
use super::{Parser, PlainEnglish};
use crate::{Span, Token, TokenKind};
-/// A parser that wraps the [`PlainEnglish`] parser that allows one to parse CommonMark files.
+/// A parser that wraps the [`PlainEnglish`] parser that allows one to parse
+/// CommonMark files.
///
/// Will ignore code blocks and tables.
pub struct Markdown;
@@ -33,14 +34,14 @@ impl Parser for Markdown {
pulldown_cmark::Event::HardBreak => {
tokens.push(Token {
span: Span::new_with_len(traversed_chars, 1),
- kind: TokenKind::Newline(1),
+ kind: TokenKind::Newline(1)
});
}
pulldown_cmark::Event::Start(tag) => stack.push(tag),
pulldown_cmark::Event::End(pulldown_cmark::Tag::Paragraph)
| pulldown_cmark::Event::End(pulldown_cmark::Tag::Item) => tokens.push(Token {
span: Span::new_with_len(traversed_chars, 1),
- kind: TokenKind::Newline(1),
+ kind: TokenKind::Newline(1)
}),
pulldown_cmark::Event::End(_) => {
stack.pop();
@@ -50,7 +51,7 @@ impl Parser for Markdown {
tokens.push(Token {
span: Span::new(traversed_chars, chunk_len),
- kind: TokenKind::Unlintable,
+ kind: TokenKind::Unlintable
});
}
pulldown_cmark::Event::Text(text) => {
@@ -62,7 +63,7 @@ impl Parser for Markdown {
if matches!(tag, Tag::CodeBlock(..)) {
tokens.push(Token {
span: Span::new(traversed_chars, text.chars().count()),
- kind: TokenKind::Unlintable,
+ kind: TokenKind::Unlintable
});
continue;
}
@@ -89,7 +90,7 @@ impl Parser for Markdown {
tokens.append(&mut new_tokens);
}
- _ => (),
+ _ => ()
}
}
diff --git a/harper-core/src/parsers/mod.rs b/harper-core/src/parsers/mod.rs
index 9e608c7c..fa37b2c1 100644
--- a/harper-core/src/parsers/mod.rs
+++ b/harper-core/src/parsers/mod.rs
@@ -1,10 +1,11 @@
mod markdown;
mod plain_english;
-pub use crate::token::{Quote, Token, TokenKind, TokenStringExt};
pub use markdown::Markdown;
pub use plain_english::PlainEnglish;
+pub use crate::token::{Quote, Token, TokenKind, TokenStringExt};
+
pub trait Parser: Send + Sync {
fn parse(&mut self, source: &[char]) -> Vec;
}
@@ -15,7 +16,7 @@ pub trait StrParser {
impl StrParser for T
where
- T: Parser,
+ T: Parser
{
fn parse_str(&mut self, source: impl AsRef) -> Vec {
let source: Vec<_> = source.as_ref().chars().collect();
@@ -26,15 +27,13 @@ where
#[cfg(test)]
mod tests {
use super::{Markdown, Parser, PlainEnglish};
- use crate::{
- Punctuation,
- TokenKind::{self, *},
- };
+ use crate::Punctuation;
+ use crate::TokenKind::{self, *};
fn assert_tokens_eq(
test_str: impl AsRef,
expected: &[TokenKind],
- parser: &mut impl Parser,
+ parser: &mut impl Parser
) {
let chars: Vec<_> = test_str.as_ref().chars().collect();
let tokens = parser.parse(&chars);
@@ -71,8 +70,8 @@ mod tests {
Space(1),
Word,
Space(1),
- Word,
- ],
+ Word
+ ]
)
}
@@ -89,8 +88,8 @@ mod tests {
Word,
Space(1),
Word,
- Newline(1),
- ],
+ Newline(1)
+ ]
);
}
@@ -107,8 +106,8 @@ mod tests {
Word,
Space(1),
Word,
- Newline(1),
- ],
+ Newline(1)
+ ]
);
}
}
diff --git a/harper-core/src/parsers/plain_english.rs b/harper-core/src/parsers/plain_english.rs
index 332515dc..00f0f8f5 100644
--- a/harper-core/src/parsers/plain_english.rs
+++ b/harper-core/src/parsers/plain_english.rs
@@ -1,8 +1,6 @@
use super::Parser;
-use crate::{
- lexing::{lex_token, FoundToken},
- Span, Token,
-};
+use crate::lexing::{lex_token, FoundToken};
+use crate::{Span, Token};
/// A parser that will attempt to lex as many tokens a possible,
/// without discrimination and until the end of input.
@@ -25,7 +23,7 @@ impl Parser for PlainEnglish {
if let Some(FoundToken { token, next_index }) = lex_token(&source[cursor..]) {
tokens.push(Token {
span: Span::new(cursor, cursor + next_index),
- kind: token,
+ kind: token
});
cursor += next_index;
} else {
diff --git a/harper-core/src/span.rs b/harper-core/src/span.rs
index 1c5ebef6..0a285192 100644
--- a/harper-core/src/span.rs
+++ b/harper-core/src/span.rs
@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default, PartialEq, Eq)]
pub struct Span {
pub start: usize,
- pub end: usize,
+ pub end: usize
}
impl Span {
@@ -17,7 +17,7 @@ impl Span {
pub fn new_with_len(start: usize, len: usize) -> Self {
Self {
start,
- end: start + len,
+ end: start + len
}
}
diff --git a/harper-core/src/spell/full_dictionary.rs b/harper-core/src/spell/full_dictionary.rs
index b7465daf..ec83e81f 100644
--- a/harper-core/src/spell/full_dictionary.rs
+++ b/harper-core/src/spell/full_dictionary.rs
@@ -2,29 +2,27 @@ use hashbrown::HashSet;
use once_cell::sync::Lazy;
use smallvec::{SmallVec, ToSmallVec};
-use super::{
- dictionary::Dictionary,
- hunspell::{parse_default_attribute_list, parse_default_word_list},
- seq_to_normalized, DictWord,
-};
+use super::dictionary::Dictionary;
+use super::hunspell::{parse_default_attribute_list, parse_default_word_list};
+use super::{seq_to_normalized, DictWord};
/// A full, fat dictionary.
/// All of the elements are stored in-memory.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct FullDictionary {
- /// Storing a separate [`Vec`] for iterations speeds up spellchecking by ~16% at the cost of
- /// additional memory.
+ /// Storing a separate [`Vec`] for iterations speeds up spellchecking by
+ /// ~16% at the cost of additional memory.
///
/// This is likely due to increased locality :shrug:.
///
/// This list is sorted by word length (i.e. the shortest words are first).
words: Vec,
/// A lookup list for each word length.
- /// Each index of this list will return the first index of [`Self::words`] that has a word
- /// whose index is that length.
+ /// Each index of this list will return the first index of [`Self::words`]
+ /// that has a word whose index is that length.
word_len_starts: Vec,
/// All English words
- word_set: HashSet,
+ word_set: HashSet
}
fn uncached_inner_new() -> FullDictionary {
@@ -37,7 +35,7 @@ fn uncached_inner_new() -> FullDictionary {
FullDictionary {
word_set: HashSet::from_iter(words.iter().cloned()),
word_len_starts: FullDictionary::create_len_starts(&mut words),
- words,
+ words
}
}
@@ -48,7 +46,7 @@ impl FullDictionary {
Self {
words: Vec::new(),
word_len_starts: Vec::new(),
- word_set: HashSet::new(),
+ word_set: HashSet::new()
}
}
@@ -72,14 +70,16 @@ impl FullDictionary {
/// Append a single word to the dictionary.
///
- /// If you are appending many words, consider using [`Self::extend_words`] instead.
+ /// If you are appending many words, consider using [`Self::extend_words`]
+ /// instead.
pub fn append_word(&mut self, word: impl AsRef<[char]>) {
self.extend_words(std::iter::once(word.as_ref()))
}
- /// Create a lookup table for finding words of a specific length in a word list.
- /// NOTE: This function will sort the original word list by its length.
- /// If the word list's order is changed after creating the lookup, it will no longer be valid.
+ /// Create a lookup table for finding words of a specific length in a word
+ /// list. NOTE: This function will sort the original word list by its
+ /// length. If the word list's order is changed after creating the
+ /// lookup, it will no longer be valid.
fn create_len_starts(words: &mut [DictWord]) -> Vec {
words.sort_by_key(|a| a.len());
let mut word_len_starts = vec![0, 0];
diff --git a/harper-core/src/spell/hunspell/attributes.rs b/harper-core/src/spell/hunspell/attributes.rs
index 95e5bbd2..b90bd677 100644
--- a/harper-core/src/spell/hunspell/attributes.rs
+++ b/harper-core/src/spell/hunspell/attributes.rs
@@ -1,18 +1,20 @@
-use itertools::Itertools;
-use smallvec::ToSmallVec;
use std::usize;
use hashbrown::HashMap;
+use itertools::Itertools;
+use smallvec::ToSmallVec;
-use crate::{spell::DictWord, Span};
-
-use super::{matcher::Matcher, word_list::MarkedWord, Error};
+use super::matcher::Matcher;
+use super::word_list::MarkedWord;
+use super::Error;
+use crate::spell::DictWord;
+use crate::Span;
#[derive(Debug, Clone)]
struct AffixReplacement {
pub remove: Vec,
pub add: Vec,
- pub condition: Matcher,
+ pub condition: Matcher
}
#[derive(Debug, Clone)]
@@ -20,19 +22,19 @@ struct Expansion {
// If not true, its a prefix
pub suffix: bool,
pub cross_product: bool,
- pub replacements: Vec,
+ pub replacements: Vec
}
#[derive(Debug)]
pub struct AttributeList {
/// Key = Affix Flag
- affixes: HashMap,
+ affixes: HashMap
}
impl AttributeList {
pub fn parse(file: &str) -> Result {
let mut output = Self {
- affixes: HashMap::default(),
+ affixes: HashMap::default()
};
for line in file.lines() {
@@ -56,7 +58,7 @@ impl AttributeList {
let suffix = match parser.parse_arg()? {
"PFX" => false,
"SFX" => true,
- _ => return Ok(()),
+ _ => return Ok(())
};
let flag = {
@@ -84,7 +86,7 @@ impl AttributeList {
let replacement = AffixReplacement {
remove,
add,
- condition,
+ condition
};
expansion.replacements.push(replacement)
@@ -97,8 +99,8 @@ impl AttributeList {
Expansion {
suffix,
cross_product,
- replacements: Vec::with_capacity(count),
- },
+ replacements: Vec::with_capacity(count)
+ }
);
}
@@ -122,7 +124,7 @@ impl AttributeList {
new_words.extend(Self::apply_replacement(
replacement,
&word.letters,
- expansion.suffix,
+ expansion.suffix
))
}
@@ -143,7 +145,7 @@ impl AttributeList {
for new_word in new_words {
cross_product_words.extend(self.expand_marked_word(MarkedWord {
letters: new_word,
- attributes: opp_attr.clone(),
+ attributes: opp_attr.clone()
})?)
}
@@ -160,7 +162,7 @@ impl AttributeList {
pub fn expand_marked_words(
&self,
- words: impl IntoIterator
- ,
+ words: impl IntoIterator
-
) -> Result, Error> {
let mut output = Vec::new();
@@ -174,7 +176,7 @@ impl AttributeList {
fn apply_replacement(
replacement: &AffixReplacement,
letters: &[char],
- suffix: bool,
+ suffix: bool
) -> Option {
if replacement.condition.len() > letters.len() {
return None;
@@ -231,7 +233,7 @@ impl AttributeList {
struct AttributeArgParser<'a> {
line: &'a str,
- cursor: usize,
+ cursor: usize
}
impl<'a> AttributeArgParser<'a> {
@@ -262,7 +264,8 @@ impl<'a> AttributeArgParser<'a> {
Ok(&self.line[abs_start..abs_end])
}
- // Grab next affix argument, returning an error if it isn't parsable as a number.
+ // Grab next affix argument, returning an error if it isn't parsable as a
+ // number.
fn parse_usize_arg(&mut self) -> Result {
self.parse_arg()?
.parse()
@@ -274,7 +277,7 @@ impl<'a> AttributeArgParser<'a> {
match self.parse_arg()? {
"Y" => Ok(true),
"N" => Ok(false),
- _ => Err(Error::ExpectedBoolean),
+ _ => Err(Error::ExpectedBoolean)
}
}
}
diff --git a/harper-core/src/spell/hunspell/error.rs b/harper-core/src/spell/hunspell/error.rs
index 8f1e113d..c2d5fcf6 100644
--- a/harper-core/src/spell/hunspell/error.rs
+++ b/harper-core/src/spell/hunspell/error.rs
@@ -13,5 +13,5 @@ pub enum Error {
#[error("Could not parse because we encountered the end of the line.")]
UnexpectedEndOfLine,
#[error("An error occured with a condition: {0}")]
- Matcher(#[from] matcher::Error),
+ Matcher(#[from] matcher::Error)
}
diff --git a/harper-core/src/spell/hunspell/matcher.rs b/harper-core/src/spell/hunspell/matcher.rs
index 2525b011..299fab9a 100644
--- a/harper-core/src/spell/hunspell/matcher.rs
+++ b/harper-core/src/spell/hunspell/matcher.rs
@@ -4,7 +4,7 @@
#[derive(Debug, Clone)]
pub struct Matcher {
/// Position-based operators.
- operators: Vec,
+ operators: Vec
}
impl Matcher {
@@ -38,7 +38,7 @@ impl Matcher {
}
}
'.' => operators.push(Operator::Any),
- _ => operators.push(Operator::Literal(c)),
+ _ => operators.push(Operator::Literal(c))
}
char_idx += 1;
@@ -71,7 +71,7 @@ enum Operator {
Literal(char),
MatchOne(Vec),
MatchNone(Vec),
- Any,
+ Any
}
impl Operator {
@@ -80,7 +80,7 @@ impl Operator {
Operator::Literal(b) => a == *b,
Operator::MatchOne(b) => b.contains(&a),
Operator::MatchNone(b) => !b.contains(&a),
- Operator::Any => true,
+ Operator::Any => true
}
}
}
@@ -88,14 +88,13 @@ impl Operator {
#[derive(Debug, Clone, Copy, thiserror::Error)]
pub enum Error {
#[error("Unmatched bracket at index: {index}")]
- UnmatchedBracket { index: usize },
+ UnmatchedBracket { index: usize }
}
#[cfg(test)]
mod tests {
- use crate::spell::hunspell::matcher::Operator;
-
use super::Matcher;
+ use crate::spell::hunspell::matcher::Operator;
#[test]
fn parses_simple() {
diff --git a/harper-core/src/spell/hunspell/mod.rs b/harper-core/src/spell/hunspell/mod.rs
index a8fb59d9..e5aa34f7 100644
--- a/harper-core/src/spell/hunspell/mod.rs
+++ b/harper-core/src/spell/hunspell/mod.rs
@@ -3,11 +3,12 @@ mod error;
mod matcher;
mod word_list;
-use self::word_list::parse_word_list;
-pub use self::word_list::MarkedWord;
pub use attributes::AttributeList;
pub use error::Error;
+use self::word_list::parse_word_list;
+pub use self::word_list::MarkedWord;
+
pub fn parse_default_word_list() -> Result, Error> {
parse_word_list(include_str!("../../../dictionary.dict"))
}
@@ -18,15 +19,15 @@ pub fn parse_default_attribute_list() -> Result {
#[cfg(test)]
mod tests {
+ use super::attributes::AttributeList;
+ use super::word_list::parse_word_list;
+ use super::{parse_default_attribute_list, parse_default_word_list};
use crate::spell::DictWord;
- use super::{
- attributes::AttributeList, parse_default_attribute_list, parse_default_word_list,
- word_list::parse_word_list,
- };
-
pub const TEST_WORD_LIST: &str = "3\nhello\ntry/B\nwork/AB";
- pub const ATTR_LIST: &str = "SET UTF-8\nTRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'\n\nREP 2\nREP f ph\nREP ph f\n\nPFX A Y 1\nPFX A 0 re .\n\nSFX B Y 2\nSFX B 0 ed [^y]\nSFX B y ied y";
+ pub const ATTR_LIST: &str =
+ "SET UTF-8\nTRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'\n\nREP 2\nREP f ph\nREP \
+ ph f\n\nPFX A Y 1\nPFX A 0 re .\n\nSFX B Y 2\nSFX B 0 ed [^y]\nSFX B y ied y";
#[test]
fn correctly_expands_test_files() {
@@ -49,7 +50,9 @@ mod tests {
fn plural_giants() {
let words = parse_word_list("1\ngiant/SM").unwrap();
let attributes = AttributeList::parse(
- "SFX S Y 4\nSFX S y ies [^aeiou]y\nSFX S 0 s [aeiou]y\nSFX S 0 es [sxzh]\nSFX S 0 s [^sxzhy]\n\nSFX M Y 1\nSFX M 0 's .",
+ "SFX S Y 4\nSFX S y ies [^aeiou]y\nSFX S 0 s \
+ [aeiou]y\nSFX S 0 es [sxzh]\nSFX S 0 s [^sxzhy]\n\nSFX \
+ M Y 1\nSFX M 0 's ."
)
.unwrap();
diff --git a/harper-core/src/spell/hunspell/word_list.rs b/harper-core/src/spell/hunspell/word_list.rs
index c3bf404e..37d9f88c 100644
--- a/harper-core/src/spell/hunspell/word_list.rs
+++ b/harper-core/src/spell/hunspell/word_list.rs
@@ -1,10 +1,9 @@
-use crate::spell::DictWord;
-
use super::Error;
+use crate::spell::DictWord;
pub struct MarkedWord {
pub letters: DictWord,
- pub attributes: Vec,
+ pub attributes: Vec
}
/// Parse a hunspell word list
@@ -25,12 +24,12 @@ pub fn parse_word_list(source: &str) -> Result, Error> {
if let Some((word, attributes)) = line.split_once('/') {
words.push(MarkedWord {
letters: word.chars().collect(),
- attributes: attributes.chars().collect(),
+ attributes: attributes.chars().collect()
})
} else {
words.push(MarkedWord {
letters: line.chars().collect(),
- attributes: Vec::new(),
+ attributes: Vec::new()
})
}
}
diff --git a/harper-core/src/spell/merged_dictionary.rs b/harper-core/src/spell/merged_dictionary.rs
index 01850660..5e851c8d 100644
--- a/harper-core/src/spell/merged_dictionary.rs
+++ b/harper-core/src/spell/merged_dictionary.rs
@@ -7,18 +7,18 @@ use super::dictionary::Dictionary;
#[derive(Clone)]
pub struct MergedDictionary
where
- T: Dictionary + Clone,
+ T: Dictionary + Clone
{
- children: Vec>,
+ children: Vec>
}
impl MergedDictionary
where
- T: Dictionary + Clone,
+ T: Dictionary + Clone
{
pub fn new() -> Self {
Self {
- children: Vec::new(),
+ children: Vec::new()
}
}
@@ -29,7 +29,7 @@ where
impl Default for MergedDictionary
where
- T: Dictionary + Clone,
+ T: Dictionary + Clone
{
fn default() -> Self {
Self::new()
@@ -38,7 +38,7 @@ where
impl Dictionary for MergedDictionary
where
- T: Dictionary + Clone,
+ T: Dictionary + Clone
{
fn contains_word(&self, word: &[char]) -> bool {
for child in &self.children {
@@ -57,7 +57,7 @@ where
Box::new(
self.children
.iter()
- .flat_map(move |c| c.words_with_len_iter(len)),
+ .flat_map(move |c| c.words_with_len_iter(len))
)
}
}
diff --git a/harper-core/src/spell/mod.rs b/harper-core/src/spell/mod.rs
index 17a611e7..c44010c1 100644
--- a/harper-core/src/spell/mod.rs
+++ b/harper-core/src/spell/mod.rs
@@ -17,12 +17,13 @@ pub type DictWord = SmallVec<[char; 6]>;
/// Suggest a correct spelling for a given misspelled word.
/// [`misspelled_word`] is assumed to be quite small (n < 100).
-/// [`max_edit_dist`] relates to an optimization that allows the search algorithm to prune large portions of the search.
+/// [`max_edit_dist`] relates to an optimization that allows the search
+/// algorithm to prune large portions of the search.
pub fn suggest_correct_spelling<'a>(
misspelled_word: &[char],
result_limit: usize,
max_edit_dist: u8,
- dictionary: &'a impl Dictionary,
+ dictionary: &'a impl Dictionary
) -> Vec<&'a [char]> {
let misspelled_word = seq_to_normalized(misspelled_word);
@@ -77,7 +78,8 @@ pub fn suggest_correct_spelling<'a>(
// Create final, ordered list of suggestions.
let mut found = Vec::with_capacity(found_dist.len());
- // Often the longest and the shortest words are the most helpful, so lets push them first.
+ // Often the longest and the shortest words are the most helpful, so lets push
+ // them first.
let minmax = found_dist.iter().position_minmax_by_key(|a| a.0.len());
if let MinMaxResult::MinMax(a, b) = minmax {
if a == b {
@@ -103,12 +105,13 @@ pub fn suggest_correct_spelling<'a>(
found
}
-/// Convenience function over [`suggest_correct_spelling`] that does conversions for you.
+/// Convenience function over [`suggest_correct_spelling`] that does conversions
+/// for you.
pub fn suggest_correct_spelling_str(
misspelled_word: impl AsRef,
result_limit: usize,
max_edit_dist: u8,
- dictionary: &FullDictionary,
+ dictionary: &FullDictionary
) -> Vec {
let chars: Vec = misspelled_word.as_ref().chars().collect();
@@ -127,7 +130,7 @@ fn edit_distance_min_alloc(
source: &[char],
target: &[char],
previous_row: &mut Vec,
- current_row: &mut Vec,
+ current_row: &mut Vec
) -> u8 {
if cfg!(debug) {
assert!(source.len() <= 255 && target.len() <= 255);
@@ -175,7 +178,7 @@ fn seq_to_normalized(seq: &[char]) -> Cow<'_, [char]> {
fn char_to_normalized(c: char) -> char {
match c {
'’' => '\'',
- _ => c,
+ _ => c
}
}
diff --git a/harper-core/src/token.rs b/harper-core/src/token.rs
index b019f0f4..c114f464 100644
--- a/harper-core/src/token.rs
+++ b/harper-core/src/token.rs
@@ -7,7 +7,7 @@ use crate::span::Span;
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Default)]
pub struct Token {
pub span: Span,
- pub kind: TokenKind,
+ pub kind: TokenKind
}
impl Token {
@@ -17,16 +17,17 @@ impl Token {
FatToken {
content,
- kind: self.kind,
+ kind: self.kind
}
}
}
-/// A [`Token`] that holds its content as a fat [`Vec`] rather than as a [`Span`].
+/// A [`Token`] that holds its content as a fat [`Vec`] rather than as a
+/// [`Span`].
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
pub struct FatToken {
pub content: Vec,
- pub kind: TokenKind,
+ pub kind: TokenKind
}
#[derive(Debug, Is, Clone, Copy, Serialize, Deserialize, PartialEq, Default, PartialOrd)]
@@ -41,9 +42,9 @@ pub enum TokenKind {
/// A sequence of "\n" newlines
Newline(usize),
EmailAddress,
- /// A special token used for things like inline code blocks that should be ignored by all
- /// linters.
- Unlintable,
+ /// A special token used for things like inline code blocks that should be
+ /// ignored by all linters.
+ Unlintable
}
impl TokenKind {
@@ -135,20 +136,20 @@ pub enum Punctuation {
/// `|`
Pipe,
/// `_`
- Underscore,
+ Underscore
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, PartialOrd)]
pub struct Quote {
/// The location of the matching quote, if it exists.
- pub twin_loc: Option,
+ pub twin_loc: Option
}
pub trait TokenStringExt {
fn first_word(&self) -> Option;
/// Grabs the first word in the sentence.
- /// Will also return [`None`] if there is an unlintable token in the position of the first
- /// word.
+ /// Will also return [`None`] if there is an unlintable token in the
+ /// position of the first word.
fn first_sentence_word(&self) -> Option;
/// Grabs the first token that isn't whitespace from the token string.
fn first_non_whitespace(&self) -> Option;
@@ -158,8 +159,8 @@ pub trait TokenStringExt {
fn iter_spaces(&self) -> impl Iterator
- + '_;
fn iter_apostrophe_indices(&self) -> impl Iterator
- + '_;
fn iter_apostrophes(&self) -> impl Iterator
- + '_;
- /// Grab the span that represents the beginning of the first element and the end of the last
- /// element.
+ /// Grab the span that represents the beginning of the first element and the
+ /// end of the last element.
fn span(&self) -> Option;
fn iter_quote_indices(&self) -> impl Iterator
- + '_;
diff --git a/harper-ls/src/backend.rs b/harper-ls/src/backend.rs
index ba2c9763..57b29114 100644
--- a/harper-ls/src/backend.rs
+++ b/harper-ls/src/backend.rs
@@ -1,39 +1,52 @@
-use std::{collections::HashMap, path::PathBuf, sync::Arc};
+use std::collections::HashMap;
+use std::path::PathBuf;
+use std::sync::Arc;
-use harper_core::{
- parsers::Markdown, Dictionary, Document, FullDictionary, LintSet, Linter, MergedDictionary,
-};
+use harper_core::parsers::Markdown;
+use harper_core::{Dictionary, Document, FullDictionary, LintSet, Linter, MergedDictionary};
use itertools::Itertools;
use serde_json::Value;
use tokio::sync::Mutex;
-use tower_lsp::{
- jsonrpc::Result,
- lsp_types::{
- notification::{PublishDiagnostics, ShowMessage},
- CodeActionOrCommand, CodeActionParams, CodeActionProviderCapability, CodeActionResponse,
- Diagnostic, DidChangeTextDocumentParams, DidCloseTextDocumentParams,
- DidOpenTextDocumentParams, DidSaveTextDocumentParams, ExecuteCommandParams,
- InitializeParams, InitializeResult, InitializedParams, MessageType,
- PublishDiagnosticsParams, Range, ServerCapabilities, ShowMessageParams,
- TextDocumentSyncCapability, TextDocumentSyncKind, TextDocumentSyncOptions,
- TextDocumentSyncSaveOptions, Url,
- },
- Client, LanguageServer,
+use tower_lsp::jsonrpc::Result;
+use tower_lsp::lsp_types::notification::{PublishDiagnostics, ShowMessage};
+use tower_lsp::lsp_types::{
+ CodeActionOrCommand,
+ CodeActionParams,
+ CodeActionProviderCapability,
+ CodeActionResponse,
+ Diagnostic,
+ DidChangeTextDocumentParams,
+ DidCloseTextDocumentParams,
+ DidOpenTextDocumentParams,
+ DidSaveTextDocumentParams,
+ ExecuteCommandParams,
+ InitializeParams,
+ InitializeResult,
+ InitializedParams,
+ MessageType,
+ PublishDiagnosticsParams,
+ Range,
+ ServerCapabilities,
+ ShowMessageParams,
+ TextDocumentSyncCapability,
+ TextDocumentSyncKind,
+ TextDocumentSyncOptions,
+ TextDocumentSyncSaveOptions,
+ Url
};
+use tower_lsp::{Client, LanguageServer};
-use crate::{
- config::Config,
- diagnostics::{lint_to_code_actions, lints_to_diagnostics},
- dictionary_io::{load_dict, save_dict},
- pos_conv::range_to_span,
- tree_sitter_parser::TreeSitterParser,
-};
+use crate::config::Config;
+use crate::diagnostics::{lint_to_code_actions, lints_to_diagnostics};
+use crate::dictionary_io::{load_dict, save_dict};
+use crate::pos_conv::range_to_span;
+use crate::tree_sitter_parser::TreeSitterParser;
#[derive(Default)]
struct DocumentState {
document: Document,
ident_dict: Arc,
- linter: LintSet,
+ linter: LintSet
}
/// Deallocate
@@ -41,7 +54,7 @@ pub struct Backend {
client: Client,
static_dictionary: Arc,
config: Config,
- doc_state: Mutex>,
+ doc_state: Mutex>
}
impl Backend {
@@ -52,11 +65,12 @@ impl Backend {
client,
static_dictionary: dictionary.into(),
doc_state: Mutex::new(HashMap::new()),
- config,
+ config
}
}
- /// Rewrites a path to a filename using the same conventions as [Neovim's undo-files](https://neovim.io/doc/user/options.html#'undodir').
+ /// Rewrites a path to a filename using the same conventions as
+ /// [Neovim's undo-files](https://neovim.io/doc/user/options.html#'undodir').
fn file_dict_name(url: &Url) -> PathBuf {
let mut rewritten = String::new();
@@ -76,7 +90,7 @@ impl Backend {
async fn load_file_dictionary(&self, url: &Url) -> FullDictionary {
match load_dict(self.get_file_dict_path(url)).await {
Ok(dict) => dict,
- Err(_) => FullDictionary::new(),
+ Err(_) => FullDictionary::new()
}
}
@@ -87,7 +101,7 @@ impl Backend {
async fn load_user_dictionary(&self) -> FullDictionary {
match load_dict(&self.config.user_dict_path).await {
Ok(dict) => dict,
- Err(_) => FullDictionary::new(),
+ Err(_) => FullDictionary::new()
}
}
@@ -105,7 +119,7 @@ impl Backend {
async fn generate_file_dictionary(
&self,
- url: &Url,
+ url: &Url
) -> anyhow::Result> {
let (global_dictionary, file_dictionary) = tokio::join!(
self.generate_global_dictionary(),
@@ -171,7 +185,7 @@ impl Backend {
async fn generate_code_actions(
&self,
url: &Url,
- range: Range,
+ range: Range
) -> Result> {
let mut doc_states = self.doc_state.lock().await;
let Some(doc_state) = doc_states.get_mut(url) else {
@@ -213,7 +227,7 @@ impl Backend {
client
.send_notification::(ShowMessageParams {
typ: MessageType::INFO,
- message: "Linting...".to_string(),
+ message: "Linting...".to_string()
})
.await
});
@@ -223,7 +237,7 @@ impl Backend {
let result = PublishDiagnosticsParams {
uri: url.clone(),
diagnostics,
- version: None,
+ version: None
};
self.client
@@ -245,11 +259,11 @@ impl LanguageServer for Backend {
change: Some(TextDocumentSyncKind::FULL),
will_save: None,
will_save_wait_until: None,
- save: Some(TextDocumentSyncSaveOptions::Supported(true)),
- },
+ save: Some(TextDocumentSyncSaveOptions::Supported(true))
+ }
)),
..Default::default()
- },
+ }
})
}
@@ -268,8 +282,10 @@ impl LanguageServer for Backend {
.log_message(MessageType::INFO, "File saved!")
.await;
- self.update_document_from_file(¶ms.text_document.uri)
+ let _ = self
+ .update_document_from_file(¶ms.text_document.uri)
.await;
+
self.publish_diagnostics(¶ms.text_document.uri).await;
}
@@ -278,7 +294,8 @@ impl LanguageServer for Backend {
.log_message(MessageType::INFO, "File opened!")
.await;
- self.update_document_from_file(¶ms.text_document.uri)
+ let _ = self
+ .update_document_from_file(¶ms.text_document.uri)
.await;
self.publish_diagnostics(¶ms.text_document.uri).await;
@@ -339,7 +356,7 @@ impl LanguageServer for Backend {
Ok(None)
}
- _ => Ok(None),
+ _ => Ok(None)
}
}
diff --git a/harper-ls/src/config.rs b/harper-ls/src/config.rs
index 2cfe0900..18e47516 100644
--- a/harper-ls/src/config.rs
+++ b/harper-ls/src/config.rs
@@ -5,7 +5,7 @@ use dirs::{config_dir, data_local_dir};
#[derive(Debug, Clone)]
pub struct Config {
pub user_dict_path: PathBuf,
- pub file_dict_path: PathBuf,
+ pub file_dict_path: PathBuf
}
impl Default for Config {
@@ -14,7 +14,7 @@ impl Default for Config {
user_dict_path: config_dir().unwrap().join("harper-ls/dictionary.txt"),
file_dict_path: data_local_dir()
.unwrap()
- .join("harper-ls/file_dictionaries/"),
+ .join("harper-ls/file_dictionaries/")
}
}
}
diff --git a/harper-ls/src/diagnostics.rs b/harper-ls/src/diagnostics.rs
index c46535ca..d121766e 100644
--- a/harper-ls/src/diagnostics.rs
+++ b/harper-ls/src/diagnostics.rs
@@ -1,8 +1,15 @@
-use harper_core::{Lint, Suggestion};
use std::collections::HashMap;
+
+use harper_core::{Lint, Suggestion};
use tower_lsp::lsp_types::{
- CodeAction, CodeActionKind, CodeActionOrCommand, Command, Diagnostic, TextEdit, Url,
- WorkspaceEdit,
+ CodeAction,
+ CodeActionKind,
+ CodeActionOrCommand,
+ Command,
+ Diagnostic,
+ TextEdit,
+ Url,
+ WorkspaceEdit
};
use crate::pos_conv::span_to_range;
@@ -17,7 +24,7 @@ pub fn lints_to_diagnostics(source: &[char], lints: &[Lint]) -> Vec
pub fn lint_to_code_actions<'a>(
lint: &'a Lint,
url: &'a Url,
- source: &'a [char],
+ source: &'a [char]
) -> Vec {
let mut results = Vec::new();
@@ -38,19 +45,19 @@ pub fn lint_to_code_actions<'a>(
vec![TextEdit {
range,
- new_text: with.iter().collect(),
- }],
+ new_text: with.iter().collect()
+ }]
)])),
document_changes: None,
- change_annotations: None,
+ change_annotations: None
}),
command: None,
is_preferred: None,
disabled: None,
- data: None,
+ data: None
})
})
- .map(CodeActionOrCommand::CodeAction),
+ .map(CodeActionOrCommand::CodeAction)
);
if lint.lint_kind.is_spelling() {
@@ -59,13 +66,13 @@ pub fn lint_to_code_actions<'a>(
results.push(CodeActionOrCommand::Command(Command::new(
format!("Add \"{}\" to the global dictionary.", orig),
"AddToUserDict".to_string(),
- Some(vec![orig.clone().into()]),
+ Some(vec![orig.clone().into()])
)));
results.push(CodeActionOrCommand::Command(Command::new(
format!("Add \"{}\" to the file dictionary.", orig),
"AddToFileDict".to_string(),
- Some(vec![orig.into(), url.to_string().into()]),
+ Some(vec![orig.into(), url.to_string().into()])
)))
}
@@ -84,6 +91,6 @@ fn lint_to_diagnostic(lint: &Lint, source: &[char]) -> Diagnostic {
message: lint.message.clone(),
related_information: None,
tags: None,
- data: None,
+ data: None
}
}
diff --git a/harper-ls/src/dictionary_io.rs b/harper-ls/src/dictionary_io.rs
index 4262fc77..5861b8cc 100644
--- a/harper-ls/src/dictionary_io.rs
+++ b/harper-ls/src/dictionary_io.rs
@@ -1,9 +1,8 @@
-use std::path::{Path, PathBuf};
+use std::path::Path;
use harper_core::{Dictionary, FullDictionary};
use tokio::fs::File;
-use tokio::io::{self, AsyncRead, AsyncReadExt, BufReader};
-use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader};
pub async fn save_dict(path: impl AsRef, dict: impl Dictionary) -> io::Result<()> {
let file = File::create(path.as_ref()).await?;
diff --git a/harper-ls/src/main.rs b/harper-ls/src/main.rs
index ab9cbe73..eb2e8826 100644
--- a/harper-ls/src/main.rs
+++ b/harper-ls/src/main.rs
@@ -1,5 +1,6 @@
use config::Config;
-use tokio::{fs, net::TcpListener};
+use tokio::fs;
+use tokio::net::TcpListener;
mod backend;
mod config;
mod diagnostics;
@@ -14,7 +15,7 @@ use tower_lsp::{LspService, Server};
#[derive(Debug, Parser)]
struct Args {
#[arg(short, long, default_value_t = false)]
- stdio: bool,
+ stdio: bool
}
#[tokio::main]
diff --git a/harper-ls/src/pos_conv.rs b/harper-ls/src/pos_conv.rs
index 1699e115..36776594 100644
--- a/harper-ls/src/pos_conv.rs
+++ b/harper-ls/src/pos_conv.rs
@@ -1,8 +1,8 @@
use harper_core::Span;
use tower_lsp::lsp_types::{Position, Range};
-/// This module includes various conversions from the index-based [`Span`]s that Harper uses, and
-/// the Ranges that the LSP uses.
+/// This module includes various conversions from the index-based [`Span`]s that
+/// Harper uses, and the Ranges that the LSP uses.
pub fn span_to_range(source: &[char], span: Span) -> Range {
let start = index_to_position(source, span.start);
@@ -24,7 +24,7 @@ fn index_to_position(source: &[char], index: usize) -> Position {
Position {
line: lines as u32,
- character: cols as u32,
+ character: cols as u32
}
}
@@ -62,7 +62,7 @@ mod tests {
let start = Position {
line: 0,
- character: 4,
+ character: 4
};
let i = position_to_index(&source, start);
@@ -82,7 +82,7 @@ mod tests {
let a = Position {
line: 1,
- character: 2,
+ character: 2
};
let b = position_to_index(&source, a);
diff --git a/harper-ls/src/tree_sitter_parser.rs b/harper-ls/src/tree_sitter_parser.rs
index d5675e37..4f3cd73e 100644
--- a/harper-ls/src/tree_sitter_parser.rs
+++ b/harper-ls/src/tree_sitter_parser.rs
@@ -1,23 +1,17 @@
use std::collections::HashSet;
-use harper_core::{
- parsers::{Markdown, Parser},
- FullDictionary, Span,
-};
+use harper_core::parsers::{Markdown, Parser};
+use harper_core::{FullDictionary, Span};
use tree_sitter::{Language, Node, Tree, TreeCursor};
-/// A Harper parser that wraps the standard [`Markdown`] parser that exclusively parses
-/// comments in any language supported by [`tree_sitter`].
+/// A Harper parser that wraps the standard [`Markdown`] parser that exclusively
+/// parses comments in any language supported by [`tree_sitter`].
#[derive(Debug, Clone)]
pub struct TreeSitterParser {
- language: Language,
+ language: Language
}
impl TreeSitterParser {
- pub fn new(language: Language) -> Self {
- Self { language }
- }
-
pub fn new_from_extension(file_extension: &str) -> Option {
let language = match file_extension {
"rs" => tree_sitter_rust::language(),
@@ -35,7 +29,7 @@ impl TreeSitterParser {
"cs" => tree_sitter_c_sharp::language(),
"toml" => tree_sitter_toml::language(),
"lua" => tree_sitter_lua::language(),
- _ => return None,
+ _ => return None
};
Some(Self { language })
diff --git a/harper-serve/src/main.rs b/harper-serve/src/main.rs
index 76521827..e1803b2d 100644
--- a/harper-serve/src/main.rs
+++ b/harper-serve/src/main.rs
@@ -1,22 +1,19 @@
#![allow(dead_code)]
-use harper_core::{Document, FatToken, FullDictionary, Lint, LintSet, Linter, Span, Suggestion};
use std::net::SocketAddr;
+
+use axum::body::Body;
+use axum::http::{Request, StatusCode};
+use axum::middleware::{self, Next};
+use axum::response::Response;
+use axum::routing::post;
+use axum::{Json, Router};
+use harper_core::{Document, FatToken, FullDictionary, Lint, LintSet, Linter, Span, Suggestion};
+use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tracing::{info, Level};
use tracing_subscriber::FmtSubscriber;
-use axum::{
- body::Body,
- http::Request,
- http::StatusCode,
- middleware::{self, Next},
- response::Response,
- routing::post,
- Json, Router,
-};
-use serde::{Deserialize, Serialize};
-
#[tokio::main]
async fn main() {
let subscriber = FmtSubscriber::builder()
@@ -79,12 +76,12 @@ async fn parse_text(Json(payload): Json) -> (StatusCode, Json,
+ pub tokens: Vec
}
async fn lint(Json(payload): Json) -> (StatusCode, Json) {
@@ -101,16 +98,16 @@ async fn lint(Json(payload): Json) -> (StatusCode, Json,
+ pub lints: Vec
}
async fn apply_suggestion(
- Json(payload): Json,
+ Json(payload): Json
) -> (StatusCode, Json) {
let text = payload.text;
let mut document = Document::new_markdown(&text);
@@ -119,8 +116,8 @@ async fn apply_suggestion(
(
StatusCode::ACCEPTED,
Json(ApplySuggestionResponse {
- text: document.get_full_string(),
- }),
+ text: document.get_full_string()
+ })
)
}
@@ -128,10 +125,10 @@ async fn apply_suggestion(
struct ApplySuggestionRequest {
pub text: String,
pub suggestion: Suggestion,
- pub span: Span,
+ pub span: Span
}
#[derive(Serialize)]
struct ApplySuggestionResponse {
- pub text: String,
+ pub text: String
}
diff --git a/harper-wasm/src/lib.rs b/harper-wasm/src/lib.rs
index b08f60cf..bdff116c 100644
--- a/harper-wasm/src/lib.rs
+++ b/harper-wasm/src/lib.rs
@@ -3,7 +3,8 @@ use std::sync::Mutex;
use harper_core::{remove_overlaps, Document, FullDictionary, LintSet, Linter};
use once_cell::sync::Lazy;
use serde::Serialize;
-use wasm_bindgen::{prelude::wasm_bindgen, JsValue};
+use wasm_bindgen::prelude::wasm_bindgen;
+use wasm_bindgen::JsValue;
static LINTER: Lazy> =
Lazy::new(|| Mutex::new(LintSet::new().with_standard(FullDictionary::create_from_curated())));
@@ -15,7 +16,8 @@ fn glue_serializer() -> serde_wasm_bindgen::Serializer {
/// Setup the WebAssembly module's logging.
///
-/// Not strictly necessary for anything to function, but makes bug-hunting less painful.
+/// Not strictly necessary for anything to function, but makes bug-hunting less
+/// painful.
#[wasm_bindgen(start)]
pub fn setup() {
console_error_panic_hook::set_once();
@@ -51,7 +53,7 @@ pub fn parse(text: String) -> Vec {
pub fn apply_suggestion(
text: String,
span: JsValue,
- suggestion: JsValue,
+ suggestion: JsValue
) -> Result {
let span = serde_wasm_bindgen::from_value(span).map_err(|e| e.to_string())?;
let suggestion = serde_wasm_bindgen::from_value(suggestion).map_err(|e| e.to_string())?;
diff --git a/precommit.sh b/precommit.sh
new file mode 100755
index 00000000..835c8eee
--- /dev/null
+++ b/precommit.sh
@@ -0,0 +1,25 @@
+#! /bin/bash
+
+# Run the tools necessary to make sure the code is ready for commit.
+
+set -eo pipefail
+
+R=$(pwd)
+
+cargo +nightly fmt
+cargo clippy -- -Dwarnings
+cargo test
+cargo test --release
+cargo doc
+cargo build
+cargo build --release
+
+cd $R/harper-wasm
+wasm-pack build
+
+cd $R/web
+yarn install
+yarn run format
+yarn run lint
+yarn run check
+yarn run build
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 00000000..e3d8b383
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,14 @@
+newline_style = "Unix"
+use_field_init_shorthand = true
+
+unstable_features = true
+combine_control_expr = false
+condense_wildcard_suffixes = true
+error_on_line_overflow = true
+error_on_unformatted = true
+format_strings = true
+imports_layout = "HorizontalVertical"
+imports_granularity = "Module"
+group_imports = "StdExternalCrate"
+wrap_comments = true
+trailing_comma = "Never"
diff --git a/web/.prettierrc b/web/.prettierrc
index a77fddea..3f7802c3 100644
--- a/web/.prettierrc
+++ b/web/.prettierrc
@@ -4,6 +4,12 @@
"trailingComma": "none",
"printWidth": 100,
"plugins": ["prettier-plugin-svelte"],
- "pluginSearchDirs": ["."],
- "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }]
+ "overrides": [
+ {
+ "files": "*.svelte",
+ "options": {
+ "parser": "svelte"
+ }
+ }
+ ]
}
diff --git a/web/package.json b/web/package.json
index 5d7f0463..e16ce832 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,44 +1,44 @@
{
- "name": "harper-web",
- "version": "0.0.1",
- "private": true,
- "scripts": {
- "dev": "vite dev",
- "build": "vite build",
- "preview": "vite preview",
- "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
- "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
- "lint": "prettier --plugin-search-dir . --check . && eslint .",
- "format": "prettier --plugin-search-dir . --write ."
- },
- "devDependencies": {
- "@flydotio/dockerfile": "^0.5.0",
- "@sveltejs/adapter-node": "^3.0.3",
- "@sveltejs/kit": "^1.20.4",
- "@typescript-eslint/eslint-plugin": "^6.0.0",
- "@typescript-eslint/parser": "^6.0.0",
- "autoprefixer": "^10.4.16",
- "eslint": "^8.28.0",
- "eslint-config-prettier": "^9.0.0",
- "eslint-plugin-prettier": "^5.0.1",
- "eslint-plugin-svelte": "^2.34.0",
- "flowbite": "^1.8.1",
- "flowbite-svelte": "^0.44.18",
- "postcss": "^8.4.31",
- "prettier": "^3.0.3",
- "prettier-plugin-svelte": "^3.0.3",
- "svelte": "^4.0.5",
- "svelte-check": "^3.4.3",
- "tailwindcss": "^3.3.3",
- "tslib": "^2.4.1",
- "typescript": "^5.0.0",
- "vite": "^4.4.2",
- "vite-plugin-top-level-await": "^1.4.1",
- "vite-plugin-wasm": "^3.3.0",
- "wasm": "link:../harper-wasm/pkg"
- },
- "type": "module",
- "dependencies": {
- "lodash-es": "^4.17.21"
- }
+ "name": "harper-web",
+ "version": "0.0.1",
+ "private": true,
+ "scripts": {
+ "dev": "vite dev",
+ "build": "vite build",
+ "preview": "vite preview",
+ "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
+ "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
+ "lint": "prettier --check . && eslint .",
+ "format": "prettier --write ."
+ },
+ "devDependencies": {
+ "@flydotio/dockerfile": "^0.5.0",
+ "@sveltejs/adapter-node": "^3.0.3",
+ "@sveltejs/kit": "^1.20.4",
+ "@typescript-eslint/eslint-plugin": "^6.0.0",
+ "@typescript-eslint/parser": "^6.0.0",
+ "autoprefixer": "^10.4.16",
+ "eslint": "^8.28.0",
+ "eslint-config-prettier": "^9.0.0",
+ "eslint-plugin-prettier": "^5.0.1",
+ "eslint-plugin-svelte": "^2.34.0",
+ "flowbite": "^1.8.1",
+ "flowbite-svelte": "^0.44.18",
+ "postcss": "^8.4.31",
+ "prettier": "^3.0.3",
+ "prettier-plugin-svelte": "^3.0.3",
+ "svelte": "^4.0.5",
+ "svelte-check": "^3.4.3",
+ "tailwindcss": "^3.3.3",
+ "tslib": "^2.4.1",
+ "typescript": "^5.0.0",
+ "vite": "^4.4.2",
+ "vite-plugin-top-level-await": "^1.4.1",
+ "vite-plugin-wasm": "^3.3.0",
+ "wasm": "link:../harper-wasm/pkg"
+ },
+ "type": "module",
+ "dependencies": {
+ "lodash-es": "^4.17.21"
+ }
}
diff --git a/web/postcss.config.js b/web/postcss.config.js
index 2e7af2b7..0f772168 100644
--- a/web/postcss.config.js
+++ b/web/postcss.config.js
@@ -1,6 +1,6 @@
export default {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-}
+ plugins: {
+ tailwindcss: {},
+ autoprefixer: {}
+ }
+};
diff --git a/web/src/app.css b/web/src/app.css
index 40ec3762..b0503056 100644
--- a/web/src/app.css
+++ b/web/src/app.css
@@ -3,54 +3,55 @@
@tailwind utilities;
* {
- user-select: none;
- font-family: Outfit;
+ user-select: none;
+ font-family: Outfit;
}
@font-face {
- font-family: Outfit;
- src: url('/fonts/outfit.ttf');
+ font-family: Outfit;
+ src: url('/fonts/outfit.ttf');
}
.underlinespecial {
- position: relative;
- background-color: var(--bg-color);
+ position: relative;
+ background-color: var(--bg-color);
}
.underlinespecial::after {
- transition-property: all;
- transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
- transition-duration: 150ms;
- content: '';
- display: block;
- width: 100%;
- height: var(--line-width);
- border-radius: 1000px;
- background: var(--line-color);
- position: absolute;
- bottom: -3px;
- left: 0;
+ transition-property: all;
+ transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
+ transition-duration: 150ms;
+ content: '';
+ display: block;
+ width: 100%;
+ height: var(--line-width);
+ border-radius: 1000px;
+ background: var(--line-color);
+ position: absolute;
+ bottom: -3px;
+ left: 0;
}
textarea {
- --tw-ring-shadow: 0 0 #000 !important;
+ --tw-ring-shadow: 0 0 #000 !important;
}
-.animate-bigbounce{
- animation: bigbounce 1s infinite;
+.animate-bigbounce {
+ animation: bigbounce 1s infinite;
}
.animate-after-bigbounce::after {
- animation: bigbounce 1s infinite;
+ animation: bigbounce 1s infinite;
}
@keyframes bigbounce {
- 0%, 100% {
+ 0%,
+ 100% {
transform: translateY(-40%);
- animation-timing-function: cubic-bezier(0.8,0,1,1);
+ animation-timing-function: cubic-bezier(0.8, 0, 1, 1);
}
50% {
transform: none;
- animation-timing-function: cubic-bezier(0,0,0.2,1);
- }
+ animation-timing-function: cubic-bezier(0, 0, 0.2, 1);
+ }
}
diff --git a/web/src/app.html b/web/src/app.html
index c4c14dbb..29a96bd1 100644
--- a/web/src/app.html
+++ b/web/src/app.html
@@ -1,23 +1,25 @@
-
+
+
+
+
+
+
+
+
+
+
+
+ Write With Harper
+ %sveltekit.head%
+
+
-
-
-
-
-
-
-
-
-
-
- Write With Harper
- %sveltekit.head%
-
-
-
-
-
%sveltekit.body%
-
-
+
+ %sveltekit.body%
+
diff --git a/web/src/lib/Editor.svelte b/web/src/lib/Editor.svelte
index 1dee89b4..2efc0268 100644
--- a/web/src/lib/Editor.svelte
+++ b/web/src/lib/Editor.svelte
@@ -9,7 +9,7 @@
let content = demo;
let lints: Lint[] = [];
- let lintCards: HTMLDivElement[] = [];
+ let lintCards: HTMLButtonElement[] = [];
let focused: number | undefined;
let editor: HTMLTextAreaElement | null;
@@ -55,7 +55,7 @@
Suggestions
-
+
{/each}
diff --git a/web/src/lib/Underlines.svelte b/web/src/lib/Underlines.svelte
index 6b9b5ad0..fbe97583 100644
--- a/web/src/lib/Underlines.svelte
+++ b/web/src/lib/Underlines.svelte
@@ -39,8 +39,16 @@
return output;
}
+ type UnderlineDetails = {
+ focused: boolean;
+ content: string;
+ index: number;
+ };
+
+ type UnderlineToken = string | null | undefined | UnderlineDetails;
+
function processString(lintMap: [Lint, number][], focusLintIndex?: number) {
- let results = lintMap
+ let results: UnderlineToken[] = lintMap
.map(([lint, lintIndex], index, arr) => {
let prevStart = 0;
let prev = arr[index - 1];
@@ -57,11 +65,11 @@
prevContent.push(...reOrgString(content.substring(prevStart, prevEnd)));
}
- let lintContent = [
- spanContent(lint.span, content).replaceAll(' ', '\u00A0'),
- lintIndex === focusLintIndex,
- lintIndex
- ];
+ let lintContent: UnderlineDetails = {
+ focused: lintIndex === focusLintIndex,
+ index: lintIndex,
+ content: spanContent(lint.span, content).replaceAll(' ', '\u00A0')
+ };
return [...prevContent, lintContent];
})
@@ -95,15 +103,15 @@
{chunk}
{:else}
- (focusLintIndex = chunk[2])}
- style={`--line-color: #DB2B39; --line-width: ${chunk[1] ? '4px' : '2px'}; --bg-color: ${chunk[1] ? '#dbafb3' : 'transparent'};`}
- l
+
+ {chunk.content}
+
{/if}
{/each}
diff --git a/web/static/site.webmanifest b/web/static/site.webmanifest
index b20abb7c..95911504 100644
--- a/web/static/site.webmanifest
+++ b/web/static/site.webmanifest
@@ -1,19 +1,19 @@
{
- "name": "",
- "short_name": "",
- "icons": [
- {
- "src": "/android-chrome-192x192.png",
- "sizes": "192x192",
- "type": "image/png"
- },
- {
- "src": "/android-chrome-512x512.png",
- "sizes": "512x512",
- "type": "image/png"
- }
- ],
- "theme_color": "#ffffff",
- "background_color": "#ffffff",
- "display": "standalone"
+ "name": "",
+ "short_name": "",
+ "icons": [
+ {
+ "src": "/android-chrome-192x192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "/android-chrome-512x512.png",
+ "sizes": "512x512",
+ "type": "image/png"
+ }
+ ],
+ "theme_color": "#ffffff",
+ "background_color": "#ffffff",
+ "display": "standalone"
}