From 03e47f497063fddef50a48db5b7857ecd2695753 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 27 Nov 2023 13:10:15 +0000 Subject: [PATCH 001/130] Initial commit --- LICENSE | 21 +++++++++++++++++++++ README.md | 2 ++ 2 files changed, 23 insertions(+) create mode 100644 LICENSE create mode 100644 README.md diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d1e439c --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 PostHog + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..cf48fa8 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# rusty-hook +A reliable and performant webhook system for PostHog From 80f5270df14f1da01382ddf9bbeef5bb74f1e3a9 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 27 Nov 2023 13:20:47 +0000 Subject: [PATCH 002/130] Structure --- .gitignore | 1 + Cargo.lock | 15 +++++++++++++++ Cargo.toml | 11 +++++++++++ hook-common/Cargo.toml | 8 ++++++++ hook-common/src/lib.rs | 14 ++++++++++++++ hook-consumer/Cargo.toml | 8 ++++++++ hook-consumer/src/main.rs | 3 +++ hook-producer/Cargo.toml | 8 ++++++++ hook-producer/src/main.rs | 3 +++ 9 files changed, 71 insertions(+) create mode 100644 .gitignore create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 hook-common/Cargo.toml create mode 100644 hook-common/src/lib.rs create mode 100644 hook-consumer/Cargo.toml create mode 100644 hook-consumer/src/main.rs create mode 100644 hook-producer/Cargo.toml create mode 100644 hook-producer/src/main.rs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ea8c4bf --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/target diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..456c385 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,15 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "hook-common" +version = "0.1.0" + +[[package]] +name = "hook-consumer" +version = "0.1.0" + +[[package]] +name = "hook-producer" +version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..d880e2c --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,11 @@ +[workspace] +resolver = "2" + +members = [ + "hook-common", + "hook-producer", + "hook-consumer", +] + +[workspace.dependencies] +sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json" ] } diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml new file mode 100644 index 0000000..1d14185 --- /dev/null +++ b/hook-common/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "hook-common" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs new file mode 100644 index 0000000..7d12d9a --- /dev/null +++ b/hook-common/src/lib.rs @@ -0,0 +1,14 @@ +pub fn add(left: usize, right: usize) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} diff --git a/hook-consumer/Cargo.toml b/hook-consumer/Cargo.toml new file mode 100644 index 0000000..49c2d9f --- /dev/null +++ b/hook-consumer/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "hook-consumer" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs new file mode 100644 index 0000000..e7a11a9 --- /dev/null +++ b/hook-consumer/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + println!("Hello, world!"); +} diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml new file mode 100644 index 0000000..96fbb4d --- /dev/null +++ b/hook-producer/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "hook-producer" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs new file mode 100644 index 0000000..e7a11a9 --- /dev/null +++ b/hook-producer/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + println!("Hello, world!"); +} From 2ca64c086c96c997688dc9abca8f7e2fbb39cf31 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 27 Nov 2023 14:44:09 +0000 Subject: [PATCH 003/130] Add rust workflow --- .github/workflows/rust.yml | 93 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 .github/workflows/rust.yml diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml new file mode 100644 index 0000000..6fc44a9 --- /dev/null +++ b/.github/workflows/rust.yml @@ -0,0 +1,93 @@ +name: Rust + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: buildjet-4vcpu-ubuntu-2204 + + steps: + - uses: actions/checkout@v3 + + - name: Install rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + + - uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-release-${{ hashFiles('**/Cargo.lock') }} + + - name: Run cargo build + run: cargo build --all --locked --release + + test: + runs-on: buildjet-4vcpu-ubuntu-2204 + + steps: + - uses: 
actions/checkout@v3
+
+      - name: Install rust
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: stable
+
+      - uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: ${{ runner.os }}-cargo-debug-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Run cargo test
+        run: cargo test --all-features
+
+  clippy:
+    runs-on: buildjet-4vcpu-ubuntu-2204
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Install latest rust
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: stable
+          components: clippy
+
+      - uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: ${{ runner.os }}-cargo-debug-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Run clippy
+        run: cargo clippy -- -D warnings
+      
+  format:
+    runs-on: buildjet-4vcpu-ubuntu-2204
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Install latest rust
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: stable
+          components: rustfmt
+
+      - name: Format
+        run: cargo fmt -- --check

From 132ceeec9d7cffcafaeed46a1f12ad09374542c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Wed, 29 Nov 2023 18:52:12 +0100
Subject: [PATCH 004/130] feat: Initial PgQueue implementation

---
 .github/workflows/rust.yml                    |   17 +-
 Cargo.lock                                    | 1865 +++++++++++++++++
 README.md                                     |   19 +
 docker-compose.yml                            |   15 +
 hook-common/Cargo.toml                        |    8 +
 hook-common/README.md                         |    2 +
 hook-common/src/lib.rs                        |   15 +-
 hook-common/src/pgqueue.rs                    |  215 ++
 migrations/20231129172339_job_queue_table.sql |   16 +
 9 files changed, 2156 insertions(+), 16 deletions(-)
 create mode 100644 docker-compose.yml
 create mode 100644 hook-common/README.md
 create mode 100644 hook-common/src/pgqueue.rs
 create mode 100644 migrations/20231129172339_job_queue_table.sql

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 6fc44a9..a06e9ee 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -30,7 +30,7 @@ jobs:
           key: ${{ runner.os }}-cargo-release-${{ hashFiles('**/Cargo.lock') }}
 
       - name: Run cargo build
-        run: cargo build --all --locked --release 
+        run: cargo build --all --locked --release
 
   test:
     runs-on: buildjet-4vcpu-ubuntu-2204
 
     steps:
       - uses: actions/checkout@v3
 
       - name: Install rust
         uses: dtolnay/rust-toolchain@master
         with:
           toolchain: stable
 
+      - name: Stop/Start stack with Docker Compose
+        shell: bash
+        run: |
+          docker compose -f docker-compose.yml down
+          docker compose -f docker-compose.yml up -d
+
+      - name: Run migrations
+        shell: bash
+        run: |
+          cargo install sqlx-cli --no-default-features --features native-tls,postgres
+          DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database sqlx database create
+          DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database sqlx migrate run
+
       - uses: actions/cache@v3
         with:
           path: |
@@ -76,7 +89,7 @@
 
       - name: Run clippy
         run: cargo clippy -- -D warnings
-      
+
   format:
     runs-on: buildjet-4vcpu-ubuntu-2204
 
diff --git a/Cargo.lock b/Cargo.lock
index 456c385..16de926 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,9 +2,542 @@
 # It is not intended for manual editing.
 
version = 3 +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +dependencies = [ + "cfg-if", + "getrandom", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic-write-file" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae364a6c1301604bbc6dfbf8c385c47ff82301dd01eef506195a029196d8d04" +dependencies = [ + "nix", + "rand", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +dependencies = [ + "serde", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets 0.48.5", +] + +[[package]] +name = "const-oid" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" + +[[package]] +name = "cpufeatures" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "der" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +dependencies = [ + "serde", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" + +[[package]] +name = "futures-executor" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + +[[package]] +name = "futures-io" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" + +[[package]] +name = "futures-sink" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" + +[[package]] +name = "futures-task" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" + +[[package]] +name = "futures-util" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +dependencies = [ + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "hook-common" version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "serde_derive", + "sqlx", + "thiserror", + "tokio", +] [[package]] name = "hook-consumer" @@ -13,3 +546,1335 @@ version = "0.1.0" [[package]] name = "hook-producer" version = "0.1.0" + +[[package]] +name = "iana-time-zone" +version = "0.1.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] +name = "js-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] + +[[package]] +name = "libc" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libsqlite3-sys" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "libc", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "object" 
+version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "openssl" +version = "0.10.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.48.5", +] + +[[package]] +name = "paste" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + 
"spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "rsa" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.38.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "ryu" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "serde_json" +version = "1.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "sqlformat" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" +dependencies = [ + "itertools", + 
"nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" +dependencies = [ + "ahash", + "atoi", + "byteorder", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "dotenvy", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap", + "log", + "memchr", + "native-tls", + "once_cell", + "paste", + "percent-encoding", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841" +dependencies = [ + "atomic-write-file", + "dotenvy", + "either", + "heck", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 1.0.109", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" +dependencies = [ + "atoi", + "base64", + "bitflags 2.4.1", + "byteorder", + "bytes", + "chrono", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" +dependencies = [ + "atoi", + "base64", + "bitflags 2.4.1", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + 
"libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "tracing", + "url", + "urlencoding", + "uuid", +] + +[[package]] +name = "stringprep" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +dependencies = [ + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys 0.48.0", +] + +[[package]] +name = "thiserror" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "pin-project-lite", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", 
+] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-bidi" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "uuid" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +dependencies = [ 
+ "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.39", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" + +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" + +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "zerocopy" +version = "0.7.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" diff --git a/README.md b/README.md index cf48fa8..2ce36b3 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,21 @@ # rusty-hook A reliable and performant webhook system for PostHog + +## Testing + +1. Start a PostgreSQL instance: +```bash +docker compose -f docker-compose.yml up -d +``` + +2. Prepare test database: +```bash +export DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database +sqlx database create +sqlx migrate run +``` + +3. 
Test:
+```bash
+cargo test
+```
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..35b7a49
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,15 @@
+services:
+    db:
+        image: docker.io/library/postgres:16-alpine
+        restart: on-failure
+        environment:
+            POSTGRES_USER: posthog
+            POSTGRES_DB: posthog
+            POSTGRES_PASSWORD: posthog
+        healthcheck:
+            test: ['CMD-SHELL', 'pg_isready -U posthog']
+            interval: 5s
+            timeout: 5s
+        ports:
+            - '15432:5432'
+        command: postgres -c max_connections=1000 -c idle_in_transaction_session_timeout=300000
diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml
index 1d14185..673d887 100644
--- a/hook-common/Cargo.toml
+++ b/hook-common/Cargo.toml
@@ -6,3 +6,11 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+chrono = { version = "0.4" }
+serde = { version = "1.0" }
+serde_derive = { version = "1.0" }
+sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json", "chrono" ] }
+thiserror = { version = "1.0" }
+
+[dev-dependencies]
+tokio = { version = "1.34", features = ["macros"] } # We need a runtime for async tests
diff --git a/hook-common/README.md b/hook-common/README.md
new file mode 100644
index 0000000..d277a6c
--- /dev/null
+++ b/hook-common/README.md
@@ -0,0 +1,2 @@
+# hook-common
+Library of common utilities used by rusty-hook.
diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs
index 7d12d9a..d1dadf3 100644
--- a/hook-common/src/lib.rs
+++ b/hook-common/src/lib.rs
@@ -1,14 +1 @@
-pub fn add(left: usize, right: usize) -> usize {
-    left + right
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn it_works() {
-        let result = add(2, 2);
-        assert_eq!(result, 4);
-    }
-}
+pub mod pgqueue;
diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
new file mode 100644
index 0000000..f06f50d
--- /dev/null
+++ b/hook-common/src/pgqueue.rs
@@ -0,0 +1,215 @@
+use std::str::FromStr;
+
+use chrono::prelude::*;
+use serde::{de::DeserializeOwned, Serialize};
+use sqlx::postgres::{PgPool, PgPoolOptions};
+use thiserror::Error;
+
+/// Enumeration of errors for operations with PgQueue.
+/// Errors can originate from sqlx and are wrapped by us to provide additional context.
+#[derive(Error, Debug)]
+pub enum PgQueueError {
+    #[error("connection failed with: {error}")]
+    ConnectionError {
+        error: sqlx::Error
+    },
+    #[error("{command} query failed with: {error}")]
+    QueryError {
+        command: String,
+        error: sqlx::Error
+    },
+    #[error("{0} is not a valid JobStatus")]
+    ParseJobStatusError(String),
+}
+
+/// Enumeration of possible statuses for a Job.
+/// Available: A job that is waiting in the queue to be picked up by a worker.
+/// Completed: A job that was successfully completed by a worker.
+/// Failed: A job that was unsuccessfully completed by a worker.
+/// Running: A job that was picked up by a worker and is currently being run.
+#[derive(Debug, PartialEq, sqlx::Type)]
+#[sqlx(type_name = "job_status")]
+#[sqlx(rename_all = "lowercase")]
+pub enum JobStatus {
+    Available,
+    Completed,
+    Failed,
+    Running,
+}
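+
[Editor's note: the `FromStr` implementation that follows parses these lowercase names back into `JobStatus` values. A minimal round-trip sketch, illustrative only and not part of the original patch:]

```rust
use std::str::FromStr;

// `rename_all = "lowercase"` means each variant round-trips via its lowercase name.
assert_eq!(JobStatus::from_str("running").unwrap(), JobStatus::Running);
// Unknown strings surface as PgQueueError::ParseJobStatusError.
assert!(JobStatus::from_str("pending").is_err());
```

+/// Allow casting JobStatus from strings.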
+impl FromStr for JobStatus {
+    type Err = PgQueueError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "available" => Ok(JobStatus::Available),
+            "completed" => Ok(JobStatus::Completed),
+            "failed" => Ok(JobStatus::Failed),
+            "running" => Ok(JobStatus::Running),
+            invalid => Err(PgQueueError::ParseJobStatusError(invalid.to_owned())),
+        }
+    }
+}
+
+/// JobParameters are stored and read to and from a JSONB field, so we accept anything that fits `sqlx::types::Json`.
+pub type JobParameters<J> = sqlx::types::Json<J>;
+
+/// A Job to be executed by a worker dequeueing a PgQueue.
+#[derive(sqlx::FromRow)]
+pub struct Job<J> {
+    pub id: i64,
+    pub attempt: i32,
+    pub finished_at: Option<DateTime<Utc>>,
+    pub created_at: DateTime<Utc>,
+    pub started_at: Option<DateTime<Utc>>,
+    pub status: JobStatus,
+    pub parameters: sqlx::types::Json<J>,
+}
+
+/// A NewJob to be enqueued into a PgQueue.
+pub struct NewJob<J> {
+    pub attempt: i32,
+    pub finished_at: Option<DateTime<Utc>>,
+    pub started_at: Option<DateTime<Utc>>,
+    pub status: JobStatus,
+    pub parameters: sqlx::types::Json<J>,
+}
+
+impl<J> NewJob<J> {
+    pub fn new(parameters: J) -> Self {
+        Self {
+            attempt: 0,
+            parameters: sqlx::types::Json(parameters),
+            finished_at: None,
+            started_at: None,
+            status: JobStatus::Available,
+        }
+    }
+}
+
+/// A queue implemented on top of a PostgreSQL table.
+pub struct PgQueue {
+    table: String,
+    pool: PgPool,
+}
+
+pub type PgQueueResult<T> = std::result::Result<T, PgQueueError>;
+
+impl PgQueue {
+    /// Initialize a new PgQueue backed by table in PostgreSQL.
+    pub async fn new(table: &str, url: &str) -> PgQueueResult<Self> {
+        let table = table.to_owned();
+        let pool = PgPoolOptions::new()
+            .connect(url)
+            .await
+            .map_err(|error| PgQueueError::ConnectionError {error})?;
+
+        Ok(Self {
+            table,
+            pool,
+        })
+    }
+
+    /// Dequeue a Job from this PgQueue.
+    pub async fn dequeue<J: DeserializeOwned + std::marker::Send + std::marker::Unpin + 'static>(&self) -> PgQueueResult<Job<J>> {
+        let base_query = format!(
+            r#"
+WITH available_in_queue AS (
+    SELECT
+        id
+    FROM
+        "{0}"
+    WHERE
+        status = 'available'
+    ORDER BY
+        id
+    LIMIT 1
+    FOR UPDATE SKIP LOCKED
+)
+UPDATE
+    "{0}"
+SET
+    started_at = NOW(),
+    status = 'running'::job_status,
+    attempt = "{0}".attempt + 1
+FROM
+    available_in_queue
+WHERE
+    "{0}".id = available_in_queue.id
+RETURNING
+    "{0}".*
+    "#, &self.table);
+
+        let item: Job<J> = sqlx::query_as(&base_query)
+            .bind(&self.table)
+            .bind(&self.table)
+            .bind(&self.table)
+            .fetch_one(&self.pool)
+            .await
+            .map_err(|error| PgQueueError::QueryError { command: "UPDATE".to_owned(), error})?;
+
+        Ok(item)
+    }
+
+    /// Enqueue a Job into this PgQueue.
+    /// We take ownership of NewJob to enforce a specific NewJob is only enqueued once.
+    pub async fn enqueue<J: Serialize + std::marker::Send + std::marker::Unpin + 'static>(&self, job: NewJob<J>) -> PgQueueResult<()> {
+        // TODO: Escaping. I think sqlx doesn't support identifiers.
+ let base_query = format!( + r#" +INSERT INTO {} + (attempt, created_at, finished_at, started_at, status, parameters) +VALUES + ($1, NOW(), $2, $3, $4::job_status, $5) + "#, &self.table); + + sqlx::query(&base_query) + .bind(job.attempt) + .bind(job.finished_at) + .bind(job.started_at) + .bind(job.status) + .bind(&job.parameters) + .execute(&self.pool) + .await + .map_err(|error| PgQueueError::QueryError { command: "INSERT".to_owned(), error})?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde::Deserialize; + + #[derive(Serialize, Deserialize)] + struct JobParameters { + method: String, + body: String, + url: String, + } + + #[tokio::test] + async fn test_can_enqueue_and_dequeue_job() { + let job_parameters = JobParameters { + method: "POST".to_string(), + body: "{\"event\":\"event-name\"}".to_string(), + url: "https://localhost".to_string(), + }; + let new_job = NewJob::new(job_parameters); + + let queue = PgQueue::new("job_queue", "postgres://posthog:posthog@localhost:15432/test_database").await.unwrap(); + + queue.enqueue(new_job).await.unwrap(); + + let job: Job = queue.dequeue().await.unwrap(); + + assert_eq!(job.attempt, 1); + assert_eq!(job.parameters.method, "POST".to_string()); + assert_eq!(job.parameters.body, "{\"event\":\"event-name\"}".to_string()); + assert_eq!(job.parameters.url, "https://localhost".to_string()); + assert_eq!(job.finished_at, None); + assert_eq!(job.status, JobStatus::Running); + } +} diff --git a/migrations/20231129172339_job_queue_table.sql b/migrations/20231129172339_job_queue_table.sql new file mode 100644 index 0000000..078fdd9 --- /dev/null +++ b/migrations/20231129172339_job_queue_table.sql @@ -0,0 +1,16 @@ +CREATE TYPE job_status AS ENUM( + 'available', + 'completed', + 'failed', + 'running' +); + +CREATE TABLE job_queue( + id BIGSERIAL PRIMARY KEY, + attempt INT NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + finished_at TIMESTAMPTZ DEFAULT NULL, + started_at TIMESTAMPTZ DEFAULT NULL, + status job_status NOT NULL DEFAULT 'available'::job_status, + parameters JSONB +); From 388b5444afd67d18b8cb66a84278f7725c072d42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 30 Nov 2023 12:18:17 +0100 Subject: [PATCH 005/130] feat: Support for attempted_by --- hook-common/src/pgqueue.rs | 52 ++++++++++++++----- migrations/20231129172339_job_queue_table.sql | 2 + 2 files changed, 40 insertions(+), 14 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index f06f50d..e2c206e 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -1,3 +1,7 @@ +//! # PgQueue +//! +//! A job queue implementation backed by a PostgreSQL table. + use std::str::FromStr; use chrono::prelude::*; @@ -6,7 +10,7 @@ use sqlx::postgres::{PgPool, PgPoolOptions}; use thiserror::Error; /// Enumeration of errors for operations with PgQueue. -/// Errors can originate from sqlx and are wrapped by us to provide additional context. +/// Errors that can originate from sqlx and are wrapped by us to provide additional context. #[derive(Error, Debug)] pub enum PgQueueError { #[error("connection failed with: {error}")] @@ -23,17 +27,17 @@ pub enum PgQueueError { } /// Enumeration of possible statuses for a Job. -/// Available: A job that is waiting in the queue to be picked up by a worker. -/// Completed: A job that was successfully completed by a worker. -/// Failed: A job that was unsuccessfully completed by a worker. 
-/// Running: A job that was picked up by a worker and is currently being run.
 #[derive(Debug, PartialEq, sqlx::Type)]
 #[sqlx(type_name = "job_status")]
 #[sqlx(rename_all = "lowercase")]
 pub enum JobStatus {
+    /// A job that is waiting in the queue to be picked up by a worker.
     Available,
+    /// A job that was successfully completed by a worker.
     Completed,
+    /// A job that was unsuccessfully completed by a worker.
     Failed,
+    /// A job that was picked up by a worker and is currently being run.
     Running,
 }
 
@@ -58,12 +62,23 @@ pub type JobParameters<J> = sqlx::types::Json<J>;
 /// A Job to be executed by a worker dequeueing a PgQueue.
 #[derive(sqlx::FromRow)]
 pub struct Job<J> {
+    /// A unique id identifying a job.
     pub id: i64,
+    /// A number corresponding to the current job attempt.
     pub attempt: i32,
+    /// A datetime corresponding to when the current job attempt started.
+    pub attempted_at: Option<DateTime<Utc>>,
+    /// A vector of identifiers that have attempted this job. E.g. thread ids, pod names, etc...
+    pub attempted_by: Vec<String>,
+    /// A datetime corresponding to when the job was finished (either successfully or unsuccessfully).
     pub finished_at: Option<DateTime<Utc>>,
+    /// A datetime corresponding to when the job was created.
     pub created_at: DateTime<Utc>,
+    /// A datetime corresponding to when the first job attempt was started.
     pub started_at: Option<DateTime<Utc>>,
+    /// The current status of the job.
     pub status: JobStatus,
+    /// Arbitrary job parameters stored as JSON.
     pub parameters: sqlx::types::Json<J>,
 }
 
@@ -90,16 +105,21 @@ impl<J> NewJob<J> {
 
 /// A queue implemented on top of a PostgreSQL table.
 pub struct PgQueue {
+    /// The identifier of the PostgreSQL table this queue runs on.
     table: String,
+    /// A connection pool used to connect to the PostgreSQL database.
     pool: PgPool,
+    /// The identifier of the worker listening on this queue.
+    worker: String,
 }
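[Editor's note: a hypothetical construction sketch for the API at this point in the series. The worker string is any stable identifier for the process doing the work; the tests use the process id, and a pod name would serve equally well. Assumes an async context where `?` can propagate `PgQueueError`:]

```rust
// Sketch: connection URL taken from docker-compose.yml above.
let url = "postgres://posthog:posthog@localhost:15432/test_database";
let worker_id = std::process::id().to_string();
// Every dequeue records `worker_id` in the job's `attempted_by` array.
let queue = PgQueue::new("job_queue", url, &worker_id).await?;
```

 
 pub type PgQueueResult<T> = std::result::Result<T, PgQueueError>;
 
 impl PgQueue {
     /// Initialize a new PgQueue backed by table in PostgreSQL.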
- pub async fn new(table: &str, url: &str) -> PgQueueResult { + pub async fn new(table: &str, url: &str, worker: &str) -> PgQueueResult { let table = table.to_owned(); + let worker = worker.to_owned(); let pool = PgPoolOptions::new() .connect(url) .await @@ -108,6 +128,7 @@ impl PgQueue { Ok(Self { table, pool, + worker, }) } @@ -132,7 +153,8 @@ UPDATE SET started_at = NOW(), status = 'running'::job_status, - attempt = "{0}".attempt + 1 + attempt = "{0}".attempt + 1, + attempted_by = array_append("{0}".attempted_by, $1::text) FROM available_in_queue WHERE @@ -142,9 +164,7 @@ RETURNING "#, &self.table); let item: Job = sqlx::query_as(&base_query) - .bind(&self.table) - .bind(&self.table) - .bind(&self.table) + .bind(&self.worker) .fetch_one(&self.pool) .await .map_err(|error| PgQueueError::QueryError { command: "UPDATE".to_owned(), error})?; @@ -199,17 +219,21 @@ mod tests { }; let new_job = NewJob::new(job_parameters); - let queue = PgQueue::new("job_queue", "postgres://posthog:posthog@localhost:15432/test_database").await.unwrap(); + let worker_id = std::process::id().to_string(); + let queue = PgQueue::new("job_queue", "postgres://posthog:posthog@localhost:15432/test_database", &worker_id) + .await + .expect("failed to connect to local test postgresql database"); - queue.enqueue(new_job).await.unwrap(); + queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: Job = queue.dequeue().await.unwrap(); + let job: Job = queue.dequeue().await.expect("failed to dequeue job"); assert_eq!(job.attempt, 1); assert_eq!(job.parameters.method, "POST".to_string()); assert_eq!(job.parameters.body, "{\"event\":\"event-name\"}".to_string()); assert_eq!(job.parameters.url, "https://localhost".to_string()); - assert_eq!(job.finished_at, None); + assert!(job.finished_at.is_none()); assert_eq!(job.status, JobStatus::Running); + assert!(job.attempted_by.contains(&worker_id)); } } diff --git a/migrations/20231129172339_job_queue_table.sql b/migrations/20231129172339_job_queue_table.sql index 078fdd9..f2682ee 100644 --- a/migrations/20231129172339_job_queue_table.sql +++ b/migrations/20231129172339_job_queue_table.sql @@ -8,6 +8,8 @@ CREATE TYPE job_status AS ENUM( CREATE TABLE job_queue( id BIGSERIAL PRIMARY KEY, attempt INT NOT NULL DEFAULT 0, + attempted_at TIMESTAMPTZ DEFAULT NULL, + attempted_by TEXT[] DEFAULT ARRAY[]::TEXT[], created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), finished_at TIMESTAMPTZ DEFAULT NULL, started_at TIMESTAMPTZ DEFAULT NULL, From bfeab3642dab184f4c6a717071c75d85d61ab790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 30 Nov 2023 12:20:38 +0100 Subject: [PATCH 006/130] fix: Wait for Postgres to be up in CI --- .github/workflows/rust.yml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a06e9ee..ee0e5b3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -38,11 +38,6 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Install rust - uses: dtolnay/rust-toolchain@master - with: - toolchain: stable - - name: Install rust uses: dtolnay/rust-toolchain@master with: @@ -51,8 +46,8 @@ jobs: - name: Stop/Start stack with Docker Compose shell: bash run: | - docker compose -f docker-compose.dev.yml down - docker compose -f docker-compose.dev.yml up -d + docker compose -f docker-compose.yml down + docker compose -f docker-compose.yml up -d --wait - name: Run migrations shell: bash From 8b2325bddd2d907dc0e81bdf11c1c7dcf94a56c5 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 30 Nov 2023 12:23:05 +0100 Subject: [PATCH 007/130] fix: Formatting --- hook-common/src/pgqueue.rs | 55 +++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 19 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index e2c206e..18d324b 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -14,14 +14,9 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum PgQueueError { #[error("connection failed with: {error}")] - ConnectionError { - error: sqlx::Error - }, + ConnectionError { error: sqlx::Error }, #[error("{command} query failed with: {error}")] - QueryError { - command: String, - error: sqlx::Error - }, + QueryError { command: String, error: sqlx::Error }, #[error("{0} is not a valid JobStatus")] ParseJobStatusError(String), } @@ -95,7 +90,7 @@ impl NewJob { pub fn new(parameters: J) -> Self { Self { attempt: 0, - parameters: sqlx::types::Json(parameters), + parameters: sqlx::types::Json(parameters), finished_at: None, started_at: None, status: JobStatus::Available, @@ -123,7 +118,7 @@ impl PgQueue { let pool = PgPoolOptions::new() .connect(url) .await - .map_err(|error| PgQueueError::ConnectionError {error})?; + .map_err(|error| PgQueueError::ConnectionError { error })?; Ok(Self { table, @@ -133,7 +128,9 @@ impl PgQueue { } /// Dequeue a Job from this PgQueue. - pub async fn dequeue(&self) -> PgQueueResult> { + pub async fn dequeue( + &self, + ) -> PgQueueResult> { let base_query = format!( r#" WITH available_in_queue AS ( @@ -161,20 +158,28 @@ WHERE "{0}".id = available_in_queue.id RETURNING "{0}".* - "#, &self.table); + "#, + &self.table + ); let item: Job = sqlx::query_as(&base_query) .bind(&self.worker) .fetch_one(&self.pool) .await - .map_err(|error| PgQueueError::QueryError { command: "UPDATE".to_owned(), error})?; + .map_err(|error| PgQueueError::QueryError { + command: "UPDATE".to_owned(), + error, + })?; Ok(item) } /// Enqueue a Job into this PgQueue. /// We take ownership of NewJob to enforce a specific NewJob is only enqueued once. - pub async fn enqueue(&self, job: NewJob) -> PgQueueResult<()> { + pub async fn enqueue( + &self, + job: NewJob, + ) -> PgQueueResult<()> { // TODO: Escaping. I think sqlx doesn't support identifiers. 
let base_query = format!( r#" @@ -182,7 +187,9 @@ INSERT INTO {} (attempt, created_at, finished_at, started_at, status, parameters) VALUES ($1, NOW(), $2, $3, $4::job_status, $5) - "#, &self.table); + "#, + &self.table + ); sqlx::query(&base_query) .bind(job.attempt) @@ -192,7 +199,10 @@ VALUES .bind(&job.parameters) .execute(&self.pool) .await - .map_err(|error| PgQueueError::QueryError { command: "INSERT".to_owned(), error})?; + .map_err(|error| PgQueueError::QueryError { + command: "INSERT".to_owned(), + error, + })?; Ok(()) } @@ -220,9 +230,13 @@ mod tests { let new_job = NewJob::new(job_parameters); let worker_id = std::process::id().to_string(); - let queue = PgQueue::new("job_queue", "postgres://posthog:posthog@localhost:15432/test_database", &worker_id) - .await - .expect("failed to connect to local test postgresql database"); + let queue = PgQueue::new( + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + &worker_id, + ) + .await + .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); @@ -230,7 +244,10 @@ mod tests { assert_eq!(job.attempt, 1); assert_eq!(job.parameters.method, "POST".to_string()); - assert_eq!(job.parameters.body, "{\"event\":\"event-name\"}".to_string()); + assert_eq!( + job.parameters.body, + "{\"event\":\"event-name\"}".to_string() + ); assert_eq!(job.parameters.url, "https://localhost".to_string()); assert!(job.finished_at.is_none()); assert_eq!(job.status, JobStatus::Running); From ec6ba09b820a9b6515acfd719694a5dad696ce3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 30 Nov 2023 12:27:47 +0100 Subject: [PATCH 008/130] fix: README recommends --waiting for docker compose to be up --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2ce36b3..8eb2e2c 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ A reliable and performant webhook system for PostHog 1. Start a PostgreSQL instance: ```bash -docker compose -f docker-compose.yml up -d +docker compose -f docker-compose.yml up -d --wait ``` 2. Prepare test database: From c6ab642c284d26c504fc66dbd5ec58d30eae5de2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 30 Nov 2023 12:31:02 +0100 Subject: [PATCH 009/130] chore: Add comment on SKIP LOCKED clause --- hook-common/src/pgqueue.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 18d324b..8bb40e1 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -131,6 +131,8 @@ impl PgQueue { pub async fn dequeue( &self, ) -> PgQueueResult> { + // The query that follows uses a FOR UPDATE SKIP LOCKED clause. + // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. let base_query = format!( r#" WITH available_in_queue AS ( From da952a662956493b27380f01e2f8bde6ee487811 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 1 Dec 2023 00:43:31 +0100 Subject: [PATCH 010/130] fix: Use the type alias I defined --- hook-common/src/pgqueue.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 8bb40e1..53398bf 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -74,7 +74,7 @@ pub struct Job { /// The current status of the job. 
     pub status: JobStatus,
     /// Arbitrary job parameters stored as JSON.
-    pub parameters: sqlx::types::Json<J>,
+    pub parameters: JobParameters<J>,
 }
 
 /// A NewJob to be enqueued into a PgQueue.
@@ -83,7 +83,7 @@ pub struct NewJob<J> {
     pub finished_at: Option<DateTime<Utc>>,
     pub started_at: Option<DateTime<Utc>>,
     pub status: JobStatus,
-    pub parameters: sqlx::types::Json<J>,
+    pub parameters: JobParameters<J>,
 }
 
 impl<J> NewJob<J> {
From 5d0838f175bca4e4e6208dbce1d1150a220b3212 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 1 Dec 2023 12:47:20 +0100
Subject: [PATCH 011/130] feat: Support for retrying

---
 hook-common/src/pgqueue.rs                    | 365 ++++++++++++++++--
 migrations/20231129172339_job_queue_table.sql |   9 +-
 2 files changed, 338 insertions(+), 36 deletions(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 53398bf..a85b323 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -2,9 +2,10 @@
 //!
 //! A job queue implementation backed by a PostgreSQL table.
 
+use std::default::Default;
 use std::str::FromStr;
 
-use chrono::prelude::*;
+use chrono::{prelude::*, Duration};
 use serde::{de::DeserializeOwned, Serialize};
 use sqlx::postgres::{PgPool, PgPoolOptions};
 use thiserror::Error;
@@ -19,6 +20,8 @@ pub enum PgQueueError {
     QueryError { command: String, error: sqlx::Error },
     #[error("{0} is not a valid JobStatus")]
     ParseJobStatusError(String),
+    #[error("{0} Job has reached max attempts and cannot be retried further")]
+    MaxAttemptsReachedError(String),
 }
 
 /// Enumeration of possible statuses for a Job.
@@ -28,8 +31,12 @@ pub enum PgQueueError {
 pub enum JobStatus {
     /// A job that is waiting in the queue to be picked up by a worker.
     Available,
+    /// A job that was cancelled by a worker.
+    Cancelled,
     /// A job that was successfully completed by a worker.
     Completed,
+    /// A job that has been discarded after exhausting all of its attempts.
+    Discarded,
     /// A job that was unsuccessfully completed by a worker.
     Failed,
     /// A job that was picked up by a worker and is currently being run.
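[Editor's note: the new MaxAttemptsReachedError above is what a worker sees when it retries a job that is already on its final attempt. A hedged handling sketch, assuming a dequeued `job` and a `queue` in scope inside a function returning `PgQueueResult<()>`:]

```rust
match job.retry("upstream timed out") {
    // Attempts remain: hand the job back to the queue to be rescheduled.
    Ok(retryable) => queue.enqueue_retryable(retryable).await?,
    // Final attempt already used: the job cannot be retried further.
    Err(PgQueueError::MaxAttemptsReachedError(target)) => {
        eprintln!("giving up on {}", target);
    }
    Err(other) => return Err(other),
}
```

@@ -61,49 +68,123 @@ pub struct Job<J> {
     pub id: i64,
     /// A number corresponding to the current job attempt.
     pub attempt: i32,
-    /// A datetime corresponding to when the current job attempt started.
-    pub attempted_at: Option<DateTime<Utc>>,
+    /// A datetime corresponding to when the job was attempted.
+    pub attempted_at: DateTime<Utc>,
     /// A vector of identifiers that have attempted this job. E.g. thread ids, pod names, etc...
     pub attempted_by: Vec<String>,
-    /// A datetime corresponding to when the job was finished (either successfully or unsuccessfully).
-    pub finished_at: Option<DateTime<Utc>>,
     /// A datetime corresponding to when the job was created.
     pub created_at: DateTime<Utc>,
-    /// A datetime corresponding to when the first job attempt was started.
-    pub started_at: Option<DateTime<Utc>>,
-    /// The current status of the job.
-    pub status: JobStatus,
+    /// The current job's number of max attempts.
+    pub max_attempts: i32,
     /// Arbitrary job parameters stored as JSON.
     pub parameters: JobParameters<J>,
+    /// The current status of the job.
+    pub status: JobStatus,
+    /// The target of the job. E.g. an endpoint or service we are trying to reach.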
+    pub target: String,
+}
+
+impl<J> Job<J> {
+    pub fn retry<E: Serialize>(self, error: E) -> Result<RetryableJob<E>, PgQueueError> {
+        if self.attempt == self.max_attempts {
+            Err(PgQueueError::MaxAttemptsReachedError(self.target))
+        } else {
+            Ok(RetryableJob {
+                id: self.id,
+                attempt: self.attempt,
+                max_attempts: self.max_attempts,
+                error: sqlx::types::Json(error),
+            })
+        }
+    }
+
+    pub fn complete(self) -> CompletedJob {
+        CompletedJob { id: self.id }
+    }
+
+    pub fn fail<E: Serialize>(self, error: E) -> FailedJob<E> {
+        FailedJob {
+            id: self.id,
+            error: sqlx::types::Json(error),
+        }
+    }
+}
+
+pub struct RetryPolicy {
+    backoff_coefficient: i32,
+    initial_interval: Duration,
+    maximum_interval: Option<Duration>,
+}
+
+impl RetryPolicy {
+    pub fn time_until_next_retry<E>(&self, job: &RetryableJob<E>) -> Duration {
+        let candidate_interval =
+            self.initial_interval * self.backoff_coefficient.pow(job.attempt as u32);
+
+        if let Some(max_interval) = self.maximum_interval {
+            std::cmp::min(candidate_interval, max_interval)
+        } else {
+            candidate_interval
+        }
+    }
+}
+
+impl Default for RetryPolicy {
+    fn default() -> Self {
+        Self {
+            backoff_coefficient: 2,
+            initial_interval: Duration::seconds(1),
+            maximum_interval: None,
+        }
+    }
+}
+
+pub struct RetryableJob<E> {
+    pub id: i64,
+    pub attempt: i32,
+    pub max_attempts: i32,
+    pub error: sqlx::types::Json<E>,
+}
+
+pub struct CompletedJob {
+    pub id: i64,
+}
+
+pub struct FailedJob<E> {
+    pub id: i64,
+    pub error: sqlx::types::Json<E>,
+}
 
 /// A NewJob to be enqueued into a PgQueue.
 pub struct NewJob<J> {
-    pub attempt: i32,
-    pub finished_at: Option<DateTime<Utc>>,
-    pub started_at: Option<DateTime<Utc>>,
-    pub status: JobStatus,
+    /// The maximum amount of attempts this NewJob has to complete.
+    pub max_attempts: i32,
+    /// The JSON-deserializable parameters for this NewJob.
     pub parameters: JobParameters<J>,
+    /// The target of the NewJob. E.g. an endpoint or service we are trying to reach.
+    pub target: String,
 }
 
 impl<J> NewJob<J> {
-    pub fn new(parameters: J) -> Self {
+    pub fn new(max_attempts: i32, parameters: J, target: &str) -> Self {
         Self {
-            attempt: 0,
+            max_attempts,
             parameters: sqlx::types::Json(parameters),
-            finished_at: None,
-            started_at: None,
-            status: JobStatus::Available,
+            target: target.to_owned(),
         }
     }
 }
 
 /// A queue implemented on top of a PostgreSQL table.
 pub struct PgQueue {
-    /// The identifier of the PostgreSQL table this queue runs on.
-    table: String,
+    /// A name to identify this PgQueue as multiple may share a table.
+    name: String,
     /// A connection pool used to connect to the PostgreSQL database.
     pool: PgPool,
+    /// The retry policy to use to enqueue any retryable jobs.
+    retry_policy: RetryPolicy,
+    /// The identifier of the PostgreSQL table this queue runs on.
+    table: String,
     /// The identifier of the worker listening on this queue.
     worker: String,
 }
 
 pub type PgQueueResult<T> = std::result::Result<T, PgQueueError>;
 
 impl PgQueue {
     /// Initialize a new PgQueue backed by table in PostgreSQL.
-    pub async fn new(table: &str, url: &str, worker: &str) -> PgQueueResult<Self> {
+    pub async fn new(
+        name: &str,
+        table: &str,
+        retry_policy: RetryPolicy,
+        url: &str,
+        worker: &str,
+    ) -> PgQueueResult<Self> {
+        let name = name.to_owned();
         let table = table.to_owned();
         let worker = worker.to_owned();
         let pool = PgPoolOptions::new()
             .connect(url)
             .await
             .map_err(|error| PgQueueError::ConnectionError { error })?;
 
         Ok(Self {
+            name,
             table,
             pool,
             worker,
+            retry_policy,
         })
     }
 
-    /// Dequeue a Job from this PgQueue.
+    /// Dequeue a Job from this PgQueue to work on it.
pub async fn dequeue( &self, ) -> PgQueueResult> { @@ -142,6 +232,8 @@ WITH available_in_queue AS ( "{0}" WHERE status = 'available' + AND scheduled_at <= NOW() + AND queue = $1 ORDER BY id LIMIT 1 @@ -150,10 +242,10 @@ WITH available_in_queue AS ( UPDATE "{0}" SET - started_at = NOW(), + attempted_at = NOW(), status = 'running'::job_status, attempt = "{0}".attempt + 1, - attempted_by = array_append("{0}".attempted_by, $1::text) + attempted_by = array_append("{0}".attempted_by, $2::text) FROM available_in_queue WHERE @@ -165,6 +257,7 @@ RETURNING ); let item: Job = sqlx::query_as(&base_query) + .bind(&self.name) .bind(&self.worker) .fetch_one(&self.pool) .await @@ -186,19 +279,18 @@ RETURNING let base_query = format!( r#" INSERT INTO {} - (attempt, created_at, finished_at, started_at, status, parameters) + (attempt, created_at, scheduled_at, max_attempts, parameters, queue, status, target) VALUES - ($1, NOW(), $2, $3, $4::job_status, $5) + (0, NOW(), NOW(), $1, $2, $3, 'available'::job_status, $4) "#, &self.table ); sqlx::query(&base_query) - .bind(job.attempt) - .bind(job.finished_at) - .bind(job.started_at) - .bind(job.status) + .bind(job.max_attempts) .bind(&job.parameters) + .bind(&self.name) + .bind(&job.target) .execute(&self.pool) .await .map_err(|error| PgQueueError::QueryError { @@ -208,6 +300,120 @@ VALUES Ok(()) } + + /// Enqueue a Job back into this PgQueue marked as completed. + /// We take ownership of Job to enforce a specific Job is only enqueued once. + pub async fn enqueue_completed(&self, job: CompletedJob) -> PgQueueResult<()> { + // TODO: Escaping. I think sqlx doesn't support identifiers. + let base_query = format!( + r#" +UPDATE + "{0}" +SET + finished_at = NOW(), + completed_at = NOW(), + status = 'completed'::job_status +WHERE + "{0}".id = $2 + AND queue = $1 +RETURNING + "{0}".* + "#, + &self.table + ); + + sqlx::query(&base_query) + .bind(&self.name) + .bind(job.id) + .execute(&self.pool) + .await + .map_err(|error| PgQueueError::QueryError { + command: "UPDATE".to_owned(), + error, + })?; + + Ok(()) + } + + /// Enqueue a Job back into this PgQueue to be retried at a later time. + /// We take ownership of Job to enforce a specific Job is only enqueued once. + pub async fn enqueue_retryable( + &self, + job: RetryableJob, + ) -> PgQueueResult<()> { + // TODO: Escaping. I think sqlx doesn't support identifiers. + let base_query = format!( + r#" +UPDATE + "{0}" +SET + finished_at = NOW(), + status = 'available'::job_status, + scheduled_at = NOW() + $3, + errors = array_append("{0}".errors, $4) +WHERE + "{0}".id = $2 + AND queue = $1 +RETURNING + "{0}".* + "#, + &self.table + ); + + sqlx::query(&base_query) + .bind(&self.name) + .bind(job.id) + .bind(self.retry_policy.time_until_next_retry(&job)) + .bind(&job.error) + .execute(&self.pool) + .await + .map_err(|error| PgQueueError::QueryError { + command: "UPDATE".to_owned(), + error, + })?; + + Ok(()) + } + + /// Enqueue a Job back into this PgQueue marked as failed. + /// Jobs marked as failed will remain in the queue for tracking purposes but will not be dequeued. + /// We take ownership of FailedJob to enforce a specific FailedJob is only enqueued once. + pub async fn enqueue_failed( + &self, + job: FailedJob, + ) -> PgQueueResult<()> { + // TODO: Escaping. I think sqlx doesn't support identifiers. 
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'available'::job_status,
+    scheduled_at = NOW() + $3,
+    errors = array_append("{0}".errors, $4)
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&self.name)
+            .bind(job.id)
+            .bind(self.retry_policy.time_until_next_retry(&job))
+            .bind(&job.error)
+            .execute(&self.pool)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        Ok(())
+    }
+
+    /// Enqueue a Job back into this PgQueue marked as failed.
+    /// Jobs marked as failed will remain in the queue for tracking purposes but will not be dequeued.
+    /// We take ownership of FailedJob to enforce a specific FailedJob is only enqueued once.
+    pub async fn enqueue_failed<E: Serialize + std::marker::Send + std::marker::Unpin + 'static>(
+        &self,
+        job: FailedJob<E>,
+    ) -> PgQueueResult<()> {
+        // TODO: Escaping. I think sqlx doesn't support identifiers.
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    completed_at = NOW(),
+    status = 'failed'::job_status,
+    errors = array_append("{0}".errors, $3)
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&self.name)
+            .bind(job.id)
+            .bind(&job.error)
+            .execute(&self.pool)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        Ok(())
+    }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
     use serde::Deserialize;
 
     #[derive(Serialize, Deserialize)]
     struct JobParameters {
         method: String,
         body: String,
         url: String,
     }
 
     #[tokio::test]
-    async fn test_can_enqueue_and_dequeue_job() {
+    async fn test_can_dequeue_job() {
         let job_parameters = JobParameters {
             method: "POST".to_string(),
             body: "{\"event\":\"event-name\"}".to_string(),
             url: "https://localhost".to_string(),
         };
-        let new_job = NewJob::new(job_parameters);
+        let job_target = "https://myhost/endpoint";
+        let new_job = NewJob::new(1, job_parameters, job_target);
 
         let worker_id = std::process::id().to_string();
+        let retry_policy = RetryPolicy::default();
         let queue = PgQueue::new(
+            "test_queue_1",
             "job_queue",
+            retry_policy,
             "postgres://posthog:posthog@localhost:15432/test_database",
             &worker_id,
         )
         .await
         .expect("failed to connect to local test postgresql database");
 
         queue.enqueue(new_job).await.expect("failed to enqueue job");
 
         let job: Job<JobParameters> = queue.dequeue().await.expect("failed to dequeue job");
 
         assert_eq!(job.attempt, 1);
+        assert!(job.attempted_by.contains(&worker_id));
+        assert_eq!(job.attempted_by.len(), 1);
+        assert_eq!(job.max_attempts, 1);
         assert_eq!(job.parameters.method, "POST".to_string());
         assert_eq!(
             job.parameters.body,
             "{\"event\":\"event-name\"}".to_string()
         );
         assert_eq!(job.parameters.url, "https://localhost".to_string());
-        assert!(job.finished_at.is_none());
         assert_eq!(job.status, JobStatus::Running);
-        assert!(job.attempted_by.contains(&worker_id));
+        assert_eq!(job.target, job_target.to_string());
     }
 
+    #[tokio::test]
+    async fn test_can_retry_job_with_remaining_attempts() {
+        let job_parameters = JobParameters {
+            method: "POST".to_string(),
+            body: "{\"event\":\"event-name\"}".to_string(),
+            url: "https://localhost".to_string(),
+        };
+        let job_target = "https://myhost/endpoint";
+        let new_job = NewJob::new(2, job_parameters, job_target);
+
+        let worker_id = std::process::id().to_string();
+        let retry_policy = RetryPolicy {
+            backoff_coefficient: 0,
+            initial_interval: Duration::seconds(0),
+            maximum_interval: None,
+        };
+        let queue = PgQueue::new(
+            "test_queue_2",
+            "job_queue",
+            retry_policy,
+            "postgres://posthog:posthog@localhost:15432/test_database",
+            &worker_id,
+        )
+        .await
+        .expect("failed to connect to local test postgresql database");
+
+        queue.enqueue(new_job).await.expect("failed to enqueue job");
+        let job: Job<JobParameters> = queue.dequeue().await.expect("failed to dequeue job");
+        let retryable_job = job
+            .retry("a very reasonable failure reason")
+            .expect("failed to retry job");
+
+        queue
+            .enqueue_retryable(retryable_job)
+            .await
+            .expect("failed to enqueue retryable job");
+        let retried_job: Job<JobParameters> = queue.dequeue().await.expect("failed to dequeue job");
+
+        assert_eq!(retried_job.attempt, 2);
+        assert!(retried_job.attempted_by.contains(&worker_id));
+        assert_eq!(retried_job.attempted_by.len(), 2);
+        assert_eq!(retried_job.max_attempts, 2);
+        assert_eq!(retried_job.parameters.method, "POST".to_string());
+        assert_eq!(
+            retried_job.parameters.body,
+            "{\"event\":\"event-name\"}".to_string()
+        );
+        assert_eq!(retried_job.parameters.url, "https://localhost".to_string());
+        assert_eq!(retried_job.status, JobStatus::Running);
+        assert_eq!(retried_job.target, job_target.to_string());
+    }
+
+ #[tokio::test] + #[should_panic(expected = "failed to retry job")] + async fn test_cannot_retry_job_without_remaining_attempts() { + let job_parameters = JobParameters { + method: "POST".to_string(), + body: "{\"event\":\"event-name\"}".to_string(), + url: "https://localhost".to_string(), + }; + let job_target = "https://myhost/endpoint"; + let new_job = NewJob::new(1, job_parameters, job_target); + + let worker_id = std::process::id().to_string(); + let retry_policy = RetryPolicy { + backoff_coefficient: 0, + initial_interval: Duration::seconds(0), + maximum_interval: None, + }; + let queue = PgQueue::new( + "test_queue_3", + "job_queue", + retry_policy, + "postgres://posthog:posthog@localhost:15432/test_database", + &worker_id, + ) + .await + .expect("failed to connect to local test postgresql database"); + + queue.enqueue(new_job).await.expect("failed to enqueue job"); + let job: Job = queue.dequeue().await.expect("failed to dequeue job"); + job.retry("a very reasonable failure reason") + .expect("failed to retry job"); } } diff --git a/migrations/20231129172339_job_queue_table.sql b/migrations/20231129172339_job_queue_table.sql index f2682ee..4631f0b 100644 --- a/migrations/20231129172339_job_queue_table.sql +++ b/migrations/20231129172339_job_queue_table.sql @@ -10,9 +10,14 @@ CREATE TABLE job_queue( attempt INT NOT NULL DEFAULT 0, attempted_at TIMESTAMPTZ DEFAULT NULL, attempted_by TEXT[] DEFAULT ARRAY[]::TEXT[], + completed_at TIMESTAMPTZ DEFAULT NULL, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + errors jsonb[], + max_attempts INT NOT NULL DEFAULT 1, finished_at TIMESTAMPTZ DEFAULT NULL, - started_at TIMESTAMPTZ DEFAULT NULL, + parameters JSONB, + queue TEXT NOT NULL DEFAULT 'default'::text, + scheduled_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), status job_status NOT NULL DEFAULT 'available'::job_status, - parameters JSONB + target TEXT NOT NULL ); From 67c28b0c6aa574198a3a7adddf02d4a373186cf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 1 Dec 2023 14:49:58 +0100 Subject: [PATCH 012/130] chore: Add docstrings --- hook-common/src/pgqueue.rs | 71 ++++++++++++++++++++++++++------------ 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index a85b323..1aa2a8f 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -85,23 +85,36 @@ pub struct Job { } impl Job { + /// Consume Job to retry it. + /// This returns a RetryableJob that can be enqueued by PgQueue. + /// + /// # Arguments + /// + /// * `error`: Any JSON-serializable value to be stored as an error. pub fn retry(self, error: E) -> Result, PgQueueError> { - if self.attempt == self.max_attempts { + if self.attempt >= self.max_attempts { Err(PgQueueError::MaxAttemptsReachedError(self.target)) } else { Ok(RetryableJob { id: self.id, attempt: self.attempt, - max_attempts: self.max_attempts, error: sqlx::types::Json(error), }) } } + /// Consume Job to complete it. + /// This returns a CompletedJob that can be enqueued by PgQueue. pub fn complete(self) -> CompletedJob { CompletedJob { id: self.id } } + /// Consume Job to fail it. + /// This returns a FailedJob that can be enqueued by PgQueue. + /// + /// # Arguments + /// + /// * `error`: Any JSON-serializable value to be stored as an error. 
    pub fn fail<E: Serialize>(self, error: E) -> FailedJob<E> {
         FailedJob {
             id: self.id,
@@ -110,25 +123,6 @@ impl<J> Job<J> {
     }
 }
 
-pub struct RetryPolicy {
-    backoff_coefficient: i32,
-    initial_interval: Duration,
-    maximum_interval: Option<Duration>,
-}
-
-impl RetryPolicy {
-    pub fn time_until_next_retry<E>(&self, job: &RetryableJob<E>) -> Duration {
-        let candidate_interval =
-            self.initial_interval * self.backoff_coefficient.pow(job.attempt as u32);
-
-        if let Some(max_interval) = self.maximum_interval {
-            std::cmp::min(candidate_interval, max_interval)
-        } else {
-            candidate_interval
-        }
-    }
-}
-
 impl Default for RetryPolicy {
     fn default() -> Self {
         Self {
@@ -139,19 +133,28 @@ impl Default for RetryPolicy {
     }
 }
 
+/// A Job that has failed but can still be enqueued into a PgQueue to be retried at a later point.
+/// The time until retry will depend on the PgQueue's RetryPolicy.
 pub struct RetryableJob<E> {
+    /// A unique id identifying a job.
     pub id: i64,
+    /// A number corresponding to the current job attempt.
     pub attempt: i32,
-    pub max_attempts: i32,
+    /// Any JSON-serializable value to be stored as an error.
     pub error: sqlx::types::Json<E>,
 }
 
+/// A Job that has completed to be enqueued into a PgQueue and marked as completed.
 pub struct CompletedJob {
+    /// A unique id identifying a job.
     pub id: i64,
 }
 
+/// A Job that has failed to be enqueued into a PgQueue and marked as failed.
 pub struct FailedJob<E> {
+    /// A unique id identifying a job.
     pub id: i64,
+    /// Any JSON-serializable value to be stored as an error.
     pub error: sqlx::types::Json<E>,
 }
 
@@ -175,6 +178,30 @@ impl<J> NewJob<J> {
     }
 }
 
+/// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry.
+pub struct RetryPolicy {
+    /// Coefficient to multiply initial_interval with for every past attempt.
+    backoff_coefficient: i32,
+    /// The backoff interval for the first retry.
+    initial_interval: Duration,
+    /// The maximum possible backoff between retries.
+    maximum_interval: Option<Duration>,
+}
+
+impl RetryPolicy {
+    /// Calculate the time until the next retry for a given RetryableJob.
+    pub fn time_until_next_retry<E>(&self, job: &RetryableJob<E>) -> Duration {
+        let candidate_interval =
+            self.initial_interval * self.backoff_coefficient.pow(job.attempt as u32);
+
+        if let Some(max_interval) = self.maximum_interval {
+            std::cmp::min(candidate_interval, max_interval)
+        } else {
+            candidate_interval
+        }
+    }
+}
+
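[Editor's note: a worked example of the schedule this policy produces. Values are hypothetical; like the tests below, the sketch assumes it lives in the same module, since the policy fields are module-private:]

```rust
// backoff_coefficient = 2, initial_interval = 1s: a job that just failed
// its n-th attempt is rescheduled 1s * 2^n into the future.
let policy = RetryPolicy {
    backoff_coefficient: 2,
    initial_interval: Duration::seconds(1),
    maximum_interval: Some(Duration::seconds(5)),
};
let after_first_failure = RetryableJob {
    id: 1,
    attempt: 1,
    error: sqlx::types::Json("timeout"),
};
// 1s * 2^1 = 2s until the second attempt.
assert_eq!(policy.time_until_next_retry(&after_first_failure), Duration::seconds(2));

// maximum_interval clamps the growth: 1s * 2^3 = 8s exceeds the 5s cap.
let after_third_failure = RetryableJob { attempt: 3, ..after_first_failure };
assert_eq!(policy.time_until_next_retry(&after_third_failure), Duration::seconds(5));
```

 /// A queue implemented on top of a PostgreSQL table.
From 8c5c7ec19d4f984a23fb539626761f483759717f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 1 Dec 2023 15:01:22 +0100
Subject: [PATCH 013/130] chore: Add basic requirements

---
 README.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/README.md b/README.md
index 8eb2e2c..b17e7ae 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,12 @@
 # rusty-hook
 A reliable and performant webhook system for PostHog
 
+## Requirements
+
+1. [Rust](https://www.rust-lang.org/tools/install).
+2. [sqlx-cli](https://crates.io/crates/sqlx-cli): To setup database and run migrations.
+3. [Docker](https://docs.docker.com/engine/install/) or [podman](https://podman.io/docs/installation) (and [podman-compose](https://github.com/containers/podman-compose#installation)): To setup testing services.
+
 ## Testing
 
 1. 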
Start a PostgreSQL instance: From d05f6ebccbfdf5e6c8837d0e468513c893664740 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 1 Dec 2023 16:48:03 +0100 Subject: [PATCH 014/130] feat: Implement PgTransactionJob to hold a transaction open for the job --- hook-common/src/pgqueue.rs | 290 ++++++++++++++++++++++++++++++++++++- 1 file changed, 282 insertions(+), 8 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 1aa2a8f..3d1b233 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -18,6 +18,8 @@ pub enum PgQueueError { ConnectionError { error: sqlx::Error }, #[error("{command} query failed with: {error}")] QueryError { command: String, error: sqlx::Error }, + #[error("transaction {command} failed with: {error}")] + TransactionError { command: String, error: sqlx::Error }, #[error("{0} is not a valid JobStatus")] ParseJobStatusError(String), #[error("{0} Job has reached max attempts and cannot be retried further")] @@ -78,6 +80,8 @@ pub struct Job { pub max_attempts: i32, /// Arbitrary job parameters stored as JSON. pub parameters: JobParameters, + /// The queue this job belongs to. + pub queue: String, /// The current status of the job. pub status: JobStatus, /// The target of the job. E.g. an endpoint or service we are trying to reach. @@ -99,6 +103,7 @@ impl Job { id: self.id, attempt: self.attempt, error: sqlx::types::Json(error), + queue: self.queue, }) } } @@ -106,7 +111,10 @@ impl Job { /// Consume Job to complete it. /// This returns a CompletedJob that can be enqueued by PgQueue. pub fn complete(self) -> CompletedJob { - CompletedJob { id: self.id } + CompletedJob { + id: self.id, + queue: self.queue, + } } /// Consume Job to fail it. @@ -119,17 +127,152 @@ impl Job { FailedJob { id: self.id, error: sqlx::types::Json(error), + queue: self.queue, } } } -impl Default for RetryPolicy { - fn default() -> Self { - Self { - backoff_coefficient: 2, - initial_interval: Duration::seconds(1), - maximum_interval: None, - } +/// A Job within an open PostgreSQL transaction. +/// This implementation allows 'hiding' the job from any other workers running SKIP LOCKED queries. 
+pub struct PgTransactionJob<'c, J> {
+    pub job: Job<J>,
+    pub table: String,
+    pub transaction: sqlx::Transaction<'c, sqlx::postgres::Postgres>,
+}
+
+impl<'c, J> PgTransactionJob<'c, J> {
+    pub async fn retry<E: Serialize>(
+        mut self,
+        error: E,
+        retry_policy: &RetryPolicy,
+    ) -> Result<RetryableJob<E>, PgQueueError> {
+        let retryable_job = self.job.retry(error)?;
+
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'available'::job_status,
+    scheduled_at = NOW() + $3,
+    errors = array_append("{0}".errors, $4)
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&retryable_job.queue)
+            .bind(retryable_job.id)
+            .bind(retry_policy.time_until_next_retry(&retryable_job))
+            .bind(&retryable_job.error)
+            .execute(&mut *self.transaction)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        self.transaction
+            .commit()
+            .await
+            .map_err(|error| PgQueueError::TransactionError {
+                command: "COMMIT".to_owned(),
+                error,
+            })?;
+
+        Ok(retryable_job)
+    }
+
+    pub async fn complete(mut self) -> Result<CompletedJob, PgQueueError> {
+        let completed_job = self.job.complete();
+
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'completed'::job_status
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&completed_job.queue)
+            .bind(completed_job.id)
+            .execute(&mut *self.transaction)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        self.transaction
+            .commit()
+            .await
+            .map_err(|error| PgQueueError::TransactionError {
+                command: "COMMIT".to_owned(),
+                error,
+            })?;
+
+        Ok(completed_job)
+    }
+
+    pub async fn fail<E: Serialize>(
+        mut self,
+        error: E,
+    ) -> Result<FailedJob<E>, PgQueueError> {
+        let failed_job = self.job.fail(error);
+
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'failed'::job_status
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&failed_job.queue)
+            .bind(failed_job.id)
+            .execute(&mut *self.transaction)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        self.transaction
+            .commit()
+            .await
+            .map_err(|error| PgQueueError::TransactionError {
+                command: "COMMIT".to_owned(),
+                error,
+            })?;
+
+        Ok(failed_job)
+    }
+}
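[Editor's note: a hypothetical worker sketch against the transactional API as it stands in this patch; `MyParams` and `send` are illustrative stand-ins, and `retry` still takes the policy by reference here (it moves onto the struct in patch 015). Because the row is updated inside the open transaction, a crash before commit rolls back and the job becomes visible to other workers again:]

```rust
async fn work_one(queue: &PgQueue, policy: &RetryPolicy) -> Result<(), PgQueueError> {
    if let Some(tx_job) = queue.dequeue_tx::<MyParams>().await? {
        match send(&tx_job.job.parameters).await {
            Ok(_) => {
                tx_job.complete().await?;
            }
            Err(e) => {
                // retry() errors with MaxAttemptsReachedError once attempts run out.
                tx_job.retry(e.to_string(), policy).await?;
            }
        }
    }
    Ok(())
}
```

 
 /// A Job that has failed but can still be enqueued into a PgQueue to be retried at a later point.
 /// The time until retry will depend on the PgQueue's RetryPolicy.
 pub struct RetryableJob<E> {
     /// A unique id identifying a job.
     pub id: i64,
     /// A number corresponding to the current job attempt.
     pub attempt: i32,
     /// Any JSON-serializable value to be stored as an error.
     pub error: sqlx::types::Json<E>,
+    /// A unique id identifying a job queue.
+    pub queue: String,
 }
 
 /// A Job that has completed to be enqueued into a PgQueue and marked as completed.
 pub struct CompletedJob {
     /// A unique id identifying a job.
     pub id: i64,
+    /// A unique id identifying a job queue.
+    pub queue: String,
 }
 
 /// A Job that has failed to be enqueued into a PgQueue and marked as failed.
 pub struct FailedJob<E> {
     /// A unique id identifying a job.
     pub id: i64,
     /// Any JSON-serializable value to be stored as an error.
     pub error: sqlx::types::Json<E>,
+    /// A unique id identifying a job queue.
+    pub queue: String,
 }
 
 /// A NewJob to be enqueued into a PgQueue.
@@ -202,6 +351,16 @@ impl RetryPolicy {
     }
 }
 
+impl Default for RetryPolicy {
+    fn default() -> Self {
+        Self {
+            backoff_coefficient: 2,
+            initial_interval: Duration::seconds(1),
+            maximum_interval: None,
+        }
+    }
+}
+
 /// A queue implemented on top of a PostgreSQL table.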
pub struct PgQueue { /// A name to identify this PgQueue as multiple may share a table. @@ -296,6 +455,73 @@ RETURNING Ok(item) } + /// Dequeue a Job from this PgQueue to work on it. + pub async fn dequeue_tx< + J: DeserializeOwned + std::marker::Send + std::marker::Unpin + 'static, + >( + &self, + ) -> PgQueueResult>> { + // The query that follows uses a FOR UPDATE SKIP LOCKED clause. + // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. + let mut tx = self.pool.begin().await.unwrap(); + + let base_query = format!( + r#" +WITH available_in_queue AS ( + SELECT + id + FROM + "{0}" + WHERE + status = 'available' + AND scheduled_at <= NOW() + AND queue = $1 + ORDER BY + id + LIMIT 1 + FOR UPDATE SKIP LOCKED +) +UPDATE + "{0}" +SET + attempted_at = NOW(), + status = 'running'::job_status, + attempt = "{0}".attempt + 1, + attempted_by = array_append("{0}".attempted_by, $2::text) +FROM + available_in_queue +WHERE + "{0}".id = available_in_queue.id +RETURNING + "{0}".* + "#, + &self.table + ); + + let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) + .bind(&self.name) + .bind(&self.worker) + .fetch_one(&mut *tx) + .await; + + let job: Job = match query_result { + Ok(j) => j, + Err(sqlx::Error::RowNotFound) => return Ok(None), + Err(e) => { + return Err(PgQueueError::QueryError { + command: "UPDATE".to_owned(), + error: e, + }) + } + }; + + Ok(Some(PgTransactionJob { + job, + table: self.table.to_owned(), + transaction: tx, + })) + } + /// Enqueue a Job into this PgQueue. /// We take ownership of NewJob to enforce a specific NewJob is only enqueued once. pub async fn enqueue( @@ -495,6 +721,54 @@ mod tests { assert_eq!(job.target, job_target.to_string()); } + #[tokio::test] + async fn test_can_dequeue_tx_job() { + let job_parameters = JobParameters { + method: "POST".to_string(), + body: "{\"event\":\"event-name\"}".to_string(), + url: "https://localhost".to_string(), + }; + let job_target = "https://myhost/endpoint"; + let new_job = NewJob::new(1, job_parameters, job_target); + + let worker_id = std::process::id().to_string(); + let retry_policy = RetryPolicy::default(); + let queue = PgQueue::new( + "test_queue_tx_1", + "job_queue", + retry_policy, + "postgres://posthog:posthog@localhost:15432/test_database", + &worker_id, + ) + .await + .expect("failed to connect to local test postgresql database"); + + queue.enqueue(new_job).await.expect("failed to enqueue job"); + + let tx_job: PgTransactionJob<'_, JobParameters> = queue + .dequeue_tx() + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + let another_job: Option> = + queue.dequeue_tx().await.expect("failed to dequeue job"); + + assert!(another_job.is_none()); + + assert_eq!(tx_job.job.attempt, 1); + assert!(tx_job.job.attempted_by.contains(&worker_id)); + assert_eq!(tx_job.job.attempted_by.len(), 1); + assert_eq!(tx_job.job.max_attempts, 1); + assert_eq!(tx_job.job.parameters.method, "POST".to_string()); + assert_eq!( + tx_job.job.parameters.body, + "{\"event\":\"event-name\"}".to_string() + ); + assert_eq!(tx_job.job.parameters.url, "https://localhost".to_string()); + assert_eq!(tx_job.job.status, JobStatus::Running); + assert_eq!(tx_job.job.target, job_target.to_string()); + } + #[tokio::test] async fn test_can_retry_job_with_remaining_attempts() { let job_parameters = JobParameters { From 4bf65168952211982ec543646d54006a215fc1c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 1 Dec 2023 
18:09:52 +0100
Subject: [PATCH 015/130] refactor: Move queries to state handling methods in
 Job

---
 hook-common/src/pgqueue.rs | 505 +++++++++++++++++++++----------------
 1 file changed, 286 insertions(+), 219 deletions(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 3d1b233..451724d 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -132,19 +132,138 @@ impl<J> Job<J> {
     }
 }
 
+/// A Job that can be updated in PostgreSQL.
+pub struct PgJob<J> {
+    pub job: Job<J>,
+    pub table: String,
+    pub connection: sqlx::pool::PoolConnection<sqlx::postgres::Postgres>,
+    pub retry_policy: RetryPolicy,
+}
+
+impl<J> PgJob<J> {
+    pub async fn retry<E: Serialize>(
+        mut self,
+        error: E,
+    ) -> Result<RetryableJob<E>, PgQueueError> {
+        let retryable_job = self.job.retry(error)?;
+
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'available'::job_status,
+    scheduled_at = NOW() + $3,
+    errors = array_append("{0}".errors, $4)
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&retryable_job.queue)
+            .bind(retryable_job.id)
+            .bind(self.retry_policy.time_until_next_retry(&retryable_job))
+            .bind(&retryable_job.error)
+            .execute(&mut *self.connection)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        Ok(retryable_job)
+    }
+
+    pub async fn complete(mut self) -> Result<CompletedJob, PgQueueError> {
+        let completed_job = self.job.complete();
+
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'completed'::job_status
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&completed_job.queue)
+            .bind(completed_job.id)
+            .execute(&mut *self.connection)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        Ok(completed_job)
+    }
+
+    pub async fn fail<E: Serialize>(
+        mut self,
+        error: E,
+    ) -> Result<FailedJob<E>, PgQueueError> {
+        let failed_job = self.job.fail(error);
+
+        let base_query = format!(
+            r#"
+UPDATE
+    "{0}"
+SET
+    finished_at = NOW(),
+    status = 'failed'::job_status
+WHERE
+    "{0}".id = $2
+    AND queue = $1
+RETURNING
+    "{0}".*
+
+    "#,
+            &self.table
+        );
+
+        sqlx::query(&base_query)
+            .bind(&failed_job.queue)
+            .bind(failed_job.id)
+            .execute(&mut *self.connection)
+            .await
+            .map_err(|error| PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error,
+            })?;
+
+        Ok(failed_job)
+    }
+}
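[Editor's note: with the queries moved onto the job types, a plain (non-transactional) worker loop reads roughly as below; `WebhookParams` and `send_webhook` are illustrative stand-ins, and a fuller loop would fall back to fail() when retry() reports MaxAttemptsReachedError:]

```rust
// Sketch: PgJob owns a pooled connection; consuming it via complete()/
// retry()/fail() writes the terminal state back through that connection.
loop {
    match queue.dequeue::<WebhookParams>().await? {
        None => break, // queue drained
        Some(pg_job) => match send_webhook(&pg_job.job.parameters).await {
            Ok(_) => {
                pg_job.complete().await?;
            }
            Err(e) => {
                pg_job.retry(e.to_string()).await?;
            }
        },
    }
}
```

 
 /// A Job within an open PostgreSQL transaction.
 /// This implementation allows 'hiding' the job from any other workers running SKIP LOCKED queries.
 pub struct PgTransactionJob<'c, J> {
     pub job: Job<J>,
     pub table: String,
     pub transaction: sqlx::Transaction<'c, sqlx::postgres::Postgres>,
+    pub retry_policy: RetryPolicy,
 }
 
 impl<'c, J> PgTransactionJob<'c, J> {
     pub async fn retry<E: Serialize>(
         mut self,
         error: E,
-        retry_policy: &RetryPolicy,
     ) -> Result<RetryableJob<E>, PgQueueError> {
         let retryable_job = self.job.retry(error)?;
 
@@ -170,7 +289,7 @@ RETURNING
         sqlx::query(&base_query)
             .bind(&retryable_job.queue)
             .bind(retryable_job.id)
-            .bind(retry_policy.time_until_next_retry(&retryable_job))
+            .bind(self.retry_policy.time_until_next_retry(&retryable_job))
             .bind(&retryable_job.error)
             .execute(&mut *self.transaction)
             .await
@@ -327,6 +446,7 @@ impl<J> NewJob<J> {
     }
 }
 
+#[derive(Copy, Clone)]
 /// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry.
 pub struct RetryPolicy {
     /// Coefficient to multiply initial_interval with for every past attempt.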
@@ -367,7 +487,7 @@ pub struct PgQueue { name: String, /// A connection pool used to connect to the PostgreSQL database. pool: PgPool, - /// The retry policy to use to enqueue any retryable jobs. + /// The retry policy to be assigned to Jobs in this PgQueue. retry_policy: RetryPolicy, /// The identifier of the PostgreSQL table this queue runs on. table: String, @@ -382,9 +502,9 @@ impl PgQueue { pub async fn new( name: &str, table: &str, - retry_policy: RetryPolicy, url: &str, worker: &str, + retry_policy: RetryPolicy, ) -> PgQueueResult { let name = name.to_owned(); let table = table.to_owned(); @@ -396,17 +516,23 @@ impl PgQueue { Ok(Self { name, - table, pool, - worker, retry_policy, + table, + worker, }) } /// Dequeue a Job from this PgQueue to work on it. pub async fn dequeue( &self, - ) -> PgQueueResult> { + ) -> PgQueueResult>> { + let mut connection = self + .pool + .acquire() + .await + .map_err(|error| PgQueueError::ConnectionError { error })?; + // The query that follows uses a FOR UPDATE SKIP LOCKED clause. // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. let base_query = format!( @@ -442,17 +568,29 @@ RETURNING &self.table ); - let item: Job = sqlx::query_as(&base_query) + let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) .bind(&self.name) .bind(&self.worker) - .fetch_one(&self.pool) - .await - .map_err(|error| PgQueueError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; + .fetch_one(&mut *connection) + .await; + + let job: Job = match query_result { + Ok(j) => j, + Err(sqlx::Error::RowNotFound) => return Ok(None), + Err(e) => { + return Err(PgQueueError::QueryError { + command: "UPDATE".to_owned(), + error: e, + }) + } + }; - Ok(item) + Ok(Some(PgJob { + job, + table: self.table.to_owned(), + connection, + retry_policy: self.retry_policy, + })) } /// Dequeue a Job from this PgQueue to work on it. @@ -461,10 +599,14 @@ RETURNING >( &self, ) -> PgQueueResult>> { + let mut tx = self + .pool + .begin() + .await + .map_err(|error| PgQueueError::ConnectionError { error })?; + // The query that follows uses a FOR UPDATE SKIP LOCKED clause. // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. - let mut tx = self.pool.begin().await.unwrap(); - let base_query = format!( r#" WITH available_in_queue AS ( @@ -519,6 +661,7 @@ RETURNING job, table: self.table.to_owned(), transaction: tx, + retry_policy: self.retry_policy, })) } @@ -553,120 +696,6 @@ VALUES Ok(()) } - - /// Enqueue a Job back into this PgQueue marked as completed. - /// We take ownership of Job to enforce a specific Job is only enqueued once. - pub async fn enqueue_completed(&self, job: CompletedJob) -> PgQueueResult<()> { - // TODO: Escaping. I think sqlx doesn't support identifiers. - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - completed_at = NOW(), - status = 'completed'::job_status -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&self.name) - .bind(job.id) - .execute(&self.pool) - .await - .map_err(|error| PgQueueError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; - - Ok(()) - } - - /// Enqueue a Job back into this PgQueue to be retried at a later time. - /// We take ownership of Job to enforce a specific Job is only enqueued once. - pub async fn enqueue_retryable( - &self, - job: RetryableJob, - ) -> PgQueueResult<()> { - // TODO: Escaping. 
I think sqlx doesn't support identifiers. - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'available'::job_status, - scheduled_at = NOW() + $3, - errors = array_append("{0}".errors, $4) -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&self.name) - .bind(job.id) - .bind(self.retry_policy.time_until_next_retry(&job)) - .bind(&job.error) - .execute(&self.pool) - .await - .map_err(|error| PgQueueError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; - - Ok(()) - } - - /// Enqueue a Job back into this PgQueue marked as failed. - /// Jobs marked as failed will remain in the queue for tracking purposes but will not be dequeued. - /// We take ownership of FailedJob to enforce a specific FailedJob is only enqueued once. - pub async fn enqueue_failed( - &self, - job: FailedJob, - ) -> PgQueueResult<()> { - // TODO: Escaping. I think sqlx doesn't support identifiers. - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - completed_at = NOW(), - status = 'failed'::job_status - errors = array_append("{0}".errors, $3) -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&self.name) - .bind(job.id) - .bind(&job.error) - .execute(&self.pool) - .await - .map_err(|error| PgQueueError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; - - Ok(()) - } } #[cfg(test)] @@ -674,71 +703,99 @@ mod tests { use super::*; use serde::Deserialize; - #[derive(Serialize, Deserialize)] + #[derive(Serialize, Deserialize, PartialEq, Debug)] struct JobParameters { method: String, body: String, url: String, } + impl Default for JobParameters { + fn default() -> Self { + Self { + method: "POST".to_string(), + body: "{\"event\":\"event-name\"}".to_string(), + url: "https://localhost".to_string(), + } + } + } + + /// Use process id as a worker id for tests. + fn worker_id() -> String { + std::process::id().to_string() + } + + /// Hardcoded test value for job target. 
+ fn job_target() -> String { + "https://myhost/endpoint".to_owned() + } + #[tokio::test] async fn test_can_dequeue_job() { - let job_parameters = JobParameters { - method: "POST".to_string(), - body: "{\"event\":\"event-name\"}".to_string(), - url: "https://localhost".to_string(), - }; - let job_target = "https://myhost/endpoint"; - let new_job = NewJob::new(1, job_parameters, job_target); + let job_target = job_target(); + let job_parameters = JobParameters::default(); + let worker_id = worker_id(); + let new_job = NewJob::new(1, job_parameters, &job_target); - let worker_id = std::process::id().to_string(); - let retry_policy = RetryPolicy::default(); let queue = PgQueue::new( - "test_queue_1", + "test_can_dequeue_job", "job_queue", - retry_policy, "postgres://posthog:posthog@localhost:15432/test_database", &worker_id, + RetryPolicy::default(), ) .await .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: Job = queue.dequeue().await.expect("failed to dequeue job"); + let pg_job: PgJob = queue + .dequeue() + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + + assert_eq!(pg_job.job.attempt, 1); + assert!(pg_job.job.attempted_by.contains(&worker_id)); + assert_eq!(pg_job.job.attempted_by.len(), 1); + assert_eq!(pg_job.job.max_attempts, 1); + assert_eq!(*pg_job.job.parameters.as_ref(), JobParameters::default()); + assert_eq!(pg_job.job.status, JobStatus::Running); + assert_eq!(pg_job.job.target, job_target); + } - assert_eq!(job.attempt, 1); - assert!(job.attempted_by.contains(&worker_id)); - assert_eq!(job.attempted_by.len(), 1); - assert_eq!(job.max_attempts, 1); - assert_eq!(job.parameters.method, "POST".to_string()); - assert_eq!( - job.parameters.body, - "{\"event\":\"event-name\"}".to_string() - ); - assert_eq!(job.parameters.url, "https://localhost".to_string()); - assert_eq!(job.status, JobStatus::Running); - assert_eq!(job.target, job_target.to_string()); + #[tokio::test] + async fn test_dequeue_returns_none_on_no_jobs() { + let worker_id = worker_id(); + let queue = PgQueue::new( + "test_dequeue_returns_none_on_no_jobs", + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + &worker_id, + RetryPolicy::default(), + ) + .await + .expect("failed to connect to local test postgresql database"); + + let pg_job: Option> = + queue.dequeue().await.expect("failed to dequeue job"); + + assert!(pg_job.is_none()); } #[tokio::test] async fn test_can_dequeue_tx_job() { - let job_parameters = JobParameters { - method: "POST".to_string(), - body: "{\"event\":\"event-name\"}".to_string(), - url: "https://localhost".to_string(), - }; - let job_target = "https://myhost/endpoint"; - let new_job = NewJob::new(1, job_parameters, job_target); + let job_target = job_target(); + let job_parameters = JobParameters::default(); + let worker_id = worker_id(); + let new_job = NewJob::new(1, job_parameters, &job_target); - let worker_id = std::process::id().to_string(); - let retry_policy = RetryPolicy::default(); let queue = PgQueue::new( - "test_queue_tx_1", + "test_can_dequeue_tx_job", "job_queue", - retry_policy, "postgres://posthog:posthog@localhost:15432/test_database", &worker_id, + RetryPolicy::default(), ) .await .expect("failed to connect to local test postgresql database"); @@ -750,107 +807,117 @@ mod tests { .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); - let another_job: Option> = - queue.dequeue_tx().await.expect("failed 
to dequeue job"); - - assert!(another_job.is_none()); assert_eq!(tx_job.job.attempt, 1); assert!(tx_job.job.attempted_by.contains(&worker_id)); assert_eq!(tx_job.job.attempted_by.len(), 1); assert_eq!(tx_job.job.max_attempts, 1); - assert_eq!(tx_job.job.parameters.method, "POST".to_string()); - assert_eq!( - tx_job.job.parameters.body, - "{\"event\":\"event-name\"}".to_string() - ); - assert_eq!(tx_job.job.parameters.url, "https://localhost".to_string()); + assert_eq!(*tx_job.job.parameters.as_ref(), JobParameters::default()); assert_eq!(tx_job.job.status, JobStatus::Running); - assert_eq!(tx_job.job.target, job_target.to_string()); + assert_eq!(tx_job.job.target, job_target); } #[tokio::test] - async fn test_can_retry_job_with_remaining_attempts() { - let job_parameters = JobParameters { - method: "POST".to_string(), - body: "{\"event\":\"event-name\"}".to_string(), - url: "https://localhost".to_string(), - }; - let job_target = "https://myhost/endpoint"; - let new_job = NewJob::new(2, job_parameters, job_target); + async fn test_dequeue_tx_returns_none_on_no_jobs() { + let worker_id = worker_id(); + let queue = PgQueue::new( + "test_dequeue_tx_returns_none_on_no_jobs", + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + &worker_id, + RetryPolicy::default(), + ) + .await + .expect("failed to connect to local test postgresql database"); + + let tx_job: Option> = + queue.dequeue_tx().await.expect("failed to dequeue job"); + + assert!(tx_job.is_none()); + } - let worker_id = std::process::id().to_string(); + #[tokio::test] + async fn test_can_retry_job_with_remaining_attempts() { + let job_target = job_target(); + let job_parameters = JobParameters::default(); + let worker_id = worker_id(); + let new_job = NewJob::new(2, job_parameters, &job_target); let retry_policy = RetryPolicy { backoff_coefficient: 0, initial_interval: Duration::seconds(0), maximum_interval: None, }; + let queue = PgQueue::new( - "test_queue_2", + "test_can_retry_job_with_remaining_attempts", "job_queue", - retry_policy, "postgres://posthog:posthog@localhost:15432/test_database", &worker_id, + retry_policy, ) .await .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: Job = queue.dequeue().await.expect("failed to dequeue job"); - let retryable_job = job + let job: PgJob = queue + .dequeue() + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + let _ = job .retry("a very reasonable failure reason") + .await .expect("failed to retry job"); - - queue - .enqueue_retryable(retryable_job) + let retried_job: PgJob = queue + .dequeue() .await - .expect("failed to enqueue retryable job"); - let retried_job: Job = queue.dequeue().await.expect("failed to dequeue job"); - - assert_eq!(retried_job.attempt, 2); - assert!(retried_job.attempted_by.contains(&worker_id)); - assert_eq!(retried_job.attempted_by.len(), 2); - assert_eq!(retried_job.max_attempts, 2); - assert_eq!(retried_job.parameters.method, "POST".to_string()); + .expect("failed to dequeue job") + .expect("didn't find retried job to dequeue"); + + assert_eq!(retried_job.job.attempt, 2); + assert!(retried_job.job.attempted_by.contains(&worker_id)); + assert_eq!(retried_job.job.attempted_by.len(), 2); + assert_eq!(retried_job.job.max_attempts, 2); assert_eq!( - retried_job.parameters.body, - "{\"event\":\"event-name\"}".to_string() + *retried_job.job.parameters.as_ref(), + JobParameters::default() ); - 
assert_eq!(retried_job.parameters.url, "https://localhost".to_string()); - assert_eq!(retried_job.status, JobStatus::Running); - assert_eq!(retried_job.target, job_target.to_string()); + assert_eq!(retried_job.job.status, JobStatus::Running); + assert_eq!(retried_job.job.target, job_target); } #[tokio::test] #[should_panic(expected = "failed to retry job")] async fn test_cannot_retry_job_without_remaining_attempts() { - let job_parameters = JobParameters { - method: "POST".to_string(), - body: "{\"event\":\"event-name\"}".to_string(), - url: "https://localhost".to_string(), - }; - let job_target = "https://myhost/endpoint"; - let new_job = NewJob::new(1, job_parameters, job_target); - - let worker_id = std::process::id().to_string(); + let job_target = job_target(); + let job_parameters = JobParameters::default(); + let worker_id = worker_id(); + let new_job = NewJob::new(1, job_parameters, &job_target); let retry_policy = RetryPolicy { backoff_coefficient: 0, initial_interval: Duration::seconds(0), maximum_interval: None, }; + let queue = PgQueue::new( - "test_queue_3", + "test_cannot_retry_job_without_remaining_attempts", "job_queue", - retry_policy, "postgres://posthog:posthog@localhost:15432/test_database", &worker_id, + retry_policy, ) .await .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: Job = queue.dequeue().await.expect("failed to dequeue job"); + + let job: PgJob = queue + .dequeue() + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); job.retry("a very reasonable failure reason") + .await .expect("failed to retry job"); } } From 401336a853a1c5a8eec0505c00fc5ff811a7d994 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 1 Dec 2023 18:27:51 +0100 Subject: [PATCH 016/130] refactor: Return from match and close connection as recommended --- hook-common/src/pgqueue.rs | 61 ++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 451724d..599785c 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -574,23 +574,28 @@ RETURNING .fetch_one(&mut *connection) .await; - let job: Job = match query_result { - Ok(j) => j, - Err(sqlx::Error::RowNotFound) => return Ok(None), + match query_result { + Ok(job) => Ok(Some(PgJob { + job, + table: self.table.to_owned(), + connection, + retry_policy: self.retry_policy, + })), + + // Although connection would be closed once it goes out of scope, sqlx recommends explicitly calling close(). + // See: https://docs.rs/sqlx/latest/sqlx/postgres/any/trait.AnyConnectionBackend.html#tymethod.close. + Err(sqlx::Error::RowNotFound) => { + let _ = connection.close().await; + Ok(None) + } Err(e) => { - return Err(PgQueueError::QueryError { + let _ = connection.close().await; + Err(PgQueueError::QueryError { command: "UPDATE".to_owned(), error: e, }) } - }; - - Ok(Some(PgJob { - job, - table: self.table.to_owned(), - connection, - retry_policy: self.retry_policy, - })) + } } /// Dequeue a Job from this PgQueue to work on it. 
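Taken together with patch 015, the dequeue path now hands a worker an owned `PgJob` (or `None` on an empty queue), and the job itself carries its table, connection, and retry policy. As a rough usage sketch, not part of the patch series, a consumer loop over this API could look like the following; `WebhookJobParameters`, `send_webhook`, and `is_retryable` are illustrative assumptions:

```rust
use hook_common::pgqueue::{PgJob, PgQueue, PgQueueError};

// Assumed payload shape; mirrors the JobParameters struct used in the tests.
#[derive(serde::Serialize, serde::Deserialize)]
struct WebhookJobParameters {
    method: String,
    body: String,
    url: String,
}

async fn consume_one(queue: &PgQueue) -> Result<(), PgQueueError> {
    let maybe_job: Option<PgJob<WebhookJobParameters>> = queue.dequeue().await?;

    let Some(pg_job) = maybe_job else {
        // No 'available' job matched: a worker would sleep and poll again.
        return Ok(());
    };

    match send_webhook(&pg_job.job.parameters).await {
        // Consuming the PgJob marks the row 'completed'.
        Ok(()) => {
            pg_job.complete().await?;
        }
        // Transient failure: the row becomes 'available' again, with
        // scheduled_at pushed out according to the queue's RetryPolicy.
        Err(e) if is_retryable(&e) => {
            pg_job.retry(e.to_string()).await?;
        }
        // Terminal failure: the row stays in the table as 'failed' for
        // tracking and is never dequeued again.
        Err(e) => {
            pg_job.fail(e.to_string()).await?;
        }
    }

    Ok(())
}
```

Because `retry`, `complete`, and `fail` all take `self` by value, a job can only be resolved once; the compiler enforces the single-resolution invariant that the removed `enqueue_*` methods could only document.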
@@ -646,23 +651,21 @@ RETURNING
             .fetch_one(&mut *tx)
             .await;
 
-        let job: Job<J> = match query_result {
-            Ok(j) => j,
-            Err(sqlx::Error::RowNotFound) => return Ok(None),
-            Err(e) => {
-                return Err(PgQueueError::QueryError {
-                    command: "UPDATE".to_owned(),
-                    error: e,
-                })
-            }
-        };
-
-        Ok(Some(PgTransactionJob {
-            job,
-            table: self.table.to_owned(),
-            transaction: tx,
-            retry_policy: self.retry_policy,
-        }))
+        match query_result {
+            Ok(job) => Ok(Some(PgTransactionJob {
+                job,
+                table: self.table.to_owned(),
+                transaction: tx,
+                retry_policy: self.retry_policy,
+            })),
+
+            // The transaction is rolled back on drop.
+            Err(sqlx::Error::RowNotFound) => Ok(None),
+            Err(e) => Err(PgQueueError::QueryError {
+                command: "UPDATE".to_owned(),
+                error: e,
+            }),
+        }
     }
 
     /// Enqueue a Job into this PgQueue.

From 2ab6250078d782b7f90d08089ed7304fecf4b2cc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Mon, 4 Dec 2023 10:24:28 +0100
Subject: [PATCH 017/130] fix: Update docstring

Co-authored-by: Brett Hoerner
---
 hook-common/src/pgqueue.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 599785c..dd38fa2 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -109,7 +109,7 @@ impl<J> Job<J> {
     }
 
     /// Consume Job to complete it.
-    /// This returns a CompletedJob that can be enqueued by PgQueue.
+    /// This returns a CompletedJob that can be marked as completed by PgQueue.
     pub fn complete(self) -> CompletedJob {
         CompletedJob {
             id: self.id,

From 521fb51264c1e0a7202f7d8aa74199bf4e27ee67 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Mon, 4 Dec 2023 10:24:43 +0100
Subject: [PATCH 018/130] fix: More docstring updates.

Co-authored-by: Brett Hoerner
---
 hook-common/src/pgqueue.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index dd38fa2..6575be8 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -118,7 +118,7 @@ impl<J> Job<J> {
     }
 
     /// Consume Job to fail it.
-    /// This returns a FailedJob that can be enqueued by PgQueue.
+    /// This returns a FailedJob that can be marked as failed by PgQueue.
     ///
     /// # Arguments
     ///

From 746120b4cd8e843c8ee53b500bc07322f79fabfb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Mon, 4 Dec 2023 10:25:03 +0100
Subject: [PATCH 019/130] fix: Typo

Co-authored-by: Brett Hoerner
---
 hook-common/src/pgqueue.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 6575be8..7204f97 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -449,7 +449,7 @@ impl<J> NewJob<J> {
 #[derive(Copy, Clone)]
 /// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry.
 pub struct RetryPolicy {
-    /// Coeficient to multiply initial_interval with for every past attempt.
+    /// Coefficient to multiply initial_interval with for every past attempt.
     backoff_coefficient: i32,
     /// The backoff interval for the first retry.
initial_interval: Duration, From d5f345dadd6f19734b2d9c195550de1a46c24d79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Mon, 4 Dec 2023 10:31:59 +0100 Subject: [PATCH 020/130] fix: Update documentation and names --- hook-common/src/pgqueue.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 7204f97..89aab7e 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -499,16 +499,24 @@ pub type PgQueueResult = std::result::Result; impl PgQueue { /// Initialize a new PgQueue backed by table in PostgreSQL. + /// + /// # Arguments + /// + /// * `queue_name`: A name for the queue we are going to initialize. + /// * `table_name`: The name for the table the queue will use in PostgreSQL. + /// * `url`: A URL pointing to where the PostgreSQL database is hosted. + /// * `worker_name`: The name of the worker that is operating with this queue. + /// * `retry_policy`: A retry policy to pass to jobs from this queue. pub async fn new( - name: &str, - table: &str, + queue_name: &str, + table_name: &str, url: &str, - worker: &str, + worker_name: &str, retry_policy: RetryPolicy, ) -> PgQueueResult { - let name = name.to_owned(); - let table = table.to_owned(); - let worker = worker.to_owned(); + let name = queue_name.to_owned(); + let table = table_name.to_owned(); + let worker = worker_name.to_owned(); let pool = PgPoolOptions::new() .connect(url) .await From e4554641d0afdf7c20cb195ffcc5aded4f17c03f Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 27 Nov 2023 15:13:04 +0000 Subject: [PATCH 021/130] add deps --- Cargo.lock | 348 +++++++++++++++++++++++++++++++++++++-- Cargo.toml | 1 + hook-producer/Cargo.toml | 2 + 3 files changed, 334 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 16de926..aa7f488 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -51,6 +51,17 @@ dependencies = [ "libc", ] +[[package]] +name = "async-trait" +version = "0.1.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "atoi" version = "2.0.0" @@ -62,9 +73,9 @@ dependencies = [ [[package]] name = "atomic-write-file" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae364a6c1301604bbc6dfbf8c385c47ff82301dd01eef506195a029196d8d04" +checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" dependencies = [ "nix", "rand", @@ -76,6 +87,59 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.69" @@ -182,9 +246,9 @@ checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -192,9 +256,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" @@ -343,6 +407,12 @@ dependencies = [ "spin 0.9.8", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "foreign-types" version = "0.3.2" @@ -466,6 +536,25 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +[[package]] +name = "h2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" version = "0.14.3" @@ -494,6 +583,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + [[package]] name = "hex" version = "0.4.3" @@ -546,6 +641,95 @@ version = "0.1.0" [[package]] name = "hook-producer" version = "0.1.0" +dependencies = [ + "axum", + "tokio", +] + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] 
+name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403f9214f3e703236b221f1a9cd88ec8b4adfa5296de01ab96216361f4692f56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "hyper-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca339002caeb0d159cc6e023dff48e199f081e42fa039895c7c6f38b37f2e9d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "socket2", + "tokio", + "tower", + "tower-service", + "tracing", +] [[package]] name = "iana-time-zone" @@ -648,9 +832,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lock_api" @@ -668,6 +852,12 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "md-5" version = "0.10.6" @@ -684,6 +874,12 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -797,6 +993,16 @@ dependencies = [ "libm", ] +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "object" version = "0.32.1" @@ -900,6 +1106,26 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pin-project" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "pin-project-lite" version = "0.2.13" @@ -1004,9 +1230,9 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ "const-oid", "digest", @@ -1030,17 +1256,23 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustix" -version = "0.38.25" +version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" +checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.15" @@ -1116,6 +1348,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "sha1" version = "0.10.6" @@ -1138,6 +1392,15 @@ dependencies = [ "digest", ] +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + [[package]] name = "signature" version = "2.2.0" @@ -1454,6 +1717,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "tempfile" version = "3.8.1" @@ -1512,7 +1781,10 @@ dependencies = [ "bytes", "libc", "mio", + "num_cpus", + "parking_lot", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.48.0", @@ -1540,6 +1812,48 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + [[package]] name = 
"tracing" version = "0.1.40" @@ -1855,18 +2169,18 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "zerocopy" -version = "0.7.26" +version = "0.7.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" +checksum = "7d6f15f7ade05d2a4935e34a457b936c23dc70a05cc1d97133dc99e7a3fe0f0e" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.26" +version = "0.7.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" +checksum = "dbbad221e3f78500350ecbd7dfa4e63ef945c05f4c61cb7f4d3f84cd0bba649b" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index d880e2c..ea18839 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,3 +9,4 @@ members = [ [workspace.dependencies] sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json" ] } +tokio = { version = "1.34.0", features = ["full"] } diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml index 96fbb4d..7626cc5 100644 --- a/hook-producer/Cargo.toml +++ b/hook-producer/Cargo.toml @@ -6,3 +6,5 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +tokio = { workspace = true } +axum = { version="0.7.1", features=["http2"] } From efe67f8fef07926cab24fe6799d43b1458f4e3d8 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 27 Nov 2023 15:29:45 +0000 Subject: [PATCH 022/130] boot http server --- Cargo.lock | 108 ++++++++++++++++++++++++++++ hook-producer/Cargo.toml | 3 + hook-producer/src/handlers/index.rs | 3 + hook-producer/src/handlers/mod.rs | 9 +++ hook-producer/src/main.rs | 23 +++++- 5 files changed, 144 insertions(+), 2 deletions(-) create mode 100644 hook-producer/src/handlers/index.rs create mode 100644 hook-producer/src/handlers/mod.rs diff --git a/Cargo.lock b/Cargo.lock index aa7f488..be15d2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -384,6 +384,16 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "eyre" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80f656be11ddf91bd709454d15d5bd896fbaf4cc3314e69349e4d1569f5b46cd" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -643,7 +653,10 @@ name = "hook-producer" version = "0.1.0" dependencies = [ "axum", + "eyre", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -764,6 +777,12 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "2.1.0" @@ -945,6 +964,16 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint-dig" version = "0.8.4" @@ -1062,6 +1091,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "overload" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "parking_lot" version = "0.12.1" @@ -1392,6 +1427,15 @@ dependencies = [ "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -1756,6 +1800,16 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -1884,6 +1938,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", ] [[package]] @@ -1948,6 +2028,12 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -2026,6 +2112,28 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.51.1" diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml index 7626cc5..35af92d 100644 --- a/hook-producer/Cargo.toml +++ b/hook-producer/Cargo.toml @@ -8,3 +8,6 @@ edition = "2021" [dependencies] tokio = { workspace = true } axum = { version="0.7.1", features=["http2"] } +eyre = "0.6.9" +tracing = "0.1.40" +tracing-subscriber = "0.3.18" diff --git a/hook-producer/src/handlers/index.rs b/hook-producer/src/handlers/index.rs new file mode 100644 
index 0000000..56896fa --- /dev/null +++ b/hook-producer/src/handlers/index.rs @@ -0,0 +1,3 @@ +pub async fn get() -> &'static str { + "rusty hook" +} diff --git a/hook-producer/src/handlers/mod.rs b/hook-producer/src/handlers/mod.rs new file mode 100644 index 0000000..a83e46e --- /dev/null +++ b/hook-producer/src/handlers/mod.rs @@ -0,0 +1,9 @@ +use axum::{Router, routing}; + +mod index; + +pub fn router() -> Router { + let app = Router::new().route("/", routing::get(index::get)); + + app +} diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index e7a11a9..a8a1ce5 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -1,3 +1,22 @@ -fn main() { - println!("Hello, world!"); +use axum::Router; +use eyre::Result; +mod handlers; + +async fn listen(app: Router) -> Result<()> { + let listener = tokio::net::TcpListener::bind("0.0.0.0:8000").await?; + axum::serve(listener, app).await?; + + Ok(()) +} + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::init(); + + let app = handlers::router(); + + match listen(app).await { + Ok(_) => {}, + Err(e) => tracing::error!("failed to start hook-producer http server, {}", e) + } } From 9fb81ef3ca045d18f9163c70eda57c74cb6a7389 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 4 Dec 2023 14:51:19 +0000 Subject: [PATCH 023/130] Formatting, move shared deps into workspace --- Cargo.toml | 9 ++++++++- hook-common/Cargo.toml | 12 ++++++------ hook-producer/Cargo.toml | 8 ++++---- hook-producer/src/handlers/mod.rs | 2 +- hook-producer/src/main.rs | 4 ++-- 5 files changed, 21 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ea18839..8f55767 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,5 +8,12 @@ members = [ ] [workspace.dependencies] -sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json" ] } +chrono = { version = "0.4" } +serde = { version = "1.0" } +serde_derive = { version = "1.0" } +thiserror = { version = "1.0" } +sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json", "chrono" ] } tokio = { version = "1.34.0", features = ["full"] } +eyre = "0.6.9" +tracing = "0.1.40" +tracing-subscriber = "0.3.18" diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 673d887..b55a9ec 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -chrono = { version = "0.4" } -serde = { version = "1.0" } -serde_derive = { version = "1.0" } -sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json", "chrono" ] } -thiserror = { version = "1.0" } +chrono = { workspace = true} +serde = { workspace = true } +serde_derive = { workspace = true} +thiserror = { workspace = true } +sqlx = { workspace = true } [dev-dependencies] -tokio = { version = "1.34", features = ["macros"] } # We need a runtime for async tests +tokio = { workspace = true } # We need a runtime for async tests diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml index 35af92d..85100d0 100644 --- a/hook-producer/Cargo.toml +++ b/hook-producer/Cargo.toml @@ -6,8 +6,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tokio = { workspace = true } axum = { version="0.7.1", features=["http2"] } -eyre = "0.6.9" -tracing = "0.1.40" -tracing-subscriber = 
"0.3.18" +tokio = { workspace = true } +eyre = {workspace = true } +tracing = {workspace = true} +tracing-subscriber = {workspace = true} diff --git a/hook-producer/src/handlers/mod.rs b/hook-producer/src/handlers/mod.rs index a83e46e..8b4f83c 100644 --- a/hook-producer/src/handlers/mod.rs +++ b/hook-producer/src/handlers/mod.rs @@ -1,4 +1,4 @@ -use axum::{Router, routing}; +use axum::{routing, Router}; mod index; diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index a8a1ce5..f05edab 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -16,7 +16,7 @@ async fn main() { let app = handlers::router(); match listen(app).await { - Ok(_) => {}, - Err(e) => tracing::error!("failed to start hook-producer http server, {}", e) + Ok(_) => {} + Err(e) => tracing::error!("failed to start hook-producer http server, {}", e), } } From 731ae143e7a58586973b441b6ec863bfe85a46a2 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Mon, 4 Dec 2023 15:08:00 +0000 Subject: [PATCH 024/130] add metrics --- Cargo.lock | 297 ++++++++++++++++++++++++++++-- Cargo.toml | 3 + hook-producer/Cargo.toml | 3 + hook-producer/src/config.rs | 16 ++ hook-producer/src/handlers/mod.rs | 10 +- hook-producer/src/main.rs | 16 +- hook-producer/src/metrics.rs | 53 ++++++ 7 files changed, 373 insertions(+), 25 deletions(-) create mode 100644 hook-producer/src/config.rs create mode 100644 hook-producer/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index be15d2d..451d424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,10 +97,10 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", "http-body-util", - "hyper", + "hyper 1.0.1", "hyper-util", "itoa", "matchit", @@ -129,8 +129,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", @@ -284,6 +284,19 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + [[package]] name = "crossbeam-queue" version = "0.3.8" @@ -351,6 +364,26 @@ dependencies = [ "serde", ] +[[package]] +name = "envconfig" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea81cc7e21f55a9d9b1efb6816904978d0bfbe31a50347cb24b2e75564bcac9b" +dependencies = [ + "envconfig_derive", +] + +[[package]] +name = "envconfig_derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfca278e5f84b45519acaaff758ebfa01f18e96998bc24b8f1b722dd804b9bf" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -557,14 +590,29 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", - "indexmap", + "http 1.0.0", + "indexmap 2.1.0", "slab", "tokio", "tokio-util", "tracing", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.14.3" @@ -581,7 +629,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -653,12 +701,26 @@ name = "hook-producer" version = "0.1.0" dependencies = [ "axum", + "envconfig", "eyre", + "metrics", + "metrics-exporter-prometheus", "tokio", "tracing", "tracing-subscriber", ] +[[package]] +name = "http" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http" version = "1.0.0" @@ -670,6 +732,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http 0.2.11", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.0" @@ -677,7 +750,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http", + "http 1.0.0", ] [[package]] @@ -688,8 +761,8 @@ checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" dependencies = [ "bytes", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", "pin-project-lite", ] @@ -705,6 +778,29 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 0.2.11", + "http-body 0.4.5", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.4.10", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.0.1" @@ -715,8 +811,8 @@ dependencies = [ "futures-channel", "futures-util", "h2", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", "httparse", "httpdate", "itoa", @@ -733,11 +829,11 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http", - "http-body", - "hyper", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.0.1", "pin-project-lite", - "socket2", + "socket2 0.5.5", "tokio", "tower", "tower-service", @@ -783,6 +879,16 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.1.0" @@ -790,9 +896,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" 
dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.14.3", ] +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + [[package]] name = "itertools" version = "0.11.0" @@ -871,6 +983,15 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "mach2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" +dependencies = [ + "libc", +] + [[package]] name = "matchit" version = "0.7.3" @@ -893,6 +1014,70 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "metrics" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +dependencies = [ + "ahash", + "metrics-macros", + "portable-atomic", +] + +[[package]] +name = "metrics-exporter-prometheus" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +dependencies = [ + "base64", + "hyper 0.14.27", + "indexmap 1.9.3", + "ipnet", + "metrics", + "metrics-util", + "quanta", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "metrics-macros" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "metrics-util" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown 0.13.1", + "metrics", + "num_cpus", + "quanta", + "sketches-ddsketch", +] + [[package]] name = "mime" version = "0.3.17" @@ -1200,6 +1385,12 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "portable-atomic" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -1215,6 +1406,22 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quanta" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +dependencies = [ + "crossbeam-utils", + "libc", + "mach2", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quote" version = "1.0.33" @@ -1254,6 +1461,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "raw-cpuid" +version = "10.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -1455,6 +1671,12 @@ dependencies = [ "rand_core", ] +[[package]] +name = "sketches-ddsketch" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" + [[package]] name = "slab" version = "0.4.9" @@ -1470,6 +1692,16 @@ version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "socket2" version = "0.5.5" @@ -1552,7 +1784,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap", + "indexmap 2.1.0", "log", "memchr", "native-tls", @@ -1839,7 +2071,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] @@ -1966,6 +2198,12 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "try-lock" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" + [[package]] name = "typenum" version = "1.17.0" @@ -2046,6 +2284,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -2106,6 +2353,16 @@ version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +[[package]] +name = "web-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "whoami" version = "1.4.1" diff --git a/Cargo.toml b/Cargo.toml index 8f55767..e92db69 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,3 +17,6 @@ tokio = { version = "1.34.0", features = ["full"] } eyre = "0.6.9" tracing = "0.1.40" tracing-subscriber = "0.3.18" +envconfig = "0.10.0" +metrics = "0.21.1" +metrics-exporter-prometheus = "0.12.1" diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml index 85100d0..47ef532 100644 --- a/hook-producer/Cargo.toml +++ b/hook-producer/Cargo.toml @@ -11,3 +11,6 @@ tokio = { workspace = true } eyre = {workspace = true } tracing = {workspace = true} tracing-subscriber = {workspace = true} +envconfig = { workspace = true } +metrics = { workspace = true } +metrics-exporter-prometheus = { workspace = true } diff --git a/hook-producer/src/config.rs b/hook-producer/src/config.rs new file mode 100644 index 0000000..9d093c6 --- /dev/null +++ b/hook-producer/src/config.rs @@ -0,0 +1,16 @@ +use envconfig::Envconfig; + +#[derive(Envconfig)] +pub struct Config { + #[envconfig(from = "BIND_HOST", 
default = "0.0.0.0")] + pub host: String, + + #[envconfig(from = "BIND_PORT", default = "8000")] + pub port: u16, +} + +impl Config { + pub fn bind(&self) -> String { + format!("{}:{}", self.host, self.port) + } +} diff --git a/hook-producer/src/handlers/mod.rs b/hook-producer/src/handlers/mod.rs index 8b4f83c..2504073 100644 --- a/hook-producer/src/handlers/mod.rs +++ b/hook-producer/src/handlers/mod.rs @@ -3,7 +3,13 @@ use axum::{routing, Router}; mod index; pub fn router() -> Router { - let app = Router::new().route("/", routing::get(index::get)); + let recorder_handle = crate::metrics::setup_metrics_recorder(); - app + Router::new() + .route("/", routing::get(index::get)) + .route( + "/metrics", + routing::get(move || std::future::ready(recorder_handle.render())), + ) + .layer(axum::middleware::from_fn(crate::metrics::track_metrics)) } diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index f05edab..118829b 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -1,9 +1,17 @@ use axum::Router; + +use config::Config; +use envconfig::Envconfig; + use eyre::Result; + +mod config; mod handlers; +mod metrics; + +async fn listen(app: Router, bind: String) -> Result<()> { + let listener = tokio::net::TcpListener::bind(bind).await?; -async fn listen(app: Router) -> Result<()> { - let listener = tokio::net::TcpListener::bind("0.0.0.0:8000").await?; axum::serve(listener, app).await?; Ok(()) @@ -15,7 +23,9 @@ async fn main() { let app = handlers::router(); - match listen(app).await { + let config = Config::init_from_env().expect("failed to load configuration from env"); + + match listen(app, config.bind()).await { Ok(_) => {} Err(e) => tracing::error!("failed to start hook-producer http server, {}", e), } diff --git a/hook-producer/src/metrics.rs b/hook-producer/src/metrics.rs new file mode 100644 index 0000000..dbdc7b1 --- /dev/null +++ b/hook-producer/src/metrics.rs @@ -0,0 +1,53 @@ +use std::time::Instant; + +use axum::{ + body::Body, extract::MatchedPath, http::Request, middleware::Next, response::IntoResponse, +}; +use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; + +pub fn setup_metrics_recorder() -> PrometheusHandle { + const EXPONENTIAL_SECONDS: &[f64] = &[ + 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, + ]; + + PrometheusBuilder::new() + .set_buckets_for_metric( + Matcher::Full("http_requests_duration_seconds".to_string()), + EXPONENTIAL_SECONDS, + ) + .unwrap() + .install_recorder() + .unwrap() +} + +/// Middleware to record some common HTTP metrics +/// Someday tower-http might provide a metrics middleware: https://github.com/tower-rs/tower-http/issues/57 +pub async fn track_metrics(req: Request, next: Next) -> impl IntoResponse { + let start = Instant::now(); + + let path = if let Some(matched_path) = req.extensions().get::() { + matched_path.as_str().to_owned() + } else { + req.uri().path().to_owned() + }; + + let method = req.method().clone(); + + // Run the rest of the request handling first, so we can measure it and get response + // codes. 
+    let response = next.run(req).await;
+
+    let latency = start.elapsed().as_secs_f64();
+    let status = response.status().as_u16().to_string();
+
+    let labels = [
+        ("method", method.to_string()),
+        ("path", path),
+        ("status", status),
+    ];
+
+    metrics::increment_counter!("http_requests_total", &labels);
+    metrics::histogram!("http_requests_duration_seconds", latency, &labels);
+
+    response
+}

From a63a00ce667c99fb4a185da3dd4b20c431f5d3ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Wed, 6 Dec 2023 13:46:09 +0100
Subject: [PATCH 025/130] feat: Include migrations and echo-server in
 docker-compose stack

---
 Dockerfile.sqlx              |  5 +++++
 README.md                    | 14 +++-----------
 docker-compose.yml           | 25 +++++++++++++++++++++++++
 docker/echo-server/Caddyfile | 17 +++++++++++++++++
 hook-consumer/README.md      |  2 ++
 5 files changed, 52 insertions(+), 11 deletions(-)
 create mode 100644 Dockerfile.sqlx
 create mode 100644 docker/echo-server/Caddyfile
 create mode 100644 hook-consumer/README.md

diff --git a/Dockerfile.sqlx b/Dockerfile.sqlx
new file mode 100644
index 0000000..c55dfaa
--- /dev/null
+++ b/Dockerfile.sqlx
@@ -0,0 +1,5 @@
+FROM docker.io/library/rust:1.74.0
+
+RUN cargo install sqlx-cli --no-default-features --features native-tls,postgres
+
+WORKDIR /sqlx
diff --git a/README.md b/README.md
index b17e7ae..a3a674c 100644
--- a/README.md
+++ b/README.md
@@ -4,24 +4,16 @@ A reliable and performant webhook system for PostHog
 ## Requirements
 
 1. [Rust](https://www.rust-lang.org/tools/install).
-2. [sqlx-cli](https://crates.io/crates/sqlx-cli): To setup database and run migrations.
-3. [Docker](https://docs.docker.com/engine/install/) or [podman](https://podman.io/docs/installation) (and [podman-compose](https://github.com/containers/podman-compose#installation)): To setup testing services.
+2. [Docker](https://docs.docker.com/engine/install/), or [podman](https://podman.io/docs/installation) and [podman-compose](https://github.com/containers/podman-compose#installation): To set up the development stack.
 
 ## Testing
 
-1. Start a PostgreSQL instance:
+1. Start the development stack:
 ```bash
 docker compose -f docker-compose.yml up -d --wait
 ```
 
-2. Prepare test database:
-```bash
-export DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database
-sqlx database create
-sqlx migrate run
-```
-
-3. Test:
+2. Test:
 ```bash
 cargo test
 ```
diff --git a/docker-compose.yml b/docker-compose.yml
index 35b7a49..6f62692 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,6 @@
 services:
   db:
+    container_name: db
     image: docker.io/library/postgres:16-alpine
     restart: on-failure
     environment:
@@ -13,3 +14,27 @@ services:
     ports:
       - '15432:5432'
     command: postgres -c max_connections=1000 -c idle_in_transaction_session_timeout=300000
+
+  setup_test_db:
+    container_name: setup-test-db
+    build:
+      context: .
+      dockerfile: Dockerfile.sqlx
+    restart: on-failure
+    command: >
+      sh -c "sqlx database create && sqlx migrate run"
+    requires:
+      - db
+    environment:
+      DATABASE_URL: postgres://posthog:posthog@db:5432/test_database
+    volumes:
+      - ./migrations:/sqlx/migrations/
+
+  echo_server:
+    image: docker.io/library/caddy:2
+    container_name: echo-server
+    restart: on-failure
+    ports:
+      - '18081:8081'
+    volumes:
+      - ./docker/echo-server/Caddyfile:/etc/caddy/Caddyfile
diff --git a/docker/echo-server/Caddyfile b/docker/echo-server/Caddyfile
new file mode 100644
index 0000000..a13ac68
--- /dev/null
+++ b/docker/echo-server/Caddyfile
@@ -0,0 +1,17 @@
+{
+    auto_https off
+}
+
+:8081
+
+route /echo {
+    respond `{http.request.body}` 200 {
+        close
+    }
+}
+
+route /fail {
+    respond `{http.request.body}` 400 {
+        close
+    }
+}
diff --git a/hook-consumer/README.md b/hook-consumer/README.md
new file mode 100644
index 0000000..1adab6e
--- /dev/null
+++ b/hook-consumer/README.md
@@ -0,0 +1,2 @@
+# hook-consumer
+Consume and process webhook jobs

From 690888a250733c0f7036895fd64901f55b419380 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Wed, 6 Dec 2023 13:55:06 +0100
Subject: [PATCH 026/130] refactor: Support in pgqueue for consumer

---
 hook-common/src/pgqueue.rs | 237 ++++++++++++++++++++++---------------
 1 file changed, 140 insertions(+), 97 deletions(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 89aab7e..7ed4655 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -4,9 +4,10 @@
 use std::default::Default;
 use std::str::FromStr;
+use std::time;
 
-use chrono::{prelude::*, Duration};
-use serde::{de::DeserializeOwned, Serialize};
+use chrono;
+use serde;
 use sqlx::postgres::{PgPool, PgPoolOptions};
 use thiserror::Error;
 
@@ -18,12 +19,18 @@ pub enum PgQueueError {
     ConnectionError { error: sqlx::Error },
     #[error("{command} query failed with: {error}")]
     QueryError { command: String, error: sqlx::Error },
-    #[error("transaction {command} failed with: {error}")]
-    TransactionError { command: String, error: sqlx::Error },
     #[error("{0} is not a valid JobStatus")]
     ParseJobStatusError(String),
-    #[error("{0} Job has reached max attempts and cannot be retried further")]
-    MaxAttemptsReachedError(String),
+}
+
+#[derive(Error, Debug)]
+pub enum PgJobError<T> {
+    #[error("retry is an invalid state for this PgJob: {error}")]
+    RetryInvalidError { job: T, error: String },
+    #[error("{command} query failed with: {error}")]
+    QueryError { command: String, error: sqlx::Error },
+    #[error("transaction {command} failed with: {error}")]
+    TransactionError { command: String, error: sqlx::Error },
 }
 
 /// Enumeration of possible statuses for a Job.
@@ -64,18 +71,18 @@ impl FromStr for JobStatus {
 pub type JobParameters<J> = sqlx::types::Json<J>;
 
 /// A Job to be executed by a worker dequeueing a PgQueue.
-#[derive(sqlx::FromRow)]
+#[derive(sqlx::FromRow, Debug)]
 pub struct Job<J> {
     /// A unique id identifying a job.
     pub id: i64,
     /// A number corresponding to the current job attempt.
     pub attempt: i32,
     /// A datetime corresponding to when the job was attempted.
     pub attempted_at: chrono::DateTime<chrono::offset::Utc>,
     /// A vector of identifiers that have attempted this job. E.g. thread ids, pod names, etc...
     pub attempted_by: Vec<String>,
     /// A datetime corresponding to when the job was created.
     pub created_at: chrono::DateTime<chrono::offset::Utc>,
     /// The current job's number of max attempts.
     pub max_attempts: i32,
     /// Arbitrary job parameters stored as JSON.
@@ -89,28 +96,29 @@ pub struct Job<J> {
 }
 
 impl<J> Job<J> {
+    /// Return true if this job attempt is greater or equal to the maximum number of possible attempts.
+    pub fn is_gte_max_attempts(&self) -> bool {
+        self.attempt >= self.max_attempts
+    }
+
     /// Consume Job to retry it.
     /// This returns a RetryableJob that can be enqueued by PgQueue.
     ///
     /// # Arguments
     ///
     /// * `error`: Any JSON-serializable value to be stored as an error.
-    pub fn retry<E: Serialize>(self, error: E) -> Result<RetryableJob<E>, PgQueueError> {
-        if self.attempt >= self.max_attempts {
-            Err(PgQueueError::MaxAttemptsReachedError(self.target))
-        } else {
-            Ok(RetryableJob {
-                id: self.id,
-                attempt: self.attempt,
-                error: sqlx::types::Json(error),
-                queue: self.queue,
-            })
+    fn retry<E: serde::Serialize>(self, error: E) -> RetryableJob<E> {
+        RetryableJob {
+            id: self.id,
+            attempt: self.attempt,
+            error: sqlx::types::Json(error),
+            queue: self.queue,
         }
     }
 
     /// Consume Job to complete it.
     /// This returns a CompletedJob that can be marked as completed by PgQueue.
-    pub fn complete(self) -> CompletedJob {
+    fn complete(self) -> CompletedJob {
         CompletedJob {
             id: self.id,
             queue: self.queue,
@@ -123,7 +131,7 @@ impl<J> Job<J> {
     /// # Arguments
     ///
     /// * `error`: Any JSON-serializable value to be stored as an error.
-    pub fn fail<E: Serialize>(self, error: E) -> FailedJob<E> {
+    fn fail<E: serde::Serialize>(self, error: E) -> FailedJob<E> {
         FailedJob {
             id: self.id,
             error: sqlx::types::Json(error),
@@ -133,6 +141,7 @@
 }
 
 /// A Job that can be updated in PostgreSQL.
+#[derive(Debug)]
 pub struct PgJob<J> {
     pub job: Job<J>,
     pub table: String,
@@ -141,11 +150,21 @@
 }
 
 impl<J> PgJob<J> {
-    pub async fn retry<E: Serialize>(
+    pub async fn retry<E: serde::Serialize>(
         mut self,
         error: E,
-    ) -> Result<RetryableJob<E>, PgQueueError> {
-        let retryable_job = self.job.retry(error)?;
+        preferred_retry_interval: Option<time::Duration>,
+    ) -> Result<RetryableJob<E>, PgJobError<PgJob<J>>> {
+        if self.job.is_gte_max_attempts() {
+            return Err(PgJobError::RetryInvalidError {
+                job: self,
+                error: "Maximum attempts reached".to_owned(),
+            });
+        }
+        let retryable_job = self.job.retry(error);
+        let retry_interval = self
+            .retry_policy
+            .time_until_next_retry(&retryable_job, preferred_retry_interval);
 
         let base_query = format!(
             r#"
@@ -161,7 +180,6 @@ WHERE
     AND queue = $1
 RETURNING
     "{0}".*
-
 "#,
             &self.table
         );
 
         sqlx::query(&base_query)
             .bind(&retryable_job.queue)
             .bind(retryable_job.id)
-            .bind(self.retry_policy.time_until_next_retry(&retryable_job))
+            .bind(retry_interval)
             .bind(&retryable_job.error)
             .execute(&mut *self.connection)
             .await
-            .map_err(|error| PgQueueError::QueryError {
+            .map_err(|error| PgJobError::QueryError {
                 command: "UPDATE".to_owned(),
                 error,
             })?;
 
         Ok(retryable_job)
     }
 
-    pub async fn complete(mut self) -> Result<CompletedJob, PgQueueError> {
+    pub async fn complete(mut self) -> Result<CompletedJob, PgJobError<PgJob<J>>> {
         let completed_job = self.job.complete();
 
         let base_query = format!(
             r#"
 UPDATE
     "{0}"
 SET
     finished_at = NOW(),
-    status = 'completed'::job_status,
+    status = 'completed'::job_status
 WHERE
     "{0}".id = $2
     AND queue = $1
 RETURNING
     "{0}".*
-
 "#,
             &self.table
         );
 
             .bind(completed_job.id)
             .execute(&mut *self.connection)
             .await
-            .map_err(|error| PgQueueError::QueryError {
+            .map_err(|error| PgJobError::QueryError {
                 command: "UPDATE".to_owned(),
                 error,
             })?;
 
         Ok(completed_job)
     }
 
-    pub async fn fail<E: Serialize>(
+    pub async fn fail<E: serde::Serialize>(
         mut self,
         error: E,
-    ) -> Result<FailedJob<E>, PgQueueError> {
+    ) -> Result<FailedJob<E>, PgJobError<PgJob<J>>> {
         let failed_job = self.job.fail(error);
 
         let base_query = format!(
@@ -226,7 +243,7 @@ UPDATE
     "{0}"
 SET
     finished_at = NOW(),
-    status = 'failed'::job_status,
+    status = 'failed'::job_status
 WHERE
     "{0}".id = $2
     AND queue = $1
@@ -242,7 +259,7 @@ RETURNING
             .bind(failed_job.id)
             .execute(&mut *self.connection)
             .await
-            .map_err(|error| PgQueueError::QueryError {
+            .map_err(|error| PgJobError::QueryError {
                 command: "UPDATE".to_owned(),
                 error,
             })?;
 
         Ok(failed_job)
     }
 }
 
 /// A Job within an open PostgreSQL transaction.
 /// This implementation allows 'hiding' the job from any other workers running SKIP LOCKED queries.
+#[derive(Debug)]
 pub struct PgTransactionJob<'c, J> {
     pub job: Job<J>,
     pub table: String,
@@ -261,11 +279,21 @@ pub struct PgTransactionJob<'c, J> {
 }
 
 impl<'c, J> PgTransactionJob<'c, J> {
-    pub async fn retry<E: Serialize>(
+    pub async fn retry<E: serde::Serialize>(
         mut self,
         error: E,
-    ) -> Result<RetryableJob<E>, PgQueueError> {
-        let retryable_job = self.job.retry(error)?;
+        preferred_retry_interval: Option<time::Duration>,
+    ) -> Result<RetryableJob<E>, PgJobError<PgTransactionJob<'c, J>>> {
+        if self.job.is_gte_max_attempts() {
+            return Err(PgJobError::RetryInvalidError {
+                job: self,
+                error: "Maximum attempts reached".to_owned(),
+            });
+        }
+        let retryable_job = self.job.retry(error);
+        let retry_interval = self
+            .retry_policy
+            .time_until_next_retry(&retryable_job, preferred_retry_interval);
 
         let base_query = format!(
             r#"
@@ -289,11 +317,11 @@ RETURNING
         sqlx::query(&base_query)
             .bind(&retryable_job.queue)
             .bind(retryable_job.id)
-            .bind(self.retry_policy.time_until_next_retry(&retryable_job))
+            .bind(retry_interval)
             .bind(&retryable_job.error)
             .execute(&mut *self.transaction)
             .await
-            .map_err(|error| PgQueueError::QueryError {
+            .map_err(|error| PgJobError::QueryError {
                 command: "UPDATE".to_owned(),
                 error,
             })?;
 
         self.transaction
             .commit()
             .await
-            .map_err(|error| PgQueueError::TransactionError {
+            .map_err(|error| PgJobError::TransactionError {
                 command: "COMMIT".to_owned(),
                 error,
             })?;
 
         Ok(retryable_job)
     }
 
-    pub async fn complete(mut self) -> Result<CompletedJob, PgQueueError> {
+    pub async fn complete(mut self) -> Result<CompletedJob, PgJobError<PgTransactionJob<'c, J>>> {
         let completed_job = self.job.complete();
 
         let base_query = format!(
@@ -318,13 +346,12 @@ UPDATE
     "{0}"
 SET
     finished_at = NOW(),
-    status = 'completed'::job_status,
+    status = 'completed'::job_status
 WHERE
     "{0}".id = $2
     AND queue = $1
 RETURNING
     "{0}".*
-
 "#,
             &self.table
         );
@@ -334,7 +361,7 @@ RETURNING
             .bind(completed_job.id)
             .execute(&mut *self.transaction)
             .await
-            .map_err(|error| PgQueueError::QueryError {
+            .map_err(|error| PgJobError::QueryError {
                 command: "UPDATE".to_owned(),
                 error,
             })?;
 
         self.transaction
             .commit()
             .await
-            .map_err(|error| PgQueueError::TransactionError {
+            .map_err(|error| PgJobError::TransactionError {
                 command: "COMMIT".to_owned(),
                 error,
             })?;
 
         Ok(completed_job)
     }
 
-    pub async fn fail<E: Serialize>(
+    pub async fn fail<E: serde::Serialize>(
         mut self,
         error: E,
-    ) -> Result<FailedJob<E>, PgQueueError> {
+    ) -> Result<FailedJob<E>, PgJobError<PgTransactionJob<'c, J>>> {
         let failed_job = self.job.fail(error);
 
         let base_query = format!(
@@ -362,13 +389,12 @@ UPDATE
     "{0}"
 SET
     finished_at = NOW(),
-    status = 'failed'::job_status,
+    status = 'failed'::job_status
 WHERE
     "{0}".id = $2
     AND queue = $1
 RETURNING
     "{0}".*
-
 "#,
             &self.table
         );
@@ -378,7 +404,7 @@ RETURNING
             .bind(failed_job.id)
             .execute(&mut *self.transaction)
             .await
-            .map_err(|error| PgQueueError::QueryError {
+            .map_err(|error| PgJobError::QueryError {
                 command: "UPDATE".to_owned(),
                 error,
             })?;
 
         self.transaction
             .commit()
             .await
-            .map_err(|error| PgQueueError::TransactionError {
+            .map_err(|error| PgJobError::TransactionError {
                command: "COMMIT".to_owned(),
                 error,
             })?;
@@ -399,7 +425,7 @@ RETURNING
 /// The time until retry will depend on the PgQueue's RetryPolicy.
 pub struct RetryableJob<E> {
     /// A unique id identifying a job.
-    pub id: i64,
+    id: i64,
     /// A number corresponding to the current job attempt.
     pub attempt: i32,
     /// Any JSON-serializable value to be stored as an error.
@@ -411,7 +437,7 @@ pub struct RetryableJob<E> {
 /// A Job that has completed to be enqueued into a PgQueue and marked as completed.
 pub struct CompletedJob {
     /// A unique id identifying a job.
-    pub id: i64,
+    id: i64,
     /// A unique id identifying a job queue.
     pub queue: String,
 }
@@ -419,7 +445,7 @@ pub struct CompletedJob {
 /// A Job that has failed to be enqueued into a PgQueue and marked as failed.
 pub struct FailedJob<E> {
     /// A unique id identifying a job.
-    pub id: i64,
+    id: i64,
     /// Any JSON-serializable value to be stored as an error.
     pub error: sqlx::types::Json<E>,
     /// A unique id identifying a job queue.
@@ -446,27 +472,47 @@ impl<J> NewJob<J> {
     }
 }
 
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 /// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry.
 pub struct RetryPolicy {
     /// Coefficient to multiply initial_interval with for every past attempt.
-    backoff_coefficient: i32,
+    backoff_coefficient: u32,
     /// The backoff interval for the first retry.
-    initial_interval: Duration,
+    initial_interval: time::Duration,
     /// The maximum possible backoff between retries.
-    maximum_interval: Option<Duration>,
+    maximum_interval: Option<time::Duration>,
 }
 
 impl RetryPolicy {
+    pub fn new(
+        backoff_coefficient: u32,
+        initial_interval: time::Duration,
+        maximum_interval: Option<time::Duration>,
+    ) -> Self {
+        Self {
+            backoff_coefficient,
+            initial_interval,
+            maximum_interval,
+        }
+    }
+
     /// Calculate the time until the next retry for a given RetryableJob.
-    pub fn time_until_next_retry<E: Serialize>(&self, job: &RetryableJob<E>) -> Duration {
+    pub fn time_until_next_retry<E: serde::Serialize>(
+        &self,
+        job: &RetryableJob<E>,
+        preferred_retry_interval: Option<time::Duration>,
+    ) -> time::Duration {
         let candidate_interval =
             self.initial_interval * self.backoff_coefficient.pow(job.attempt as u32);
 
-        if let Some(max_interval) = self.maximum_interval {
-            std::cmp::min(candidate_interval, max_interval)
-        } else {
-            candidate_interval
+        match (preferred_retry_interval, self.maximum_interval) {
+            (Some(duration), Some(max_interval)) => std::cmp::min(
+                std::cmp::max(std::cmp::min(candidate_interval, max_interval), duration),
+                max_interval,
+            ),
+            (Some(duration), None) => std::cmp::max(candidate_interval, duration),
+            (None, Some(max_interval)) => std::cmp::min(candidate_interval, max_interval),
+            (None, None) => candidate_interval,
         }
     }
 }
@@ -475,7 +521,7 @@ impl Default for RetryPolicy {
     fn default() -> Self {
         Self {
             backoff_coefficient: 2,
-            initial_interval: Duration::seconds(1),
+            initial_interval: time::Duration::from_secs(1),
             maximum_interval: None,
         }
     }
@@ -491,8 +537,6 @@ pub struct PgQueue {
     retry_policy: RetryPolicy,
     /// The identifier of the PostgreSQL table this queue runs on.
     table: String,
-    /// The identifier of the worker listening on this queue.
-    worker: String,
 }
 
 pub type PgQueueResult<T> = std::result::Result<T, PgQueueError>;
 
@@ -511,12 +555,10 @@ impl PgQueue {
         queue_name: &str,
         table_name: &str,
         url: &str,
-        worker_name: &str,
         retry_policy: RetryPolicy,
     ) -> PgQueueResult<Self> {
         let name = queue_name.to_owned();
         let table = table_name.to_owned();
-        let worker = worker_name.to_owned();
         let pool = PgPoolOptions::new()
             .connect(url)
             .await
@@ -527,13 +569,15 @@ impl PgQueue {
             pool,
             retry_policy,
             table,
-            worker,
         })
     }
 
     /// Dequeue a Job from this PgQueue to work on it.
-    pub async fn dequeue<J: DeserializeOwned + std::marker::Send + std::marker::Unpin + 'static>(
+    pub async fn dequeue<
+        J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static,
+    >(
         &self,
+        attempted_by: &str,
     ) -> PgQueueResult<Option<PgJob<J>>> {
         let mut connection = self
             .pool
@@ -578,7 +622,7 @@ RETURNING
         let query_result: Result<Job<J>, sqlx::Error> = sqlx::query_as(&base_query)
             .bind(&self.name)
-            .bind(&self.worker)
+            .bind(attempted_by)
             .fetch_one(&mut *connection)
             .await;
@@ -608,10 +652,12 @@ RETURNING
     /// Dequeue a Job from this PgQueue to work on it.
     pub async fn dequeue_tx<
+        'a,
-        J: DeserializeOwned + std::marker::Send + std::marker::Unpin + 'static,
+        J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static,
     >(
         &self,
-    ) -> PgQueueResult<Option<PgTransactionJob<'_, J>>> {
+        attempted_by: &str,
+    ) -> PgQueueResult<Option<PgTransactionJob<'a, J>>> {
         let mut tx = self
             .pool
             .begin()
@@ -655,7 +701,7 @@ RETURNING
         let query_result: Result<Job<J>, sqlx::Error> = sqlx::query_as(&base_query)
             .bind(&self.name)
-            .bind(&self.worker)
+            .bind(attempted_by)
             .fetch_one(&mut *tx)
             .await;
@@ -678,7 +724,7 @@ RETURNING
     /// Enqueue a Job into this PgQueue.
     /// We take ownership of NewJob to enforce a specific NewJob is only enqueued once.
-    pub async fn enqueue<J: Serialize>(
+    pub async fn enqueue<J: serde::Serialize>(
         &self,
         job: NewJob<J>,
     ) -> PgQueueResult<()> {
@@ -712,9 +758,8 @@ VALUES
 #[cfg(test)]
 mod tests {
     use super::*;
-    use serde::Deserialize;
 
-    #[derive(Serialize, Deserialize, PartialEq, Debug)]
+    #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)]
     struct JobParameters {
         method: String,
         body: String,
@@ -752,7 +797,6 @@ mod tests {
             "test_can_dequeue_job",
             "job_queue",
             "postgres://posthog:posthog@localhost:15432/test_database",
-            &worker_id,
             RetryPolicy::default(),
         )
         .await
@@ -761,7 +805,7 @@ mod tests {
         queue.enqueue(new_job).await.expect("failed to enqueue job");
 
         let pg_job: PgJob<JobParameters> = queue
-            .dequeue()
+            .dequeue(&worker_id)
             .await
             .expect("failed to dequeue job")
             .expect("didn't find a job to dequeue");
@@ -782,14 +826,15 @@ mod tests {
             "test_dequeue_returns_none_on_no_jobs",
             "job_queue",
             "postgres://posthog:posthog@localhost:15432/test_database",
-            &worker_id,
             RetryPolicy::default(),
         )
         .await
         .expect("failed to connect to local test postgresql database");
 
-        let pg_job: Option<PgJob<JobParameters>> =
-            queue.dequeue().await.expect("failed to dequeue job");
+        let pg_job: Option<PgJob<JobParameters>> = queue
+            .dequeue(&worker_id)
+            .await
+            .expect("failed to dequeue job");
 
         assert!(pg_job.is_none());
     }
@@ -805,7 +850,6 @@ mod tests {
             "test_can_dequeue_tx_job",
             "job_queue",
             "postgres://posthog:posthog@localhost:15432/test_database",
-            &worker_id,
             RetryPolicy::default(),
         )
         .await
@@ -814,7 +858,7 @@ mod tests {
         queue.enqueue(new_job).await.expect("failed to enqueue job");
 
         let tx_job: PgTransactionJob<'_, JobParameters> = queue
-            .dequeue_tx()
+            .dequeue_tx(&worker_id)
             .await
             .expect("failed to dequeue job")
             .expect("didn't find a job to dequeue");
@@ -835,14 +879,15 @@ mod tests {
             "test_dequeue_tx_returns_none_on_no_jobs",
             "job_queue",
             "postgres://posthog:posthog@localhost:15432/test_database",
-            &worker_id,
             RetryPolicy::default(),
         )
         .await
.expect("failed to connect to local test postgresql database"); - let tx_job: Option> = - queue.dequeue_tx().await.expect("failed to dequeue job"); + let tx_job: Option> = queue + .dequeue_tx(&worker_id) + .await + .expect("failed to dequeue job"); assert!(tx_job.is_none()); } @@ -855,7 +900,7 @@ mod tests { let new_job = NewJob::new(2, job_parameters, &job_target); let retry_policy = RetryPolicy { backoff_coefficient: 0, - initial_interval: Duration::seconds(0), + initial_interval: time::Duration::from_secs(0), maximum_interval: None, }; @@ -863,7 +908,6 @@ mod tests { "test_can_retry_job_with_remaining_attempts", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - &worker_id, retry_policy, ) .await @@ -871,16 +915,16 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); let job: PgJob = queue - .dequeue() + .dequeue(&worker_id) .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); let _ = job - .retry("a very reasonable failure reason") + .retry("a very reasonable failure reason", None) .await .expect("failed to retry job"); let retried_job: PgJob = queue - .dequeue() + .dequeue(&worker_id) .await .expect("failed to dequeue job") .expect("didn't find retried job to dequeue"); @@ -906,7 +950,7 @@ mod tests { let new_job = NewJob::new(1, job_parameters, &job_target); let retry_policy = RetryPolicy { backoff_coefficient: 0, - initial_interval: Duration::seconds(0), + initial_interval: time::Duration::from_secs(0), maximum_interval: None, }; @@ -914,7 +958,6 @@ mod tests { "test_cannot_retry_job_without_remaining_attempts", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - &worker_id, retry_policy, ) .await @@ -923,11 +966,11 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); let job: PgJob = queue - .dequeue() + .dequeue(&worker_id) .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); - job.retry("a very reasonable failure reason") + job.retry("a very reasonable failure reason", None) .await .expect("failed to retry job"); } From 7824d69da6dd104650ef3f8fcc245be8b8b6a6cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 6 Dec 2023 13:55:21 +0100 Subject: [PATCH 027/130] feat: First consumer implementation --- hook-consumer/Cargo.toml | 13 + hook-consumer/src/config.rs | 56 ++++ hook-consumer/src/consumer.rs | 492 ++++++++++++++++++++++++++++++++++ hook-consumer/src/lib.rs | 2 + hook-consumer/src/main.rs | 34 ++- 5 files changed, 595 insertions(+), 2 deletions(-) create mode 100644 hook-consumer/src/config.rs create mode 100644 hook-consumer/src/consumer.rs create mode 100644 hook-consumer/src/lib.rs diff --git a/hook-consumer/Cargo.toml b/hook-consumer/Cargo.toml index 49c2d9f..2e95a6b 100644 --- a/hook-consumer/Cargo.toml +++ b/hook-consumer/Cargo.toml @@ -6,3 +6,16 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +async-std = { version = "1.12" } +chrono = { version = "0.4" } +envconfig = { version = "0.10" } +futures = "0.3" +hook-common = { path = "../hook-common" } +http = { version = "0.2" } +reqwest = { version = "0.11" } +serde = { version = "1.0" } +serde_derive = { version = "1.0" } +sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json", "chrono" ] } +thiserror = { version = "1.0" } +tokio = { version = "1.34", features = ["macros", "rt", "rt-multi-thread"] } +url = 
{ version = "2.2" } diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs new file mode 100644 index 0000000..fde1373 --- /dev/null +++ b/hook-consumer/src/config.rs @@ -0,0 +1,56 @@ +use std::str::FromStr; +use std::time; + +use envconfig::Envconfig; + +#[derive(Envconfig, Clone)] +pub struct Config { + #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] + pub database_url: String, + + #[envconfig(default = "consumer")] + pub consumer_name: String, + + #[envconfig(default = "default")] + pub queue_name: String, + + #[envconfig(default = "100")] + pub poll_interval: EnvMsDuration, + + #[envconfig(default = "5000")] + pub request_timeout: EnvMsDuration, + + #[envconfig(nested = true)] + pub retry_policy: RetryPolicyConfig, + + #[envconfig(default = "job_queue")] + pub table_name: String, +} + +#[derive(Debug, Clone, Copy)] +pub struct EnvMsDuration(pub time::Duration); + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseEnvMsDurationError; + +impl FromStr for EnvMsDuration { + type Err = ParseEnvMsDurationError; + + fn from_str(s: &str) -> Result { + let ms = s.parse::().map_err(|_| ParseEnvMsDurationError)?; + + Ok(EnvMsDuration(time::Duration::from_millis(ms))) + } +} + +#[derive(Envconfig, Clone)] +pub struct RetryPolicyConfig { + #[envconfig(default = "2")] + pub backoff_coefficient: u32, + + #[envconfig(default = "1000")] + pub initial_interval: EnvMsDuration, + + #[envconfig(default = "100000")] + pub maximum_interval: EnvMsDuration, +} diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs new file mode 100644 index 0000000..33b09b0 --- /dev/null +++ b/hook-consumer/src/consumer.rs @@ -0,0 +1,492 @@ +use std::collections; +use std::fmt; +use std::str::FromStr; +use std::time; + +use async_std::task; +use hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob}; +use http::StatusCode; +use serde::{de::Visitor, Deserialize, Serialize}; +use thiserror::Error; + +/// Enumeration of errors for operations with WebhookConsumer. +#[derive(Error, Debug)] +pub enum WebhookConsumerError { + #[error("timed out while waiting for jobs to be available")] + TimeoutError, + #[error("{0} is not a valid HttpMethod")] + ParseHttpMethodError(String), + #[error("error parsing webhook headers")] + ParseHeadersError(http::Error), + #[error("error parsing webhook url")] + ParseUrlError(url::ParseError), + #[error("an error occurred in the underlying queue")] + QueueError(#[from] PgQueueError), + #[error("an error occurred in the underlying job")] + PgJobError(String), + #[error("an error occurred when attempting to send a request")] + RequestError(#[from] reqwest::Error), + #[error("a webhook could not be delivered but it could be retried later: {reason}")] + RetryableWebhookError { + reason: String, + retry_after: Option, + }, + #[error("a webhook could not be delivered and it cannot be retried further: {0}")] + NonRetryableWebhookError(String), +} + +/// Supported HTTP methods for webhooks. +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum HttpMethod { + DELETE, + GET, + PATCH, + POST, + PUT, +} + +/// Allow casting `HttpMethod` from strings. 
+impl FromStr for HttpMethod {
+    type Err = WebhookConsumerError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.to_ascii_uppercase().as_ref() {
+            "DELETE" => Ok(HttpMethod::DELETE),
+            "GET" => Ok(HttpMethod::GET),
+            "PATCH" => Ok(HttpMethod::PATCH),
+            "POST" => Ok(HttpMethod::POST),
+            "PUT" => Ok(HttpMethod::PUT),
+            invalid => Err(WebhookConsumerError::ParseHttpMethodError(
+                invalid.to_owned(),
+            )),
+        }
+    }
+}
+
+/// Implement `std::fmt::Display` to convert HttpMethod to string.
+impl fmt::Display for HttpMethod {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            HttpMethod::DELETE => write!(f, "DELETE"),
+            HttpMethod::GET => write!(f, "GET"),
+            HttpMethod::PATCH => write!(f, "PATCH"),
+            HttpMethod::POST => write!(f, "POST"),
+            HttpMethod::PUT => write!(f, "PUT"),
+        }
+    }
+}
+
+struct HttpMethodVisitor;
+
+impl<'de> Visitor<'de> for HttpMethodVisitor {
+    type Value = HttpMethod;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        write!(formatter, "the string representation of HttpMethod")
+    }
+
+    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
+    where
+        E: serde::de::Error,
+    {
+        match HttpMethod::from_str(s) {
+            Ok(method) => Ok(method),
+            Err(_) => Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Str(s),
+                &self,
+            )),
+        }
+    }
+}
+
+/// Deserialize required to read `HttpMethod` from database.
+impl<'de> Deserialize<'de> for HttpMethod {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        deserializer.deserialize_str(HttpMethodVisitor)
+    }
+}
+
+/// Serialize required to write `HttpMethod` to database.
+impl Serialize for HttpMethod {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+/// Convenience to cast `HttpMethod` to `http::Method`.
+/// Not all `http::Method` variants are valid `HttpMethod` variants, hence why we
+/// can't just use the former.
+impl Into<http::Method> for HttpMethod {
+    fn into(self) -> http::Method {
+        match self {
+            HttpMethod::DELETE => http::Method::DELETE,
+            HttpMethod::GET => http::Method::GET,
+            HttpMethod::PATCH => http::Method::PATCH,
+            HttpMethod::POST => http::Method::POST,
+            HttpMethod::PUT => http::Method::PUT,
+        }
+    }
+}
+
+impl Into<http::Method> for &HttpMethod {
+    fn into(self) -> http::Method {
+        match self {
+            HttpMethod::DELETE => http::Method::DELETE,
+            HttpMethod::GET => http::Method::GET,
+            HttpMethod::PATCH => http::Method::PATCH,
+            HttpMethod::POST => http::Method::POST,
+            HttpMethod::PUT => http::Method::PUT,
+        }
+    }
+}
+
+/// `JobParameters` required for the `WebhookConsumer` to execute a webhook.
+/// These parameters should match the exported Webhook interface that PostHog plugins
+/// implement. See: https://github.com/PostHog/plugin-scaffold/blob/main/src/types.ts#L15.
+#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)]
+pub struct WebhookJobParameters {
+    body: String,
+    headers: collections::HashMap<String, String>,
+    method: HttpMethod,
+    url: String,
+}
+
+/// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available.
+pub struct WebhookConsumer<'p> {
+    /// An identifier for this consumer. Used to mark jobs we have consumed.
+    name: String,
+    /// The queue we will be dequeuing jobs from.
+    queue: &'p PgQueue,
+    /// The interval for polling the queue.
+    poll_interval: time::Duration,
+    /// A timeout for webhook requests.
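+    /// (Defaults to 5000 ms via `Config::request_timeout` when not overridden.)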
+    request_timeout: time::Duration,
+}
+
+impl<'p> WebhookConsumer<'p> {
+    pub fn new(
+        name: &str,
+        queue: &'p PgQueue,
+        poll_interval: time::Duration,
+        request_timeout: time::Duration,
+    ) -> Self {
+        Self {
+            name: name.to_owned(),
+            queue,
+            poll_interval,
+            request_timeout,
+        }
+    }
+
+    /// Wait until a job becomes available in our queue.
+    async fn wait_for_job<'a>(
+        &self,
+    ) -> Result<PgTransactionJob<'a, WebhookJobParameters>, WebhookConsumerError> {
+        loop {
+            if let Some(job) = self.queue.dequeue_tx(&self.name).await? {
+                return Ok(job);
+            } else {
+                task::sleep(self.poll_interval).await;
+            }
+        }
+    }
+
+    /// Run this consumer to continuously process any jobs that become available.
+    pub async fn run(&self) -> Result<(), WebhookConsumerError> {
+        loop {
+            let webhook_job = self.wait_for_job().await?;
+
+            let request_timeout = self.request_timeout; // Required to avoid capturing self in closure.
+            tokio::spawn(async move { process_webhook_job(webhook_job, request_timeout).await });
+        }
+    }
+}
+
+/// Process a webhook job by transitioning it to its appropriate state after its request is sent.
+/// After we finish, the webhook job will be set as completed (if the request was successful), retryable (if the request
+/// was unsuccessful but we can still attempt a retry), or failed (if the request was unsuccessful and no more retries
+/// may be attempted).
+///
+/// A webhook job is considered retryable after a failing request if:
+/// 1. The job has attempts remaining (i.e. hasn't reached `max_attempts`), and...
+/// 2. The status code indicates retrying at a later point could resolve the issue. This means: 429 and any 5XX.
+///
+/// # Arguments
+///
+/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`.
+/// * `request_timeout`: A timeout for the HTTP request.
+async fn process_webhook_job(
+    webhook_job: PgTransactionJob<'_, WebhookJobParameters>,
+    request_timeout: std::time::Duration,
+) -> Result<(), WebhookConsumerError> {
+    match send_webhook(
+        &webhook_job.job.parameters.method,
+        &webhook_job.job.parameters.url,
+        &webhook_job.job.parameters.headers,
+        webhook_job.job.parameters.body.clone(),
+        request_timeout,
+    )
+    .await
+    {
+        Ok(_) => {
+            webhook_job
+                .complete()
+                .await
+                .map_err(|error| WebhookConsumerError::PgJobError(error.to_string()))?;
+            Ok(())
+        }
+        Err(WebhookConsumerError::RetryableWebhookError {
+            reason,
+            retry_after,
+        }) => match webhook_job.retry(reason.to_string(), retry_after).await {
+            Ok(_) => Ok(()),
+            Err(PgJobError::RetryInvalidError {
+                job: webhook_job,
+                error: fail_error,
+            }) => {
+                webhook_job
+                    .fail(fail_error.to_string())
+                    .await
+                    .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?;
+                Ok(())
+            }
+            Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())),
+        },
+        Err(error) => {
+            webhook_job
+                .fail(error.to_string())
+                .await
+                .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?;
+            Ok(())
+        }
+    }
+}
+
+/// Make an HTTP request to a webhook endpoint.
+///
+/// # Arguments
+///
+/// * `method`: The HTTP method to use in the HTTP request.
+/// * `url`: The URL we are targeting with our request. Parsing this URL can fail.
+/// * `headers`: Key, value pairs of HTTP headers in a `std::collections::HashMap`. Can fail if headers are not valid.
+/// * `body`: The body of the request. Ownership is required.
+/// * `timeout`: A timeout for the HTTP request.
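+///
+/// A rough usage sketch, assuming the echo server from docker-compose is listening on port 18081:
+///
+/// ```ignore
+/// let headers = std::collections::HashMap::new();
+/// let response = send_webhook(
+///     &HttpMethod::POST,
+///     "http://localhost:18081/echo",
+///     &headers,
+///     "a body".to_owned(),
+///     std::time::Duration::from_secs(5),
+/// )
+/// .await?;
+/// assert!(response.status().is_success());
+/// ```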
+async fn send_webhook(
+    method: &HttpMethod,
+    url: &str,
+    headers: &collections::HashMap<String, String>,
+    body: String,
+    timeout: std::time::Duration,
+) -> Result<reqwest::Response, WebhookConsumerError> {
+    let client = reqwest::Client::new();
+    let method: http::Method = method.into();
+    let url: reqwest::Url = (url)
+        .parse()
+        .map_err(|error| WebhookConsumerError::ParseUrlError(error))?;
+    let headers: reqwest::header::HeaderMap = (headers)
+        .try_into()
+        .map_err(|error| WebhookConsumerError::ParseHeadersError(error))?;
+
+    let body = reqwest::Body::from(body);
+    let response = client
+        .request(method, url)
+        .headers(headers)
+        .timeout(timeout)
+        .body(body)
+        .send()
+        .await?;
+
+    let status = response.status();
+
+    if status.is_success() {
+        Ok(response)
+    } else if is_retryable_status(status) {
+        let retry_after = parse_retry_after_header(response.headers());
+
+        Err(WebhookConsumerError::RetryableWebhookError {
+            reason: format!("retryable status code {}", status),
+            retry_after,
+        })
+    } else {
+        Err(WebhookConsumerError::NonRetryableWebhookError(format!(
+            "non-retryable status code {}",
+            status
+        )))
+    }
+}
+
+fn is_retryable_status(status: StatusCode) -> bool {
+    status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()
+}
+
+/// Attempt to parse a `std::time::Duration` from a Retry-After header, returning None if not possible.
+/// Retry-After header can specify a date in RFC2822 or a number of seconds; we try to parse both.
+/// If a Retry-After header is not present in the provided `header_map`, `None` is returned.
+///
+/// # Arguments
+///
+/// * `header_map`: A `&reqwest::HeaderMap` of response headers that could contain Retry-After.
+fn parse_retry_after_header(header_map: &reqwest::header::HeaderMap) -> Option<time::Duration> {
+    let retry_after_header = header_map.get(reqwest::header::RETRY_AFTER);
+
+    let retry_after = match retry_after_header {
+        Some(header_value) => match header_value.to_str() {
+            Ok(s) => s,
+            Err(_) => {
+                return None;
+            }
+        },
+        None => {
+            return None;
+        }
+    };
+
+    if let Ok(u) = u64::from_str_radix(retry_after, 10) {
+        let duration = time::Duration::from_secs(u);
+        return Some(duration);
+    }
+
+    if let Ok(dt) = chrono::DateTime::parse_from_rfc2822(retry_after) {
+        let duration =
+            chrono::DateTime::<chrono::offset::Utc>::from(dt) - chrono::offset::Utc::now();
+
+        // This can only fail when negative, in which case we return None.
+        return duration.to_std().ok();
+    }
+
+    None
+}
+
+mod tests {
+    use super::*;
+    // Note we are ignoring some warnings in this module.
+    // This is due to a long-standing cargo bug that reports imports and helper functions as unused.
+    // See: https://github.com/rust-lang/rust/issues/46379.
+    #[allow(unused_imports)]
+    use hook_common::pgqueue::{JobStatus, NewJob, RetryPolicy};
+
+    /// Use process id as a worker id for tests.
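+    /// (A process id is distinct per test process, which keeps `attempted_by` assertions stable.)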
+ #[allow(dead_code)] + fn worker_id() -> String { + std::process::id().to_string() + } + + #[allow(dead_code)] + async fn enqueue_job( + queue: &PgQueue, + max_attempts: i32, + job_parameters: WebhookJobParameters, + ) -> Result<(), PgQueueError> { + let job_target = job_parameters.url.to_owned(); + let new_job = NewJob::new(max_attempts, job_parameters, &job_target); + queue.enqueue(new_job).await?; + Ok(()) + } + + #[test] + fn test_is_retryable_status() { + assert!(!is_retryable_status(http::StatusCode::FORBIDDEN)); + assert!(!is_retryable_status(http::StatusCode::BAD_REQUEST)); + assert!(is_retryable_status(http::StatusCode::TOO_MANY_REQUESTS)); + assert!(is_retryable_status(http::StatusCode::INTERNAL_SERVER_ERROR)); + } + + #[test] + fn test_parse_retry_after_header() { + let mut headers = reqwest::header::HeaderMap::new(); + headers.insert(reqwest::header::RETRY_AFTER, "120".parse().unwrap()); + + let duration = parse_retry_after_header(&headers).unwrap(); + assert_eq!(duration, time::Duration::from_secs(120)); + + headers.remove(reqwest::header::RETRY_AFTER); + + let duration = parse_retry_after_header(&headers); + assert_eq!(duration, None); + + headers.insert( + reqwest::header::RETRY_AFTER, + "Wed, 21 Oct 2015 07:28:00 GMT".parse().unwrap(), + ); + + let duration = parse_retry_after_header(&headers); + assert_eq!(duration, None); + } + + #[tokio::test] + async fn test_wait_for_job() { + let worker_id = worker_id(); + let queue_name = "test_wait_for_job".to_string(); + let table_name = "job_queue".to_string(); + let db_url = "postgres://posthog:posthog@localhost:15432/test_database".to_string(); + let queue = PgQueue::new(&queue_name, &table_name, &db_url, RetryPolicy::default()) + .await + .expect("failed to connect to PG"); + + let webhook_job = WebhookJobParameters { + body: "a webhook job body. much wow.".to_owned(), + headers: collections::HashMap::new(), + method: HttpMethod::POST, + url: "localhost".to_owned(), + }; + // enqueue takes ownership of the job enqueued to avoid bugs that can cause duplicate jobs. + // Normally, a separate application would be enqueueing jobs for us to consume, so no ownership + // conflicts would arise. However, in this test we need to do the enqueueing ourselves. + // So, we clone the job to keep it around and assert the values returned by wait_for_job. 
+ enqueue_job(&queue, 1, webhook_job.clone()) + .await + .expect("failed to enqueue job"); + let consumer = WebhookConsumer::new( + &worker_id, + &queue, + time::Duration::from_millis(100), + time::Duration::from_millis(5000), + ); + let consumed_job = consumer + .wait_for_job() + .await + .expect("failed to wait and read job"); + + assert_eq!(consumed_job.job.attempt, 1); + assert!(consumed_job.job.attempted_by.contains(&worker_id)); + assert_eq!(consumed_job.job.attempted_by.len(), 1); + assert_eq!(consumed_job.job.max_attempts, 1); + assert_eq!(*consumed_job.job.parameters.as_ref(), webhook_job); + assert_eq!(consumed_job.job.status, JobStatus::Running); + assert_eq!(consumed_job.job.target, webhook_job.url); + + consumed_job + .complete() + .await + .expect("job not successfully completed"); + } + + #[tokio::test] + async fn test_send_webhook() { + let method = HttpMethod::POST; + let url = "http://localhost:18081/echo"; + let headers = collections::HashMap::new(); + let body = "a very relevant request body"; + let response = send_webhook( + &method, + url, + &headers, + body.to_owned(), + time::Duration::from_millis(5000), + ) + .await + .expect("send_webhook failed"); + + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response.text().await.expect("failed to read response body"), + body.to_owned(), + ); + } +} diff --git a/hook-consumer/src/lib.rs b/hook-consumer/src/lib.rs new file mode 100644 index 0000000..cc746b0 --- /dev/null +++ b/hook-consumer/src/lib.rs @@ -0,0 +1,2 @@ +pub mod config; +pub mod consumer; diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index e7a11a9..22acee1 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -1,3 +1,33 @@ -fn main() { - println!("Hello, world!"); +use envconfig::Envconfig; + +use hook_common::pgqueue::{PgQueue, RetryPolicy}; +use hook_consumer::config::Config; +use hook_consumer::consumer::WebhookConsumer; + +#[tokio::main] +async fn main() { + let config = Config::init_from_env().expect("Invalid configuration:"); + + let retry_policy = RetryPolicy::new( + config.retry_policy.backoff_coefficient, + config.retry_policy.initial_interval.0, + Some(config.retry_policy.maximum_interval.0), + ); + let queue = PgQueue::new( + &config.queue_name, + &config.table_name, + &config.database_url, + retry_policy, + ) + .await + .expect("failed to initialize queue"); + + let consumer = WebhookConsumer::new( + &config.consumer_name, + &queue, + config.poll_interval.0, + config.request_timeout.0, + ); + + let _ = consumer.run().await; } From d91c90a89af31c4895b7fc4109b20caff90a9bb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 6 Dec 2023 14:17:08 +0100 Subject: [PATCH 028/130] chore: Cargo lock update --- Cargo.lock | 731 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 559 insertions(+), 172 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 451d424..0764358 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -52,16 +52,149 @@ dependencies = [ ] [[package]] -name = "async-trait" -version = "0.1.74" +name = "async-channel" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.1.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +dependencies = [ + "concurrent-queue", + "event-listener 4.0.0", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.2.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.1.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4353121d5644cdf2beb5726ab752e79a8db1ebb52031770ec47db31d245526" +dependencies = [ + "async-channel 2.1.1", + "async-executor", + "async-io 2.2.1", + "async-lock 3.2.0", + "blocking", + "futures-lite 2.1.0", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" +dependencies = [ + "async-lock 3.2.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.1.0", + "parking", + "polling 3.3.1", + "rustix 0.38.25", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +dependencies = [ + "event-listener 4.0.0", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", ] +[[package]] +name = "async-task" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" + [[package]] name = "atoi" version = "2.0.0" @@ -71,6 +204,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "atomic-write-file" version = "0.1.2" @@ -191,6 +330,22 @@ dependencies = [ "generic-array", ] 
+[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.1.1", + "async-lock 3.2.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.1.0", + "piper", + "tracing", +] + [[package]] name = "bumpalo" version = "3.14.0" @@ -238,6 +393,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "const-oid" version = "0.9.5" @@ -364,6 +528,15 @@ dependencies = [ "serde", ] +[[package]] +name = "encoding_rs" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +dependencies = [ + "cfg-if", +] + [[package]] name = "envconfig" version = "0.10.0" @@ -418,13 +591,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] -name = "eyre" -version = "0.6.9" +name = "event-listener" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80f656be11ddf91bd709454d15d5bd896fbaf4cc3314e69349e4d1569f5b46cd" +checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" dependencies = [ - "indenter", - "once_cell", + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.0", + "pin-project-lite", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", ] [[package]] @@ -480,6 +673,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "futures" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.29" @@ -524,6 +732,45 @@ version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = 
"futures-macro" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "futures-sink" version = "0.3.29" @@ -542,8 +789,10 @@ version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ + "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", @@ -579,40 +828,37 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "h2" -version = "0.4.0" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http 1.0.0", - "indexmap 2.1.0", + "http", + "indexmap", "slab", "tokio", "tokio-util", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" -dependencies = [ - "ahash", -] - [[package]] name = "hashbrown" version = "0.14.3" @@ -695,6 +941,21 @@ dependencies = [ [[package]] name = "hook-consumer" version = "0.1.0" +dependencies = [ + "async-std", + "chrono", + "envconfig", + "futures", + "hook-common", + "http", + "reqwest", + "serde", + "serde_derive", + "sqlx", + "thiserror", + "tokio", + "url", +] [[package]] name = "hook-producer" @@ -840,6 +1101,77 @@ dependencies = [ "tracing", ] +[[package]] +name = "http" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + 
"futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.4.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.58" @@ -899,6 +1231,26 @@ dependencies = [ "hashbrown 0.14.3", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -929,6 +1281,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -963,7 +1324,13 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" @@ -982,6 +1349,9 @@ name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +dependencies = [ + "value-bag", +] [[package]] name = "mach2" @@ -1014,70 +1384,6 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - -[[package]] -name = "metrics" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" -dependencies = [ - "ahash", - "metrics-macros", - "portable-atomic", -] - -[[package]] -name = "metrics-exporter-prometheus" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" -dependencies = [ - "base64", - "hyper 0.14.27", - "indexmap 1.9.3", - "ipnet", - "metrics", - "metrics-util", - "quanta", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "metrics-macros" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" -dependencies = [ - "proc-macro2", - 
"quote", - "syn 2.0.39", -] - -[[package]] -name = "metrics-util" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", - "hashbrown 0.13.1", - "metrics", - "num_cpus", - "quanta", - "sketches-ddsketch", -] - [[package]] name = "mime" version = "0.3.17" @@ -1277,10 +1583,10 @@ dependencies = [ ] [[package]] -name = "overload" -version = "0.1.1" +name = "parking" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -1358,6 +1664,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -1386,10 +1703,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] -name = "portable-atomic" -version = "1.5.1" +name = "polling" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix 0.38.25", + "tracing", + "windows-sys 0.52.0", +] [[package]] name = "ppv-lite86" @@ -1479,6 +1820,44 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "reqwest" +version = "0.11.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "rsa" version = "0.9.6" @@ -1507,15 +1886,29 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustix" -version = "0.38.26" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] 
+name = "rustix" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.11", + "windows-sys 0.48.0", ] [[package]] @@ -1599,16 +1992,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_path_to_error" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" -dependencies = [ - "itoa", - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -1776,7 +2159,7 @@ dependencies = [ "crossbeam-queue", "dotenvy", "either", - "event-listener", + "event-listener 2.5.3", "futures-channel", "futures-core", "futures-intrusive", @@ -1994,10 +2377,25 @@ dependencies = [ ] [[package]] -name = "sync_wrapper" -version = "0.1.2" +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] [[package]] name = "tempfile" @@ -2006,9 +2404,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.0.1", "redox_syscall", - "rustix", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -2068,9 +2466,7 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot", "pin-project-lite", - "signal-hook-registry", "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", @@ -2087,6 +2483,16 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.14" @@ -2112,28 +2518,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - [[package]] name = "tower-service" version = "0.3.2" @@ -2173,31 +2557,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" -dependencies = [ - "nu-ansi-term", - "sharded-slab", - "smallvec", - "thread_local", - "tracing-core", - "tracing-log", -] - [[package]] name = "try-lock" version = "0.2.4" @@ -2267,10 +2626,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" [[package]] -name = "valuable" -version = "0.1.0" +name = "value-bag" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "4a72e1902dde2bd6441347de2b70b7f5d59bf157c6c62f0c44572607a1d55bbe" [[package]] name = "vcpkg" @@ -2284,6 +2643,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + [[package]] name = "want" version = "0.3.1" @@ -2324,6 +2689,18 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.89" @@ -2532,6 +2909,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "zerocopy" version = "0.7.28" From 3bdd1c05d736835eaa8454fd3348fac60430bf02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 6 Dec 2023 14:17:35 +0100 Subject: [PATCH 029/130] chore: Remove install of sqlx from CI --- .github/workflows/rust.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ee0e5b3..5ddac41 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -49,13 +49,6 @@ jobs: docker compose -f docker-compose.yml down docker compose -f docker-compose.yml up -d --wait - - name: Run migrations - shell: bash - run: | - cargo install sqlx-cli --no-default-features --features native-tls,postgres - DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database sqlx database create - DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database sqlx migrate run - - uses: actions/cache@v3 with: path: | From c25ac6fde1d322b1c6cdccab574ae503d4ae83da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 6 Dec 2023 14:17:46 +0100 Subject: [PATCH 030/130] fix: Use depends_on instead of requires --- docker-compose.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 6f62692..afaf48e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,8 +23,10 @@ services: restart: 
on-failure
     command: >
       sh -c "sqlx database create && sqlx migrate run"
-    requires:
-      - db
+    depends_on:
+      db:
+        condition: service_healthy
+        restart: true
     environment:
       DATABASE_URL: postgres://posthog:posthog@db:5432/test_database
     volumes:

From 8c7cb796bca701d7b2b520f15582d7c90bb8dd2b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Wed, 6 Dec 2023 14:25:33 +0100
Subject: [PATCH 031/130] fix: Address clippy linting issues

---
 hook-consumer/src/consumer.rs | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index 33b09b0..bf9f818 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -121,10 +121,10 @@ impl Serialize for HttpMethod {
 
 /// Convinience to cast `HttpMethod` to `http::Method`.
 /// Not all `http::Method` variants are valid `HttpMethod` variants, hence why we
-/// can't just use the former.
-impl Into<http::Method> for HttpMethod {
-    fn into(self) -> http::Method {
-        match self {
+/// can't just use the former or implement `From`.
+impl From<HttpMethod> for http::Method {
+    fn from(val: HttpMethod) -> Self {
+        match val {
             HttpMethod::DELETE => http::Method::DELETE,
             HttpMethod::GET => http::Method::GET,
             HttpMethod::PATCH => http::Method::PATCH,
@@ -134,9 +134,9 @@
     }
 }
 
-impl Into<http::Method> for &HttpMethod {
-    fn into(self) -> http::Method {
-        match self {
+impl From<&HttpMethod> for http::Method {
+    fn from(val: &HttpMethod) -> Self {
+        match val {
             HttpMethod::DELETE => http::Method::DELETE,
             HttpMethod::GET => http::Method::GET,
             HttpMethod::PATCH => http::Method::PATCH,
@@ -203,7 +203,7 @@ impl<'p> WebhookConsumer<'p> {
         let webhook_job = self.wait_for_job().await?;
 
         let request_timeout = self.request_timeout; // Required to avoid capturing self in closure.
-        tokio::spawn(async move { process_webhook_job(webhook_job, request_timeout) });
+        tokio::spawn(async move { process_webhook_job(webhook_job, request_timeout).await });
     }
 }
 
@@ -286,12 +286,10 @@ async fn send_webhook(
 ) -> Result<reqwest::Response, WebhookConsumerError> {
     let client = reqwest::Client::new();
     let method: http::Method = method.into();
-    let url: reqwest::Url = (url)
-        .parse()
-        .map_err(|error| WebhookConsumerError::ParseUrlError(error))?;
+    let url: reqwest::Url = (url).parse().map_err(WebhookConsumerError::ParseUrlError)?;
     let headers: reqwest::header::HeaderMap = (headers)
         .try_into()
-        .map_err(|error| WebhookConsumerError::ParseHeadersError(error))?;
+        .map_err(WebhookConsumerError::ParseHeadersError)?;
     let body = reqwest::Body::from(body);
 
     let response = client
@@ -347,7 +345,7 @@ fn parse_retry_after_header(header_map: &reqwest::header::HeaderMap) -> Option<time::Duration> {
-    if let Ok(u) = u64::from_str_radix(retry_after, 10) {
+    if let Ok(u) = retry_after.parse::<u64>() {
         let duration = time::Duration::from_secs(u);
         return Some(duration);
     }
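
The clippy lint behind the `Into`-to-`From` rewrite above is `from_over_into`: implementing `From` is preferred because the standard library's blanket implementation `impl<T, U: From<T>> Into<U> for T` provides `Into` for free, while an `Into` implementation yields no `From`. Below is a minimal, self-contained sketch of that pattern; the `Method` enum is a hypothetical stand-in for `http::Method`, so the snippet runs without the `http` crate:

    // Caller-facing enum, mirroring the role of `HttpMethod` in hook-consumer.
    #[derive(Debug)]
    enum HttpMethod {
        Get,
        Post,
    }

    // Hypothetical stand-in for `http::Method`, not the real crate type.
    #[derive(Debug, PartialEq)]
    enum Method {
        Get,
        Post,
    }

    // Implement only `From`, as the patch does...
    impl From<HttpMethod> for Method {
        fn from(val: HttpMethod) -> Self {
            match val {
                HttpMethod::Get => Method::Get,
                HttpMethod::Post => Method::Post,
            }
        }
    }

    fn main() {
        // ...and `.into()` still works, courtesy of the std blanket impl
        // `impl<T, U: From<T>> Into<U> for T`.
        let method: Method = HttpMethod::Post.into();
        assert_eq!(method, Method::Post);
    }

That same blanket impl is why the `method.into()` call inside `send_webhook` compiles unchanged after the swap.
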
From 2d99f77d80ffaae8fb1e754e85d4b8774b8dfc93 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Wed, 6 Dec 2023 14:36:44 +0100
Subject: [PATCH 032/130] fix: Split up docker compose start up in CI

---
 .github/workflows/rust.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 5ddac41..b811c1a 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -47,7 +47,8 @@ jobs:
       shell: bash
       run: |
         docker compose -f docker-compose.yml down
-        docker compose -f docker-compose.yml up -d --wait
+        docker compose -f docker-compose.yml up db echo_server -d --wait
+        docker compose -f docker-compose.yml up setup_test_db
 
     - uses: actions/cache@v3
       with:

From 71d59299c2dc49608ae8807a57b0ecc738c73d40 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Thu, 7 Dec 2023 10:51:33 +0100
Subject: [PATCH 033/130] fix: Typo

Co-authored-by: Brett Hoerner
---
 hook-consumer/src/consumer.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index bf9f818..a8f2fa6 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -119,7 +119,7 @@ impl Serialize for HttpMethod {
     }
 }
 
-/// Convinience to cast `HttpMethod` to `http::Method`.
+/// Convenience to cast `HttpMethod` to `http::Method`.
 /// Not all `http::Method` variants are valid `HttpMethod` variants, hence why we
 /// can't just use the former or implement `From`.

From 09acfef888a5c1835ca9118a3320fe7995c6a1c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 8 Dec 2023 11:37:24 +0100
Subject: [PATCH 034/130] fix: Delete Cargo.lock

---
 Cargo.lock | 2946 ----------------------------------------------------
 1 file changed, 2946 deletions(-)
 delete mode 100644 Cargo.lock

diff --git a/Cargo.lock b/Cargo.lock
deleted file mode 100644
index 0764358..0000000
--- a/Cargo.lock
+++ /dev/null
@@ -1,2946 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-
-version = 3 - -[[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "ahash" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" -dependencies = [ - "cfg-if", - "getrandom", - "once_cell", - "version_check", - "zerocopy", -] - -[[package]] -name = "allocator-api2" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" -dependencies = [ - "concurrent-queue", - "event-listener 4.0.0", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-executor" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" -dependencies = [ - "async-lock 3.2.0", - "async-task", - "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.1.0", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4353121d5644cdf2beb5726ab752e79a8db1ebb52031770ec47db31d245526" -dependencies = [ - "async-channel 2.1.1", - "async-executor", - "async-io 2.2.1", - "async-lock 3.2.0", - "blocking", - "futures-lite 2.1.0", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" -dependencies = [ - "async-lock 3.2.0", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.1.0", - "parking", - "polling 3.3.1", - "rustix 0.38.25", - "slab", - "tracing", - "windows-sys 
0.52.0", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" -dependencies = [ - "event-listener 4.0.0", - "event-listener-strategy", - "pin-project-lite", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-channel 1.9.0", - "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite 1.13.0", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - -[[package]] -name = "async-task" -version = "4.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" - -[[package]] -name = "atoi" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" -dependencies = [ - "num-traits", -] - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - -[[package]] -name = "atomic-write-file" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" -dependencies = [ - "nix", - "rand", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "axum" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" -dependencies = [ - "async-trait", - "axum-core", - "bytes", - "futures-util", - "http 1.0.0", - "http-body 1.0.0", - "http-body-util", - "hyper 1.0.1", - "hyper-util", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", - "sync_wrapper", - "tokio", - "tower", - "tower-layer", - "tower-service", -] - -[[package]] -name = "axum-core" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 1.0.0", - "http-body 1.0.0", - "http-body-util", - "mime", - "pin-project-lite", - "rustversion", - "sync_wrapper", - "tower-layer", - "tower-service", -] - -[[package]] -name = "backtrace" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", 
-] - -[[package]] -name = "base64" -version = "0.21.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" -dependencies = [ - "serde", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "blocking" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" -dependencies = [ - "async-channel 2.1.1", - "async-lock 3.2.0", - "async-task", - "fastrand 2.0.1", - "futures-io", - "futures-lite 2.1.0", - "piper", - "tracing", -] - -[[package]] -name = "bumpalo" -version = "3.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" - -[[package]] -name = "cc" -version = "1.0.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "wasm-bindgen", - "windows-targets 0.48.5", -] - -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "const-oid" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.6" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - -[[package]] -name = "cpufeatures" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" -dependencies = [ - "libc", -] - -[[package]] -name = "crc" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "der" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" -dependencies = [ - "const-oid", - "pem-rfc7468", - "zeroize", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "const-oid", - "crypto-common", - "subtle", -] - -[[package]] -name = "dotenvy" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" - -[[package]] -name = "either" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" -dependencies = [ - "serde", -] - -[[package]] -name = "encoding_rs" -version = "0.8.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "envconfig" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea81cc7e21f55a9d9b1efb6816904978d0bfbe31a50347cb24b2e75564bcac9b" -dependencies = [ - "envconfig_derive", -] - -[[package]] -name = "envconfig_derive" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfca278e5f84b45519acaaff758ebfa01f18e96998bc24b8f1b722dd804b9bf" -dependencies = [ - "proc-macro2", 
- "quote", - "syn 1.0.109", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "etcetera" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" -dependencies = [ - "cfg-if", - "home", - "windows-sys 0.48.0", -] - -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.0", - "pin-project-lite", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" - -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - -[[package]] -name = "flume" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" -dependencies = [ - "futures-core", - "futures-sink", - "spin 0.9.8", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" -dependencies = [ - "futures-channel", - 
"futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" - -[[package]] -name = "futures-executor" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-intrusive" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" -dependencies = [ - "futures-core", - "lock_api", - "parking_lot", -] - -[[package]] -name = "futures-io" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" - -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" -dependencies = [ - "fastrand 2.0.1", - "futures-core", - "futures-io", - "parking", - "pin-project-lite", -] - -[[package]] -name = "futures-macro" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", -] - -[[package]] -name = "futures-sink" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" - -[[package]] -name = "futures-task" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" - -[[package]] -name = "futures-util" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" -dependencies = [ - "cfg-if", - "libc", - 
"wasi", -] - -[[package]] -name = "gimli" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" - -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "h2" -version = "0.3.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" -dependencies = [ - "ahash", - "allocator-api2", -] - -[[package]] -name = "hashlink" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" -dependencies = [ - "hashbrown 0.14.3", -] - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hkdf" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" -dependencies = [ - "hmac", -] - -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest", -] - -[[package]] -name = "home" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" -dependencies = [ - "windows-sys 0.48.0", -] - -[[package]] -name = "hook-common" -version = "0.1.0" -dependencies = [ - "chrono", - "serde", - "serde_derive", - "sqlx", - "thiserror", - "tokio", -] - -[[package]] -name = "hook-consumer" -version = "0.1.0" -dependencies = [ - "async-std", - "chrono", - "envconfig", - "futures", - "hook-common", - "http", - "reqwest", - "serde", - "serde_derive", - "sqlx", - "thiserror", - "tokio", - "url", -] - -[[package]] -name = "hook-producer" -version = "0.1.0" -dependencies = [ - "axum", - "envconfig", - "eyre", - "metrics", - "metrics-exporter-prometheus", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "http" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] 
-name = "http" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http 0.2.11", - "pin-project-lite", -] - -[[package]] -name = "http-body" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" -dependencies = [ - "bytes", - "http 1.0.0", -] - -[[package]] -name = "http-body-util" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" -dependencies = [ - "bytes", - "futures-util", - "http 1.0.0", - "http-body 1.0.0", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "0.14.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http 0.2.11", - "http-body 0.4.5", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.4.10", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f9214f3e703236b221f1a9cd88ec8b4adfa5296de01ab96216361f4692f56" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "h2", - "http 1.0.0", - "http-body 1.0.0", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "hyper-util" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca339002caeb0d159cc6e023dff48e199f081e42fa039895c7c6f38b37f2e9d" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "http 1.0.0", - "http-body 1.0.0", - "hyper 1.0.1", - "pin-project-lite", - "socket2 0.5.5", - "tokio", - "tower", - "tower-service", - "tracing", -] - -[[package]] -name = "http" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "0.14.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.4.10", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "idna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" -dependencies = [ - "equivalent", - "hashbrown 0.14.3", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "ipnet" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" - -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" - -[[package]] -name = "js-sys" -version = "0.3.66" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin 0.5.2", -] - -[[package]] -name = "libc" -version = "0.2.150" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" - -[[package]] -name = "libm" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" - -[[package]] -name = "libsqlite3-sys" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" - -[[package]] -name = "lock_api" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" -dependencies = [ - "value-bag", -] - -[[package]] -name = "mach2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" -dependencies = [ - "libc", -] - -[[package]] -name = "matchit" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" - -[[package]] -name = "md-5" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" -dependencies = [ - "cfg-if", - "digest", -] - -[[package]] -name = "memchr" -version = "2.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "miniz_oxide" -version = "0.7.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.48.0", -] - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "nix" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" -dependencies = [ - "bitflags 2.4.1", - "cfg-if", - "libc", -] - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - -[[package]] -name = "num-bigint-dig" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" -dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand", - "smallvec", - "zeroize", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" -dependencies = [ - "autocfg", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.32.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "openssl" -version = "0.10.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" 
[diff content omitted: the rest of the deleted, auto-generated Cargo.lock, from the tail of the "openssl" stanza through "zeroize"; each [[package]] stanza lists name, version, source, checksum, and dependencies]
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbbad221e3f78500350ecbd7dfa4e63ef945c05f4c61cb7f4d3f84cd0bba649b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.39", -] - -[[package]] -name = "zeroize" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" From 6d3080de965279d6b0bb82ce9c331eee2b6a9ef5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 11:39:00 +0100 Subject: [PATCH 035/130] fix: Re-add Cargo.lock --- Cargo.lock | 3091 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 3091 insertions(+) create mode 100644 Cargo.lock diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..b24af98 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,3091 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +dependencies = [ + "cfg-if", + "getrandom", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +dependencies = [ + "concurrent-queue", + "event-listener 4.0.0", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.2.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.1.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4353121d5644cdf2beb5726ab752e79a8db1ebb52031770ec47db31d245526" +dependencies = [ 
+ "async-channel 2.1.1", + "async-executor", + "async-io 2.2.1", + "async-lock 3.2.0", + "blocking", + "futures-lite 2.1.0", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" +dependencies = [ + "async-lock 3.2.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.1.0", + "parking", + "polling 3.3.1", + "rustix 0.38.27", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +dependencies = [ + "event-listener 4.0.0", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" + +[[package]] +name = "async-trait" +version = "0.1.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "atomic-write-file" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" +dependencies = [ + "nix", + "rand", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "axum" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.0.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +dependencies = [ + "serde", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.1.1", + "async-lock 3.2.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.1.0", + "piper", + "tracing", +] + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets 0.48.5", +] + +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-oid" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "cpufeatures" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "der" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +dependencies = [ + "serde", +] + +[[package]] +name = "encoding_rs" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "envconfig" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea81cc7e21f55a9d9b1efb6816904978d0bfbe31a50347cb24b2e75564bcac9b" +dependencies = [ + "envconfig_derive", +] + +[[package]] +name = "envconfig_derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfca278e5f84b45519acaaff758ebfa01f18e96998bc24b8f1b722dd804b9bf" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.0", + "pin-project-lite", +] + +[[package]] +name = "eyre" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bbb8258be8305fb0237d7b295f47bb24ff1b136a535f473baf40e70468515aa" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" + +[[package]] +name = "futures-executor" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + +[[package]] +name = "futures-io" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = 
"futures-macro" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "futures-sink" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" + +[[package]] +name = "futures-task" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" + +[[package]] +name = "futures-util" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "h2" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.11", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.0.0", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.8.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.3", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "hook-common" +version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "serde_derive", + "sqlx", + "thiserror", + "tokio", +] + +[[package]] +name = "hook-consumer" +version = "0.1.0" +dependencies = [ + "async-std", + "chrono", + "envconfig", + "futures", + "hook-common", + "http 0.2.11", + "reqwest", + "serde", + "serde_derive", + "sqlx", + "thiserror", + "tokio", + "url", +] + +[[package]] +name = "hook-producer" +version = "0.1.0" +dependencies = [ + "axum", + "envconfig", + "eyre", + "metrics", + "metrics-exporter-prometheus", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "http" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.22", + "http 0.2.11", + "http-body 0.4.5", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.4.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403f9214f3e703236b221f1a9cd88ec8b4adfa5296de01ab96216361f4692f56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.0", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.27", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca339002caeb0d159cc6e023dff48e199f081e42fa039895c7c6f38b37f2e9d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.0.1", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tower", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "itertools" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] +name = "js-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] + +[[package]] +name = "libc" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libsqlite3-sys" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +dependencies = [ + "value-bag", +] + +[[package]] +name = "mach2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" +dependencies = [ + "libc", +] + +[[package]] +name 
= "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "metrics" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +dependencies = [ + "ahash", + "metrics-macros", + "portable-atomic", +] + +[[package]] +name = "metrics-exporter-prometheus" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +dependencies = [ + "base64", + "hyper 0.14.27", + "indexmap 1.9.3", + "ipnet", + "metrics", + "metrics-util", + "quanta", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "metrics-macros" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "metrics-util" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown 0.13.1", + "metrics", + "num_cpus", + "quanta", + "sketches-ddsketch", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "libc", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "openssl" +version = "0.10.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.48.5", +] + +[[package]] +name = "paste" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix 0.38.27", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quanta" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +dependencies = [ + "crossbeam-utils", + "libc", + "mach2", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "raw-cpuid" +version = "10.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "reqwest" +version = "0.11.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2 0.3.22", + "http 0.2.11", + "http-body 0.4.5", + "hyper 0.14.27", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + 
"system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + +[[package]] +name = "rsa" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfeae074e687625746172d639330f1de242a178bf3189b51e35a7a21573513ac" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys 0.4.12", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "ryu" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.193" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "serde_json" +version = "1.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +dependencies = [ + 
"itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core", +] + +[[package]] +name = "sketches-ddsketch" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" 
+dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "sqlformat" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +dependencies = [ + "itertools", + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" +dependencies = [ + "ahash", + "atoi", + "byteorder", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "dotenvy", + "either", + "event-listener 2.5.3", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap 2.1.0", + "log", + "memchr", + "native-tls", + "once_cell", + "paste", + "percent-encoding", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841" +dependencies = [ + "atomic-write-file", + "dotenvy", + "either", + "heck", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 1.0.109", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" +dependencies = [ + "atoi", + "base64", + "bitflags 2.4.1", + "byteorder", + "bytes", + "chrono", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" +dependencies = [ + "atoi", + "base64", + "bitflags 2.4.1", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "uuid", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "tracing", + "url", + "urlencoding", + "uuid", +] + +[[package]] +name = "stringprep" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +dependencies = [ + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +dependencies = [ + "cfg-if", + "fastrand 2.0.1", + "redox_syscall", + "rustix 0.38.27", + "windows-sys 0.48.0", +] + +[[package]] +name = "thiserror" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" 
+version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.5.5", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = 
"tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-bidi" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "uuid" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "value-bag" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a72e1902dde2bd6441347de2b70b7f5d59bf157c6c62f0c44572607a1d55bbe" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.39", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" + +[[package]] +name = "web-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "zerocopy" +version = "0.7.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d075cf85bbb114e933343e087b92f2146bac0d55b534cbb8188becf0039948e" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86cd5ca076997b97ef09d3ad65efe811fa68c9e874cb636ccb211223a813b0c2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" From f76915e0b54eeb852910844c63f3afb433b1867f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 11:41:06 +0100 Subject: [PATCH 036/130] chore: Add comment referencing connection limit --- hook-consumer/src/consumer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index a8f2fa6..cff54f6 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -200,6 +200,7 @@ impl<'p> WebhookConsumer<'p> { /// Run this consumer to continuously process any jobs that become available. pub async fn run(&self) -> Result<(), WebhookConsumerError> { loop { + // TODO: The number of jobs processed will be capped by the PG connection limit when running in transactional mode. let webhook_job = self.wait_for_job().await?; let request_timeout = self.request_timeout; // Required to avoid capturing self in closure. 
From 104717937ec5f1537f6b587e0b3fb1839b98435a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 8 Dec 2023 12:23:41 +0100
Subject: [PATCH 037/130] refactor: Re-use client in consumer

---
 hook-consumer/src/consumer.rs | 83 ++++++++++++++---------------
 hook-consumer/src/error.rs    | 30 +++++++++++++
 hook-consumer/src/lib.rs      |  1 +
 hook-consumer/src/main.rs     |  7 ++-
 4 files changed, 69 insertions(+), 52 deletions(-)
 create mode 100644 hook-consumer/src/error.rs

diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index cff54f6..d18aed5 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -6,34 +6,10 @@ use std::time;
 use async_std::task;
 use hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob};
 use http::StatusCode;
+use reqwest::header;
 use serde::{de::Visitor, Deserialize, Serialize};
-use thiserror::Error;
-
-/// Enumeration of errors for operations with WebhookConsumer.
-#[derive(Error, Debug)]
-pub enum WebhookConsumerError {
-    #[error("timed out while waiting for jobs to be available")]
-    TimeoutError,
-    #[error("{0} is not a valid HttpMethod")]
-    ParseHttpMethodError(String),
-    #[error("error parsing webhook headers")]
-    ParseHeadersError(http::Error),
-    #[error("error parsing webhook url")]
-    ParseUrlError(url::ParseError),
-    #[error("an error occurred in the underlying queue")]
-    QueueError(#[from] PgQueueError),
-    #[error("an error occurred in the underlying job")]
-    PgJobError(String),
-    #[error("an error occurred when attempting to send a request")]
-    RequestError(#[from] reqwest::Error),
-    #[error("a webhook could not be delivered but it could be retried later: {reason}")]
-    RetryableWebhookError {
-        reason: String,
-        retry_after: Option<time::Duration>,
-    },
-    #[error("a webhook could not be delivered and it cannot be retried further: {0}")]
-    NonRetryableWebhookError(String),
-}
+
+use crate::error::WebhookConsumerError;
 
 /// Supported HTTP methods for webhooks.
 #[derive(Debug, PartialEq, Clone, Copy)]
@@ -165,8 +141,8 @@ pub struct WebhookConsumer<'p> {
     queue: &'p PgQueue,
     /// The interval for polling the queue.
     poll_interval: time::Duration,
-    /// A timeout for webhook requests.
-    request_timeout: time::Duration,
+    /// The client used for HTTP requests.
+    client: reqwest::Client,
 }
 
 impl<'p> WebhookConsumer<'p> {
@@ -175,13 +151,24 @@ impl<'p> WebhookConsumer<'p> {
         queue: &'p PgQueue,
         poll_interval: time::Duration,
         request_timeout: time::Duration,
-    ) -> Self {
-        Self {
+    ) -> Result<Self, WebhookConsumerError> {
+        let mut headers = header::HeaderMap::new();
+        headers.insert(
+            header::CONTENT_TYPE,
+            header::HeaderValue::from_static("application/json"),
+        );
+
+        let client = reqwest::Client::builder()
+            .default_headers(headers)
+            .timeout(request_timeout)
+            .build()?;
+
+        Ok(Self {
             name: name.to_owned(),
             queue,
             poll_interval,
-            request_timeout,
-        }
+            client,
+        })
     }
 
     /// Wait until a job becomes available in our queue.
@@ -203,8 +190,9 @@ impl<'p> WebhookConsumer<'p> {
             // TODO: The number of jobs processed will be capped by the PG connection limit when running in transactional mode.
             let webhook_job = self.wait_for_job().await?;
 
-            let request_timeout = self.request_timeout; // Required to avoid capturing self in closure.
-            tokio::spawn(async move { process_webhook_job(webhook_job, request_timeout).await });
+            // reqwest::Client internally wraps with Arc, so this allocation is cheap.
+            let client = self.client.clone();
+            tokio::spawn(async move { process_webhook_job(client, webhook_job).await });
         }
     }
 }
@@ -223,15 +211,15 @@ impl<'p> WebhookConsumer<'p> {
 /// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`.
-/// * `request_timeout`: A timeout for the HTTP request.
+/// * `client`: A `reqwest::Client` used to send the webhook request.
 async fn process_webhook_job(
+    client: reqwest::Client,
     webhook_job: PgTransactionJob<'_, WebhookJobParameters>,
-    request_timeout: std::time::Duration,
 ) -> Result<(), WebhookConsumerError> {
     match send_webhook(
+        client,
         &webhook_job.job.parameters.method,
         &webhook_job.job.parameters.url,
         &webhook_job.job.parameters.headers,
         webhook_job.job.parameters.body.clone(),
-        request_timeout,
     )
     .await
     {
@@ -279,13 +267,12 @@ async fn process_webhook_job(
 /// * `body`: The body of the request. Ownership is required.
-/// * `timeout`: A timeout for the HTTP request.
+/// * `client`: A `reqwest::Client` used to execute the request.
 async fn send_webhook(
+    client: reqwest::Client,
     method: &HttpMethod,
     url: &str,
     headers: &collections::HashMap<String, String>,
     body: String,
-    timeout: std::time::Duration,
 ) -> Result<reqwest::Response, WebhookConsumerError> {
-    let client = reqwest::Client::new();
     let method: http::Method = method.into();
     let url: reqwest::Url = (url).parse().map_err(WebhookConsumerError::ParseUrlError)?;
     let headers: reqwest::header::HeaderMap = (headers)
@@ -296,7 +283,6 @@ async fn send_webhook(
     let response = client
         .request(method, url)
         .headers(headers)
-        .timeout(timeout)
         .body(body)
         .send()
         .await?;
@@ -446,7 +432,8 @@ mod tests {
             &queue,
             time::Duration::from_millis(100),
             time::Duration::from_millis(5000),
-        );
+        )
+        .expect("consumer failed to initialize");
         let consumed_job = consumer
             .wait_for_job()
             .await
@@ -472,15 +459,11 @@ mod tests {
         let url = "http://localhost:18081/echo";
         let headers = collections::HashMap::new();
         let body = "a very relevant request body";
-        let response = send_webhook(
-            &method,
-            url,
-            &headers,
-            body.to_owned(),
-            time::Duration::from_millis(5000),
-        )
-        .await
-        .expect("send_webhook failed");
+        let client = reqwest::Client::new();
+
+        let response = send_webhook(client, &method, url, &headers, body.to_owned())
+            .await
+            .expect("send_webhook failed");
 
         assert_eq!(response.status(), StatusCode::OK);
         assert_eq!(
diff --git a/hook-consumer/src/error.rs b/hook-consumer/src/error.rs
new file mode 100644
index 0000000..34f0619
--- /dev/null
+++ b/hook-consumer/src/error.rs
@@ -0,0 +1,30 @@
+use std::time;
+
+use hook_common::pgqueue;
+use thiserror::Error;
+
+/// Enumeration of errors for operations with WebhookConsumer.
+#[derive(Error, Debug)]
+pub enum WebhookConsumerError {
+    #[error("timed out while waiting for jobs to be available")]
+    TimeoutError,
+    #[error("{0} is not a valid HttpMethod")]
+    ParseHttpMethodError(String),
+    #[error("error parsing webhook headers")]
+    ParseHeadersError(http::Error),
+    #[error("error parsing webhook url")]
+    ParseUrlError(url::ParseError),
+    #[error("an error occurred in the underlying queue")]
+    QueueError(#[from] pgqueue::PgQueueError),
+    #[error("an error occurred in the underlying job")]
+    PgJobError(String),
+    #[error("an error occurred when attempting to send a request")]
+    RequestError(#[from] reqwest::Error),
+    #[error("a webhook could not be delivered but it could be retried later: {reason}")]
+    RetryableWebhookError {
+        reason: String,
+        retry_after: Option<time::Duration>,
+    },
+    #[error("a webhook could not be delivered and it cannot be retried further: {0}")]
+    NonRetryableWebhookError(String),
+}
diff --git a/hook-consumer/src/lib.rs b/hook-consumer/src/lib.rs
index cc746b0..b99481b 100644
--- a/hook-consumer/src/lib.rs
+++ b/hook-consumer/src/lib.rs
@@ -1,2 +1,3 @@
 pub mod config;
 pub mod consumer;
+pub mod error;
diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs
index 22acee1..1651564 100644
--- a/hook-consumer/src/main.rs
+++ b/hook-consumer/src/main.rs
@@ -3,9 +3,10 @@ use envconfig::Envconfig;
 use hook_common::pgqueue::{PgQueue, RetryPolicy};
 use hook_consumer::config::Config;
 use hook_consumer::consumer::WebhookConsumer;
+use hook_consumer::error::WebhookConsumerError;
 
 #[tokio::main]
-async fn main() {
+async fn main() -> Result<(), WebhookConsumerError> {
     let config = Config::init_from_env().expect("Invalid configuration:");
 
     let retry_policy = RetryPolicy::new(
@@ -27,7 +28,9 @@ async fn main() {
         &queue,
         config.poll_interval.0,
         config.request_timeout.0,
-    );
+    )?;
 
     let _ = consumer.run().await;
+
+    Ok(())
 }

From 0525d7b71728fdfbd7915c6c49308325a921460f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 8 Dec 2023 14:59:42 +0100
Subject: [PATCH 038/130] fix: Limit number of concurrent HTTP requests with
 Semaphore

---
 hook-consumer/src/config.rs   |  3 +++
 hook-consumer/src/consumer.rs | 19 +++++++++++++++++--
 hook-consumer/src/main.rs     |  1 +
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs
index fde1373..50d0f1c 100644
--- a/hook-consumer/src/config.rs
+++ b/hook-consumer/src/config.rs
@@ -20,6 +20,9 @@ pub struct Config {
     #[envconfig(default = "5000")]
     pub request_timeout: EnvMsDuration,
 
+    #[envconfig(default = "1024")]
+    pub max_requests: usize,
+
     #[envconfig(nested = true)]
     pub retry_policy: RetryPolicyConfig,
 
diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index d18aed5..fba6cde 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -1,6 +1,7 @@
 use std::collections;
 use std::fmt;
 use std::str::FromStr;
+use std::sync::Arc;
 use std::time;
 
 use async_std::task;
@@ -8,6 +9,7 @@ use hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob};
 use http::StatusCode;
 use reqwest::header;
 use serde::{de::Visitor, Deserialize, Serialize};
+use tokio::sync;
 
 use crate::error::WebhookConsumerError;
 
@@ -143,6 +145,8 @@ pub struct WebhookConsumer<'p> {
     poll_interval: time::Duration,
     /// The client used for HTTP requests.
     client: reqwest::Client,
+    /// Maximum number of concurrent HTTP requests.
+ max_requests: usize, } impl<'p> WebhookConsumer<'p> { @@ -151,6 +155,7 @@ impl<'p> WebhookConsumer<'p> { queue: &'p PgQueue, poll_interval: time::Duration, request_timeout: time::Duration, + max_requests: usize, ) -> Result { let mut headers = header::HeaderMap::new(); headers.insert( @@ -168,6 +173,7 @@ impl<'p> WebhookConsumer<'p> { queue, poll_interval, client, + max_requests, }) } @@ -186,13 +192,21 @@ impl<'p> WebhookConsumer<'p> { /// Run this consumer to continuously process any jobs that become available. pub async fn run(&self) -> Result<(), WebhookConsumerError> { + let semaphore = Arc::new(sync::Semaphore::new(self.max_requests)); + loop { // TODO: The number of jobs processed will be capped by the PG connection limit when running in transactional mode. let webhook_job = self.wait_for_job().await?; // reqwest::Client internally wraps with Arc, so this allocation is cheap. let client = self.client.clone(); - tokio::spawn(async move { process_webhook_job(client, webhook_job).await }); + let permit = semaphore.clone().acquire_owned().await.unwrap(); + + tokio::spawn(async move { + let result = process_webhook_job(client, webhook_job).await; + drop(permit); + result.expect("webhook processing failed"); + }); } } } @@ -278,8 +292,8 @@ async fn send_webhook( let headers: reqwest::header::HeaderMap = (headers) .try_into() .map_err(WebhookConsumerError::ParseHeadersError)?; - let body = reqwest::Body::from(body); + let response = client .request(method, url) .headers(headers) @@ -432,6 +446,7 @@ mod tests { &queue, time::Duration::from_millis(100), time::Duration::from_millis(5000), + 10, ) .expect("consumer failed to initialize"); let consumed_job = consumer diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index 1651564..5f0d46d 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -28,6 +28,7 @@ async fn main() -> Result<(), WebhookConsumerError> { &queue, config.poll_interval.0, config.request_timeout.0, + config.max_requests, )?; let _ = consumer.run().await; From 35825378b0fa68a71bcb98403e57d33c4e602858 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 16:31:06 +0100 Subject: [PATCH 039/130] chore: Use workspace dependencies --- hook-consumer/Cargo.toml | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/hook-consumer/Cargo.toml b/hook-consumer/Cargo.toml index 2e95a6b..5ff1eb0 100644 --- a/hook-consumer/Cargo.toml +++ b/hook-consumer/Cargo.toml @@ -3,19 +3,17 @@ name = "hook-consumer" version = "0.1.0" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] async-std = { version = "1.12" } -chrono = { version = "0.4" } -envconfig = { version = "0.10" } +chrono = { workspace = true } +envconfig = { workspace = true } futures = "0.3" hook-common = { path = "../hook-common" } http = { version = "0.2" } reqwest = { version = "0.11" } -serde = { version = "1.0" } -serde_derive = { version = "1.0" } -sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json", "chrono" ] } -thiserror = { version = "1.0" } -tokio = { version = "1.34", features = ["macros", "rt", "rt-multi-thread"] } +serde = { workspace = true } +serde_derive = { workspace = true } +sqlx = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } url = { version = "2.2" } From 31b1e7ae0e6f19fcf9cf40fd0e2a87e779667f19 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 16:37:43 +0100 Subject: [PATCH 040/130] refactor: Rename to max_concurrent_jobs --- hook-consumer/src/config.rs | 2 +- hook-consumer/src/consumer.rs | 10 +++++----- hook-consumer/src/main.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs index 50d0f1c..8e4bde9 100644 --- a/hook-consumer/src/config.rs +++ b/hook-consumer/src/config.rs @@ -21,7 +21,7 @@ pub struct Config { pub request_timeout: EnvMsDuration, #[envconfig(default = "1024")] - pub max_requests: usize, + pub max_concurrent_jobs: usize, #[envconfig(nested = true)] pub retry_policy: RetryPolicyConfig, diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index fba6cde..9abe9bd 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -145,8 +145,8 @@ pub struct WebhookConsumer<'p> { poll_interval: time::Duration, /// The client used for HTTP requests. client: reqwest::Client, - /// Maximum number of concurrent HTTP requests. - max_requests: usize, + /// Maximum number of concurrent jobs being processed. + max_concurrent_jobs: usize, } impl<'p> WebhookConsumer<'p> { @@ -155,7 +155,7 @@ impl<'p> WebhookConsumer<'p> { queue: &'p PgQueue, poll_interval: time::Duration, request_timeout: time::Duration, - max_requests: usize, + max_concurrent_jobs: usize, ) -> Result { let mut headers = header::HeaderMap::new(); headers.insert( @@ -173,7 +173,7 @@ impl<'p> WebhookConsumer<'p> { queue, poll_interval, client, - max_requests, + max_concurrent_jobs, }) } @@ -192,7 +192,7 @@ impl<'p> WebhookConsumer<'p> { /// Run this consumer to continuously process any jobs that become available. pub async fn run(&self) -> Result<(), WebhookConsumerError> { - let semaphore = Arc::new(sync::Semaphore::new(self.max_requests)); + let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { // TODO: The number of jobs processed will be capped by the PG connection limit when running in transactional mode. diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index 5f0d46d..f165b32 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -28,7 +28,7 @@ async fn main() -> Result<(), WebhookConsumerError> { &queue, config.poll_interval.0, config.request_timeout.0, - config.max_requests, + config.max_concurrent_jobs, )?; let _ = consumer.run().await; From 5cdff6a884d6ed2ecaae8b880d2bd70ccfcf6d41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 16:38:39 +0100 Subject: [PATCH 041/130] chore: Remove deprecated comment --- hook-consumer/src/consumer.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 9abe9bd..653a890 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -195,7 +195,6 @@ impl<'p> WebhookConsumer<'p> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { - // TODO: The number of jobs processed will be capped by the PG connection limit when running in transactional mode. let webhook_job = self.wait_for_job().await?; // reqwest::Client internally wraps with Arc, so this allocation is cheap. 
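
The concurrency limit introduced in PATCH 038 and refined through the patch that follows boils down to a small, reusable Tokio pattern: an `Arc<Semaphore>` caps the number of in-flight tasks, each spawned task owns a permit for its entire lifetime, and awaiting `acquire_owned` before spawning applies backpressure to the polling loop. A minimal, self-contained sketch of that pattern (illustrative names only, not code from these patches):

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    async fn bounded_spawn_loop(max_concurrent_jobs: usize) {
        let semaphore = Arc::new(Semaphore::new(max_concurrent_jobs));

        loop {
            // Blocks the loop once `max_concurrent_jobs` tasks are in flight.
            let permit = semaphore
                .clone()
                .acquire_owned()
                .await
                .expect("semaphore closed");

            tokio::spawn(async move {
                // ... process one unit of work, e.g. deliver one webhook ...

                // Dropping the permit frees a slot for the next task.
                drop(permit);
            });
        }
    }

Returning the processing `Result` from the spawned future, as the next patch does, hands any error to the task's `JoinHandle` instead of panicking inside the spawned task.
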
From 1358e3b62260dab524751a1d4a4538c99b6ddcce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 16:58:30 +0100 Subject: [PATCH 042/130] refactor: Return result from processing task --- hook-consumer/src/consumer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 653a890..8da2df6 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -204,7 +204,7 @@ impl<'p> WebhookConsumer<'p> { tokio::spawn(async move { let result = process_webhook_job(client, webhook_job).await; drop(permit); - result.expect("webhook processing failed"); + result }); } } From eae1cb13b6c534f73aba37e21d211b71e6b1eb36 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 7 Dec 2023 15:34:39 -0700 Subject: [PATCH 043/130] Add basic webhook produce endpoint --- Cargo.lock | 9 + Cargo.toml | 20 +- hook-common/Cargo.toml | 7 +- hook-common/src/lib.rs | 1 + hook-common/src/pgqueue.rs | 10 +- hook-common/src/webhook.rs | 139 +++++++++++++ hook-consumer/src/consumer.rs | 132 +----------- hook-producer/Cargo.toml | 18 +- hook-producer/src/config.rs | 9 + hook-producer/src/handlers/app.rs | 60 ++++++ hook-producer/src/handlers/index.rs | 3 - hook-producer/src/handlers/mod.rs | 17 +- hook-producer/src/handlers/webhook.rs | 278 ++++++++++++++++++++++++++ hook-producer/src/main.rs | 22 +- 14 files changed, 562 insertions(+), 163 deletions(-) create mode 100644 hook-common/src/webhook.rs create mode 100644 hook-producer/src/handlers/app.rs delete mode 100644 hook-producer/src/handlers/index.rs create mode 100644 hook-producer/src/handlers/webhook.rs diff --git a/Cargo.lock b/Cargo.lock index b24af98..73fd7cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -986,6 +986,7 @@ name = "hook-common" version = "0.1.0" dependencies = [ "chrono", + "http 0.2.11", "serde", "serde_derive", "sqlx", @@ -1019,11 +1020,19 @@ dependencies = [ "axum", "envconfig", "eyre", + "hook-common", + "http-body-util", "metrics", "metrics-exporter-prometheus", + "serde", + "serde_derive", + "serde_json", + "sqlx", "tokio", + "tower", "tracing", "tracing-subscriber", + "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e92db69..60a7219 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,18 +1,22 @@ [workspace] resolver = "2" -members = [ - "hook-common", - "hook-producer", - "hook-consumer", -] +members = ["hook-common", "hook-producer", "hook-consumer"] [workspace.dependencies] chrono = { version = "0.4" } serde = { version = "1.0" } serde_derive = { version = "1.0" } +serde_json = { version = "1.0" } thiserror = { version = "1.0" } -sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", "postgres", "uuid", "json", "chrono" ] } +sqlx = { version = "0.7", features = [ + "runtime-tokio", + "tls-native-tls", + "postgres", + "uuid", + "json", + "chrono", +] } tokio = { version = "1.34.0", features = ["full"] } eyre = "0.6.9" tracing = "0.1.40" @@ -20,3 +24,7 @@ tracing-subscriber = "0.3.18" envconfig = "0.10.0" metrics = "0.21.1" metrics-exporter-prometheus = "0.12.1" +http = { version = "0.2" } +url = { version = "2.5.0 " } +tower = "0.4.13" +http-body-util = "0.1.0" diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index b55a9ec..24d1a0d 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -6,11 +6,12 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -chrono = { workspace = 
true}
+chrono = { workspace = true }
+http = { workspace = true }
 serde = { workspace = true }
-serde_derive = { workspace = true}
-thiserror = { workspace = true }
+serde_derive = { workspace = true }
 sqlx = { workspace = true }
+thiserror = { workspace = true }
 
 [dev-dependencies]
 tokio = { workspace = true } # We need a runtime for async tests
diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs
index d1dadf3..3138f08 100644
--- a/hook-common/src/lib.rs
+++ b/hook-common/src/lib.rs
@@ -1 +1,2 @@
 pub mod pgqueue;
+pub mod webhook;
diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 7ed4655..fb2211b 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -15,12 +15,16 @@ use thiserror::Error;
 /// Errors that can originate from sqlx and are wrapped by us to provide additional context.
 #[derive(Error, Debug)]
 pub enum PgQueueError {
+    #[error("pool creation failed with: {error}")]
+    PoolCreationError { error: sqlx::Error },
     #[error("connection failed with: {error}")]
     ConnectionError { error: sqlx::Error },
     #[error("{command} query failed with: {error}")]
     QueryError { command: String, error: sqlx::Error },
     #[error("{0} is not a valid JobStatus")]
     ParseJobStatusError(String),
+    #[error("{0} is not a valid HttpMethod")]
+    ParseHttpMethodError(String),
 }
 
 #[derive(Error, Debug)]
@@ -528,6 +532,7 @@ impl Default for RetryPolicy {
 }
 
 /// A queue implemented on top of a PostgreSQL table.
+#[derive(Clone)]
 pub struct PgQueue {
     /// A name to identify this PgQueue as multiple may share a table.
     name: String,
@@ -560,9 +565,8 @@ impl PgQueue {
         let name = queue_name.to_owned();
         let table = table_name.to_owned();
         let pool = PgPoolOptions::new()
-            .connect(url)
-            .await
-            .map_err(|error| PgQueueError::ConnectionError { error })?;
+            .connect_lazy(url)
+            .map_err(|error| PgQueueError::PoolCreationError { error })?;
 
         Ok(Self {
             name,
diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs
new file mode 100644
index 0000000..b17959c
--- /dev/null
+++ b/hook-common/src/webhook.rs
@@ -0,0 +1,139 @@
+use std::collections;
+use std::fmt;
+use std::str::FromStr;
+
+use serde::{de::Visitor, Deserialize, Serialize};
+
+use crate::pgqueue::PgQueueError;
+
+/// Supported HTTP methods for webhooks.
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub enum HttpMethod {
+    DELETE,
+    GET,
+    PATCH,
+    POST,
+    PUT,
+}
+
+/// Allow casting `HttpMethod` from strings.
+impl FromStr for HttpMethod {
+    type Err = PgQueueError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.to_ascii_uppercase().as_ref() {
+            "DELETE" => Ok(HttpMethod::DELETE),
+            "GET" => Ok(HttpMethod::GET),
+            "PATCH" => Ok(HttpMethod::PATCH),
+            "POST" => Ok(HttpMethod::POST),
+            "PUT" => Ok(HttpMethod::PUT),
+            invalid => Err(PgQueueError::ParseHttpMethodError(invalid.to_owned())),
+        }
+    }
+}
+
+/// Implement `std::fmt::Display` to convert HttpMethod to string.
+impl fmt::Display for HttpMethod {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            HttpMethod::DELETE => write!(f, "DELETE"),
+            HttpMethod::GET => write!(f, "GET"),
+            HttpMethod::PATCH => write!(f, "PATCH"),
+            HttpMethod::POST => write!(f, "POST"),
+            HttpMethod::PUT => write!(f, "PUT"),
+        }
+    }
+}
+
+struct HttpMethodVisitor;
+
+impl<'de> Visitor<'de> for HttpMethodVisitor {
+    type Value = HttpMethod;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        write!(formatter, "the string representation of HttpMethod")
+    }
+
+    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
+    where
+        E: serde::de::Error,
+    {
+        match HttpMethod::from_str(s) {
+            Ok(method) => Ok(method),
+            Err(_) => Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Str(s),
+                &self,
+            )),
+        }
+    }
+}
+
+/// Deserialize required to read `HttpMethod` from database.
+impl<'de> Deserialize<'de> for HttpMethod {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        deserializer.deserialize_str(HttpMethodVisitor)
+    }
+}
+
+/// Serialize required to write `HttpMethod` to database.
+impl Serialize for HttpMethod {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+/// Convenience to cast `HttpMethod` to `http::Method`.
+/// Not all `http::Method` variants are valid `HttpMethod` variants, hence why we
+/// can't just use the former or implement `From`.
+impl From<HttpMethod> for http::Method {
+    fn from(val: HttpMethod) -> Self {
+        match val {
+            HttpMethod::DELETE => http::Method::DELETE,
+            HttpMethod::GET => http::Method::GET,
+            HttpMethod::PATCH => http::Method::PATCH,
+            HttpMethod::POST => http::Method::POST,
+            HttpMethod::PUT => http::Method::PUT,
+        }
+    }
+}
+
+impl From<&HttpMethod> for http::Method {
+    fn from(val: &HttpMethod) -> Self {
+        match val {
+            HttpMethod::DELETE => http::Method::DELETE,
+            HttpMethod::GET => http::Method::GET,
+            HttpMethod::PATCH => http::Method::PATCH,
+            HttpMethod::POST => http::Method::POST,
+            HttpMethod::PUT => http::Method::PUT,
+        }
+    }
+}
+
+/// `JobParameters` required for the `WebhookConsumer` to execute a webhook.
+/// These parameters should match the exported Webhook interface that PostHog plugins
+/// implement. See: https://github.com/PostHog/plugin-scaffold/blob/main/src/types.ts#L15.
+#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)]
+pub struct WebhookJobParameters {
+    pub body: String,
+    pub headers: collections::HashMap<String, String>,
+    pub method: HttpMethod,
+    pub url: String,
+
+    // These should be set if the Webhook is associated with a plugin `composeWebhook` invocation.
+    pub team_id: Option<i32>,
+    pub plugin_id: Option<i32>,
+    pub plugin_config_id: Option<i32>,
+
+    #[serde(default = "default_max_attempts")]
+    pub max_attempts: i32,
+}
+
+fn default_max_attempts() -> i32 {
+    3
+}
diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index 8da2df6..406dd73 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -1,140 +1,16 @@
 use std::collections;
-use std::fmt;
-use std::str::FromStr;
 use std::sync::Arc;
 use std::time;
 
 use async_std::task;
 use hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob};
+use hook_common::webhook::{HttpMethod, WebhookJobParameters};
 use http::StatusCode;
 use reqwest::header;
-use serde::{de::Visitor, Deserialize, Serialize};
 use tokio::sync;
 
 use crate::error::WebhookConsumerError;
 
-/// Supported HTTP methods for webhooks.
-#[derive(Debug, PartialEq, Clone, Copy)] -pub enum HttpMethod { - DELETE, - GET, - PATCH, - POST, - PUT, -} - -/// Allow casting `HttpMethod` from strings. -impl FromStr for HttpMethod { - type Err = WebhookConsumerError; - - fn from_str(s: &str) -> Result { - match s.to_ascii_uppercase().as_ref() { - "DELETE" => Ok(HttpMethod::DELETE), - "GET" => Ok(HttpMethod::GET), - "PATCH" => Ok(HttpMethod::PATCH), - "POST" => Ok(HttpMethod::POST), - "PUT" => Ok(HttpMethod::PUT), - invalid => Err(WebhookConsumerError::ParseHttpMethodError( - invalid.to_owned(), - )), - } - } -} - -/// Implement `std::fmt::Display` to convert HttpMethod to string. -impl fmt::Display for HttpMethod { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - HttpMethod::DELETE => write!(f, "DELETE"), - HttpMethod::GET => write!(f, "GET"), - HttpMethod::PATCH => write!(f, "PATCH"), - HttpMethod::POST => write!(f, "POST"), - HttpMethod::PUT => write!(f, "PUT"), - } - } -} - -struct HttpMethodVisitor; - -impl<'de> Visitor<'de> for HttpMethodVisitor { - type Value = HttpMethod; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "the string representation of HttpMethod") - } - - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - match HttpMethod::from_str(s) { - Ok(method) => Ok(method), - Err(_) => Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(s), - &self, - )), - } - } -} - -/// Deserialize required to read `HttpMethod` from database. -impl<'de> Deserialize<'de> for HttpMethod { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_str(HttpMethodVisitor) - } -} - -/// Serialize required to write `HttpMethod` to database. -impl Serialize for HttpMethod { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} - -/// Convenience to cast `HttpMethod` to `http::Method`. -/// Not all `http::Method` variants are valid `HttpMethod` variants, hence why we -/// can't just use the former or implement `From`. -impl From for http::Method { - fn from(val: HttpMethod) -> Self { - match val { - HttpMethod::DELETE => http::Method::DELETE, - HttpMethod::GET => http::Method::GET, - HttpMethod::PATCH => http::Method::PATCH, - HttpMethod::POST => http::Method::POST, - HttpMethod::PUT => http::Method::PUT, - } - } -} - -impl From<&HttpMethod> for http::Method { - fn from(val: &HttpMethod) -> Self { - match val { - HttpMethod::DELETE => http::Method::DELETE, - HttpMethod::GET => http::Method::GET, - HttpMethod::PATCH => http::Method::PATCH, - HttpMethod::POST => http::Method::POST, - HttpMethod::PUT => http::Method::PUT, - } - } -} - -/// `JobParameters` required for the `WebhookConsumer` to execute a webhook. -/// These parameters should match the exported Webhook interface that PostHog plugins. -/// implement. See: https://github.com/PostHog/plugin-scaffold/blob/main/src/types.ts#L15. -#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] -pub struct WebhookJobParameters { - body: String, - headers: collections::HashMap, - method: HttpMethod, - url: String, -} - /// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. pub struct WebhookConsumer<'p> { /// An identifier for this consumer. Used to mark jobs we have consumed. 
@@ -432,6 +308,12 @@ mod tests {
             headers: collections::HashMap::new(),
             method: HttpMethod::POST,
             url: "localhost".to_owned(),
+
+            team_id: Some(1),
+            plugin_id: Some(2),
+            plugin_config_id: Some(3),
+
+            max_attempts: 1,
         };
         // enqueue takes ownership of the job enqueued to avoid bugs that can cause duplicate jobs.
         // Normally, a separate application would be enqueueing jobs for us to consume, so no ownership
diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml
index 47ef532..ef1a24b 100644
--- a/hook-producer/Cargo.toml
+++ b/hook-producer/Cargo.toml
@@ -6,11 +6,19 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-axum = { version="0.7.1", features=["http2"] }
-tokio = { workspace = true }
-eyre = {workspace = true }
-tracing = {workspace = true}
-tracing-subscriber = {workspace = true}
+axum = { version = "0.7.1", features = ["http2"] }
 envconfig = { workspace = true }
+eyre = { workspace = true }
+hook-common = { path = "../hook-common" }
+http-body-util = { workspace = true }
 metrics = { workspace = true }
 metrics-exporter-prometheus = { workspace = true }
+serde = { workspace = true }
+serde_derive = { workspace = true }
+serde_json = { workspace = true }
+sqlx = { workspace = true }
+tokio = { workspace = true }
+tower = { workspace = true }
+tracing = { workspace = true }
+tracing-subscriber = { workspace = true }
+url = { workspace = true }
diff --git a/hook-producer/src/config.rs b/hook-producer/src/config.rs
index 9d093c6..87fad5d 100644
--- a/hook-producer/src/config.rs
+++ b/hook-producer/src/config.rs
@@ -7,6 +7,15 @@ pub struct Config {
 
     #[envconfig(from = "BIND_PORT", default = "8000")]
     pub port: u16,
+
+    #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")]
+    pub database_url: String,
+
+    #[envconfig(default = "job_queue")]
+    pub table_name: String,
+
+    #[envconfig(default = "default")]
+    pub queue_name: String,
 }
 
 impl Config {
diff --git a/hook-producer/src/handlers/app.rs b/hook-producer/src/handlers/app.rs
new file mode 100644
index 0000000..911a04d
--- /dev/null
+++ b/hook-producer/src/handlers/app.rs
@@ -0,0 +1,60 @@
+use axum::{routing, Router};
+use metrics_exporter_prometheus::PrometheusHandle;
+
+use hook_common::pgqueue::PgQueue;
+
+use super::webhook;
+
+pub fn app(pg_pool: PgQueue, metrics: Option<PrometheusHandle>) -> Router {
+    Router::new()
+        .route("/", routing::get(index))
+        .route(
+            "/metrics",
+            routing::get(move || match metrics {
+                Some(ref recorder_handle) => std::future::ready(recorder_handle.render()),
+                None => std::future::ready("no metrics recorder installed".to_owned()),
+            }),
+        )
+        .route("/webhook", routing::post(webhook::post).with_state(pg_pool))
+        .layer(axum::middleware::from_fn(crate::metrics::track_metrics))
+}
+
+pub async fn index() -> &'static str {
+    "rusty hook"
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use axum::{
+        body::Body,
+        http::{Request, StatusCode},
+    };
+    use hook_common::pgqueue::{PgQueue, RetryPolicy};
+    use http_body_util::BodyExt; // for `collect`
+    use tower::ServiceExt; // for `call`, `oneshot`, and `ready`
+
+    #[tokio::test]
+    async fn index() {
+        let pg_queue = PgQueue::new(
+            "test_index",
+            "job_queue",
+            "postgres://posthog:posthog@localhost:15432/test_database",
+            RetryPolicy::default(),
+        )
+        .await
+        .expect("failed to construct pg_queue");
+
+        let app = app(pg_queue, None);
+
+        let response = app
+            .oneshot(Request::builder().uri("/").body(Body::empty()).unwrap())
+            .await
+            .unwrap();
+
+        assert_eq!(response.status(), StatusCode::OK);
+
+        let body = response.into_body().collect().await.unwrap().to_bytes();
+        assert_eq!(&body[..], b"rusty hook");
+    }
+}
diff --git a/hook-producer/src/handlers/index.rs b/hook-producer/src/handlers/index.rs
deleted file mode 100644
index 56896fa..0000000
--- a/hook-producer/src/handlers/index.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-pub async fn get() -> &'static str {
-    "rusty hook"
-}
diff --git a/hook-producer/src/handlers/mod.rs b/hook-producer/src/handlers/mod.rs
index 2504073..88f9671 100644
--- a/hook-producer/src/handlers/mod.rs
+++ b/hook-producer/src/handlers/mod.rs
@@ -1,15 +1,4 @@
-use axum::{routing, Router};
+mod app;
+mod webhook;
 
-mod index;
-
-pub fn router() -> Router {
-    let recorder_handle = crate::metrics::setup_metrics_recorder();
-
-    Router::new()
-        .route("/", routing::get(index::get))
-        .route(
-            "/metrics",
-            routing::get(move || std::future::ready(recorder_handle.render())),
-        )
-        .layer(axum::middleware::from_fn(crate::metrics::track_metrics))
-}
+pub use app::app;
diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs
new file mode 100644
index 0000000..7de1126
--- /dev/null
+++ b/hook-producer/src/handlers/webhook.rs
@@ -0,0 +1,278 @@
+use axum::{extract::State, http::StatusCode, Json};
+use hook_common::webhook::WebhookJobParameters;
+use serde_derive::Deserialize;
+use url::Url;
+
+use hook_common::pgqueue::{NewJob, PgQueue};
+use serde::Serialize;
+use tracing::{debug, error};
+
+const MAX_BODY_SIZE: usize = 1_000_000;
+
+#[derive(Serialize, Deserialize)]
+pub struct WebhookPostResponse {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    error: Option<String>,
+}
+
+pub async fn post(
+    State(pg_queue): State<PgQueue>,
+    Json(payload): Json<WebhookJobParameters>,
+) -> Result<Json<WebhookPostResponse>, (StatusCode, Json<WebhookPostResponse>)> {
+    debug!("received payload: {:?}", payload);
+
+    if payload.body.len() > MAX_BODY_SIZE {
+        return Err((
+            StatusCode::BAD_REQUEST,
+            Json(WebhookPostResponse {
+                error: Some("body too large".to_owned()),
+            }),
+        ));
+    }
+
+    let url_hostname = get_hostname(&payload.url)?;
+    let job = NewJob::new(payload.max_attempts, payload, url_hostname.as_str());
+
+    pg_queue.enqueue(job).await.map_err(internal_error)?;
+
+    Ok(Json(WebhookPostResponse { error: None }))
+}
+
+fn internal_error<E>(err: E) -> (StatusCode, Json<WebhookPostResponse>)
+where
+    E: std::error::Error,
+{
+    error!("internal error: {}", err);
+    (
+        StatusCode::INTERNAL_SERVER_ERROR,
+        Json(WebhookPostResponse {
+            error: Some(err.to_string()),
+        }),
+    )
+}
+
+fn get_hostname(url_str: &str) -> Result<String, (StatusCode, Json<WebhookPostResponse>)> {
+    let url = Url::parse(url_str).map_err(|_| {
+        (
+            StatusCode::BAD_REQUEST,
+            Json(WebhookPostResponse {
+                error: Some("could not parse url".to_owned()),
+            }),
+        )
+    })?;
+
+    match url.host_str() {
+        Some(hostname) => Ok(hostname.to_owned()),
+        None => Err((
+            StatusCode::BAD_REQUEST,
+            Json(WebhookPostResponse {
+                error: Some("couldn't extract hostname from url".to_owned()),
+            }),
+        )),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use axum::{
+        body::Body,
+        http::{self, Request, StatusCode},
+    };
+    use hook_common::pgqueue::{PgQueue, RetryPolicy};
+    use hook_common::webhook::{HttpMethod, WebhookJobParameters};
+    use http_body_util::BodyExt; // for `collect`
+    use std::collections;
+    use tower::ServiceExt; // for `call`, `oneshot`, and `ready`
+
+    use crate::handlers::app;
+
+    #[tokio::test]
+    async fn webhook_success() {
+        let pg_queue = PgQueue::new(
+            "test_index",
+            "job_queue",
+            "postgres://posthog:posthog@localhost:15432/test_database",
+            RetryPolicy::default(),
+        )
+        .await
+
.expect("failed to construct pg_queue"); + + let app = app(pg_queue, None); + + let mut headers = collections::HashMap::new(); + headers.insert("Content-Type".to_owned(), "application/json".to_owned()); + let response = app + .oneshot( + Request::builder() + .method(http::Method::POST) + .uri("/webhook") + .header(http::header::CONTENT_TYPE, "application/json") + .body(Body::from( + serde_json::to_string(&WebhookJobParameters { + headers, + method: HttpMethod::POST, + url: "http://example.com/".to_owned(), + body: r#"{"a": "b"}"#.to_owned(), + + team_id: Some(1), + plugin_id: Some(2), + plugin_config_id: Some(3), + + max_attempts: 1, + }) + .unwrap(), + )) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + let body = response.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(&body[..], b"{}"); + } + + #[tokio::test] + async fn webhook_bad_url() { + let pg_queue = PgQueue::new( + "test_index", + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + RetryPolicy::default(), + ) + .await + .expect("failed to construct pg_queue"); + + let app = app(pg_queue, None); + + let response = app + .oneshot( + Request::builder() + .method(http::Method::POST) + .uri("/webhook") + .header(http::header::CONTENT_TYPE, "application/json") + .body(Body::from( + serde_json::to_string(&WebhookJobParameters { + headers: collections::HashMap::new(), + method: HttpMethod::POST, + url: "invalid".to_owned(), + body: r#"{"a": "b"}"#.to_owned(), + + team_id: Some(1), + plugin_id: Some(2), + plugin_config_id: Some(3), + + max_attempts: 1, + }) + .unwrap(), + )) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + } + + #[tokio::test] + async fn webhook_payload_missing_fields() { + let pg_queue = PgQueue::new( + "test_index", + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + RetryPolicy::default(), + ) + .await + .expect("failed to construct pg_queue"); + + let app = app(pg_queue, None); + + let response = app + .oneshot( + Request::builder() + .method(http::Method::POST) + .uri("/webhook") + .header(http::header::CONTENT_TYPE, "application/json") + .body("{}".to_owned()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::UNPROCESSABLE_ENTITY); + } + + #[tokio::test] + async fn webhook_payload_not_json() { + let pg_queue = PgQueue::new( + "test_index", + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + RetryPolicy::default(), + ) + .await + .expect("failed to construct pg_queue"); + + let app = app(pg_queue, None); + + let response = app + .oneshot( + Request::builder() + .method(http::Method::POST) + .uri("/webhook") + .header(http::header::CONTENT_TYPE, "application/json") + .body("x".to_owned()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + } + + #[tokio::test] + async fn webhook_payload_body_too_large() { + let pg_queue = PgQueue::new( + "test_index", + "job_queue", + "postgres://posthog:posthog@localhost:15432/test_database", + RetryPolicy::default(), + ) + .await + .expect("failed to construct pg_queue"); + + let app = app(pg_queue, None); + + let bytes: Vec = vec![b'a'; 1_000_000 * 2]; + let long_string = String::from_utf8_lossy(&bytes); + + let response = app + .oneshot( + Request::builder() + .method(http::Method::POST) + .uri("/webhook") + .header(http::header::CONTENT_TYPE, "application/json") + .body(Body::from( + 
serde_json::to_string(&WebhookJobParameters { + headers: collections::HashMap::new(), + method: HttpMethod::POST, + url: "http://example.com".to_owned(), + body: long_string.to_string(), + + team_id: Some(1), + plugin_id: Some(2), + plugin_config_id: Some(3), + + max_attempts: 1, + }) + .unwrap(), + )) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + } +} diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index 118829b..29da8dd 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -1,10 +1,10 @@ use axum::Router; - use config::Config; use envconfig::Envconfig; - use eyre::Result; +use hook_common::pgqueue::{PgQueue, RetryPolicy}; + mod config; mod handlers; mod metrics; @@ -21,10 +21,24 @@ async fn listen(app: Router, bind: String) -> Result<()> { async fn main() { tracing_subscriber::fmt::init(); - let app = handlers::router(); - let config = Config::init_from_env().expect("failed to load configuration from env"); + let pg_queue = PgQueue::new( + // TODO: Coupling the queue name to the PgQueue object doesn't seem ideal from the producer + // side, but we don't need more than one queue for now. + &config.queue_name, + &config.table_name, + &config.database_url, + // TODO: It seems unnecessary that the producer side needs to know about the retry policy. + RetryPolicy::default(), + ) + .await + .expect("failed to initialize queue"); + + let recorder_handle = crate::metrics::setup_metrics_recorder(); + + let app = handlers::app(pg_queue, Some(recorder_handle)); + match listen(app, config.bind()).await { Ok(_) => {} Err(e) => tracing::error!("failed to start hook-producer http server, {}", e), From 74c52079098463680207a05ed43e8650d0779cd5 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 12 Dec 2023 11:44:21 -0700 Subject: [PATCH 044/130] Add indexes, drop redundant column --- migrations/20231129172339_job_queue_table.sql | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/migrations/20231129172339_job_queue_table.sql b/migrations/20231129172339_job_queue_table.sql index 4631f0b..8627556 100644 --- a/migrations/20231129172339_job_queue_table.sql +++ b/migrations/20231129172339_job_queue_table.sql @@ -10,7 +10,6 @@ CREATE TABLE job_queue( attempt INT NOT NULL DEFAULT 0, attempted_at TIMESTAMPTZ DEFAULT NULL, attempted_by TEXT[] DEFAULT ARRAY[]::TEXT[], - completed_at TIMESTAMPTZ DEFAULT NULL, created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), errors jsonb[], max_attempts INT NOT NULL DEFAULT 1, @@ -21,3 +20,9 @@ CREATE TABLE job_queue( status job_status NOT NULL DEFAULT 'available'::job_status, target TEXT NOT NULL ); + +-- Needed for `dequeue` queries +CREATE INDEX idx_queue_scheduled_at ON job_queue(queue, status, scheduled_at); + +-- Needed for UPDATE-ing incomplete jobs with a specific target (i.e. 
slow destinations)
+CREATE INDEX idx_queue_target ON job_queue(queue, status, target);
\ No newline at end of file

From c00bc04db16994971934a11686cc43521999b6e5 Mon Sep 17 00:00:00 2001
From: Brett Hoerner
Date: Thu, 14 Dec 2023 17:24:43 -0700
Subject: [PATCH 045/130] Append final errors to error array and treat request
 errors as retryable

---
 hook-common/src/pgqueue.rs    | 8 ++++++--
 hook-consumer/src/consumer.rs | 9 +++++++--
 hook-consumer/src/error.rs    | 2 --
 3 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index fb2211b..47938ec 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -248,6 +248,7 @@ UPDATE
 SET
     finished_at = NOW(),
-    status = 'failed'::job_status
+    status = 'failed'::job_status,
+    errors = array_append("{0}".errors, $3)
 WHERE
     "{0}".id = $2
     AND queue = $1
@@ -261,6 +262,7 @@ RETURNING
         sqlx::query(&base_query)
             .bind(&failed_job.queue)
             .bind(failed_job.id)
+            .bind(&failed_job.error)
             .execute(&mut *self.connection)
             .await
             .map_err(|error| PgJobError::QueryError {
@@ -394,6 +396,7 @@ UPDATE
 SET
     finished_at = NOW(),
-    status = 'failed'::job_status
+    status = 'failed'::job_status,
+    errors = array_append("{0}".errors, $3)
 WHERE
     "{0}".id = $2
     AND queue = $1
@@ -406,6 +409,7 @@ RETURNING
         sqlx::query(&base_query)
             .bind(&failed_job.queue)
             .bind(failed_job.id)
+            .bind(&failed_job.error)
             .execute(&mut *self.transaction)
             .await
             .map_err(|error| PgJobError::QueryError {
diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index 406dd73..dd8ab42 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -42,7 +42,8 @@ impl<'p> WebhookConsumer<'p> {
         let client = reqwest::Client::builder()
             .default_headers(headers)
             .timeout(request_timeout)
-            .build()?;
+            .build()
+            .expect("failed to construct reqwest client for webhook consumer");
 
         Ok(Self {
             name: name.to_owned(),
@@ -174,7 +175,11 @@ async fn send_webhook(
         .headers(headers)
         .body(body)
         .send()
-        .await?;
+        .await
+        .map_err(|e| WebhookConsumerError::RetryableWebhookError {
+            reason: e.to_string(),
+            retry_after: None,
+        })?;
 
     let status = response.status();
 
diff --git a/hook-consumer/src/error.rs b/hook-consumer/src/error.rs
index 34f0619..a196643 100644
--- a/hook-consumer/src/error.rs
+++ b/hook-consumer/src/error.rs
@@ -18,8 +18,6 @@ pub enum WebhookConsumerError {
     QueueError(#[from] pgqueue::PgQueueError),
     #[error("an error occurred in the underlying job")]
     PgJobError(String),
-    #[error("an error occurred when attempting to send a request")]
-    RequestError(#[from] reqwest::Error),
     #[error("a webhook could not be delivered but it could be retried later: {reason}")]
     RetryableWebhookError {
         reason: String,

From 9084c6e5c519013c2a4ee65f8cd5dd2b2fc0d844 Mon Sep 17 00:00:00 2001
From: Brett Hoerner
Date: Fri, 15 Dec 2023 05:35:26 -0700
Subject: [PATCH 046/130] Remove Result from WebhookConsumer::new

---
 hook-consumer/src/consumer.rs | 10 +++++-----
 hook-consumer/src/main.rs     |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index dd8ab42..59b8e9f 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -32,7 +32,7 @@ impl<'p> WebhookConsumer<'p> {
         poll_interval: time::Duration,
         request_timeout: time::Duration,
         max_concurrent_jobs: usize,
-    ) -> Result<Self, WebhookConsumerError> {
+    ) -> Self {
         let mut headers = header::HeaderMap::new();
         headers.insert(
             header::CONTENT_TYPE,
@@ -45,13 +45,13 @@ impl<'p> WebhookConsumer<'p> {
             .build()
             .expect("failed to construct reqwest client for webhook consumer");
 
-        
Ok(Self { + Self { name: name.to_owned(), queue, poll_interval, client, max_concurrent_jobs, - }) + } } /// Wait until a job becomes available in our queue. @@ -333,8 +333,8 @@ mod tests { time::Duration::from_millis(100), time::Duration::from_millis(5000), 10, - ) - .expect("consumer failed to initialize"); + ); + let consumed_job = consumer .wait_for_job() .await diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index f165b32..bf76503 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -29,7 +29,7 @@ async fn main() -> Result<(), WebhookConsumerError> { config.poll_interval.0, config.request_timeout.0, config.max_concurrent_jobs, - )?; + ); let _ = consumer.run().await; From 71bf5531cefa8e71a64dad3e4e887072ea9cd83b Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 13 Dec 2023 12:03:43 -0700 Subject: [PATCH 047/130] Add hook-janitor skeleton --- Cargo.lock | 139 ++++++++++++++++++ Cargo.toml | 23 +-- hook-common/Cargo.toml | 3 + hook-common/src/lib.rs | 1 + {hook-producer => hook-common}/src/metrics.rs | 0 hook-janitor/Cargo.toml | 28 ++++ hook-janitor/src/cleanup.rs | 36 +++++ hook-janitor/src/config.rs | 66 +++++++++ hook-janitor/src/handlers/app.rs | 21 +++ hook-janitor/src/handlers/mod.rs | 3 + hook-janitor/src/kafka_producer.rs | 48 ++++++ hook-janitor/src/main.rs | 86 +++++++++++ hook-janitor/src/webhooks.rs | 54 +++++++ hook-producer/Cargo.toml | 2 +- hook-producer/src/handlers/app.rs | 7 +- hook-producer/src/main.rs | 4 +- 16 files changed, 506 insertions(+), 15 deletions(-) rename {hook-producer => hook-common}/src/metrics.rs (100%) create mode 100644 hook-janitor/Cargo.toml create mode 100644 hook-janitor/src/cleanup.rs create mode 100644 hook-janitor/src/config.rs create mode 100644 hook-janitor/src/handlers/app.rs create mode 100644 hook-janitor/src/handlers/mod.rs create mode 100644 hook-janitor/src/kafka_producer.rs create mode 100644 hook-janitor/src/main.rs create mode 100644 hook-janitor/src/webhooks.rs diff --git a/Cargo.lock b/Cargo.lock index 73fd7cb..c86f175 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -404,6 +404,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "cmake" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +dependencies = [ + "cc", +] + [[package]] name = "concurrent-queue" version = "2.4.0" @@ -985,8 +994,11 @@ dependencies = [ name = "hook-common" version = "0.1.0" dependencies = [ + "axum", "chrono", "http 0.2.11", + "metrics", + "metrics-exporter-prometheus", "serde", "serde_derive", "sqlx", @@ -1013,6 +1025,32 @@ dependencies = [ "url", ] +[[package]] +name = "hook-janitor" +version = "0.1.0" +dependencies = [ + "async-trait", + "axum", + "envconfig", + "eyre", + "futures", + "hook-common", + "http-body-util", + "metrics", + "metrics-exporter-prometheus", + "rdkafka", + "serde", + "serde_derive", + "serde_json", + "sqlx", + "thiserror", + "tokio", + "tower", + "tracing", + "tracing-subscriber", + "url", +] + [[package]] name = "hook-producer" version = "0.1.0" @@ -1329,6 +1367,18 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "libz-sys" +version = "1.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -1594,6 +1644,27 @@ dependencies = [ "libc", ] 
+[[package]] +name = "num_enum" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "object" version = "0.32.1" @@ -1821,6 +1892,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + [[package]] name = "proc-macro2" version = "1.0.70" @@ -1894,6 +1975,38 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "rdkafka" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f16c17f411935214a5870e40aff9291f8b40a73e97bf8de29e5959c473d5ef33" +dependencies = [ + "futures-channel", + "futures-util", + "libc", + "log", + "rdkafka-sys", + "serde", + "serde_derive", + "serde_json", + "slab", + "tokio", +] + +[[package]] +name = "rdkafka-sys" +version = "4.7.0+2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55e0d2f9ba6253f6ec72385e453294f8618e9e15c2c6aba2a5c01ccf9622d615" +dependencies = [ + "cmake", + "libc", + "libz-sys", + "num_enum", + "openssl-sys", + "pkg-config", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -2619,6 +2732,23 @@ dependencies = [ "tracing", ] +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" @@ -3063,6 +3193,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.5.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c830786f7720c2fd27a1a0e27a709dbd3c4d009b56d098fc742d4f4eab91fe2" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" diff --git a/Cargo.toml b/Cargo.toml index 60a7219..535c242 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,14 +1,24 @@ [workspace] resolver = "2" -members = ["hook-common", "hook-producer", "hook-consumer"] +members = ["hook-common", "hook-producer", "hook-consumer", "hook-janitor"] [workspace.dependencies] +async-trait = "0.1.74" +axum = { version = "0.7.1", features = ["http2"] } chrono = { version = "0.4" } +envconfig = "0.10.0" +eyre = "0.6.9" +futures = { version = "0.3.29" } +http = { version = "0.2" } +http-body-util = "0.1.0" +metrics = "0.21.1" +metrics-exporter-prometheus = "0.12.1" +rdkafka = { version = "0.35.0", features = 
["cmake-build", "ssl"] } +regex = "1.10.2" serde = { version = "1.0" } serde_derive = { version = "1.0" } serde_json = { version = "1.0" } -thiserror = { version = "1.0" } sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-native-tls", @@ -17,14 +27,9 @@ sqlx = { version = "0.7", features = [ "json", "chrono", ] } +thiserror = { version = "1.0" } tokio = { version = "1.34.0", features = ["full"] } -eyre = "0.6.9" +tower = "0.4.13" tracing = "0.1.40" tracing-subscriber = "0.3.18" -envconfig = "0.10.0" -metrics = "0.21.1" -metrics-exporter-prometheus = "0.12.1" -http = { version = "0.2" } url = { version = "2.5.0 " } -tower = "0.4.13" -http-body-util = "0.1.0" diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 24d1a0d..213d2e9 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +axum = { workspace = true, features = ["http2"] } chrono = { workspace = true } http = { workspace = true } +metrics = { workspace = true } +metrics-exporter-prometheus = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } sqlx = { workspace = true } diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs index 3138f08..3b154c8 100644 --- a/hook-common/src/lib.rs +++ b/hook-common/src/lib.rs @@ -1,2 +1,3 @@ +pub mod metrics; pub mod pgqueue; pub mod webhook; diff --git a/hook-producer/src/metrics.rs b/hook-common/src/metrics.rs similarity index 100% rename from hook-producer/src/metrics.rs rename to hook-common/src/metrics.rs diff --git a/hook-janitor/Cargo.toml b/hook-janitor/Cargo.toml new file mode 100644 index 0000000..f23626b --- /dev/null +++ b/hook-janitor/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "hook-janitor" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = { workspace = true } +axum = { workspace = true } +envconfig = { workspace = true } +eyre = { workspace = true } +futures = { workspace = true } +hook-common = { path = "../hook-common" } +http-body-util = { workspace = true } +metrics = { workspace = true } +metrics-exporter-prometheus = { workspace = true } +rdkafka = { workspace = true } +serde = { workspace = true } +serde_derive = { workspace = true } +serde_json = { workspace = true } +sqlx = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } +tower = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +url = { workspace = true } diff --git a/hook-janitor/src/cleanup.rs b/hook-janitor/src/cleanup.rs new file mode 100644 index 0000000..e6e91e0 --- /dev/null +++ b/hook-janitor/src/cleanup.rs @@ -0,0 +1,36 @@ +use async_trait::async_trait; +use std::result::Result; +use std::str::FromStr; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum CleanerError { + #[error("pool creation failed with: {error}")] + PoolCreationError { error: sqlx::Error }, + #[error("invalid cleaner mode")] + InvalidCleanerMode, +} + +// Mode names, used by config/environment parsing to verify the mode is supported. 
+#[derive(Debug)] +pub enum CleanerModeName { + Webhooks, +} + +impl FromStr for CleanerModeName { + type Err = CleanerError; + + fn from_str(s: &str) -> Result { + match s { + "webhooks" => Ok(CleanerModeName::Webhooks), + _ => Err(CleanerError::InvalidCleanerMode), + } + } +} + +// Right now, all this trait does is allow us to call `cleanup` in a loop in `main.rs`. There may +// be other benefits as we build this out, or we could remove it if it doesn't end up being useful. +#[async_trait] +pub trait Cleaner { + async fn cleanup(&self); +} diff --git a/hook-janitor/src/config.rs b/hook-janitor/src/config.rs new file mode 100644 index 0000000..89621a2 --- /dev/null +++ b/hook-janitor/src/config.rs @@ -0,0 +1,66 @@ +use envconfig::Envconfig; + +#[derive(Envconfig)] +pub struct Config { + #[envconfig(from = "BIND_HOST", default = "0.0.0.0")] + pub host: String, + + #[envconfig(from = "BIND_PORT", default = "8000")] + pub port: u16, + + #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] + pub database_url: String, + + #[envconfig(default = "job_queue")] + pub table_name: String, + + #[envconfig(default = "default")] + pub queue_name: String, + + #[envconfig(default = "30")] + pub cleanup_interval_secs: u64, + + #[envconfig(default = "10000")] + pub cleanup_batch_size: u32, + + // The cleanup task needs to have special knowledge of the queue it's cleaning up. This is so it + // can do things like flush the proper app_metrics or plugin_log_entries, and so it knows what + // to expect in the job's payload JSONB column. + #[envconfig(default = "webhooks")] + pub mode: String, + + #[envconfig(nested = true)] + pub kafka: KafkaConfig, +} + +#[derive(Envconfig, Clone)] +pub struct KafkaConfig { + #[envconfig(default = "20")] + pub kafka_producer_linger_ms: u32, // Maximum time between producer batches during low traffic + + #[envconfig(default = "400")] + pub kafka_producer_queue_mib: u32, // Size of the in-memory producer queue in mebibytes + + #[envconfig(default = "20000")] + pub kafka_message_timeout_ms: u32, // Time before we stop retrying producing a message: 20 seconds + + #[envconfig(default = "none")] + pub kafka_compression_codec: String, // none, gzip, snappy, lz4, zstd + + #[envconfig(default = "false")] + pub kafka_tls: bool, + + #[envconfig(default = "app_metrics")] + pub app_metrics_topic: String, + + #[envconfig(default = "plugin_log_entries")] + pub plugin_log_entries_topic: String, + + pub kafka_hosts: String, +} + +impl Config { + pub fn bind(&self) -> String { + format!("{}:{}", self.host, self.port) + } +} diff --git a/hook-janitor/src/handlers/app.rs b/hook-janitor/src/handlers/app.rs new file mode 100644 index 0000000..279fa0e --- /dev/null +++ b/hook-janitor/src/handlers/app.rs @@ -0,0 +1,21 @@ +use axum::{routing, Router}; +use metrics_exporter_prometheus::PrometheusHandle; + +use hook_common::metrics; + +pub fn app(metrics: Option) -> Router { + Router::new() + .route("/", routing::get(index)) + .route( + "/metrics", + routing::get(move || match metrics { + Some(ref recorder_handle) => std::future::ready(recorder_handle.render()), + None => std::future::ready("no metrics recorder installed".to_owned()), + }), + ) + .layer(axum::middleware::from_fn(metrics::track_metrics)) +} + +pub async fn index() -> &'static str { + "rusty-hook janitor" +} diff --git a/hook-janitor/src/handlers/mod.rs b/hook-janitor/src/handlers/mod.rs new file mode 100644 index 0000000..a884c04 --- /dev/null +++ b/hook-janitor/src/handlers/mod.rs @@ -0,0 +1,3 @@ +mod app; 
+
+pub use app::app;
diff --git a/hook-janitor/src/kafka_producer.rs b/hook-janitor/src/kafka_producer.rs
new file mode 100644
index 0000000..4e905b3
--- /dev/null
+++ b/hook-janitor/src/kafka_producer.rs
@@ -0,0 +1,48 @@
+use crate::config::KafkaConfig;
+
+use rdkafka::error::{KafkaError, RDKafkaErrorCode};
+use rdkafka::producer::{DeliveryFuture, FutureProducer, FutureRecord, Producer};
+use rdkafka::util::Timeout;
+use rdkafka::ClientConfig;
+use std::{str::FromStr, time::Duration};
+use tokio::sync::Semaphore;
+use tracing::debug;
+
+// TODO: Take stats recording pieces that we want from `capture-rs`.
+pub struct KafkaContext {}
+
+impl rdkafka::ClientContext for KafkaContext {}
+
+pub async fn create_kafka_producer(
+    config: &KafkaConfig,
+) -> Result<FutureProducer<KafkaContext>, KafkaError> {
+    let mut client_config = ClientConfig::new();
+    client_config
+        .set("bootstrap.servers", &config.kafka_hosts)
+        .set("statistics.interval.ms", "10000")
+        .set("linger.ms", config.kafka_producer_linger_ms.to_string())
+        .set(
+            "message.timeout.ms",
+            config.kafka_message_timeout_ms.to_string(),
+        )
+        .set(
+            "compression.codec",
+            config.kafka_compression_codec.to_owned(),
+        )
+        .set(
+            "queue.buffering.max.kbytes",
+            (config.kafka_producer_queue_mib * 1024).to_string(),
+        );
+
+    if config.kafka_tls {
+        client_config
+            .set("security.protocol", "ssl")
+            .set("enable.ssl.certificate.verification", "false");
+    };
+
+    debug!("rdkafka configuration: {:?}", client_config);
+    let producer: FutureProducer<KafkaContext> =
+        client_config.create_with_context(KafkaContext {})?;
+
+    Ok(producer)
+}
diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs
new file mode 100644
index 0000000..b487fda
--- /dev/null
+++ b/hook-janitor/src/main.rs
@@ -0,0 +1,86 @@
+use axum::Router;
+use cleanup::{Cleaner, CleanerModeName};
+use config::Config;
+use envconfig::Envconfig;
+use eyre::Result;
+use futures::future::{select, Either};
+use kafka_producer::create_kafka_producer;
+use std::{str::FromStr, time::Duration};
+use tokio::sync::Semaphore;
+use webhooks::WebhookCleaner;
+
+use hook_common::metrics;
+
+mod cleanup;
+mod config;
+mod handlers;
+mod kafka_producer;
+mod webhooks;
+
+async fn listen(app: Router, bind: String) -> Result<()> {
+    let listener = tokio::net::TcpListener::bind(bind).await?;
+
+    axum::serve(listener, app).await?;
+
+    Ok(())
+}
+
+async fn cleanup_loop(cleaner: Box<dyn Cleaner>, interval_secs: u64) -> Result<()> {
+    let semaphore = Semaphore::new(1);
+    let mut interval = tokio::time::interval(Duration::from_secs(interval_secs));
+
+    loop {
+        let _permit = semaphore.acquire().await;
+        interval.tick().await;
+        cleaner.cleanup().await;
+        drop(_permit);
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    tracing_subscriber::fmt::init();
+
+    let config = Config::init_from_env().expect("failed to load configuration from env");
+
+    let mode_name = CleanerModeName::from_str(&config.mode)
+        .unwrap_or_else(|_| panic!("invalid cleaner mode: {}", config.mode));
+
+    let cleaner = match mode_name {
+        CleanerModeName::Webhooks => {
+            let kafka_producer = create_kafka_producer(&config.kafka)
+                .await
+                .expect("failed to create kafka producer");
+
+            Box::new(
+                WebhookCleaner::new(
+                    &config.queue_name,
+                    &config.table_name,
+                    &config.database_url,
+                    config.cleanup_batch_size,
+                    kafka_producer,
+                    config.kafka.app_metrics_topic.to_owned(),
+                    config.kafka.plugin_log_entries_topic.to_owned(),
+                )
+                .expect("unable to create webhook cleaner"),
+            )
+        }
+    };
+
+    let cleanup_loop = Box::pin(cleanup_loop(cleaner, config.cleanup_interval_secs));
+
+    let recorder_handle = metrics::setup_metrics_recorder();
+    let app = handlers::app(Some(recorder_handle));
+    let http_server = Box::pin(listen(app, config.bind()));
+
+    match select(http_server, cleanup_loop).await {
+        Either::Left((listen_result, _)) => match listen_result {
+            Ok(_) => {}
+            Err(e) => tracing::error!("failed to start hook-janitor http server, {}", e),
+        },
+        Either::Right((cleanup_result, _)) => match cleanup_result {
+            Ok(_) => {}
+            Err(e) => tracing::error!("hook-janitor cleanup task exited, {}", e),
+        },
+    };
+}
diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs
new file mode 100644
index 0000000..a6cd9ff
--- /dev/null
+++ b/hook-janitor/src/webhooks.rs
@@ -0,0 +1,54 @@
+use async_trait::async_trait;
+
+use rdkafka::producer::FutureProducer;
+use sqlx::postgres::{PgPool, PgPoolOptions};
+
+use crate::cleanup::{Cleaner, CleanerError};
+use crate::kafka_producer::KafkaContext;
+
+pub struct WebhookCleaner {
+    queue_name: String,
+    table_name: String,
+    pg_pool: PgPool,
+    batch_size: u32,
+    kafka_producer: FutureProducer<KafkaContext>,
+    app_metrics_topic: String,
+    plugin_log_entries_topic: String,
+}
+
+impl WebhookCleaner {
+    pub fn new(
+        queue_name: &str,
+        table_name: &str,
+        database_url: &str,
+        batch_size: u32,
+        kafka_producer: FutureProducer<KafkaContext>,
+        app_metrics_topic: String,
+        plugin_log_entries_topic: String,
+    ) -> Result<Self, CleanerError> {
+        let queue_name = queue_name.to_owned();
+        let table_name = table_name.to_owned();
+        let pg_pool = PgPoolOptions::new()
+            .connect_lazy(database_url)
+            .map_err(|error| CleanerError::PoolCreationError { error })?;
+
+        Ok(Self {
+            queue_name,
+            table_name,
+            pg_pool,
+            batch_size,
+            kafka_producer,
+            app_metrics_topic,
+            plugin_log_entries_topic,
+        })
+    }
+}
+
+#[async_trait]
+impl Cleaner for WebhookCleaner {
+    async fn cleanup(&self) {
+        // TODO: collect stats on completed/failed rows
+        // TODO: push metrics about those rows into `app_metrics`
+        // TODO: delete those completed/failed rows
+    }
+}
diff --git a/hook-producer/Cargo.toml b/hook-producer/Cargo.toml
index ef1a24b..f4b1165 100644
--- a/hook-producer/Cargo.toml
+++ b/hook-producer/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-axum = { version = "0.7.1", features = ["http2"] }
+axum = { workspace = true }
 envconfig = { workspace = true }
 eyre = { workspace = true }
 hook-common = { path = "../hook-common" }
diff --git a/hook-producer/src/handlers/app.rs b/hook-producer/src/handlers/app.rs
index 911a04d..1666676 100644
--- a/hook-producer/src/handlers/app.rs
+++ b/hook-producer/src/handlers/app.rs
@@ -1,6 +1,7 @@
 use axum::{routing, Router};
 use metrics_exporter_prometheus::PrometheusHandle;
 
+use hook_common::metrics;
 use hook_common::pgqueue::PgQueue;
 
 use super::webhook;
@@ -16,11 +17,11 @@ pub fn app(pg_pool: PgQueue, metrics: Option<PrometheusHandle>) -> Router {
             }),
         )
         .route("/webhook", routing::post(webhook::post).with_state(pg_pool))
-        .layer(axum::middleware::from_fn(crate::metrics::track_metrics))
+        .layer(axum::middleware::from_fn(metrics::track_metrics))
 }
 
 pub async fn index() -> &'static str {
-    "rusty hook"
+    "rusty-hook producer"
 }
 
 #[cfg(test)]
@@ -55,6 +56,6 @@ mod tests {
         assert_eq!(response.status(), StatusCode::OK);
 
         let body = response.into_body().collect().await.unwrap().to_bytes();
-        assert_eq!(&body[..], b"rusty hook");
+        assert_eq!(&body[..], b"rusty-hook producer");
     }
 }
diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs
index 29da8dd..7c2b73c 100644
--- 
a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -3,11 +3,11 @@ use config::Config; use envconfig::Envconfig; use eyre::Result; +use hook_common::metrics; use hook_common::pgqueue::{PgQueue, RetryPolicy}; mod config; mod handlers; -mod metrics; async fn listen(app: Router, bind: String) -> Result<()> { let listener = tokio::net::TcpListener::bind(bind).await?; @@ -35,7 +35,7 @@ async fn main() { .await .expect("failed to initialize queue"); - let recorder_handle = crate::metrics::setup_metrics_recorder(); + let recorder_handle = metrics::setup_metrics_recorder(); let app = handlers::app(pg_queue, Some(recorder_handle)); From d2d929d95f45c2e4f0649c70a0dcf95655bcc29c Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 14 Dec 2023 13:13:30 -0700 Subject: [PATCH 048/130] Add Kafka message types for app_metrics and plugin_log_entries --- Cargo.lock | 51 +++++++ Cargo.toml | 1 + hook-common/Cargo.toml | 3 + hook-common/src/kafka_messages/app_metrics.rs | 123 +++++++++++++++++ hook-common/src/kafka_messages/mod.rs | 20 +++ hook-common/src/kafka_messages/plugin_logs.rs | 130 ++++++++++++++++++ hook-common/src/lib.rs | 1 + 7 files changed, 329 insertions(+) create mode 100644 hook-common/src/kafka_messages/app_metrics.rs create mode 100644 hook-common/src/kafka_messages/mod.rs create mode 100644 hook-common/src/kafka_messages/plugin_logs.rs diff --git a/Cargo.lock b/Cargo.lock index c86f175..5b0aedc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,6 +30,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + [[package]] name = "allocator-api2" version = "0.2.16" @@ -215,6 +224,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -999,11 +1014,14 @@ dependencies = [ "http 0.2.11", "metrics", "metrics-exporter-prometheus", + "regex", "serde", "serde_derive", + "serde_json", "sqlx", "thiserror", "tokio", + "uuid", ] [[package]] @@ -2016,6 +2034,35 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "regex" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + [[package]] name = "reqwest" version = "0.11.22" @@ -2902,6 +2949,10 @@ name = "uuid" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +dependencies = [ + "atomic", + "getrandom", +] [[package]] name = "valuable" diff --git a/Cargo.toml b/Cargo.toml index 535c242..faf8644 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -33,3 +33,4 @@ tower = "0.4.13" tracing = "0.1.40" tracing-subscriber = "0.3.18" url = { version = "2.5.0 " } +uuid = { version = "1.6.1", features = ["v7"] } diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 213d2e9..2c32d9d 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -11,10 +11,13 @@ chrono = { workspace = true } http = { workspace = true } metrics = { workspace = true } metrics-exporter-prometheus = { workspace = true } +regex = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } +serde_json = { workspace = true } sqlx = { workspace = true } thiserror = { workspace = true } +uuid = { workspace = true } [dev-dependencies] tokio = { workspace = true } # We need a runtime for async tests diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs new file mode 100644 index 0000000..a753064 --- /dev/null +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -0,0 +1,123 @@ +use chrono::{DateTime, Utc}; +use serde::{Serialize, Serializer}; +use uuid::Uuid; + +use super::{serialize_datetime, serialize_uuid}; + +#[derive(Serialize)] +pub enum AppMetricCategory { + ProcessEvent, + OnEvent, + ScheduledTask, + Webhook, + ComposeWebhook, +} + +#[derive(Serialize)] +pub enum ErrorType { + Timeout, + Connection, + HttpStatus(u16), +} + +#[derive(Serialize)] +pub struct ErrorDetails { + pub error: Error, + // TODO: The plugin-server sends the entire raw event with errors. In order to do this, we'll + // have to pass the entire event when we enqueue items, and store it in the Parameters JSONB + // column. We should see if it's possible to work around this before we commit to it. + // + // event: Value, +} + +#[derive(Serialize)] +pub struct Error { + pub name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, + // TODO: Realistically, it doesn't seem likely that we'll generate Rust stack traces and put + // them here. I think this was more useful in plugin-server when the stack could come from + // plugin code. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub stack: Option, +} + +#[derive(Serialize)] +pub struct AppMetric { + #[serde(serialize_with = "serialize_datetime")] + pub timestamp: DateTime, + pub team_id: u32, + pub plugin_config_id: u32, + #[serde(skip_serializing_if = "Option::is_none")] + pub job_id: Option, + #[serde(serialize_with = "serialize_category")] + pub category: AppMetricCategory, + pub successes: u32, + pub successes_on_retry: u32, + pub failures: u32, + #[serde(serialize_with = "serialize_uuid")] + pub error_uuid: Uuid, + #[serde(serialize_with = "serialize_error_type")] + pub error_type: ErrorType, + pub error_details: Error, +} + +fn serialize_category(category: &AppMetricCategory, serializer: S) -> Result +where + S: Serializer, +{ + let category_str = match category { + AppMetricCategory::ProcessEvent => "processEvent", + AppMetricCategory::OnEvent => "onEvent", + AppMetricCategory::ScheduledTask => "scheduledTask", + AppMetricCategory::Webhook => "webhook", + AppMetricCategory::ComposeWebhook => "composeWebhook", + }; + serializer.serialize_str(category_str) +} + +fn serialize_error_type(error_type: &ErrorType, serializer: S) -> Result +where + S: Serializer, +{ + let error_type = match error_type { + ErrorType::Connection => "Connection Error".to_owned(), + ErrorType::Timeout => "Timeout".to_owned(), + ErrorType::HttpStatus(s) => format!("HTTP Status: {}", s), + }; + serializer.serialize_str(&error_type) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_app_metric_serialization() { + use chrono::prelude::*; + + let app_metric = AppMetric { + timestamp: Utc.with_ymd_and_hms(2023, 12, 14, 12, 2, 0).unwrap(), + team_id: 123, + plugin_config_id: 456, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 10, + successes_on_retry: 0, + failures: 2, + error_uuid: Uuid::parse_str("550e8400-e29b-41d4-a716-446655447777").unwrap(), + error_type: ErrorType::Connection, + error_details: Error { + name: "FooError".to_owned(), + message: Some("Error Message".to_owned()), + stack: None, + }, + }; + + let serialized_json = serde_json::to_string(&app_metric).unwrap(); + + let expected_json = r#"{"timestamp":"2023-12-14 12:02:00","team_id":123,"plugin_config_id":456,"category":"webhook","successes":10,"successes_on_retry":0,"failures":2,"error_uuid":"550e8400-e29b-41d4-a716-446655447777","error_type":"Connection Error","error_details":{"name":"FooError","message":"Error Message"}}"#; + + assert_eq!(serialized_json, expected_json); + } +} diff --git a/hook-common/src/kafka_messages/mod.rs b/hook-common/src/kafka_messages/mod.rs new file mode 100644 index 0000000..1449f56 --- /dev/null +++ b/hook-common/src/kafka_messages/mod.rs @@ -0,0 +1,20 @@ +pub mod app_metrics; +pub mod plugin_logs; + +use chrono::{DateTime, Utc}; +use serde::Serializer; +use uuid::Uuid; + +fn serialize_uuid(uuid: &Uuid, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&uuid.to_string()) +} + +fn serialize_datetime(datetime: &DateTime, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&datetime.format("%Y-%m-%d %H:%M:%S%.f").to_string()) +} diff --git a/hook-common/src/kafka_messages/plugin_logs.rs b/hook-common/src/kafka_messages/plugin_logs.rs new file mode 100644 index 0000000..8f8bb43 --- /dev/null +++ b/hook-common/src/kafka_messages/plugin_logs.rs @@ -0,0 +1,130 @@ +use chrono::{DateTime, Utc}; +use serde::{Serialize, Serializer}; +use uuid::Uuid; + +use super::{serialize_datetime, serialize_uuid}; + 
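An aside on the `%Y-%m-%d %H:%M:%S%.f` format that `serialize_datetime` uses above: it writes a plain space-separated timestamp rather than RFC 3339. A minimal standalone sketch, assuming only `chrono`, of what that format produces:

```rust
use chrono::{TimeZone, Utc};

fn main() {
    let dt = Utc.with_ymd_and_hms(2023, 12, 14, 12, 2, 0).unwrap();
    // `%.f` prints a fractional part only when it is non-zero, so a whole
    // second serializes as plain "2023-12-14 12:02:00", the exact shape the
    // serialization tests in this patch assert against.
    assert_eq!(
        dt.format("%Y-%m-%d %H:%M:%S%.f").to_string(),
        "2023-12-14 12:02:00"
    );
}
```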
+#[allow(dead_code)] +#[derive(Serialize)] +pub enum PluginLogEntrySource { + System, + Plugin, + Console, +} + +#[allow(dead_code)] +#[derive(Serialize)] +pub enum PluginLogEntryType { + Debug, + Log, + Info, + Warn, + Error, +} + +#[derive(Serialize)] +pub struct PluginLogEntry { + #[serde(serialize_with = "serialize_source")] + pub source: PluginLogEntrySource, + #[serde(rename = "type", serialize_with = "serialize_type")] + pub type_: PluginLogEntryType, + #[serde(serialize_with = "serialize_uuid")] + pub id: Uuid, + pub team_id: u32, + pub plugin_id: u32, + pub plugin_config_id: u32, + #[serde(serialize_with = "serialize_datetime")] + pub timestamp: DateTime, + #[serde(serialize_with = "serialize_message")] + pub message: String, + #[serde(serialize_with = "serialize_uuid")] + pub instance_id: Uuid, +} + +fn serialize_source(source: &PluginLogEntrySource, serializer: S) -> Result +where + S: Serializer, +{ + let source_str = match source { + PluginLogEntrySource::System => "SYSTEM", + PluginLogEntrySource::Plugin => "PLUGIN", + PluginLogEntrySource::Console => "CONSOLE", + }; + serializer.serialize_str(source_str) +} + +fn serialize_type(type_: &PluginLogEntryType, serializer: S) -> Result +where + S: Serializer, +{ + let type_str = match type_ { + PluginLogEntryType::Debug => "DEBUG", + PluginLogEntryType::Log => "LOG", + PluginLogEntryType::Info => "INFO", + PluginLogEntryType::Warn => "WARN", + PluginLogEntryType::Error => "ERROR", + }; + serializer.serialize_str(type_str) +} + +fn serialize_message(msg: &String, serializer: S) -> Result +where + S: Serializer, +{ + if msg.len() > 50_000 { + return Err(serde::ser::Error::custom( + "Message is too long for ClickHouse", + )); + } + + serializer.serialize_str(msg) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_plugin_log_entry_serialization() { + use chrono::prelude::*; + + let log_entry = PluginLogEntry { + source: PluginLogEntrySource::Plugin, + type_: PluginLogEntryType::Warn, + id: Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap(), + team_id: 4, + plugin_id: 5, + plugin_config_id: 6, + timestamp: Utc.with_ymd_and_hms(2023, 12, 14, 12, 2, 0).unwrap(), + message: "My message!".to_string(), + instance_id: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(), + }; + + let serialized_json = serde_json::to_string(&log_entry).unwrap(); + + assert_eq!( + serialized_json, + r#"{"source":"PLUGIN","type":"WARN","id":"550e8400-e29b-41d4-a716-446655440000","team_id":4,"plugin_id":5,"plugin_config_id":6,"timestamp":"2023-12-14 12:02:00","message":"My message!","instance_id":"00000000-0000-0000-0000-000000000000"}"# + ); + } + + #[test] + fn test_plugin_log_entry_message_too_long() { + use chrono::prelude::*; + + let log_entry = PluginLogEntry { + source: PluginLogEntrySource::Plugin, + type_: PluginLogEntryType::Warn, + id: Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap(), + team_id: 4, + plugin_id: 5, + plugin_config_id: 6, + timestamp: Utc.with_ymd_and_hms(2023, 12, 14, 12, 2, 0).unwrap(), + message: "My message!".repeat(10_000).to_string(), + instance_id: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(), + }; + + let err = serde_json::to_string(&log_entry).unwrap_err(); + assert_eq!(err.to_string(), "Message is too long for ClickHouse"); + } +} diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs index 3b154c8..7d9ef37 100644 --- a/hook-common/src/lib.rs +++ b/hook-common/src/lib.rs @@ -1,3 +1,4 @@ +pub mod kafka_messages; pub mod metrics; pub mod 
pgqueue; pub mod webhook; From a938a107faf8f48cfa176887177b52971f81de14 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Mon, 18 Dec 2023 09:40:15 -0700 Subject: [PATCH 049/130] Squelch clippy complaints --- hook-janitor/src/kafka_producer.rs | 7 ++----- hook-janitor/src/webhooks.rs | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/hook-janitor/src/kafka_producer.rs b/hook-janitor/src/kafka_producer.rs index 4e905b3..4845e94 100644 --- a/hook-janitor/src/kafka_producer.rs +++ b/hook-janitor/src/kafka_producer.rs @@ -1,11 +1,8 @@ use crate::config::KafkaConfig; -use rdkafka::error::{KafkaError, RDKafkaErrorCode}; -use rdkafka::producer::{DeliveryFuture, FutureProducer, FutureRecord, Producer}; -use rdkafka::util::Timeout; +use rdkafka::error::KafkaError; +use rdkafka::producer::FutureProducer; use rdkafka::ClientConfig; -use std::{str::FromStr, time::Duration}; -use tokio::sync::Semaphore; use tracing::debug; // TODO: Take stats recording pieces that we want from `capture-rs`. diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index a6cd9ff..e8895f1 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -6,6 +6,7 @@ use sqlx::postgres::{PgPool, PgPoolOptions}; use crate::cleanup::{Cleaner, CleanerError}; use crate::kafka_producer::KafkaContext; +#[allow(dead_code)] pub struct WebhookCleaner { queue_name: String, table_name: String, From 5a5c3bed8c3f6c78bbe869b8a83cbd288cd90eb2 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Mon, 18 Dec 2023 10:32:35 -0700 Subject: [PATCH 050/130] Remove pointless Result --- hook-janitor/src/main.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs index b487fda..46223aa 100644 --- a/hook-janitor/src/main.rs +++ b/hook-janitor/src/main.rs @@ -25,7 +25,7 @@ async fn listen(app: Router, bind: String) -> Result<()> { Ok(()) } -async fn cleanup_loop(cleaner: Box, interval_secs: u64) -> Result<()> { +async fn cleanup_loop(cleaner: Box, interval_secs: u64) { let semaphore = Semaphore::new(1); let mut interval = tokio::time::interval(Duration::from_secs(interval_secs)); @@ -78,9 +78,8 @@ async fn main() { Ok(_) => {} Err(e) => tracing::error!("failed to start hook-janitor http server, {}", e), }, - Either::Right((cleanup_result, _)) => match cleanup_result { - Ok(_) => {} - Err(e) => tracing::error!("hook-janitor cleanup task exited, {}", e), - }, + Either::Right((_, _)) => { + tracing::error!("hook-janitor cleanup task exited") + } }; } From 769c8f22fc83f348ef44a805111b2395e4322e9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Mon, 18 Dec 2023 11:34:38 +0100 Subject: [PATCH 051/130] refactor: Add a metadata field to job queue --- hook-common/src/pgqueue.rs | 110 ++++++++++++------ hook-common/src/webhook.rs | 14 +-- hook-consumer/src/consumer.rs | 41 ++++--- hook-producer/src/handlers/webhook.rs | 106 +++++++++++------ migrations/20231129172339_job_queue_table.sql | 5 +- 5 files changed, 179 insertions(+), 97 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 47938ec..5288ade 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -74,9 +74,12 @@ impl FromStr for JobStatus { /// JobParameters are stored and read to and from a JSONB field, so we accept anything that fits `sqlx::types::Json`. 
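Concretely, anything serde-serializable can serve as a parameters or metadata payload, since `sqlx::types::Json` maps it to and from a JSONB column. A hedged caller-side sketch of the four-argument `NewJob::new` this patch introduces; the payload types and values are illustrative, not from the patch:

```rust
use hook_common::pgqueue::NewJob;
use serde::Serialize;

// Hypothetical payload types; any serde-serializable structs will do.
#[derive(Serialize)]
struct MyParameters {
    url: String,
}

#[derive(Serialize)]
struct MyMetadata {
    team_id: u32,
}

fn main() {
    let parameters = MyParameters {
        url: "https://example.com/hook".to_owned(),
    };
    let metadata = MyMetadata { team_id: 1 };
    // Argument order after this patch: max_attempts, metadata, parameters, target.
    let new_job = NewJob::new(3, metadata, parameters, "example.com");
    // `PgQueue::enqueue(new_job)` would then serialize both payloads into
    // the `metadata` and `parameters` JSONB columns.
    drop(new_job);
}
```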
pub type JobParameters = sqlx::types::Json; +/// JobMetadata are stored and read to and from a JSONB field, so we accept anything that fits `sqlx::types::Json`. +pub type JobMetadata = sqlx::types::Json; + /// A Job to be executed by a worker dequeueing a PgQueue. #[derive(sqlx::FromRow, Debug)] -pub struct Job { +pub struct Job { /// A unique id identifying a job. pub id: i64, /// A number corresponding to the current job attempt. @@ -89,6 +92,8 @@ pub struct Job { pub created_at: chrono::DateTime, /// The current job's number of max attempts. pub max_attempts: i32, + /// Arbitrary job metadata stored as JSON. + pub metadata: JobMetadata, /// Arbitrary job parameters stored as JSON. pub parameters: JobParameters, /// The queue this job belongs to. @@ -99,7 +104,7 @@ pub struct Job { pub target: String, } -impl Job { +impl Job { /// Return true if this job attempt is greater or equal to the maximum number of possible attempts. pub fn is_gte_max_attempts(&self) -> bool { self.attempt >= self.max_attempts @@ -146,19 +151,19 @@ impl Job { /// A Job that can be updated in PostgreSQL. #[derive(Debug)] -pub struct PgJob { - pub job: Job, +pub struct PgJob { + pub job: Job, pub table: String, pub connection: sqlx::pool::PoolConnection, pub retry_policy: RetryPolicy, } -impl PgJob { +impl PgJob { pub async fn retry( mut self, error: E, preferred_retry_interval: Option, - ) -> Result, PgJobError>> { + ) -> Result, PgJobError>> { if self.job.is_gte_max_attempts() { return Err(PgJobError::RetryInvalidError { job: self, @@ -203,7 +208,7 @@ RETURNING Ok(retryable_job) } - pub async fn complete(mut self) -> Result>> { + pub async fn complete(mut self) -> Result>> { let completed_job = self.job.complete(); let base_query = format!( @@ -238,7 +243,7 @@ RETURNING pub async fn fail( mut self, error: E, - ) -> Result, PgJobError>> { + ) -> Result, PgJobError>> { let failed_job = self.job.fail(error); let base_query = format!( @@ -277,19 +282,19 @@ RETURNING /// A Job within an open PostgreSQL transaction. /// This implementation allows 'hiding' the job from any other workers running SKIP LOCKED queries. #[derive(Debug)] -pub struct PgTransactionJob<'c, J> { - pub job: Job, +pub struct PgTransactionJob<'c, J, M> { + pub job: Job, pub table: String, pub transaction: sqlx::Transaction<'c, sqlx::postgres::Postgres>, pub retry_policy: RetryPolicy, } -impl<'c, J> PgTransactionJob<'c, J> { +impl<'c, J, M> PgTransactionJob<'c, J, M> { pub async fn retry( mut self, error: E, preferred_retry_interval: Option, - ) -> Result, PgJobError>> { + ) -> Result, PgJobError>> { if self.job.is_gte_max_attempts() { return Err(PgJobError::RetryInvalidError { job: self, @@ -343,7 +348,9 @@ RETURNING Ok(retryable_job) } - pub async fn complete(mut self) -> Result>> { + pub async fn complete( + mut self, + ) -> Result>> { let completed_job = self.job.complete(); let base_query = format!( @@ -386,7 +393,7 @@ RETURNING pub async fn fail( mut self, error: E, - ) -> Result, PgJobError>> { + ) -> Result, PgJobError>> { let failed_job = self.job.fail(error); let base_query = format!( @@ -461,19 +468,22 @@ pub struct FailedJob { } /// A NewJob to be enqueued into a PgQueue. -pub struct NewJob { +pub struct NewJob { /// The maximum amount of attempts this NewJob has to complete. pub max_attempts: i32, /// The JSON-deserializable parameters for this NewJob. + pub metadata: JobMetadata, + /// The JSON-deserializable parameters for this NewJob. pub parameters: JobParameters, /// The target of the NewJob. E.g. 
an endpoint or service we are trying to reach. pub target: String, } -impl NewJob { - pub fn new(max_attempts: i32, parameters: J, target: &str) -> Self { +impl NewJob { + pub fn new(max_attempts: i32, metadata: M, parameters: J, target: &str) -> Self { Self { max_attempts, + metadata: sqlx::types::Json(metadata), parameters: sqlx::types::Json(parameters), target: target.to_owned(), } @@ -583,10 +593,11 @@ impl PgQueue { /// Dequeue a Job from this PgQueue to work on it. pub async fn dequeue< J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, + M: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, >( &self, attempted_by: &str, - ) -> PgQueueResult>> { + ) -> PgQueueResult>> { let mut connection = self .pool .acquire() @@ -628,7 +639,7 @@ RETURNING &self.table ); - let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) + let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) .bind(&self.name) .bind(attempted_by) .fetch_one(&mut *connection) @@ -662,10 +673,11 @@ RETURNING pub async fn dequeue_tx< 'a, J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, + M: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, >( &self, attempted_by: &str, - ) -> PgQueueResult>> { + ) -> PgQueueResult>> { let mut tx = self .pool .begin() @@ -707,7 +719,7 @@ RETURNING &self.table ); - let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) + let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) .bind(&self.name) .bind(attempted_by) .fetch_one(&mut *tx) @@ -732,23 +744,27 @@ RETURNING /// Enqueue a Job into this PgQueue. /// We take ownership of NewJob to enforce a specific NewJob is only enqueued once. - pub async fn enqueue( + pub async fn enqueue< + J: serde::Serialize + std::marker::Sync, + M: serde::Serialize + std::marker::Sync, + >( &self, - job: NewJob, + job: NewJob, ) -> PgQueueResult<()> { // TODO: Escaping. I think sqlx doesn't support identifiers. 
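// (PostgreSQL bind parameters can only stand in for values, never for
// identifiers such as table names, which is why the query string is
// assembled with format!() below instead of bound.)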
let base_query = format!( r#" INSERT INTO {} - (attempt, created_at, scheduled_at, max_attempts, parameters, queue, status, target) + (attempt, created_at, scheduled_at, max_attempts, metadata, parameters, queue, status, target) VALUES - (0, NOW(), NOW(), $1, $2, $3, 'available'::job_status, $4) + (0, NOW(), NOW(), $1, $2, $3, $4, 'available'::job_status, $5) "#, &self.table ); sqlx::query(&base_query) .bind(job.max_attempts) + .bind(&job.metadata) .bind(&job.parameters) .bind(&self.name) .bind(&job.target) @@ -767,6 +783,23 @@ VALUES mod tests { use super::*; + #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] + struct JobMetadata { + team_id: u32, + plugin_config_id: u32, + plugin_id: u32, + } + + impl Default for JobMetadata { + fn default() -> Self { + Self { + team_id: 0, + plugin_config_id: 1, + plugin_id: 2, + } + } + } + #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] struct JobParameters { method: String, @@ -798,8 +831,9 @@ mod tests { async fn test_can_dequeue_job() { let job_target = job_target(); let job_parameters = JobParameters::default(); + let job_metadata = JobMetadata::default(); let worker_id = worker_id(); - let new_job = NewJob::new(1, job_parameters, &job_target); + let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); let queue = PgQueue::new( "test_can_dequeue_job", @@ -812,7 +846,7 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); - let pg_job: PgJob = queue + let pg_job: PgJob = queue .dequeue(&worker_id) .await .expect("failed to dequeue job") @@ -839,7 +873,7 @@ mod tests { .await .expect("failed to connect to local test postgresql database"); - let pg_job: Option> = queue + let pg_job: Option> = queue .dequeue(&worker_id) .await .expect("failed to dequeue job"); @@ -850,9 +884,10 @@ mod tests { #[tokio::test] async fn test_can_dequeue_tx_job() { let job_target = job_target(); + let job_metadata = JobMetadata::default(); let job_parameters = JobParameters::default(); let worker_id = worker_id(); - let new_job = NewJob::new(1, job_parameters, &job_target); + let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); let queue = PgQueue::new( "test_can_dequeue_tx_job", @@ -865,7 +900,7 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); - let tx_job: PgTransactionJob<'_, JobParameters> = queue + let tx_job: PgTransactionJob<'_, JobParameters, JobMetadata> = queue .dequeue_tx(&worker_id) .await .expect("failed to dequeue job") @@ -875,6 +910,7 @@ mod tests { assert!(tx_job.job.attempted_by.contains(&worker_id)); assert_eq!(tx_job.job.attempted_by.len(), 1); assert_eq!(tx_job.job.max_attempts, 1); + assert_eq!(*tx_job.job.metadata.as_ref(), JobMetadata::default()); assert_eq!(*tx_job.job.parameters.as_ref(), JobParameters::default()); assert_eq!(tx_job.job.status, JobStatus::Running); assert_eq!(tx_job.job.target, job_target); @@ -892,7 +928,7 @@ mod tests { .await .expect("failed to connect to local test postgresql database"); - let tx_job: Option> = queue + let tx_job: Option> = queue .dequeue_tx(&worker_id) .await .expect("failed to dequeue job"); @@ -904,8 +940,9 @@ mod tests { async fn test_can_retry_job_with_remaining_attempts() { let job_target = job_target(); let job_parameters = JobParameters::default(); + let job_metadata = JobMetadata::default(); let worker_id = worker_id(); - let new_job = NewJob::new(2, job_parameters, &job_target); + let new_job = NewJob::new(2, job_metadata, job_parameters, &job_target); let retry_policy = 
RetryPolicy { backoff_coefficient: 0, initial_interval: time::Duration::from_secs(0), @@ -922,7 +959,7 @@ mod tests { .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: PgJob = queue + let job: PgJob = queue .dequeue(&worker_id) .await .expect("failed to dequeue job") @@ -931,7 +968,7 @@ mod tests { .retry("a very reasonable failure reason", None) .await .expect("failed to retry job"); - let retried_job: PgJob = queue + let retried_job: PgJob = queue .dequeue(&worker_id) .await .expect("failed to dequeue job") @@ -954,8 +991,9 @@ mod tests { async fn test_cannot_retry_job_without_remaining_attempts() { let job_target = job_target(); let job_parameters = JobParameters::default(); + let job_metadata = JobMetadata::default(); let worker_id = worker_id(); - let new_job = NewJob::new(1, job_parameters, &job_target); + let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); let retry_policy = RetryPolicy { backoff_coefficient: 0, initial_interval: time::Duration::from_secs(0), @@ -973,7 +1011,7 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: PgJob = queue + let job: PgJob = queue .dequeue(&worker_id) .await .expect("failed to dequeue job") diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index b17959c..64968fc 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -124,16 +124,14 @@ pub struct WebhookJobParameters { pub headers: collections::HashMap, pub method: HttpMethod, pub url: String, +} - // These should be set if the Webhook is associated with a plugin `composeWebhook` invocation. +/// `JobParameters` required for the `WebhookConsumer` to execute a webhook. +/// These parameters should match the exported Webhook interface that PostHog plugins. +/// implement. See: https://github.com/PostHog/plugin-scaffold/blob/main/src/types.ts#L15. +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub struct WebhookJobMetadata { pub team_id: Option, pub plugin_id: Option, pub plugin_config_id: Option, - - #[serde(default = "default_max_attempts")] - pub max_attempts: i32, -} - -fn default_max_attempts() -> i32 { - 3 } diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 59b8e9f..9e4b3b7 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -4,7 +4,7 @@ use std::time; use async_std::task; use hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob}; -use hook_common::webhook::{HttpMethod, WebhookJobParameters}; +use hook_common::webhook::{HttpMethod, WebhookJobMetadata, WebhookJobParameters}; use http::StatusCode; use reqwest::header; use tokio::sync; @@ -57,7 +57,8 @@ impl<'p> WebhookConsumer<'p> { /// Wait until a job becomes available in our queue. async fn wait_for_job<'a>( &self, - ) -> Result, WebhookConsumerError> { + ) -> Result, WebhookConsumerError> + { loop { if let Some(job) = self.queue.dequeue_tx(&self.name).await? { return Ok(job); @@ -102,7 +103,7 @@ impl<'p> WebhookConsumer<'p> { /// * `request_timeout`: A timeout for the HTTP request. 
async fn process_webhook_job( client: reqwest::Client, - webhook_job: PgTransactionJob<'_, WebhookJobParameters>, + webhook_job: PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata>, ) -> Result<(), WebhookConsumerError> { match send_webhook( client, @@ -261,9 +262,10 @@ mod tests { queue: &PgQueue, max_attempts: i32, job_parameters: WebhookJobParameters, + job_metadata: WebhookJobMetadata, ) -> Result<(), PgQueueError> { let job_target = job_parameters.url.to_owned(); - let new_job = NewJob::new(max_attempts, job_parameters, &job_target); + let new_job = NewJob::new(max_attempts, job_metadata, job_parameters, &job_target); queue.enqueue(new_job).await?; Ok(()) } @@ -308,25 +310,29 @@ mod tests { .await .expect("failed to connect to PG"); - let webhook_job = WebhookJobParameters { + let webhook_job_parameters = WebhookJobParameters { body: "a webhook job body. much wow.".to_owned(), headers: collections::HashMap::new(), method: HttpMethod::POST, url: "localhost".to_owned(), - - team_id: Some(1), - plugin_id: Some(2), - plugin_config_id: Some(3), - - max_attempts: 1, + }; + let webhook_job_metadata = WebhookJobMetadata { + team_id: None, + plugin_id: None, + plugin_config_id: None, }; // enqueue takes ownership of the job enqueued to avoid bugs that can cause duplicate jobs. // Normally, a separate application would be enqueueing jobs for us to consume, so no ownership // conflicts would arise. However, in this test we need to do the enqueueing ourselves. // So, we clone the job to keep it around and assert the values returned by wait_for_job. - enqueue_job(&queue, 1, webhook_job.clone()) - .await - .expect("failed to enqueue job"); + enqueue_job( + &queue, + 1, + webhook_job_parameters.clone(), + webhook_job_metadata, + ) + .await + .expect("failed to enqueue job"); let consumer = WebhookConsumer::new( &worker_id, &queue, @@ -344,9 +350,12 @@ mod tests { assert!(consumed_job.job.attempted_by.contains(&worker_id)); assert_eq!(consumed_job.job.attempted_by.len(), 1); assert_eq!(consumed_job.job.max_attempts, 1); - assert_eq!(*consumed_job.job.parameters.as_ref(), webhook_job); + assert_eq!( + *consumed_job.job.parameters.as_ref(), + webhook_job_parameters + ); assert_eq!(consumed_job.job.status, JobStatus::Running); - assert_eq!(consumed_job.job.target, webhook_job.url); + assert_eq!(consumed_job.job.target, webhook_job_parameters.url); consumed_job .complete() diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs index 7de1126..3947320 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-producer/src/handlers/webhook.rs @@ -1,5 +1,5 @@ use axum::{extract::State, http::StatusCode, Json}; -use hook_common::webhook::WebhookJobParameters; +use hook_common::webhook::{WebhookJobMetadata, WebhookJobParameters}; use serde_derive::Deserialize; use url::Url; @@ -15,13 +15,27 @@ pub struct WebhookPostResponse { error: Option, } +/// The body of a request made to create a webhook Job. 
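Putting the pieces together, a request to the producer's `/webhook` endpoint now nests both payloads. A rough sketch of the JSON body, with all values invented; `max_attempts` can be omitted and defaults to 3:

```rust
use serde_json::json;

fn main() {
    // Shape of a WebhookPostRequestBody as the producer would deserialize it.
    let request_body = json!({
        "parameters": {
            "body": "a webhook job body. much wow.",
            "headers": { "Content-Type": "application/json" },
            "method": "POST",
            "url": "https://example.com/hook",
        },
        "metadata": {
            "team_id": 1,
            "plugin_id": 2,
            "plugin_config_id": 3,
        },
        "max_attempts": 3,
    });
    println!("{request_body}");
}
```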
+#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub struct WebhookPostRequestBody { + parameters: WebhookJobParameters, + metadata: WebhookJobMetadata, + + #[serde(default = "default_max_attempts")] + max_attempts: u32, +} + +fn default_max_attempts() -> u32 { + 3 +} + pub async fn post( State(pg_queue): State, - Json(payload): Json, + Json(payload): Json, ) -> Result, (StatusCode, Json)> { debug!("received payload: {:?}", payload); - if payload.body.len() > MAX_BODY_SIZE { + if payload.parameters.body.len() > MAX_BODY_SIZE { return Err(( StatusCode::BAD_REQUEST, Json(WebhookPostResponse { @@ -30,8 +44,22 @@ pub async fn post( )); } - let url_hostname = get_hostname(&payload.url)?; - let job = NewJob::new(payload.max_attempts, payload, url_hostname.as_str()); + let url_hostname = get_hostname(&payload.parameters.url)?; + // We could cast to i32, but this ensures we are not wrapping. + let max_attempts = i32::try_from(payload.max_attempts).map_err(|_| { + ( + StatusCode::BAD_REQUEST, + Json(WebhookPostResponse { + error: Some("invalid number of max attempts".to_owned()), + }), + ) + })?; + let job = NewJob::new( + max_attempts, + payload.metadata, + payload.parameters, + url_hostname.as_str(), + ); pg_queue.enqueue(job).await.map_err(internal_error)?; @@ -74,6 +102,8 @@ fn get_hostname(url_str: &str) -> Result Date: Mon, 18 Dec 2023 11:43:11 +0100 Subject: [PATCH 052/130] fix: Update webhook.rs docs --- hook-common/src/webhook.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index 64968fc..ce7d3ae 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -126,9 +126,8 @@ pub struct WebhookJobParameters { pub url: String, } -/// `JobParameters` required for the `WebhookConsumer` to execute a webhook. -/// These parameters should match the exported Webhook interface that PostHog plugins. -/// implement. See: https://github.com/PostHog/plugin-scaffold/blob/main/src/types.ts#L15. +/// `JobMetadata` required for the `WebhookConsumer` to execute a webhook. +/// These should be set if the Webhook is associated with a plugin `composeWebhook` invocation. 
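Note the handler's remark about wrapping: converting the user-supplied `u32` with `i32::try_from` rather than an `as` cast is what turns overflow into a 400 response instead of silent wraparound. A quick illustration:

```rust
fn main() {
    // A plain `as` cast wraps silently: u32::MAX becomes -1.
    assert_eq!(u32::MAX as i32, -1);
    // `try_from` surfaces the overflow instead, letting the handler answer
    // with a 400 rather than enqueueing a job with a negative attempt budget.
    assert!(i32::try_from(u32::MAX).is_err());
    assert_eq!(i32::try_from(3u32).unwrap(), 3);
}
```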
#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct WebhookJobMetadata { pub team_id: Option, From 491c2697d29aa995e972478964b4d8cfc85bff20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Mon, 18 Dec 2023 12:08:08 +0100 Subject: [PATCH 053/130] feat: Use new WebhookJobError struct for error reporting --- Cargo.lock | 156 +++++++++--------- Cargo.toml | 1 + hook-common/Cargo.toml | 1 + hook-common/src/kafka_messages/app_metrics.rs | 10 +- hook-common/src/kafka_messages/mod.rs | 4 +- hook-common/src/webhook.rs | 116 +++++++++++++ hook-consumer/Cargo.toml | 2 +- hook-consumer/src/consumer.rs | 117 +++++++------ hook-consumer/src/error.rs | 27 +-- hook-consumer/src/main.rs | 4 +- 10 files changed, 294 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b0aedc..ba9f4bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,13 +100,13 @@ dependencies = [ [[package]] name = "async-global-executor" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4353121d5644cdf2beb5726ab752e79a8db1ebb52031770ec47db31d245526" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.1.1", "async-executor", - "async-io 2.2.1", + "async-io 2.2.2", "async-lock 3.2.0", "blocking", "futures-lite 2.1.0", @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" dependencies = [ "async-lock 3.2.0", "cfg-if", @@ -146,7 +146,7 @@ dependencies = [ "futures-lite 2.1.0", "parking", "polling 3.3.1", - "rustix 0.38.27", + "rustix 0.38.28", "slab", "tracing", "windows-sys 0.52.0", @@ -200,9 +200,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.5.0" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" +checksum = "e1d90cd0b264dfdd8eb5bad0a2c217c1f88fa96a8573f40e7b12de23fb468f46" [[package]] name = "async-trait" @@ -212,7 +212,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -265,7 +265,7 @@ dependencies = [ "http 1.0.0", "http-body 1.0.0", "http-body-util", - "hyper 1.0.1", + "hyper 1.1.0", "hyper-util", "itoa", "matchit", @@ -439,9 +439,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "core-foundation" @@ -485,22 +485,21 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.8" +version 
= "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153" dependencies = [ "cfg-if", "crossbeam-utils", @@ -508,9 +507,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" dependencies = [ "cfg-if", ] @@ -648,9 +647,9 @@ dependencies = [ [[package]] name = "eyre" -version = "0.6.10" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bbb8258be8305fb0237d7b295f47bb24ff1b136a535f473baf40e70468515aa" +checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" dependencies = [ "indenter", "once_cell", @@ -813,7 +812,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -980,9 +979,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac", ] @@ -998,11 +997,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1015,6 +1014,7 @@ dependencies = [ "metrics", "metrics-exporter-prometheus", "regex", + "reqwest", "serde", "serde_derive", "serde_json", @@ -1115,9 +1115,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.11", @@ -1161,9 +1161,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -1171,12 +1171,12 @@ dependencies = [ "futures-util", "h2 0.3.22", "http 0.2.11", - "http-body 0.4.5", + "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -1185,9 +1185,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f9214f3e703236b221f1a9cd88ec8b4adfa5296de01ab96216361f4692f56" +checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" 
dependencies = [ "bytes", "futures-channel", @@ -1209,7 +1209,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.27", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", @@ -1226,7 +1226,7 @@ dependencies = [ "futures-util", "http 1.0.0", "http-body 1.0.0", - "hyper 1.0.1", + "hyper 1.1.0", "pin-project-lite", "socket2 0.5.5", "tokio", @@ -1331,9 +1331,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" @@ -1364,9 +1364,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.150" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libm" @@ -1481,12 +1481,12 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +checksum = "1d4fa7ce7c4862db464a37b0b31d89bca874562f034bd7993895572783d02950" dependencies = [ "base64", - "hyper 0.14.27", + "hyper 0.14.28", "indexmap 1.9.3", "ipnet", "metrics", @@ -1505,7 +1505,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1721,7 +1721,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1815,7 +1815,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1893,7 +1893,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.27", + "rustix 0.38.28", "tracing", "windows-sys 0.52.0", ] @@ -2065,9 +2065,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "base64", "bytes", @@ -2076,8 +2076,8 @@ dependencies = [ "futures-util", "h2 0.3.22", "http 0.2.11", - "http-body 0.4.5", - "hyper 0.14.27", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-tls", "ipnet", "js-sys", @@ -2143,9 +2143,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.27" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfeae074e687625746172d639330f1de242a178bf3189b51e35a7a21573513ac" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", @@ -2162,9 +2162,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "schannel" @@ -2221,7 +2221,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -2620,9 +2620,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" dependencies = [ "proc-macro2", "quote", @@ -2665,28 +2665,28 @@ dependencies = [ "cfg-if", "fastrand 2.0.1", "redox_syscall", - "rustix 0.38.27", + "rustix 0.38.28", "windows-sys 0.48.0", ] [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -2716,9 +2716,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.34.0" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c" dependencies = [ "backtrace", "bytes", @@ -2741,7 +2741,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -2844,7 +2844,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -3020,7 +3020,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", "wasm-bindgen-shared", ] @@ -3054,7 +3054,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3246,9 +3246,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.28" +version = "0.5.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c830786f7720c2fd27a1a0e27a709dbd3c4d009b56d098fc742d4f4eab91fe2" +checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5" dependencies = [ "memchr", ] @@ -3265,22 +3265,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.29" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d075cf85bbb114e933343e087b92f2146bac0d55b534cbb8188becf0039948e" +checksum = 
"1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.29" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86cd5ca076997b97ef09d3ad65efe811fa68c9e874cb636ccb211223a813b0c2" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index faf8644..0a0a2f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ http-body-util = "0.1.0" metrics = "0.21.1" metrics-exporter-prometheus = "0.12.1" rdkafka = { version = "0.35.0", features = ["cmake-build", "ssl"] } +reqwest = { version = "0.11" } regex = "1.10.2" serde = { version = "1.0" } serde_derive = { version = "1.0" } diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 2c32d9d..6350ba4 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -11,6 +11,7 @@ chrono = { workspace = true } http = { workspace = true } metrics = { workspace = true } metrics-exporter-prometheus = { workspace = true } +reqwest = { workspace = true } regex = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index a753064..327f4b8 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -13,14 +13,16 @@ pub enum AppMetricCategory { ComposeWebhook, } -#[derive(Serialize)] +#[derive(Serialize, Debug)] pub enum ErrorType { Timeout, Connection, HttpStatus(u16), + Parse, + MaxAttempts, } -#[derive(Serialize)] +#[derive(Serialize, Debug)] pub struct ErrorDetails { pub error: Error, // TODO: The plugin-server sends the entire raw event with errors. 
In order to do this, we'll @@ -30,7 +32,7 @@ pub struct ErrorDetails { // event: Value, } -#[derive(Serialize)] +#[derive(Serialize, Debug)] pub struct Error { pub name: String, #[serde(skip_serializing_if = "Option::is_none")] @@ -84,6 +86,8 @@ where ErrorType::Connection => "Connection Error".to_owned(), ErrorType::Timeout => "Timeout".to_owned(), ErrorType::HttpStatus(s) => format!("HTTP Status: {}", s), + ErrorType::Parse => "Parse Error".to_owned(), + ErrorType::MaxAttempts => "Maximum attempts exceeded".to_owned(), }; serializer.serialize_str(&error_type) } diff --git a/hook-common/src/kafka_messages/mod.rs b/hook-common/src/kafka_messages/mod.rs index 1449f56..a29e219 100644 --- a/hook-common/src/kafka_messages/mod.rs +++ b/hook-common/src/kafka_messages/mod.rs @@ -5,14 +5,14 @@ use chrono::{DateTime, Utc}; use serde::Serializer; use uuid::Uuid; -fn serialize_uuid(uuid: &Uuid, serializer: S) -> Result +pub fn serialize_uuid(uuid: &Uuid, serializer: S) -> Result where S: Serializer, { serializer.serialize_str(&uuid.to_string()) } -fn serialize_datetime(datetime: &DateTime, serializer: S) -> Result +pub fn serialize_datetime(datetime: &DateTime, serializer: S) -> Result where S: Serializer, { diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index ce7d3ae..ea7dfb4 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -1,9 +1,11 @@ use std::collections; +use std::convert::From; use std::fmt; use std::str::FromStr; use serde::{de::Visitor, Deserialize, Serialize}; +use crate::kafka_messages::{app_metrics, serialize_uuid}; use crate::pgqueue::PgQueueError; /// Supported HTTP methods for webhooks. @@ -134,3 +136,117 @@ pub struct WebhookJobMetadata { pub plugin_id: Option, pub plugin_config_id: Option, } + +/// An error originating during a Webhook Job invocation. +#[derive(Serialize, Debug)] +pub struct WebhookJobError { + pub r#type: app_metrics::ErrorType, + pub details: app_metrics::ErrorDetails, + #[serde(serialize_with = "serialize_uuid")] + pub uuid: uuid::Uuid, +} + +impl From for WebhookJobError { + fn from(error: reqwest::Error) -> Self { + if error.is_body() || error.is_decode() { + WebhookJobError::new_parse(&error.to_string()) + } else if error.is_timeout() { + WebhookJobError::new_timeout(&error.to_string()) + } else if error.is_status() { + WebhookJobError::new_http_status( + error.status().expect("status code is defined").into(), + &error.to_string(), + ) + } else if error.is_connect() + || error.is_builder() + || error.is_request() + || error.is_redirect() + { + // Builder errors seem to be related to unable to setup TLS, so I'm bundling them in connection. + WebhookJobError::new_connection(&error.to_string()) + } else { + // We can't match on Kind as some types do not have an associated variant in Kind (e.g. Timeout). 
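+ // The predicates checked above (is_body, is_decode, is_timeout,
+ // is_status, is_connect, is_builder, is_request, is_redirect) cover
+ // every error class reqwest currently exposes, so this arm is only
+ // reachable if reqwest grows a new one.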
+ unreachable!("We have covered all reqwest::Error types.") + } + } +} + +impl WebhookJobError { + pub fn new_timeout(message: &str) -> Self { + let error_details = app_metrics::Error { + name: "timeout".to_owned(), + message: Some(message.to_owned()), + stack: None, + }; + Self { + r#type: app_metrics::ErrorType::Timeout, + details: app_metrics::ErrorDetails { + error: error_details, + }, + uuid: uuid::Uuid::now_v7(), + } + } + + pub fn new_connection(message: &str) -> Self { + let error_details = app_metrics::Error { + name: "connection error".to_owned(), + message: Some(message.to_owned()), + stack: None, + }; + Self { + r#type: app_metrics::ErrorType::Connection, + details: app_metrics::ErrorDetails { + error: error_details, + }, + uuid: uuid::Uuid::now_v7(), + } + } + + pub fn new_http_status(status_code: u16, message: &str) -> Self { + let error_details = app_metrics::Error { + name: "http status".to_owned(), + message: Some(message.to_owned()), + stack: None, + }; + Self { + r#type: app_metrics::ErrorType::HttpStatus(status_code), + details: app_metrics::ErrorDetails { + error: error_details, + }, + uuid: uuid::Uuid::now_v7(), + } + } + + pub fn new_parse(message: &str) -> Self { + let error_details = app_metrics::Error { + name: "parse error".to_owned(), + message: Some(message.to_owned()), + stack: None, + }; + Self { + r#type: app_metrics::ErrorType::Parse, + details: app_metrics::ErrorDetails { + error: error_details, + }, + uuid: uuid::Uuid::now_v7(), + } + } + + pub fn new_max_attempts(max_attempts: i32) -> Self { + let error_details = app_metrics::Error { + name: "maximum attempts exceeded".to_owned(), + message: Some(format!( + "Exceeded maximum number of attempts ({}) for webhook", + max_attempts + )), + stack: None, + }; + Self { + r#type: app_metrics::ErrorType::MaxAttempts, + details: app_metrics::ErrorDetails { + error: error_details, + }, + uuid: uuid::Uuid::now_v7(), + } + } +} diff --git a/hook-consumer/Cargo.toml b/hook-consumer/Cargo.toml index 5ff1eb0..35c64b5 100644 --- a/hook-consumer/Cargo.toml +++ b/hook-consumer/Cargo.toml @@ -10,7 +10,7 @@ envconfig = { workspace = true } futures = "0.3" hook-common = { path = "../hook-common" } http = { version = "0.2" } -reqwest = { version = "0.11" } +reqwest = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } sqlx = { workspace = true } diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 9e4b3b7..bf7573f 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -4,12 +4,12 @@ use std::time; use async_std::task; use hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob}; -use hook_common::webhook::{HttpMethod, WebhookJobMetadata, WebhookJobParameters}; +use hook_common::webhook::{HttpMethod, WebhookJobError, WebhookJobMetadata, WebhookJobParameters}; use http::StatusCode; use reqwest::header; use tokio::sync; -use crate::error::WebhookConsumerError; +use crate::error::{ConsumerError, WebhookError}; /// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. pub struct WebhookConsumer<'p> { @@ -57,8 +57,7 @@ impl<'p> WebhookConsumer<'p> { /// Wait until a job becomes available in our queue. async fn wait_for_job<'a>( &self, - ) -> Result, WebhookConsumerError> - { + ) -> Result, ConsumerError> { loop { if let Some(job) = self.queue.dequeue_tx(&self.name).await? 
{ return Ok(job); @@ -69,7 +68,7 @@ impl<'p> WebhookConsumer<'p> { } /// Run this consumer to continuously process any jobs that become available. - pub async fn run(&self) -> Result<(), WebhookConsumerError> { + pub async fn run(&self) -> Result<(), ConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { @@ -104,7 +103,7 @@ impl<'p> WebhookConsumer<'p> { async fn process_webhook_job( client: reqwest::Client, webhook_job: PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata>, -) -> Result<(), WebhookConsumerError> { +) -> Result<(), ConsumerError> { match send_webhook( client, &webhook_job.job.parameters.method, @@ -118,31 +117,54 @@ async fn process_webhook_job( webhook_job .complete() .await - .map_err(|error| WebhookConsumerError::PgJobError(error.to_string()))?; + .map_err(|error| ConsumerError::PgJobError(error.to_string()))?; + Ok(()) + } + Err(WebhookError::ParseHeadersError(e)) => { + webhook_job + .fail(WebhookJobError::new_parse(&e.to_string())) + .await + .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; Ok(()) } - Err(WebhookConsumerError::RetryableWebhookError { - reason, - retry_after, - }) => match webhook_job.retry(reason.to_string(), retry_after).await { - Ok(_) => Ok(()), - Err(PgJobError::RetryInvalidError { - job: webhook_job, - error: fail_error, - }) => { - webhook_job - .fail(fail_error.to_string()) - .await - .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; - Ok(()) + Err(WebhookError::ParseHttpMethodError(e)) => { + webhook_job + .fail(WebhookJobError::new_parse(&e)) + .await + .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + Err(WebhookError::ParseUrlError(e)) => { + webhook_job + .fail(WebhookJobError::new_parse(&e.to_string())) + .await + .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + Err(WebhookError::RetryableRequestError { error, retry_after }) => { + match webhook_job + .retry(WebhookJobError::from(error), retry_after) + .await + { + Ok(_) => Ok(()), + Err(PgJobError::RetryInvalidError { + job: webhook_job, .. 
+ }) => { + let max_attempts = webhook_job.job.max_attempts; + webhook_job + .fail(WebhookJobError::new_max_attempts(max_attempts)) + .await + .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + Err(job_error) => Err(ConsumerError::PgJobError(job_error.to_string())), } - Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), - }, - Err(error) => { + } + Err(WebhookError::NonRetryableRetryableRequestError(error)) => { webhook_job - .fail(error.to_string()) + .fail(WebhookJobError::from(error)) .await - .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; + .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; Ok(()) } } @@ -163,12 +185,12 @@ async fn send_webhook( url: &str, headers: &collections::HashMap, body: String, -) -> Result { +) -> Result<(), WebhookError> { let method: http::Method = method.into(); - let url: reqwest::Url = (url).parse().map_err(WebhookConsumerError::ParseUrlError)?; + let url: reqwest::Url = (url).parse().map_err(WebhookError::ParseUrlError)?; let headers: reqwest::header::HeaderMap = (headers) .try_into() - .map_err(WebhookConsumerError::ParseHeadersError)?; + .map_err(WebhookError::ParseHeadersError)?; let body = reqwest::Body::from(body); let response = client @@ -177,27 +199,28 @@ async fn send_webhook( .body(body) .send() .await - .map_err(|e| WebhookConsumerError::RetryableWebhookError { - reason: e.to_string(), + .map_err(|e| WebhookError::RetryableRequestError { + error: e, retry_after: None, })?; - let status = response.status(); - - if status.is_success() { - Ok(response) - } else if is_retryable_status(status) { - let retry_after = parse_retry_after_header(response.headers()); - - Err(WebhookConsumerError::RetryableWebhookError { - reason: format!("retryable status code {}", status), - retry_after, - }) - } else { - Err(WebhookConsumerError::NonRetryableWebhookError(format!( - "non-retryable status code {}", - status - ))) + match response.error_for_status_ref() { + Ok(_) => Ok(()), + Err(err) => { + if is_retryable_status( + err.status() + .expect("status code is set as error is generated from a response"), + ) { + let retry_after = parse_retry_after_header(response.headers()); + + Err(WebhookError::RetryableRequestError { + error: err, + retry_after, + }) + } else { + Err(WebhookError::NonRetryableRetryableRequestError(err)) + } + } } } diff --git a/hook-consumer/src/error.rs b/hook-consumer/src/error.rs index a196643..b05d476 100644 --- a/hook-consumer/src/error.rs +++ b/hook-consumer/src/error.rs @@ -3,26 +3,31 @@ use std::time; use hook_common::pgqueue; use thiserror::Error; -/// Enumeration of errors for operations with WebhookConsumer. +/// Enumeration of errors related to webhook job processing in the WebhookConsumer. 
#[derive(Error, Debug)] -pub enum WebhookConsumerError { - #[error("timed out while waiting for jobs to be available")] - TimeoutError, +pub enum WebhookError { #[error("{0} is not a valid HttpMethod")] ParseHttpMethodError(String), #[error("error parsing webhook headers")] ParseHeadersError(http::Error), #[error("error parsing webhook url")] ParseUrlError(url::ParseError), + #[error("a webhook could not be delivered but it could be retried later: {error}")] + RetryableRequestError { + error: reqwest::Error, + retry_after: Option, + }, + #[error("a webhook could not be delivered and it cannot be retried further: {0}")] + NonRetryableRetryableRequestError(reqwest::Error), +} + +/// Enumeration of errors related to initialization and consumption of webhook jobs. +#[derive(Error, Debug)] +pub enum ConsumerError { + #[error("timed out while waiting for jobs to be available")] + TimeoutError, #[error("an error occurred in the underlying queue")] QueueError(#[from] pgqueue::PgQueueError), #[error("an error occurred in the underlying job")] PgJobError(String), - #[error("a webhook could not be delivered but it could be retried later: {reason}")] - RetryableWebhookError { - reason: String, - retry_after: Option, - }, - #[error("a webhook could not be delivered and it cannot be retried further: {0}")] - NonRetryableWebhookError(String), } diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index bf76503..bb02526 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -3,10 +3,10 @@ use envconfig::Envconfig; use hook_common::pgqueue::{PgQueue, RetryPolicy}; use hook_consumer::config::Config; use hook_consumer::consumer::WebhookConsumer; -use hook_consumer::error::WebhookConsumerError; +use hook_consumer::error::ConsumerError; #[tokio::main] -async fn main() -> Result<(), WebhookConsumerError> { +async fn main() -> Result<(), ConsumerError> { let config = Config::init_from_env().expect("Invalid configuration:"); let retry_policy = RetryPolicy::new( From c64c3d054c7f9701dfc170e79e6f77f9f36a995d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Tue, 19 Dec 2023 12:15:08 +0100 Subject: [PATCH 054/130] fix: Return response from send_webhook --- hook-consumer/src/consumer.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index bf7573f..9430126 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -185,7 +185,7 @@ async fn send_webhook( url: &str, headers: &collections::HashMap, body: String, -) -> Result<(), WebhookError> { +) -> Result { let method: http::Method = method.into(); let url: reqwest::Url = (url).parse().map_err(WebhookError::ParseUrlError)?; let headers: reqwest::header::HeaderMap = (headers) @@ -204,15 +204,15 @@ async fn send_webhook( retry_after: None, })?; - match response.error_for_status_ref() { - Ok(_) => Ok(()), + let retry_after = parse_retry_after_header(response.headers()); + + match response.error_for_status() { + Ok(response) => Ok(response), Err(err) => { if is_retryable_status( err.status() .expect("status code is set as error is generated from a response"), ) { - let retry_after = parse_retry_after_header(response.headers()); - Err(WebhookError::RetryableRequestError { error: err, retry_after, From d80644b2fa8325dcd121f71656d13a6860f0cb2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Tue, 19 Dec 2023 15:02:18 +0100 Subject: [PATCH 055/130] refactor: 
Re-use underlying error when failing after retry --- hook-common/src/kafka_messages/app_metrics.rs | 2 -- hook-common/src/webhook.rs | 22 ++----------------- hook-consumer/src/consumer.rs | 7 +++--- 3 files changed, 5 insertions(+), 26 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 327f4b8..8964144 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -19,7 +19,6 @@ pub enum ErrorType { Connection, HttpStatus(u16), Parse, - MaxAttempts, } #[derive(Serialize, Debug)] @@ -87,7 +86,6 @@ where ErrorType::Timeout => "Timeout".to_owned(), ErrorType::HttpStatus(s) => format!("HTTP Status: {}", s), ErrorType::Parse => "Parse Error".to_owned(), - ErrorType::MaxAttempts => "Maximum attempts exceeded".to_owned(), }; serializer.serialize_str(&error_type) } diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index ea7dfb4..d8c174d 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -146,8 +146,8 @@ pub struct WebhookJobError { pub uuid: uuid::Uuid, } -impl From for WebhookJobError { - fn from(error: reqwest::Error) -> Self { +impl From<&reqwest::Error> for WebhookJobError { + fn from(error: &reqwest::Error) -> Self { if error.is_body() || error.is_decode() { WebhookJobError::new_parse(&error.to_string()) } else if error.is_timeout() { @@ -231,22 +231,4 @@ impl WebhookJobError { uuid: uuid::Uuid::now_v7(), } } - - pub fn new_max_attempts(max_attempts: i32) -> Self { - let error_details = app_metrics::Error { - name: "maximum attempts exceeded".to_owned(), - message: Some(format!( - "Exceeded maximum number of attempts ({}) for webhook", - max_attempts - )), - stack: None, - }; - Self { - r#type: app_metrics::ErrorType::MaxAttempts, - details: app_metrics::ErrorDetails { - error: error_details, - }, - uuid: uuid::Uuid::now_v7(), - } - } } diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 9430126..8a2ecdc 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -143,16 +143,15 @@ async fn process_webhook_job( } Err(WebhookError::RetryableRequestError { error, retry_after }) => { match webhook_job - .retry(WebhookJobError::from(error), retry_after) + .retry(WebhookJobError::from(&error), retry_after) .await { Ok(_) => Ok(()), Err(PgJobError::RetryInvalidError { job: webhook_job, .. 
                }) => {
-                    let max_attempts = webhook_job.job.max_attempts;
                     webhook_job
-                        .fail(WebhookJobError::new_max_attempts(max_attempts))
+                        .fail(WebhookJobError::from(&error))
                         .await
                         .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?;
                     Ok(())
@@ -162,7 +161,7 @@
         }
         Err(WebhookError::NonRetryableRetryableRequestError(error)) => {
             webhook_job
-                .fail(WebhookJobError::from(error))
+                .fail(WebhookJobError::from(&error))
                 .await
                 .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?;
             Ok(())

From e4ee369fb24c2f6ee410a8349da0b78953d90e86 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Tue, 19 Dec 2023 16:45:58 +0100
Subject: [PATCH 056/130] chore: Update docs

---
 hook-common/src/webhook.rs    | 4 ++++
 hook-consumer/src/consumer.rs | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs
index d8c174d..488d527 100644
--- a/hook-common/src/webhook.rs
+++ b/hook-common/src/webhook.rs
@@ -138,6 +138,7 @@ pub struct WebhookJobMetadata {
 }

 /// An error originating during a Webhook Job invocation.
+/// This is to be serialized to be stored as an error whenever retrying or failing a webhook job.
 #[derive(Serialize, Debug)]
 pub struct WebhookJobError {
     pub r#type: app_metrics::ErrorType,
@@ -146,6 +147,9 @@ pub struct WebhookJobError {
     pub uuid: uuid::Uuid,
 }

+/// Webhook jobs boil down to an HTTP request, so it's useful to have a way to convert from &reqwest::Error.
+/// For the conversion we check all possible error types with the associated is_* methods provided by reqwest.
+/// Some precision may be lost as our app_metrics::ErrorType does not support the same number of variants.
 impl From<&reqwest::Error> for WebhookJobError {
     fn from(error: &reqwest::Error) -> Self {
         if error.is_body() || error.is_decode() {
diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index 8a2ecdc..12d4b38 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -98,8 +98,8 @@ impl<'p> WebhookConsumer<'p> {
 ///
 /// # Arguments
 ///
+/// * `client`: An HTTP client to execute the webhook job request.
 /// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`.
-/// * `request_timeout`: A timeout for the HTTP request.
 async fn process_webhook_job(
     client: reqwest::Client,
     webhook_job: PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata>,
@@ -173,11 +173,11 @@ async fn process_webhook_job(
 ///
 /// # Arguments
 ///
+/// * `client`: An HTTP client to execute the HTTP request.
 /// * `method`: The HTTP method to use in the HTTP request.
 /// * `url`: The URL we are targeting with our request. Parsing this URL can fail.
 /// * `headers`: Key, value pairs of HTTP headers in a `std::collections::HashMap`. Can fail if headers are not valid.
 /// * `body`: The body of the request. Ownership is required.
-/// * `timeout`: A timeout for the HTTP request.
async fn send_webhook( client: reqwest::Client, method: &HttpMethod, From f4803070a5aca3803bb9d56ec5574341c85f8d81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Tue, 19 Dec 2023 17:10:37 +0100 Subject: [PATCH 057/130] refactor: Have Connection Error catch all reqwest errors --- hook-common/src/webhook.rs | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index 488d527..2bf8db3 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -152,25 +152,18 @@ pub struct WebhookJobError { /// Some precision may be lost as our app_metrics::ErrorType does not support the same number of variants. impl From<&reqwest::Error> for WebhookJobError { fn from(error: &reqwest::Error) -> Self { - if error.is_body() || error.is_decode() { - WebhookJobError::new_parse(&error.to_string()) - } else if error.is_timeout() { + if error.is_timeout() { WebhookJobError::new_timeout(&error.to_string()) } else if error.is_status() { WebhookJobError::new_http_status( error.status().expect("status code is defined").into(), &error.to_string(), ) - } else if error.is_connect() - || error.is_builder() - || error.is_request() - || error.is_redirect() - { - // Builder errors seem to be related to unable to setup TLS, so I'm bundling them in connection. - WebhookJobError::new_connection(&error.to_string()) } else { - // We can't match on Kind as some types do not have an associated variant in Kind (e.g. Timeout). - unreachable!("We have covered all reqwest::Error types.") + // Catch all other errors as `app_metrics::ErrorType::Connection` errors. + // Not all of `reqwest::Error` may strictly be connection errors, so our supported error types may need an extension + // depending on how strict error reporting has to be. + WebhookJobError::new_connection(&error.to_string()) } } } From f9b32ae4c04add0281f7f0d5390a61dc8bcdfc27 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 19 Dec 2023 09:53:20 -0700 Subject: [PATCH 058/130] Implement Webhook cleanup --- hook-common/src/kafka_messages/app_metrics.rs | 66 ++-- hook-common/src/kafka_messages/mod.rs | 10 + hook-common/src/webhook.rs | 10 +- hook-janitor/src/cleanup.rs | 2 - hook-janitor/src/config.rs | 3 - hook-janitor/src/main.rs | 2 - hook-janitor/src/webhooks.rs | 328 +++++++++++++++++- 7 files changed, 361 insertions(+), 60 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 8964144..4396643 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -1,10 +1,10 @@ use chrono::{DateTime, Utc}; -use serde::{Serialize, Serializer}; +use serde::{Deserialize, Serialize, Serializer}; use uuid::Uuid; -use super::{serialize_datetime, serialize_uuid}; +use super::{serialize_datetime, serialize_optional_uuid}; -#[derive(Serialize)] +#[derive(Serialize, Debug)] pub enum AppMetricCategory { ProcessEvent, OnEvent, @@ -13,7 +13,7 @@ pub enum AppMetricCategory { ComposeWebhook, } -#[derive(Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub enum ErrorType { Timeout, Connection, @@ -21,29 +21,23 @@ pub enum ErrorType { Parse, } -#[derive(Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub struct ErrorDetails { pub error: Error, - // TODO: The plugin-server sends the entire raw event with errors. 
In order to do this, we'll - // have to pass the entire event when we enqueue items, and store it in the Parameters JSONB - // column. We should see if it's possible to work around this before we commit to it. - // - // event: Value, } -#[derive(Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub struct Error { pub name: String, #[serde(skip_serializing_if = "Option::is_none")] pub message: Option, - // TODO: Realistically, it doesn't seem likely that we'll generate Rust stack traces and put - // them here. I think this was more useful in plugin-server when the stack could come from - // plugin code. + // This field will only be useful if we start running plugins in Rust (via a WASM runtime or + // something) and want to provide the user with stack traces like we do for TypeScript plugins. #[serde(skip_serializing_if = "Option::is_none")] pub stack: Option, } -#[derive(Serialize)] +#[derive(Serialize, Debug)] pub struct AppMetric { #[serde(serialize_with = "serialize_datetime")] pub timestamp: DateTime, @@ -56,11 +50,18 @@ pub struct AppMetric { pub successes: u32, pub successes_on_retry: u32, pub failures: u32, - #[serde(serialize_with = "serialize_uuid")] - pub error_uuid: Uuid, - #[serde(serialize_with = "serialize_error_type")] - pub error_type: ErrorType, - pub error_details: Error, + #[serde( + serialize_with = "serialize_optional_uuid", + skip_serializing_if = "Option::is_none" + )] + pub error_uuid: Option, + #[serde( + serialize_with = "serialize_error_type", + skip_serializing_if = "Option::is_none" + )] + pub error_type: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub error_details: Option, } fn serialize_category(category: &AppMetricCategory, serializer: S) -> Result @@ -77,10 +78,15 @@ where serializer.serialize_str(category_str) } -fn serialize_error_type(error_type: &ErrorType, serializer: S) -> Result +fn serialize_error_type(error_type: &Option, serializer: S) -> Result where S: Serializer, { + let error_type = match error_type { + Some(error_type) => error_type, + None => return serializer.serialize_none(), + }; + let error_type = match error_type { ErrorType::Connection => "Connection Error".to_owned(), ErrorType::Timeout => "Timeout".to_owned(), @@ -107,18 +113,20 @@ mod tests { successes: 10, successes_on_retry: 0, failures: 2, - error_uuid: Uuid::parse_str("550e8400-e29b-41d4-a716-446655447777").unwrap(), - error_type: ErrorType::Connection, - error_details: Error { - name: "FooError".to_owned(), - message: Some("Error Message".to_owned()), - stack: None, - }, + error_uuid: Some(Uuid::parse_str("550e8400-e29b-41d4-a716-446655447777").unwrap()), + error_type: Some(ErrorType::Connection), + error_details: Some(ErrorDetails { + error: Error { + name: "FooError".to_owned(), + message: Some("Error Message".to_owned()), + stack: None, + }, + }), }; let serialized_json = serde_json::to_string(&app_metric).unwrap(); - let expected_json = r#"{"timestamp":"2023-12-14 12:02:00","team_id":123,"plugin_config_id":456,"category":"webhook","successes":10,"successes_on_retry":0,"failures":2,"error_uuid":"550e8400-e29b-41d4-a716-446655447777","error_type":"Connection Error","error_details":{"name":"FooError","message":"Error Message"}}"#; + let expected_json = r#"{"timestamp":"2023-12-14 12:02:00","team_id":123,"plugin_config_id":456,"category":"webhook","successes":10,"successes_on_retry":0,"failures":2,"error_uuid":"550e8400-e29b-41d4-a716-446655447777","error_type":"Connection Error","error_details":{"error":{"name":"FooError","message":"Error 
Message"}}}"#; assert_eq!(serialized_json, expected_json); } diff --git a/hook-common/src/kafka_messages/mod.rs b/hook-common/src/kafka_messages/mod.rs index a29e219..72b49e1 100644 --- a/hook-common/src/kafka_messages/mod.rs +++ b/hook-common/src/kafka_messages/mod.rs @@ -12,6 +12,16 @@ where serializer.serialize_str(&uuid.to_string()) } +pub fn serialize_optional_uuid(uuid: &Option, serializer: S) -> Result +where + S: Serializer, +{ + match uuid { + Some(uuid) => serializer.serialize_str(&uuid.to_string()), + None => serializer.serialize_none(), + } +} + pub fn serialize_datetime(datetime: &DateTime, serializer: S) -> Result where S: Serializer, diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index 2bf8db3..d320ce0 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -5,7 +5,7 @@ use std::str::FromStr; use serde::{de::Visitor, Deserialize, Serialize}; -use crate::kafka_messages::{app_metrics, serialize_uuid}; +use crate::kafka_messages::app_metrics; use crate::pgqueue::PgQueueError; /// Supported HTTP methods for webhooks. @@ -139,12 +139,10 @@ pub struct WebhookJobMetadata { /// An error originating during a Webhook Job invocation. /// This is to be serialized to be stored as an error whenever retrying or failing a webhook job. -#[derive(Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub struct WebhookJobError { pub r#type: app_metrics::ErrorType, pub details: app_metrics::ErrorDetails, - #[serde(serialize_with = "serialize_uuid")] - pub uuid: uuid::Uuid, } /// Webhook jobs boil down to an HTTP request, so it's useful to have a way to convert from &reqwest::Error. @@ -180,7 +178,6 @@ impl WebhookJobError { details: app_metrics::ErrorDetails { error: error_details, }, - uuid: uuid::Uuid::now_v7(), } } @@ -195,7 +192,6 @@ impl WebhookJobError { details: app_metrics::ErrorDetails { error: error_details, }, - uuid: uuid::Uuid::now_v7(), } } @@ -210,7 +206,6 @@ impl WebhookJobError { details: app_metrics::ErrorDetails { error: error_details, }, - uuid: uuid::Uuid::now_v7(), } } @@ -225,7 +220,6 @@ impl WebhookJobError { details: app_metrics::ErrorDetails { error: error_details, }, - uuid: uuid::Uuid::now_v7(), } } } diff --git a/hook-janitor/src/cleanup.rs b/hook-janitor/src/cleanup.rs index e6e91e0..82b9130 100644 --- a/hook-janitor/src/cleanup.rs +++ b/hook-janitor/src/cleanup.rs @@ -5,8 +5,6 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum CleanerError { - #[error("pool creation failed with: {error}")] - PoolCreationError { error: sqlx::Error }, #[error("invalid cleaner mode")] InvalidCleanerMode, } diff --git a/hook-janitor/src/config.rs b/hook-janitor/src/config.rs index 89621a2..c1efb85 100644 --- a/hook-janitor/src/config.rs +++ b/hook-janitor/src/config.rs @@ -20,9 +20,6 @@ pub struct Config { #[envconfig(default = "30")] pub cleanup_interval_secs: u64, - #[envconfig(default = "10000")] - pub cleanup_batch_size: u32, - // The cleanup task needs to have special knowledge of the queue it's cleaning up. This is so it // can do things like flush the proper app_metrics or plugin_log_entries, and so it knows what // to expect in the job's payload JSONB column. 
diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs index 46223aa..5de3ec4 100644 --- a/hook-janitor/src/main.rs +++ b/hook-janitor/src/main.rs @@ -57,10 +57,8 @@ async fn main() { &config.queue_name, &config.table_name, &config.database_url, - config.cleanup_batch_size, kafka_producer, config.kafka.app_metrics_topic.to_owned(), - config.kafka.plugin_log_entries_topic.to_owned(), ) .expect("unable to create webhook cleaner"), ) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index e8895f1..6b10ce0 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -1,20 +1,87 @@ -use async_trait::async_trait; +use std::time::Duration; -use rdkafka::producer::FutureProducer; -use sqlx::postgres::{PgPool, PgPoolOptions}; +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use futures::future::join_all; +use hook_common::webhook::WebhookJobError; +use rdkafka::error::KafkaError; +use rdkafka::producer::{FutureProducer, FutureRecord}; +use serde_json::error::Error as SerdeError; +use sqlx::postgres::{PgPool, PgPoolOptions, Postgres}; +use sqlx::types::{chrono, Uuid}; +use sqlx::Transaction; +use thiserror::Error; +use tracing::{debug, error}; -use crate::cleanup::{Cleaner, CleanerError}; +use crate::cleanup::Cleaner; use crate::kafka_producer::KafkaContext; -#[allow(dead_code)] +use hook_common::kafka_messages::app_metrics::{AppMetric, AppMetricCategory}; + +#[derive(Error, Debug)] +pub enum WebhookCleanerError { + #[error("failed to create postgres pool: {error}")] + PoolCreationError { error: sqlx::Error }, + #[error("failed to acquire conn and start txn: {error}")] + StartTxnError { error: sqlx::Error }, + #[error("failed to get completed rows: {error}")] + GetCompletedRowsError { error: sqlx::Error }, + #[error("failed to get failed rows: {error}")] + GetFailedRowsError { error: sqlx::Error }, + #[error("failed to serialize rows: {error}")] + SerializeRowsError { error: SerdeError }, + #[error("failed to produce to kafka: {error}")] + KafkaProduceError { error: KafkaError }, + #[error("failed to produce to kafka (timeout)")] + KafkaProduceCanceled, + #[error("failed to delete rows: {error}")] + DeleteRowsError { error: sqlx::Error }, + #[error("failed to commit txn: {error}")] + CommitTxnError { error: sqlx::Error }, +} + +type Result = std::result::Result; + pub struct WebhookCleaner { queue_name: String, table_name: String, pg_pool: PgPool, - batch_size: u32, kafka_producer: FutureProducer, app_metrics_topic: String, - plugin_log_entries_topic: String, +} + +#[derive(sqlx::FromRow, Debug)] +struct CompletedRow { + // App Metrics truncates/aggregates rows on the hour, so we take advantage of that to GROUP BY + // and aggregate to select fewer rows. + hour: DateTime, + // A note about the `try_from`s: Postgres returns all of those types as `bigint` (i64), but + // we know their true sizes, and so we can convert them to the correct types here. If this + // ever fails then something has gone wrong. + #[sqlx(try_from = "i64")] + team_id: u32, + #[sqlx(try_from = "i64")] + plugin_config_id: u32, + #[sqlx(try_from = "i64")] + successes: u32, +} + +#[derive(sqlx::FromRow, Debug)] +struct FailedRow { + // App Metrics truncates/aggregates rows on the hour, so we take advantage of that to GROUP BY + // and aggregate to select fewer rows. + hour: DateTime, + // A note about the `try_from`s: Postgres returns all of those types as `bigint` (i64), but + // we know their true sizes, and so we can convert them to the correct types here. 
If this + // ever fails then something has gone wrong. + #[sqlx(try_from = "i64")] + team_id: u32, + #[sqlx(try_from = "i64")] + plugin_config_id: u32, + #[sqlx(json)] + last_error: WebhookJobError, + #[sqlx(try_from = "i64")] + failures: u32, } impl WebhookCleaner { @@ -22,34 +89,263 @@ impl WebhookCleaner { queue_name: &str, table_name: &str, database_url: &str, - batch_size: u32, kafka_producer: FutureProducer, app_metrics_topic: String, - plugin_log_entries_topic: String, - ) -> Result { + ) -> Result { let queue_name = queue_name.to_owned(); let table_name = table_name.to_owned(); let pg_pool = PgPoolOptions::new() + .acquire_timeout(Duration::from_secs(10)) .connect_lazy(database_url) - .map_err(|error| CleanerError::PoolCreationError { error })?; + .map_err(|error| WebhookCleanerError::PoolCreationError { error })?; Ok(Self { queue_name, table_name, pg_pool, - batch_size, kafka_producer, app_metrics_topic, - plugin_log_entries_topic, }) } + + async fn start_serializable_txn(&self) -> Result> { + let mut tx = self + .pg_pool + .begin() + .await + .map_err(|e| WebhookCleanerError::StartTxnError { error: e })?; + + // We use serializable isolation so that we observe a snapshot of the DB at the time we + // start the cleanup process. This prevents us from accidentally deleting rows that are + // added (or become 'completed' or 'failed') after we start the cleanup process. + sqlx::query("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE") + .execute(&mut *tx) + .await + .map_err(|e| WebhookCleanerError::StartTxnError { error: e })?; + + Ok(tx) + } + + async fn get_completed_rows( + &self, + tx: &mut Transaction<'_, Postgres>, + ) -> Result> { + let base_query = format!( + r#" + SELECT DATE_TRUNC('hour', finished_at) AS hour, + metadata->>'team_id' AS team_id, + metadata->>'plugin_config_id' AS plugin_config_id, + count(*) as successes + FROM {0} + WHERE status = 'completed' + AND queue = $1 + GROUP BY hour, team_id, plugin_config_id + ORDER BY hour, team_id, plugin_config_id; + "#, + self.table_name + ); + + let rows = sqlx::query_as::<_, CompletedRow>(&base_query) + .bind(&self.queue_name) + .fetch_all(&mut **tx) + .await + .map_err(|e| WebhookCleanerError::GetCompletedRowsError { error: e })?; + + Ok(rows) + } + + async fn serialize_completed_rows( + &self, + completed_rows: Vec, + ) -> Result> { + let mut payloads = Vec::new(); + + for row in completed_rows { + let app_metric = AppMetric { + timestamp: row.hour, + team_id: row.team_id, + plugin_config_id: row.plugin_config_id, + job_id: None, + category: AppMetricCategory::Webhook, + successes: row.successes, + successes_on_retry: 0, + failures: 0, + error_uuid: None, + error_type: None, + error_details: None, + }; + + let payload = serde_json::to_string(&app_metric) + .map_err(|e| WebhookCleanerError::SerializeRowsError { error: e })?; + + payloads.push(payload) + } + + Ok(payloads) + } + + async fn get_failed_rows(&self, tx: &mut Transaction<'_, Postgres>) -> Result> { + let base_query = format!( + r#" + SELECT DATE_TRUNC('hour', finished_at) AS hour, + metadata->>'team_id' AS team_id, + metadata->>'plugin_config_id' AS plugin_config_id, + errors[-1] AS last_error, + count(*) as failures + FROM {0} + WHERE status = 'failed' + AND queue = $1 + GROUP BY hour, team_id, plugin_config_id, last_error + ORDER BY hour, team_id, plugin_config_id, last_error; + "#, + self.table_name + ); + + let rows = sqlx::query_as::<_, FailedRow>(&base_query) + .bind(&self.queue_name) + .fetch_all(&mut **tx) + .await + .map_err(|e| 
WebhookCleanerError::GetFailedRowsError { error: e })?;
+
+        Ok(rows)
+    }
+
+    async fn serialize_failed_rows(&self, failed_rows: Vec<FailedRow>) -> Result<Vec<String>> {
+        let mut payloads = Vec::new();
+
+        for row in failed_rows {
+            let app_metric = AppMetric {
+                timestamp: row.hour,
+                team_id: row.team_id,
+                plugin_config_id: row.plugin_config_id,
+                job_id: None,
+                category: AppMetricCategory::Webhook,
+                successes: 0,
+                successes_on_retry: 0,
+                failures: row.failures,
+                error_uuid: Some(Uuid::now_v7()),
+                error_type: Some(row.last_error.r#type),
+                error_details: Some(row.last_error.details),
+            };
+
+            let payload = serde_json::to_string(&app_metric)
+                .map_err(|e| WebhookCleanerError::SerializeRowsError { error: e })?;
+
+            payloads.push(payload)
+        }
+
+        Ok(payloads)
+    }
+
+    async fn send_messages_to_kafka(&self, payloads: Vec<String>) -> Result<()> {
+        let mut delivery_futures = Vec::new();
+
+        for payload in payloads {
+            match self.kafka_producer.send_result(FutureRecord {
+                topic: self.app_metrics_topic.as_str(),
+                payload: Some(&payload),
+                partition: None,
+                key: None::<&str>,
+                timestamp: None,
+                headers: None,
+            }) {
+                Ok(future) => delivery_futures.push(future),
+                Err((error, _)) => return Err(WebhookCleanerError::KafkaProduceError { error }),
+            }
+        }
+
+        for result in join_all(delivery_futures).await {
+            match result {
+                Ok(Ok(_)) => {}
+                Ok(Err((error, _))) => {
+                    return Err(WebhookCleanerError::KafkaProduceError { error })
+                }
+                Err(_) => {
+                    // Cancelled due to timeout while retrying
+                    return Err(WebhookCleanerError::KafkaProduceCanceled);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn delete_observed_rows(&self, tx: &mut Transaction<'_, Postgres>) -> Result<u64> {
+        // This DELETE is only safe because we are in serializable isolation mode, see the note
+        // in `start_serializable_txn`.
+        let base_query = format!(
+            r#"
+            DELETE FROM {0}
+            WHERE status IN ('failed', 'completed')
+            AND queue = $1;
+            "#,
+            self.table_name
+        );
+
+        let result = sqlx::query(&base_query)
+            .bind(&self.queue_name)
+            .execute(&mut **tx)
+            .await
+            .map_err(|e| WebhookCleanerError::DeleteRowsError { error: e })?;
+
+        Ok(result.rows_affected())
+    }
+
+    async fn commit_txn(&self, tx: Transaction<'_, Postgres>) -> Result<()> {
+        tx.commit()
+            .await
+            .map_err(|e| WebhookCleanerError::CommitTxnError { error: e })?;
+
+        Ok(())
+    }
+
+    async fn cleanup_impl(&self) -> Result<()> {
+        debug!("WebhookCleaner starting cleanup");
+
+        // Note that we select all completed and failed rows without any pagination at the moment.
+        // We aggregate as much as possible with GROUP BY, truncating the timestamp down to the
+        // hour just like App Metrics does. A completed row is 24 bytes (and aggregates an entire
+        // hour per `plugin_config_id`), and a failed row is 104 + the message length (and
+        // aggregates an entire hour per `plugin_config_id` per `error`), so we can fit a lot of
+        // rows in memory. It seems unlikely we'll need to paginate, but that can be added in the
+        // future if necessary.
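+        //
+        // As a rough worked example of those sizes: even 100,000 distinct
+        // (hour, team_id, plugin_config_id) groups of completed rows come to
+        // only ~2.4 MB (100,000 * 24 bytes) before any Kafka payloads are built.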
+ + let mut tx = self.start_serializable_txn().await?; + let completed_rows = self.get_completed_rows(&mut tx).await?; + let mut payloads = self.serialize_completed_rows(completed_rows).await?; + let failed_rows = self.get_failed_rows(&mut tx).await?; + let mut failed_payloads = self.serialize_failed_rows(failed_rows).await?; + payloads.append(&mut failed_payloads); + let mut rows_deleted: u64 = 0; + if !payloads.is_empty() { + self.send_messages_to_kafka(payloads).await?; + rows_deleted = self.delete_observed_rows(&mut tx).await?; + self.commit_txn(tx).await?; + } + + debug!( + "WebhookCleaner finished cleanup, deleted {} rows", + rows_deleted + ); + + Ok(()) + } } #[async_trait] impl Cleaner for WebhookCleaner { async fn cleanup(&self) { - // TODO: collect stats on completed/failed rows - // TODO: push metrics about those rows into `app_metrics` - // TODO: delete those completed/failed rows + match self.cleanup_impl().await { + Ok(_) => {} + Err(error) => { + error!(error = ?error, "WebhookCleaner::cleanup failed"); + } + } } } + +#[cfg(test)] +mod tests { + #[tokio::test] + async fn test() {} +} From 72aa509cccef0a6df87e5e69a63ce4e9db020d95 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 19 Dec 2023 09:59:14 -0700 Subject: [PATCH 059/130] Drop unnecessary asyncs --- hook-janitor/src/webhooks.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 6b10ce0..7e6f540 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -154,10 +154,7 @@ impl WebhookCleaner { Ok(rows) } - async fn serialize_completed_rows( - &self, - completed_rows: Vec, - ) -> Result> { + fn serialize_completed_rows(&self, completed_rows: Vec) -> Result> { let mut payloads = Vec::new(); for row in completed_rows { @@ -210,7 +207,7 @@ impl WebhookCleaner { Ok(rows) } - async fn serialize_failed_rows(&self, failed_rows: Vec) -> Result> { + fn serialize_failed_rows(&self, failed_rows: Vec) -> Result> { let mut payloads = Vec::new(); for row in failed_rows { @@ -312,9 +309,9 @@ impl WebhookCleaner { let mut tx = self.start_serializable_txn().await?; let completed_rows = self.get_completed_rows(&mut tx).await?; - let mut payloads = self.serialize_completed_rows(completed_rows).await?; + let mut payloads = self.serialize_completed_rows(completed_rows)?; let failed_rows = self.get_failed_rows(&mut tx).await?; - let mut failed_payloads = self.serialize_failed_rows(failed_rows).await?; + let mut failed_payloads = self.serialize_failed_rows(failed_rows)?; payloads.append(&mut failed_payloads); let mut rows_deleted: u64 = 0; if !payloads.is_empty() { From 4fe10877dc9a0345f7904bda99a32bd70bac773d Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 19 Dec 2023 12:39:25 -0700 Subject: [PATCH 060/130] Make WebhookJobMetadata fields required --- hook-common/src/webhook.rs | 6 +++--- hook-consumer/src/consumer.rs | 6 +++--- hook-producer/src/handlers/webhook.rs | 18 +++++++++--------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index d320ce0..9a21b83 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -132,9 +132,9 @@ pub struct WebhookJobParameters { /// These should be set if the Webhook is associated with a plugin `composeWebhook` invocation. 
#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct WebhookJobMetadata { - pub team_id: Option, - pub plugin_id: Option, - pub plugin_config_id: Option, + pub team_id: u32, + pub plugin_id: u32, + pub plugin_config_id: u32, } /// An error originating during a Webhook Job invocation. diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 12d4b38..633381a 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -339,9 +339,9 @@ mod tests { url: "localhost".to_owned(), }; let webhook_job_metadata = WebhookJobMetadata { - team_id: None, - plugin_id: None, - plugin_config_id: None, + team_id: 1, + plugin_id: 2, + plugin_config_id: 3, }; // enqueue takes ownership of the job enqueued to avoid bugs that can cause duplicate jobs. // Normally, a separate application would be enqueueing jobs for us to consume, so no ownership diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs index 3947320..18aebf3 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-producer/src/handlers/webhook.rs @@ -146,9 +146,9 @@ mod tests { body: r#"{"a": "b"}"#.to_owned(), }, metadata: WebhookJobMetadata { - team_id: Some(1), - plugin_id: Some(2), - plugin_config_id: Some(3), + team_id: 1, + plugin_id: 2, + plugin_config_id: 3, }, max_attempts: 1, }) @@ -193,9 +193,9 @@ mod tests { body: r#"{"a": "b"}"#.to_owned(), }, metadata: WebhookJobMetadata { - team_id: Some(1), - plugin_id: Some(2), - plugin_config_id: Some(3), + team_id: 1, + plugin_id: 2, + plugin_config_id: 3, }, max_attempts: 1, }) @@ -296,9 +296,9 @@ mod tests { body: long_string.to_string(), }, metadata: WebhookJobMetadata { - team_id: Some(1), - plugin_id: Some(2), - plugin_config_id: Some(3), + team_id: 1, + plugin_id: 2, + plugin_config_id: 3, }, max_attempts: 1, }) From 9ad59d24504e58d13ba35503d1cd2ad1d46d26e3 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 19 Dec 2023 13:28:50 -0700 Subject: [PATCH 061/130] Switch to sqlx::test for per-test DBs and fixtures --- .env | 1 + Cargo.toml | 7 ++- hook-common/src/pgqueue.rs | 65 +++++++++++++-------- hook-consumer/src/consumer.rs | 13 +++-- hook-producer/src/handlers/app.rs | 17 +++--- hook-producer/src/handlers/webhook.rs | 83 +++++++++++---------------- 6 files changed, 92 insertions(+), 94 deletions(-) create mode 100644 .env diff --git a/.env b/.env new file mode 100644 index 0000000..43eda2a --- /dev/null +++ b/.env @@ -0,0 +1 @@ +DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database diff --git a/Cargo.toml b/Cargo.toml index 0a0a2f5..2481c1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,12 +21,13 @@ serde = { version = "1.0" } serde_derive = { version = "1.0" } serde_json = { version = "1.0" } sqlx = { version = "0.7", features = [ + "chrono", + "json", + "migrate", + "postgres", "runtime-tokio", "tls-native-tls", - "postgres", "uuid", - "json", - "chrono", ] } thiserror = { version = "1.0" } tokio = { version = "1.34.0", features = ["full"] } diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 5288ade..8b36b6d 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -590,6 +590,23 @@ impl PgQueue { }) } + pub async fn new_from_pool( + queue_name: &str, + table_name: &str, + pool: PgPool, + retry_policy: RetryPolicy, + ) -> PgQueueResult { + let name = queue_name.to_owned(); + let table = table_name.to_owned(); + + Ok(Self { + name, + pool, + retry_policy, + table, + }) + } + /// Dequeue a Job from this PgQueue to work on it. 
pub async fn dequeue< J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, @@ -827,18 +844,18 @@ mod tests { "https://myhost/endpoint".to_owned() } - #[tokio::test] - async fn test_can_dequeue_job() { + #[sqlx::test(migrations = "../migrations")] + async fn test_can_dequeue_job(db: PgPool) { let job_target = job_target(); let job_parameters = JobParameters::default(); let job_metadata = JobMetadata::default(); let worker_id = worker_id(); let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); - let queue = PgQueue::new( + let queue = PgQueue::new_from_pool( "test_can_dequeue_job", "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", + db, RetryPolicy::default(), ) .await @@ -861,13 +878,13 @@ mod tests { assert_eq!(pg_job.job.target, job_target); } - #[tokio::test] - async fn test_dequeue_returns_none_on_no_jobs() { + #[sqlx::test(migrations = "../migrations")] + async fn test_dequeue_returns_none_on_no_jobs(db: PgPool) { let worker_id = worker_id(); - let queue = PgQueue::new( + let queue = PgQueue::new_from_pool( "test_dequeue_returns_none_on_no_jobs", "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", + db, RetryPolicy::default(), ) .await @@ -881,18 +898,18 @@ mod tests { assert!(pg_job.is_none()); } - #[tokio::test] - async fn test_can_dequeue_tx_job() { + #[sqlx::test(migrations = "../migrations")] + async fn test_can_dequeue_tx_job(db: PgPool) { let job_target = job_target(); let job_metadata = JobMetadata::default(); let job_parameters = JobParameters::default(); let worker_id = worker_id(); let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); - let queue = PgQueue::new( + let queue = PgQueue::new_from_pool( "test_can_dequeue_tx_job", "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", + db, RetryPolicy::default(), ) .await @@ -916,13 +933,13 @@ mod tests { assert_eq!(tx_job.job.target, job_target); } - #[tokio::test] - async fn test_dequeue_tx_returns_none_on_no_jobs() { + #[sqlx::test(migrations = "../migrations")] + async fn test_dequeue_tx_returns_none_on_no_jobs(db: PgPool) { let worker_id = worker_id(); - let queue = PgQueue::new( + let queue = PgQueue::new_from_pool( "test_dequeue_tx_returns_none_on_no_jobs", "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", + db, RetryPolicy::default(), ) .await @@ -936,8 +953,8 @@ mod tests { assert!(tx_job.is_none()); } - #[tokio::test] - async fn test_can_retry_job_with_remaining_attempts() { + #[sqlx::test(migrations = "../migrations")] + async fn test_can_retry_job_with_remaining_attempts(db: PgPool) { let job_target = job_target(); let job_parameters = JobParameters::default(); let job_metadata = JobMetadata::default(); @@ -949,10 +966,10 @@ mod tests { maximum_interval: None, }; - let queue = PgQueue::new( + let queue = PgQueue::new_from_pool( "test_can_retry_job_with_remaining_attempts", "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", + db, retry_policy, ) .await @@ -986,9 +1003,9 @@ mod tests { assert_eq!(retried_job.job.target, job_target); } - #[tokio::test] + #[sqlx::test(migrations = "../migrations")] #[should_panic(expected = "failed to retry job")] - async fn test_cannot_retry_job_without_remaining_attempts() { + async fn test_cannot_retry_job_without_remaining_attempts(db: PgPool) { let job_target = job_target(); let job_parameters = JobParameters::default(); let job_metadata = JobMetadata::default(); @@ -1000,10 +1017,10 @@ mod tests { 
maximum_interval: None, }; - let queue = PgQueue::new( + let queue = PgQueue::new_from_pool( "test_cannot_retry_job_without_remaining_attempts", "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", + db, retry_policy, ) .await diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 633381a..d17578b 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -272,6 +272,8 @@ mod tests { // See: https://github.com/rust-lang/rust/issues/46379. #[allow(unused_imports)] use hook_common::pgqueue::{JobStatus, NewJob, RetryPolicy}; + #[allow(unused_imports)] + use sqlx::PgPool; /// Use process id as a worker id for tests. #[allow(dead_code)] @@ -322,13 +324,12 @@ mod tests { assert_eq!(duration, None); } - #[tokio::test] - async fn test_wait_for_job() { + #[sqlx::test(migrations = "../migrations")] + async fn test_wait_for_job(db: PgPool) { let worker_id = worker_id(); let queue_name = "test_wait_for_job".to_string(); let table_name = "job_queue".to_string(); - let db_url = "postgres://posthog:posthog@localhost:15432/test_database".to_string(); - let queue = PgQueue::new(&queue_name, &table_name, &db_url, RetryPolicy::default()) + let queue = PgQueue::new_from_pool(&queue_name, &table_name, db, RetryPolicy::default()) .await .expect("failed to connect to PG"); @@ -385,8 +386,8 @@ mod tests { .expect("job not successfully completed"); } - #[tokio::test] - async fn test_send_webhook() { + #[sqlx::test(migrations = "../migrations")] + async fn test_send_webhook(_: PgPool) { let method = HttpMethod::POST; let url = "http://localhost:18081/echo"; let headers = collections::HashMap::new(); diff --git a/hook-producer/src/handlers/app.rs b/hook-producer/src/handlers/app.rs index 1666676..c3309de 100644 --- a/hook-producer/src/handlers/app.rs +++ b/hook-producer/src/handlers/app.rs @@ -33,18 +33,15 @@ mod tests { }; use hook_common::pgqueue::{PgQueue, RetryPolicy}; use http_body_util::BodyExt; // for `collect` + use sqlx::PgPool; use tower::ServiceExt; // for `call`, `oneshot`, and `ready` - #[tokio::test] - async fn index() { - let pg_queue = PgQueue::new( - "test_index", - "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), - ) - .await - .expect("failed to construct pg_queue"); + #[sqlx::test(migrations = "../migrations")] + async fn index(db: PgPool) { + let pg_queue = + PgQueue::new_from_pool("test_index", "job_queue", db, RetryPolicy::default()) + .await + .expect("failed to construct pg_queue"); let app = app(pg_queue, None); diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs index 18aebf3..e2864e8 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-producer/src/handlers/webhook.rs @@ -110,22 +110,19 @@ mod tests { }; use hook_common::pgqueue::{PgQueue, RetryPolicy}; use hook_common::webhook::{HttpMethod, WebhookJobParameters}; - use http_body_util::BodyExt; // for `collect` + use http_body_util::BodyExt; + use sqlx::PgPool; // for `collect` use std::collections; use tower::ServiceExt; // for `call`, `oneshot`, and `ready` use crate::handlers::app; - #[tokio::test] - async fn webhook_success() { - let pg_queue = PgQueue::new( - "test_index", - "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), - ) - .await - .expect("failed to construct pg_queue"); + #[sqlx::test(migrations = "../migrations")] + async fn webhook_success(db: PgPool) { + let pg_queue = + 
PgQueue::new_from_pool("test_index", "job_queue", db, RetryPolicy::default()) + .await + .expect("failed to construct pg_queue"); let app = app(pg_queue, None); @@ -165,16 +162,12 @@ mod tests { assert_eq!(&body[..], b"{}"); } - #[tokio::test] - async fn webhook_bad_url() { - let pg_queue = PgQueue::new( - "test_index", - "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), - ) - .await - .expect("failed to construct pg_queue"); + #[sqlx::test(migrations = "../migrations")] + async fn webhook_bad_url(db: PgPool) { + let pg_queue = + PgQueue::new_from_pool("test_index", "job_queue", db, RetryPolicy::default()) + .await + .expect("failed to construct pg_queue"); let app = app(pg_queue, None); @@ -209,16 +202,12 @@ mod tests { assert_eq!(response.status(), StatusCode::BAD_REQUEST); } - #[tokio::test] - async fn webhook_payload_missing_fields() { - let pg_queue = PgQueue::new( - "test_index", - "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), - ) - .await - .expect("failed to construct pg_queue"); + #[sqlx::test(migrations = "../migrations")] + async fn webhook_payload_missing_fields(db: PgPool) { + let pg_queue = + PgQueue::new_from_pool("test_index", "job_queue", db, RetryPolicy::default()) + .await + .expect("failed to construct pg_queue"); let app = app(pg_queue, None); @@ -237,16 +226,12 @@ mod tests { assert_eq!(response.status(), StatusCode::UNPROCESSABLE_ENTITY); } - #[tokio::test] - async fn webhook_payload_not_json() { - let pg_queue = PgQueue::new( - "test_index", - "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), - ) - .await - .expect("failed to construct pg_queue"); + #[sqlx::test(migrations = "../migrations")] + async fn webhook_payload_not_json(db: PgPool) { + let pg_queue = + PgQueue::new_from_pool("test_index", "job_queue", db, RetryPolicy::default()) + .await + .expect("failed to construct pg_queue"); let app = app(pg_queue, None); @@ -265,16 +250,12 @@ mod tests { assert_eq!(response.status(), StatusCode::BAD_REQUEST); } - #[tokio::test] - async fn webhook_payload_body_too_large() { - let pg_queue = PgQueue::new( - "test_index", - "job_queue", - "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), - ) - .await - .expect("failed to construct pg_queue"); + #[sqlx::test(migrations = "../migrations")] + async fn webhook_payload_body_too_large(db: PgPool) { + let pg_queue = + PgQueue::new_from_pool("test_index", "job_queue", db, RetryPolicy::default()) + .await + .expect("failed to construct pg_queue"); let app = app(pg_queue, None); From 9359e7c846152aec9a43cf0c2734fabb7a8a9f0f Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 19 Dec 2023 15:55:21 -0700 Subject: [PATCH 062/130] Add tests, fix some bugs found by said tests --- Cargo.lock | 1 + Cargo.toml | 2 +- hook-janitor/src/fixtures/webhook_cleanup.sql | 81 ++++++++++ hook-janitor/src/webhooks.rs | 140 +++++++++++++++--- 4 files changed, 206 insertions(+), 18 deletions(-) create mode 100644 hook-janitor/src/fixtures/webhook_cleanup.sql diff --git a/Cargo.lock b/Cargo.lock index ba9f4bb..fbfcc50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2009,6 +2009,7 @@ dependencies = [ "serde_json", "slab", "tokio", + "tracing", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2481c1d..a29806a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ http = { version = "0.2" } http-body-util = "0.1.0" metrics = "0.21.1" 
metrics-exporter-prometheus = "0.12.1" -rdkafka = { version = "0.35.0", features = ["cmake-build", "ssl"] } +rdkafka = { version = "0.35.0", features = ["cmake-build", "ssl", "tracing"] } reqwest = { version = "0.11" } regex = "1.10.2" serde = { version = "1.0" } diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql new file mode 100644 index 0000000..6f29d30 --- /dev/null +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -0,0 +1,81 @@ +INSERT INTO + job_queue ( + errors, + metadata, + finished_at, + parameters, + queue, + status, + target + ) +VALUES + -- team:1, plugin_config:2, completed in hour 20 + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'completed', + 'https://myhost/endpoint' + ), + -- another team:1, plugin_config:2, completed in hour 20 + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'completed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, completed in hour 21 (different hour) + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 21:01:18.799371+00', + '{}', + 'webhooks', + 'completed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:3, completed in hour 20 (different plugin_config) + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', + '2023-12-19 20:01:18.80335+00', + '{}', + 'webhooks', + 'completed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, completed but in a different queue + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'not-webhooks', + 'completed', + 'https://myhost/endpoint' + ), + -- team:2, plugin_config:4, completed in hour 20 (different team) + ( + NULL, + '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'completed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, failed in hour 20 + ( + ARRAY ['{"type":"Timeout","details":{"error":{"name":"timeout"}}}'::jsonb], + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'failed', + 'https://myhost/endpoint' + ); \ No newline at end of file diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 7e6f540..e30f71a 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -108,6 +108,26 @@ impl WebhookCleaner { }) } + #[allow(dead_code)] // This is used in tests. + pub fn new_from_pool( + queue_name: &str, + table_name: &str, + pg_pool: PgPool, + kafka_producer: FutureProducer, + app_metrics_topic: String, + ) -> Result { + let queue_name = queue_name.to_owned(); + let table_name = table_name.to_owned(); + + Ok(Self { + queue_name, + table_name, + pg_pool, + kafka_producer, + app_metrics_topic, + }) + } + async fn start_serializable_txn(&self) -> Result> { let mut tx = self .pg_pool @@ -118,6 +138,9 @@ impl WebhookCleaner { // We use serializable isolation so that we observe a snapshot of the DB at the time we // start the cleanup process. This prevents us from accidentally deleting rows that are // added (or become 'completed' or 'failed') after we start the cleanup process. + // + // If we find that this has a significant performance impact, we could instead move + // rows to a temporary table for processing and then deletion. 
sqlx::query("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE") .execute(&mut *tx) .await @@ -133,8 +156,8 @@ impl WebhookCleaner { let base_query = format!( r#" SELECT DATE_TRUNC('hour', finished_at) AS hour, - metadata->>'team_id' AS team_id, - metadata->>'plugin_config_id' AS plugin_config_id, + (metadata->>'team_id')::bigint AS team_id, + (metadata->>'plugin_config_id')::bigint AS plugin_config_id, count(*) as successes FROM {0} WHERE status = 'completed' @@ -185,9 +208,13 @@ impl WebhookCleaner { let base_query = format!( r#" SELECT DATE_TRUNC('hour', finished_at) AS hour, - metadata->>'team_id' AS team_id, - metadata->>'plugin_config_id' AS plugin_config_id, - errors[-1] AS last_error, + (metadata->>'team_id')::bigint AS team_id, + (metadata->>'plugin_config_id')::bigint AS plugin_config_id, + CASE + WHEN array_length(errors, 1) > 1 + THEN errors[array_length(errors, 1)] + ELSE errors[1] + END AS last_error, count(*) as failures FROM {0} WHERE status = 'failed' @@ -302,27 +329,34 @@ impl WebhookCleaner { // Note that we select all completed and failed rows without any pagination at the moment. // We aggregrate as much as possible with GROUP BY, truncating the timestamp down to the // hour just like App Metrics does. A completed row is 24 bytes (and aggregates an entire - // hour per `plugin_config_id`), and a failed row is 104 + the message length (and - // aggregates an entire hour per `plugin_config_id` per `error`), so we can fit a lot of - // rows in memory. It seems unlikely we'll need to paginate, but that can be added in the + // hour per `plugin_config_id`), and a failed row is 104 bytes + the error message length + // (and aggregates an entire hour per `plugin_config_id` per `error`), so we can fit a lot + // of rows in memory. It seems unlikely we'll need to paginate, but that can be added in the // future if necessary. 
         let mut tx = self.start_serializable_txn().await?;
+
         let completed_rows = self.get_completed_rows(&mut tx).await?;
-        let mut payloads = self.serialize_completed_rows(completed_rows)?;
+        let completed_agg_row_count = completed_rows.len();
+        let completed_kafka_payloads = self.serialize_completed_rows(completed_rows)?;
+
         let failed_rows = self.get_failed_rows(&mut tx).await?;
-        let mut failed_payloads = self.serialize_failed_rows(failed_rows)?;
-        payloads.append(&mut failed_payloads);
+        let failed_agg_row_count = failed_rows.len();
+        let mut failed_kafka_payloads = self.serialize_failed_rows(failed_rows)?;
+
+        let mut all_kafka_payloads = completed_kafka_payloads;
+        all_kafka_payloads.append(&mut failed_kafka_payloads);
+
         let mut rows_deleted: u64 = 0;
-        if !payloads.is_empty() {
-            self.send_messages_to_kafka(payloads).await?;
+        if !all_kafka_payloads.is_empty() {
+            self.send_messages_to_kafka(all_kafka_payloads).await?;
             rows_deleted = self.delete_observed_rows(&mut tx).await?;
             self.commit_txn(tx).await?;
         }

         debug!(
-            "WebhookCleaner finished cleanup, deleted {} rows",
-            rows_deleted
+            "WebhookCleaner finished cleanup, deleted {} rows ({} completed+aggregated, {} failed+aggregated)",
+            rows_deleted, completed_agg_row_count, failed_agg_row_count
         );

         Ok(())
@@ -343,6 +377,78 @@ impl Cleaner for WebhookCleaner {

 #[cfg(test)]
 mod tests {
-    #[tokio::test]
-    async fn test() {}
+    use super::*;
+    use crate::config;
+    use crate::kafka_producer::{create_kafka_producer, KafkaContext};
+    use rdkafka::mocking::MockCluster;
+    use rdkafka::producer::{DefaultProducerContext, FutureProducer};
+    use sqlx::PgPool;
+
+    const APP_METRICS_TOPIC: &str = "app_metrics";
+
+    async fn create_mock_kafka() -> (
+        MockCluster<'static, DefaultProducerContext>,
+        FutureProducer<KafkaContext>,
+    ) {
+        let cluster = MockCluster::new(1).expect("failed to create mock brokers");
+
+        let config = config::KafkaConfig {
+            kafka_producer_linger_ms: 0,
+            kafka_producer_queue_mib: 50,
+            kafka_message_timeout_ms: 5000,
+            kafka_compression_codec: "none".to_string(),
+            kafka_hosts: cluster.bootstrap_servers(),
+            app_metrics_topic: APP_METRICS_TOPIC.to_string(),
+            plugin_log_entries_topic: "plugin_log_entries".to_string(),
+            kafka_tls: false,
+        };
+
+        (
+            cluster,
+            create_kafka_producer(&config)
+                .await
+                .expect("failed to create mocked kafka producer"),
+        )
+    }
+
+    #[sqlx::test(migrations = "../migrations", fixtures("webhook_cleanup"))]
+    async fn test_cleanup_impl(db: PgPool) {
+        let (mock_cluster, mock_producer) = create_mock_kafka().await;
+        mock_cluster
+            .create_topic(APP_METRICS_TOPIC, 1, 1)
+            .expect("failed to create mock app_metrics topic");
+
+        let table_name = "job_queue";
+        let queue_name = "webhooks";
+
+        let webhook_cleaner = WebhookCleaner::new_from_pool(
+            &queue_name,
+            &table_name,
+            db,
+            mock_producer,
+            APP_METRICS_TOPIC.to_owned(),
+        )
+        .expect("unable to create webhook cleaner");
+
+        let _ = webhook_cleaner
+            .cleanup_impl()
+            .await
+            .expect("webhook cleanup_impl failed");
+
+        // TODO: I spent a lot of time trying to get the mock Kafka consumer to work, but I think
+        // I've identified an issue with the rust-rdkafka library:
+        // https://github.com/fede1024/rust-rdkafka/issues/629#issuecomment-1863555417
+        //
+        // I wanted to test the messages put on the AppMetrics topic, but I think we need to figure
+        // out that issue in order to do so. (Capture uses the MockProducer but not a
+        // Consumer, fwiw.)
+ // + // For now, I'll probably have to make `cleanup_impl` return the row information so at + // least we can inspect that for correctness. + } + + // #[sqlx::test] + // async fn test_serializable_isolation() { + // TODO: I'm going to add a test that verifies new rows aren't visible during the txn. + // } } From cc206a54216cf30d72900cedb5904ec1d2685cee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 16:50:39 +0100 Subject: [PATCH 063/130] feat: Give non-transactional consumer a chance --- hook-consumer/src/config.rs | 3 +++ hook-consumer/src/consumer.rs | 20 +++++++++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs index 8e4bde9..36c120a 100644 --- a/hook-consumer/src/config.rs +++ b/hook-consumer/src/config.rs @@ -26,6 +26,9 @@ pub struct Config { #[envconfig(nested = true)] pub retry_policy: RetryPolicyConfig, + #[envconfig(default = "true")] + pub transactional: bool, + #[envconfig(default = "job_queue")] pub table_name: String, } diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 12d4b38..04600f1 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -23,6 +23,8 @@ pub struct WebhookConsumer<'p> { client: reqwest::Client, /// Maximum number of concurrent jobs being processed. max_concurrent_jobs: usize, + /// Indicates whether we are holding an open transaction while processing or not. + transactional: bool, } impl<'p> WebhookConsumer<'p> { @@ -54,6 +56,19 @@ impl<'p> WebhookConsumer<'p> { } } + /// Wait until a job becomes available in our queue. + async fn wait_for_job_tx<'a>( + &self, + ) -> Result, WebhookConsumerError> { + loop { + if let Some(job) = self.queue.dequeue_tx(&self.name).await? { + return Ok(job); + } else { + task::sleep(self.poll_interval).await; + } + } + } + /// Wait until a job becomes available in our queue. async fn wait_for_job<'a>( &self, @@ -72,7 +87,10 @@ impl<'p> WebhookConsumer<'p> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { - let webhook_job = self.wait_for_job().await?; + let webhook_job = match self.transactional { + true => self.wait_for_job_tx().await, + false => self.wait_for_job().await, + }?; // reqwest::Client internally wraps with Arc, so this allocation is cheap. let client = self.client.clone(); From bd4dc4bdc5b01d52047cc439d4e1723d9c05e0f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 14 Dec 2023 12:30:31 +0100 Subject: [PATCH 064/130] refactor: Two clients one for each mode --- hook-consumer/src/consumer.rs | 137 +++++++++++++++++++++++++++++++--- 1 file changed, 126 insertions(+), 11 deletions(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 04600f1..83ea319 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -23,8 +23,6 @@ pub struct WebhookConsumer<'p> { client: reqwest::Client, /// Maximum number of concurrent jobs being processed. max_concurrent_jobs: usize, - /// Indicates whether we are holding an open transaction while processing or not. - transactional: bool, } impl<'p> WebhookConsumer<'p> { @@ -57,11 +55,9 @@ impl<'p> WebhookConsumer<'p> { } /// Wait until a job becomes available in our queue. 
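The `transactional` flag added to `Config` above is plain environment wiring. A minimal sketch of how such a flag surfaces (hypothetical `Settings` struct, assuming the `envconfig` crate these crates already use; unset variables fall back to their defaults, so `TRANSACTIONAL=false` opts a consumer out of held-open transactions):

```rust
use envconfig::Envconfig;

// Hypothetical cut-down config mirroring the fields above.
#[derive(Envconfig)]
struct Settings {
    #[envconfig(default = "true")]
    pub transactional: bool,

    #[envconfig(default = "job_queue")]
    pub table_name: String,
}

fn main() {
    let settings = Settings::init_from_env().expect("invalid environment");
    println!(
        "transactional={} table={}",
        settings.transactional, settings.table_name
    );
}
```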
- async fn wait_for_job_tx<'a>( - &self, - ) -> Result, WebhookConsumerError> { + async fn wait_for_job<'a>(&self) -> Result, WebhookConsumerError> { loop { - if let Some(job) = self.queue.dequeue_tx(&self.name).await? { + if let Some(job) = self.queue.dequeue(&self.name).await? { return Ok(job); } else { task::sleep(self.poll_interval).await; @@ -69,6 +65,68 @@ impl<'p> WebhookConsumer<'p> { } } + /// Run this consumer to continuously process any jobs that become available. + pub async fn run(&self) -> Result<(), WebhookConsumerError> { + let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); + + loop { + let webhook_job = self.wait_for_job().await?; + + // reqwest::Client internally wraps with Arc, so this allocation is cheap. + let client = self.client.clone(); + let permit = semaphore.clone().acquire_owned().await.unwrap(); + + tokio::spawn(async move { + let result = process_webhook_job(client, webhook_job).await; + drop(permit); + result + }); + } + } +} + +/// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. +pub struct WebhookTransactionConsumer<'p> { + /// An identifier for this consumer. Used to mark jobs we have consumed. + name: String, + /// The queue we will be dequeuing jobs from. + queue: &'p PgQueue, + /// The interval for polling the queue. + poll_interval: time::Duration, + /// The client used for HTTP requests. + client: reqwest::Client, + /// Maximum number of concurrent jobs being processed. + max_concurrent_jobs: usize, +} + +impl<'p> WebhookTransactionConsumer<'p> { + pub fn new( + name: &str, + queue: &'p PgQueue, + poll_interval: time::Duration, + request_timeout: time::Duration, + max_concurrent_jobs: usize, + ) -> Result { + let mut headers = header::HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static("application/json"), + ); + + let client = reqwest::Client::builder() + .default_headers(headers) + .timeout(request_timeout) + .build()?; + + Ok(Self { + name: name.to_owned(), + queue, + poll_interval, + client, + max_concurrent_jobs, + }) + } + /// Wait until a job becomes available in our queue. async fn wait_for_job<'a>( &self, @@ -87,17 +145,14 @@ impl<'p> WebhookConsumer<'p> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { - let webhook_job = match self.transactional { - true => self.wait_for_job_tx().await, - false => self.wait_for_job().await, - }?; + let webhook_job = self.wait_for_job().await?; // reqwest::Client internally wraps with Arc, so this allocation is cheap. let client = self.client.clone(); let permit = semaphore.clone().acquire_owned().await.unwrap(); tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job).await; + let result = process_webhook_job_tx(client, webhook_job).await; drop(permit); result }); @@ -187,6 +242,66 @@ async fn process_webhook_job( } } +/// Process a webhook job by transitioning it to its appropriate state after its request is sent. +/// After we finish, the webhook job will be set as completed (if the request was successful), retryable (if the request +/// was unsuccessful but we can still attempt a retry), or failed (if the request was unsuccessful and no more retries +/// may be attempted). +/// +/// A webhook job is considered retryable after a failing request if: +/// 1. The job has attempts remaining (i.e. hasn't reached `max_attempts`), and... +/// 2. The status code indicates retrying at a later point could resolve the issue. 
This means: 429 and any 5XX. +/// +/// # Arguments +/// +/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. +/// * `request_timeout`: A timeout for the HTTP request. +async fn process_webhook_job( + client: reqwest::Client, + webhook_job: PgJob, +) -> Result<(), WebhookConsumerError> { + match send_webhook( + client, + &webhook_job.job.parameters.method, + &webhook_job.job.parameters.url, + &webhook_job.job.parameters.headers, + webhook_job.job.parameters.body.clone(), + ) + .await + { + Ok(_) => { + webhook_job + .complete() + .await + .map_err(|error| WebhookConsumerError::PgJobError(error.to_string()))?; + Ok(()) + } + Err(WebhookConsumerError::RetryableWebhookError { + reason, + retry_after, + }) => match webhook_job.retry(reason.to_string(), retry_after).await { + Ok(_) => Ok(()), + Err(PgJobError::RetryInvalidError { + job: webhook_job, + error: fail_error, + }) => { + webhook_job + .fail(fail_error.to_string()) + .await + .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), + }, + Err(error) => { + webhook_job + .fail(error.to_string()) + .await + .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + } +} + /// Make an HTTP request to a webhook endpoint. /// /// # Arguments From 3747c3450bc1a87240ed16a8d6d4ac20a3f216f1 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 20 Dec 2023 07:04:59 -0700 Subject: [PATCH 065/130] Handle some feedback --- README.md | 3 +++ hook-janitor/src/webhooks.rs | 10 +++------- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index a3a674c..f579cd3 100644 --- a/README.md +++ b/README.md @@ -15,5 +15,8 @@ docker compose -f docker-compose.yml up -d --wait 2. Test: ```bash +# Note that tests require a DATABASE_URL environment variable to be set, e.g.: +# export DATABASE_URL=postgres://posthog:posthog@localhost:15432/test_database +# But there is an .env file in the project root that should be used automatically. 
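# For example, with the Docker Compose stack above running and the .env file in
# place, a single crate's tests can be run with:
#   cargo test -p hook-janitor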
cargo test ``` diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index e30f71a..a9b05e5 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -210,11 +210,7 @@ impl WebhookCleaner { SELECT DATE_TRUNC('hour', finished_at) AS hour, (metadata->>'team_id')::bigint AS team_id, (metadata->>'plugin_config_id')::bigint AS plugin_config_id, - CASE - WHEN array_length(errors, 1) > 1 - THEN errors[array_length(errors, 1)] - ELSE errors[1] - END AS last_error, + errors[array_upper(errors, 1)] AS last_error, count(*) as failures FROM {0} WHERE status = 'failed' @@ -342,10 +338,10 @@ impl WebhookCleaner { let failed_rows = self.get_failed_rows(&mut tx).await?; let failed_agg_row_count = failed_rows.len(); - let mut failed_kafka_payloads = self.serialize_failed_rows(failed_rows)?; + let failed_kafka_payloads = self.serialize_failed_rows(failed_rows)?; let mut all_kafka_payloads = completed_kafka_payloads; - all_kafka_payloads.append(&mut failed_kafka_payloads); + all_kafka_payloads.extend(failed_kafka_payloads.into_iter()); let mut rows_deleted: u64 = 0; if !all_kafka_payloads.is_empty() { From d28dfef201c40d2cfe4f708f5230bd83b418b123 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 20 Dec 2023 15:25:56 +0100 Subject: [PATCH 066/130] refactor: Support both modes in single client with WebhookJob trait --- Cargo.lock | 1 + hook-common/Cargo.toml | 1 + hook-common/src/pgqueue.rs | 179 +++++++++++++++++--------------- hook-consumer/src/consumer.rs | 187 +++++++++++----------------------- hook-consumer/src/main.rs | 6 +- 5 files changed, 164 insertions(+), 210 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ba9f4bb..130f765 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1008,6 +1008,7 @@ dependencies = [ name = "hook-common" version = "0.1.0" dependencies = [ + "async-trait", "axum", "chrono", "http 0.2.11", diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 6350ba4..9b20396 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +async-trait = { workspace = true } axum = { workspace = true, features = ["http2"] } chrono = { workspace = true } http = { workspace = true } diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 5288ade..78fe2fc 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -6,6 +6,7 @@ use std::default::Default; use std::str::FromStr; use std::time; +use async_trait::async_trait; use chrono; use serde; use sqlx::postgres::{PgPool, PgPoolOptions}; @@ -149,6 +150,22 @@ impl Job { } } +#[async_trait] +pub trait PgQueueJob { + async fn complete(mut self) -> Result>>; + + async fn fail( + mut self, + error: E, + ) -> Result, PgJobError>>; + + async fn retry( + mut self, + error: E, + preferred_retry_interval: Option, + ) -> Result, PgJobError>>; +} + /// A Job that can be updated in PostgreSQL. 
#[derive(Debug)] pub struct PgJob { @@ -158,22 +175,10 @@ pub struct PgJob { pub retry_policy: RetryPolicy, } -impl PgJob { - pub async fn retry( - mut self, - error: E, - preferred_retry_interval: Option, - ) -> Result, PgJobError>> { - if self.job.is_gte_max_attempts() { - return Err(PgJobError::RetryInvalidError { - job: self, - error: "Maximum attempts reached".to_owned(), - }); - } - let retryable_job = self.job.retry(error); - let retry_interval = self - .retry_policy - .time_until_next_retry(&retryable_job, preferred_retry_interval); +#[async_trait] +impl PgQueueJob for PgJob { + async fn complete(mut self) -> Result>>> { + let completed_job = self.job.complete(); let base_query = format!( r#" @@ -181,9 +186,7 @@ UPDATE "{0}" SET finished_at = NOW(), - status = 'available'::job_status, - scheduled_at = NOW() + $3, - errors = array_append("{0}".errors, $4) + status = 'completed'::job_status WHERE "{0}".id = $2 AND queue = $1 @@ -194,10 +197,8 @@ RETURNING ); sqlx::query(&base_query) - .bind(&retryable_job.queue) - .bind(retryable_job.id) - .bind(retry_interval) - .bind(&retryable_job.error) + .bind(&completed_job.queue) + .bind(completed_job.id) .execute(&mut *self.connection) .await .map_err(|error| PgJobError::QueryError { @@ -205,11 +206,14 @@ RETURNING error, })?; - Ok(retryable_job) + Ok(completed_job) } - pub async fn complete(mut self) -> Result>> { - let completed_job = self.job.complete(); + async fn fail( + mut self, + error: E, + ) -> Result, PgJobError>>> { + let failed_job = self.job.fail(error); let base_query = format!( r#" @@ -217,19 +221,22 @@ UPDATE "{0}" SET finished_at = NOW(), - status = 'completed'::job_status + status = 'failed'::job_status + errors = array_append("{0}".errors, $3) WHERE "{0}".id = $2 AND queue = $1 RETURNING "{0}".* + "#, &self.table ); sqlx::query(&base_query) - .bind(&completed_job.queue) - .bind(completed_job.id) + .bind(&failed_job.queue) + .bind(failed_job.id) + .bind(&failed_job.error) .execute(&mut *self.connection) .await .map_err(|error| PgJobError::QueryError { @@ -237,14 +244,24 @@ RETURNING error, })?; - Ok(completed_job) + Ok(failed_job) } - pub async fn fail( + async fn retry( mut self, error: E, - ) -> Result, PgJobError>> { - let failed_job = self.job.fail(error); + preferred_retry_interval: Option, + ) -> Result, PgJobError>>> { + if self.job.is_gte_max_attempts() { + return Err(PgJobError::RetryInvalidError { + job: Box::new(self), + error: "Maximum attempts reached".to_owned(), + }); + } + let retryable_job = self.job.retry(error); + let retry_interval = self + .retry_policy + .time_until_next_retry(&retryable_job, preferred_retry_interval); let base_query = format!( r#" @@ -252,22 +269,23 @@ UPDATE "{0}" SET finished_at = NOW(), - status = 'failed'::job_status - errors = array_append("{0}".errors, $3) + status = 'available'::job_status, + scheduled_at = NOW() + $3, + errors = array_append("{0}".errors, $4) WHERE "{0}".id = $2 AND queue = $1 RETURNING "{0}".* - "#, &self.table ); sqlx::query(&base_query) - .bind(&failed_job.queue) - .bind(failed_job.id) - .bind(&failed_job.error) + .bind(&retryable_job.queue) + .bind(retryable_job.id) + .bind(retry_interval) + .bind(&retryable_job.error) .execute(&mut *self.connection) .await .map_err(|error| PgJobError::QueryError { @@ -275,7 +293,7 @@ RETURNING error, })?; - Ok(failed_job) + Ok(retryable_job) } } @@ -289,22 +307,12 @@ pub struct PgTransactionJob<'c, J, M> { pub retry_policy: RetryPolicy, } -impl<'c, J, M> PgTransactionJob<'c, J, M> { - pub async fn retry( +#[async_trait] 
+impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactionJob<'c, J, M> { + async fn complete( mut self, - error: E, - preferred_retry_interval: Option, - ) -> Result, PgJobError>> { - if self.job.is_gte_max_attempts() { - return Err(PgJobError::RetryInvalidError { - job: self, - error: "Maximum attempts reached".to_owned(), - }); - } - let retryable_job = self.job.retry(error); - let retry_interval = self - .retry_policy - .time_until_next_retry(&retryable_job, preferred_retry_interval); + ) -> Result>>> { + let completed_job = self.job.complete(); let base_query = format!( r#" @@ -312,24 +320,19 @@ UPDATE "{0}" SET finished_at = NOW(), - status = 'available'::job_status, - scheduled_at = NOW() + $3, - errors = array_append("{0}".errors, $4) + status = 'completed'::job_status WHERE "{0}".id = $2 AND queue = $1 RETURNING "{0}".* - "#, &self.table ); sqlx::query(&base_query) - .bind(&retryable_job.queue) - .bind(retryable_job.id) - .bind(retry_interval) - .bind(&retryable_job.error) + .bind(&completed_job.queue) + .bind(completed_job.id) .execute(&mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { @@ -345,13 +348,14 @@ RETURNING error, })?; - Ok(retryable_job) + Ok(completed_job) } - pub async fn complete( + async fn fail( mut self, - ) -> Result>> { - let completed_job = self.job.complete(); + error: E, + ) -> Result, PgJobError>>> { + let failed_job = self.job.fail(error); let base_query = format!( r#" @@ -359,7 +363,8 @@ UPDATE "{0}" SET finished_at = NOW(), - status = 'completed'::job_status + status = 'failed'::job_status + errors = array_append("{0}".errors, $3) WHERE "{0}".id = $2 AND queue = $1 @@ -370,8 +375,9 @@ RETURNING ); sqlx::query(&base_query) - .bind(&completed_job.queue) - .bind(completed_job.id) + .bind(&failed_job.queue) + .bind(failed_job.id) + .bind(&failed_job.error) .execute(&mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { @@ -387,14 +393,24 @@ RETURNING error, })?; - Ok(completed_job) + Ok(failed_job) } - pub async fn fail( + async fn retry( mut self, error: E, - ) -> Result, PgJobError>> { - let failed_job = self.job.fail(error); + preferred_retry_interval: Option, + ) -> Result, PgJobError>>> { + if self.job.is_gte_max_attempts() { + return Err(PgJobError::RetryInvalidError { + job: Box::new(self), + error: "Maximum attempts reached".to_owned(), + }); + } + let retryable_job = self.job.retry(error); + let retry_interval = self + .retry_policy + .time_until_next_retry(&retryable_job, preferred_retry_interval); let base_query = format!( r#" @@ -402,21 +418,24 @@ UPDATE "{0}" SET finished_at = NOW(), - status = 'failed'::job_status - errors = array_append("{0}".errors, $3) + status = 'available'::job_status, + scheduled_at = NOW() + $3, + errors = array_append("{0}".errors, $4) WHERE "{0}".id = $2 AND queue = $1 RETURNING "{0}".* + "#, &self.table ); sqlx::query(&base_query) - .bind(&failed_job.queue) - .bind(failed_job.id) - .bind(&failed_job.error) + .bind(&retryable_job.queue) + .bind(retryable_job.id) + .bind(retry_interval) + .bind(&retryable_job.error) .execute(&mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { @@ -432,7 +451,7 @@ RETURNING error, })?; - Ok(failed_job) + Ok(retryable_job) } } diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 83ea319..4fdc8e4 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -3,7 +3,9 @@ use std::sync::Arc; use std::time; use async_std::task; -use 
hook_common::pgqueue::{PgJobError, PgQueue, PgQueueError, PgTransactionJob}; +use hook_common::pgqueue::{ + PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob, +}; use hook_common::webhook::{HttpMethod, WebhookJobError, WebhookJobMetadata, WebhookJobParameters}; use http::StatusCode; use reqwest::header; @@ -11,6 +13,32 @@ use tokio::sync; use crate::error::{ConsumerError, WebhookError}; +/// A WebhookJob is any PgQueueJob that returns webhook required parameters and metadata. +trait WebhookJob: PgQueueJob + std::marker::Send { + fn parameters<'a>(&'a self) -> &'a WebhookJobParameters; + fn metadata<'a>(&'a self) -> &'a WebhookJobMetadata; +} + +impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata> { + fn parameters<'a>(&'a self) -> &'a WebhookJobParameters { + &self.job.parameters + } + + fn metadata<'a>(&'a self) -> &'a WebhookJobMetadata { + &self.job.metadata + } +} + +impl WebhookJob for PgJob { + fn parameters<'a>(&'a self) -> &'a WebhookJobParameters { + &self.job.parameters + } + + fn metadata<'a>(&'a self) -> &'a WebhookJobMetadata { + &self.job.metadata + } +} + /// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. pub struct WebhookConsumer<'p> { /// An identifier for this consumer. Used to mark jobs we have consumed. @@ -55,7 +83,9 @@ impl<'p> WebhookConsumer<'p> { } /// Wait until a job becomes available in our queue. - async fn wait_for_job<'a>(&self) -> Result, WebhookConsumerError> { + async fn wait_for_job<'a>( + &self, + ) -> Result, ConsumerError> { loop { if let Some(job) = self.queue.dequeue(&self.name).await? { return Ok(job); @@ -65,8 +95,21 @@ impl<'p> WebhookConsumer<'p> { } } + /// Wait until a job becomes available in our queue in transactional mode. + async fn wait_for_job_tx<'a>( + &self, + ) -> Result, ConsumerError> { + loop { + if let Some(job) = self.queue.dequeue_tx(&self.name).await? { + return Ok(job); + } else { + task::sleep(self.poll_interval).await; + } + } + } + /// Run this consumer to continuously process any jobs that become available. - pub async fn run(&self) -> Result<(), WebhookConsumerError> { + pub async fn run(&self) -> Result<(), ConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { @@ -83,76 +126,20 @@ impl<'p> WebhookConsumer<'p> { }); } } -} - -/// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. -pub struct WebhookTransactionConsumer<'p> { - /// An identifier for this consumer. Used to mark jobs we have consumed. - name: String, - /// The queue we will be dequeuing jobs from. - queue: &'p PgQueue, - /// The interval for polling the queue. - poll_interval: time::Duration, - /// The client used for HTTP requests. - client: reqwest::Client, - /// Maximum number of concurrent jobs being processed. - max_concurrent_jobs: usize, -} - -impl<'p> WebhookTransactionConsumer<'p> { - pub fn new( - name: &str, - queue: &'p PgQueue, - poll_interval: time::Duration, - request_timeout: time::Duration, - max_concurrent_jobs: usize, - ) -> Result { - let mut headers = header::HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static("application/json"), - ); - - let client = reqwest::Client::builder() - .default_headers(headers) - .timeout(request_timeout) - .build()?; - - Ok(Self { - name: name.to_owned(), - queue, - poll_interval, - client, - max_concurrent_jobs, - }) - } - - /// Wait until a job becomes available in our queue. 
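The `WebhookJob` trait introduced at the top of this diff exists so that a single generic code path can drive both the plain and the transactional job types. A toy sketch of that shape (hypothetical types, not this crate's):

```rust
// Both job flavors expose the same accessor, so one generic function can
// process either without duplicating the request logic.
trait WebhookJob {
    fn url(&self) -> &str;
}

struct PlainJob {
    url: String,
}

struct TransactionalJob {
    url: String,
}

impl WebhookJob for PlainJob {
    fn url(&self) -> &str {
        &self.url
    }
}

impl WebhookJob for TransactionalJob {
    fn url(&self) -> &str {
        &self.url
    }
}

fn process<W: WebhookJob>(job: &W) {
    println!("would send webhook to {}", job.url());
}

fn main() {
    process(&PlainJob { url: "https://example.com/a".into() });
    process(&TransactionalJob { url: "https://example.com/b".into() });
}
```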
- async fn wait_for_job<'a>( - &self, - ) -> Result, ConsumerError> { - loop { - if let Some(job) = self.queue.dequeue_tx(&self.name).await? { - return Ok(job); - } else { - task::sleep(self.poll_interval).await; - } - } - } - /// Run this consumer to continuously process any jobs that become available. - pub async fn run(&self) -> Result<(), ConsumerError> { + /// Run this consumer to continuously process any jobs that become available in transactional mode. + pub async fn run_tx(&self) -> Result<(), ConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); loop { - let webhook_job = self.wait_for_job().await?; + let webhook_job = self.wait_for_job_tx().await?; // reqwest::Client internally wraps with Arc, so this allocation is cheap. let client = self.client.clone(); let permit = semaphore.clone().acquire_owned().await.unwrap(); tokio::spawn(async move { - let result = process_webhook_job_tx(client, webhook_job).await; + let result = process_webhook_job(client, webhook_job).await; drop(permit); result }); @@ -173,16 +160,18 @@ impl<'p> WebhookTransactionConsumer<'p> { /// /// * `client`: An HTTP client to execute the webhook job request. /// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. -async fn process_webhook_job( +async fn process_webhook_job( client: reqwest::Client, - webhook_job: PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata>, + webhook_job: W, ) -> Result<(), ConsumerError> { + let parameters = webhook_job.parameters(); + match send_webhook( client, - &webhook_job.job.parameters.method, - &webhook_job.job.parameters.url, - &webhook_job.job.parameters.headers, - webhook_job.job.parameters.body.clone(), + ¶meters.method, + ¶meters.url, + ¶meters.headers, + parameters.body.clone(), ) .await { @@ -242,66 +231,6 @@ async fn process_webhook_job( } } -/// Process a webhook job by transitioning it to its appropriate state after its request is sent. -/// After we finish, the webhook job will be set as completed (if the request was successful), retryable (if the request -/// was unsuccessful but we can still attempt a retry), or failed (if the request was unsuccessful and no more retries -/// may be attempted). -/// -/// A webhook job is considered retryable after a failing request if: -/// 1. The job has attempts remaining (i.e. hasn't reached `max_attempts`), and... -/// 2. The status code indicates retrying at a later point could resolve the issue. This means: 429 and any 5XX. -/// -/// # Arguments -/// -/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. -/// * `request_timeout`: A timeout for the HTTP request. 
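The doc comment above states the retry rule in prose. As a sketch (not the crate's exact code), the classification amounts to:

```rust
use http::StatusCode;

// Retry only when a later attempt could plausibly succeed: the server asked
// us to back off (429) or failed transiently (any 5xx).
fn is_retryable(status: StatusCode) -> bool {
    status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()
}

fn main() {
    assert!(is_retryable(StatusCode::TOO_MANY_REQUESTS));
    assert!(is_retryable(StatusCode::SERVICE_UNAVAILABLE));
    assert!(!is_retryable(StatusCode::NOT_FOUND)); // other 4xx: give up
}
```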
-async fn process_webhook_job( - client: reqwest::Client, - webhook_job: PgJob, -) -> Result<(), WebhookConsumerError> { - match send_webhook( - client, - &webhook_job.job.parameters.method, - &webhook_job.job.parameters.url, - &webhook_job.job.parameters.headers, - webhook_job.job.parameters.body.clone(), - ) - .await - { - Ok(_) => { - webhook_job - .complete() - .await - .map_err(|error| WebhookConsumerError::PgJobError(error.to_string()))?; - Ok(()) - } - Err(WebhookConsumerError::RetryableWebhookError { - reason, - retry_after, - }) => match webhook_job.retry(reason.to_string(), retry_after).await { - Ok(_) => Ok(()), - Err(PgJobError::RetryInvalidError { - job: webhook_job, - error: fail_error, - }) => { - webhook_job - .fail(fail_error.to_string()) - .await - .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; - Ok(()) - } - Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), - }, - Err(error) => { - webhook_job - .fail(error.to_string()) - .await - .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; - Ok(()) - } - } -} - /// Make an HTTP request to a webhook endpoint. /// /// # Arguments diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index bb02526..49c2e76 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -31,7 +31,11 @@ async fn main() -> Result<(), ConsumerError> { config.max_concurrent_jobs, ); - let _ = consumer.run().await; + if config.transactional { + consumer.run_tx().await?; + } else { + consumer.run().await?; + } Ok(()) } From b747d4c242b5abd0b959190a99797c717b3c7239 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 20 Dec 2023 15:31:21 +0100 Subject: [PATCH 067/130] fix: Elide lifetimes --- hook-consumer/src/consumer.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 4fdc8e4..73bac1c 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -13,28 +13,28 @@ use tokio::sync; use crate::error::{ConsumerError, WebhookError}; -/// A WebhookJob is any PgQueueJob that returns webhook required parameters and metadata. +/// A WebhookJob is any PgQueueJob that returns a reference to webhook parameters and metadata. 
trait WebhookJob: PgQueueJob + std::marker::Send { - fn parameters<'a>(&'a self) -> &'a WebhookJobParameters; - fn metadata<'a>(&'a self) -> &'a WebhookJobMetadata; + fn parameters(&self) -> &WebhookJobParameters; + fn metadata(&self) -> &WebhookJobMetadata; } impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata> { - fn parameters<'a>(&'a self) -> &'a WebhookJobParameters { + fn parameters(&self) -> &WebhookJobParameters { &self.job.parameters } - fn metadata<'a>(&'a self) -> &'a WebhookJobMetadata { + fn metadata(&self) -> &WebhookJobMetadata { &self.job.metadata } } impl WebhookJob for PgJob { - fn parameters<'a>(&'a self) -> &'a WebhookJobParameters { + fn parameters(&self) -> &WebhookJobParameters { &self.job.parameters } - fn metadata<'a>(&'a self) -> &'a WebhookJobMetadata { + fn metadata(&self) -> &WebhookJobMetadata { &self.job.metadata } } From 059fc99cb9bc4e381534a5cdcd7361b269c4b87a Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 20 Dec 2023 07:54:54 -0700 Subject: [PATCH 068/130] Add SerializableTxn for a little safety --- hook-janitor/src/webhooks.rs | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index a9b05e5..71fe567 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -84,6 +84,10 @@ struct FailedRow { failures: u32, } +// A simple wrapper type that ensures we don't use any old Transaction object when we need one +// that has set the isolation level to serializable. +struct SerializableTxn<'a>(Transaction<'a, Postgres>); + impl WebhookCleaner { pub fn new( queue_name: &str, @@ -128,7 +132,7 @@ impl WebhookCleaner { }) } - async fn start_serializable_txn(&self) -> Result> { + async fn start_serializable_txn(&self) -> Result { let mut tx = self .pg_pool .begin() @@ -146,13 +150,10 @@ impl WebhookCleaner { .await .map_err(|e| WebhookCleanerError::StartTxnError { error: e })?; - Ok(tx) + Ok(SerializableTxn(tx)) } - async fn get_completed_rows( - &self, - tx: &mut Transaction<'_, Postgres>, - ) -> Result> { + async fn get_completed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result> { let base_query = format!( r#" SELECT DATE_TRUNC('hour', finished_at) AS hour, @@ -170,7 +171,7 @@ impl WebhookCleaner { let rows = sqlx::query_as::<_, CompletedRow>(&base_query) .bind(&self.queue_name) - .fetch_all(&mut **tx) + .fetch_all(&mut *tx.0) .await .map_err(|e| WebhookCleanerError::GetCompletedRowsError { error: e })?; @@ -204,7 +205,7 @@ impl WebhookCleaner { Ok(payloads) } - async fn get_failed_rows(&self, tx: &mut Transaction<'_, Postgres>) -> Result> { + async fn get_failed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result> { let base_query = format!( r#" SELECT DATE_TRUNC('hour', finished_at) AS hour, @@ -223,7 +224,7 @@ impl WebhookCleaner { let rows = sqlx::query_as::<_, FailedRow>(&base_query) .bind(&self.queue_name) - .fetch_all(&mut **tx) + .fetch_all(&mut *tx.0) .await .map_err(|e| WebhookCleanerError::GetFailedRowsError { error: e })?; @@ -290,7 +291,7 @@ impl WebhookCleaner { Ok(()) } - async fn delete_observed_rows(&self, tx: &mut Transaction<'_, Postgres>) -> Result { + async fn delete_observed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result { // This DELETE is only safe because we are in serializable isolation mode, see the note // in `start_serializable_txn`. 
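The comment above is the payoff of the `SerializableTxn` newtype: the DELETE can only be reached with a transaction that went through `start_serializable_txn`. A compressed sketch of the same pattern (hypothetical names, assuming sqlx 0.7 and a reachable Postgres):

```rust
use sqlx::{PgPool, Postgres, Transaction};

// Only constructible via begin_serializable, so holding one proves that
// SET TRANSACTION already ran.
struct Serializable<'a>(Transaction<'a, Postgres>);

async fn begin_serializable(pool: &PgPool) -> sqlx::Result<Serializable<'static>> {
    let mut tx = pool.begin().await?;
    // Must run before any other statement in the transaction.
    sqlx::query("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
        .execute(&mut *tx)
        .await?;
    Ok(Serializable(tx))
}

// Taking the wrapper (not a bare Transaction) makes it a type error to call
// this with a transaction at a weaker isolation level.
async fn delete_observed(tx: &mut Serializable<'_>, table: &str) -> sqlx::Result<u64> {
    let query = format!("DELETE FROM {} WHERE status IN ('completed', 'failed')", table);
    let result = sqlx::query(&query).execute(&mut *tx.0).await?;
    Ok(result.rows_affected())
}
```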
let base_query = format!( @@ -304,15 +305,15 @@ impl WebhookCleaner { let result = sqlx::query(&base_query) .bind(&self.queue_name) - .execute(&mut **tx) + .execute(&mut *tx.0) .await .map_err(|e| WebhookCleanerError::DeleteRowsError { error: e })?; Ok(result.rows_affected()) } - async fn commit_txn(&self, tx: Transaction<'_, Postgres>) -> Result<()> { - tx.commit() + async fn commit_txn(&self, tx: SerializableTxn<'_>) -> Result<()> { + tx.0.commit() .await .map_err(|e| WebhookCleanerError::CommitTxnError { error: e })?; From 8aeaad7e474e5592f6be47a53a1044c5314005f3 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 20 Dec 2023 08:18:04 -0700 Subject: [PATCH 069/130] Cleanup AppMetric creation and serialization --- hook-janitor/src/webhooks.rs | 139 +++++++++++++++++------------------ 1 file changed, 66 insertions(+), 73 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 71fe567..d4b6255 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -66,6 +66,24 @@ struct CompletedRow { successes: u32, } +impl From for AppMetric { + fn from(row: CompletedRow) -> Self { + AppMetric { + timestamp: row.hour, + team_id: row.team_id, + plugin_config_id: row.plugin_config_id, + job_id: None, + category: AppMetricCategory::Webhook, + successes: row.successes, + successes_on_retry: 0, + failures: 0, + error_uuid: None, + error_type: None, + error_details: None, + } + } +} + #[derive(sqlx::FromRow, Debug)] struct FailedRow { // App Metrics truncates/aggregates rows on the hour, so we take advantage of that to GROUP BY @@ -84,6 +102,24 @@ struct FailedRow { failures: u32, } +impl From for AppMetric { + fn from(row: FailedRow) -> Self { + AppMetric { + timestamp: row.hour, + team_id: row.team_id, + plugin_config_id: row.plugin_config_id, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 0, + successes_on_retry: 0, + failures: row.failures, + error_uuid: Some(Uuid::now_v7()), + error_type: Some(row.last_error.r#type), + error_details: Some(row.last_error.details), + } + } +} + // A simple wrapper type that ensures we don't use any old Transaction object when we need one // that has set the isolation level to serializable. 
struct SerializableTxn<'a>(Transaction<'a, Postgres>); @@ -178,33 +214,6 @@ impl WebhookCleaner { Ok(rows) } - fn serialize_completed_rows(&self, completed_rows: Vec) -> Result> { - let mut payloads = Vec::new(); - - for row in completed_rows { - let app_metric = AppMetric { - timestamp: row.hour, - team_id: row.team_id, - plugin_config_id: row.plugin_config_id, - job_id: None, - category: AppMetricCategory::Webhook, - successes: row.successes, - successes_on_retry: 0, - failures: 0, - error_uuid: None, - error_type: None, - error_details: None, - }; - - let payload = serde_json::to_string(&app_metric) - .map_err(|e| WebhookCleanerError::SerializeRowsError { error: e })?; - - payloads.push(payload) - } - - Ok(payloads) - } - async fn get_failed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result> { let base_query = format!( r#" @@ -231,34 +240,13 @@ impl WebhookCleaner { Ok(rows) } - fn serialize_failed_rows(&self, failed_rows: Vec) -> Result> { - let mut payloads = Vec::new(); - - for row in failed_rows { - let app_metric = AppMetric { - timestamp: row.hour, - team_id: row.team_id, - plugin_config_id: row.plugin_config_id, - job_id: None, - category: AppMetricCategory::Webhook, - successes: 0, - successes_on_retry: 0, - failures: row.failures, - error_uuid: Some(Uuid::now_v7()), - error_type: Some(row.last_error.r#type), - error_details: Some(row.last_error.details), - }; - - let payload = serde_json::to_string(&app_metric) - .map_err(|e| WebhookCleanerError::SerializeRowsError { error: e })?; - - payloads.push(payload) - } + async fn send_metrics_to_kafka(&self, metrics: Vec) -> Result<()> { + let payloads: Vec = metrics + .into_iter() + .map(|metric| serde_json::to_string(&metric)) + .collect::, SerdeError>>() + .map_err(|e| WebhookCleanerError::SerializeRowsError { error: e })?; - Ok(payloads) - } - - async fn send_messages_to_kafka(&self, payloads: Vec) -> Result<()> { let mut delivery_futures = Vec::new(); for payload in payloads { @@ -332,30 +320,35 @@ impl WebhookCleaner { // future if necessary. 
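The `collect::<Result<Vec<String>, SerdeError>>()` in the new `send_metrics_to_kafka` does the real work: the first row that fails to serialize aborts the whole batch before anything is produced. A self-contained illustration of that collect-into-Result pattern (hypothetical `Metric` type):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct Metric {
    successes: u32,
}

fn main() -> Result<(), serde_json::Error> {
    let metrics = vec![Metric { successes: 1 }, Metric { successes: 2 }];

    // Iterator<Item = Result<String, _>> collects into Result<Vec<String>, _>:
    // the first serialization error short-circuits and is returned whole.
    let payloads = metrics
        .iter()
        .map(serde_json::to_string)
        .collect::<Result<Vec<String>, _>>()?;

    assert_eq!(payloads, vec![r#"{"successes":1}"#, r#"{"successes":2}"#]);
    Ok(())
}
```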
let mut tx = self.start_serializable_txn().await?; + let mut rows_processed = 0; + + { + let completed_rows = self.get_completed_rows(&mut tx).await?; + rows_processed += completed_rows.len(); + let completed_app_metrics: Vec = + completed_rows.into_iter().map(Into::into).collect(); + self.send_metrics_to_kafka(completed_app_metrics).await?; + } - let completed_rows = self.get_completed_rows(&mut tx).await?; - let completed_agg_row_count = completed_rows.len(); - let completed_kafka_payloads = self.serialize_completed_rows(completed_rows)?; - - let failed_rows = self.get_failed_rows(&mut tx).await?; - let failed_agg_row_count = failed_rows.len(); - let failed_kafka_payloads = self.serialize_failed_rows(failed_rows)?; - - let mut all_kafka_payloads = completed_kafka_payloads; - all_kafka_payloads.extend(failed_kafka_payloads.into_iter()); + { + let failed_rows = self.get_failed_rows(&mut tx).await?; + rows_processed += failed_rows.len(); + let failed_app_metrics: Vec = + failed_rows.into_iter().map(Into::into).collect(); + self.send_metrics_to_kafka(failed_app_metrics).await?; + } - let mut rows_deleted: u64 = 0; - if !all_kafka_payloads.is_empty() { - self.send_messages_to_kafka(all_kafka_payloads).await?; - rows_deleted = self.delete_observed_rows(&mut tx).await?; + if rows_processed != 0 { + let rows_deleted = self.delete_observed_rows(&mut tx).await?; self.commit_txn(tx).await?; + debug!( + "WebhookCleaner finished cleanup, processed and deleted {} rows", + rows_deleted + ); + } else { + debug!("WebhookCleaner finished cleanup, no-op"); } - debug!( - "WebhookCleaner finished cleanup, deleted {} rows ({} completed+aggregated, {} failed+aggregated)", - rows_deleted, completed_agg_row_count, failed_agg_row_count - ); - Ok(()) } } From a94ee2f6dec7fdac47a524b9caf56c01ed362584 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 20 Dec 2023 08:31:13 -0700 Subject: [PATCH 070/130] Clean up row counts --- hook-janitor/src/webhooks.rs | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index d4b6255..5ccf6dc 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -241,6 +241,10 @@ impl WebhookCleaner { } async fn send_metrics_to_kafka(&self, metrics: Vec) -> Result<()> { + if metrics.is_empty() { + return Ok(()); + } + let payloads: Vec = metrics .into_iter() .map(|metric| serde_json::to_string(&metric)) @@ -320,33 +324,34 @@ impl WebhookCleaner { // future if necessary. 
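For the Kafka leg, the janitor enqueues every payload first and awaits the delivery acknowledgements afterwards, so sends overlap instead of taking one broker round-trip each. A sketch of that shape (assuming rdkafka's `FutureProducer`; simplified from the loop above):

```rust
use rdkafka::error::KafkaError;
use rdkafka::producer::{FutureProducer, FutureRecord};

async fn send_all(
    producer: &FutureProducer,
    topic: &str,
    payloads: Vec<String>,
) -> Result<(), KafkaError> {
    let mut deliveries = Vec::new();
    for payload in payloads.iter() {
        let record = FutureRecord {
            topic,
            partition: None,
            payload: Some(payload.as_str()),
            key: None::<&str>,
            timestamp: None,
            headers: None,
        };
        // send_result places the message on the producer's buffer immediately
        // and returns a future that resolves once the broker acks it.
        let future = producer.send_result(record).map_err(|(err, _record)| err)?;
        deliveries.push(future);
    }

    // Await the acknowledgements afterwards, so deliveries are in flight
    // concurrently rather than one at a time.
    for delivery in deliveries {
        match delivery.await {
            Ok(Ok(_partition_and_offset)) => {}
            Ok(Err((err, _message))) => return Err(err),
            Err(_oneshot_canceled) => return Err(KafkaError::Canceled),
        }
    }
    Ok(())
}
```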
let mut tx = self.start_serializable_txn().await?; - let mut rows_processed = 0; - { + let completed_agg_row_count = { let completed_rows = self.get_completed_rows(&mut tx).await?; - rows_processed += completed_rows.len(); + let row_count = completed_rows.len(); let completed_app_metrics: Vec = completed_rows.into_iter().map(Into::into).collect(); self.send_metrics_to_kafka(completed_app_metrics).await?; - } + row_count + }; - { + let failed_agg_row_count = { let failed_rows = self.get_failed_rows(&mut tx).await?; - rows_processed += failed_rows.len(); + let row_count = failed_rows.len(); let failed_app_metrics: Vec = failed_rows.into_iter().map(Into::into).collect(); self.send_metrics_to_kafka(failed_app_metrics).await?; - } + row_count + }; - if rows_processed != 0 { + if completed_agg_row_count + failed_agg_row_count != 0 { let rows_deleted = self.delete_observed_rows(&mut tx).await?; self.commit_txn(tx).await?; debug!( - "WebhookCleaner finished cleanup, processed and deleted {} rows", - rows_deleted + "WebhookCleaner finished cleanup, processed and deleted {} rows ({}/{} aggregated completed/failed rows)", + rows_deleted, completed_agg_row_count, failed_agg_row_count ); } else { - debug!("WebhookCleaner finished cleanup, no-op"); + debug!("WebhookCleaner finished cleanup, there were no rows to process"); } Ok(()) From 66ea83428f7bd5502475a21e8d0992d868a7645f Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 20 Dec 2023 08:53:47 -0700 Subject: [PATCH 071/130] Tweak ErrorType, add some notes about stability of serialized types --- hook-common/src/kafka_messages/app_metrics.rs | 24 ++++++++++++------- hook-common/src/webhook.rs | 16 ++++++------- hook-janitor/src/fixtures/webhook_cleanup.sql | 14 +++++++++-- 3 files changed, 35 insertions(+), 19 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 4396643..cecf604 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -13,14 +13,20 @@ pub enum AppMetricCategory { ComposeWebhook, } +// NOTE: These are stored in Postgres and deserialized by the cleanup/janitor process, so these +// names need to remain stable, or new variants need to be deployed to the cleanup/janitor +// process before they are used. #[derive(Deserialize, Serialize, Debug)] pub enum ErrorType { - Timeout, - Connection, - HttpStatus(u16), - Parse, + TimeoutError, + ConnectionError, + BadHttpStatus(u16), + ParseError, } +// NOTE: This is stored in Postgres and deserialized by the cleanup/janitor process, so this +// shouldn't change. It is intended to replicate the shape of `error_details` used in the +// plugin-server and by the frontend. 
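The NOTE above deserves emphasis: serde's default externally-tagged encoding stores variant names verbatim, so renaming a variant orphans every row already written to Postgres. A quick round trip demonstrating why (hypothetical `ErrorKind` mirroring `ErrorType`):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum ErrorKind {
    TimeoutError,
    BadHttpStatus(u16),
}

fn main() {
    // The variant name is the wire format...
    let json = serde_json::to_string(&ErrorKind::BadHttpStatus(502)).unwrap();
    assert_eq!(json, r#"{"BadHttpStatus":502}"#);

    // ...so rows written earlier keep deserializing only while the variant
    // keeps its exact name.
    let old: ErrorKind = serde_json::from_str(r#""TimeoutError""#).unwrap();
    assert_eq!(old, ErrorKind::TimeoutError);
}
```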
#[derive(Deserialize, Serialize, Debug)] pub struct ErrorDetails { pub error: Error, @@ -88,10 +94,10 @@ where }; let error_type = match error_type { - ErrorType::Connection => "Connection Error".to_owned(), - ErrorType::Timeout => "Timeout".to_owned(), - ErrorType::HttpStatus(s) => format!("HTTP Status: {}", s), - ErrorType::Parse => "Parse Error".to_owned(), + ErrorType::ConnectionError => "Connection Error".to_owned(), + ErrorType::TimeoutError => "Timeout".to_owned(), + ErrorType::BadHttpStatus(s) => format!("HTTP Status: {}", s), + ErrorType::ParseError => "Parse Error".to_owned(), }; serializer.serialize_str(&error_type) } @@ -114,7 +120,7 @@ mod tests { successes_on_retry: 0, failures: 2, error_uuid: Some(Uuid::parse_str("550e8400-e29b-41d4-a716-446655447777").unwrap()), - error_type: Some(ErrorType::Connection), + error_type: Some(ErrorType::ConnectionError), error_details: Some(ErrorDetails { error: Error { name: "FooError".to_owned(), diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index 9a21b83..bb1b5be 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -169,12 +169,12 @@ impl From<&reqwest::Error> for WebhookJobError { impl WebhookJobError { pub fn new_timeout(message: &str) -> Self { let error_details = app_metrics::Error { - name: "timeout".to_owned(), + name: "Timeout Error".to_owned(), message: Some(message.to_owned()), stack: None, }; Self { - r#type: app_metrics::ErrorType::Timeout, + r#type: app_metrics::ErrorType::TimeoutError, details: app_metrics::ErrorDetails { error: error_details, }, @@ -183,12 +183,12 @@ impl WebhookJobError { pub fn new_connection(message: &str) -> Self { let error_details = app_metrics::Error { - name: "connection error".to_owned(), + name: "Connection Error".to_owned(), message: Some(message.to_owned()), stack: None, }; Self { - r#type: app_metrics::ErrorType::Connection, + r#type: app_metrics::ErrorType::ConnectionError, details: app_metrics::ErrorDetails { error: error_details, }, @@ -197,12 +197,12 @@ impl WebhookJobError { pub fn new_http_status(status_code: u16, message: &str) -> Self { let error_details = app_metrics::Error { - name: "http status".to_owned(), + name: "Bad Http Status".to_owned(), message: Some(message.to_owned()), stack: None, }; Self { - r#type: app_metrics::ErrorType::HttpStatus(status_code), + r#type: app_metrics::ErrorType::BadHttpStatus(status_code), details: app_metrics::ErrorDetails { error: error_details, }, @@ -211,12 +211,12 @@ impl WebhookJobError { pub fn new_parse(message: &str) -> Self { let error_details = app_metrics::Error { - name: "parse error".to_owned(), + name: "Parse Error".to_owned(), message: Some(message.to_owned()), stack: None, }; Self { - r#type: app_metrics::ErrorType::Parse, + r#type: app_metrics::ErrorType::ParseError, details: app_metrics::ErrorDetails { error: error_details, }, diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql index 6f29d30..e4ea082 100644 --- a/hook-janitor/src/fixtures/webhook_cleanup.sql +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -19,7 +19,7 @@ VALUES 'completed', 'https://myhost/endpoint' ), - -- another team:1, plugin_config:2, completed in hour 20 + -- team:1, plugin_config:2, completed in hour 20 (purposeful duplicate) ( NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', @@ -71,7 +71,17 @@ VALUES ), -- team:1, plugin_config:2, failed in hour 20 ( - ARRAY ['{"type":"Timeout","details":{"error":{"name":"timeout"}}}'::jsonb], + ARRAY 
['{"type":"TimeoutError","details":{"error":{"name":"timeout"}}}'::jsonb], + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'failed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, failed in hour 20 (purposeful duplicate) + ( + ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '{}', From dd8faee798efe8796d2fb17df5f054c1f7f75c9d Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 20 Dec 2023 15:41:25 -0700 Subject: [PATCH 072/130] Finish up tests --- Cargo.lock | 1 + Cargo.toml | 2 +- hook-common/src/kafka_messages/app_metrics.rs | 94 ++++- hook-common/src/kafka_messages/mod.rs | 29 +- hook-common/src/kafka_messages/plugin_logs.rs | 4 +- hook-janitor/src/fixtures/webhook_cleanup.sql | 74 +++- hook-janitor/src/webhooks.rs | 372 ++++++++++++++++-- 7 files changed, 510 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fbfcc50..c36b1da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2953,6 +2953,7 @@ checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" dependencies = [ "atomic", "getrandom", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a29806a..1f6a38b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,4 +35,4 @@ tower = "0.4.13" tracing = "0.1.40" tracing-subscriber = "0.3.18" url = { version = "2.5.0 " } -uuid = { version = "1.6.1", features = ["v7"] } +uuid = { version = "1.6.1", features = ["v7", "serde"] } diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index cecf604..5aff62c 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -1,10 +1,10 @@ use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use uuid::Uuid; -use super::{serialize_datetime, serialize_optional_uuid}; +use super::{deserialize_datetime, serialize_datetime}; -#[derive(Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub enum AppMetricCategory { ProcessEvent, OnEvent, @@ -16,7 +16,7 @@ pub enum AppMetricCategory { // NOTE: These are stored in Postgres and deserialized by the cleanup/janitor process, so these // names need to remain stable, or new variants need to be deployed to the cleanup/janitor // process before they are used. -#[derive(Deserialize, Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub enum ErrorType { TimeoutError, ConnectionError, @@ -27,12 +27,12 @@ pub enum ErrorType { // NOTE: This is stored in Postgres and deserialized by the cleanup/janitor process, so this // shouldn't change. It is intended to replicate the shape of `error_details` used in the // plugin-server and by the frontend. 
-#[derive(Deserialize, Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct ErrorDetails { pub error: Error, } -#[derive(Deserialize, Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct Error { pub name: String, #[serde(skip_serializing_if = "Option::is_none")] @@ -43,26 +43,30 @@ pub struct Error { pub stack: Option, } -#[derive(Serialize, Debug)] +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct AppMetric { - #[serde(serialize_with = "serialize_datetime")] + #[serde( + serialize_with = "serialize_datetime", + deserialize_with = "deserialize_datetime" + )] pub timestamp: DateTime, pub team_id: u32, pub plugin_config_id: u32, #[serde(skip_serializing_if = "Option::is_none")] pub job_id: Option, - #[serde(serialize_with = "serialize_category")] + #[serde( + serialize_with = "serialize_category", + deserialize_with = "deserialize_category" + )] pub category: AppMetricCategory, pub successes: u32, pub successes_on_retry: u32, pub failures: u32, - #[serde( - serialize_with = "serialize_optional_uuid", - skip_serializing_if = "Option::is_none" - )] pub error_uuid: Option, #[serde( serialize_with = "serialize_error_type", + deserialize_with = "deserialize_error_type", + default, skip_serializing_if = "Option::is_none" )] pub error_type: Option, @@ -84,6 +88,35 @@ where serializer.serialize_str(category_str) } +fn deserialize_category<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + + let category = match &s[..] { + "processEvent" => AppMetricCategory::ProcessEvent, + "onEvent" => AppMetricCategory::OnEvent, + "scheduledTask" => AppMetricCategory::ScheduledTask, + "webhook" => AppMetricCategory::Webhook, + "composeWebhook" => AppMetricCategory::ComposeWebhook, + _ => { + return Err(serde::de::Error::unknown_variant( + &s, + &[ + "processEvent", + "onEvent", + "scheduledTask", + "webhook", + "composeWebhook", + ], + )) + } + }; + + Ok(category) +} + fn serialize_error_type(error_type: &Option, serializer: S) -> Result where S: Serializer, @@ -102,6 +135,41 @@ where serializer.serialize_str(&error_type) } +fn deserialize_error_type<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let opt = Option::::deserialize(deserializer)?; + let error_type = match opt { + Some(s) => { + let error_type = match &s[..] { + "Connection Error" => ErrorType::ConnectionError, + "Timeout" => ErrorType::TimeoutError, + _ if s.starts_with("HTTP Status:") => { + let status = &s["HTTP Status:".len()..]; + ErrorType::BadHttpStatus(status.parse().map_err(serde::de::Error::custom)?) 
+ } + "Parse Error" => ErrorType::ParseError, + _ => { + return Err(serde::de::Error::unknown_variant( + &s, + &[ + "Connection Error", + "Timeout", + "HTTP Status: ", + "Parse Error", + ], + )) + } + }; + Some(error_type) + } + None => None, + }; + + Ok(error_type) +} + #[cfg(test)] mod tests { use super::*; diff --git a/hook-common/src/kafka_messages/mod.rs b/hook-common/src/kafka_messages/mod.rs index 72b49e1..f548563 100644 --- a/hook-common/src/kafka_messages/mod.rs +++ b/hook-common/src/kafka_messages/mod.rs @@ -1,30 +1,25 @@ pub mod app_metrics; pub mod plugin_logs; -use chrono::{DateTime, Utc}; -use serde::Serializer; -use uuid::Uuid; +use chrono::{DateTime, NaiveDateTime, Utc}; +use serde::{Deserialize, Deserializer, Serializer}; -pub fn serialize_uuid(uuid: &Uuid, serializer: S) -> Result +pub fn serialize_datetime(datetime: &DateTime, serializer: S) -> Result where S: Serializer, { - serializer.serialize_str(&uuid.to_string()) + serializer.serialize_str(&datetime.format("%Y-%m-%d %H:%M:%S").to_string()) } -pub fn serialize_optional_uuid(uuid: &Option, serializer: S) -> Result +pub fn deserialize_datetime<'de, D>(deserializer: D) -> Result, D::Error> where - S: Serializer, + D: Deserializer<'de>, { - match uuid { - Some(uuid) => serializer.serialize_str(&uuid.to_string()), - None => serializer.serialize_none(), - } -} + let formatted: String = Deserialize::deserialize(deserializer)?; + let datetime = match NaiveDateTime::parse_from_str(&formatted, "%Y-%m-%d %H:%M:%S") { + Ok(d) => d.and_utc(), + Err(_) => return Err(serde::de::Error::custom("Invalid datetime format")), + }; -pub fn serialize_datetime(datetime: &DateTime, serializer: S) -> Result -where - S: Serializer, -{ - serializer.serialize_str(&datetime.format("%Y-%m-%d %H:%M:%S%.f").to_string()) + Ok(datetime) } diff --git a/hook-common/src/kafka_messages/plugin_logs.rs b/hook-common/src/kafka_messages/plugin_logs.rs index 8f8bb43..e761fa4 100644 --- a/hook-common/src/kafka_messages/plugin_logs.rs +++ b/hook-common/src/kafka_messages/plugin_logs.rs @@ -2,7 +2,7 @@ use chrono::{DateTime, Utc}; use serde::{Serialize, Serializer}; use uuid::Uuid; -use super::{serialize_datetime, serialize_uuid}; +use super::serialize_datetime; #[allow(dead_code)] #[derive(Serialize)] @@ -28,7 +28,6 @@ pub struct PluginLogEntry { pub source: PluginLogEntrySource, #[serde(rename = "type", serialize_with = "serialize_type")] pub type_: PluginLogEntryType, - #[serde(serialize_with = "serialize_uuid")] pub id: Uuid, pub team_id: u32, pub plugin_id: u32, @@ -37,7 +36,6 @@ pub struct PluginLogEntry { pub timestamp: DateTime, #[serde(serialize_with = "serialize_message")] pub message: String, - #[serde(serialize_with = "serialize_uuid")] pub instance_id: Uuid, } diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql index e4ea082..4aeb231 100644 --- a/hook-janitor/src/fixtures/webhook_cleanup.sql +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -71,7 +71,7 @@ VALUES ), -- team:1, plugin_config:2, failed in hour 20 ( - ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"timeout"}}}'::jsonb], + ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '{}', @@ -81,11 +81,81 @@ VALUES ), -- team:1, plugin_config:2, failed in hour 20 (purposeful duplicate) ( - ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"timeout"}}}'::jsonb], + ARRAY 
['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'failed', 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, failed in hour 20 (different error) + ( + ARRAY ['{"type":"ConnectionError","details":{"error":{"name":"Connection Error"}}}'::jsonb], + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'failed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, failed in hour 21 (different hour) + ( + ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 21:01:18.799371+00', + '{}', + 'webhooks', + 'failed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:3, failed in hour 20 (different plugin_config) + ( + ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'failed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, failed but in a different queue + ( + ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'not-webhooks', + 'failed', + 'https://myhost/endpoint' + ), + -- team:2, plugin_config:4, failed in hour 20 (purposeful duplicate) + ( + ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], + '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'failed', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, available + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{"body": "hello world", "headers": {}, "method": "POST", "url": "https://myhost/endpoint"}', + 'webhooks', + 'available', + 'https://myhost/endpoint' + ), + -- team:1, plugin_config:2, running + ( + NULL, + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '2023-12-19 20:01:18.799371+00', + '{}', + 'webhooks', + 'running', + 'https://myhost/endpoint' ); \ No newline at end of file diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 5ccf6dc..fb48a57 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -124,6 +124,12 @@ impl From for AppMetric { // that has set the isolation level to serializable. struct SerializableTxn<'a>(Transaction<'a, Postgres>); +struct CleanupStats { + rows_processed: u64, + completed_agg_row_count: usize, + failed_agg_row_count: usize, +} + impl WebhookCleaner { pub fn new( queue_name: &str, @@ -312,7 +318,7 @@ impl WebhookCleaner { Ok(()) } - async fn cleanup_impl(&self) -> Result<()> { + async fn cleanup_impl(&self) -> Result { debug!("WebhookCleaner starting cleanup"); // Note that we select all completed and failed rows without any pagination at the moment. 
@@ -343,18 +349,17 @@ impl WebhookCleaner { row_count }; + let mut rows_processed = 0; if completed_agg_row_count + failed_agg_row_count != 0 { - let rows_deleted = self.delete_observed_rows(&mut tx).await?; + rows_processed = self.delete_observed_rows(&mut tx).await?; self.commit_txn(tx).await?; - debug!( - "WebhookCleaner finished cleanup, processed and deleted {} rows ({}/{} aggregated completed/failed rows)", - rows_deleted, completed_agg_row_count, failed_agg_row_count - ); - } else { - debug!("WebhookCleaner finished cleanup, there were no rows to process"); } - Ok(()) + Ok(CleanupStats { + rows_processed, + completed_agg_row_count, + failed_agg_row_count, + }) } } @@ -362,7 +367,18 @@ impl WebhookCleaner { impl Cleaner for WebhookCleaner { async fn cleanup(&self) { match self.cleanup_impl().await { - Ok(_) => {} + Ok(stats) => { + if stats.rows_processed > 0 { + debug!( + rows_processed = stats.rows_processed, + completed_agg_row_count = stats.completed_agg_row_count, + failed_agg_row_count = stats.failed_agg_row_count, + "WebhookCleaner::cleanup finished" + ); + } else { + debug!("WebhookCleaner finished cleanup, there were no rows to process"); + } + } Err(error) => { error!(error = ?error, "WebhookCleaner::cleanup failed"); } @@ -375,9 +391,18 @@ mod tests { use super::*; use crate::config; use crate::kafka_producer::{create_kafka_producer, KafkaContext}; + use hook_common::kafka_messages::app_metrics::{ + Error as WebhookError, ErrorDetails, ErrorType, + }; + use hook_common::pgqueue::{NewJob, PgJob, PgQueue, RetryPolicy}; + use hook_common::webhook::{HttpMethod, WebhookJobMetadata, WebhookJobParameters}; + use rdkafka::consumer::{Consumer, StreamConsumer}; use rdkafka::mocking::MockCluster; use rdkafka::producer::{DefaultProducerContext, FutureProducer}; - use sqlx::PgPool; + use rdkafka::{ClientConfig, Message}; + use sqlx::{PgPool, Row}; + use std::collections::HashMap; + use std::str::FromStr; const APP_METRICS_TOPIC: &str = "app_metrics"; @@ -406,6 +431,18 @@ mod tests { ) } + fn check_app_metric_vector_equality(v1: &[AppMetric], v2: &[AppMetric]) { + // Ignores `error_uuid`s. 
+ assert_eq!(v1.len(), v2.len()); + for (item1, item2) in v1.iter().zip(v2) { + let mut item1 = item1.clone(); + item1.error_uuid = None; + let mut item2 = item2.clone(); + item2.error_uuid = None; + assert_eq!(item1, item2); + } + } + #[sqlx::test(migrations = "../migrations", fixtures("webhook_cleanup"))] async fn test_cleanup_impl(db: PgPool) { let (mock_cluster, mock_producer) = create_mock_kafka().await; @@ -413,37 +450,312 @@ mod tests { .create_topic(APP_METRICS_TOPIC, 1, 1) .expect("failed to create mock app_metrics topic"); - let table_name = "job_queue"; - let queue_name = "webhooks"; + let consumer: StreamConsumer = ClientConfig::new() + .set("bootstrap.servers", mock_cluster.bootstrap_servers()) + .set("group.id", "mock") + .set("auto.offset.reset", "earliest") + .create() + .expect("failed to create mock consumer"); + consumer.subscribe(&[APP_METRICS_TOPIC]).unwrap(); let webhook_cleaner = WebhookCleaner::new_from_pool( - &queue_name, - &table_name, + &"webhooks", + &"job_queue", db, mock_producer, APP_METRICS_TOPIC.to_owned(), ) .expect("unable to create webhook cleaner"); - let _ = webhook_cleaner + let cleanup_stats = webhook_cleaner .cleanup_impl() .await .expect("webhook cleanup_impl failed"); - // TODO: I spent a lot of time trying to get the mock Kafka consumer to work, but I think - // I've identified an issue with the rust-rdkafka library: - // https://github.com/fede1024/rust-rdkafka/issues/629#issuecomment-1863555417 - // - // I wanted to test the messages put on the AppMetrics topic, but I think we need to figure - // out that issue about in order to do so. (Capture uses the MockProducer but not a - // Consumer, fwiw.) - // - // For now, I'll probably have to make `cleanup_impl` return the row information so at - // least we can inspect that for correctness. + // Rows from other queues and rows that are not 'completed' or 'failed' should not be + // processed.
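// (The 11 processed rows are the 5 'completed' plus 6 'failed' fixture rows
// in the 'webhooks' queue; the failed row in 'not-webhooks' and the
// 'available'/'running' rows are excluded.)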
+ assert_eq!(cleanup_stats.rows_processed, 11); + + let mut received_app_metrics = Vec::new(); + for _ in 0..(cleanup_stats.completed_agg_row_count + cleanup_stats.failed_agg_row_count) { + let kafka_msg = consumer.recv().await.unwrap(); + let payload_str = String::from_utf8(kafka_msg.payload().unwrap().to_vec()).unwrap(); + let app_metric: AppMetric = serde_json::from_str(&payload_str).unwrap(); + received_app_metrics.push(app_metric); + } + + let expected_app_metrics = vec![ + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 2, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 2, + successes_on_retry: 0, + failures: 0, + error_uuid: None, + error_type: None, + error_details: None, + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 3, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 1, + successes_on_retry: 0, + failures: 0, + error_uuid: None, + error_type: None, + error_details: None, + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 2, + plugin_config_id: 4, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 1, + successes_on_retry: 0, + failures: 0, + error_uuid: None, + error_type: None, + error_details: None, + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T21:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 2, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 1, + successes_on_retry: 0, + failures: 0, + error_uuid: None, + error_type: None, + error_details: None, + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 2, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 0, + successes_on_retry: 0, + failures: 1, + error_uuid: Some(Uuid::parse_str("018c8935-d038-714a-957c-0df43d42e377").unwrap()), + error_type: Some(ErrorType::ConnectionError), + error_details: Some(ErrorDetails { + error: WebhookError { + name: "Connection Error".to_owned(), + message: None, + stack: None, + }, + }), + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 2, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 0, + successes_on_retry: 0, + failures: 2, + error_uuid: Some(Uuid::parse_str("018c8935-d038-714a-957c-0df43d42e377").unwrap()), + error_type: Some(ErrorType::TimeoutError), + error_details: Some(ErrorDetails { + error: WebhookError { + name: "Timeout".to_owned(), + message: None, + stack: None, + }, + }), + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 3, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 0, + successes_on_retry: 0, + failures: 1, + error_uuid: Some(Uuid::parse_str("018c8935-d038-714a-957c-0df43d42e377").unwrap()), + error_type: Some(ErrorType::TimeoutError), + error_details: Some(ErrorDetails { + error: WebhookError { + name: "Timeout".to_owned(), + message: None, + stack: None, + }, + }), + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T20:00:00Z").unwrap(), + team_id: 2, + plugin_config_id: 4, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 0, + successes_on_retry: 0, + failures: 1, + error_uuid: Some(Uuid::parse_str("018c8935-d038-714a-957c-0df43d42e377").unwrap()), + error_type: Some(ErrorType::TimeoutError), 
+ error_details: Some(ErrorDetails { + error: WebhookError { + name: "Timeout".to_owned(), + message: None, + stack: None, + }, + }), + }, + AppMetric { + timestamp: DateTime::::from_str("2023-12-19T21:00:00Z").unwrap(), + team_id: 1, + plugin_config_id: 2, + job_id: None, + category: AppMetricCategory::Webhook, + successes: 0, + successes_on_retry: 0, + failures: 1, + error_uuid: Some(Uuid::parse_str("018c8935-d038-714a-957c-0df43d42e377").unwrap()), + error_type: Some(ErrorType::TimeoutError), + error_details: Some(ErrorDetails { + error: WebhookError { + name: "Timeout".to_owned(), + message: None, + stack: None, + }, + }), + }, + ]; + + check_app_metric_vector_equality(&expected_app_metrics, &received_app_metrics); } - // #[sqlx::test] - // async fn test_serializable_isolation() { - // TODO: I'm going to add a test that verifies new rows aren't visible during the txn. - // } + #[sqlx::test(migrations = "../migrations", fixtures("webhook_cleanup"))] + async fn test_serializable_isolation(db: PgPool) { + let (_, mock_producer) = create_mock_kafka().await; + let webhook_cleaner = WebhookCleaner::new_from_pool( + &"webhooks", + &"job_queue", + db.clone(), + mock_producer, + APP_METRICS_TOPIC.to_owned(), + ) + .expect("unable to create webhook cleaner"); + + let queue = + PgQueue::new_from_pool("webhooks", "job_queue", db.clone(), RetryPolicy::default()) + .await + .expect("failed to connect to local test postgresql database"); + + async fn get_count_from_new_conn(db: &PgPool, status: &str) -> i64 { + let mut conn = db.acquire().await.unwrap(); + let count: i64 = sqlx::query( + "SELECT count(*) FROM job_queue WHERE queue = 'webhooks' AND status = $1::job_status", + ) + .bind(&status) + .fetch_one(&mut *conn) + .await + .unwrap() + .get(0); + count + } + + // Important! Serializable txn is started here. + let mut tx = webhook_cleaner.start_serializable_txn().await.unwrap(); + webhook_cleaner.get_completed_rows(&mut tx).await.unwrap(); + webhook_cleaner.get_failed_rows(&mut tx).await.unwrap(); + + // All 13 rows in the queue are visible from outside the txn. + // The 11 the cleaner will process, plus 1 available and 1 running. + assert_eq!(get_count_from_new_conn(&db, "completed").await, 5); + assert_eq!(get_count_from_new_conn(&db, "failed").await, 6); + assert_eq!(get_count_from_new_conn(&db, "available").await, 1); + assert_eq!(get_count_from_new_conn(&db, "running").await, 1); + + { + // The fixtures include an available job, so let's complete it while the txn is open. + let webhook_job: PgJob = queue + .dequeue(&"worker_id") + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + webhook_job + .complete() + .await + .expect("failed to complete job"); + } + + { + // Enqueue and complete another job while the txn is open. + let job_parameters = WebhookJobParameters { + body: "foo".to_owned(), + headers: HashMap::new(), + method: HttpMethod::POST, + url: "http://example.com".to_owned(), + }; + let job_metadata = WebhookJobMetadata { + team_id: 1, + plugin_id: 2, + plugin_config_id: 3, + }; + let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); + queue.enqueue(new_job).await.expect("failed to enqueue job"); + let webhook_job: PgJob = queue + .dequeue(&"worker_id") + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + webhook_job + .complete() + .await + .expect("failed to complete job"); + } + + { + // Enqueue another available job while the txn is open. 
+ let job_parameters = WebhookJobParameters { + body: "foo".to_owned(), + headers: HashMap::new(), + method: HttpMethod::POST, + url: "http://example.com".to_owned(), + }; + let job_metadata = WebhookJobMetadata { + team_id: 1, + plugin_id: 2, + plugin_config_id: 3, + }; + let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); + queue.enqueue(new_job).await.expect("failed to enqueue job"); + } + + // There are now 2 more completed rows (jobs added above) than before, visible from outside the txn. + assert_eq!(get_count_from_new_conn(&db, "completed").await, 7); + assert_eq!(get_count_from_new_conn(&db, "available").await, 1); + + let rows_processed = webhook_cleaner.delete_observed_rows(&mut tx).await.unwrap(); + // The 11 rows that were in the queue when the txn started should be deleted. + assert_eq!(rows_processed, 11); + + // We haven't committed, so the rows are still visible from outside the txn. + assert_eq!(get_count_from_new_conn(&db, "completed").await, 7); + assert_eq!(get_count_from_new_conn(&db, "available").await, 1); + + webhook_cleaner.commit_txn(tx).await.unwrap(); + + // We have committed, what remains are: + // * The 1 available job we completed while the txn was open. + // * The 2 brand new jobs we added while the txn was open. + // * The 1 running job that didn't change. + assert_eq!(get_count_from_new_conn(&db, "completed").await, 2); + assert_eq!(get_count_from_new_conn(&db, "failed").await, 0); + assert_eq!(get_count_from_new_conn(&db, "available").await, 1); + assert_eq!(get_count_from_new_conn(&db, "running").await, 1); + } } From d6035105e36ac604dad7bfa8805ed5608c92be22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 12:10:26 +0100 Subject: [PATCH 073/130] refactor: Only one run method --- hook-consumer/src/consumer.rs | 73 +++++++++++++++++++++-------------- hook-consumer/src/main.rs | 6 +-- 2 files changed, 44 insertions(+), 35 deletions(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 73bac1c..921a065 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -109,42 +109,55 @@ impl<'p> WebhookConsumer<'p> { } /// Run this consumer to continuously process any jobs that become available. - pub async fn run(&self) -> Result<(), ConsumerError> { + pub async fn run(&self, transactional: bool) -> Result<(), ConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); - loop { - let webhook_job = self.wait_for_job().await?; - - // reqwest::Client internally wraps with Arc, so this allocation is cheap. - let client = self.client.clone(); - let permit = semaphore.clone().acquire_owned().await.unwrap(); - - tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job).await; - drop(permit); - result - }); + if transactional { + loop { + let webhook_job = self.wait_for_job_tx().await?; + spawn_webhook_job_processing_task( + self.client.clone(), + semaphore.clone(), + webhook_job, + ) + .await; + } + } else { + loop { + let webhook_job = self.wait_for_job().await?; + spawn_webhook_job_processing_task( + self.client.clone(), + semaphore.clone(), + webhook_job, + ) + .await; + } } } +} - /// Run this consumer to continuously process any jobs that become available in transactional mode. 
- pub async fn run_tx(&self) -> Result<(), ConsumerError> { - let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); - - loop { - let webhook_job = self.wait_for_job_tx().await?; - - // reqwest::Client internally wraps with Arc, so this allocation is cheap. - let client = self.client.clone(); - let permit = semaphore.clone().acquire_owned().await.unwrap(); +/// Spawn a Tokio task to process a Webhook Job once we successfully acquire a permit. +/// +/// # Arguments +/// +/// * `client`: An HTTP client to execute the webhook job request. +/// * `semaphore`: A semaphore used for rate limiting purposes. This function will panic if this semaphore is closed. +/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. +async fn spawn_webhook_job_processing_task( + client: reqwest::Client, + semaphore: Arc, + webhook_job: W, +) -> tokio::task::JoinHandle> { + let permit = semaphore + .acquire_owned() + .await + .expect("semaphore has been closed"); - tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job).await; - drop(permit); - result - }); - } - } + tokio::spawn(async move { + let result = process_webhook_job(client, webhook_job).await; + drop(permit); + result + }) } /// Process a webhook job by transitioning it to its appropriate state after its request is sent. diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index 49c2e76..86b8094 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -31,11 +31,7 @@ async fn main() -> Result<(), ConsumerError> { config.max_concurrent_jobs, ); - if config.transactional { - consumer.run_tx().await?; - } else { - consumer.run().await?; - } + consumer.run(config.transactional).await?; Ok(()) } From fe972c7a65208bea3412f9d0b59d2e467897a3d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 8 Dec 2023 16:50:39 +0100 Subject: [PATCH 074/130] feat: Give non-transactional consumer a chance --- hook-consumer/src/consumer.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 921a065..19fa0a8 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -51,6 +51,8 @@ pub struct WebhookConsumer<'p> { client: reqwest::Client, /// Maximum number of concurrent jobs being processed. max_concurrent_jobs: usize, + /// Indicates whether we are holding an open transaction while processing or not. + transactional: bool, } impl<'p> WebhookConsumer<'p> { @@ -82,6 +84,19 @@ impl<'p> WebhookConsumer<'p> { } } + /// Wait until a job becomes available in our queue. + async fn wait_for_job_tx<'a>( + &self, + ) -> Result, WebhookConsumerError> { + loop { + if let Some(job) = self.queue.dequeue_tx(&self.name).await? { + return Ok(job); + } else { + task::sleep(self.poll_interval).await; + } + } + } + /// Wait until a job becomes available in our queue. 
async fn wait_for_job<'a>( &self, From dc97d8c168924ac372d487489d843fbdddaf2e98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 14 Dec 2023 12:30:31 +0100 Subject: [PATCH 075/130] refactor: Two clients one for each mode --- hook-consumer/src/consumer.rs | 130 ++++++++++++++++++++++++++++++++-- 1 file changed, 124 insertions(+), 6 deletions(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 19fa0a8..0127d25 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -51,8 +51,6 @@ pub struct WebhookConsumer<'p> { client: reqwest::Client, /// Maximum number of concurrent jobs being processed. max_concurrent_jobs: usize, - /// Indicates whether we are holding an open transaction while processing or not. - transactional: bool, } impl<'p> WebhookConsumer<'p> { @@ -85,11 +83,9 @@ impl<'p> WebhookConsumer<'p> { } /// Wait until a job becomes available in our queue. - async fn wait_for_job_tx<'a>( - &self, - ) -> Result, WebhookConsumerError> { + async fn wait_for_job<'a>(&self) -> Result, WebhookConsumerError> { loop { - if let Some(job) = self.queue.dequeue_tx(&self.name).await? { + if let Some(job) = self.queue.dequeue(&self.name).await? { return Ok(job); } else { task::sleep(self.poll_interval).await; @@ -97,6 +93,68 @@ impl<'p> WebhookConsumer<'p> { } } + /// Run this consumer to continuously process any jobs that become available. + pub async fn run(&self) -> Result<(), WebhookConsumerError> { + let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); + + loop { + let webhook_job = self.wait_for_job().await?; + + // reqwest::Client internally wraps with Arc, so this allocation is cheap. + let client = self.client.clone(); + let permit = semaphore.clone().acquire_owned().await.unwrap(); + + tokio::spawn(async move { + let result = process_webhook_job(client, webhook_job).await; + drop(permit); + result + }); + } + } +} + +/// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. +pub struct WebhookTransactionConsumer<'p> { + /// An identifier for this consumer. Used to mark jobs we have consumed. + name: String, + /// The queue we will be dequeuing jobs from. + queue: &'p PgQueue, + /// The interval for polling the queue. + poll_interval: time::Duration, + /// The client used for HTTP requests. + client: reqwest::Client, + /// Maximum number of concurrent jobs being processed. + max_concurrent_jobs: usize, +} + +impl<'p> WebhookTransactionConsumer<'p> { + pub fn new( + name: &str, + queue: &'p PgQueue, + poll_interval: time::Duration, + request_timeout: time::Duration, + max_concurrent_jobs: usize, + ) -> Result { + let mut headers = header::HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static("application/json"), + ); + + let client = reqwest::Client::builder() + .default_headers(headers) + .timeout(request_timeout) + .build()?; + + Ok(Self { + name: name.to_owned(), + queue, + poll_interval, + client, + max_concurrent_jobs, + }) + } + /// Wait until a job becomes available in our queue. async fn wait_for_job<'a>( &self, @@ -259,6 +317,66 @@ async fn process_webhook_job( } } +/// Process a webhook job by transitioning it to its appropriate state after its request is sent. 
+/// After we finish, the webhook job will be set as completed (if the request was successful), retryable (if the request +/// was unsuccessful but we can still attempt a retry), or failed (if the request was unsuccessful and no more retries +/// may be attempted). +/// +/// A webhook job is considered retryable after a failing request if: +/// 1. The job has attempts remaining (i.e. hasn't reached `max_attempts`), and... +/// 2. The status code indicates retrying at a later point could resolve the issue. This means: 429 and any 5XX. +/// +/// # Arguments +/// +/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. +/// * `request_timeout`: A timeout for the HTTP request. +async fn process_webhook_job( + client: reqwest::Client, + webhook_job: PgJob, +) -> Result<(), WebhookConsumerError> { + match send_webhook( + client, + &webhook_job.job.parameters.method, + &webhook_job.job.parameters.url, + &webhook_job.job.parameters.headers, + webhook_job.job.parameters.body.clone(), + ) + .await + { + Ok(_) => { + webhook_job + .complete() + .await + .map_err(|error| WebhookConsumerError::PgJobError(error.to_string()))?; + Ok(()) + } + Err(WebhookConsumerError::RetryableWebhookError { + reason, + retry_after, + }) => match webhook_job.retry(reason.to_string(), retry_after).await { + Ok(_) => Ok(()), + Err(PgJobError::RetryInvalidError { + job: webhook_job, + error: fail_error, + }) => { + webhook_job + .fail(fail_error.to_string()) + .await + .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), + }, + Err(error) => { + webhook_job + .fail(error.to_string()) + .await + .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; + Ok(()) + } + } +} + /// Make an HTTP request to a webhook endpoint. /// /// # Arguments From acd0d1379768fa035baa9fb73fadecca149a6c1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 14 Dec 2023 14:49:50 +0100 Subject: [PATCH 076/130] refactor: PgQueue no longer takes a RetryPolicy --- hook-common/src/pgqueue.rs | 97 +++-------------------------------- hook-common/src/retry.rs | 55 ++++++++++++++++++++ hook-consumer/src/consumer.rs | 48 +++++++++++------ hook-consumer/src/main.rs | 13 ++--- 4 files changed, 98 insertions(+), 115 deletions(-) create mode 100644 hook-common/src/retry.rs diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 78fe2fc..509d84e 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -1,8 +1,6 @@ //! # PgQueue //! //! A job queue implementation backed by a PostgreSQL table. 
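// For orientation, a minimal standalone sketch of the Postgres pattern a
// queue like this is typically built on (not this crate's actual query;
// table and column names are assumed): `FOR UPDATE SKIP LOCKED` lets many
// workers claim jobs concurrently without blocking on each other's rows.
async fn claim_one_job(pool: &sqlx::PgPool, worker: &str) -> sqlx::Result<Option<i64>> {
    let claimed: Option<(i64,)> = sqlx::query_as(
        r#"
UPDATE job_queue
SET status = 'running', attempted_by = $1
WHERE id = (
    SELECT id
    FROM job_queue
    WHERE status = 'available' AND scheduled_at <= NOW()
    ORDER BY attempt, scheduled_at
    LIMIT 1
    FOR UPDATE SKIP LOCKED
)
RETURNING id
"#,
    )
    .bind(worker)
    .fetch_optional(pool)
    .await?;

    Ok(claimed.map(|(id,)| id))
}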
- -use std::default::Default; use std::str::FromStr; use std::time; @@ -172,7 +170,6 @@ pub struct PgJob { pub job: Job, pub table: String, pub connection: sqlx::pool::PoolConnection, - pub retry_policy: RetryPolicy, } #[async_trait] @@ -259,9 +256,6 @@ RETURNING }); } let retryable_job = self.job.retry(error); - let retry_interval = self - .retry_policy - .time_until_next_retry(&retryable_job, preferred_retry_interval); let base_query = format!( r#" @@ -304,7 +298,6 @@ pub struct PgTransactionJob<'c, J, M> { pub job: Job, pub table: String, pub transaction: sqlx::Transaction<'c, sqlx::postgres::Postgres>, - pub retry_policy: RetryPolicy, } #[async_trait] @@ -408,9 +401,6 @@ RETURNING }); } let retryable_job = self.job.retry(error); - let retry_interval = self - .retry_policy - .time_until_next_retry(&retryable_job, preferred_retry_interval); let base_query = format!( r#" @@ -509,61 +499,6 @@ impl NewJob { } } -#[derive(Copy, Clone, Debug)] -/// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry. -pub struct RetryPolicy { - /// Coefficient to multiply initial_interval with for every past attempt. - backoff_coefficient: u32, - /// The backoff interval for the first retry. - initial_interval: time::Duration, - /// The maximum possible backoff between retries. - maximum_interval: Option, -} - -impl RetryPolicy { - pub fn new( - backoff_coefficient: u32, - initial_interval: time::Duration, - maximum_interval: Option, - ) -> Self { - Self { - backoff_coefficient, - initial_interval, - maximum_interval, - } - } - - /// Calculate the time until the next retry for a given RetryableJob. - pub fn time_until_next_retry( - &self, - job: &RetryableJob, - preferred_retry_interval: Option, - ) -> time::Duration { - let candidate_interval = - self.initial_interval * self.backoff_coefficient.pow(job.attempt as u32); - - match (preferred_retry_interval, self.maximum_interval) { - (Some(duration), Some(max_interval)) => std::cmp::min( - std::cmp::max(std::cmp::min(candidate_interval, max_interval), duration), - max_interval, - ), - (Some(duration), None) => std::cmp::max(candidate_interval, duration), - (None, Some(max_interval)) => std::cmp::min(candidate_interval, max_interval), - (None, None) => candidate_interval, - } - } -} - -impl Default for RetryPolicy { - fn default() -> Self { - Self { - backoff_coefficient: 2, - initial_interval: time::Duration::from_secs(1), - maximum_interval: None, - } - } -} - /// A queue implemented on top of a PostgreSQL table. #[derive(Clone)] pub struct PgQueue { @@ -571,8 +506,6 @@ pub struct PgQueue { name: String, /// A connection pool used to connect to the PostgreSQL database. pool: PgPool, - /// The retry policy to be assigned to Jobs in this PgQueue. - retry_policy: RetryPolicy, /// The identifier of the PostgreSQL table this queue runs on. table: String, } @@ -588,25 +521,14 @@ impl PgQueue { /// * `table_name`: The name for the table the queue will use in PostgreSQL. /// * `url`: A URL pointing to where the PostgreSQL database is hosted. /// * `worker_name`: The name of the worker that is operating with this queue. - /// * `retry_policy`: A retry policy to pass to jobs from this queue. 
- pub async fn new( - queue_name: &str, - table_name: &str, - url: &str, - retry_policy: RetryPolicy, - ) -> PgQueueResult { + pub async fn new(queue_name: &str, table_name: &str, url: &str) -> PgQueueResult { let name = queue_name.to_owned(); let table = table_name.to_owned(); let pool = PgPoolOptions::new() .connect_lazy(url) .map_err(|error| PgQueueError::PoolCreationError { error })?; - Ok(Self { - name, - pool, - retry_policy, - table, - }) + Ok(Self { name, pool, table }) } /// Dequeue a Job from this PgQueue to work on it. @@ -669,7 +591,6 @@ RETURNING job, table: self.table.to_owned(), connection, - retry_policy: self.retry_policy, })), // Although connection would be closed once it goes out of scope, sqlx recommends explicitly calling close(). @@ -749,7 +670,6 @@ RETURNING job, table: self.table.to_owned(), transaction: tx, - retry_policy: self.retry_policy, })), // Transaction is rolledback on drop. @@ -801,6 +721,7 @@ VALUES #[cfg(test)] mod tests { use super::*; + use crate::retry::RetryPolicy; #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] struct JobMetadata { @@ -858,7 +779,6 @@ mod tests { "test_can_dequeue_job", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to connect to local test postgresql database"); @@ -887,7 +807,6 @@ mod tests { "test_dequeue_returns_none_on_no_jobs", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to connect to local test postgresql database"); @@ -912,7 +831,6 @@ mod tests { "test_can_dequeue_tx_job", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to connect to local test postgresql database"); @@ -942,7 +860,6 @@ mod tests { "test_dequeue_tx_returns_none_on_no_jobs", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to connect to local test postgresql database"); @@ -972,7 +889,6 @@ mod tests { "test_can_retry_job_with_remaining_attempts", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - retry_policy, ) .await .expect("failed to connect to local test postgresql database"); @@ -983,8 +899,9 @@ mod tests { .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); + let retry_interval = retry_policy.time_until_next_retry(job.job.attempt as u32, None); let _ = job - .retry("a very reasonable failure reason", None) + .retry("a very reasonable failure reason", retry_interval) .await .expect("failed to retry job"); let retried_job: PgJob = queue @@ -1023,7 +940,6 @@ mod tests { "test_cannot_retry_job_without_remaining_attempts", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - retry_policy, ) .await .expect("failed to connect to local test postgresql database"); @@ -1035,7 +951,8 @@ mod tests { .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); - job.retry("a very reasonable failure reason", None) + let retry_interval = retry_policy.time_until_next_retry(job.job.attempt as u32, None); + job.retry("a very reasonable failure reason", retry_interval) .await .expect("failed to retry job"); } diff --git a/hook-common/src/retry.rs b/hook-common/src/retry.rs new file mode 100644 index 0000000..140da19 --- /dev/null +++ b/hook-common/src/retry.rs @@ -0,0 +1,55 @@ +use std::time; + +#[derive(Copy, Clone, Debug)] +/// The retry policy that 
PgQueue will use to determine how to set scheduled_at when enqueuing a retry. +pub struct RetryPolicy { + /// Coefficient to multiply initial_interval with for every past attempt. + backoff_coefficient: u32, + /// The backoff interval for the first retry. + initial_interval: time::Duration, + /// The maximum possible backoff between retries. + maximum_interval: Option, +} + +impl RetryPolicy { + pub fn new( + backoff_coefficient: u32, + initial_interval: time::Duration, + maximum_interval: Option, + ) -> Self { + Self { + backoff_coefficient, + initial_interval, + maximum_interval, + } + } + + /// Calculate the time until the next retry for a given RetryableJob. + pub fn time_until_next_retry( + &self, + attempt: u32, + preferred_retry_interval: Option, + ) -> time::Duration { + let candidate_interval = self.initial_interval * self.backoff_coefficient.pow(attempt); + + match (preferred_retry_interval, self.maximum_interval) { + (Some(duration), Some(max_interval)) => std::cmp::min( + std::cmp::max(std::cmp::min(candidate_interval, max_interval), duration), + max_interval, + ), + (Some(duration), None) => std::cmp::max(candidate_interval, duration), + (None, Some(max_interval)) => std::cmp::min(candidate_interval, max_interval), + (None, None) => candidate_interval, + } + } +} + +impl Default for RetryPolicy { + fn default() -> Self { + Self { + backoff_coefficient: 2, + initial_interval: time::Duration::from_secs(1), + maximum_interval: None, + } + } +} diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 0127d25..d6ebac7 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -51,6 +51,8 @@ pub struct WebhookConsumer<'p> { client: reqwest::Client, /// Maximum number of concurrent jobs being processed. max_concurrent_jobs: usize, + /// The retry policy used to calculate retry intervals when a job fails with a retryable error. + retry_policy: RetryPolicy, } impl<'p> WebhookConsumer<'p> { @@ -96,6 +98,7 @@ impl<'p> WebhookConsumer<'p> { /// Run this consumer to continuously process any jobs that become available. pub async fn run(&self) -> Result<(), WebhookConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); + let retry_policy = self.retry_policy.clone(); loop { let webhook_job = self.wait_for_job().await?; @@ -105,7 +108,7 @@ impl<'p> WebhookConsumer<'p> { let permit = semaphore.clone().acquire_owned().await.unwrap(); tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job).await; + let result = process_webhook_job(client, webhook_job, &retry_policy).await; drop(permit); result }); @@ -125,6 +128,8 @@ pub struct WebhookTransactionConsumer<'p> { client: reqwest::Client, /// Maximum number of concurrent jobs being processed. max_concurrent_jobs: usize, + /// The retry policy used to calculate retry intervals when a job fails with a retryable error. + retry_policy: RetryPolicy, } impl<'p> WebhookTransactionConsumer<'p> { @@ -134,6 +139,7 @@ impl<'p> WebhookTransactionConsumer<'p> { poll_interval: time::Duration, request_timeout: time::Duration, max_concurrent_jobs: usize, + retry_policy: RetryPolicy, ) -> Result { let mut headers = header::HeaderMap::new(); headers.insert( @@ -152,6 +158,7 @@ impl<'p> WebhookTransactionConsumer<'p> { poll_interval, client, max_concurrent_jobs, + retry_policy, }) } @@ -184,6 +191,7 @@ impl<'p> WebhookTransactionConsumer<'p> { /// Run this consumer to continuously process any jobs that become available. 
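// A worked example of the schedule this policy yields (a standalone
// re-derivation for illustration; it leaves out the preferred interval a
// Retry-After header may impose). With the defaults shown above, namely
// backoff_coefficient = 2 and initial_interval = 1s, attempts are retried
// after 1s, 2s, 4s, 8s, ..., capped by maximum_interval when one is set.
use std::time::Duration;

fn backoff(attempt: u32) -> Duration {
    let backoff_coefficient: u32 = 2;
    let initial_interval = Duration::from_secs(1);
    let maximum_interval: Option<Duration> = Some(Duration::from_secs(60)); // assumed cap

    let candidate = initial_interval * backoff_coefficient.pow(attempt);
    maximum_interval.map_or(candidate, |max| candidate.min(max))
}

fn main() {
    assert_eq!(backoff(0), Duration::from_secs(1));
    assert_eq!(backoff(3), Duration::from_secs(8));
    assert_eq!(backoff(10), Duration::from_secs(60)); // 1024s, capped at 60s
}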
pub async fn run(&self, transactional: bool) -> Result<(), ConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); + let retry_policy = self.retry_policy.clone(); if transactional { loop { @@ -333,6 +341,7 @@ async fn process_webhook_job( async fn process_webhook_job( client: reqwest::Client, webhook_job: PgJob, + retry_policy: &RetryPolicy, ) -> Result<(), WebhookConsumerError> { match send_webhook( client, @@ -353,20 +362,27 @@ async fn process_webhook_job( Err(WebhookConsumerError::RetryableWebhookError { reason, retry_after, - }) => match webhook_job.retry(reason.to_string(), retry_after).await { - Ok(_) => Ok(()), - Err(PgJobError::RetryInvalidError { - job: webhook_job, - error: fail_error, - }) => { - webhook_job - .fail(fail_error.to_string()) - .await - .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; - Ok(()) + }) => { + let retry_interval = + retry_policy.time_until_next_retry(webhook_job.job.attempt as u32, retry_after); + + match webhook_job.retry(reason.to_string(), retry_interval).await { + Ok(_) => Ok(()), + Err(PgJobError::RetryInvalidError { + job: webhook_job, + error: fail_error, + }) => { + webhook_job + .fail(fail_error.to_string()) + .await + .map_err(|job_error| { + WebhookConsumerError::PgJobError(job_error.to_string()) + })?; + Ok(()) + } + Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), } - Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), - }, + } Err(error) => { webhook_job .fail(error.to_string()) @@ -479,7 +495,7 @@ mod tests { // This is due to a long-standing cargo bug that reports imports and helper functions as unused. // See: https://github.com/rust-lang/rust/issues/46379. #[allow(unused_imports)] - use hook_common::pgqueue::{JobStatus, NewJob, RetryPolicy}; + use hook_common::pgqueue::{JobStatus, NewJob}; /// Use process id as a worker id for tests. 
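// For reference, a small predicate capturing the retry rule stated in the
// docs above ("429 and any 5XX"); an illustrative sketch, not the crate's
// actual error-classification code.
use http::StatusCode;

fn is_retryable_status(status: StatusCode) -> bool {
    status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()
}

#[test]
fn retryable_statuses() {
    assert!(is_retryable_status(StatusCode::TOO_MANY_REQUESTS)); // 429
    assert!(is_retryable_status(StatusCode::BAD_GATEWAY)); // 502
    assert!(!is_retryable_status(StatusCode::BAD_REQUEST)); // 400
}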
#[allow(dead_code)] @@ -536,7 +552,7 @@ mod tests { let queue_name = "test_wait_for_job".to_string(); let table_name = "job_queue".to_string(); let db_url = "postgres://posthog:posthog@localhost:15432/test_database".to_string(); - let queue = PgQueue::new(&queue_name, &table_name, &db_url, RetryPolicy::default()) + let queue = PgQueue::new(&queue_name, &table_name, &db_url) .await .expect("failed to connect to PG"); diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index 86b8094..dd9d3e7 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -1,6 +1,6 @@ use envconfig::Envconfig; -use hook_common::pgqueue::{PgQueue, RetryPolicy}; +use hook_common::{pgqueue::PgQueue, retry::RetryPolicy}; use hook_consumer::config::Config; use hook_consumer::consumer::WebhookConsumer; use hook_consumer::error::ConsumerError; @@ -14,14 +14,9 @@ async fn main() -> Result<(), ConsumerError> { config.retry_policy.initial_interval.0, Some(config.retry_policy.maximum_interval.0), ); - let queue = PgQueue::new( - &config.queue_name, - &config.table_name, - &config.database_url, - retry_policy, - ) - .await - .expect("failed to initialize queue"); + let queue = PgQueue::new(&config.queue_name, &config.table_name, &config.database_url) + .await + .expect("failed to initialize queue"); let consumer = WebhookConsumer::new( &config.consumer_name, From 0f61af3ccda26f905dcf44c9b30cb35f4965e764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 12:36:40 +0100 Subject: [PATCH 077/130] fix: Rebase on changes and fix all conflicts --- hook-common/src/lib.rs | 1 + hook-common/src/pgqueue.rs | 6 +- hook-common/src/retry.rs | 6 +- hook-consumer/src/consumer.rs | 177 ++++---------------------- hook-consumer/src/main.rs | 1 + hook-producer/src/handlers/app.rs | 3 +- hook-producer/src/handlers/webhook.rs | 7 +- hook-producer/src/main.rs | 4 +- 8 files changed, 36 insertions(+), 169 deletions(-) diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs index 7d9ef37..8e63ded 100644 --- a/hook-common/src/lib.rs +++ b/hook-common/src/lib.rs @@ -1,4 +1,5 @@ pub mod kafka_messages; pub mod metrics; pub mod pgqueue; +pub mod retry; pub mod webhook; diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 509d84e..00ce57f 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -160,7 +160,7 @@ pub trait PgQueueJob { async fn retry( mut self, error: E, - preferred_retry_interval: Option, + retry_interval: time::Duration, ) -> Result, PgJobError>>; } @@ -247,7 +247,7 @@ RETURNING async fn retry( mut self, error: E, - preferred_retry_interval: Option, + retry_interval: time::Duration, ) -> Result, PgJobError>>> { if self.job.is_gte_max_attempts() { return Err(PgJobError::RetryInvalidError { @@ -392,7 +392,7 @@ RETURNING async fn retry( mut self, error: E, - preferred_retry_interval: Option, + retry_interval: time::Duration, ) -> Result, PgJobError>>> { if self.job.is_gte_max_attempts() { return Err(PgJobError::RetryInvalidError { diff --git a/hook-common/src/retry.rs b/hook-common/src/retry.rs index 140da19..f72b0d1 100644 --- a/hook-common/src/retry.rs +++ b/hook-common/src/retry.rs @@ -4,11 +4,11 @@ use std::time; /// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry. pub struct RetryPolicy { /// Coefficient to multiply initial_interval with for every past attempt. 
- backoff_coefficient: u32, + pub backoff_coefficient: u32, /// The backoff interval for the first retry. - initial_interval: time::Duration, + pub initial_interval: time::Duration, /// The maximum possible backoff between retries. - maximum_interval: Option, + pub maximum_interval: Option, } impl RetryPolicy { diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index d6ebac7..b1e5071 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -3,10 +3,11 @@ use std::sync::Arc; use std::time; use async_std::task; -use hook_common::pgqueue::{ - PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob, +use hook_common::{ + pgqueue::{PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, + retry::RetryPolicy, + webhook::{HttpMethod, WebhookJobError, WebhookJobMetadata, WebhookJobParameters}, }; -use hook_common::webhook::{HttpMethod, WebhookJobError, WebhookJobMetadata, WebhookJobParameters}; use http::StatusCode; use reqwest::header; use tokio::sync; @@ -17,6 +18,7 @@ use crate::error::{ConsumerError, WebhookError}; trait WebhookJob: PgQueueJob + std::marker::Send { fn parameters(&self) -> &WebhookJobParameters; fn metadata(&self) -> &WebhookJobMetadata; + fn attempt(&self) -> i32; } impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata> { @@ -27,6 +29,10 @@ impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadat fn metadata(&self) -> &WebhookJobMetadata { &self.job.metadata } + + fn attempt(&self) -> i32 { + self.job.attempt + } } impl WebhookJob for PgJob { @@ -37,6 +43,10 @@ impl WebhookJob for PgJob { fn metadata(&self) -> &WebhookJobMetadata { &self.job.metadata } + + fn attempt(&self) -> i32 { + self.job.attempt + } } /// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. @@ -62,6 +72,7 @@ impl<'p> WebhookConsumer<'p> { poll_interval: time::Duration, request_timeout: time::Duration, max_concurrent_jobs: usize, + retry_policy: RetryPolicy, ) -> Self { let mut headers = header::HeaderMap::new(); headers.insert( @@ -81,85 +92,8 @@ impl<'p> WebhookConsumer<'p> { poll_interval, client, max_concurrent_jobs, - } - } - - /// Wait until a job becomes available in our queue. - async fn wait_for_job<'a>(&self) -> Result, WebhookConsumerError> { - loop { - if let Some(job) = self.queue.dequeue(&self.name).await? { - return Ok(job); - } else { - task::sleep(self.poll_interval).await; - } - } - } - - /// Run this consumer to continuously process any jobs that become available. - pub async fn run(&self) -> Result<(), WebhookConsumerError> { - let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); - let retry_policy = self.retry_policy.clone(); - - loop { - let webhook_job = self.wait_for_job().await?; - - // reqwest::Client internally wraps with Arc, so this allocation is cheap. - let client = self.client.clone(); - let permit = semaphore.clone().acquire_owned().await.unwrap(); - - tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job, &retry_policy).await; - drop(permit); - result - }); - } - } -} - -/// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. -pub struct WebhookTransactionConsumer<'p> { - /// An identifier for this consumer. Used to mark jobs we have consumed. - name: String, - /// The queue we will be dequeuing jobs from. - queue: &'p PgQueue, - /// The interval for polling the queue. 
- poll_interval: time::Duration, - /// The client used for HTTP requests. - client: reqwest::Client, - /// Maximum number of concurrent jobs being processed. - max_concurrent_jobs: usize, - /// The retry policy used to calculate retry intervals when a job fails with a retryable error. - retry_policy: RetryPolicy, -} - -impl<'p> WebhookTransactionConsumer<'p> { - pub fn new( - name: &str, - queue: &'p PgQueue, - poll_interval: time::Duration, - request_timeout: time::Duration, - max_concurrent_jobs: usize, - retry_policy: RetryPolicy, - ) -> Result { - let mut headers = header::HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static("application/json"), - ); - - let client = reqwest::Client::builder() - .default_headers(headers) - .timeout(request_timeout) - .build()?; - - Ok(Self { - name: name.to_owned(), - queue, - poll_interval, - client, - max_concurrent_jobs, retry_policy, - }) + } } /// Wait until a job becomes available in our queue. @@ -191,7 +125,6 @@ impl<'p> WebhookTransactionConsumer<'p> { /// Run this consumer to continuously process any jobs that become available. pub async fn run(&self, transactional: bool) -> Result<(), ConsumerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); - let retry_policy = self.retry_policy.clone(); if transactional { loop { @@ -199,6 +132,7 @@ impl<'p> WebhookTransactionConsumer<'p> { spawn_webhook_job_processing_task( self.client.clone(), semaphore.clone(), + self.retry_policy, webhook_job, ) .await; @@ -209,6 +143,7 @@ impl<'p> WebhookTransactionConsumer<'p> { spawn_webhook_job_processing_task( self.client.clone(), semaphore.clone(), + self.retry_policy, webhook_job, ) .await; @@ -227,6 +162,7 @@ impl<'p> WebhookTransactionConsumer<'p> { async fn spawn_webhook_job_processing_task( client: reqwest::Client, semaphore: Arc, + retry_policy: RetryPolicy, webhook_job: W, ) -> tokio::task::JoinHandle> { let permit = semaphore @@ -235,7 +171,7 @@ async fn spawn_webhook_job_processing_task( .expect("semaphore has been closed"); tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job).await; + let result = process_webhook_job(client, webhook_job, &retry_policy).await; drop(permit); result }) @@ -257,6 +193,7 @@ async fn spawn_webhook_job_processing_task( async fn process_webhook_job( client: reqwest::Client, webhook_job: W, + retry_policy: &RetryPolicy, ) -> Result<(), ConsumerError> { let parameters = webhook_job.parameters(); @@ -298,8 +235,11 @@ async fn process_webhook_job( Ok(()) } Err(WebhookError::RetryableRequestError { error, retry_after }) => { + let retry_interval = + retry_policy.time_until_next_retry(webhook_job.attempt() as u32, retry_after); + match webhook_job - .retry(WebhookJobError::from(&error), retry_after) + .retry(WebhookJobError::from(&error), retry_interval) .await { Ok(_) => Ok(()), @@ -325,74 +265,6 @@ async fn process_webhook_job( } } -/// Process a webhook job by transitioning it to its appropriate state after its request is sent. -/// After we finish, the webhook job will be set as completed (if the request was successful), retryable (if the request -/// was unsuccessful but we can still attempt a retry), or failed (if the request was unsuccessful and no more retries -/// may be attempted). -/// -/// A webhook job is considered retryable after a failing request if: -/// 1. The job has attempts remaining (i.e. hasn't reached `max_attempts`), and... -/// 2. The status code indicates retrying at a later point could resolve the issue. 
This means: 429 and any 5XX. -/// -/// # Arguments -/// -/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. -/// * `request_timeout`: A timeout for the HTTP request. -async fn process_webhook_job( - client: reqwest::Client, - webhook_job: PgJob, - retry_policy: &RetryPolicy, -) -> Result<(), WebhookConsumerError> { - match send_webhook( - client, - &webhook_job.job.parameters.method, - &webhook_job.job.parameters.url, - &webhook_job.job.parameters.headers, - webhook_job.job.parameters.body.clone(), - ) - .await - { - Ok(_) => { - webhook_job - .complete() - .await - .map_err(|error| WebhookConsumerError::PgJobError(error.to_string()))?; - Ok(()) - } - Err(WebhookConsumerError::RetryableWebhookError { - reason, - retry_after, - }) => { - let retry_interval = - retry_policy.time_until_next_retry(webhook_job.job.attempt as u32, retry_after); - - match webhook_job.retry(reason.to_string(), retry_interval).await { - Ok(_) => Ok(()), - Err(PgJobError::RetryInvalidError { - job: webhook_job, - error: fail_error, - }) => { - webhook_job - .fail(fail_error.to_string()) - .await - .map_err(|job_error| { - WebhookConsumerError::PgJobError(job_error.to_string()) - })?; - Ok(()) - } - Err(job_error) => Err(WebhookConsumerError::PgJobError(job_error.to_string())), - } - } - Err(error) => { - webhook_job - .fail(error.to_string()) - .await - .map_err(|job_error| WebhookConsumerError::PgJobError(job_error.to_string()))?; - Ok(()) - } - } -} - /// Make an HTTP request to a webhook endpoint. /// /// # Arguments @@ -585,6 +457,7 @@ mod tests { time::Duration::from_millis(100), time::Duration::from_millis(5000), 10, + RetryPolicy::default(), ); let consumed_job = consumer diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index dd9d3e7..3cefc1d 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -24,6 +24,7 @@ async fn main() -> Result<(), ConsumerError> { config.poll_interval.0, config.request_timeout.0, config.max_concurrent_jobs, + retry_policy, ); consumer.run(config.transactional).await?; diff --git a/hook-producer/src/handlers/app.rs b/hook-producer/src/handlers/app.rs index 1666676..78d4dc5 100644 --- a/hook-producer/src/handlers/app.rs +++ b/hook-producer/src/handlers/app.rs @@ -31,7 +31,7 @@ mod tests { body::Body, http::{Request, StatusCode}, }; - use hook_common::pgqueue::{PgQueue, RetryPolicy}; + use hook_common::pgqueue::PgQueue; use http_body_util::BodyExt; // for `collect` use tower::ServiceExt; // for `call`, `oneshot`, and `ready` @@ -41,7 +41,6 @@ mod tests { "test_index", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to construct pg_queue"); diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs index 3947320..ca0cc37 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-producer/src/handlers/webhook.rs @@ -108,7 +108,7 @@ mod tests { body::Body, http::{self, Request, StatusCode}, }; - use hook_common::pgqueue::{PgQueue, RetryPolicy}; + use hook_common::pgqueue::PgQueue; use hook_common::webhook::{HttpMethod, WebhookJobParameters}; use http_body_util::BodyExt; // for `collect` use std::collections; @@ -122,7 +122,6 @@ mod tests { "test_index", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to construct pg_queue"); @@ -171,7 +170,6 @@ mod tests { "test_index", "job_queue", 
"postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to construct pg_queue"); @@ -215,7 +213,6 @@ mod tests { "test_index", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to construct pg_queue"); @@ -243,7 +240,6 @@ mod tests { "test_index", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to construct pg_queue"); @@ -271,7 +267,6 @@ mod tests { "test_index", "job_queue", "postgres://posthog:posthog@localhost:15432/test_database", - RetryPolicy::default(), ) .await .expect("failed to construct pg_queue"); diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index 7c2b73c..39f4500 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -4,7 +4,7 @@ use envconfig::Envconfig; use eyre::Result; use hook_common::metrics; -use hook_common::pgqueue::{PgQueue, RetryPolicy}; +use hook_common::pgqueue::PgQueue; mod config; mod handlers; @@ -29,8 +29,6 @@ async fn main() { &config.queue_name, &config.table_name, &config.database_url, - // TODO: It seems unnecessary that the producer side needs to know about the retry policy. - RetryPolicy::default(), ) .await .expect("failed to initialize queue"); From 9b7ab99d19327b274adf6c9b2bfb877c8eb0a59d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 15:15:59 +0100 Subject: [PATCH 078/130] feat: Provide a function to start serving metrics --- Cargo.lock | 1 + hook-common/Cargo.toml | 1 + hook-common/src/metrics.rs | 16 ++++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 130f765..ac2c8fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1035,6 +1035,7 @@ dependencies = [ "futures", "hook-common", "http 0.2.11", + "metrics", "reqwest", "serde", "serde_derive", diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 9b20396..00c7bd2 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -18,6 +18,7 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } sqlx = { workspace = true } +tokio = { workspace = true } thiserror = { workspace = true } uuid = { workspace = true } diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index dbdc7b1..7d881ea 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -2,9 +2,25 @@ use std::time::Instant; use axum::{ body::Body, extract::MatchedPath, http::Request, middleware::Next, response::IntoResponse, + routing::get, Router, }; use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; +/// Bind a TcpListener on the provided bind address to serve metrics on it. 
+pub async fn serve_metrics(bind: &str) -> Result<(), std::io::Error> { + let recorder_handle = setup_metrics_recorder(); + + let router = Router::new() + .route("/metrics", get(recorder_handle.render())) + .layer(axum::middleware::from_fn(track_metrics)); + + let listener = tokio::net::TcpListener::bind(bind).await?; + + axum::serve(listener, router).await?; + + Ok(()) +} + pub fn setup_metrics_recorder() -> PrometheusHandle { const EXPONENTIAL_SECONDS: &[f64] = &[ 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, From ec3728606fa18ff0c6b8db5d8be1660c740d2779 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 15:16:12 +0100 Subject: [PATCH 079/130] feat: Serve basic count metrics on consumer --- hook-consumer/Cargo.toml | 1 + hook-consumer/src/config.rs | 13 +++++++++ hook-consumer/src/consumer.rs | 54 ++++++++++++++++++++++++++++++++++- hook-consumer/src/main.rs | 9 +++++- 4 files changed, 75 insertions(+), 2 deletions(-) diff --git a/hook-consumer/Cargo.toml b/hook-consumer/Cargo.toml index 35c64b5..2733a59 100644 --- a/hook-consumer/Cargo.toml +++ b/hook-consumer/Cargo.toml @@ -10,6 +10,7 @@ envconfig = { workspace = true } futures = "0.3" hook-common = { path = "../hook-common" } http = { version = "0.2" } +metrics = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs index 36c120a..9169512 100644 --- a/hook-consumer/src/config.rs +++ b/hook-consumer/src/config.rs @@ -5,6 +5,12 @@ use envconfig::Envconfig; #[derive(Envconfig, Clone)] pub struct Config { + #[envconfig(from = "BIND_HOST", default = "0.0.0.0")] + pub host: String, + + #[envconfig(from = "BIND_PORT", default = "8001")] + pub port: u16, + #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] pub database_url: String, @@ -33,6 +39,13 @@ pub struct Config { pub table_name: String, } +impl Config { + /// Produce a host:port address for binding a TcpListener. + pub fn bind(&self) -> String { + format!("{}:{}", self.host, self.port) + } +} + #[derive(Debug, Clone, Copy)] pub struct EnvMsDuration(pub time::Duration); diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index b1e5071..065555e 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -19,6 +19,8 @@ trait WebhookJob: PgQueueJob + std::marker::Send { fn parameters(&self) -> &WebhookJobParameters; fn metadata(&self) -> &WebhookJobMetadata; fn attempt(&self) -> i32; + fn queue(&self) -> String; + fn target(&self) -> String; } impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata> { @@ -33,6 +35,14 @@ impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadat fn attempt(&self) -> i32 { self.job.attempt } + + fn queue(&self) -> String { + self.job.queue.to_owned() + } + + fn target(&self) -> String { + self.job.target.to_owned() + } } impl WebhookJob for PgJob { @@ -47,6 +57,14 @@ impl WebhookJob for PgJob { fn attempt(&self) -> i32 { self.job.attempt } + + fn queue(&self) -> String { + self.job.queue.to_owned() + } + + fn target(&self) -> String { + self.job.target.to_owned() + } } /// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. 
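// A small sketch of the (queue, target) label pattern the hunk below attaches
// to every counter, so job totals, failures and retries can be broken down
// per destination. The metric and label names mirror the ones added below;
// the helper itself is illustrative rather than part of the crate.
fn record_job_dequeued(queue: &str, target: &str) {
    let labels = [
        ("queue", queue.to_owned()),
        ("target", target.to_owned()),
    ];

    metrics::increment_counter!("webhook_jobs_total", &labels);
}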
@@ -170,6 +188,13 @@ async fn spawn_webhook_job_processing_task( .await .expect("semaphore has been closed"); + let labels = [ + ("queue", webhook_job.queue()), + ("target", webhook_job.target()), + ]; + + metrics::increment_counter!("webhook_jobs_total", &labels); + tokio::spawn(async move { let result = process_webhook_job(client, webhook_job, &retry_policy).await; drop(permit); @@ -197,6 +222,11 @@ async fn process_webhook_job( ) -> Result<(), ConsumerError> { let parameters = webhook_job.parameters(); + let labels = [ + ("queue", webhook_job.queue()), + ("target", webhook_job.target()), + ]; + match send_webhook( client, ¶meters.method, @@ -211,6 +241,9 @@ async fn process_webhook_job( .complete() .await .map_err(|error| ConsumerError::PgJobError(error.to_string()))?; + + metrics::increment_counter!("webhook_jobs_completed", &labels); + Ok(()) } Err(WebhookError::ParseHeadersError(e)) => { @@ -218,6 +251,9 @@ async fn process_webhook_job( .fail(WebhookJobError::new_parse(&e.to_string())) .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + + metrics::increment_counter!("webhook_jobs_failed", &labels); + Ok(()) } Err(WebhookError::ParseHttpMethodError(e)) => { @@ -225,6 +261,9 @@ async fn process_webhook_job( .fail(WebhookJobError::new_parse(&e)) .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + + metrics::increment_counter!("webhook_jobs_failed", &labels); + Ok(()) } Err(WebhookError::ParseUrlError(e)) => { @@ -232,6 +271,9 @@ async fn process_webhook_job( .fail(WebhookJobError::new_parse(&e.to_string())) .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + + metrics::increment_counter!("webhook_jobs_failed", &labels); + Ok(()) } Err(WebhookError::RetryableRequestError { error, retry_after }) => { @@ -242,7 +284,11 @@ async fn process_webhook_job( .retry(WebhookJobError::from(&error), retry_interval) .await { - Ok(_) => Ok(()), + Ok(_) => { + metrics::increment_counter!("webhook_jobs_retried", &labels); + + Ok(()) + } Err(PgJobError::RetryInvalidError { job: webhook_job, .. 
}) => { @@ -250,6 +296,9 @@ async fn process_webhook_job( .fail(WebhookJobError::from(&error)) .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + + metrics::increment_counter!("webhook_jobs_failed", &labels); + Ok(()) } Err(job_error) => Err(ConsumerError::PgJobError(job_error.to_string())), @@ -260,6 +309,9 @@ async fn process_webhook_job( .fail(WebhookJobError::from(&error)) .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + + metrics::increment_counter!("webhook_jobs_failed", &labels); + Ok(()) } } diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index 3cefc1d..b3ed22c 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -1,6 +1,6 @@ use envconfig::Envconfig; -use hook_common::{pgqueue::PgQueue, retry::RetryPolicy}; +use hook_common::{metrics::serve_metrics, pgqueue::PgQueue, retry::RetryPolicy}; use hook_consumer::config::Config; use hook_consumer::consumer::WebhookConsumer; use hook_consumer::error::ConsumerError; @@ -27,6 +27,13 @@ async fn main() -> Result<(), ConsumerError> { retry_policy, ); + let bind = config.bind(); + tokio::task::spawn(async move { + serve_metrics(&bind) + .await + .expect("failed to start serving metrics"); + }); + consumer.run(config.transactional).await?; Ok(()) From 0a014cb2da24f98b649c11d053bd79b840070791 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 15:20:30 +0100 Subject: [PATCH 080/130] refactor: Use tokio::time::interval instead of async_std::task::sleep --- Cargo.lock | 381 +--------------------------------- hook-consumer/Cargo.toml | 1 - hook-consumer/src/consumer.rs | 13 +- 3 files changed, 15 insertions(+), 380 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac2c8fc..7cc6645 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,150 +60,6 @@ dependencies = [ "libc", ] -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" -dependencies = [ - "concurrent-queue", - "event-listener 4.0.0", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-executor" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" -dependencies = [ - "async-lock 3.2.0", - "async-task", - "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.1.0", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" -dependencies = [ - "async-channel 2.1.1", - "async-executor", - "async-io 2.2.2", - "async-lock 3.2.0", - "blocking", - "futures-lite 2.1.0", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - 
"log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" -dependencies = [ - "async-lock 3.2.0", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.1.0", - "parking", - "polling 3.3.1", - "rustix 0.38.28", - "slab", - "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" -dependencies = [ - "event-listener 4.0.0", - "event-listener-strategy", - "pin-project-lite", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-channel 1.9.0", - "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite 1.13.0", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - -[[package]] -name = "async-task" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d90cd0b264dfdd8eb5bad0a2c217c1f88fa96a8573f40e7b12de23fb468f46" - [[package]] name = "async-trait" version = "0.1.74" @@ -230,12 +86,6 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - [[package]] name = "atomic-write-file" version = "0.1.2" @@ -356,22 +206,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "blocking" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" -dependencies = [ - "async-channel 2.1.1", - "async-lock 3.2.0", - "async-task", - "fastrand 2.0.1", - "futures-io", - "futures-lite 2.1.0", - "piper", - "tracing", -] - [[package]] name = "bumpalo" version = "3.14.0" @@ -428,15 +262,6 @@ dependencies = [ "cc", ] -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "const-oid" version = "0.9.6" @@ -624,27 +449,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "event-listener" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" -dependencies = [ - 
"concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.0", - "pin-project-lite", -] - [[package]] name = "eyre" version = "0.6.11" @@ -655,15 +459,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - [[package]] name = "fastrand" version = "2.0.1" @@ -776,34 +571,6 @@ version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" -dependencies = [ - "fastrand 2.0.1", - "futures-core", - "futures-io", - "parking", - "pin-project-lite", -] - [[package]] name = "futures-macro" version = "0.3.29" @@ -872,18 +639,6 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "h2" version = "0.3.22" @@ -1029,7 +784,6 @@ dependencies = [ name = "hook-consumer" version = "0.1.0" dependencies = [ - "async-std", "chrono", "envconfig", "futures", @@ -1178,7 +932,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2", "tokio", "tower-service", "tracing", @@ -1230,7 +984,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.1.0", "pin-project-lite", - "socket2 0.5.5", + "socket2", "tokio", "tower", "tower-service", @@ -1296,26 +1050,6 @@ dependencies = [ "hashbrown 0.14.3", ] -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipnet" version = "2.9.0" @@ -1346,15 +1080,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -1399,12 +1124,6 @@ 
dependencies = [ "vcpkg", ] -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.12" @@ -1426,9 +1145,6 @@ name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" -dependencies = [ - "value-bag", -] [[package]] name = "mach2" @@ -1750,12 +1466,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.1" @@ -1832,17 +1542,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "piper" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" -dependencies = [ - "atomic-waker", - "fastrand 2.0.1", - "futures-io", -] - [[package]] name = "pkcs1" version = "0.7.5" @@ -1870,36 +1569,6 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" -dependencies = [ - "cfg-if", - "concurrent-queue", - "pin-project-lite", - "rustix 0.38.28", - "tracing", - "windows-sys 0.52.0", -] - [[package]] name = "portable-atomic" version = "1.6.0" @@ -2129,20 +1798,6 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" -[[package]] -name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - [[package]] name = "rustix" version = "0.38.28" @@ -2152,7 +1807,7 @@ dependencies = [ "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.12", + "linux-raw-sys", "windows-sys 0.52.0", ] @@ -2330,16 +1985,6 @@ version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = 
"socket2" version = "0.5.5" @@ -2414,7 +2059,7 @@ dependencies = [ "crossbeam-queue", "dotenvy", "either", - "event-listener 2.5.3", + "event-listener", "futures-channel", "futures-core", "futures-intrusive", @@ -2665,9 +2310,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", - "fastrand 2.0.1", + "fastrand", "redox_syscall", - "rustix 0.38.28", + "rustix", "windows-sys 0.48.0", ] @@ -2730,7 +2375,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -2962,12 +2607,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value-bag" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72e1902dde2bd6441347de2b70b7f5d59bf157c6c62f0c44572607a1d55bbe" - [[package]] name = "vcpkg" version = "0.2.15" @@ -2980,12 +2619,6 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" -[[package]] -name = "waker-fn" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" - [[package]] name = "want" version = "0.3.1" diff --git a/hook-consumer/Cargo.toml b/hook-consumer/Cargo.toml index 2733a59..fc8ee4a 100644 --- a/hook-consumer/Cargo.toml +++ b/hook-consumer/Cargo.toml @@ -4,7 +4,6 @@ version = "0.1.0" edition = "2021" [dependencies] -async-std = { version = "1.12" } chrono = { workspace = true } envconfig = { workspace = true } futures = "0.3" diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 065555e..de857c5 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -2,7 +2,6 @@ use std::collections; use std::sync::Arc; use std::time; -use async_std::task; use hook_common::{ pgqueue::{PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, retry::RetryPolicy, @@ -118,11 +117,13 @@ impl<'p> WebhookConsumer<'p> { async fn wait_for_job<'a>( &self, ) -> Result, ConsumerError> { + let mut interval = tokio::time::interval(self.poll_interval); + loop { + interval.tick().await; + if let Some(job) = self.queue.dequeue(&self.name).await? { return Ok(job); - } else { - task::sleep(self.poll_interval).await; } } } @@ -131,11 +132,13 @@ impl<'p> WebhookConsumer<'p> { async fn wait_for_job_tx<'a>( &self, ) -> Result, ConsumerError> { + let mut interval = tokio::time::interval(self.poll_interval); + loop { + interval.tick().await; + if let Some(job) = self.queue.dequeue_tx(&self.name).await? 
{ return Ok(job); - } else { - task::sleep(self.poll_interval).await; } } } From f3e1252aed2a91f23a642bf38e466f4f3e123ceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 15:43:42 +0100 Subject: [PATCH 081/130] feat: Track webhook job processing --- hook-common/src/metrics.rs | 5 +--- hook-consumer/src/consumer.rs | 49 +++++++++++++++++------------------ 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index 7d881ea..f83edb5 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -27,10 +27,7 @@ pub fn setup_metrics_recorder() -> PrometheusHandle { ]; PrometheusBuilder::new() - .set_buckets_for_metric( - Matcher::Full("http_requests_duration_seconds".to_string()), - EXPONENTIAL_SECONDS, - ) + .set_buckets(EXPONENTIAL_SECONDS) .unwrap() .install_recorder() .unwrap() diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index de857c5..8e67949 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::time; use hook_common::{ - pgqueue::{PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, + pgqueue::{Job, PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, retry::RetryPolicy, webhook::{HttpMethod, WebhookJobError, WebhookJobMetadata, WebhookJobParameters}, }; @@ -13,38 +13,26 @@ use tokio::sync; use crate::error::{ConsumerError, WebhookError}; -/// A WebhookJob is any PgQueueJob that returns a reference to webhook parameters and metadata. +/// A WebhookJob is any PgQueueJob with WebhookJobParameters and WebhookJobMetadata. trait WebhookJob: PgQueueJob + std::marker::Send { fn parameters(&self) -> &WebhookJobParameters; fn metadata(&self) -> &WebhookJobMetadata; - fn attempt(&self) -> i32; - fn queue(&self) -> String; - fn target(&self) -> String; -} - -impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata> { - fn parameters(&self) -> &WebhookJobParameters { - &self.job.parameters - } - - fn metadata(&self) -> &WebhookJobMetadata { - &self.job.metadata - } + fn job(&self) -> &Job; fn attempt(&self) -> i32 { - self.job.attempt + self.job().attempt } fn queue(&self) -> String { - self.job.queue.to_owned() + self.job().queue.to_owned() } fn target(&self) -> String { - self.job.target.to_owned() + self.job().target.to_owned() } } -impl WebhookJob for PgJob { +impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadata> { fn parameters(&self) -> &WebhookJobParameters { &self.job.parameters } @@ -53,16 +41,22 @@ impl WebhookJob for PgJob { &self.job.metadata } - fn attempt(&self) -> i32 { - self.job.attempt + fn job(&self) -> &Job { + &self.job } +} - fn queue(&self) -> String { - self.job.queue.to_owned() +impl WebhookJob for PgJob { + fn parameters(&self) -> &WebhookJobParameters { + &self.job.parameters } - fn target(&self) -> String { - self.job.target.to_owned() + fn metadata(&self) -> &WebhookJobMetadata { + &self.job.metadata + } + + fn job(&self) -> &Job { + &self.job } } @@ -199,8 +193,13 @@ async fn spawn_webhook_job_processing_task( metrics::increment_counter!("webhook_jobs_total", &labels); tokio::spawn(async move { + let now = tokio::time::Instant::now(); let result = process_webhook_job(client, webhook_job, &retry_policy).await; drop(permit); + + let elapsed = now.elapsed().as_secs_f64(); + metrics::histogram!("webhook_jobs_processing_duration_seconds", elapsed, 
&labels); + result }) } From 0e54ceaa639c6ae5103a818d89caa953e9b15003 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 21 Dec 2023 15:53:58 +0100 Subject: [PATCH 082/130] fix: Remove unused imports --- hook-common/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index f83edb5..4411fea 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -4,7 +4,7 @@ use axum::{ body::Body, extract::MatchedPath, http::Request, middleware::Next, response::IntoResponse, routing::get, Router, }; -use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; +use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; /// Bind a TcpListener on the provided bind address to serve metrics on it. pub async fn serve_metrics(bind: &str) -> Result<(), std::io::Error> { From 1f8088bd7c7d101f3e99775f0351bf9baf866dfb Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 21 Dec 2023 09:58:27 -0700 Subject: [PATCH 083/130] Update hook-common/src/kafka_messages/app_metrics.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Tomás Farías Santana --- hook-common/src/kafka_messages/app_metrics.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 5aff62c..145d4af 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -128,8 +128,8 @@ where let error_type = match error_type { ErrorType::ConnectionError => "Connection Error".to_owned(), - ErrorType::TimeoutError => "Timeout".to_owned(), - ErrorType::BadHttpStatus(s) => format!("HTTP Status: {}", s), + ErrorType::TimeoutError => "Timeout Error".to_owned(), + ErrorType::BadHttpStatus(s) => format!("Bad HTTP Status: {}", s), ErrorType::ParseError => "Parse Error".to_owned(), }; serializer.serialize_str(&error_type) From 2220b6bf2a331765b852f7ae4ff78d031a7f33bb Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 21 Dec 2023 09:59:21 -0700 Subject: [PATCH 084/130] Update hook-common/src/kafka_messages/app_metrics.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Tomás Farías Santana --- hook-common/src/kafka_messages/app_metrics.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 145d4af..94ec24e 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -144,9 +144,9 @@ where Some(s) => { let error_type = match &s[..] { "Connection Error" => ErrorType::ConnectionError, - "Timeout" => ErrorType::TimeoutError, - _ if s.starts_with("HTTP Status:") => { - let status = &s["HTTP Status:".len()..]; + "Timeout Error" => ErrorType::TimeoutError, + _ if s.starts_with("Bad HTTP Status:") => { + let status = &s["Bad HTTP Status:".len()..]; ErrorType::BadHttpStatus(status.parse().map_err(serde::de::Error::custom)?) 
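The commits on either side of this hunk are all about keeping the serialize and deserialize halves of this hand-written enum codec in lockstep. A self-contained sketch of the pattern, assuming the status code is a `u16`; note the `trim`, which the version above appears to need as well, since the serializer writes a space after the colon but `u16::from_str` rejects leading whitespace:

    use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

    #[derive(Debug, PartialEq)]
    enum ErrorType {
        ConnectionError,
        TimeoutError,
        BadHttpStatus(u16),
        ParseError,
    }

    impl Serialize for ErrorType {
        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
            let s = match self {
                ErrorType::ConnectionError => "Connection Error".to_owned(),
                ErrorType::TimeoutError => "Timeout Error".to_owned(),
                ErrorType::BadHttpStatus(code) => format!("Bad HTTP Status: {}", code),
                ErrorType::ParseError => "Parse Error".to_owned(),
            };
            serializer.serialize_str(&s)
        }
    }

    impl<'de> Deserialize<'de> for ErrorType {
        fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
            let s = String::deserialize(deserializer)?;
            Ok(match s.as_str() {
                "Connection Error" => ErrorType::ConnectionError,
                "Timeout Error" => ErrorType::TimeoutError,
                other if other.starts_with("Bad HTTP Status:") => {
                    // trim matters: the serializer emits "Bad HTTP Status: 500",
                    // and parsing " 500" as a u16 fails on the leading space.
                    let status = other["Bad HTTP Status:".len()..].trim();
                    ErrorType::BadHttpStatus(status.parse().map_err(de::Error::custom)?)
                }
                "Parse Error" => ErrorType::ParseError,
                other => {
                    return Err(de::Error::unknown_variant(
                        other,
                        &["Connection Error", "Timeout Error", "Bad HTTP Status: ", "Parse Error"],
                    ))
                }
            })
        }
    }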
} "Parse Error" => ErrorType::ParseError, From 50884295fa660f78510c42493ce88bd61b669ba4 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 21 Dec 2023 09:59:30 -0700 Subject: [PATCH 085/130] Update hook-common/src/kafka_messages/app_metrics.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Tomás Farías Santana --- hook-common/src/kafka_messages/app_metrics.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 94ec24e..9acc411 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -155,8 +155,8 @@ where &s, &[ "Connection Error", - "Timeout", - "HTTP Status: ", + "Timeout Error", + "Bad HTTP Status: ", "Parse Error", ], )) From db076bf7a61fa0f0d5548ea1d37581fbaa072e5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 22 Dec 2023 11:43:21 +0100 Subject: [PATCH 086/130] refactor: Split-up router creation from metrics serving to allow callers to add more routes --- hook-common/src/metrics.rs | 20 ++++++++++++-------- hook-consumer/src/main.rs | 7 +++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index 4411fea..3d9e4c0 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -6,14 +6,9 @@ use axum::{ }; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; -/// Bind a TcpListener on the provided bind address to serve metrics on it. -pub async fn serve_metrics(bind: &str) -> Result<(), std::io::Error> { - let recorder_handle = setup_metrics_recorder(); - - let router = Router::new() - .route("/metrics", get(recorder_handle.render())) - .layer(axum::middleware::from_fn(track_metrics)); - +/// Bind a `TcpListener` on the provided bind address to serve a `Router` on it. +/// This function is intended to take a Router as returned by `setup_metrics_router`, potentially with more routes added by the caller. +pub async fn serve(router: Router, bind: &str) -> Result<(), std::io::Error> { let listener = tokio::net::TcpListener::bind(bind).await?; axum::serve(listener, router).await?; @@ -21,6 +16,15 @@ pub async fn serve_metrics(bind: &str) -> Result<(), std::io::Error> { Ok(()) } +/// Build a Router for a metrics endpoint. 
+pub fn setup_metrics_router() -> Router { + let recorder_handle = setup_metrics_recorder(); + + Router::new() + .route("/metrics", get(recorder_handle.render())) + .layer(axum::middleware::from_fn(track_metrics)) +} + pub fn setup_metrics_recorder() -> PrometheusHandle { const EXPONENTIAL_SECONDS: &[f64] = &[ 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index b3ed22c..38c2ee2 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -1,6 +1,8 @@ use envconfig::Envconfig; -use hook_common::{metrics::serve_metrics, pgqueue::PgQueue, retry::RetryPolicy}; +use hook_common::{ + metrics::serve, metrics::setup_metrics_router, pgqueue::PgQueue, retry::RetryPolicy, +}; use hook_consumer::config::Config; use hook_consumer::consumer::WebhookConsumer; use hook_consumer::error::ConsumerError; @@ -29,7 +31,8 @@ async fn main() -> Result<(), ConsumerError> { let bind = config.bind(); tokio::task::spawn(async move { - serve_metrics(&bind) + let router = setup_metrics_router(); + serve(router, &bind) .await .expect("failed to start serving metrics"); }); From 232c54084a7db5f6baea351045b04f72e848edf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Fri, 22 Dec 2023 14:24:23 +0100 Subject: [PATCH 087/130] refactor: Only track duration seconds on success --- hook-consumer/src/consumer.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 8e67949..ecfa4b0 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -193,13 +193,8 @@ async fn spawn_webhook_job_processing_task( metrics::increment_counter!("webhook_jobs_total", &labels); tokio::spawn(async move { - let now = tokio::time::Instant::now(); let result = process_webhook_job(client, webhook_job, &retry_policy).await; drop(permit); - - let elapsed = now.elapsed().as_secs_f64(); - metrics::histogram!("webhook_jobs_processing_duration_seconds", elapsed, &labels); - result }) } @@ -229,15 +224,20 @@ async fn process_webhook_job( ("target", webhook_job.target()), ]; - match send_webhook( + let now = tokio::time::Instant::now(); + + let send_result = send_webhook( client, ¶meters.method, ¶meters.url, ¶meters.headers, parameters.body.clone(), ) - .await - { + .await; + + let elapsed = now.elapsed().as_secs_f64(); + + match send_result { Ok(_) => { webhook_job .complete() @@ -245,6 +245,7 @@ async fn process_webhook_job( .map_err(|error| ConsumerError::PgJobError(error.to_string()))?; metrics::increment_counter!("webhook_jobs_completed", &labels); + metrics::histogram!("webhook_jobs_processing_duration_seconds", elapsed, &labels); Ok(()) } From 83426b8613c82ef26098339f04135f35b13c1bf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 3 Jan 2024 11:06:30 +0100 Subject: [PATCH 088/130] feat: Implement support for retrying jobs to different queue (#19) Co-authored-by: Brett Hoerner --- .dockerignore | 5 + .github/workflows/docker-hook-consumer.yml | 63 +++ .github/workflows/docker-hook-janitor.yml | 63 +++ .github/workflows/docker-hook-producer.yml | 63 +++ Dockerfile | 31 ++ hook-common/src/pgqueue.rs | 522 +++++++++++++-------- hook-common/src/retry.rs | 208 +++++++- hook-consumer/src/config.rs | 3 + hook-consumer/src/consumer.rs | 15 +- hook-consumer/src/main.rs | 9 +- 10 files changed, 747 insertions(+), 235 deletions(-) create mode 100644 .dockerignore create mode 
100644 .github/workflows/docker-hook-consumer.yml create mode 100644 .github/workflows/docker-hook-janitor.yml create mode 100644 .github/workflows/docker-hook-producer.yml create mode 100644 Dockerfile diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..9879089 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +target +docker +.env +.git +.github diff --git a/.github/workflows/docker-hook-consumer.yml b/.github/workflows/docker-hook-consumer.yml new file mode 100644 index 0000000..5975120 --- /dev/null +++ b/.github/workflows/docker-hook-consumer.yml @@ -0,0 +1,63 @@ +name: Build hook-consumer docker image + +on: + workflow_dispatch: + push: + branches: + - 'main' + +permissions: + packages: write + +jobs: + build: + name: build and publish hook-consumer image + runs-on: buildjet-4vcpu-ubuntu-2204-arm + steps: + + - name: Check Out Repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: ghcr.io/hook-consumer + tags: | + type=ref,event=pr + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=sha + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push consumer + id: docker_build_hook_consumer + uses: docker/build-push-action@v4 + with: + context: ./ + file: ./Dockerfile + builder: ${{ steps.buildx.outputs.name }} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: BIN=hook-consumer + + - name: Hook-consumer image digest + run: echo ${{ steps.docker_build_hook_consumer.outputs.digest }} diff --git a/.github/workflows/docker-hook-janitor.yml b/.github/workflows/docker-hook-janitor.yml new file mode 100644 index 0000000..2649821 --- /dev/null +++ b/.github/workflows/docker-hook-janitor.yml @@ -0,0 +1,63 @@ +name: Build hook-janitor docker image + +on: + workflow_dispatch: + push: + branches: + - 'main' + +permissions: + packages: write + +jobs: + build: + name: build and publish hook-janitor image + runs-on: buildjet-4vcpu-ubuntu-2204-arm + steps: + + - name: Check Out Repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: ghcr.io/hook-janitor + tags: | + type=ref,event=pr + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=sha + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push janitor + id: docker_build_hook_janitor + uses: docker/build-push-action@v4 + with: + context: ./ + file: ./Dockerfile + builder: ${{ steps.buildx.outputs.name }} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: BIN=hook-janitor + + - name: Hook-janitor image digest + run: echo ${{ steps.docker_build_hook_janitor.outputs.digest }} diff --git 
a/.github/workflows/docker-hook-producer.yml b/.github/workflows/docker-hook-producer.yml new file mode 100644 index 0000000..d5e131f --- /dev/null +++ b/.github/workflows/docker-hook-producer.yml @@ -0,0 +1,63 @@ +name: Build hook-producer docker image + +on: + workflow_dispatch: + push: + branches: + - 'main' + +permissions: + packages: write + +jobs: + build: + name: build and publish hook-producer image + runs-on: buildjet-4vcpu-ubuntu-2204-arm + steps: + + - name: Check Out Repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: ghcr.io/hook-producer + tags: | + type=ref,event=pr + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=sha + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push producer + id: docker_build_hook_producer + uses: docker/build-push-action@v4 + with: + context: ./ + file: ./Dockerfile + builder: ${{ steps.buildx.outputs.name }} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: BIN=hook-producer + + - name: Hook-producer image digest + run: echo ${{ steps.docker_build_hook_producer.outputs.digest }} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..959fd17 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,31 @@ +FROM docker.io/lukemathwalker/cargo-chef:latest-rust-1.74.0-buster AS chef +ARG BIN +WORKDIR app + +FROM chef AS planner +ARG BIN + +COPY . . +RUN cargo chef prepare --recipe-path recipe.json --bin $BIN + +FROM chef AS builder +ARG BIN + +# Ensure working C compile setup (not installed by default in arm64 images) +RUN apt update && apt install build-essential cmake -y + +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --recipe-path recipe.json + +COPY . . +RUN cargo build --release --bin $BIN + +FROM debian:bullseye-20230320-slim AS runtime +ARG BIN +ENV ENTRYPOINT=/usr/local/bin/$BIN +WORKDIR app + +USER nobody + +COPY --from=builder /app/target/release/$BIN /usr/local/bin +ENTRYPOINT [ $ENTRYPOINT ] diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index a47864d..39a09ce 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -109,42 +109,103 @@ impl Job { self.attempt >= self.max_attempts } - /// Consume Job to retry it. - /// This returns a RetryableJob that can be enqueued by PgQueue. - /// - /// # Arguments - /// - /// * `error`: Any JSON-serializable value to be stored as an error. - fn retry(self, error: E) -> RetryableJob { + /// Consume `Job` to transition it to a `RetryableJob`, i.e. a `Job` that may be retried. + fn retryable(self) -> RetryableJob { RetryableJob { id: self.id, attempt: self.attempt, - error: sqlx::types::Json(error), queue: self.queue, + retry_queue: None, } } - /// Consume Job to complete it. - /// This returns a CompletedJob that can be marked as completed by PgQueue. - fn complete(self) -> CompletedJob { - CompletedJob { + /// Consume `Job` to complete it. + /// A `CompletedJob` is finalized and cannot be used further; it is returned for reporting or inspection. 
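Before the pgqueue refactor continues below, one line in the Dockerfile above deserves a flag: `ENTRYPOINT [ $ENTRYPOINT ]`. Docker's exec form must be valid JSON with double-quoted strings and never expands environment variables, so this value is not valid exec form; the builder falls back to shell form and the container runs `sh -c '[ $ENTRYPOINT ]'`, which is the shell's `[` test builtin rather than the binary, exiting immediately unless the image is always run with an explicit command override. A common workaround, assuming the `ENTRYPOINT` env var set earlier in the stage, is to invoke the shell explicitly so expansion happens at container start:

    ENTRYPOINT ["/bin/sh", "-c", "exec $ENTRYPOINT"]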
+ /// + /// # Arguments + /// + /// * `table`: The table where this job will be marked as completed. + /// * `executor`: Any sqlx::Executor that can execute the UPDATE query required to mark this `Job` as completed. + async fn complete<'c, E>(self, table: &str, executor: E) -> Result + where + E: sqlx::Executor<'c, Database = sqlx::Postgres>, + { + let base_query = format!( + r#" +UPDATE + "{0}" +SET + finished_at = NOW(), + status = 'completed'::job_status +WHERE + "{0}".id = $2 + AND queue = $1 +RETURNING + "{0}".* + "#, + table + ); + + sqlx::query(&base_query) + .bind(&self.queue) + .bind(self.id) + .execute(executor) + .await?; + + Ok(CompletedJob { id: self.id, queue: self.queue, - } + }) } - /// Consume Job to fail it. - /// This returns a FailedJob that can be marked as failed by PgQueue. + /// Consume `Job` to fail it. + /// A `FailedJob` is finalized and cannot be used further; it is returned for reporting or inspection. /// /// # Arguments /// /// * `error`: Any JSON-serializable value to be stored as an error. - fn fail(self, error: E) -> FailedJob { - FailedJob { + /// * `table`: The table where this job will be marked as failed. + /// * `executor`: Any sqlx::Executor that can execute the UPDATE query required to mark this `Job` as failed. + async fn fail<'c, E, S>( + self, + error: S, + table: &str, + executor: E, + ) -> Result, sqlx::Error> + where + S: serde::Serialize + std::marker::Sync + std::marker::Send, + E: sqlx::Executor<'c, Database = sqlx::Postgres>, + { + let json_error = sqlx::types::Json(error); + let base_query = format!( + r#" +UPDATE + "{0}" +SET + finished_at = NOW(), + status = 'failed'::job_status + errors = array_append("{0}".errors, $3) +WHERE + "{0}".id = $2 + AND queue = $1 +RETURNING + "{0}".* + "#, + &table + ); + + sqlx::query(&base_query) + .bind(&self.queue) + .bind(self.id) + .bind(&json_error) + .execute(executor) + .await?; + + Ok(FailedJob { id: self.id, - error: sqlx::types::Json(error), + error: json_error, queue: self.queue, - } + }) } } @@ -161,7 +222,8 @@ pub trait PgQueueJob { mut self, error: E, retry_interval: time::Duration, - ) -> Result, PgJobError>>; + queue: &str, + ) -> Result>>; } /// A Job that can be updated in PostgreSQL. 
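One detail in the `fail` query above deserves attention: the SET list has no comma between `status = 'failed'::job_status` and `errors = array_append(...)`, and the pre-refactor query it replaces has the same gap, so PostgreSQL should reject the statement with a syntax error the first time a job is marked failed. The intended SET clause is presumably:

    SET
        finished_at = NOW(),
        status = 'failed'::job_status,
        errors = array_append("{0}".errors, $3)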
@@ -175,28 +237,9 @@ pub struct PgJob { #[async_trait] impl PgQueueJob for PgJob { async fn complete(mut self) -> Result>>> { - let completed_job = self.job.complete(); - - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'completed'::job_status -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&completed_job.queue) - .bind(completed_job.id) - .execute(&mut *self.connection) + let completed_job = self + .job + .complete(&self.table, &mut *self.connection) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -210,31 +253,9 @@ RETURNING mut self, error: E, ) -> Result, PgJobError>>> { - let failed_job = self.job.fail(error); - - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'failed'::job_status - errors = array_append("{0}".errors, $3) -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&failed_job.queue) - .bind(failed_job.id) - .bind(&failed_job.error) - .execute(&mut *self.connection) + let failed_job = self + .job + .fail(error, &self.table, &mut *self.connection) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -248,46 +269,27 @@ RETURNING mut self, error: E, retry_interval: time::Duration, - ) -> Result, PgJobError>>> { + queue: &str, + ) -> Result>>> { if self.job.is_gte_max_attempts() { return Err(PgJobError::RetryInvalidError { job: Box::new(self), error: "Maximum attempts reached".to_owned(), }); } - let retryable_job = self.job.retry(error); - - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'available'::job_status, - scheduled_at = NOW() + $3, - errors = array_append("{0}".errors, $4) -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - sqlx::query(&base_query) - .bind(&retryable_job.queue) - .bind(retryable_job.id) - .bind(retry_interval) - .bind(&retryable_job.error) - .execute(&mut *self.connection) + let retried_job = self + .job + .retryable() + .queue(queue) + .retry(error, retry_interval, &self.table, &mut *self.connection) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), error, })?; - Ok(retryable_job) + Ok(retried_job) } } @@ -305,28 +307,9 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactio async fn complete( mut self, ) -> Result>>> { - let completed_job = self.job.complete(); - - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'completed'::job_status -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&completed_job.queue) - .bind(completed_job.id) - .execute(&mut *self.transaction) + let completed_job = self + .job + .complete(&self.table, &mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -344,34 +327,13 @@ RETURNING Ok(completed_job) } - async fn fail( + async fn fail( mut self, - error: E, - ) -> Result, PgJobError>>> { - let failed_job = self.job.fail(error); - - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'failed'::job_status - errors = array_append("{0}".errors, $3) -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&failed_job.queue) - .bind(failed_job.id) - 
.bind(&failed_job.error) - .execute(&mut *self.transaction) + error: S, + ) -> Result, PgJobError>>> { + let failed_job = self + .job + .fail(error, &self.table, &mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -393,40 +355,22 @@ RETURNING mut self, error: E, retry_interval: time::Duration, - ) -> Result, PgJobError>>> { + queue: &str, + ) -> Result>>> { + // Ideally, the transition to RetryableJob should be fallible. + // But taking ownership of self when we return this error makes things difficult. if self.job.is_gte_max_attempts() { return Err(PgJobError::RetryInvalidError { job: Box::new(self), error: "Maximum attempts reached".to_owned(), }); } - let retryable_job = self.job.retry(error); - - let base_query = format!( - r#" -UPDATE - "{0}" -SET - finished_at = NOW(), - status = 'available'::job_status, - scheduled_at = NOW() + $3, - errors = array_append("{0}".errors, $4) -WHERE - "{0}".id = $2 - AND queue = $1 -RETURNING - "{0}".* - "#, - &self.table - ); - - sqlx::query(&base_query) - .bind(&retryable_job.queue) - .bind(retryable_job.id) - .bind(retry_interval) - .bind(&retryable_job.error) - .execute(&mut *self.transaction) + let retried_job = self + .job + .retryable() + .queue(queue) + .retry(error, retry_interval, &self.table, &mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -441,42 +385,127 @@ RETURNING error, })?; - Ok(retryable_job) + Ok(retried_job) } } /// A Job that has failed but can still be enqueued into a PgQueue to be retried at a later point. /// The time until retry will depend on the PgQueue's RetryPolicy. -pub struct RetryableJob { +pub struct RetryableJob { /// A unique id identifying a job. - id: i64, + pub id: i64, /// A number corresponding to the current job attempt. pub attempt: i32, - /// Any JSON-serializable value to be stored as an error. - pub error: sqlx::types::Json, /// A unique id identifying a job queue. - pub queue: String, + queue: String, + /// An optional separate queue where to enqueue this job when retrying. + retry_queue: Option, +} + +impl RetryableJob { + /// Set the queue for a `RetryableJob`. + /// If not set, `Job` will be retried to its original queue on calling `retry`. + fn queue(mut self, queue: &str) -> Self { + self.retry_queue = Some(queue.to_owned()); + self + } + + /// Return the queue that a `Job` is to be retried into. + fn retry_queue(&self) -> &str { + self.retry_queue.as_ref().unwrap_or(&self.queue) + } + + /// Consume `Job` to retry it. + /// A `RetriedJob` cannot be used further; it is returned for reporting or inspection. + /// + /// # Arguments + /// + /// * `error`: Any JSON-serializable value to be stored as an error. + /// * `retry_interval`: The duration until the `Job` is to be retried again. Used to set `scheduled_at`. + /// * `table`: The table where this job will be marked as completed. + /// * `executor`: Any sqlx::Executor that can execute the UPDATE query required to mark this `Job` as completed. 
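The `executor` parameter described above (and in the signature that follows) is what lets one query helper serve both job flavors. A stripped-down sketch of the idiom, assuming sqlx 0.7 and an illustrative table name:

    use sqlx::Postgres;

    /// Run the completion UPDATE on any Postgres executor: a pool, a plain
    /// connection, or a live transaction.
    async fn mark_completed<'c, E>(executor: E, queue: &str, id: i64) -> Result<(), sqlx::Error>
    where
        E: sqlx::Executor<'c, Database = Postgres>,
    {
        sqlx::query(
            "UPDATE job_queue
             SET finished_at = NOW(), status = 'completed'::job_status
             WHERE id = $2 AND queue = $1",
        )
        .bind(queue)
        .bind(id)
        .execute(executor)
        .await?;

        Ok(())
    }

`PgJob` passes `&mut *self.connection` and `PgTransactionJob` passes `&mut *self.transaction`; both deref to a `PgConnection`, which implements `Executor`, as does `&PgPool`.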
+ async fn retry<'c, S, E>( + self, + error: S, + retry_interval: time::Duration, + table: &str, + executor: E, + ) -> Result + where + S: serde::Serialize + std::marker::Sync + std::marker::Send, + E: sqlx::Executor<'c, Database = sqlx::Postgres>, + { + let json_error = sqlx::types::Json(error); + let base_query = format!( + r#" +UPDATE + "{0}" +SET + finished_at = NOW(), + errors = array_append("{0}".errors, $4), + queue = $5, + status = 'available'::job_status, + scheduled_at = NOW() + $3 +WHERE + "{0}".id = $2 + AND queue = $1 +RETURNING + "{0}".* + "#, + &table + ); + + sqlx::query(&base_query) + .bind(&self.queue) + .bind(self.id) + .bind(retry_interval) + .bind(&json_error) + .bind(self.retry_queue()) + .execute(executor) + .await?; + + Ok(RetriedJob { + id: self.id, + table: table.to_owned(), + queue: self.queue, + retry_queue: self.retry_queue.to_owned(), + }) + } } -/// A Job that has completed to be enqueued into a PgQueue and marked as completed. +/// State a `Job` is transitioned to after successfully completing. +#[derive(Debug)] pub struct CompletedJob { /// A unique id identifying a job. - id: i64, + pub id: i64, /// A unique id identifying a job queue. pub queue: String, } -/// A Job that has failed to be enqueued into a PgQueue and marked as failed. +/// State a `Job` is transitioned to after it has been enqueued for retrying. +#[derive(Debug)] +pub struct RetriedJob { + /// A unique id identifying a job. + pub id: i64, + /// A unique id identifying a job queue. + pub queue: String, + pub retry_queue: Option, + pub table: String, +} + +/// State a `Job` is transitioned to after exhausting all of their attempts. +#[derive(Debug)] pub struct FailedJob { /// A unique id identifying a job. - id: i64, + pub id: i64, /// Any JSON-serializable value to be stored as an error. pub error: sqlx::types::Json, /// A unique id identifying a job queue. pub queue: String, } -/// A NewJob to be enqueued into a PgQueue. +/// This struct represents a new job being created to be enqueued into a `PgQueue`. +#[derive(Debug)] pub struct NewJob { /// The maximum amount of attempts this NewJob has to complete. pub max_attempts: i32, @@ -513,14 +542,13 @@ pub struct PgQueue { pub type PgQueueResult = std::result::Result; impl PgQueue { - /// Initialize a new PgQueue backed by table in PostgreSQL. + /// Initialize a new PgQueue backed by table in PostgreSQL by intializing a connection pool to the database in `url`. /// /// # Arguments /// /// * `queue_name`: A name for the queue we are going to initialize. /// * `table_name`: The name for the table the queue will use in PostgreSQL. /// * `url`: A URL pointing to where the PostgreSQL database is hosted. - /// * `worker_name`: The name of the worker that is operating with this queue. pub async fn new(queue_name: &str, table_name: &str, url: &str) -> PgQueueResult { let name = queue_name.to_owned(); let table = table_name.to_owned(); @@ -531,6 +559,13 @@ impl PgQueue { Ok(Self { name, pool, table }) } + /// Initialize a new PgQueue backed by table in PostgreSQL from a provided connection pool. + /// + /// # Arguments + /// + /// * `queue_name`: A name for the queue we are going to initialize. + /// * `table_name`: The name for the table the queue will use in PostgreSQL. + /// * `pool`: A database connection pool to be used by this queue. pub async fn new_from_pool( queue_name: &str, table_name: &str, @@ -542,7 +577,8 @@ impl PgQueue { Ok(Self { name, pool, table }) } - /// Dequeue a Job from this PgQueue to work on it. 
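Taken together, these states form a small typestate machine: a `Job` is consumed to produce a `RetryableJob`, which is consumed to produce a terminal `RetriedJob`, so the compiler rules out completing or retrying the same job twice. A stripped-down sketch of the idea, with the database work elided:

    struct Running { id: i64 }
    struct Retryable { id: i64, retry_queue: Option<String> }
    struct Retried { id: i64 }

    impl Running {
        // Consuming self means a Running job can transition exactly once.
        fn retryable(self) -> Retryable {
            Retryable { id: self.id, retry_queue: None }
        }
    }

    impl Retryable {
        // Builder-style override for the destination queue.
        fn queue(mut self, queue: &str) -> Self {
            self.retry_queue = Some(queue.to_owned());
            self
        }

        // In the real code this is where the UPDATE ... queue = $5,
        // scheduled_at = NOW() + $3 runs before the terminal state is built.
        fn retry(self) -> Retried {
            Retried { id: self.id }
        }
    }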
+ /// Dequeue a `Job` from this `PgQueue`. + /// The `Job` will be updated to `'running'` status, so any other `dequeue` calls will skip it. pub async fn dequeue< J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, M: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, @@ -620,7 +656,9 @@ RETURNING } } - /// Dequeue a Job from this PgQueue to work on it. + /// Dequeue a `Job` from this `PgQueue` and hold the transaction. + /// Any other `dequeue_tx` calls will skip rows locked, so by holding a transaction we ensure only one worker can dequeue a job. + /// Holding a transaction open can have performance implications, but it means no `'running'` state is required. pub async fn dequeue_tx< 'a, J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, @@ -692,8 +730,8 @@ RETURNING } } - /// Enqueue a Job into this PgQueue. - /// We take ownership of NewJob to enforce a specific NewJob is only enqueued once. + /// Enqueue a `NewJob` into this PgQueue. + /// We take ownership of `NewJob` to enforce a specific `NewJob` is only enqueued once. pub async fn enqueue< J: serde::Serialize + std::marker::Sync, M: serde::Serialize + std::marker::Sync, @@ -875,19 +913,16 @@ mod tests { let job_metadata = JobMetadata::default(); let worker_id = worker_id(); let new_job = NewJob::new(2, job_metadata, job_parameters, &job_target); - let retry_policy = RetryPolicy { - backoff_coefficient: 0, - initial_interval: time::Duration::from_secs(0), - maximum_interval: None, - }; + let table_name = "job_queue".to_owned(); + let queue_name = "test_can_retry_job_with_remaining_attempts".to_owned(); - let queue = PgQueue::new_from_pool( - "test_can_retry_job_with_remaining_attempts", - "job_queue", - db, - ) - .await - .expect("failed to connect to local test postgresql database"); + let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0)) + .queue(&queue_name) + .provide(); + + let queue = PgQueue::new_from_pool(&queue_name, &table_name, db) + .await + .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); let job: PgJob = queue @@ -895,11 +930,18 @@ mod tests { .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); - let retry_interval = retry_policy.time_until_next_retry(job.job.attempt as u32, None); + + let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); + let retry_queue = retry_policy.retry_queue(&job.job.queue).to_owned(); let _ = job - .retry("a very reasonable failure reason", retry_interval) + .retry( + "a very reasonable failure reason", + retry_interval, + &retry_queue, + ) .await .expect("failed to retry job"); + let retried_job: PgJob = queue .dequeue(&worker_id) .await @@ -918,6 +960,72 @@ mod tests { assert_eq!(retried_job.job.target, job_target); } + #[sqlx::test(migrations = "../migrations")] + async fn test_can_retry_job_to_different_queue(db: PgPool) { + let job_target = job_target(); + let job_parameters = JobParameters::default(); + let job_metadata = JobMetadata::default(); + let worker_id = worker_id(); + let new_job = NewJob::new(2, job_metadata, job_parameters, &job_target); + let table_name = "job_queue".to_owned(); + let queue_name = "test_can_retry_job_to_different_queue".to_owned(); + let retry_queue_name = "test_can_retry_job_to_different_queue_retry".to_owned(); + + let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0)) + 
.queue(&retry_queue_name) + .provide(); + + let queue = PgQueue::new_from_pool(&queue_name, &table_name, db.clone()) + .await + .expect("failed to connect to queue in local test postgresql database"); + + queue.enqueue(new_job).await.expect("failed to enqueue job"); + let job: PgJob = queue + .dequeue(&worker_id) + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + + let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); + let retry_queue = retry_policy.retry_queue(&job.job.queue).to_owned(); + let _ = job + .retry( + "a very reasonable failure reason", + retry_interval, + &retry_queue, + ) + .await + .expect("failed to retry job"); + + let retried_job_not_found: Option> = queue + .dequeue(&worker_id) + .await + .expect("failed to dequeue job"); + + assert!(retried_job_not_found.is_none()); + + let queue = PgQueue::new_from_pool(&retry_queue_name, &table_name, db) + .await + .expect("failed to connect to retry queue in local test postgresql database"); + + let retried_job: PgJob = queue + .dequeue(&worker_id) + .await + .expect("failed to dequeue job") + .expect("job not found in retry queue"); + + assert_eq!(retried_job.job.attempt, 2); + assert!(retried_job.job.attempted_by.contains(&worker_id)); + assert_eq!(retried_job.job.attempted_by.len(), 2); + assert_eq!(retried_job.job.max_attempts, 2); + assert_eq!( + *retried_job.job.parameters.as_ref(), + JobParameters::default() + ); + assert_eq!(retried_job.job.status, JobStatus::Running); + assert_eq!(retried_job.job.target, job_target); + } + #[sqlx::test(migrations = "../migrations")] #[should_panic(expected = "failed to retry job")] async fn test_cannot_retry_job_without_remaining_attempts(db: PgPool) { @@ -926,11 +1034,7 @@ mod tests { let job_metadata = JobMetadata::default(); let worker_id = worker_id(); let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); - let retry_policy = RetryPolicy { - backoff_coefficient: 0, - initial_interval: time::Duration::from_secs(0), - maximum_interval: None, - }; + let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0)).provide(); let queue = PgQueue::new_from_pool( "test_cannot_retry_job_without_remaining_attempts", @@ -947,8 +1051,10 @@ mod tests { .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue"); - let retry_interval = retry_policy.time_until_next_retry(job.job.attempt as u32, None); - job.retry("a very reasonable failure reason", retry_interval) + + let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); + + job.retry("a very reasonable failure reason", retry_interval, "any") .await .expect("failed to retry job"); } diff --git a/hook-common/src/retry.rs b/hook-common/src/retry.rs index f72b0d1..b00f967 100644 --- a/hook-common/src/retry.rs +++ b/hook-common/src/retry.rs @@ -1,7 +1,10 @@ +//! # Retry +//! +//! Module providing a `RetryPolicy` struct to configure job retrying. use std::time; -#[derive(Copy, Clone, Debug)] -/// The retry policy that PgQueue will use to determine how to set scheduled_at when enqueuing a retry. +#[derive(Clone, Debug)] +/// A retry policy to determine retry parameters for a job. pub struct RetryPolicy { /// Coefficient to multiply initial_interval with for every past attempt. pub backoff_coefficient: u32, @@ -9,47 +12,214 @@ pub struct RetryPolicy { pub initial_interval: time::Duration, /// The maximum possible backoff between retries. pub maximum_interval: Option, + /// An optional queue to send WebhookJob retries to. 
+ pub queue: Option, } impl RetryPolicy { - pub fn new( - backoff_coefficient: u32, - initial_interval: time::Duration, - maximum_interval: Option, - ) -> Self { - Self { - backoff_coefficient, - initial_interval, - maximum_interval, - } + /// Initialize a `RetryPolicyBuilder`. + pub fn build(backoff_coefficient: u32, initial_interval: time::Duration) -> RetryPolicyBuilder { + RetryPolicyBuilder::new(backoff_coefficient, initial_interval) } - /// Calculate the time until the next retry for a given RetryableJob. - pub fn time_until_next_retry( + /// Determine interval for retrying at a given attempt number. + /// If not `None`, this method will respect `preferred_retry_interval` as long as it falls within `candidate_interval <= preferred_retry_interval <= maximum_interval`. + pub fn retry_interval( &self, attempt: u32, preferred_retry_interval: Option, ) -> time::Duration { - let candidate_interval = self.initial_interval * self.backoff_coefficient.pow(attempt); + let candidate_interval = + self.initial_interval * self.backoff_coefficient.pow(attempt.saturating_sub(1)); match (preferred_retry_interval, self.maximum_interval) { - (Some(duration), Some(max_interval)) => std::cmp::min( - std::cmp::max(std::cmp::min(candidate_interval, max_interval), duration), - max_interval, - ), + (Some(duration), Some(max_interval)) => { + let min_interval_allowed = std::cmp::min(candidate_interval, max_interval); + + if min_interval_allowed <= duration && duration <= max_interval { + duration + } else { + min_interval_allowed + } + } (Some(duration), None) => std::cmp::max(candidate_interval, duration), (None, Some(max_interval)) => std::cmp::min(candidate_interval, max_interval), (None, None) => candidate_interval, } } + + /// Determine the queue to be used for retrying. + /// Only whether a queue is configured in this RetryPolicy is used to determine which queue to use for retrying. + /// This may be extended in the future to support more decision parameters. + pub fn retry_queue<'s>(&'s self, current_queue: &'s str) -> &'s str { + if let Some(new_queue) = &self.queue { + new_queue + } else { + current_queue + } + } } impl Default for RetryPolicy { + fn default() -> Self { + RetryPolicyBuilder::default().provide() + } +} + +/// Builder pattern struct to provide a `RetryPolicy`. +pub struct RetryPolicyBuilder { + /// Coefficient to multiply initial_interval with for every past attempt. + pub backoff_coefficient: u32, + /// The backoff interval for the first retry. + pub initial_interval: time::Duration, + /// The maximum possible backoff between retries. + pub maximum_interval: Option, + /// An optional queue to send WebhookJob retries to. + pub queue: Option, +} + +impl Default for RetryPolicyBuilder { fn default() -> Self { Self { backoff_coefficient: 2, initial_interval: time::Duration::from_secs(1), maximum_interval: None, + queue: None, + } + } +} + +impl RetryPolicyBuilder { + pub fn new(backoff_coefficient: u32, initial_interval: time::Duration) -> Self { + Self { + backoff_coefficient, + initial_interval, + ..RetryPolicyBuilder::default() + } + } + + pub fn maximum_interval(mut self, interval: time::Duration) -> RetryPolicyBuilder { + self.maximum_interval = Some(interval); + self + } + + pub fn queue(mut self, queue: &str) -> RetryPolicyBuilder { + self.queue = Some(queue.to_owned()); + self + } + + /// Provide a `RetryPolicy` according to build parameters provided thus far. 
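The interval logic above is standard exponential backoff with two clamps: a configured ceiling, and an optional preferred interval (e.g. a parsed Retry-After value) that is honored only when it falls inside the allowed range. The core computation, as a hedged standalone sketch with the preferred-interval branch omitted:

    use std::time::Duration;

    /// Backoff for a 1-based attempt: initial * coefficient^(attempt - 1),
    /// clamped to an optional maximum.
    fn backoff(initial: Duration, coefficient: u32, attempt: u32, maximum: Option<Duration>) -> Duration {
        let candidate = initial * coefficient.saturating_pow(attempt.saturating_sub(1));
        match maximum {
            Some(max) => candidate.min(max),
            None => candidate,
        }
    }

    #[test]
    fn backoff_matches_policy_tests() {
        // Mirrors the tests below: 2s initial, coefficient 2, 4s ceiling.
        let max = Some(Duration::from_secs(4));
        assert_eq!(backoff(Duration::from_secs(2), 2, 1, max), Duration::from_secs(2));
        assert_eq!(backoff(Duration::from_secs(2), 2, 2, max), Duration::from_secs(4));
        assert_eq!(backoff(Duration::from_secs(2), 2, 3, max), Duration::from_secs(4));
    }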
+ pub fn provide(&self) -> RetryPolicy { + RetryPolicy { + backoff_coefficient: self.backoff_coefficient, + initial_interval: self.initial_interval, + maximum_interval: self.maximum_interval, + queue: self.queue.clone(), } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_constant_retry_interval() { + let retry_policy = RetryPolicy::build(1, time::Duration::from_secs(2)).provide(); + let first_interval = retry_policy.retry_interval(1, None); + let second_interval = retry_policy.retry_interval(2, None); + let third_interval = retry_policy.retry_interval(3, None); + + assert_eq!(first_interval, time::Duration::from_secs(2)); + assert_eq!(second_interval, time::Duration::from_secs(2)); + assert_eq!(third_interval, time::Duration::from_secs(2)); + } + + #[test] + fn test_retry_interval_never_exceeds_maximum() { + let retry_policy = RetryPolicy::build(2, time::Duration::from_secs(2)) + .maximum_interval(time::Duration::from_secs(4)) + .provide(); + let first_interval = retry_policy.retry_interval(1, None); + let second_interval = retry_policy.retry_interval(2, None); + let third_interval = retry_policy.retry_interval(3, None); + let fourth_interval = retry_policy.retry_interval(4, None); + + assert_eq!(first_interval, time::Duration::from_secs(2)); + assert_eq!(second_interval, time::Duration::from_secs(4)); + assert_eq!(third_interval, time::Duration::from_secs(4)); + assert_eq!(fourth_interval, time::Duration::from_secs(4)); + } + + #[test] + fn test_retry_interval_increases_with_coefficient() { + let retry_policy = RetryPolicy::build(2, time::Duration::from_secs(2)).provide(); + let first_interval = retry_policy.retry_interval(1, None); + let second_interval = retry_policy.retry_interval(2, None); + let third_interval = retry_policy.retry_interval(3, None); + + assert_eq!(first_interval, time::Duration::from_secs(2)); + assert_eq!(second_interval, time::Duration::from_secs(4)); + assert_eq!(third_interval, time::Duration::from_secs(8)); + } + + #[test] + fn test_retry_interval_respects_preferred() { + let retry_policy = RetryPolicy::build(1, time::Duration::from_secs(2)).provide(); + let preferred = time::Duration::from_secs(999); + let first_interval = retry_policy.retry_interval(1, Some(preferred)); + let second_interval = retry_policy.retry_interval(2, Some(preferred)); + let third_interval = retry_policy.retry_interval(3, Some(preferred)); + + assert_eq!(first_interval, preferred); + assert_eq!(second_interval, preferred); + assert_eq!(third_interval, preferred); + } + + #[test] + fn test_retry_interval_ignores_small_preferred() { + let retry_policy = RetryPolicy::build(1, time::Duration::from_secs(5)).provide(); + let preferred = time::Duration::from_secs(2); + let first_interval = retry_policy.retry_interval(1, Some(preferred)); + let second_interval = retry_policy.retry_interval(2, Some(preferred)); + let third_interval = retry_policy.retry_interval(3, Some(preferred)); + + assert_eq!(first_interval, time::Duration::from_secs(5)); + assert_eq!(second_interval, time::Duration::from_secs(5)); + assert_eq!(third_interval, time::Duration::from_secs(5)); + } + + #[test] + fn test_retry_interval_ignores_large_preferred() { + let retry_policy = RetryPolicy::build(2, time::Duration::from_secs(2)) + .maximum_interval(time::Duration::from_secs(4)) + .provide(); + let preferred = time::Duration::from_secs(10); + let first_interval = retry_policy.retry_interval(1, Some(preferred)); + let second_interval = retry_policy.retry_interval(2, Some(preferred)); + let third_interval = 
retry_policy.retry_interval(3, Some(preferred));
+
+        assert_eq!(first_interval, time::Duration::from_secs(2));
+        assert_eq!(second_interval, time::Duration::from_secs(4));
+        assert_eq!(third_interval, time::Duration::from_secs(4));
+    }
+
+    #[test]
+    fn test_returns_retry_queue_if_set() {
+        let retry_queue_name = "retry_queue".to_owned();
+        let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0))
+            .queue(&retry_queue_name)
+            .provide();
+        let current_queue = "queue".to_owned();
+
+        assert_eq!(retry_policy.retry_queue(&current_queue), retry_queue_name);
+    }
+
+    #[test]
+    fn test_returns_queue_if_retry_queue_not_set() {
+        let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0)).provide();
+        let current_queue = "queue".to_owned();
+
+        assert_eq!(retry_policy.retry_queue(&current_queue), current_queue);
+    }
+}
diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs
index 9169512..6525b25 100644
--- a/hook-consumer/src/config.rs
+++ b/hook-consumer/src/config.rs
@@ -72,4 +72,7 @@ pub struct RetryPolicyConfig {
 
     #[envconfig(default = "100000")]
     pub maximum_interval: EnvMsDuration,
+
+    #[envconfig(default = "default")]
+    pub retry_queue_name: String,
 }
diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs
index 99e97f0..42cb1ff 100644
--- a/hook-consumer/src/consumer.rs
+++ b/hook-consumer/src/consumer.rs
@@ -13,7 +13,7 @@ use tokio::sync;
 
 use crate::error::{ConsumerError, WebhookError};
 
-/// A WebhookJob is any PgQueueJob with WebhookJobParameters and WebhookJobMetadata.
+/// A WebhookJob is any `PgQueueJob` with `WebhookJobParameters` and `WebhookJobMetadata`.
 trait WebhookJob: PgQueueJob + std::marker::Send {
     fn parameters(&self) -> &WebhookJobParameters;
     fn metadata(&self) -> &WebhookJobMetadata;
@@ -147,7 +147,7 @@ impl<'p> WebhookConsumer<'p> {
                     spawn_webhook_job_processing_task(
                         self.client.clone(),
                         semaphore.clone(),
-                        self.retry_policy,
+                        self.retry_policy.clone(),
                         webhook_job,
                     )
                     .await;
@@ -155,10 +155,11 @@ impl<'p> WebhookConsumer<'p> {
         } else {
             loop {
                 let webhook_job = self.wait_for_job().await?;
+
                 spawn_webhook_job_processing_task(
                     self.client.clone(),
                     semaphore.clone(),
-                    self.retry_policy,
+                    self.retry_policy.clone(),
                     webhook_job,
                 )
                 .await;
@@ -173,6 +174,7 @@ impl<'p> WebhookConsumer<'p> {
 ///
 /// * `client`: An HTTP client to execute the webhook job request.
 /// * `semaphore`: A semaphore used for rate limiting purposes. This function will panic if this semaphore is closed.
+/// * `retry_policy`: The retry policy used to set retry parameters if a job fails and has remaining attempts.
 /// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`.
 async fn spawn_webhook_job_processing_task<W: WebhookJob + 'static>(
     client: reqwest::Client,
@@ -212,6 +214,7 @@ async fn spawn_webhook_job_processing_task<W: WebhookJob + 'static>(
 ///
 /// * `client`: An HTTP client to execute the webhook job request.
 /// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`.
+/// * `retry_policy`: The retry policy used to set retry parameters if a job fails and has remaining attempts.
 async fn process_webhook_job<W: WebhookJob>(
     client: reqwest::Client,
     webhook_job: W,
@@ -281,10 +284,12 @@ async fn process_webhook_job<W: WebhookJob>(
         }
         Err(WebhookError::RetryableRequestError { error, retry_after }) => {
             let retry_interval =
-                retry_policy.time_until_next_retry(webhook_job.attempt() as u32, retry_after);
+                retry_policy.retry_interval(webhook_job.attempt() as u32, retry_after);
+            let current_queue = webhook_job.queue();
+            let retry_queue = retry_policy.retry_queue(&current_queue);
 
             match webhook_job
-                .retry(WebhookJobError::from(&error), retry_interval)
+                .retry(WebhookJobError::from(&error), retry_interval, retry_queue)
                 .await
             {
                 Ok(_) => {
diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs
index 38c2ee2..c71b8eb 100644
--- a/hook-consumer/src/main.rs
+++ b/hook-consumer/src/main.rs
@@ -1,3 +1,4 @@
+//! Consume `PgQueue` jobs to run webhook calls.
 use envconfig::Envconfig;
 
 use hook_common::{
@@ -11,11 +12,13 @@ use hook_consumer::error::ConsumerError;
 async fn main() -> Result<(), ConsumerError> {
     let config = Config::init_from_env().expect("Invalid configuration:");
 
-    let retry_policy = RetryPolicy::new(
+    let retry_policy = RetryPolicy::build(
         config.retry_policy.backoff_coefficient,
         config.retry_policy.initial_interval.0,
-        Some(config.retry_policy.maximum_interval.0),
-    );
+    )
+    .maximum_interval(config.retry_policy.maximum_interval.0)
+    .queue(&config.retry_policy.retry_queue_name)
+    .provide();
 
     let queue = PgQueue::new(&config.queue_name, &config.table_name, &config.database_url)
         .await
         .expect("failed to initialize queue");

From 76476efd75bcd8db3decae7454821ec3fdf17eed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Wed, 3 Jan 2024 12:11:03 +0100
Subject: [PATCH 089/130] fix: Add org name to image path (#22)

---
 .github/workflows/docker-hook-consumer.yml | 2 +-
 .github/workflows/docker-hook-janitor.yml  | 2 +-
 .github/workflows/docker-hook-producer.yml | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/docker-hook-consumer.yml b/.github/workflows/docker-hook-consumer.yml
index 5975120..db92074 100644
--- a/.github/workflows/docker-hook-consumer.yml
+++ b/.github/workflows/docker-hook-consumer.yml
@@ -25,7 +25,7 @@ jobs:
               id: meta
               uses: docker/metadata-action@v4
               with:
-                  images: ghcr.io/hook-consumer
+                  images: ghcr.io/posthog/hook-consumer
                   tags: |
                       type=ref,event=pr
                       type=ref,event=branch
diff --git a/.github/workflows/docker-hook-janitor.yml b/.github/workflows/docker-hook-janitor.yml
index 2649821..b426d51 100644
--- a/.github/workflows/docker-hook-janitor.yml
+++ b/.github/workflows/docker-hook-janitor.yml
@@ -25,7 +25,7 @@ jobs:
              id: meta
              uses: docker/metadata-action@v4
              with:
-                 images: ghcr.io/hook-janitor
+                 images: ghcr.io/posthog/hook-janitor
                  tags: |
                      type=ref,event=pr
                      type=ref,event=branch
diff --git a/.github/workflows/docker-hook-producer.yml b/.github/workflows/docker-hook-producer.yml
index d5e131f..ec2594f 100644
--- a/.github/workflows/docker-hook-producer.yml
+++ b/.github/workflows/docker-hook-producer.yml
@@ -25,7 +25,7 @@ jobs:
              id: meta
              uses: docker/metadata-action@v4
              with:
-                 images: ghcr.io/hook-producer
+                 images: ghcr.io/posthog/hook-producer
                  tags: |
                      type=ref,event=pr
                      type=ref,event=branch
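
One step back before the deployment changes: the consumer patches above replace `RetryPolicy::new` with a builder that can also route retries to a dedicated queue. As a sketch of the resulting API, in the style of the unit tests above (illustrative values only; assumes `RetryPolicy` and `std::time` are in scope as in hook-common's retry module, and that `queue`/`retry_queue` take `&str` as the tests suggest):

    #[test]
    fn retry_policy_builder_sketch() {
        // Backoff doubles from 2s and is capped at 60s; failed jobs are
        // re-enqueued on a dedicated "retries" queue instead of their own.
        let retry_policy = RetryPolicy::build(2, time::Duration::from_secs(2))
            .maximum_interval(time::Duration::from_secs(60))
            .queue("retries")
            .provide();

        // Attempt 3 backs off 2s * 2^2 = 8s. The second argument is an
        // optional target-preferred interval (e.g. from a Retry-After
        // header), honored only within the policy's bounds; None falls
        // back to the computed backoff.
        assert_eq!(
            retry_policy.retry_interval(3, None),
            time::Duration::from_secs(8)
        );

        // A job dequeued from "default" is retried on "retries".
        assert_eq!(retry_policy.retry_queue("default"), "retries");
    }

From a277c452190ab7397e70508b3441e1c94364f66c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 5 Jan 2024 11:28:19 +0100
Subject: [PATCH 090/130] refactor: Deploy the migration container too (#24)

---
 .github/workflows/docker-migrator.yml | 62 +++++++++++++++++++++++++++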
Dockerfile.migrate | 16 +++++++ Dockerfile.sqlx | 5 --- bin/migrate | 4 ++ docker-compose.yml | 6 +-- 5 files changed, 83 insertions(+), 10 deletions(-) create mode 100644 .github/workflows/docker-migrator.yml create mode 100644 Dockerfile.migrate delete mode 100644 Dockerfile.sqlx create mode 100755 bin/migrate diff --git a/.github/workflows/docker-migrator.yml b/.github/workflows/docker-migrator.yml new file mode 100644 index 0000000..73d7afa --- /dev/null +++ b/.github/workflows/docker-migrator.yml @@ -0,0 +1,62 @@ +name: Build hook-migrator docker image + +on: + workflow_dispatch: + push: + branches: + - 'main' + +permissions: + packages: write + +jobs: + build: + name: build and publish hook-migrator image + runs-on: buildjet-4vcpu-ubuntu-2204-arm + steps: + + - name: Check Out Repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: ghcr.io/posthog/hook-migrator + tags: | + type=ref,event=pr + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=sha + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push migrator + id: docker_build_hook_migrator + uses: docker/build-push-action@v4 + with: + context: ./ + file: ./Dockerfile.migrate + builder: ${{ steps.buildx.outputs.name }} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Hook-migrator image digest + run: echo ${{ steps.docker_build_hook_migrator.outputs.digest }} diff --git a/Dockerfile.migrate b/Dockerfile.migrate new file mode 100644 index 0000000..4779077 --- /dev/null +++ b/Dockerfile.migrate @@ -0,0 +1,16 @@ +FROM docker.io/library/rust:1.74.0-buster as builder + +RUN apt update && apt install build-essential cmake -y +RUN cargo install sqlx-cli@0.7.3 --no-default-features --features native-tls,postgres --root /app/target/release/ + +FROM debian:bullseye-20230320-slim AS runtime +WORKDIR /sqlx + +ADD bin /sqlx/bin/ +ADD migrations /sqlx/migrations/ + +COPY --from=builder /app/target/release/bin/sqlx /usr/local/bin + +RUN chmod +x ./bin/migrate + +CMD ["./bin/migrate"] diff --git a/Dockerfile.sqlx b/Dockerfile.sqlx deleted file mode 100644 index c55dfaa..0000000 --- a/Dockerfile.sqlx +++ /dev/null @@ -1,5 +0,0 @@ -FROM docker.io/library/rust:1.74.0 - -RUN cargo install sqlx-cli --no-default-features --features native-tls,postgres - -WORKDIR /sqlx diff --git a/bin/migrate b/bin/migrate new file mode 100755 index 0000000..6e36fc4 --- /dev/null +++ b/bin/migrate @@ -0,0 +1,4 @@ +#!/bin/sh + +sqlx database create +sqlx migrate run diff --git a/docker-compose.yml b/docker-compose.yml index afaf48e..d951864 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,18 +19,14 @@ services: container_name: setup-test-db build: context: . 
-        dockerfile: Dockerfile.sqlx
+        dockerfile: Dockerfile.migrate
         restart: on-failure
-        command: >
-            sh -c "sqlx database create && sqlx migrate run"
         depends_on:
             db:
                 condition: service_healthy
                 restart: true
         environment:
             DATABASE_URL: postgres://posthog:posthog@db:5432/test_database
-        volumes:
-            - ./migrations:/sqlx/migrations/
 
     echo_server:
         image: docker.io/library/caddy:2

From 3b5229dca92740e0fe0a1af833144c433f109278 Mon Sep 17 00:00:00 2001
From: Brett Hoerner
Date: Fri, 5 Jan 2024 08:29:24 -0700
Subject: [PATCH 091/130] Change dequeue ORDER BY and rename finished_at (#23)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Tomás Farías Santana
---
 hook-common/src/pgqueue.rs                    | 12 +++++++-----
 hook-janitor/src/fixtures/webhook_cleanup.sql |  2 +-
 hook-janitor/src/webhooks.rs                  |  6 +++---
 migrations/20231129172339_job_queue_table.sql | 12 ++++++------
 4 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs
index 39a09ce..ec5b684 100644
--- a/hook-common/src/pgqueue.rs
+++ b/hook-common/src/pgqueue.rs
@@ -135,7 +135,7 @@ impl Job {
 UPDATE
     "{0}"
 SET
-    finished_at = NOW(),
+    last_attempt_finished_at = NOW(),
     status = 'completed'::job_status
 WHERE
     "{0}".id = $2
@@ -182,7 +182,7 @@ RETURNING
 UPDATE
     "{0}"
 SET
-    finished_at = NOW(),
+    last_attempt_finished_at = NOW(),
     status = 'failed'::job_status
     errors = array_append("{0}".errors, $3)
 WHERE
@@ -441,7 +441,7 @@ impl RetryableJob {
 UPDATE
     "{0}"
 SET
-    finished_at = NOW(),
+    last_attempt_finished_at = NOW(),
     errors = array_append("{0}".errors, $4),
     queue = $5,
     status = 'available'::job_status,
@@ -606,7 +606,8 @@ WITH available_in_queue AS (
         AND scheduled_at <= NOW()
         AND queue = $1
     ORDER BY
-        id
+        attempt,
+        scheduled_at
     LIMIT 1
     FOR UPDATE SKIP LOCKED
 )
@@ -687,7 +688,8 @@ WITH available_in_queue AS (
         AND scheduled_at <= NOW()
         AND queue = $1
     ORDER BY
-        id
+        attempt,
+        scheduled_at
     LIMIT 1
     FOR UPDATE SKIP LOCKED
 )
diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql
index 4aeb231..bddaf26 100644
--- a/hook-janitor/src/fixtures/webhook_cleanup.sql
+++ b/hook-janitor/src/fixtures/webhook_cleanup.sql
@@ -2,7 +2,7 @@ INSERT INTO
     job_queue (
         errors,
         metadata,
-        finished_at,
+        last_attempt_finished_at,
         parameters,
         queue,
         status,
diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs
index 57b984d..b14dba1 100644
--- a/hook-janitor/src/webhooks.rs
+++ b/hook-janitor/src/webhooks.rs
@@ -198,13 +198,13 @@ impl WebhookCleaner {
     async fn get_completed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result<Vec<CompletedRow>> {
         let base_query = format!(
             r#"
-            SELECT DATE_TRUNC('hour', finished_at) AS hour,
+            SELECT DATE_TRUNC('hour', last_attempt_finished_at) AS hour,
                    (metadata->>'team_id')::bigint AS team_id,
                    (metadata->>'plugin_config_id')::bigint AS plugin_config_id,
                    count(*) as successes
             FROM {0}
             WHERE status = 'completed'
-            AND queue = $1
+              AND queue = $1
             GROUP BY hour, team_id, plugin_config_id
             ORDER BY hour, team_id, plugin_config_id;
             "#,
@@ -223,7 +223,7 @@ impl WebhookCleaner {
     async fn get_failed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result<Vec<FailedRow>> {
         let base_query = format!(
             r#"
-            SELECT DATE_TRUNC('hour', finished_at) AS hour,
+            SELECT DATE_TRUNC('hour', last_attempt_finished_at) AS hour,
                    (metadata->>'team_id')::bigint AS team_id,
                    (metadata->>'plugin_config_id')::bigint AS plugin_config_id,
                    errors[array_upper(errors, 1)] AS last_error,
                    count(*) as failures
             FROM {0}
             WHERE status = 'failed'
               AND queue = $1
             GROUP BY hour, team_id, plugin_config_id, last_error
             ORDER BY hour, team_id, plugin_config_id, last_error;
             "#,
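
Two behavioural notes on the pgqueue hunks above: ordering by (attempt, scheduled_at) makes a queue hand out first-attempt jobs before retries, oldest first among equals, and the FOR UPDATE SKIP LOCKED clause lets concurrent consumers each lock a different candidate row without blocking one another. The same dequeue shape as a standalone sketch (a hypothetical helper, not patch code; sqlx 0.7 as pinned in the workspace, with only the table and column names taken from the migration):

    use sqlx::PgPool;

    // Sketch only: select the least-attempted, oldest-scheduled available
    // job, skipping rows another worker already holds a lock on. The real
    // dequeue does this SELECT and the flip to 'running' in a single
    // UPDATE ... FROM CTE, so the row lock never outlives that statement.
    async fn peek_next_job_id(pool: &PgPool, queue: &str) -> Result<Option<i64>, sqlx::Error> {
        // FOR UPDATE only means something inside a transaction.
        let mut tx = pool.begin().await?;

        let row: Option<(i64,)> = sqlx::query_as(
            r#"
            SELECT id
            FROM job_queue
            WHERE status = 'available'
              AND scheduled_at <= NOW()
              AND queue = $1
            ORDER BY
                attempt,      -- fresh jobs before retries
                scheduled_at  -- then oldest first
            LIMIT 1
            FOR UPDATE SKIP LOCKED
            "#,
        )
        .bind(queue)
        .fetch_optional(&mut *tx)
        .await?;

        tx.commit().await?;
        Ok(row.map(|(id,)| id))
    }

The migration hunk just below widens the dequeue index to cover `attempt` as well.

diff --git 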
a/migrations/20231129172339_job_queue_table.sql b/migrations/20231129172339_job_queue_table.sql index 7efa154..bf8c3df 100644 --- a/migrations/20231129172339_job_queue_table.sql +++ b/migrations/20231129172339_job_queue_table.sql @@ -9,21 +9,21 @@ CREATE TABLE job_queue( id BIGSERIAL PRIMARY KEY, attempt INT NOT NULL DEFAULT 0, attempted_at TIMESTAMPTZ DEFAULT NULL, - attempted_by TEXT[] DEFAULT ARRAY[]::TEXT[], + attempted_by TEXT [] DEFAULT ARRAY [] :: TEXT [], created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - errors JSONB[], + errors JSONB [], max_attempts INT NOT NULL DEFAULT 1, metadata JSONB, - finished_at TIMESTAMPTZ DEFAULT NULL, + last_attempt_finished_at TIMESTAMPTZ DEFAULT NULL, parameters JSONB, - queue TEXT NOT NULL DEFAULT 'default'::text, + queue TEXT NOT NULL DEFAULT 'default' :: text, scheduled_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - status job_status NOT NULL DEFAULT 'available'::job_status, + status job_status NOT NULL DEFAULT 'available' :: job_status, target TEXT NOT NULL ); -- Needed for `dequeue` queries -CREATE INDEX idx_queue_scheduled_at ON job_queue(queue, status, scheduled_at); +CREATE INDEX idx_queue_scheduled_at ON job_queue(queue, status, scheduled_at, attempt); -- Needed for UPDATE-ing incomplete jobs with a specific target (i.e. slow destinations) CREATE INDEX idx_queue_target ON job_queue(queue, status, target); From 7d519ab19981759aacc0a01ae63859996e0e131e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Mon, 8 Jan 2024 19:22:59 +0100 Subject: [PATCH 092/130] refactor: Remove table name config and hardcode table name (#25) --- hook-common/src/pgqueue.rs | 187 +++++++++----------------- hook-consumer/src/config.rs | 3 - hook-consumer/src/consumer.rs | 3 +- hook-consumer/src/main.rs | 2 +- hook-janitor/src/config.rs | 3 - hook-janitor/src/main.rs | 1 - hook-janitor/src/webhooks.rs | 44 ++---- hook-producer/src/config.rs | 3 - hook-producer/src/handlers/app.rs | 2 +- hook-producer/src/handlers/webhook.rs | 10 +- hook-producer/src/main.rs | 1 - 11 files changed, 88 insertions(+), 171 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index ec5b684..04573d5 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -124,29 +124,25 @@ impl Job { /// /// # Arguments /// - /// * `table`: The table where this job will be marked as completed. /// * `executor`: Any sqlx::Executor that can execute the UPDATE query required to mark this `Job` as completed. - async fn complete<'c, E>(self, table: &str, executor: E) -> Result + async fn complete<'c, E>(self, executor: E) -> Result where E: sqlx::Executor<'c, Database = sqlx::Postgres>, { - let base_query = format!( - r#" + let base_query = r#" UPDATE - "{0}" + "job_queue" SET last_attempt_finished_at = NOW(), status = 'completed'::job_status WHERE - "{0}".id = $2 + "job_queue".id = $2 AND queue = $1 RETURNING - "{0}".* - "#, - table - ); + "job_queue".* + "#; - sqlx::query(&base_query) + sqlx::query(base_query) .bind(&self.queue) .bind(self.id) .execute(executor) @@ -164,37 +160,28 @@ RETURNING /// # Arguments /// /// * `error`: Any JSON-serializable value to be stored as an error. - /// * `table`: The table where this job will be marked as failed. /// * `executor`: Any sqlx::Executor that can execute the UPDATE query required to mark this `Job` as failed. 
- async fn fail<'c, E, S>( - self, - error: S, - table: &str, - executor: E, - ) -> Result, sqlx::Error> + async fn fail<'c, E, S>(self, error: S, executor: E) -> Result, sqlx::Error> where S: serde::Serialize + std::marker::Sync + std::marker::Send, E: sqlx::Executor<'c, Database = sqlx::Postgres>, { let json_error = sqlx::types::Json(error); - let base_query = format!( - r#" + let base_query = r#" UPDATE - "{0}" + "job_queue" SET last_attempt_finished_at = NOW(), status = 'failed'::job_status - errors = array_append("{0}".errors, $3) + errors = array_append("job_queue".errors, $3) WHERE - "{0}".id = $2 + "job_queue".id = $2 AND queue = $1 RETURNING - "{0}".* - "#, - &table - ); + "job_queue".* + "#; - sqlx::query(&base_query) + sqlx::query(base_query) .bind(&self.queue) .bind(self.id) .bind(&json_error) @@ -230,7 +217,6 @@ pub trait PgQueueJob { #[derive(Debug)] pub struct PgJob { pub job: Job, - pub table: String, pub connection: sqlx::pool::PoolConnection, } @@ -239,7 +225,7 @@ impl PgQueueJob for PgJob { async fn complete(mut self) -> Result>>> { let completed_job = self .job - .complete(&self.table, &mut *self.connection) + .complete(&mut *self.connection) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -255,7 +241,7 @@ impl PgQueueJob for PgJob { ) -> Result, PgJobError>>> { let failed_job = self .job - .fail(error, &self.table, &mut *self.connection) + .fail(error, &mut *self.connection) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -282,7 +268,7 @@ impl PgQueueJob for PgJob { .job .retryable() .queue(queue) - .retry(error, retry_interval, &self.table, &mut *self.connection) + .retry(error, retry_interval, &mut *self.connection) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -298,7 +284,6 @@ impl PgQueueJob for PgJob { #[derive(Debug)] pub struct PgTransactionJob<'c, J, M> { pub job: Job, - pub table: String, pub transaction: sqlx::Transaction<'c, sqlx::postgres::Postgres>, } @@ -309,7 +294,7 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactio ) -> Result>>> { let completed_job = self .job - .complete(&self.table, &mut *self.transaction) + .complete(&mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -333,7 +318,7 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactio ) -> Result, PgJobError>>> { let failed_job = self .job - .fail(error, &self.table, &mut *self.transaction) + .fail(error, &mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -370,7 +355,7 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactio .job .retryable() .queue(queue) - .retry(error, retry_interval, &self.table, &mut *self.transaction) + .retry(error, retry_interval, &mut *self.transaction) .await .map_err(|error| PgJobError::QueryError { command: "UPDATE".to_owned(), @@ -422,13 +407,11 @@ impl RetryableJob { /// /// * `error`: Any JSON-serializable value to be stored as an error. /// * `retry_interval`: The duration until the `Job` is to be retried again. Used to set `scheduled_at`. - /// * `table`: The table where this job will be marked as completed. /// * `executor`: Any sqlx::Executor that can execute the UPDATE query required to mark this `Job` as completed. 
async fn retry<'c, S, E>( self, error: S, retry_interval: time::Duration, - table: &str, executor: E, ) -> Result where @@ -436,26 +419,23 @@ impl RetryableJob { E: sqlx::Executor<'c, Database = sqlx::Postgres>, { let json_error = sqlx::types::Json(error); - let base_query = format!( - r#" + let base_query = r#" UPDATE - "{0}" + "job_queue" SET last_attempt_finished_at = NOW(), - errors = array_append("{0}".errors, $4), + errors = array_append("job_queue".errors, $4), queue = $5, status = 'available'::job_status, scheduled_at = NOW() + $3 WHERE - "{0}".id = $2 + "job_queue".id = $2 AND queue = $1 RETURNING - "{0}".* - "#, - &table - ); + "job_queue".* + "#; - sqlx::query(&base_query) + sqlx::query(base_query) .bind(&self.queue) .bind(self.id) .bind(retry_interval) @@ -466,7 +446,6 @@ RETURNING Ok(RetriedJob { id: self.id, - table: table.to_owned(), queue: self.queue, retry_queue: self.retry_queue.to_owned(), }) @@ -490,7 +469,6 @@ pub struct RetriedJob { /// A unique id identifying a job queue. pub queue: String, pub retry_queue: Option, - pub table: String, } /// State a `Job` is transitioned to after exhausting all of their attempts. @@ -535,8 +513,6 @@ pub struct PgQueue { name: String, /// A connection pool used to connect to the PostgreSQL database. pool: PgPool, - /// The identifier of the PostgreSQL table this queue runs on. - table: String, } pub type PgQueueResult = std::result::Result; @@ -547,16 +523,14 @@ impl PgQueue { /// # Arguments /// /// * `queue_name`: A name for the queue we are going to initialize. - /// * `table_name`: The name for the table the queue will use in PostgreSQL. /// * `url`: A URL pointing to where the PostgreSQL database is hosted. - pub async fn new(queue_name: &str, table_name: &str, url: &str) -> PgQueueResult { + pub async fn new(queue_name: &str, url: &str) -> PgQueueResult { let name = queue_name.to_owned(); - let table = table_name.to_owned(); let pool = PgPoolOptions::new() .connect_lazy(url) .map_err(|error| PgQueueError::PoolCreationError { error })?; - Ok(Self { name, pool, table }) + Ok(Self { name, pool }) } /// Initialize a new PgQueue backed by table in PostgreSQL from a provided connection pool. @@ -564,17 +538,11 @@ impl PgQueue { /// # Arguments /// /// * `queue_name`: A name for the queue we are going to initialize. - /// * `table_name`: The name for the table the queue will use in PostgreSQL. /// * `pool`: A database connection pool to be used by this queue. - pub async fn new_from_pool( - queue_name: &str, - table_name: &str, - pool: PgPool, - ) -> PgQueueResult { + pub async fn new_from_pool(queue_name: &str, pool: PgPool) -> PgQueueResult { let name = queue_name.to_owned(); - let table = table_name.to_owned(); - Ok(Self { name, pool, table }) + Ok(Self { name, pool }) } /// Dequeue a `Job` from this `PgQueue`. @@ -594,13 +562,12 @@ impl PgQueue { // The query that follows uses a FOR UPDATE SKIP LOCKED clause. // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. 
- let base_query = format!( - r#" + let base_query = r#" WITH available_in_queue AS ( SELECT id FROM - "{0}" + "job_queue" WHERE status = 'available' AND scheduled_at <= NOW() @@ -612,34 +579,28 @@ WITH available_in_queue AS ( FOR UPDATE SKIP LOCKED ) UPDATE - "{0}" + "job_queue" SET attempted_at = NOW(), status = 'running'::job_status, - attempt = "{0}".attempt + 1, - attempted_by = array_append("{0}".attempted_by, $2::text) + attempt = "job_queue".attempt + 1, + attempted_by = array_append("job_queue".attempted_by, $2::text) FROM available_in_queue WHERE - "{0}".id = available_in_queue.id + "job_queue".id = available_in_queue.id RETURNING - "{0}".* - "#, - &self.table - ); + "job_queue".* + "#; - let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) + let query_result: Result, sqlx::Error> = sqlx::query_as(base_query) .bind(&self.name) .bind(attempted_by) .fetch_one(&mut *connection) .await; match query_result { - Ok(job) => Ok(Some(PgJob { - job, - table: self.table.to_owned(), - connection, - })), + Ok(job) => Ok(Some(PgJob { job, connection })), // Although connection would be closed once it goes out of scope, sqlx recommends explicitly calling close(). // See: https://docs.rs/sqlx/latest/sqlx/postgres/any/trait.AnyConnectionBackend.html#tymethod.close. @@ -676,13 +637,12 @@ RETURNING // The query that follows uses a FOR UPDATE SKIP LOCKED clause. // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. - let base_query = format!( - r#" + let base_query = r#" WITH available_in_queue AS ( SELECT id FROM - "{0}" + "job_queue" WHERE status = 'available' AND scheduled_at <= NOW() @@ -694,23 +654,21 @@ WITH available_in_queue AS ( FOR UPDATE SKIP LOCKED ) UPDATE - "{0}" + "job_queue" SET attempted_at = NOW(), status = 'running'::job_status, - attempt = "{0}".attempt + 1, - attempted_by = array_append("{0}".attempted_by, $2::text) + attempt = "job_queue".attempt + 1, + attempted_by = array_append("job_queue".attempted_by, $2::text) FROM available_in_queue WHERE - "{0}".id = available_in_queue.id + "job_queue".id = available_in_queue.id RETURNING - "{0}".* - "#, - &self.table - ); + "job_queue".* + "#; - let query_result: Result, sqlx::Error> = sqlx::query_as(&base_query) + let query_result: Result, sqlx::Error> = sqlx::query_as(base_query) .bind(&self.name) .bind(attempted_by) .fetch_one(&mut *tx) @@ -719,7 +677,6 @@ RETURNING match query_result { Ok(job) => Ok(Some(PgTransactionJob { job, - table: self.table.to_owned(), transaction: tx, })), @@ -742,17 +699,14 @@ RETURNING job: NewJob, ) -> PgQueueResult<()> { // TODO: Escaping. I think sqlx doesn't support identifiers. 
- let base_query = format!( - r#" -INSERT INTO {} + let base_query = r#" +INSERT INTO job_queue (attempt, created_at, scheduled_at, max_attempts, metadata, parameters, queue, status, target) VALUES (0, NOW(), NOW(), $1, $2, $3, $4, 'available'::job_status, $5) - "#, - &self.table - ); + "#; - sqlx::query(&base_query) + sqlx::query(base_query) .bind(job.max_attempts) .bind(&job.metadata) .bind(&job.parameters) @@ -826,7 +780,7 @@ mod tests { let worker_id = worker_id(); let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); - let queue = PgQueue::new_from_pool("test_can_dequeue_job", "job_queue", db) + let queue = PgQueue::new_from_pool("test_can_dequeue_job", db) .await .expect("failed to connect to local test postgresql database"); @@ -850,7 +804,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_dequeue_returns_none_on_no_jobs(db: PgPool) { let worker_id = worker_id(); - let queue = PgQueue::new_from_pool("test_dequeue_returns_none_on_no_jobs", "job_queue", db) + let queue = PgQueue::new_from_pool("test_dequeue_returns_none_on_no_jobs", db) .await .expect("failed to connect to local test postgresql database"); @@ -870,7 +824,7 @@ mod tests { let worker_id = worker_id(); let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); - let queue = PgQueue::new_from_pool("test_can_dequeue_tx_job", "job_queue", db) + let queue = PgQueue::new_from_pool("test_can_dequeue_tx_job", db) .await .expect("failed to connect to local test postgresql database"); @@ -895,10 +849,9 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_dequeue_tx_returns_none_on_no_jobs(db: PgPool) { let worker_id = worker_id(); - let queue = - PgQueue::new_from_pool("test_dequeue_tx_returns_none_on_no_jobs", "job_queue", db) - .await - .expect("failed to connect to local test postgresql database"); + let queue = PgQueue::new_from_pool("test_dequeue_tx_returns_none_on_no_jobs", db) + .await + .expect("failed to connect to local test postgresql database"); let tx_job: Option> = queue .dequeue_tx(&worker_id) @@ -915,14 +868,13 @@ mod tests { let job_metadata = JobMetadata::default(); let worker_id = worker_id(); let new_job = NewJob::new(2, job_metadata, job_parameters, &job_target); - let table_name = "job_queue".to_owned(); let queue_name = "test_can_retry_job_with_remaining_attempts".to_owned(); let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0)) .queue(&queue_name) .provide(); - let queue = PgQueue::new_from_pool(&queue_name, &table_name, db) + let queue = PgQueue::new_from_pool(&queue_name, db) .await .expect("failed to connect to local test postgresql database"); @@ -969,7 +921,6 @@ mod tests { let job_metadata = JobMetadata::default(); let worker_id = worker_id(); let new_job = NewJob::new(2, job_metadata, job_parameters, &job_target); - let table_name = "job_queue".to_owned(); let queue_name = "test_can_retry_job_to_different_queue".to_owned(); let retry_queue_name = "test_can_retry_job_to_different_queue_retry".to_owned(); @@ -977,7 +928,7 @@ mod tests { .queue(&retry_queue_name) .provide(); - let queue = PgQueue::new_from_pool(&queue_name, &table_name, db.clone()) + let queue = PgQueue::new_from_pool(&queue_name, db.clone()) .await .expect("failed to connect to queue in local test postgresql database"); @@ -1006,7 +957,7 @@ mod tests { assert!(retried_job_not_found.is_none()); - let queue = PgQueue::new_from_pool(&retry_queue_name, &table_name, db) + let queue = PgQueue::new_from_pool(&retry_queue_name, db) .await 
.expect("failed to connect to retry queue in local test postgresql database"); @@ -1038,13 +989,9 @@ mod tests { let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); let retry_policy = RetryPolicy::build(0, time::Duration::from_secs(0)).provide(); - let queue = PgQueue::new_from_pool( - "test_cannot_retry_job_without_remaining_attempts", - "job_queue", - db, - ) - .await - .expect("failed to connect to local test postgresql database"); + let queue = PgQueue::new_from_pool("test_cannot_retry_job_without_remaining_attempts", db) + .await + .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); diff --git a/hook-consumer/src/config.rs b/hook-consumer/src/config.rs index 6525b25..01f94e7 100644 --- a/hook-consumer/src/config.rs +++ b/hook-consumer/src/config.rs @@ -34,9 +34,6 @@ pub struct Config { #[envconfig(default = "true")] pub transactional: bool, - - #[envconfig(default = "job_queue")] - pub table_name: String, } impl Config { diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 42cb1ff..671c7b9 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -484,8 +484,7 @@ mod tests { async fn test_wait_for_job(db: PgPool) { let worker_id = worker_id(); let queue_name = "test_wait_for_job".to_string(); - let table_name = "job_queue".to_string(); - let queue = PgQueue::new_from_pool(&queue_name, &table_name, db) + let queue = PgQueue::new_from_pool(&queue_name, db) .await .expect("failed to connect to PG"); diff --git a/hook-consumer/src/main.rs b/hook-consumer/src/main.rs index c71b8eb..4182348 100644 --- a/hook-consumer/src/main.rs +++ b/hook-consumer/src/main.rs @@ -19,7 +19,7 @@ async fn main() -> Result<(), ConsumerError> { .maximum_interval(config.retry_policy.maximum_interval.0) .queue(&config.retry_policy.retry_queue_name) .provide(); - let queue = PgQueue::new(&config.queue_name, &config.table_name, &config.database_url) + let queue = PgQueue::new(&config.queue_name, &config.database_url) .await .expect("failed to initialize queue"); diff --git a/hook-janitor/src/config.rs b/hook-janitor/src/config.rs index c1efb85..64db0e6 100644 --- a/hook-janitor/src/config.rs +++ b/hook-janitor/src/config.rs @@ -11,9 +11,6 @@ pub struct Config { #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] pub database_url: String, - #[envconfig(default = "job_queue")] - pub table_name: String, - #[envconfig(default = "default")] pub queue_name: String, diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs index 5de3ec4..7d7e223 100644 --- a/hook-janitor/src/main.rs +++ b/hook-janitor/src/main.rs @@ -55,7 +55,6 @@ async fn main() { Box::new( WebhookCleaner::new( &config.queue_name, - &config.table_name, &config.database_url, kafka_producer, config.kafka.app_metrics_topic.to_owned(), diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index b14dba1..18a21c9 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -44,7 +44,6 @@ type Result = std::result::Result; pub struct WebhookCleaner { queue_name: String, - table_name: String, pg_pool: PgPool, kafka_producer: FutureProducer, app_metrics_topic: String, @@ -133,13 +132,11 @@ struct CleanupStats { impl WebhookCleaner { pub fn new( queue_name: &str, - table_name: &str, database_url: &str, kafka_producer: FutureProducer, app_metrics_topic: String, ) -> Result { let queue_name = queue_name.to_owned(); - let table_name = 
table_name.to_owned(); let pg_pool = PgPoolOptions::new() .acquire_timeout(Duration::from_secs(10)) .connect_lazy(database_url) @@ -147,7 +144,6 @@ impl WebhookCleaner { Ok(Self { queue_name, - table_name, pg_pool, kafka_producer, app_metrics_topic, @@ -157,17 +153,14 @@ impl WebhookCleaner { #[allow(dead_code)] // This is used in tests. pub fn new_from_pool( queue_name: &str, - table_name: &str, pg_pool: PgPool, kafka_producer: FutureProducer, app_metrics_topic: String, ) -> Result { let queue_name = queue_name.to_owned(); - let table_name = table_name.to_owned(); Ok(Self { queue_name, - table_name, pg_pool, kafka_producer, app_metrics_topic, @@ -196,22 +189,19 @@ impl WebhookCleaner { } async fn get_completed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result> { - let base_query = format!( - r#" + let base_query = r#" SELECT DATE_TRUNC('hour', last_attempt_finished_at) AS hour, (metadata->>'team_id')::bigint AS team_id, (metadata->>'plugin_config_id')::bigint AS plugin_config_id, count(*) as successes - FROM {0} + FROM job_queue WHERE status = 'completed' AND queue = $1 GROUP BY hour, team_id, plugin_config_id ORDER BY hour, team_id, plugin_config_id; - "#, - self.table_name - ); + "#; - let rows = sqlx::query_as::<_, CompletedRow>(&base_query) + let rows = sqlx::query_as::<_, CompletedRow>(base_query) .bind(&self.queue_name) .fetch_all(&mut *tx.0) .await @@ -221,23 +211,20 @@ impl WebhookCleaner { } async fn get_failed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result> { - let base_query = format!( - r#" + let base_query = r#" SELECT DATE_TRUNC('hour', last_attempt_finished_at) AS hour, (metadata->>'team_id')::bigint AS team_id, (metadata->>'plugin_config_id')::bigint AS plugin_config_id, errors[array_upper(errors, 1)] AS last_error, count(*) as failures - FROM {0} + FROM job_queue WHERE status = 'failed' AND queue = $1 GROUP BY hour, team_id, plugin_config_id, last_error ORDER BY hour, team_id, plugin_config_id, last_error; - "#, - self.table_name - ); + "#; - let rows = sqlx::query_as::<_, FailedRow>(&base_query) + let rows = sqlx::query_as::<_, FailedRow>(base_query) .bind(&self.queue_name) .fetch_all(&mut *tx.0) .await @@ -292,16 +279,13 @@ impl WebhookCleaner { async fn delete_observed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result { // This DELETE is only safe because we are in serializable isolation mode, see the note // in `start_serializable_txn`. 
- let base_query = format!( - r#" - DELETE FROM {0} + let base_query = r#" + DELETE FROM job_queue WHERE status IN ('failed', 'completed') AND queue = $1; - "#, - self.table_name - ); + "#; - let result = sqlx::query(&base_query) + let result = sqlx::query(base_query) .bind(&self.queue_name) .execute(&mut *tx.0) .await @@ -460,7 +444,6 @@ mod tests { let webhook_cleaner = WebhookCleaner::new_from_pool( &"webhooks", - &"job_queue", db, mock_producer, APP_METRICS_TOPIC.to_owned(), @@ -642,14 +625,13 @@ mod tests { let (_, mock_producer) = create_mock_kafka().await; let webhook_cleaner = WebhookCleaner::new_from_pool( &"webhooks", - &"job_queue", db.clone(), mock_producer, APP_METRICS_TOPIC.to_owned(), ) .expect("unable to create webhook cleaner"); - let queue = PgQueue::new_from_pool("webhooks", "job_queue", db.clone()) + let queue = PgQueue::new_from_pool("webhooks", db.clone()) .await .expect("failed to connect to local test postgresql database"); diff --git a/hook-producer/src/config.rs b/hook-producer/src/config.rs index 87fad5d..8daf04e 100644 --- a/hook-producer/src/config.rs +++ b/hook-producer/src/config.rs @@ -11,9 +11,6 @@ pub struct Config { #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] pub database_url: String, - #[envconfig(default = "job_queue")] - pub table_name: String, - #[envconfig(default = "default")] pub queue_name: String, } diff --git a/hook-producer/src/handlers/app.rs b/hook-producer/src/handlers/app.rs index e588dbd..b4d099c 100644 --- a/hook-producer/src/handlers/app.rs +++ b/hook-producer/src/handlers/app.rs @@ -38,7 +38,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn index(db: PgPool) { - let pg_queue = PgQueue::new_from_pool("test_index", "job_queue", db) + let pg_queue = PgQueue::new_from_pool("test_index", db) .await .expect("failed to construct pg_queue"); diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs index 72923b2..ab1552c 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-producer/src/handlers/webhook.rs @@ -119,7 +119,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn webhook_success(db: PgPool) { - let pg_queue = PgQueue::new_from_pool("test_index", "job_queue", db) + let pg_queue = PgQueue::new_from_pool("test_index", db) .await .expect("failed to construct pg_queue"); @@ -163,7 +163,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn webhook_bad_url(db: PgPool) { - let pg_queue = PgQueue::new_from_pool("test_index", "job_queue", db) + let pg_queue = PgQueue::new_from_pool("test_index", db) .await .expect("failed to construct pg_queue"); @@ -202,7 +202,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn webhook_payload_missing_fields(db: PgPool) { - let pg_queue = PgQueue::new_from_pool("test_index", "job_queue", db) + let pg_queue = PgQueue::new_from_pool("test_index", db) .await .expect("failed to construct pg_queue"); @@ -225,7 +225,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn webhook_payload_not_json(db: PgPool) { - let pg_queue = PgQueue::new_from_pool("test_index", "job_queue", db) + let pg_queue = PgQueue::new_from_pool("test_index", db) .await .expect("failed to construct pg_queue"); @@ -248,7 +248,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn webhook_payload_body_too_large(db: PgPool) { - let pg_queue = PgQueue::new_from_pool("test_index", "job_queue", db) + let pg_queue = PgQueue::new_from_pool("test_index", db) .await 
.expect("failed to construct pg_queue"); diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index 39f4500..d0190c2 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -27,7 +27,6 @@ async fn main() { // TODO: Coupling the queue name to the PgQueue object doesn't seem ideal from the producer // side, but we don't need more than one queue for now. &config.queue_name, - &config.table_name, &config.database_url, ) .await From ead1bf28ef8d092848cceedf8e2ae8867b02276d Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 9 Jan 2024 02:13:30 -0700 Subject: [PATCH 093/130] Minor query syntax/ordering cleanup (#26) --- hook-common/src/pgqueue.rs | 56 +++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 04573d5..35f5b4a 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -131,15 +131,15 @@ impl Job { { let base_query = r#" UPDATE - "job_queue" + job_queue SET last_attempt_finished_at = NOW(), status = 'completed'::job_status WHERE - "job_queue".id = $2 - AND queue = $1 + queue = $1 + AND id = $2 RETURNING - "job_queue".* + job_queue.* "#; sqlx::query(base_query) @@ -169,16 +169,16 @@ RETURNING let json_error = sqlx::types::Json(error); let base_query = r#" UPDATE - "job_queue" + job_queue SET last_attempt_finished_at = NOW(), status = 'failed'::job_status - errors = array_append("job_queue".errors, $3) + errors = array_append(errors, $3) WHERE - "job_queue".id = $2 - AND queue = $1 + queue = $1 + AND id = $2 RETURNING - "job_queue".* + job_queue.* "#; sqlx::query(base_query) @@ -421,18 +421,18 @@ impl RetryableJob { let json_error = sqlx::types::Json(error); let base_query = r#" UPDATE - "job_queue" + job_queue SET last_attempt_finished_at = NOW(), - errors = array_append("job_queue".errors, $4), - queue = $5, status = 'available'::job_status, - scheduled_at = NOW() + $3 + scheduled_at = NOW() + $3, + errors = array_append(errors, $4), + queue = $5 WHERE - "job_queue".id = $2 - AND queue = $1 + queue = $1 + AND id = $2 RETURNING - "job_queue".* + job_queue.* "#; sqlx::query(base_query) @@ -567,7 +567,7 @@ WITH available_in_queue AS ( SELECT id FROM - "job_queue" + job_queue WHERE status = 'available' AND scheduled_at <= NOW() @@ -579,18 +579,18 @@ WITH available_in_queue AS ( FOR UPDATE SKIP LOCKED ) UPDATE - "job_queue" + job_queue SET attempted_at = NOW(), status = 'running'::job_status, - attempt = "job_queue".attempt + 1, - attempted_by = array_append("job_queue".attempted_by, $2::text) + attempt = attempt + 1, + attempted_by = array_append(attempted_by, $2::text) FROM available_in_queue WHERE - "job_queue".id = available_in_queue.id + job_queue.id = available_in_queue.id RETURNING - "job_queue".* + job_queue.* "#; let query_result: Result, sqlx::Error> = sqlx::query_as(base_query) @@ -642,7 +642,7 @@ WITH available_in_queue AS ( SELECT id FROM - "job_queue" + job_queue WHERE status = 'available' AND scheduled_at <= NOW() @@ -654,18 +654,18 @@ WITH available_in_queue AS ( FOR UPDATE SKIP LOCKED ) UPDATE - "job_queue" + job_queue SET attempted_at = NOW(), status = 'running'::job_status, - attempt = "job_queue".attempt + 1, - attempted_by = array_append("job_queue".attempted_by, $2::text) + attempt = attempt + 1, + attempted_by = array_append(attempted_by, $2::text) FROM available_in_queue WHERE - "job_queue".id = available_in_queue.id + job_queue.id = available_in_queue.id RETURNING - "job_queue".* + job_queue.* "#; let 
query_result: Result, sqlx::Error> = sqlx::query_as(base_query) From 892d30b5c53fc4f4a724997f1fc370da93c88db7 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 9 Jan 2024 09:23:09 -0700 Subject: [PATCH 094/130] Add metrics, fix metrics endpoint, bump deps (#27) --- Cargo.lock | 271 +++++++++++--------------- Cargo.toml | 4 +- hook-common/src/metrics.rs | 9 +- hook-consumer/src/consumer.rs | 19 +- hook-janitor/src/webhooks.rs | 17 +- hook-producer/src/handlers/app.rs | 16 +- hook-producer/src/handlers/mod.rs | 2 +- hook-producer/src/handlers/webhook.rs | 20 +- hook-producer/src/main.rs | 7 +- 9 files changed, 172 insertions(+), 193 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fdef24d..e4da816 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "getrandom", @@ -62,13 +62,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -104,9 +104,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" +checksum = "d09dbe0e490df5da9d69b36dca48a76635288a82f92eca90024883a56202026d" dependencies = [ "async-trait", "axum-core", @@ -133,13 +133,14 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-core" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" +checksum = "e87c8503f93e6d144ee5690907ba22db7ba79ab001a932ab99034f0fe836b3df" dependencies = [ "async-trait", "bytes", @@ -153,6 +154,7 @@ dependencies = [ "sync_wrapper", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -172,9 +174,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.5" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "c79fed4cdb43e993fcdadc7e58a09fd0e3e649c4436fa11da71c9f1f3ee7feb9" [[package]] name = "base64ct" @@ -286,9 +288,9 @@ checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -310,34 +312,27 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crossbeam-epoch" -version = "0.9.16" +version = "0.9.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset", ] [[package]] name = "crossbeam-queue" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -514,9 +509,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -529,9 +524,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -539,15 +534,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -567,38 +562,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -660,9 +655,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +checksum = "991910e35c615d8cab86b5ab04be67e6ad24d2bf5f4f11fdbbed26da999bbeab" dependencies = [ "bytes", "fnv", @@ -948,7 +943,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.0", + "h2 0.4.1", "http 1.0.0", "http-body 1.0.0", "httparse", @@ -973,9 +968,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca339002caeb0d159cc6e023dff48e199f081e42fa039895c7c6f38b37f2e9d" +checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" dependencies = [ "bytes", "futures-channel", @@ -986,16 +981,14 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", - "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1091,9 +1084,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.151" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libm" @@ -1114,9 +1107,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "5f526fdd09d99e19742883e43de41e1aa9e36db0c7ab7f935165d611c5cccc66" dependencies = [ "cc", "libc", @@ -1146,15 +1139,6 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" -[[package]] -name = "mach2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" -dependencies = [ - "libc", -] - [[package]] name = "matchit" version = "0.7.3" @@ -1173,38 +1157,29 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "metrics" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +checksum = "77b9e10a211c839210fd7f99954bda26e5f8e26ec686ad68da6a32df7c80e782" dependencies = [ "ahash", - "metrics-macros", "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.12.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d4fa7ce7c4862db464a37b0b31d89bca874562f034bd7993895572783d02950" +checksum = "83a4c4718a371ddfb7806378f23617876eea8b82e5ff1324516bcd283249d9ea" dependencies = [ "base64", "hyper 0.14.28", + "hyper-tls", "indexmap 1.9.3", "ipnet", "metrics", @@ -1215,22 +1190,11 @@ dependencies = [ "tracing", ] -[[package]] -name = "metrics-macros" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.41", -] - [[package]] name = "metrics-util" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" +checksum = "2670b8badcc285d486261e2e9f1615b506baff91427b61bd336a472b65bbf5ed" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1403,9 +1367,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -1418,9 +1382,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssl" -version = "0.10.61" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -1439,7 +1403,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1450,9 +1414,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.97" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -1527,7 +1491,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1565,9 +1529,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = 
"69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" [[package]] name = "portable-atomic" @@ -1593,22 +1557,21 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" dependencies = [ "unicode-ident", ] [[package]] name = "quanta" -version = "0.11.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" dependencies = [ "crossbeam-utils", "libc", - "mach2", "once_cell", "raw-cpuid", "wasi", @@ -1618,9 +1581,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -1657,11 +1620,11 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.7.0" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", ] [[package]] @@ -1826,11 +1789,11 @@ checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1864,29 +1827,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.193" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" dependencies = [ "itoa", "ryu", @@ -1895,9 +1858,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" dependencies = [ "itoa", "serde", @@ 
-2268,9 +2231,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.41" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -2306,35 +2269,35 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand", "redox_syscall", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "thiserror" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -2364,9 +2327,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ "backtrace", "bytes", @@ -2389,7 +2352,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -2492,7 +2455,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -2657,7 +2620,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-shared", ] @@ -2691,7 +2654,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2742,11 +2705,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] @@ -2883,9 +2846,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.30" +version = "0.5.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5" +checksum = 
"b7520bbdec7211caa7c4e682eb1fbe07abe20cee6756b6e00f537c82c11816aa" dependencies = [ "memchr", ] @@ -2902,22 +2865,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1f6a38b..b7cb8fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,8 +12,8 @@ eyre = "0.6.9" futures = { version = "0.3.29" } http = { version = "0.2" } http-body-util = "0.1.0" -metrics = "0.21.1" -metrics-exporter-prometheus = "0.12.1" +metrics = "0.22.0" +metrics-exporter-prometheus = "0.13.0" rdkafka = { version = "0.35.0", features = ["cmake-build", "ssl", "tracing"] } reqwest = { version = "0.11" } regex = "1.10.2" diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index 3d9e4c0..1f57c5e 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -21,7 +21,10 @@ pub fn setup_metrics_router() -> Router { let recorder_handle = setup_metrics_recorder(); Router::new() - .route("/metrics", get(recorder_handle.render())) + .route( + "/metrics", + get(move || std::future::ready(recorder_handle.render())), + ) .layer(axum::middleware::from_fn(track_metrics)) } @@ -63,8 +66,8 @@ pub async fn track_metrics(req: Request, next: Next) -> impl IntoResponse ("status", status), ]; - metrics::increment_counter!("http_requests_total", &labels); - metrics::histogram!("http_requests_duration_seconds", latency, &labels); + metrics::counter!("http_requests_total", &labels).increment(1); + metrics::histogram!("http_requests_duration_seconds", &labels).record(latency); response } diff --git a/hook-consumer/src/consumer.rs b/hook-consumer/src/consumer.rs index 671c7b9..2114ef8 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-consumer/src/consumer.rs @@ -192,7 +192,7 @@ async fn spawn_webhook_job_processing_task( ("target", webhook_job.target()), ]; - metrics::increment_counter!("webhook_jobs_total", &labels); + metrics::counter!("webhook_jobs_total", &labels).increment(1); tokio::spawn(async move { let result = process_webhook_job(client, webhook_job, &retry_policy).await; @@ -247,8 +247,9 @@ async fn process_webhook_job( .await .map_err(|error| ConsumerError::PgJobError(error.to_string()))?; - metrics::increment_counter!("webhook_jobs_completed", &labels); - metrics::histogram!("webhook_jobs_processing_duration_seconds", elapsed, &labels); + metrics::counter!("webhook_jobs_completed", &labels).increment(1); + metrics::histogram!("webhook_jobs_processing_duration_seconds", &labels) + .record(elapsed); Ok(()) } @@ -258,7 +259,7 @@ async fn process_webhook_job( .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; - metrics::increment_counter!("webhook_jobs_failed", &labels); + metrics::counter!("webhook_jobs_failed", &labels).increment(1); Ok(()) } @@ -268,7 +269,7 @@ async fn process_webhook_job( .await .map_err(|job_error| 
ConsumerError::PgJobError(job_error.to_string()))?; - metrics::increment_counter!("webhook_jobs_failed", &labels); + metrics::counter!("webhook_jobs_failed", &labels).increment(1); Ok(()) } @@ -278,7 +279,7 @@ async fn process_webhook_job( .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; - metrics::increment_counter!("webhook_jobs_failed", &labels); + metrics::counter!("webhook_jobs_failed", &labels).increment(1); Ok(()) } @@ -293,7 +294,7 @@ async fn process_webhook_job( .await { Ok(_) => { - metrics::increment_counter!("webhook_jobs_retried", &labels); + metrics::counter!("webhook_jobs_retried", &labels).increment(1); Ok(()) } @@ -305,7 +306,7 @@ async fn process_webhook_job( .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; - metrics::increment_counter!("webhook_jobs_failed", &labels); + metrics::counter!("webhook_jobs_failed", &labels).increment(1); Ok(()) } @@ -318,7 +319,7 @@ async fn process_webhook_job( .await .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; - metrics::increment_counter!("webhook_jobs_failed", &labels); + metrics::counter!("webhook_jobs_failed", &labels).increment(1); Ok(()) } diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 18a21c9..228187a 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::time::{Duration, Instant}; use async_trait::async_trait; use chrono::{DateTime, Utc}; @@ -350,9 +350,23 @@ impl WebhookCleaner { #[async_trait] impl Cleaner for WebhookCleaner { async fn cleanup(&self) { + let start_time = Instant::now(); + match self.cleanup_impl().await { Ok(stats) => { + metrics::counter!("webhook_cleanup_runs",).increment(1); + if stats.rows_processed > 0 { + let elapsed_time = start_time.elapsed().as_secs_f64(); + metrics::histogram!("webhook_cleanup_duration").record(elapsed_time); + + metrics::counter!("webhook_cleanup_rows_processed",) .increment(stats.rows_processed); + metrics::counter!("webhook_cleanup_completed_agg_row_count",) .increment(stats.completed_agg_row_count as u64); + metrics::counter!("webhook_cleanup_failed_agg_row_count",) .increment(stats.failed_agg_row_count as u64); + debug!( rows_processed = stats.rows_processed, completed_agg_row_count = stats.completed_agg_row_count, @@ -364,6 +378,7 @@ impl Cleaner for WebhookCleaner { } } Err(error) => { + metrics::counter!("webhook_cleanup_failures",).increment(1); error!(error = ?error, "WebhookCleaner::cleanup failed"); } } diff --git a/hook-producer/src/handlers/app.rs b/hook-producer/src/handlers/app.rs index b4d099c..2cafc4a 100644 --- a/hook-producer/src/handlers/app.rs +++ b/hook-producer/src/handlers/app.rs @@ -1,23 +1,13 @@ use axum::{routing, Router}; -use metrics_exporter_prometheus::PrometheusHandle; -use hook_common::metrics; use hook_common::pgqueue::PgQueue; use super::webhook; -pub fn app(pg_pool: PgQueue, metrics: Option<PrometheusHandle>) -> Router { - Router::new() +pub fn add_routes(router: Router, pg_pool: PgQueue) -> Router { + router .route("/", routing::get(index)) - .route( - "/metrics", - routing::get(move || match metrics { - Some(ref recorder_handle) => std::future::ready(recorder_handle.render()), - None => std::future::ready("no metrics recorder installed".to_owned()), - }), - ) .route("/webhook", routing::post(webhook::post).with_state(pg_pool)) - .layer(axum::middleware::from_fn(metrics::track_metrics)) } pub async fn index() -> &'static str { @@ -42,7 +32,7 @@ mod tests { .await 
.expect("failed to construct pg_queue"); - let app = app(pg_queue, None); + let app = add_routes(Router::new(), pg_queue); let response = app .oneshot(Request::builder().uri("/").body(Body::empty()).unwrap()) diff --git a/hook-producer/src/handlers/mod.rs b/hook-producer/src/handlers/mod.rs index 88f9671..e392f8a 100644 --- a/hook-producer/src/handlers/mod.rs +++ b/hook-producer/src/handlers/mod.rs @@ -1,4 +1,4 @@ mod app; mod webhook; -pub use app::app; +pub use app::add_routes; diff --git a/hook-producer/src/handlers/webhook.rs b/hook-producer/src/handlers/webhook.rs index ab1552c..62a4aaa 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-producer/src/handlers/webhook.rs @@ -1,3 +1,5 @@ +use std::time::Instant; + use axum::{extract::State, http::StatusCode, Json}; use hook_common::webhook::{WebhookJobMetadata, WebhookJobParameters}; use serde_derive::Deserialize; @@ -61,8 +63,13 @@ pub async fn post( url_hostname.as_str(), ); + let start_time = Instant::now(); + pg_queue.enqueue(job).await.map_err(internal_error)?; + let elapsed_time = start_time.elapsed().as_secs_f64(); + metrics::histogram!("webhook_producer_enqueue").record(elapsed_time); + Ok(Json(WebhookPostResponse { error: None })) } @@ -107,6 +114,7 @@ mod tests { use axum::{ body::Body, http::{self, Request, StatusCode}, + Router, }; use hook_common::pgqueue::PgQueue; use hook_common::webhook::{HttpMethod, WebhookJobParameters}; use sqlx::PgPool; use std::collections; use tower::ServiceExt; // for `call`, `oneshot`, and `ready` - use crate::handlers::app; + use crate::handlers::app::add_routes; #[sqlx::test(migrations = "../migrations")] async fn webhook_success(db: PgPool) { .await .expect("failed to construct pg_queue"); - let app = app(pg_queue, None); + let app = add_routes(Router::new(), pg_queue); let mut headers = collections::HashMap::new(); headers.insert("Content-Type".to_owned(), "application/json".to_owned()); @@ -167,7 +175,7 @@ .await .expect("failed to construct pg_queue"); - let app = app(pg_queue, None); + let app = add_routes(Router::new(), pg_queue); let response = app .oneshot( @@ -206,7 +214,7 @@ .await .expect("failed to construct pg_queue"); - let app = app(pg_queue, None); + let app = add_routes(Router::new(), pg_queue); let response = app .oneshot( @@ -229,7 +237,7 @@ .await .expect("failed to construct pg_queue"); - let app = app(pg_queue, None); + let app = add_routes(Router::new(), pg_queue); let response = app .oneshot( @@ -252,7 +260,7 @@ .await .expect("failed to construct pg_queue"); - let app = app(pg_queue, None); + let app = add_routes(Router::new(), pg_queue); let bytes: Vec<u8> = vec![b'a'; 1_000_000 * 2]; let long_string = String::from_utf8_lossy(&bytes); diff --git a/hook-producer/src/main.rs b/hook-producer/src/main.rs index d0190c2..f078f2f 100644 --- a/hook-producer/src/main.rs +++ b/hook-producer/src/main.rs @@ -3,7 +3,7 @@ use config::Config; use envconfig::Envconfig; use eyre::Result; -use hook_common::metrics; +use hook_common::metrics::setup_metrics_router; use hook_common::pgqueue::PgQueue; mod config; @@ -32,9 +32,8 @@ async fn main() { .await .expect("failed to initialize queue"); - let recorder_handle = metrics::setup_metrics_recorder(); - - let app = handlers::app(pg_queue, Some(recorder_handle)); + let router = setup_metrics_router(); + let app = handlers::add_routes(router, pg_queue); match listen(app, config.bind()).await { Ok(_) => {} From 
35d7e331aee28fa5cf2f76196d5664b6d1620492 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 9 Jan 2024 09:33:54 -0700 Subject: [PATCH 095/130] Get total row counts in janitor for metrics and consistency checking (#28) --- hook-janitor/src/webhooks.rs | 114 ++++++++++++++++++++++++++++------- 1 file changed, 92 insertions(+), 22 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 228187a..de02d07 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -9,7 +9,7 @@ use rdkafka::producer::{FutureProducer, FutureRecord}; use serde_json::error::Error as SerdeError; use sqlx::postgres::{PgPool, PgPoolOptions, Postgres}; use sqlx::types::{chrono, Uuid}; -use sqlx::Transaction; +use sqlx::{Row, Transaction}; use thiserror::Error; use tracing::{debug, error}; @@ -24,6 +24,8 @@ pub enum WebhookCleanerError { PoolCreationError { error: sqlx::Error }, #[error("failed to acquire conn and start txn: {error}")] StartTxnError { error: sqlx::Error }, + #[error("failed to get row count: {error}")] + GetRowCountError { error: sqlx::Error }, #[error("failed to get completed rows: {error}")] GetCompletedRowsError { error: sqlx::Error }, #[error("failed to get failed rows: {error}")] @@ -36,6 +38,10 @@ pub enum WebhookCleanerError { KafkaProduceCanceled, #[error("failed to delete rows: {error}")] DeleteRowsError { error: sqlx::Error }, + #[error("attempted to delete a different number of rows than expected")] + DeleteConsistencyError, + #[error("failed to rollback txn: {error}")] + RollbackTxnError { error: sqlx::Error }, #[error("failed to commit txn: {error}")] CommitTxnError { error: sqlx::Error }, } @@ -125,8 +131,10 @@ struct SerializableTxn<'a>(Transaction<'a, Postgres>); struct CleanupStats { rows_processed: u64, - completed_agg_row_count: usize, - failed_agg_row_count: usize, + completed_row_count: u64, + completed_agg_row_count: u64, + failed_row_count: u64, + failed_agg_row_count: u64, } impl WebhookCleaner { @@ -188,7 +196,32 @@ impl WebhookCleaner { Ok(SerializableTxn(tx)) } - async fn get_completed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result<Vec<CompletedRow>> { + async fn get_row_count_for_status( + &self, + tx: &mut SerializableTxn<'_>, + status: &str, + ) -> Result<u64> { + let base_query = r#" + SELECT count(*) FROM job_queue + WHERE queue = $1 + AND status = $2::job_status; + "#; + + let count: i64 = sqlx::query(base_query) + .bind(&self.queue_name) + .bind(status) + .fetch_one(&mut *tx.0) + .await + .map_err(|e| WebhookCleanerError::GetRowCountError { error: e })? 
+ .get(0); + + Ok(count as u64) + } + + async fn get_completed_agg_rows( + &self, + tx: &mut SerializableTxn<'_>, + ) -> Result<Vec<CompletedRow>> { let base_query = r#" SELECT DATE_TRUNC('hour', last_attempt_finished_at) AS hour, (metadata->>'team_id')::bigint AS team_id, @@ -210,7 +243,7 @@ impl WebhookCleaner { Ok(rows) } - async fn get_failed_rows(&self, tx: &mut SerializableTxn<'_>) -> Result<Vec<FailedRow>> { + async fn get_failed_agg_rows(&self, tx: &mut SerializableTxn<'_>) -> Result<Vec<FailedRow>> { let base_query = r#" SELECT DATE_TRUNC('hour', last_attempt_finished_at) AS hour, (metadata->>'team_id')::bigint AS team_id, @@ -294,6 +327,14 @@ impl WebhookCleaner { Ok(result.rows_affected()) } + async fn rollback_txn(&self, tx: SerializableTxn<'_>) -> Result<()> { + tx.0.rollback() + .await + .map_err(|e| WebhookCleanerError::RollbackTxnError { error: e })?; + + Ok(()) + } + async fn commit_txn(&self, tx: SerializableTxn<'_>) -> Result<()> { tx.0.commit() .await .map_err(|e| WebhookCleanerError::CommitTxnError { error: e })?; @@ -315,33 +356,53 @@ impl WebhookCleaner { let mut tx = self.start_serializable_txn().await?; - let completed_agg_row_count = { - let completed_rows = self.get_completed_rows(&mut tx).await?; - let row_count = completed_rows.len(); + let (completed_row_count, completed_agg_row_count) = { + let completed_row_count = self.get_row_count_for_status(&mut tx, "completed").await?; + let completed_agg_rows = self.get_completed_agg_rows(&mut tx).await?; + let agg_row_count = completed_agg_rows.len() as u64; let completed_app_metrics: Vec<AppMetric> = - completed_rows.into_iter().map(Into::into).collect(); + completed_agg_rows.into_iter().map(Into::into).collect(); self.send_metrics_to_kafka(completed_app_metrics).await?; - row_count + (completed_row_count, agg_row_count) }; - let failed_agg_row_count = { - let failed_rows = self.get_failed_rows(&mut tx).await?; - let row_count = failed_rows.len(); + let (failed_row_count, failed_agg_row_count) = { + let failed_row_count = self.get_row_count_for_status(&mut tx, "failed").await?; + let failed_agg_rows = self.get_failed_agg_rows(&mut tx).await?; + let agg_row_count = failed_agg_rows.len() as u64; let failed_app_metrics: Vec<AppMetric> = - failed_rows.into_iter().map(Into::into).collect(); + failed_agg_rows.into_iter().map(Into::into).collect(); self.send_metrics_to_kafka(failed_app_metrics).await?; - row_count + (failed_row_count, agg_row_count) }; - let mut rows_processed = 0; + let mut rows_deleted = 0; if completed_agg_row_count + failed_agg_row_count != 0 { - rows_processed = self.delete_observed_rows(&mut tx).await?; + rows_deleted = self.delete_observed_rows(&mut tx).await?; + + if rows_deleted != completed_row_count + failed_row_count { + // This should never happen, but if it does, we want to know about it (and abort the + // txn). 
+ error!( + attempted_rows_deleted = rows_deleted, + completed_row_count = completed_row_count, + failed_row_count = failed_row_count, + "WebhookCleaner::cleanup attempted to delete a different number of rows than expected" + ); + + self.rollback_txn(tx).await?; + + return Err(WebhookCleanerError::DeleteConsistencyError); + } + self.commit_txn(tx).await?; } Ok(CleanupStats { - rows_processed, + rows_processed: rows_deleted, + completed_row_count, completed_agg_row_count, + failed_row_count, failed_agg_row_count, }) } @@ -362,14 +423,20 @@ impl Cleaner for WebhookCleaner { metrics::counter!("webhook_cleanup_rows_processed",) .increment(stats.rows_processed); + metrics::counter!("webhook_cleanup_completed_row_count",) + .increment(stats.completed_row_count); metrics::counter!("webhook_cleanup_completed_agg_row_count",) - .increment(stats.completed_agg_row_count as u64); + .increment(stats.completed_agg_row_count); + metrics::counter!("webhook_cleanup_failed_row_count",) + .increment(stats.failed_row_count); metrics::counter!("webhook_cleanup_failed_agg_row_count",) - .increment(stats.failed_agg_row_count as u64); + .increment(stats.failed_agg_row_count); debug!( rows_processed = stats.rows_processed, + completed_row_count = stats.completed_row_count, completed_agg_row_count = stats.completed_agg_row_count, + failed_row_count = stats.failed_row_count, failed_agg_row_count = stats.failed_agg_row_count, "WebhookCleaner::cleanup finished" ); @@ -665,8 +732,11 @@ mod tests { // Important! Serializable txn is started here. let mut tx = webhook_cleaner.start_serializable_txn().await.unwrap(); - webhook_cleaner.get_completed_rows(&mut tx).await.unwrap(); - webhook_cleaner.get_failed_rows(&mut tx).await.unwrap(); + webhook_cleaner + .get_completed_agg_rows(&mut tx) + .await + .unwrap(); + webhook_cleaner.get_failed_agg_rows(&mut tx).await.unwrap(); // All 13 rows in the queue are visible from outside the txn. // The 11 the cleaner will process, plus 1 available and 1 running. 
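A note on the shape of the cleanup this patch establishes, because the ordering is what makes the reported numbers trustworthy: inside a single SERIALIZABLE transaction the janitor counts completed and failed rows per status, sends the hourly aggregates to Kafka, deletes the rows it observed, and refuses to commit unless the delete count matches the earlier per-status counts; on a mismatch it rolls back and surfaces DeleteConsistencyError. Below is a minimal sketch of the delete-and-verify step, assuming sqlx 0.7 against Postgres as pinned in the workspace; the function name and the reuse of sqlx::Error::Protocol are illustrative, not the janitor's actual API:

async fn delete_observed_rows_checked(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    queue_name: &str,
    expected_rows: u64,
) -> Result<u64, sqlx::Error> {
    // Delete exactly the completed/failed rows that were counted and
    // aggregated earlier in this same serializable transaction.
    let result = sqlx::query(
        "DELETE FROM job_queue \
         WHERE queue = $1 \
         AND status IN ('completed'::job_status, 'failed'::job_status)",
    )
    .bind(queue_name)
    .execute(&mut **tx)
    .await?;

    let deleted = result.rows_affected();
    if deleted != expected_rows {
        // Serializable isolation means no concurrent writer can have slipped
        // rows in between the counts and the delete, so a mismatch signals a
        // logic bug: report it and let the caller roll the transaction back.
        return Err(sqlx::Error::Protocol(format!(
            "expected to delete {expected_rows} rows, deleted {deleted}"
        )));
    }
    Ok(deleted)
}

Producing to Kafka before deleting matters for the same reason: if the produce fails, the transaction is abandoned and the rows survive untouched for the next janitor run.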
From 86e6a26ef049f522af5cd0774a92328b8c7161ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Tue, 9 Jan 2024 18:24:11 +0100 Subject: [PATCH 096/130] refactor: Rename producer and consumer images (#29) --- ...ocker-hook-producer.yml => docker-hook-api.yml} | 14 +++++++------- ...er-hook-consumer.yml => docker-hook-worker.yml} | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) rename .github/workflows/{docker-hook-producer.yml => docker-hook-api.yml} (80%) rename .github/workflows/{docker-hook-consumer.yml => docker-hook-worker.yml} (80%) diff --git a/.github/workflows/docker-hook-producer.yml b/.github/workflows/docker-hook-api.yml similarity index 80% rename from .github/workflows/docker-hook-producer.yml rename to .github/workflows/docker-hook-api.yml index ec2594f..8c645bc 100644 --- a/.github/workflows/docker-hook-producer.yml +++ b/.github/workflows/docker-hook-api.yml @@ -1,4 +1,4 @@ -name: Build hook-producer docker image +name: Build hook-api docker image on: workflow_dispatch: @@ -11,7 +11,7 @@ permissions: jobs: build: - name: build and publish hook-producer image + name: build and publish hook-api image runs-on: buildjet-4vcpu-ubuntu-2204-arm steps: @@ -25,7 +25,7 @@ jobs: id: meta uses: docker/metadata-action@v4 with: - images: ghcr.io/posthog/hook-producer + images: ghcr.io/posthog/hook-api tags: | type=ref,event=pr type=ref,event=branch @@ -44,8 +44,8 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push producer - id: docker_build_hook_producer + - name: Build and push api + id: docker_build_hook_api uses: docker/build-push-action@v4 with: context: ./ @@ -59,5 +59,5 @@ jobs: cache-to: type=gha,mode=max build-args: BIN=hook-producer - - name: Hook-producer image digest - run: echo ${{ steps.docker_build_hook_producer.outputs.digest }} + - name: Hook-api image digest + run: echo ${{ steps.docker_build_hook_api.outputs.digest }} diff --git a/.github/workflows/docker-hook-consumer.yml b/.github/workflows/docker-hook-worker.yml similarity index 80% rename from .github/workflows/docker-hook-consumer.yml rename to .github/workflows/docker-hook-worker.yml index db92074..a5d6c92 100644 --- a/.github/workflows/docker-hook-consumer.yml +++ b/.github/workflows/docker-hook-worker.yml @@ -1,4 +1,4 @@ -name: Build hook-consumer docker image +name: Build hook-worker docker image on: workflow_dispatch: @@ -11,7 +11,7 @@ permissions: jobs: build: - name: build and publish hook-consumer image + name: build and publish hook-worker image runs-on: buildjet-4vcpu-ubuntu-2204-arm steps: @@ -25,7 +25,7 @@ jobs: id: meta uses: docker/metadata-action@v4 with: - images: ghcr.io/posthog/hook-consumer + images: ghcr.io/posthog/hook-worker tags: | type=ref,event=pr type=ref,event=branch @@ -44,8 +44,8 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push consumer - id: docker_build_hook_consumer + - name: Build and push worker + id: docker_build_hook_worker uses: docker/build-push-action@v4 with: context: ./ @@ -59,5 +59,5 @@ jobs: cache-to: type=gha,mode=max build-args: BIN=hook-consumer - - name: Hook-consumer image digest - run: echo ${{ steps.docker_build_hook_consumer.outputs.digest }} + - name: Hook-worker image digest + run: echo ${{ steps.docker_build_hook_worker.outputs.digest }} From 29c2415466665b48c12b0dfa09b601b2006ef51f Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 9 Jan 2024 10:35:24 -0700 Subject: [PATCH 097/130] Login to 
DockerHub in CI (#30) --- .github/workflows/docker-hook-api.yml | 8 +++++++- .github/workflows/docker-hook-janitor.yml | 8 +++++++- .github/workflows/docker-hook-worker.yml | 8 +++++++- .github/workflows/docker-migrator.yml | 8 +++++++- .github/workflows/rust.yml | 6 ++++++ 5 files changed, 34 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker-hook-api.yml b/.github/workflows/docker-hook-api.yml index 8c645bc..abcd9f6 100644 --- a/.github/workflows/docker-hook-api.yml +++ b/.github/workflows/docker-hook-api.yml @@ -37,7 +37,13 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to Docker Hub + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io uses: docker/login-action@v2 with: registry: ghcr.io diff --git a/.github/workflows/docker-hook-janitor.yml b/.github/workflows/docker-hook-janitor.yml index b426d51..1f4cae9 100644 --- a/.github/workflows/docker-hook-janitor.yml +++ b/.github/workflows/docker-hook-janitor.yml @@ -37,7 +37,13 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to Docker Hub + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io uses: docker/login-action@v2 with: registry: ghcr.io diff --git a/.github/workflows/docker-hook-worker.yml b/.github/workflows/docker-hook-worker.yml index a5d6c92..25e4b3a 100644 --- a/.github/workflows/docker-hook-worker.yml +++ b/.github/workflows/docker-hook-worker.yml @@ -37,7 +37,13 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to Docker Hub + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io uses: docker/login-action@v2 with: registry: ghcr.io diff --git a/.github/workflows/docker-migrator.yml b/.github/workflows/docker-migrator.yml index 73d7afa..1ad3893 100644 --- a/.github/workflows/docker-migrator.yml +++ b/.github/workflows/docker-migrator.yml @@ -37,7 +37,13 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to Docker Hub + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io uses: docker/login-action@v2 with: registry: ghcr.io diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index b811c1a..f3aafb0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -43,6 +43,12 @@ jobs: with: toolchain: stable + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Stop/Start stack with Docker Compose shell: bash run: | From 2499ea6a1f63f0584403462869b181364d0a9db6 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 9 Jan 2024 12:26:46 -0700 Subject: [PATCH 098/130] Login to Docker before QEMU (#32) --- .github/workflows/docker-hook-api.yml | 26 +++++++++++------------ .github/workflows/docker-hook-janitor.yml | 26 +++++++++++------------ .github/workflows/docker-hook-worker.yml | 26 +++++++++++------------ .github/workflows/docker-migrator.yml | 26 +++++++++++------------ 4 files changed, 52 insertions(+), 52 deletions(-) diff --git 
a/.github/workflows/docker-hook-api.yml b/.github/workflows/docker-hook-api.yml index abcd9f6..5ae94f5 100644 --- a/.github/workflows/docker-hook-api.yml +++ b/.github/workflows/docker-hook-api.yml @@ -17,6 +17,19 @@ jobs: - name: Check Out Repo uses: actions/checkout@v3 + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU uses: docker/setup-qemu-action@v2 @@ -37,19 +50,6 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to DockerHub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to ghcr.io - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push api id: docker_build_hook_api uses: docker/build-push-action@v4 diff --git a/.github/workflows/docker-hook-janitor.yml b/.github/workflows/docker-hook-janitor.yml index 1f4cae9..12b9dde 100644 --- a/.github/workflows/docker-hook-janitor.yml +++ b/.github/workflows/docker-hook-janitor.yml @@ -18,6 +18,19 @@ jobs: - name: Check Out Repo uses: actions/checkout@v3 + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Set up QEMU uses: docker/setup-qemu-action@v2 @@ -37,19 +50,6 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to DockerHub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to ghcr.io - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push janitor id: docker_build_hook_janitor uses: docker/build-push-action@v4 diff --git a/.github/workflows/docker-hook-worker.yml b/.github/workflows/docker-hook-worker.yml index 25e4b3a..0526291 100644 --- a/.github/workflows/docker-hook-worker.yml +++ b/.github/workflows/docker-hook-worker.yml @@ -17,6 +17,19 @@ jobs: - name: Check Out Repo uses: actions/checkout@v3 + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU uses: docker/setup-qemu-action@v2 @@ -37,19 +50,6 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to DockerHub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to ghcr.io - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push worker id: docker_build_hook_worker uses: docker/build-push-action@v4 diff --git a/.github/workflows/docker-migrator.yml b/.github/workflows/docker-migrator.yml index 1ad3893..d239da4 100644 
--- a/.github/workflows/docker-migrator.yml +++ b/.github/workflows/docker-migrator.yml @@ -18,6 +18,19 @@ jobs: - name: Check Out Repo uses: actions/checkout@v3 + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Set up QEMU uses: docker/setup-qemu-action@v2 @@ -37,19 +50,6 @@ jobs: id: buildx uses: docker/setup-buildx-action@v2 - - name: Login to DockerHub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to ghcr.io - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push migrator id: docker_build_hook_migrator uses: docker/build-push-action@v4 From 46f9f93bb78450a055504e1562934481595f1d79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 10 Jan 2024 16:01:29 +0100 Subject: [PATCH 099/130] refactor: Rename producer for api and consumer for worker (#31) Co-authored-by: Brett Hoerner --- .github/workflows/docker-hook-api.yml | 2 +- .github/workflows/docker-hook-worker.yml | 2 +- Cargo.lock | 44 +++++++++---------- Cargo.toml | 2 +- {hook-producer => hook-api}/Cargo.toml | 2 +- {hook-producer => hook-api}/src/config.rs | 0 .../src/handlers/app.rs | 4 +- .../src/handlers/mod.rs | 0 .../src/handlers/webhook.rs | 2 +- {hook-producer => hook-api}/src/main.rs | 4 +- hook-common/src/webhook.rs | 4 +- hook-janitor/src/kafka_producer.rs | 5 +-- {hook-consumer => hook-worker}/Cargo.toml | 2 +- {hook-consumer => hook-worker}/README.md | 2 +- {hook-consumer => hook-worker}/src/config.rs | 4 +- {hook-consumer => hook-worker}/src/error.rs | 4 +- {hook-consumer => hook-worker}/src/lib.rs | 2 +- {hook-consumer => hook-worker}/src/main.rs | 14 +++--- .../consumer.rs => hook-worker/src/worker.rs | 42 +++++++++--------- 19 files changed, 70 insertions(+), 71 deletions(-) rename {hook-producer => hook-api}/Cargo.toml (96%) rename {hook-producer => hook-api}/src/config.rs (100%) rename {hook-producer => hook-api}/src/handlers/app.rs (93%) rename {hook-producer => hook-api}/src/handlers/mod.rs (100%) rename {hook-producer => hook-api}/src/handlers/webhook.rs (99%) rename {hook-producer => hook-api}/src/main.rs (88%) rename {hook-consumer => hook-worker}/Cargo.toml (95%) rename {hook-consumer => hook-worker}/README.md (67%) rename {hook-consumer => hook-worker}/src/config.rs (96%) rename {hook-consumer => hook-worker}/src/error.rs (96%) rename {hook-consumer => hook-worker}/src/lib.rs (63%) rename {hook-consumer => hook-worker}/src/main.rs (78%) rename hook-consumer/src/consumer.rs => hook-worker/src/worker.rs (93%) diff --git a/.github/workflows/docker-hook-api.yml b/.github/workflows/docker-hook-api.yml index 5ae94f5..6331413 100644 --- a/.github/workflows/docker-hook-api.yml +++ b/.github/workflows/docker-hook-api.yml @@ -63,7 +63,7 @@ jobs: platforms: linux/arm64 cache-from: type=gha cache-to: type=gha,mode=max - build-args: BIN=hook-producer + build-args: BIN=hook-api - name: Hook-api image digest run: echo ${{ steps.docker_build_hook_api.outputs.digest }} diff --git a/.github/workflows/docker-hook-worker.yml b/.github/workflows/docker-hook-worker.yml index 0526291..1f06542 100644 --- 
a/.github/workflows/docker-hook-worker.yml +++ b/.github/workflows/docker-hook-worker.yml @@ -63,7 +63,7 @@ jobs: platforms: linux/arm64 cache-from: type=gha cache-to: type=gha,mode=max - build-args: BIN=hook-consumer + build-args: BIN=hook-worker - name: Hook-worker image digest run: echo ${{ steps.docker_build_hook_worker.outputs.digest }} diff --git a/Cargo.lock b/Cargo.lock index e4da816..2bf16f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -755,43 +755,46 @@ dependencies = [ ] [[package]] -name = "hook-common" +name = "hook-api" version = "0.1.0" dependencies = [ - "async-trait", "axum", - "chrono", - "http 0.2.11", + "envconfig", + "eyre", + "hook-common", + "http-body-util", "metrics", "metrics-exporter-prometheus", - "regex", - "reqwest", "serde", "serde_derive", "serde_json", "sqlx", - "thiserror", "tokio", - "uuid", + "tower", + "tracing", + "tracing-subscriber", + "url", ] [[package]] -name = "hook-consumer" +name = "hook-common" version = "0.1.0" dependencies = [ + "async-trait", + "axum", "chrono", - "envconfig", - "futures", - "hook-common", "http 0.2.11", "metrics", + "metrics-exporter-prometheus", + "regex", "reqwest", "serde", "serde_derive", + "serde_json", "sqlx", "thiserror", "tokio", - "url", + "uuid", ] [[package]] @@ -821,24 +824,21 @@ dependencies = [ ] [[package]] -name = "hook-producer" +name = "hook-worker" version = "0.1.0" dependencies = [ - "axum", + "chrono", "envconfig", - "eyre", + "futures", "hook-common", - "http-body-util", + "http 0.2.11", "metrics", - "metrics-exporter-prometheus", + "reqwest", "serde", "serde_derive", - "serde_json", "sqlx", + "thiserror", "tokio", - "tower", - "tracing", - "tracing-subscriber", "url", ] diff --git a/Cargo.toml b/Cargo.toml index b7cb8fa..0b48fb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace] resolver = "2" -members = ["hook-common", "hook-producer", "hook-consumer", "hook-janitor"] +members = ["hook-common", "hook-api", "hook-worker", "hook-janitor"] [workspace.dependencies] async-trait = "0.1.74" diff --git a/hook-producer/Cargo.toml b/hook-api/Cargo.toml similarity index 96% rename from hook-producer/Cargo.toml rename to hook-api/Cargo.toml index f4b1165..96c897c 100644 --- a/hook-producer/Cargo.toml +++ b/hook-api/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "hook-producer" +name = "hook-api" version = "0.1.0" edition = "2021" diff --git a/hook-producer/src/config.rs b/hook-api/src/config.rs similarity index 100% rename from hook-producer/src/config.rs rename to hook-api/src/config.rs diff --git a/hook-producer/src/handlers/app.rs b/hook-api/src/handlers/app.rs similarity index 93% rename from hook-producer/src/handlers/app.rs rename to hook-api/src/handlers/app.rs index 2cafc4a..73c29e1 100644 --- a/hook-producer/src/handlers/app.rs +++ b/hook-api/src/handlers/app.rs @@ -11,7 +11,7 @@ pub fn add_routes(router: Router, pg_pool: PgQueue) -> Router { } pub async fn index() -> &'static str { - "rusty-hook producer" + "rusty-hook api" } #[cfg(test)] @@ -42,6 +42,6 @@ mod tests { assert_eq!(response.status(), StatusCode::OK); let body = response.into_body().collect().await.unwrap().to_bytes(); - assert_eq!(&body[..], b"rusty-hook producer"); + assert_eq!(&body[..], b"rusty-hook api"); } } diff --git a/hook-producer/src/handlers/mod.rs b/hook-api/src/handlers/mod.rs similarity index 100% rename from hook-producer/src/handlers/mod.rs rename to hook-api/src/handlers/mod.rs diff --git a/hook-producer/src/handlers/webhook.rs b/hook-api/src/handlers/webhook.rs similarity index 99% rename from 
hook-producer/src/handlers/webhook.rs rename to hook-api/src/handlers/webhook.rs index 62a4aaa..16ebc6d 100644 --- a/hook-producer/src/handlers/webhook.rs +++ b/hook-api/src/handlers/webhook.rs @@ -68,7 +68,7 @@ pub async fn post( pg_queue.enqueue(job).await.map_err(internal_error)?; let elapsed_time = start_time.elapsed().as_secs_f64(); - metrics::histogram!("webhook_producer_enqueue").record(elapsed_time); + metrics::histogram!("webhook_api_enqueue").record(elapsed_time); Ok(Json(WebhookPostResponse { error: None })) } diff --git a/hook-producer/src/main.rs b/hook-api/src/main.rs similarity index 88% rename from hook-producer/src/main.rs rename to hook-api/src/main.rs index f078f2f..dc2739a 100644 --- a/hook-producer/src/main.rs +++ b/hook-api/src/main.rs @@ -24,7 +24,7 @@ async fn main() { let config = Config::init_from_env().expect("failed to load configuration from env"); let pg_queue = PgQueue::new( - // TODO: Coupling the queue name to the PgQueue object doesn't seem ideal from the producer + // TODO: Coupling the queue name to the PgQueue object doesn't seem ideal from the api // side, but we don't need more than one queue for now. &config.queue_name, &config.database_url, @@ -37,6 +37,6 @@ async fn main() { match listen(app, config.bind()).await { Ok(_) => {} - Err(e) => tracing::error!("failed to start hook-producer http server, {}", e), + Err(e) => tracing::error!("failed to start hook-api http server, {}", e), } } diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index bb1b5be..475f3de 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -117,7 +117,7 @@ impl From<&HttpMethod> for http::Method { } } -/// `JobParameters` required for the `WebhookConsumer` to execute a webhook. +/// `JobParameters` required for the `WebhookWorker` to execute a webhook. /// These parameters should match the exported Webhook interface that PostHog plugins. /// implement. See: https://github.com/PostHog/plugin-scaffold/blob/main/src/types.ts#L15. #[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] @@ -128,7 +128,7 @@ pub struct WebhookJobParameters { pub url: String, } -/// `JobMetadata` required for the `WebhookConsumer` to execute a webhook. +/// `JobMetadata` required for the `WebhookWorker` to execute a webhook. /// These should be set if the Webhook is associated with a plugin `composeWebhook` invocation. 
#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct WebhookJobMetadata { diff --git a/hook-janitor/src/kafka_producer.rs b/hook-janitor/src/kafka_producer.rs index 4845e94..1d0144c 100644 --- a/hook-janitor/src/kafka_producer.rs +++ b/hook-janitor/src/kafka_producer.rs @@ -38,8 +38,7 @@ pub async fn create_kafka_producer( }; debug!("rdkafka configuration: {:?}", client_config); - let producer: FutureProducer<KafkaContext> = - client_config.create_with_context(KafkaContext {})?; + let api: FutureProducer<KafkaContext> = client_config.create_with_context(KafkaContext {})?; - Ok(producer) + Ok(api) } diff --git a/hook-consumer/Cargo.toml b/hook-worker/Cargo.toml similarity index 95% rename from hook-consumer/Cargo.toml rename to hook-worker/Cargo.toml index fc8ee4a..c84d348 100644 --- a/hook-consumer/Cargo.toml +++ b/hook-worker/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "hook-consumer" +name = "hook-worker" version = "0.1.0" edition = "2021" diff --git a/hook-consumer/README.md b/hook-worker/README.md similarity index 67% rename from hook-consumer/README.md rename to hook-worker/README.md index 1adab6e..9b1884a 100644 --- a/hook-consumer/README.md +++ b/hook-worker/README.md @@ -1,2 +1,2 @@ -# hook-consumer +# hook-worker Consume and process webhook jobs diff --git a/hook-consumer/src/config.rs b/hook-worker/src/config.rs similarity index 96% rename from hook-consumer/src/config.rs rename to hook-worker/src/config.rs index 01f94e7..6f16c89 100644 --- a/hook-consumer/src/config.rs +++ b/hook-worker/src/config.rs @@ -14,8 +14,8 @@ pub struct Config { #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] pub database_url: String, - #[envconfig(default = "consumer")] - pub consumer_name: String, + #[envconfig(default = "worker")] + pub worker_name: String, #[envconfig(default = "default")] pub queue_name: String, diff --git a/hook-consumer/src/error.rs b/hook-worker/src/error.rs similarity index 96% rename from hook-consumer/src/error.rs rename to hook-worker/src/error.rs index b05d476..d025998 100644 --- a/hook-consumer/src/error.rs +++ b/hook-worker/src/error.rs @@ -3,7 +3,7 @@ use std::time; use hook_common::pgqueue; use thiserror::Error; -/// Enumeration of errors related to webhook job processing in the WebhookConsumer. +/// Enumeration of errors related to webhook job processing in the WebhookWorker. #[derive(Error, Debug)] pub enum WebhookError { #[error("{0} is not a valid HttpMethod")] @@ -23,7 +23,7 @@ pub enum WebhookError { /// Enumeration of errors related to initialization and consumption of webhook jobs. 
#[derive(Error, Debug)] -pub enum ConsumerError { +pub enum WorkerError { #[error("timed out while waiting for jobs to be available")] TimeoutError, #[error("an error occurred in the underlying queue")] diff --git a/hook-consumer/src/lib.rs b/hook-worker/src/lib.rs similarity index 63% rename from hook-consumer/src/lib.rs rename to hook-worker/src/lib.rs index b99481b..22823c9 100644 --- a/hook-consumer/src/lib.rs +++ b/hook-worker/src/lib.rs @@ -1,3 +1,3 @@ pub mod config; -pub mod consumer; pub mod error; +pub mod worker; diff --git a/hook-consumer/src/main.rs b/hook-worker/src/main.rs similarity index 78% rename from hook-consumer/src/main.rs rename to hook-worker/src/main.rs index 4182348..10b34d7 100644 --- a/hook-consumer/src/main.rs +++ b/hook-worker/src/main.rs @@ -4,12 +4,12 @@ use envconfig::Envconfig; use hook_common::{ metrics::serve, metrics::setup_metrics_router, pgqueue::PgQueue, retry::RetryPolicy, }; -use hook_consumer::config::Config; -use hook_consumer::consumer::WebhookConsumer; -use hook_consumer::error::ConsumerError; +use hook_worker::config::Config; +use hook_worker::error::WorkerError; +use hook_worker::worker::WebhookWorker; #[tokio::main] -async fn main() -> Result<(), ConsumerError> { +async fn main() -> Result<(), WorkerError> { let config = Config::init_from_env().expect("Invalid configuration:"); let retry_policy = RetryPolicy::build( @@ -23,8 +23,8 @@ async fn main() -> Result<(), ConsumerError> { .await .expect("failed to initialize queue"); - let consumer = WebhookConsumer::new( - &config.consumer_name, + let worker = WebhookWorker::new( + &config.worker_name, &queue, config.poll_interval.0, config.request_timeout.0, @@ -40,7 +40,7 @@ async fn main() -> Result<(), ConsumerError> { .expect("failed to start serving metrics"); }); - consumer.run(config.transactional).await?; + worker.run(config.transactional).await?; Ok(()) } diff --git a/hook-consumer/src/consumer.rs b/hook-worker/src/worker.rs similarity index 93% rename from hook-consumer/src/consumer.rs rename to hook-worker/src/worker.rs index 2114ef8..02ab7e9 100644 --- a/hook-consumer/src/consumer.rs +++ b/hook-worker/src/worker.rs @@ -11,7 +11,7 @@ use http::StatusCode; use reqwest::header; use tokio::sync; -use crate::error::{ConsumerError, WebhookError}; +use crate::error::{WebhookError, WorkerError}; /// A WebhookJob is any `PgQueueJob` with `WebhookJobParameters` and `WebhookJobMetadata`. trait WebhookJob: PgQueueJob + std::marker::Send { @@ -60,9 +60,9 @@ impl WebhookJob for PgJob { } } -/// A consumer to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. -pub struct WebhookConsumer<'p> { - /// An identifier for this consumer. Used to mark jobs we have consumed. +/// A worker to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. +pub struct WebhookWorker<'p> { + /// An identifier for this worker. Used to mark jobs we have consumed. name: String, /// The queue we will be dequeuing jobs from. 
queue: &'p PgQueue, @@ -76,7 +76,7 @@ pub struct WebhookConsumer<'p> { retry_policy: RetryPolicy, } -impl<'p> WebhookConsumer<'p> { +impl<'p> WebhookWorker<'p> { pub fn new( name: &str, queue: &'p PgQueue, @@ -95,7 +95,7 @@ impl<'p> WebhookConsumer<'p> { .default_headers(headers) .timeout(request_timeout) .build() - .expect("failed to construct reqwest client for webhook consumer"); + .expect("failed to construct reqwest client for webhook worker"); Self { name: name.to_owned(), @@ -110,7 +110,7 @@ impl<'p> WebhookConsumer<'p> { /// Wait until a job becomes available in our queue. async fn wait_for_job<'a>( &self, - ) -> Result<PgJob<WebhookJobParameters, WebhookJobMetadata>, ConsumerError> { + ) -> Result<PgJob<WebhookJobParameters, WebhookJobMetadata>, WorkerError> { let mut interval = tokio::time::interval(self.poll_interval); loop { @@ -125,7 +125,7 @@ impl<'p> WebhookConsumer<'p> { /// Wait until a job becomes available in our queue in transactional mode. async fn wait_for_job_tx<'a>( &self, - ) -> Result<PgTransactionJob<'a, WebhookJobParameters, WebhookJobMetadata>, ConsumerError> { + ) -> Result<PgTransactionJob<'a, WebhookJobParameters, WebhookJobMetadata>, WorkerError> { let mut interval = tokio::time::interval(self.poll_interval); loop { @@ -137,8 +137,8 @@ impl<'p> WebhookConsumer<'p> { } } - /// Run this consumer to continuously process any jobs that become available. - pub async fn run(&self, transactional: bool) -> Result<(), ConsumerError> { + /// Run this worker to continuously process any jobs that become available. + pub async fn run(&self, transactional: bool) -> Result<(), WorkerError> { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); if transactional { @@ -181,7 +181,7 @@ async fn spawn_webhook_job_processing_task( semaphore: Arc<sync::Semaphore>, retry_policy: RetryPolicy, webhook_job: W, -) -> tokio::task::JoinHandle<Result<(), ConsumerError>> { +) -> tokio::task::JoinHandle<Result<(), WorkerError>> { let permit = semaphore .acquire_owned() .await @@ -219,7 +219,7 @@ async fn process_webhook_job( client: reqwest::Client, webhook_job: W, retry_policy: &RetryPolicy, -) -> Result<(), ConsumerError> { +) -> Result<(), WorkerError> { let parameters = webhook_job.parameters(); let labels = [ @@ -245,7 +245,7 @@ async fn process_webhook_job( webhook_job .complete() .await - .map_err(|error| ConsumerError::PgJobError(error.to_string()))?; + .map_err(|error| WorkerError::PgJobError(error.to_string()))?; metrics::counter!("webhook_jobs_completed", &labels).increment(1); metrics::histogram!("webhook_jobs_processing_duration_seconds", &labels) @@ -257,7 +257,7 @@ async fn process_webhook_job( webhook_job .fail(WebhookJobError::new_parse(&e.to_string())) .await - .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + .map_err(|job_error| WorkerError::PgJobError(job_error.to_string()))?; metrics::counter!("webhook_jobs_failed", &labels).increment(1); @@ -267,7 +267,7 @@ async fn process_webhook_job( webhook_job .fail(WebhookJobError::new_parse(&e)) .await - .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + .map_err(|job_error| WorkerError::PgJobError(job_error.to_string()))?; metrics::counter!("webhook_jobs_failed", &labels).increment(1); @@ -277,7 +277,7 @@ async fn process_webhook_job( webhook_job .fail(WebhookJobError::new_parse(&e.to_string())) .await - .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + .map_err(|job_error| WorkerError::PgJobError(job_error.to_string()))?; metrics::counter!("webhook_jobs_failed", &labels).increment(1); @@ -304,20 +304,20 @@ async fn process_webhook_job( webhook_job .fail(WebhookJobError::from(&error)) .await - .map_err(|job_error| 
WorkerError::PgJobError(job_error.to_string()))?; metrics::counter!("webhook_jobs_failed", &labels).increment(1); Ok(()) } - Err(job_error) => Err(ConsumerError::PgJobError(job_error.to_string())), + Err(job_error) => Err(WorkerError::PgJobError(job_error.to_string())), } } Err(WebhookError::NonRetryableRetryableRequestError(error)) => { webhook_job .fail(WebhookJobError::from(&error)) .await - .map_err(|job_error| ConsumerError::PgJobError(job_error.to_string()))?; + .map_err(|job_error| WorkerError::PgJobError(job_error.to_string()))?; metrics::counter!("webhook_jobs_failed", &labels).increment(1); @@ -512,7 +512,7 @@ mod tests { ) .await .expect("failed to enqueue job"); - let consumer = WebhookConsumer::new( + let worker = WebhookWorker::new( &worker_id, &queue, time::Duration::from_millis(100), @@ -521,7 +521,7 @@ mod tests { RetryPolicy::default(), ); - let consumed_job = consumer + let consumed_job = worker .wait_for_job() .await .expect("failed to wait and read job"); From d6cc49f4d290a8125a3dc390c3664511c0d68694 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 10 Jan 2024 08:53:09 -0700 Subject: [PATCH 100/130] Sync default ports with charts (#33) --- hook-api/src/config.rs | 2 +- hook-janitor/src/config.rs | 2 +- hook-worker/src/config.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hook-api/src/config.rs b/hook-api/src/config.rs index 8daf04e..3fe88b3 100644 --- a/hook-api/src/config.rs +++ b/hook-api/src/config.rs @@ -5,7 +5,7 @@ pub struct Config { #[envconfig(from = "BIND_HOST", default = "0.0.0.0")] pub host: String, - #[envconfig(from = "BIND_PORT", default = "8000")] + #[envconfig(from = "BIND_PORT", default = "3300")] pub port: u16, #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] diff --git a/hook-janitor/src/config.rs b/hook-janitor/src/config.rs index 64db0e6..852b7cf 100644 --- a/hook-janitor/src/config.rs +++ b/hook-janitor/src/config.rs @@ -5,7 +5,7 @@ pub struct Config { #[envconfig(from = "BIND_HOST", default = "0.0.0.0")] pub host: String, - #[envconfig(from = "BIND_PORT", default = "8000")] + #[envconfig(from = "BIND_PORT", default = "3302")] pub port: u16, #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] diff --git a/hook-worker/src/config.rs b/hook-worker/src/config.rs index 6f16c89..8b2b4ba 100644 --- a/hook-worker/src/config.rs +++ b/hook-worker/src/config.rs @@ -8,7 +8,7 @@ pub struct Config { #[envconfig(from = "BIND_HOST", default = "0.0.0.0")] pub host: String, - #[envconfig(from = "BIND_PORT", default = "8001")] + #[envconfig(from = "BIND_PORT", default = "3301")] pub port: u16, #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")] From 878f11201df2e5777967229f176e971c39d00a9e Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 10 Jan 2024 09:39:01 -0700 Subject: [PATCH 101/130] Log errors in worker (#34) --- Cargo.lock | 2 ++ hook-worker/Cargo.toml | 2 ++ hook-worker/src/main.rs | 2 ++ hook-worker/src/worker.rs | 9 ++++++++- 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 2bf16f1..d242eca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -839,6 +839,8 @@ dependencies = [ "sqlx", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "url", ] diff --git a/hook-worker/Cargo.toml b/hook-worker/Cargo.toml index c84d348..f694898 100644 --- a/hook-worker/Cargo.toml +++ b/hook-worker/Cargo.toml @@ -15,5 +15,7 @@ serde = { workspace = true } serde_derive = { workspace = true } sqlx = { 
workspace = true } thiserror = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } tokio = { workspace = true } url = { version = "2.2" } diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index 10b34d7..fbd4746 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -10,6 +10,8 @@ use hook_worker::worker::WebhookWorker; #[tokio::main] async fn main() -> Result<(), WorkerError> { + tracing_subscriber::fmt::init(); + let config = Config::init_from_env().expect("Invalid configuration:"); let retry_policy = RetryPolicy::build( diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index 02ab7e9..cc5082e 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -10,6 +10,7 @@ use hook_common::{ use http::StatusCode; use reqwest::header; use tokio::sync; +use tracing::error; use crate::error::{WebhookError, WorkerError}; @@ -197,7 +198,13 @@ async fn spawn_webhook_job_processing_task( tokio::spawn(async move { let result = process_webhook_job(client, webhook_job, &retry_policy).await; drop(permit); - result + match result { + Ok(_) => Ok(()), + Err(error) => { + error!("failed to process webhook job: {}", error); + Err(error) + } + } }) } From b0966788b0b959f6d286bc9b91d48d23159e925f Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 10 Jan 2024 09:58:01 -0700 Subject: [PATCH 102/130] Log PgJobError String (#35) --- hook-worker/src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hook-worker/src/error.rs b/hook-worker/src/error.rs index d025998..614fe72 100644 --- a/hook-worker/src/error.rs +++ b/hook-worker/src/error.rs @@ -28,6 +28,6 @@ pub enum WorkerError { TimeoutError, #[error("an error occurred in the underlying queue")] QueueError(#[from] pgqueue::PgQueueError), - #[error("an error occurred in the underlying job")] + #[error("an error occurred in the underlying job: {0}")] PgJobError(String), } From 35877d3d9d94738197312525ffc4290d2bb56542 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 10 Jan 2024 10:27:52 -0700 Subject: [PATCH 103/130] Install ca-certificates (#37) --- Dockerfile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Dockerfile b/Dockerfile index 959fd17..74682f6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,6 +21,13 @@ COPY . . 
RUN cargo build --release --bin $BIN FROM debian:bullseye-20230320-slim AS runtime + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + "ca-certificates" \ + && \ + rm -rf /var/lib/apt/lists/* + ARG BIN ENV ENTRYPOINT=/usr/local/bin/$BIN WORKDIR app From 5011f18a44e7ea69f8293152367c0d82e8f92523 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 10 Jan 2024 18:34:05 +0100 Subject: [PATCH 104/130] fix: Syntax error in fail method (#38) --- hook-common/src/pgqueue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 35f5b4a..d3a409b 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -172,7 +172,7 @@ UPDATE job_queue SET last_attempt_finished_at = NOW(), - status = 'failed'::job_status + status = 'failed'::job_status, errors = array_append(errors, $3) WHERE queue = $1 From 9b7d313433876ec685b0d6dc1136252ff2af7c18 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 11 Jan 2024 09:42:10 -0700 Subject: [PATCH 105/130] Log productive janitor runs at info level (#40) --- hook-janitor/src/webhooks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index de02d07..6f28a15 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -11,7 +11,7 @@ use sqlx::postgres::{PgPool, PgPoolOptions, Postgres}; use sqlx::types::{chrono, Uuid}; use sqlx::{Row, Transaction}; use thiserror::Error; -use tracing::{debug, error}; +use tracing::{debug, error, info}; use crate::cleanup::Cleaner; use crate::kafka_producer::KafkaContext; @@ -432,7 +432,7 @@ impl Cleaner for WebhookCleaner { metrics::counter!("webhook_cleanup_failed_agg_row_count",) .increment(stats.failed_agg_row_count); - debug!( + info!( rows_processed = stats.rows_processed, completed_row_count = stats.completed_row_count, completed_agg_row_count = stats.completed_agg_row_count, From 677a094f64908ddced814f1a569d2ba1bdaa70e3 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 11 Jan 2024 09:45:47 -0700 Subject: [PATCH 106/130] Drop target from Prom labels (#41) --- hook-worker/src/worker.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index cc5082e..8d8c824 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -188,10 +188,7 @@ async fn spawn_webhook_job_processing_task( .await .expect("semaphore has been closed"); - let labels = [ - ("queue", webhook_job.queue()), - ("target", webhook_job.target()), - ]; + let labels = [("queue", webhook_job.queue())]; metrics::counter!("webhook_jobs_total", &labels).increment(1); @@ -229,10 +226,7 @@ async fn process_webhook_job( ) -> Result<(), WorkerError> { let parameters = webhook_job.parameters(); - let labels = [ - ("queue", webhook_job.queue()), - ("target", webhook_job.target()), - ]; + let labels = [("queue", webhook_job.queue())]; let now = tokio::time::Instant::now(); From 2437bd55567a43adbd05397919368334f322cf58 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 11 Jan 2024 09:55:10 -0700 Subject: [PATCH 107/130] Add user-agent (#42) --- hook-worker/src/worker.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index 8d8c824..77c6adb 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -94,6 +94,7 @@ impl<'p> WebhookWorker<'p> { let client = 
reqwest::Client::builder() .default_headers(headers) + .user_agent("PostHog Webhook Worker") .timeout(request_timeout) .build() .expect("failed to construct reqwest client for webhook worker"); From d5dd35d9017bf0f01f65ed0ea38dd54a6faca47a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Thu, 11 Jan 2024 17:55:28 +0100 Subject: [PATCH 108/130] feat: Set idle tx timeout in migration (#39) --- migrations/20240110180056_set_idle_in_transaction_timeout.sql | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 migrations/20240110180056_set_idle_in_transaction_timeout.sql diff --git a/migrations/20240110180056_set_idle_in_transaction_timeout.sql b/migrations/20240110180056_set_idle_in_transaction_timeout.sql new file mode 100644 index 0000000..e171131 --- /dev/null +++ b/migrations/20240110180056_set_idle_in_transaction_timeout.sql @@ -0,0 +1,2 @@ +-- If running worker in transactional mode, this ensures we clean up any open transactions. +ALTER USER current_user SET idle_in_transaction_session_timeout = '2min'; From 1982b0d8d9b0a45ddc3438c17fab369ad1acae37 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 11 Jan 2024 10:15:58 -0700 Subject: [PATCH 109/130] Fix app_metrics topic, don't encode null error_uuid (#43) --- hook-common/src/kafka_messages/app_metrics.rs | 1 + hook-janitor/src/config.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 9acc411..13f4f2e 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -62,6 +62,7 @@ pub struct AppMetric { pub successes: u32, pub successes_on_retry: u32, pub failures: u32, + #[serde(skip_serializing_if = "Option::is_none")] pub error_uuid: Option, #[serde( serialize_with = "serialize_error_type", diff --git a/hook-janitor/src/config.rs b/hook-janitor/src/config.rs index 852b7cf..252a670 100644 --- a/hook-janitor/src/config.rs +++ b/hook-janitor/src/config.rs @@ -44,7 +44,7 @@ pub struct KafkaConfig { #[envconfig(default = "false")] pub kafka_tls: bool, - #[envconfig(default = "app_metrics")] + #[envconfig(default = "clickhouse_app_metrics")] pub app_metrics_topic: String, #[envconfig(default = "plugin_log_entries")] From 51a5c5f0211291075489b36143c3184a1e16d6a1 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Fri, 12 Jan 2024 04:03:30 -0700 Subject: [PATCH 110/130] Remove idle_in_transaction_session_timeout migration (#44) --- migrations/20240110180056_set_idle_in_transaction_timeout.sql | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 migrations/20240110180056_set_idle_in_transaction_timeout.sql diff --git a/migrations/20240110180056_set_idle_in_transaction_timeout.sql b/migrations/20240110180056_set_idle_in_transaction_timeout.sql deleted file mode 100644 index e171131..0000000 --- a/migrations/20240110180056_set_idle_in_transaction_timeout.sql +++ /dev/null @@ -1,2 +0,0 @@ --- If running worker in transactional mode, this ensures we clean up any open transactions. 
-ALTER USER current_user SET idle_in_transaction_session_timeout = '2min'; From 8900f50655a998d859d6d4560d87a00b67e482dd Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Fri, 12 Jan 2024 09:14:47 -0700 Subject: [PATCH 111/130] Use signed integers for plugin_id/plugin_config_id (#45) --- hook-common/src/kafka_messages/app_metrics.rs | 2 +- hook-common/src/kafka_messages/plugin_logs.rs | 4 ++-- hook-common/src/pgqueue.rs | 4 ++-- hook-common/src/webhook.rs | 4 ++-- hook-janitor/src/webhooks.rs | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/hook-common/src/kafka_messages/app_metrics.rs b/hook-common/src/kafka_messages/app_metrics.rs index 13f4f2e..f941f58 100644 --- a/hook-common/src/kafka_messages/app_metrics.rs +++ b/hook-common/src/kafka_messages/app_metrics.rs @@ -51,7 +51,7 @@ pub struct AppMetric { )] pub timestamp: DateTime, pub team_id: u32, - pub plugin_config_id: u32, + pub plugin_config_id: i32, #[serde(skip_serializing_if = "Option::is_none")] pub job_id: Option, #[serde( diff --git a/hook-common/src/kafka_messages/plugin_logs.rs b/hook-common/src/kafka_messages/plugin_logs.rs index e761fa4..fb83580 100644 --- a/hook-common/src/kafka_messages/plugin_logs.rs +++ b/hook-common/src/kafka_messages/plugin_logs.rs @@ -30,8 +30,8 @@ pub struct PluginLogEntry { pub type_: PluginLogEntryType, pub id: Uuid, pub team_id: u32, - pub plugin_id: u32, - pub plugin_config_id: u32, + pub plugin_id: i32, + pub plugin_config_id: i32, #[serde(serialize_with = "serialize_datetime")] pub timestamp: DateTime, #[serde(serialize_with = "serialize_message")] diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index d3a409b..fa2b5eb 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -731,8 +731,8 @@ mod tests { #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] struct JobMetadata { team_id: u32, - plugin_config_id: u32, - plugin_id: u32, + plugin_config_id: i32, + plugin_id: i32, } impl Default for JobMetadata { diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index 475f3de..11e0285 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -133,8 +133,8 @@ pub struct WebhookJobParameters { #[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub struct WebhookJobMetadata { pub team_id: u32, - pub plugin_id: u32, - pub plugin_config_id: u32, + pub plugin_id: i32, + pub plugin_config_id: i32, } /// An error originating during a Webhook Job invocation. 
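A brief aside on the u32 to i32 switch above: Postgres has no unsigned integer types, so these id columns come back signed, and keeping the Rust fields unsigned forces a fallible narrowing on every read. The janitor rows in the next file keep sqlx's `try_from` attribute for the fields that stay unsigned; a minimal, self-contained sketch of that mechanism, using an illustrative row type that is not part of this repository:

    use sqlx::FromRow;

    // sqlx decodes the column as i64 first, then applies TryFrom<i64> for
    // the field's type, so a negative value surfaces as a decode error
    // instead of silently wrapping.
    #[derive(FromRow, Debug)]
    struct ExampleRow {
        #[sqlx(try_from = "i64")]
        successes: u32, // unsigned: the conversion can fail at runtime
        plugin_config_id: i32, // signed: matches a Postgres INTEGER directly
    }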
diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 6f28a15..0c2941f 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -66,7 +66,7 @@ struct CompletedRow { #[sqlx(try_from = "i64")] team_id: u32, #[sqlx(try_from = "i64")] - plugin_config_id: u32, + plugin_config_id: i32, #[sqlx(try_from = "i64")] successes: u32, } @@ -100,7 +100,7 @@ struct FailedRow { #[sqlx(try_from = "i64")] team_id: u32, #[sqlx(try_from = "i64")] - plugin_config_id: u32, + plugin_config_id: i32, #[sqlx(json)] last_error: WebhookJobError, #[sqlx(try_from = "i64")] From 58d573a2b40db67344b0952aa4811a4d000aba33 Mon Sep 17 00:00:00 2001 From: Xavier Vello Date: Mon, 15 Jan 2024 17:35:59 +0100 Subject: [PATCH 112/130] feat: make sure metrics cover all axum endpoints (#46) --- Cargo.lock | 1 + hook-api/src/main.rs | 6 +++--- hook-common/src/metrics.rs | 6 +++--- hook-janitor/src/handlers/app.rs | 16 ++-------------- hook-janitor/src/main.rs | 5 ++--- hook-worker/Cargo.toml | 1 + hook-worker/src/main.rs | 5 +++-- 7 files changed, 15 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d242eca..5157ea1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -827,6 +827,7 @@ dependencies = [ name = "hook-worker" version = "0.1.0" dependencies = [ + "axum", "chrono", "envconfig", "futures", diff --git a/hook-api/src/main.rs b/hook-api/src/main.rs index dc2739a..4fbbdfb 100644 --- a/hook-api/src/main.rs +++ b/hook-api/src/main.rs @@ -3,7 +3,7 @@ use config::Config; use envconfig::Envconfig; use eyre::Result; -use hook_common::metrics::setup_metrics_router; +use hook_common::metrics::setup_metrics_routes; use hook_common::pgqueue::PgQueue; mod config; @@ -32,8 +32,8 @@ async fn main() { .await .expect("failed to initialize queue"); - let router = setup_metrics_router(); - let app = handlers::add_routes(router, pg_queue); + let app = handlers::add_routes(Router::new(), pg_queue); + let app = setup_metrics_routes(app); match listen(app, config.bind()).await { Ok(_) => {} diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index 1f57c5e..0e1ef2d 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -16,11 +16,11 @@ pub async fn serve(router: Router, bind: &str) -> Result<(), std::io::Error> { Ok(()) } -/// Build a Router for a metrics endpoint. -pub fn setup_metrics_router() -> Router { +/// Add the prometheus endpoint and middleware to a router, should be called last. 
+pub fn setup_metrics_routes(router: Router) -> Router { let recorder_handle = setup_metrics_recorder(); - Router::new() + router .route( "/metrics", get(move || std::future::ready(recorder_handle.render())), diff --git a/hook-janitor/src/handlers/app.rs b/hook-janitor/src/handlers/app.rs index 279fa0e..cab0e0d 100644 --- a/hook-janitor/src/handlers/app.rs +++ b/hook-janitor/src/handlers/app.rs @@ -1,19 +1,7 @@ use axum::{routing, Router}; -use metrics_exporter_prometheus::PrometheusHandle; -use hook_common::metrics; - -pub fn app(metrics: Option) -> Router { - Router::new() - .route("/", routing::get(index)) - .route( - "/metrics", - routing::get(move || match metrics { - Some(ref recorder_handle) => std::future::ready(recorder_handle.render()), - None => std::future::ready("no metrics recorder installed".to_owned()), - }), - ) - .layer(axum::middleware::from_fn(metrics::track_metrics)) +pub fn app() -> Router { + Router::new().route("/", routing::get(index)) } pub async fn index() -> &'static str { diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs index 7d7e223..63068c3 100644 --- a/hook-janitor/src/main.rs +++ b/hook-janitor/src/main.rs @@ -9,7 +9,7 @@ use std::{str::FromStr, time::Duration}; use tokio::sync::Semaphore; use webhooks::WebhookCleaner; -use hook_common::metrics; +use hook_common::metrics::setup_metrics_routes; mod cleanup; mod config; @@ -66,8 +66,7 @@ async fn main() { let cleanup_loop = Box::pin(cleanup_loop(cleaner, config.cleanup_interval_secs)); - let recorder_handle = metrics::setup_metrics_recorder(); - let app = handlers::app(Some(recorder_handle)); + let app = setup_metrics_routes(handlers::app()); let http_server = Box::pin(listen(app, config.bind())); match select(http_server, cleanup_loop).await { diff --git a/hook-worker/Cargo.toml b/hook-worker/Cargo.toml index f694898..11da0a8 100644 --- a/hook-worker/Cargo.toml +++ b/hook-worker/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] +axum = { workspace = true } chrono = { workspace = true } envconfig = { workspace = true } futures = "0.3" diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index fbd4746..cc17169 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -1,8 +1,9 @@ //! Consume `PgQueue` jobs to run webhook calls. 
+use axum::Router; use envconfig::Envconfig; use hook_common::{ - metrics::serve, metrics::setup_metrics_router, pgqueue::PgQueue, retry::RetryPolicy, + metrics::serve, metrics::setup_metrics_routes, pgqueue::PgQueue, retry::RetryPolicy, }; use hook_worker::config::Config; use hook_worker::error::WorkerError; @@ -36,7 +37,7 @@ async fn main() -> Result<(), WorkerError> { let bind = config.bind(); tokio::task::spawn(async move { - let router = setup_metrics_router(); + let router = setup_metrics_routes(Router::new()); serve(router, &bind) .await .expect("failed to start serving metrics"); From 9fa90fdf2358819c6a8e0f7e8a39f2c29c606eb6 Mon Sep 17 00:00:00 2001 From: Xavier Vello Date: Tue, 16 Jan 2024 17:51:40 +0100 Subject: [PATCH 113/130] improve janitor metrics (#48) --- hook-janitor/src/webhooks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 0c2941f..4248a47 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -412,15 +412,15 @@ impl WebhookCleaner { impl Cleaner for WebhookCleaner { async fn cleanup(&self) { let start_time = Instant::now(); + metrics::counter!("webhook_cleanup_attempts",).increment(1); match self.cleanup_impl().await { Ok(stats) => { - metrics::counter!("webhook_cleanup_runs",).increment(1); + metrics::counter!("webhook_cleanup_success",).increment(1); if stats.rows_processed > 0 { let elapsed_time = start_time.elapsed().as_secs_f64(); metrics::histogram!("webhook_cleanup_duration").record(elapsed_time); - metrics::counter!("webhook_cleanup_rows_processed",) .increment(stats.rows_processed); metrics::counter!("webhook_cleanup_completed_row_count",) From b5029a95b617ad6149514941321824a70f8295c6 Mon Sep 17 00:00:00 2001 From: Xavier Vello Date: Tue, 16 Jan 2024 18:50:43 +0100 Subject: [PATCH 114/130] feat: add readiness and liveness endpoints to all roles (#47) --- Cargo.lock | 37 +++ Cargo.toml | 1 + hook-api/src/handlers/app.rs | 2 + hook-common/Cargo.toml | 2 + hook-common/src/health.rs | 346 +++++++++++++++++++++++++++++ hook-common/src/lib.rs | 1 + hook-janitor/Cargo.toml | 1 + hook-janitor/src/handlers/app.rs | 11 +- hook-janitor/src/kafka_producer.rs | 21 +- hook-janitor/src/main.rs | 25 ++- hook-janitor/src/webhooks.rs | 7 +- hook-worker/Cargo.toml | 1 + hook-worker/src/main.rs | 19 +- hook-worker/src/worker.rs | 17 +- 14 files changed, 477 insertions(+), 14 deletions(-) create mode 100644 hook-common/src/health.rs diff --git a/Cargo.lock b/Cargo.lock index 5157ea1..17b608c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -355,6 +355,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] + [[package]] name = "digest" version = "0.10.7" @@ -793,7 +802,9 @@ dependencies = [ "serde_json", "sqlx", "thiserror", + "time", "tokio", + "tracing", "uuid", ] @@ -816,6 +827,7 @@ dependencies = [ "serde_json", "sqlx", "thiserror", + "time", "tokio", "tower", "tracing", @@ -839,6 +851,7 @@ dependencies = [ "serde_derive", "sqlx", "thiserror", + "time", "tokio", "tracing", "tracing-subscriber", @@ -1542,6 +1555,12 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +[[package]] +name = "powerfmt" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -2313,6 +2332,24 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +dependencies = [ + "deranged", + "powerfmt", + "serde", + "time-core", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + [[package]] name = "tinyvec" version = "1.6.0" diff --git a/Cargo.toml b/Cargo.toml index 0b48fb2..b400593 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ sqlx = { version = "0.7", features = [ "tls-native-tls", "uuid", ] } +time = { version = "0.3.20" } thiserror = { version = "1.0" } tokio = { version = "1.34.0", features = ["full"] } tower = "0.4.13" diff --git a/hook-api/src/handlers/app.rs b/hook-api/src/handlers/app.rs index 73c29e1..7b1e840 100644 --- a/hook-api/src/handlers/app.rs +++ b/hook-api/src/handlers/app.rs @@ -7,6 +7,8 @@ use super::webhook; pub fn add_routes(router: Router, pg_pool: PgQueue) -> Router { router .route("/", routing::get(index)) + .route("/_readiness", routing::get(index)) + .route("/_liveness", routing::get(index)) // No async loop for now, just check axum health .route("/webhook", routing::post(webhook::post).with_state(pg_pool)) } diff --git a/hook-common/Cargo.toml b/hook-common/Cargo.toml index 00c7bd2..ea7ce2f 100644 --- a/hook-common/Cargo.toml +++ b/hook-common/Cargo.toml @@ -18,8 +18,10 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } sqlx = { workspace = true } +time = { workspace = true } tokio = { workspace = true } thiserror = { workspace = true } +tracing = { workspace = true } uuid = { workspace = true } [dev-dependencies] diff --git a/hook-common/src/health.rs b/hook-common/src/health.rs new file mode 100644 index 0000000..c5c79c9 --- /dev/null +++ b/hook-common/src/health.rs @@ -0,0 +1,346 @@ +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use std::collections::HashMap; +use std::ops::Add; +use std::sync::{Arc, RwLock}; + +use time::Duration; +use tokio::sync::mpsc; +use tracing::{info, warn}; + +/// Health reporting for components of the service. +/// +/// FIXME: copied over from capture, make sure to keep in sync until we share the crate +/// +/// The capture server contains several asynchronous loops, and +/// the process can only be trusted with user data if all the +/// loops are properly running and reporting. +/// +/// HealthRegistry allows an arbitrary number of components to +/// be registered and report their health. The process' health +/// status is the combination of these individual health status: +/// - if any component is unhealthy, the process is unhealthy +/// - if all components recently reported healthy, the process is healthy +/// - if a component failed to report healthy for its defined deadline, +/// it is considered unhealthy, and the check fails. +/// +/// Trying to merge the k8s concepts of liveness and readiness in +/// a single state is full of foot-guns, so HealthRegistry does not +/// try to do it. Each probe should have its separate instance of +/// the registry to avoid confusions. 
+
+#[derive(Default, Debug)]
+pub struct HealthStatus {
+    /// The overall status: true if all components are healthy
+    pub healthy: bool,
+    /// Current status of each registered component, for display
+    pub components: HashMap<String, ComponentStatus>,
+}
+impl IntoResponse for HealthStatus {
+    /// Computes the axum status code based on the overall health status,
+    /// and prints each component status in the body for debugging.
+    fn into_response(self) -> Response {
+        let body = format!("{:?}", self);
+        match self.healthy {
+            true => (StatusCode::OK, body),
+            false => (StatusCode::INTERNAL_SERVER_ERROR, body),
+        }
+        .into_response()
+    }
+}
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum ComponentStatus {
+    /// Automatically set when a component is newly registered
+    Starting,
+    /// Recently reported healthy, will need to report again before the date
+    HealthyUntil(time::OffsetDateTime),
+    /// Reported unhealthy
+    Unhealthy,
+    /// Automatically set when the HealthyUntil deadline is reached
+    Stalled,
+}
+struct HealthMessage {
+    component: String,
+    status: ComponentStatus,
+}
+
+pub struct HealthHandle {
+    component: String,
+    deadline: Duration,
+    sender: mpsc::Sender<HealthMessage>,
+}
+
+impl HealthHandle {
+    /// Asynchronously report healthy, returns when the message is queued.
+    /// Must be called more frequently than the configured deadline.
+    pub async fn report_healthy(&self) {
+        self.report_status(ComponentStatus::HealthyUntil(
+            time::OffsetDateTime::now_utc().add(self.deadline),
+        ))
+        .await
+    }
+
+    /// Asynchronously report component status, returns when the message is queued.
+    pub async fn report_status(&self, status: ComponentStatus) {
+        let message = HealthMessage {
+            component: self.component.clone(),
+            status,
+        };
+        if let Err(err) = self.sender.send(message).await {
+            warn!("failed to report health status: {}", err)
+        }
+    }
+
+    /// Synchronously report as healthy, returns when the message is queued.
+    /// Must be called more frequently than the configured deadline.
+    pub fn report_healthy_blocking(&self) {
+        self.report_status_blocking(ComponentStatus::HealthyUntil(
+            time::OffsetDateTime::now_utc().add(self.deadline),
+        ))
+    }
+
+    /// Synchronously report component status, returns when the message is queued.
+    pub fn report_status_blocking(&self, status: ComponentStatus) {
+        let message = HealthMessage {
+            component: self.component.clone(),
+            status,
+        };
+        if let Err(err) = self.sender.blocking_send(message) {
+            warn!("failed to report health status: {}", err)
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct HealthRegistry {
+    name: String,
+    components: Arc<RwLock<HashMap<String, ComponentStatus>>>,
+    sender: mpsc::Sender<HealthMessage>,
+}
+
+impl HealthRegistry {
+    pub fn new(name: &str) -> Self {
+        let (tx, mut rx) = mpsc::channel::<HealthMessage>(16);
+        let registry = Self {
+            name: name.to_owned(),
+            components: Default::default(),
+            sender: tx,
+        };
+
+        let components = registry.components.clone();
+        tokio::spawn(async move {
+            while let Some(message) = rx.recv().await {
+                if let Ok(mut map) = components.write() {
+                    _ = map.insert(message.component, message.status);
+                } else {
+                    // Poisoned mutex: Just warn, the probes will fail and the process will restart
+                    warn!("poisoned HealthRegistry mutex")
+                }
+            }
+        });
+
+        registry
+    }
+
+    /// Registers a new component in the registry. The returned handle should be passed
+    /// to the component, to allow it to frequently report its health status.
+    pub async fn register(&self, component: String, deadline: time::Duration) -> HealthHandle {
+        let handle = HealthHandle {
+            component,
+            deadline,
+            sender: self.sender.clone(),
+        };
+        handle.report_status(ComponentStatus::Starting).await;
+        handle
+    }
+
+    /// Returns the overall process status, computed from the status of all the components
+    /// currently registered. Can be used as an axum handler.
+    pub fn get_status(&self) -> HealthStatus {
+        let components = self
+            .components
+            .read()
+            .expect("poisoned HealthRegistry mutex");
+
+        let result = HealthStatus {
+            healthy: !components.is_empty(), // unhealthy if no component has registered yet
+            components: Default::default(),
+        };
+        let now = time::OffsetDateTime::now_utc();
+
+        let result = components
+            .iter()
+            .fold(result, |mut result, (name, status)| {
+                match status {
+                    ComponentStatus::HealthyUntil(until) => {
+                        if until.gt(&now) {
+                            _ = result.components.insert(name.clone(), status.clone())
+                        } else {
+                            result.healthy = false;
+                            _ = result
+                                .components
+                                .insert(name.clone(), ComponentStatus::Stalled)
+                        }
+                    }
+                    _ => {
+                        result.healthy = false;
+                        _ = result.components.insert(name.clone(), status.clone())
+                    }
+                }
+                result
+            });
+        match result.healthy {
+            true => info!("{} health check ok", self.name),
+            false => warn!("{} health check failed: {:?}", self.name, result.components),
+        }
+        result
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::health::{ComponentStatus, HealthRegistry, HealthStatus};
+    use axum::http::StatusCode;
+    use axum::response::IntoResponse;
+    use std::ops::{Add, Sub};
+    use time::{Duration, OffsetDateTime};
+
+    async fn assert_or_retry<F>(check: F)
+    where
+        F: Fn() -> bool,
+    {
+        assert_or_retry_for_duration(check, Duration::seconds(5)).await
+    }
+
+    async fn assert_or_retry_for_duration<F>(check: F, timeout: Duration)
+    where
+        F: Fn() -> bool,
+    {
+        let deadline = OffsetDateTime::now_utc().add(timeout);
+        while !check() && OffsetDateTime::now_utc().lt(&deadline) {
+            tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+        }
+        assert!(check())
+    }
+    #[tokio::test]
+    async fn defaults_to_unhealthy() {
+        let registry = HealthRegistry::new("liveness");
+        assert!(!registry.get_status().healthy);
+    }
+
+    #[tokio::test]
+    async fn one_component() {
+        let registry = HealthRegistry::new("liveness");
+
+        // New components are registered in Starting
+        let handle = registry
+            .register("one".to_string(), Duration::seconds(30))
+            .await;
+        assert_or_retry(|| registry.get_status().components.len() == 1).await;
+        let mut status = registry.get_status();
+        assert!(!status.healthy);
+        assert_eq!(
+            status.components.get("one"),
+            Some(&ComponentStatus::Starting)
+        );
+
+        // Status goes healthy once the component reports
+        handle.report_healthy().await;
+        assert_or_retry(|| registry.get_status().healthy).await;
+        status = registry.get_status();
+        assert_eq!(status.components.len(), 1);
+
+        // Status goes unhealthy if the component says so
+        handle.report_status(ComponentStatus::Unhealthy).await;
+        assert_or_retry(|| !registry.get_status().healthy).await;
+        status = registry.get_status();
+        assert_eq!(status.components.len(), 1);
+        assert_eq!(
+            status.components.get("one"),
+            Some(&ComponentStatus::Unhealthy)
+        );
+    }
+
+    #[tokio::test]
+    async fn staleness_check() {
+        let registry = HealthRegistry::new("liveness");
+        let handle = registry
+            .register("one".to_string(), Duration::seconds(30))
+            .await;
+
+        // Status goes healthy once the component reports
+        handle.report_healthy().await;
+        assert_or_retry(||
registry.get_status().healthy).await; + let mut status = registry.get_status(); + assert_eq!(status.components.len(), 1); + + // If the component's ping is too old, it is considered stalled and the healthcheck fails + // FIXME: we should mock the time instead + handle + .report_status(ComponentStatus::HealthyUntil( + OffsetDateTime::now_utc().sub(Duration::seconds(1)), + )) + .await; + assert_or_retry(|| !registry.get_status().healthy).await; + status = registry.get_status(); + assert_eq!(status.components.len(), 1); + assert_eq!( + status.components.get("one"), + Some(&ComponentStatus::Stalled) + ); + } + + #[tokio::test] + async fn several_components() { + let registry = HealthRegistry::new("liveness"); + let handle1 = registry + .register("one".to_string(), Duration::seconds(30)) + .await; + let handle2 = registry + .register("two".to_string(), Duration::seconds(30)) + .await; + assert_or_retry(|| registry.get_status().components.len() == 2).await; + + // First component going healthy is not enough + handle1.report_healthy().await; + assert_or_retry(|| { + registry.get_status().components.get("one").unwrap() != &ComponentStatus::Starting + }) + .await; + assert!(!registry.get_status().healthy); + + // Second component going healthy brings the health to green + handle2.report_healthy().await; + assert_or_retry(|| { + registry.get_status().components.get("two").unwrap() != &ComponentStatus::Starting + }) + .await; + assert!(registry.get_status().healthy); + + // First component going unhealthy takes down the health to red + handle1.report_status(ComponentStatus::Unhealthy).await; + assert_or_retry(|| !registry.get_status().healthy).await; + + // First component recovering returns the health to green + handle1.report_healthy().await; + assert_or_retry(|| registry.get_status().healthy).await; + + // Second component going unhealthy takes down the health to red + handle2.report_status(ComponentStatus::Unhealthy).await; + assert_or_retry(|| !registry.get_status().healthy).await; + } + + #[tokio::test] + async fn into_response() { + let nok = HealthStatus::default().into_response(); + assert_eq!(nok.status(), StatusCode::INTERNAL_SERVER_ERROR); + + let ok = HealthStatus { + healthy: true, + components: Default::default(), + } + .into_response(); + assert_eq!(ok.status(), StatusCode::OK); + } +} diff --git a/hook-common/src/lib.rs b/hook-common/src/lib.rs index 8e63ded..7f49049 100644 --- a/hook-common/src/lib.rs +++ b/hook-common/src/lib.rs @@ -1,3 +1,4 @@ +pub mod health; pub mod kafka_messages; pub mod metrics; pub mod pgqueue; diff --git a/hook-janitor/Cargo.toml b/hook-janitor/Cargo.toml index f23626b..96a80eb 100644 --- a/hook-janitor/Cargo.toml +++ b/hook-janitor/Cargo.toml @@ -20,6 +20,7 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } sqlx = { workspace = true } +time = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } tower = { workspace = true } diff --git a/hook-janitor/src/handlers/app.rs b/hook-janitor/src/handlers/app.rs index cab0e0d..507a1cb 100644 --- a/hook-janitor/src/handlers/app.rs +++ b/hook-janitor/src/handlers/app.rs @@ -1,7 +1,12 @@ -use axum::{routing, Router}; +use axum::{routing::get, Router}; +use hook_common::health::HealthRegistry; +use std::future::ready; -pub fn app() -> Router { - Router::new().route("/", routing::get(index)) +pub fn app(liveness: HealthRegistry) -> Router { + Router::new() + .route("/", get(index)) + .route("/_readiness", get(index)) + .route("/_liveness", 
get(move || ready(liveness.get_status()))) } pub async fn index() -> &'static str { diff --git a/hook-janitor/src/kafka_producer.rs b/hook-janitor/src/kafka_producer.rs index 1d0144c..ba36866 100644 --- a/hook-janitor/src/kafka_producer.rs +++ b/hook-janitor/src/kafka_producer.rs @@ -1,17 +1,27 @@ use crate::config::KafkaConfig; +use hook_common::health::HealthHandle; use rdkafka::error::KafkaError; use rdkafka::producer::FutureProducer; use rdkafka::ClientConfig; use tracing::debug; -// TODO: Take stats recording pieces that we want from `capture-rs`. -pub struct KafkaContext {} +pub struct KafkaContext { + liveness: HealthHandle, +} + +impl rdkafka::ClientContext for KafkaContext { + fn stats(&self, _: rdkafka::Statistics) { + // Signal liveness, as the main rdkafka loop is running and calling us + self.liveness.report_healthy_blocking(); -impl rdkafka::ClientContext for KafkaContext {} + // TODO: Take stats recording pieces that we want from `capture-rs`. + } +} pub async fn create_kafka_producer( config: &KafkaConfig, + liveness: HealthHandle, ) -> Result, KafkaError> { let mut client_config = ClientConfig::new(); client_config @@ -38,7 +48,10 @@ pub async fn create_kafka_producer( }; debug!("rdkafka configuration: {:?}", client_config); - let api: FutureProducer = client_config.create_with_context(KafkaContext {})?; + let api: FutureProducer = + client_config.create_with_context(KafkaContext { liveness })?; + + // TODO: ping the kafka brokers to confirm configuration is OK (copy capture) Ok(api) } diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs index 63068c3..15d0068 100644 --- a/hook-janitor/src/main.rs +++ b/hook-janitor/src/main.rs @@ -4,6 +4,7 @@ use config::Config; use envconfig::Envconfig; use eyre::Result; use futures::future::{select, Either}; +use hook_common::health::{HealthHandle, HealthRegistry}; use kafka_producer::create_kafka_producer; use std::{str::FromStr, time::Duration}; use tokio::sync::Semaphore; @@ -25,13 +26,14 @@ async fn listen(app: Router, bind: String) -> Result<()> { Ok(()) } -async fn cleanup_loop(cleaner: Box, interval_secs: u64) { +async fn cleanup_loop(cleaner: Box, interval_secs: u64, liveness: HealthHandle) { let semaphore = Semaphore::new(1); let mut interval = tokio::time::interval(Duration::from_secs(interval_secs)); loop { let _permit = semaphore.acquire().await; interval.tick().await; + liveness.report_healthy().await; cleaner.cleanup().await; drop(_permit); } @@ -46,9 +48,14 @@ async fn main() { let mode_name = CleanerModeName::from_str(&config.mode) .unwrap_or_else(|_| panic!("invalid cleaner mode: {}", config.mode)); + let liveness = HealthRegistry::new("liveness"); + let cleaner = match mode_name { CleanerModeName::Webhooks => { - let kafka_producer = create_kafka_producer(&config.kafka) + let kafka_liveness = liveness + .register("rdkafka".to_string(), time::Duration::seconds(30)) + .await; + let kafka_producer = create_kafka_producer(&config.kafka, kafka_liveness) .await .expect("failed to create kafka producer"); @@ -64,9 +71,19 @@ async fn main() { } }; - let cleanup_loop = Box::pin(cleanup_loop(cleaner, config.cleanup_interval_secs)); + let cleanup_liveness = liveness + .register( + "cleanup_loop".to_string(), + time::Duration::seconds(config.cleanup_interval_secs as i64 * 2), + ) + .await; + let cleanup_loop = Box::pin(cleanup_loop( + cleaner, + config.cleanup_interval_secs, + cleanup_liveness, + )); - let app = setup_metrics_routes(handlers::app()); + let app = setup_metrics_routes(handlers::app(liveness)); let 
http_server = Box::pin(listen(app, config.bind())); match select(http_server, cleanup_loop).await { diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 4248a47..7aa9aa3 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -457,6 +457,7 @@ mod tests { use super::*; use crate::config; use crate::kafka_producer::{create_kafka_producer, KafkaContext}; + use hook_common::health::HealthRegistry; use hook_common::kafka_messages::app_metrics::{ Error as WebhookError, ErrorDetails, ErrorType, }; @@ -476,6 +477,10 @@ mod tests { MockCluster<'static, DefaultProducerContext>, FutureProducer, ) { + let registry = HealthRegistry::new("liveness"); + let handle = registry + .register("one".to_string(), time::Duration::seconds(30)) + .await; let cluster = MockCluster::new(1).expect("failed to create mock brokers"); let config = config::KafkaConfig { @@ -491,7 +496,7 @@ mod tests { ( cluster, - create_kafka_producer(&config) + create_kafka_producer(&config, handle) .await .expect("failed to create mocked kafka producer"), ) diff --git a/hook-worker/Cargo.toml b/hook-worker/Cargo.toml index 11da0a8..6ed5796 100644 --- a/hook-worker/Cargo.toml +++ b/hook-worker/Cargo.toml @@ -15,6 +15,7 @@ reqwest = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } sqlx = { workspace = true } +time = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index cc17169..d036d54 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -1,7 +1,10 @@ //! Consume `PgQueue` jobs to run webhook calls. +use axum::routing::get; use axum::Router; use envconfig::Envconfig; +use std::future::ready; +use hook_common::health::HealthRegistry; use hook_common::{ metrics::serve, metrics::setup_metrics_routes, pgqueue::PgQueue, retry::RetryPolicy, }; @@ -15,6 +18,11 @@ async fn main() -> Result<(), WorkerError> { let config = Config::init_from_env().expect("Invalid configuration:"); + let liveness = HealthRegistry::new("liveness"); + let worker_liveness = liveness + .register("worker".to_string(), time::Duration::seconds(60)) // TODO: compute the value from worker params + .await; + let retry_policy = RetryPolicy::build( config.retry_policy.backoff_coefficient, config.retry_policy.initial_interval.0, @@ -33,11 +41,16 @@ async fn main() -> Result<(), WorkerError> { config.request_timeout.0, config.max_concurrent_jobs, retry_policy, + worker_liveness, ); + let router = Router::new() + .route("/", get(index)) + .route("/_readiness", get(index)) + .route("/_liveness", get(move || ready(liveness.get_status()))); + let router = setup_metrics_routes(router); let bind = config.bind(); tokio::task::spawn(async move { - let router = setup_metrics_routes(Router::new()); serve(router, &bind) .await .expect("failed to start serving metrics"); @@ -47,3 +60,7 @@ async fn main() -> Result<(), WorkerError> { Ok(()) } + +pub async fn index() -> &'static str { + "rusty-hook worker" +} diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index 77c6adb..1041422 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -2,6 +2,7 @@ use std::collections; use std::sync::Arc; use std::time; +use hook_common::health::HealthHandle; use hook_common::{ pgqueue::{Job, PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, retry::RetryPolicy, @@ -75,6 +76,8 @@ pub struct WebhookWorker<'p> { 
max_concurrent_jobs: usize, /// The retry policy used to calculate retry intervals when a job fails with a retryable error. retry_policy: RetryPolicy, + /// The liveness check handle, to call on a schedule to report healthy + liveness: HealthHandle, } impl<'p> WebhookWorker<'p> { @@ -85,6 +88,7 @@ impl<'p> WebhookWorker<'p> { request_timeout: time::Duration, max_concurrent_jobs: usize, retry_policy: RetryPolicy, + liveness: HealthHandle, ) -> Self { let mut headers = header::HeaderMap::new(); headers.insert( @@ -106,6 +110,7 @@ impl<'p> WebhookWorker<'p> { client, max_concurrent_jobs, retry_policy, + liveness, } } @@ -117,6 +122,7 @@ impl<'p> WebhookWorker<'p> { loop { interval.tick().await; + self.liveness.report_healthy().await; if let Some(job) = self.queue.dequeue(&self.name).await? { return Ok(job); @@ -132,6 +138,7 @@ impl<'p> WebhookWorker<'p> { loop { interval.tick().await; + self.liveness.report_healthy().await; if let Some(job) = self.queue.dequeue_tx(&self.name).await? { return Ok(job); @@ -157,7 +164,6 @@ impl<'p> WebhookWorker<'p> { } else { loop { let webhook_job = self.wait_for_job().await?; - spawn_webhook_job_processing_task( self.client.clone(), semaphore.clone(), @@ -430,6 +436,8 @@ mod tests { // This is due to a long-standing cargo bug that reports imports and helper functions as unused. // See: https://github.com/rust-lang/rust/issues/46379. #[allow(unused_imports)] + use hook_common::health::HealthRegistry; + #[allow(unused_imports)] use hook_common::pgqueue::{JobStatus, NewJob}; #[allow(unused_imports)] use sqlx::PgPool; @@ -502,6 +510,10 @@ mod tests { plugin_id: 2, plugin_config_id: 3, }; + let registry = HealthRegistry::new("liveness"); + let liveness = registry + .register("worker".to_string(), ::time::Duration::seconds(30)) + .await; // enqueue takes ownership of the job enqueued to avoid bugs that can cause duplicate jobs. // Normally, a separate application would be enqueueing jobs for us to consume, so no ownership // conflicts would arise. However, in this test we need to do the enqueueing ourselves. 
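The ownership point in that comment deserves spelling out: because enqueueing takes the job by value, handing the same job to the queue twice is a compile error rather than a runtime duplicate. A self-contained sketch of the pattern with simplified stand-in types, not the actual pgqueue API:

    struct Job(u32);
    struct Queue(Vec<Job>);

    impl Queue {
        fn enqueue(&mut self, job: Job) {
            // `job` is moved into the queue; the caller no longer owns it.
            self.0.push(job);
        }
    }

    fn main() {
        let mut queue = Queue(Vec::new());
        let job = Job(1);
        queue.enqueue(job);
        // queue.enqueue(job); // error[E0382]: use of moved value: `job`
    }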
@@ -521,6 +533,7 @@ mod tests { time::Duration::from_millis(5000), 10, RetryPolicy::default(), + liveness, ); let consumed_job = worker @@ -543,6 +556,8 @@ mod tests { .complete() .await .expect("job not successfully completed"); + + assert!(registry.get_status().healthy) } #[sqlx::test(migrations = "../migrations")] From c181c50580625699a077b81fcfada9a7d5c1dd5b Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 16 Jan 2024 12:56:04 -0700 Subject: [PATCH 115/130] Add queue depth gauges (#49) --- hook-janitor/src/webhooks.rs | 49 +++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 7aa9aa3..4b4f9a4 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -24,6 +24,8 @@ pub enum WebhookCleanerError { PoolCreationError { error: sqlx::Error }, #[error("failed to acquire conn and start txn: {error}")] StartTxnError { error: sqlx::Error }, + #[error("failed to get queue depth: {error}")] + GetQueueDepthError { error: sqlx::Error }, #[error("failed to get row count: {error}")] GetRowCountError { error: sqlx::Error }, #[error("failed to get completed rows: {error}")] @@ -107,6 +109,14 @@ struct FailedRow { failures: u32, } +#[derive(sqlx::FromRow, Debug)] +struct QueueDepth { + oldest_created_at_untried: DateTime, + count_untried: i64, + oldest_created_at_retries: DateTime, + count_retries: i64, +} + impl From for AppMetric { fn from(row: FailedRow) -> Self { AppMetric { @@ -175,6 +185,33 @@ impl WebhookCleaner { }) } + async fn get_queue_depth(&self) -> Result { + let mut conn = self + .pg_pool + .acquire() + .await + .map_err(|e| WebhookCleanerError::StartTxnError { error: e })?; + + let base_query = r#" + SELECT + COALESCE(MIN(CASE WHEN attempt = 0 THEN created_at END), now()) AS oldest_created_at_untried, + SUM(CASE WHEN attempt = 0 THEN 1 ELSE 0 END) AS count_untried, + COALESCE(MIN(CASE WHEN attempt > 0 THEN created_at END), now()) AS oldest_created_at_retries, + SUM(CASE WHEN attempt > 0 THEN 1 ELSE 0 END) AS count_retries + FROM job_queue + WHERE status = 'available' + AND queue = $1; + "#; + + let row = sqlx::query_as::<_, QueueDepth>(base_query) + .bind(&self.queue_name) + .fetch_one(&mut *conn) + .await + .map_err(|e| WebhookCleanerError::GetQueueDepthError { error: e })?; + + Ok(row) + } + async fn start_serializable_txn(&self) -> Result { let mut tx = self .pg_pool @@ -229,7 +266,7 @@ impl WebhookCleaner { count(*) as successes FROM job_queue WHERE status = 'completed' - AND queue = $1 + AND queue = $1 GROUP BY hour, team_id, plugin_config_id ORDER BY hour, team_id, plugin_config_id; "#; @@ -354,6 +391,16 @@ impl WebhookCleaner { // of rows in memory. It seems unlikely we'll need to paginate, but that can be added in the // future if necessary. 
+        let queue_depth = self.get_queue_depth().await?;
+        metrics::gauge!("queue_depth_oldest_created_at_untried")
+            .set(queue_depth.oldest_created_at_untried.timestamp() as f64);
+        metrics::gauge!("queue_depth", &[("status", "untried")])
+            .set(queue_depth.count_untried as f64);
+        metrics::gauge!("queue_depth_oldest_created_at_retries")
+            .set(queue_depth.oldest_created_at_retries.timestamp() as f64);
+        metrics::gauge!("queue_depth", &[("status", "retries")])
+            .set(queue_depth.count_retries as f64);
+
         let mut tx = self.start_serializable_txn().await?;
 
         let (completed_row_count, completed_agg_row_count) = {

From 1422683d87708eb5e4261f3ebf92594b92ea8793 Mon Sep 17 00:00:00 2001
From: Xavier Vello
Date: Wed, 17 Jan 2024 10:53:51 +0100
Subject: [PATCH 116/130] feat: add webhook_worker_saturation_percent metric for autoscaling (#50)

---
 hook-worker/src/worker.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs
index 1041422..7fe6808 100644
--- a/hook-worker/src/worker.rs
+++ b/hook-worker/src/worker.rs
@@ -149,9 +149,14 @@ impl<'p> WebhookWorker<'p> {
     /// Run this worker to continuously process any jobs that become available.
     pub async fn run(&self, transactional: bool) -> Result<(), WorkerError> {
         let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs));
+        let report_semaphore_utilization = || {
+            metrics::gauge!("webhook_worker_saturation_percent")
+                .set(1f64 - semaphore.available_permits() as f64 / self.max_concurrent_jobs as f64);
+        };
 
         if transactional {
             loop {
+                report_semaphore_utilization();
                 let webhook_job = self.wait_for_job_tx().await?;
                 spawn_webhook_job_processing_task(
                     self.client.clone(),
@@ -163,6 +168,7 @@
             }
         } else {
             loop {
+                report_semaphore_utilization();
                 let webhook_job = self.wait_for_job().await?;
                 spawn_webhook_job_processing_task(
                     self.client.clone(),

From 23ef5e276d1dc85333500aaa20bafa62a3594f5f Mon Sep 17 00:00:00 2001
From: Xavier Vello
Date: Wed, 17 Jan 2024 12:43:36 +0100
Subject: [PATCH 117/130] fix: fix janitor get_queue_depth when queue is empty (#54)

---
 hook-janitor/src/webhooks.rs | 46 ++++++++++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)

diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs
index 4b4f9a4..1d536c0 100644
--- a/hook-janitor/src/webhooks.rs
+++ b/hook-janitor/src/webhooks.rs
@@ -195,9 +195,9 @@ impl WebhookCleaner {
         let base_query = r#"
             SELECT
                 COALESCE(MIN(CASE WHEN attempt = 0 THEN created_at END), now()) AS oldest_created_at_untried,
-                SUM(CASE WHEN attempt = 0 THEN 1 ELSE 0 END) AS count_untried,
+                COALESCE(SUM(CASE WHEN attempt = 0 THEN 1 ELSE 0 END), 0) AS count_untried,
                 COALESCE(MIN(CASE WHEN attempt > 0 THEN created_at END), now()) AS oldest_created_at_retries,
-                SUM(CASE WHEN attempt > 0 THEN 1 ELSE 0 END) AS count_retries
+                COALESCE(SUM(CASE WHEN attempt > 0 THEN 1 ELSE 0 END), 0) AS count_retries
             FROM job_queue
             WHERE status = 'available'
                 AND queue = $1;
         "#;
@@ -513,6 +513,7 @@ mod tests {
     use rdkafka::consumer::{Consumer, StreamConsumer};
     use rdkafka::mocking::MockCluster;
     use rdkafka::producer::{DefaultProducerContext, FutureProducer};
+    use rdkafka::types::{RDKafkaApiKey, RDKafkaRespErr};
    use rdkafka::{ClientConfig, Message};
     use sqlx::{PgPool, Row};
     use std::collections::HashMap;
@@ -754,6 +755,47 @@
         check_app_metric_vector_equality(&expected_app_metrics, &received_app_metrics);
     }
 
+    #[sqlx::test(migrations = "../migrations")]
+    async fn test_cleanup_impl_empty_queue(db: PgPool) {
+        let (mock_cluster, mock_producer) = create_mock_kafka().await;
+        mock_cluster
+            .create_topic(APP_METRICS_TOPIC, 1, 1)
+            .expect("failed to create mock app_metrics topic");
+
+        // No payload should be produced to kafka as the queue is empty.
+        // Set a non-retriable produce error that would bubble-up when cleanup_impl is called.
+        let err = [RDKafkaRespErr::RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE; 1];
+        mock_cluster.request_errors(RDKafkaApiKey::Produce, &err);
+
+        let consumer: StreamConsumer = ClientConfig::new()
+            .set("bootstrap.servers", mock_cluster.bootstrap_servers())
+            .set("group.id", "mock")
+            .set("auto.offset.reset", "earliest")
+            .create()
+            .expect("failed to create mock consumer");
+        consumer.subscribe(&[APP_METRICS_TOPIC]).unwrap();
+
+        let webhook_cleaner = WebhookCleaner::new_from_pool(
+            &"webhooks",
+            db,
+            mock_producer,
+            APP_METRICS_TOPIC.to_owned(),
+        )
+        .expect("unable to create webhook cleaner");
+
+        let cleanup_stats = webhook_cleaner
+            .cleanup_impl()
+            .await
+            .expect("webhook cleanup_impl failed");
+
+        // Reported metrics are all zeroes
+        assert_eq!(cleanup_stats.rows_processed, 0);
+        assert_eq!(cleanup_stats.completed_row_count, 0);
+        assert_eq!(cleanup_stats.completed_agg_row_count, 0);
+        assert_eq!(cleanup_stats.failed_row_count, 0);
+        assert_eq!(cleanup_stats.failed_agg_row_count, 0);
+    }
+
     #[sqlx::test(migrations = "../migrations", fixtures("webhook_cleanup"))]
     async fn test_serializable_isolation(db: PgPool) {
         let (_, mock_producer) = create_mock_kafka().await;

From 9c9ebd5033ac8dcb1c283a5a31203ea682a8a26f Mon Sep 17 00:00:00 2001
From: Brett Hoerner
Date: Wed, 17 Jan 2024 07:56:37 -0700
Subject: [PATCH 118/130] Remove queue filters from janitor (#52)

---
 hook-janitor/src/config.rs   |  3 --
 hook-janitor/src/main.rs     |  1 -
 hook-janitor/src/webhooks.rs | 82 ++++++++++++------------------------
 3 files changed, 27 insertions(+), 59 deletions(-)

diff --git a/hook-janitor/src/config.rs b/hook-janitor/src/config.rs
index 252a670..389de03 100644
--- a/hook-janitor/src/config.rs
+++ b/hook-janitor/src/config.rs
@@ -11,9 +11,6 @@ pub struct Config {
     #[envconfig(default = "postgres://posthog:posthog@localhost:15432/test_database")]
     pub database_url: String,
 
-    #[envconfig(default = "default")]
-    pub queue_name: String,
-
     #[envconfig(default = "30")]
     pub cleanup_interval_secs: u64,
 
diff --git a/hook-janitor/src/main.rs b/hook-janitor/src/main.rs
index 15d0068..46ee375 100644
--- a/hook-janitor/src/main.rs
+++ b/hook-janitor/src/main.rs
@@ -61,7 +61,6 @@ async fn main() {
 
             Box::new(
                 WebhookCleaner::new(
-                    &config.queue_name,
                     &config.database_url,
                     kafka_producer,
                     config.kafka.app_metrics_topic.to_owned(),
diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs
index 1d536c0..5ac9d55 100644
--- a/hook-janitor/src/webhooks.rs
+++ b/hook-janitor/src/webhooks.rs
@@ -51,7 +51,6 @@ pub enum WebhookCleanerError {
 type Result<T, E = WebhookCleanerError> = std::result::Result<T, E>;
 
 pub struct WebhookCleaner {
-    queue_name: String,
     pg_pool: PgPool,
     kafka_producer: FutureProducer<KafkaContext>,
     app_metrics_topic: String,
@@ -149,19 +148,16 @@ impl WebhookCleaner {
     pub fn new(
-        queue_name: &str,
         database_url: &str,
         kafka_producer: FutureProducer<KafkaContext>,
         app_metrics_topic: String,
     ) -> Result<Self> {
-        let queue_name = queue_name.to_owned();
 
         let pg_pool = PgPoolOptions::new()
            .acquire_timeout(Duration::from_secs(10))
            .connect_lazy(database_url)
            .map_err(|error| WebhookCleanerError::PoolCreationError { error })?;
 
         Ok(Self {
-            queue_name,
             pg_pool,
             kafka_producer,
             app_metrics_topic,
@@ -170,15 +166,11 @@ impl WebhookCleaner {
     #[allow(dead_code)] // This is used in tests.
     pub fn new_from_pool(
-        queue_name: &str,
         pg_pool: PgPool,
         kafka_producer: FutureProducer<KafkaContext>,
         app_metrics_topic: String,
     ) -> Result<Self> {
-        let queue_name = queue_name.to_owned();
-
         Ok(Self {
-            queue_name,
             pg_pool,
             kafka_producer,
             app_metrics_topic,
@@ -199,12 +191,10 @@ impl WebhookCleaner {
                 COALESCE(MIN(CASE WHEN attempt > 0 THEN created_at END), now()) AS oldest_created_at_retries,
                 COALESCE(SUM(CASE WHEN attempt > 0 THEN 1 ELSE 0 END), 0) AS count_retries
             FROM job_queue
-            WHERE status = 'available'
-                AND queue = $1;
+            WHERE status = 'available';
         "#;
 
         let row = sqlx::query_as::<_, QueueDepth>(base_query)
-            .bind(&self.queue_name)
             .fetch_one(&mut *conn)
             .await
             .map_err(|e| WebhookCleanerError::GetQueueDepthError { error: e })?;
@@ -240,12 +230,10 @@ impl WebhookCleaner {
     ) -> Result<i64> {
         let base_query = r#"
             SELECT count(*) FROM job_queue
-            WHERE queue = $1
-            AND status = $2::job_status;
+            WHERE status = $1::job_status;
         "#;
 
         let count: i64 = sqlx::query(base_query)
-            .bind(&self.queue_name)
             .bind(status)
             .fetch_one(&mut *tx.0)
             .await
@@ -266,13 +254,11 @@ impl WebhookCleaner {
                 count(*) as successes
             FROM job_queue
             WHERE status = 'completed'
-                AND queue = $1
             GROUP BY hour, team_id, plugin_config_id
             ORDER BY hour, team_id, plugin_config_id;
         "#;
 
         let rows = sqlx::query_as::<_, CompletedRow>(base_query)
-            .bind(&self.queue_name)
             .fetch_all(&mut *tx.0)
             .await
             .map_err(|e| WebhookCleanerError::GetCompletedRowsError { error: e })?;
@@ -289,13 +275,11 @@ impl WebhookCleaner {
                 count(*) as failures
             FROM job_queue
             WHERE status = 'failed'
-                AND queue = $1
             GROUP BY hour, team_id, plugin_config_id, last_error
             ORDER BY hour, team_id, plugin_config_id, last_error;
         "#;
 
         let rows = sqlx::query_as::<_, FailedRow>(base_query)
-            .bind(&self.queue_name)
             .fetch_all(&mut *tx.0)
             .await
             .map_err(|e| WebhookCleanerError::GetFailedRowsError { error: e })?;
@@ -352,11 +336,9 @@ impl WebhookCleaner {
         let base_query = r#"
             DELETE FROM job_queue
             WHERE status IN ('failed', 'completed')
-                AND queue = $1;
         "#;
 
         let result = sqlx::query(base_query)
-            .bind(&self.queue_name)
             .execute(&mut *tx.0)
             .await
             .map_err(|e| WebhookCleanerError::DeleteRowsError { error: e })?;
@@ -577,22 +559,17 @@ mod tests {
             .expect("failed to create mock consumer");
         consumer.subscribe(&[APP_METRICS_TOPIC]).unwrap();
 
-        let webhook_cleaner = WebhookCleaner::new_from_pool(
-            &"webhooks",
-            db,
-            mock_producer,
-            APP_METRICS_TOPIC.to_owned(),
-        )
-        .expect("unable to create webhook cleaner");
+        let webhook_cleaner =
+            WebhookCleaner::new_from_pool(db, mock_producer, APP_METRICS_TOPIC.to_owned())
+                .expect("unable to create webhook cleaner");
 
         let cleanup_stats = webhook_cleaner
             .cleanup_impl()
             .await
             .expect("webhook cleanup_impl failed");
 
-        // Rows from other queues and rows that are not 'completed' or 'failed' should not be
-        // processed.
-        assert_eq!(cleanup_stats.rows_processed, 11);
+        // Rows that are not 'completed' or 'failed' should not be processed.
+ assert_eq!(cleanup_stats.rows_processed, 13); let mut received_app_metrics = Vec::new(); for _ in 0..(cleanup_stats.completed_agg_row_count + cleanup_stats.failed_agg_row_count) { @@ -609,7 +586,7 @@ mod tests { plugin_config_id: 2, job_id: None, category: AppMetricCategory::Webhook, - successes: 2, + successes: 3, successes_on_retry: 0, failures: 0, error_uuid: None, @@ -682,7 +659,7 @@ mod tests { category: AppMetricCategory::Webhook, successes: 0, successes_on_retry: 0, - failures: 2, + failures: 3, error_uuid: Some(Uuid::parse_str("018c8935-d038-714a-957c-0df43d42e377").unwrap()), error_type: Some(ErrorType::TimeoutError), error_details: Some(ErrorDetails { @@ -799,13 +776,9 @@ mod tests { #[sqlx::test(migrations = "../migrations", fixtures("webhook_cleanup"))] async fn test_serializable_isolation(db: PgPool) { let (_, mock_producer) = create_mock_kafka().await; - let webhook_cleaner = WebhookCleaner::new_from_pool( - &"webhooks", - db.clone(), - mock_producer, - APP_METRICS_TOPIC.to_owned(), - ) - .expect("unable to create webhook cleaner"); + let webhook_cleaner = + WebhookCleaner::new_from_pool(db.clone(), mock_producer, APP_METRICS_TOPIC.to_owned()) + .expect("unable to create webhook cleaner"); let queue = PgQueue::new_from_pool("webhooks", db.clone()) .await @@ -813,14 +786,13 @@ mod tests { async fn get_count_from_new_conn(db: &PgPool, status: &str) -> i64 { let mut conn = db.acquire().await.unwrap(); - let count: i64 = sqlx::query( - "SELECT count(*) FROM job_queue WHERE queue = 'webhooks' AND status = $1::job_status", - ) - .bind(&status) - .fetch_one(&mut *conn) - .await - .unwrap() - .get(0); + let count: i64 = + sqlx::query("SELECT count(*) FROM job_queue WHERE status = $1::job_status") + .bind(&status) + .fetch_one(&mut *conn) + .await + .unwrap() + .get(0); count } @@ -832,10 +804,10 @@ mod tests { .unwrap(); webhook_cleaner.get_failed_agg_rows(&mut tx).await.unwrap(); - // All 13 rows in the queue are visible from outside the txn. - // The 11 the cleaner will process, plus 1 available and 1 running. - assert_eq!(get_count_from_new_conn(&db, "completed").await, 5); - assert_eq!(get_count_from_new_conn(&db, "failed").await, 6); + // All 15 rows in the DB are visible from outside the txn. + // The 13 the cleaner will process, plus 1 available and 1 running. + assert_eq!(get_count_from_new_conn(&db, "completed").await, 6); + assert_eq!(get_count_from_new_conn(&db, "failed").await, 7); assert_eq!(get_count_from_new_conn(&db, "available").await, 1); assert_eq!(get_count_from_new_conn(&db, "running").await, 1); @@ -896,15 +868,15 @@ mod tests { } // There are now 2 more completed rows (jobs added above) than before, visible from outside the txn. - assert_eq!(get_count_from_new_conn(&db, "completed").await, 7); + assert_eq!(get_count_from_new_conn(&db, "completed").await, 8); assert_eq!(get_count_from_new_conn(&db, "available").await, 1); let rows_processed = webhook_cleaner.delete_observed_rows(&mut tx).await.unwrap(); - // The 11 rows that were in the queue when the txn started should be deleted. - assert_eq!(rows_processed, 11); + // The 13 rows in the DB when the txn started should be deleted. + assert_eq!(rows_processed, 13); // We haven't committed, so the rows are still visible from outside the txn. 
- assert_eq!(get_count_from_new_conn(&db, "completed").await, 7); + assert_eq!(get_count_from_new_conn(&db, "completed").await, 8); assert_eq!(get_count_from_new_conn(&db, "available").await, 1); webhook_cleaner.commit_txn(tx).await.unwrap(); From 099053e0b1891be416ea39967589a64d3e4349db Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Wed, 17 Jan 2024 08:27:37 -0700 Subject: [PATCH 119/130] Make retry_queue_name truly optional from env to main (#53) --- hook-janitor/src/webhooks.rs | 10 +++------- hook-worker/src/config.rs | 3 +-- hook-worker/src/main.rs | 14 +++++++++----- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 5ac9d55..bc01605 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -752,13 +752,9 @@ mod tests { .expect("failed to create mock consumer"); consumer.subscribe(&[APP_METRICS_TOPIC]).unwrap(); - let webhook_cleaner = WebhookCleaner::new_from_pool( - &"webhooks", - db, - mock_producer, - APP_METRICS_TOPIC.to_owned(), - ) - .expect("unable to create webhook cleaner"); + let webhook_cleaner = + WebhookCleaner::new_from_pool(db, mock_producer, APP_METRICS_TOPIC.to_owned()) + .expect("unable to create webhook cleaner"); let cleanup_stats = webhook_cleaner .cleanup_impl() diff --git a/hook-worker/src/config.rs b/hook-worker/src/config.rs index 8b2b4ba..8484671 100644 --- a/hook-worker/src/config.rs +++ b/hook-worker/src/config.rs @@ -70,6 +70,5 @@ pub struct RetryPolicyConfig { #[envconfig(default = "100000")] pub maximum_interval: EnvMsDuration, - #[envconfig(default = "default")] - pub retry_queue_name: String, + pub retry_queue_name: Option, } diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index d036d54..bf6b4fd 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -23,13 +23,17 @@ async fn main() -> Result<(), WorkerError> { .register("worker".to_string(), time::Duration::seconds(60)) // TODO: compute the value from worker params .await; - let retry_policy = RetryPolicy::build( + let mut retry_policy_builder = RetryPolicy::build( config.retry_policy.backoff_coefficient, config.retry_policy.initial_interval.0, ) - .maximum_interval(config.retry_policy.maximum_interval.0) - .queue(&config.retry_policy.retry_queue_name) - .provide(); + .maximum_interval(config.retry_policy.maximum_interval.0); + + retry_policy_builder = match &config.retry_policy.retry_queue_name { + Some(retry_queue_name) => retry_policy_builder.queue(retry_queue_name), + _ => retry_policy_builder, + }; + let queue = PgQueue::new(&config.queue_name, &config.database_url) .await .expect("failed to initialize queue"); @@ -40,7 +44,7 @@ async fn main() -> Result<(), WorkerError> { config.poll_interval.0, config.request_timeout.0, config.max_concurrent_jobs, - retry_policy, + retry_policy_builder.provide(), worker_liveness, ); From 58474c06d22bb5d3a1a2df6289e9cdce847c6b9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?= Date: Wed, 17 Jan 2024 20:17:15 +0100 Subject: [PATCH 120/130] refactor: Use new NonEmptyString config type for queue names (#55) --- hook-worker/src/config.rs | 28 ++++++++++++++++++++++++++-- hook-worker/src/main.rs | 9 +++++---- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/hook-worker/src/config.rs b/hook-worker/src/config.rs index 8484671..74342f7 100644 --- a/hook-worker/src/config.rs +++ b/hook-worker/src/config.rs @@ -18,7 +18,7 @@ pub struct Config { pub worker_name: String, #[envconfig(default = 
"default")] - pub queue_name: String, + pub queue_name: NonEmptyString, #[envconfig(default = "100")] pub poll_interval: EnvMsDuration, @@ -70,5 +70,29 @@ pub struct RetryPolicyConfig { #[envconfig(default = "100000")] pub maximum_interval: EnvMsDuration, - pub retry_queue_name: Option, + pub retry_queue_name: Option, +} + +#[derive(Debug, Clone)] +pub struct NonEmptyString(pub String); + +impl NonEmptyString { + pub fn as_str(&self) -> &str { + &self.0 + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct StringIsEmptyError; + +impl FromStr for NonEmptyString { + type Err = StringIsEmptyError; + + fn from_str(s: &str) -> Result { + if s.is_empty() { + Err(StringIsEmptyError) + } else { + Ok(NonEmptyString(s.to_owned())) + } + } } diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index bf6b4fd..bfee9ad 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -29,12 +29,13 @@ async fn main() -> Result<(), WorkerError> { ) .maximum_interval(config.retry_policy.maximum_interval.0); - retry_policy_builder = match &config.retry_policy.retry_queue_name { - Some(retry_queue_name) => retry_policy_builder.queue(retry_queue_name), - _ => retry_policy_builder, + retry_policy_builder = if let Some(retry_queue_name) = &config.retry_policy.retry_queue_name { + retry_policy_builder.queue(retry_queue_name.as_str()) + } else { + retry_policy_builder }; - let queue = PgQueue::new(&config.queue_name, &config.database_url) + let queue = PgQueue::new(config.queue_name.as_str(), &config.database_url) .await .expect("failed to initialize queue"); From 499f1432169f0fd8872e16c250c48500c3d10914 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Thu, 18 Jan 2024 10:10:33 -0700 Subject: [PATCH 121/130] Log error rather than exiting process on dequeue error (#51) --- hook-worker/src/main.rs | 2 +- hook-worker/src/worker.rs | 35 ++++++++++++++++++++--------------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index bfee9ad..345fa3d 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -61,7 +61,7 @@ async fn main() -> Result<(), WorkerError> { .expect("failed to start serving metrics"); }); - worker.run(config.transactional).await?; + worker.run(config.transactional).await; Ok(()) } diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index 7fe6808..c526c3f 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -115,17 +115,20 @@ impl<'p> WebhookWorker<'p> { } /// Wait until a job becomes available in our queue. - async fn wait_for_job<'a>( - &self, - ) -> Result, WorkerError> { + async fn wait_for_job<'a>(&self) -> PgJob { let mut interval = tokio::time::interval(self.poll_interval); loop { interval.tick().await; self.liveness.report_healthy().await; - if let Some(job) = self.queue.dequeue(&self.name).await? { - return Ok(job); + match self.queue.dequeue(&self.name).await { + Ok(Some(job)) => return job, + Ok(None) => continue, + Err(error) => { + error!("error while trying to dequeue job: {}", error); + continue; + } } } } @@ -133,21 +136,26 @@ impl<'p> WebhookWorker<'p> { /// Wait until a job becomes available in our queue in transactional mode. async fn wait_for_job_tx<'a>( &self, - ) -> Result, WorkerError> { + ) -> PgTransactionJob<'a, WebhookJobParameters, WebhookJobMetadata> { let mut interval = tokio::time::interval(self.poll_interval); loop { interval.tick().await; self.liveness.report_healthy().await; - if let Some(job) = self.queue.dequeue_tx(&self.name).await? 
{ - return Ok(job); + match self.queue.dequeue_tx(&self.name).await { + Ok(Some(job)) => return job, + Ok(None) => continue, + Err(error) => { + error!("error while trying to dequeue_tx job: {}", error); + continue; + } } } } /// Run this worker to continuously process any jobs that become available. - pub async fn run(&self, transactional: bool) -> Result<(), WorkerError> { + pub async fn run(&self, transactional: bool) { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); let report_semaphore_utilization = || { metrics::gauge!("webhook_worker_saturation_percent") @@ -157,7 +165,7 @@ impl<'p> WebhookWorker<'p> { if transactional { loop { report_semaphore_utilization(); - let webhook_job = self.wait_for_job_tx().await?; + let webhook_job = self.wait_for_job_tx().await; spawn_webhook_job_processing_task( self.client.clone(), semaphore.clone(), @@ -169,7 +177,7 @@ impl<'p> WebhookWorker<'p> { } else { loop { report_semaphore_utilization(); - let webhook_job = self.wait_for_job().await?; + let webhook_job = self.wait_for_job().await; spawn_webhook_job_processing_task( self.client.clone(), semaphore.clone(), @@ -542,10 +550,7 @@ mod tests { liveness, ); - let consumed_job = worker - .wait_for_job() - .await - .expect("failed to wait and read job"); + let consumed_job = worker.wait_for_job().await; assert_eq!(consumed_job.job.attempt, 1); assert!(consumed_job.job.attempted_by.contains(&worker_id)); From a4b9943d7f2e9ede94c6089658d4c1b869ca2579 Mon Sep 17 00:00:00 2001 From: Xavier Vello Date: Fri, 19 Jan 2024 12:28:03 +0100 Subject: [PATCH 122/130] add webhook_cleanup_last_success_timestamp metric for alerting (#59) --- hook-common/src/metrics.rs | 11 ++++++++++- hook-janitor/src/webhooks.rs | 3 +++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hook-common/src/metrics.rs b/hook-common/src/metrics.rs index 0e1ef2d..66bcfc9 100644 --- a/hook-common/src/metrics.rs +++ b/hook-common/src/metrics.rs @@ -1,4 +1,4 @@ -use std::time::Instant; +use std::time::{Instant, SystemTime}; use axum::{ body::Body, extract::MatchedPath, http::Request, middleware::Next, response::IntoResponse, @@ -71,3 +71,12 @@ pub async fn track_metrics(req: Request, next: Next) -> impl IntoResponse response } + +/// Returns the number of seconds since the Unix epoch, to use in prom gauges. +/// Saturates to zero if the system time is set before epoch. 
+pub fn get_current_timestamp_seconds() -> f64 { + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs() as f64 +} diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index bc01605..705e1b3 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -17,6 +17,7 @@ use crate::cleanup::Cleaner; use crate::kafka_producer::KafkaContext; use hook_common::kafka_messages::app_metrics::{AppMetric, AppMetricCategory}; +use hook_common::metrics::get_current_timestamp_seconds; #[derive(Error, Debug)] pub enum WebhookCleanerError { @@ -446,6 +447,8 @@ impl Cleaner for WebhookCleaner { match self.cleanup_impl().await { Ok(stats) => { metrics::counter!("webhook_cleanup_success",).increment(1); + metrics::gauge!("webhook_cleanup_last_success_timestamp",) + .set(get_current_timestamp_seconds()); if stats.rows_processed > 0 { let elapsed_time = start_time.elapsed().as_secs_f64(); From e2b5dcb3e02f80f261e2691ce6fff64edc7717bc Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Fri, 19 Jan 2024 05:28:51 -0700 Subject: [PATCH 123/130] Change created_at to scheduled_at for metrics (#58) --- hook-janitor/src/webhooks.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 705e1b3..9c33c5e 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -111,9 +111,9 @@ struct FailedRow { #[derive(sqlx::FromRow, Debug)] struct QueueDepth { - oldest_created_at_untried: DateTime, + oldest_scheduled_at_untried: DateTime, count_untried: i64, - oldest_created_at_retries: DateTime, + oldest_scheduled_at_retries: DateTime, count_retries: i64, } @@ -187,9 +187,9 @@ impl WebhookCleaner { let base_query = r#" SELECT - COALESCE(MIN(CASE WHEN attempt = 0 THEN created_at END), now()) AS oldest_created_at_untried, + COALESCE(MIN(CASE WHEN attempt = 0 THEN scheduled_at END), now()) AS oldest_scheduled_at_untried, COALESCE(SUM(CASE WHEN attempt = 0 THEN 1 ELSE 0 END), 0) AS count_untried, - COALESCE(MIN(CASE WHEN attempt > 0 THEN created_at END), now()) AS oldest_created_at_retries, + COALESCE(MIN(CASE WHEN attempt > 0 THEN scheduled_at END), now()) AS oldest_scheduled_at_retries, COALESCE(SUM(CASE WHEN attempt > 0 THEN 1 ELSE 0 END), 0) AS count_retries FROM job_queue WHERE status = 'available'; @@ -374,15 +374,16 @@ impl WebhookCleaner { // of rows in memory. It seems unlikely we'll need to paginate, but that can be added in the // future if necessary. 
+ let untried_status = [("status", "untried")]; + let retries_status = [("status", "retries")]; + let queue_depth = self.get_queue_depth().await?; - metrics::gauge!("queue_depth_oldest_created_at_untried") - .set(queue_depth.oldest_created_at_untried.timestamp() as f64); - metrics::gauge!("queue_depth", &[("status", "untried")]) - .set(queue_depth.count_untried as f64); - metrics::gauge!("queue_depth_oldest_created_at_retries") - .set(queue_depth.oldest_created_at_retries.timestamp() as f64); - metrics::gauge!("queue_depth", &[("status", "retries")]) - .set(queue_depth.count_retries as f64); + metrics::gauge!("queue_depth_oldest_scheduled", &untried_status) + .set(queue_depth.oldest_scheduled_at_untried.timestamp() as f64); + metrics::gauge!("queue_depth", &untried_status).set(queue_depth.count_untried as f64); + metrics::gauge!("queue_depth_oldest_scheduled", &retries_status) + .set(queue_depth.oldest_scheduled_at_retries.timestamp() as f64); + metrics::gauge!("queue_depth", &retries_status).set(queue_depth.count_retries as f64); let mut tx = self.start_serializable_txn().await?; From 6729401db21f737c45bc672af12d57321b8e6b26 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 30 Jan 2024 09:40:34 -0700 Subject: [PATCH 124/130] =?UTF-8?q?Add=20very=20basic=20version=20of=20job?= =?UTF-8?q?=20unstuck-ing=20for=20non-txn=20jobs=20that=20hang=20=E2=80=A6?= =?UTF-8?q?=20(#57)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- hook-janitor/src/fixtures/webhook_cleanup.sql | 16 ++++++ hook-janitor/src/webhooks.rs | 55 ++++++++++++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql index bddaf26..5dfa827 100644 --- a/hook-janitor/src/fixtures/webhook_cleanup.sql +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -2,6 +2,7 @@ INSERT INTO job_queue ( errors, metadata, + attempted_at, last_attempt_finished_at, parameters, queue, @@ -14,6 +15,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'completed', @@ -24,6 +26,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'completed', @@ -34,6 +37,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 21:01:18.799371+00', + '2023-12-19 21:01:18.799371+00', '{}', 'webhooks', 'completed', @@ -44,6 +48,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', '2023-12-19 20:01:18.80335+00', + '2023-12-19 20:01:18.80335+00', '{}', 'webhooks', 'completed', @@ -54,6 +59,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'not-webhooks', 'completed', @@ -64,6 +70,7 @@ VALUES NULL, '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'completed', @@ -74,6 +81,7 @@ VALUES ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'failed', @@ -84,6 +92,7 @@ VALUES ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, 
"plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'failed', @@ -94,6 +103,7 @@ VALUES ARRAY ['{"type":"ConnectionError","details":{"error":{"name":"Connection Error"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'failed', @@ -104,6 +114,7 @@ VALUES ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 21:01:18.799371+00', + '2023-12-19 21:01:18.799371+00', '{}', 'webhooks', 'failed', @@ -114,6 +125,7 @@ VALUES ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'failed', @@ -124,6 +136,7 @@ VALUES ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'not-webhooks', 'failed', @@ -134,6 +147,7 @@ VALUES ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{}', 'webhooks', 'failed', @@ -144,6 +158,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + '2023-12-19 20:01:18.799371+00', '{"body": "hello world", "headers": {}, "method": "POST", "url": "https://myhost/endpoint"}', 'webhooks', 'available', @@ -154,6 +169,7 @@ VALUES NULL, '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', + now() - '1 hour' :: interval, '{}', 'webhooks', 'running', diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 9c33c5e..ee8ff43 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -23,8 +23,12 @@ use hook_common::metrics::get_current_timestamp_seconds; pub enum WebhookCleanerError { #[error("failed to create postgres pool: {error}")] PoolCreationError { error: sqlx::Error }, + #[error("failed to acquire conn: {error}")] + AcquireConnError { error: sqlx::Error }, #[error("failed to acquire conn and start txn: {error}")] StartTxnError { error: sqlx::Error }, + #[error("failed to reschedule stuck jobs: {error}")] + RescheduleStuckJobsError { error: sqlx::Error }, #[error("failed to get queue depth: {error}")] GetQueueDepthError { error: sqlx::Error }, #[error("failed to get row count: {error}")] @@ -140,6 +144,7 @@ impl From for AppMetric { struct SerializableTxn<'a>(Transaction<'a, Postgres>); struct CleanupStats { + jobs_unstuck_count: u64, rows_processed: u64, completed_row_count: u64, completed_agg_row_count: u64, @@ -178,12 +183,51 @@ impl WebhookCleaner { }) } + async fn reschedule_stuck_jobs(&self) -> Result { + let mut conn = self + .pg_pool + .acquire() + .await + .map_err(|e| WebhookCleanerError::AcquireConnError { error: e })?; + + // The "non-transactional" worker runs the risk of crashing and leaving jobs permanently in + // the `running` state. This query will reschedule any jobs that have been in the running + // state for more than 2 minutes (which is *much* longer than we expect any Webhook job to + // take). 
+        //
+        // We don't need to increment the `attempt` counter here because the worker already did that
+        // when it moved the job into `running`.
+        //
+        // If the previous worker was somehow stalled for 2 minutes and completes the task, that
+        // will mean we sent duplicate Webhooks. Success stats should not be affected, since both
+        // will update the same job row, which will only be processed once by the janitor.
+
+        let base_query = r#"
+            UPDATE
+                job_queue
+            SET
+                status = 'available'::job_status,
+                last_attempt_finished_at = NOW(),
+                scheduled_at = NOW()
+            WHERE
+                status = 'running'::job_status
+                AND attempted_at < NOW() - INTERVAL '2 minutes'
+        "#;
+
+        let result = sqlx::query(base_query)
+            .execute(&mut *conn)
+            .await
+            .map_err(|e| WebhookCleanerError::RescheduleStuckJobsError { error: e })?;
+
+        Ok(result.rows_affected())
+    }
+
    async fn get_queue_depth(&self) -> Result<QueueDepth, WebhookCleanerError> {
        let mut conn = self
            .pg_pool
            .acquire()
            .await
-            .map_err(|e| WebhookCleanerError::StartTxnError { error: e })?;
+            .map_err(|e| WebhookCleanerError::AcquireConnError { error: e })?;

        let base_query = r#"
        SELECT
@@ -377,6 +421,8 @@ impl WebhookCleaner {
        let untried_status = [("status", "untried")];
        let retries_status = [("status", "retries")];

+        let jobs_unstuck_count = self.reschedule_stuck_jobs().await?;
+
        let queue_depth = self.get_queue_depth().await?;
        metrics::gauge!("queue_depth_oldest_scheduled", &untried_status)
            .set(queue_depth.oldest_scheduled_at_untried.timestamp() as f64);
@@ -430,6 +476,7 @@ impl WebhookCleaner {
        }

        Ok(CleanupStats {
+            jobs_unstuck_count,
            rows_processed: rows_deleted,
            completed_row_count,
            completed_agg_row_count,
@@ -450,6 +497,8 @@ impl Cleaner for WebhookCleaner {
                metrics::counter!("webhook_cleanup_success",).increment(1);
                metrics::gauge!("webhook_cleanup_last_success_timestamp",)
                    .set(get_current_timestamp_seconds());
+                metrics::counter!("webhook_cleanup_jobs_unstuck")
+                    .increment(stats.jobs_unstuck_count);

                if stats.rows_processed > 0 {
                    let elapsed_time = start_time.elapsed().as_secs_f64();
@@ -572,6 +621,9 @@ mod tests {
            .await
            .expect("webhook cleanup_impl failed");

+        // The one 'running' job is transitioned to 'available'.
+        assert_eq!(cleanup_stats.jobs_unstuck_count, 1);
+
        // Rows that are not 'completed' or 'failed' should not be processed.
        assert_eq!(cleanup_stats.rows_processed, 13);
@@ -766,6 +818,7 @@ mod tests {
            .expect("webhook cleanup_impl failed");

        // Reported metrics are all zeroes
+        assert_eq!(cleanup_stats.jobs_unstuck_count, 0);
        assert_eq!(cleanup_stats.rows_processed, 0);
        assert_eq!(cleanup_stats.completed_row_count, 0);
        assert_eq!(cleanup_stats.completed_agg_row_count, 0);

From da6250b783b28a4570398997df9bfd2645f4cb30 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1s=20Far=C3=ADas=20Santana?=
Date: Fri, 2 Feb 2024 13:45:41 +0000
Subject: [PATCH 125/130] fix: Use a good index for dequeue (#61)

---
 migrations/20240202003133_better_dequeue_index.sql | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 migrations/20240202003133_better_dequeue_index.sql

diff --git a/migrations/20240202003133_better_dequeue_index.sql b/migrations/20240202003133_better_dequeue_index.sql
new file mode 100644
index 0000000..a619fb1
--- /dev/null
+++ b/migrations/20240202003133_better_dequeue_index.sql
@@ -0,0 +1,10 @@
+-- Dequeue is not hitting this index, so dropping is safe this time.
+DROP INDEX idx_queue_scheduled_at;
+
+/*
+Partial index used for dequeuing from job_queue.
+
+Dequeue only looks at available jobs so a partial index serves us well.
+Moreover, dequeue sorts jobs by attempt and scheduled_at, which matches this index. +*/ +CREATE INDEX idx_queue_dequeue_partial ON job_queue(queue, attempt, scheduled_at) WHERE status = 'available' :: job_status; From 8559b127ef1d05d14185621c11afe1533ec6299b Mon Sep 17 00:00:00 2001 From: Xavier Vello Date: Mon, 5 Feb 2024 11:24:56 +0100 Subject: [PATCH 126/130] declare a PG application name visible in PG stats (#62) Co-authored-by: Brett Hoerner --- hook-api/src/config.rs | 3 +++ hook-api/src/main.rs | 2 ++ hook-common/src/pgqueue.rs | 16 ++++++++++++---- hook-janitor/src/webhooks.rs | 9 ++++++--- hook-worker/src/config.rs | 3 +++ hook-worker/src/main.rs | 11 ++++++++--- 6 files changed, 34 insertions(+), 10 deletions(-) diff --git a/hook-api/src/config.rs b/hook-api/src/config.rs index 3fe88b3..55fa404 100644 --- a/hook-api/src/config.rs +++ b/hook-api/src/config.rs @@ -13,6 +13,9 @@ pub struct Config { #[envconfig(default = "default")] pub queue_name: String, + + #[envconfig(default = "100")] + pub max_pg_connections: u32, } impl Config { diff --git a/hook-api/src/main.rs b/hook-api/src/main.rs index 4fbbdfb..9a9a9fd 100644 --- a/hook-api/src/main.rs +++ b/hook-api/src/main.rs @@ -28,6 +28,8 @@ async fn main() { // side, but we don't need more than one queue for now. &config.queue_name, &config.database_url, + config.max_pg_connections, + "hook-api", ) .await .expect("failed to initialize queue"); diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index fa2b5eb..4dab918 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -7,7 +7,7 @@ use std::time; use async_trait::async_trait; use chrono; use serde; -use sqlx::postgres::{PgPool, PgPoolOptions}; +use sqlx::postgres::{PgConnectOptions, PgPool, PgPoolOptions}; use thiserror::Error; /// Enumeration of errors for operations with PgQueue. @@ -524,11 +524,19 @@ impl PgQueue { /// /// * `queue_name`: A name for the queue we are going to initialize. /// * `url`: A URL pointing to where the PostgreSQL database is hosted. - pub async fn new(queue_name: &str, url: &str) -> PgQueueResult { + pub async fn new( + queue_name: &str, + url: &str, + max_connections: u32, + app_name: &'static str, + ) -> PgQueueResult { let name = queue_name.to_owned(); + let options = PgConnectOptions::from_str(url) + .map_err(|error| PgQueueError::PoolCreationError { error })? + .application_name(app_name); let pool = PgPoolOptions::new() - .connect_lazy(url) - .map_err(|error| PgQueueError::PoolCreationError { error })?; + .max_connections(max_connections) + .connect_lazy_with(options); Ok(Self { name, pool }) } diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index ee8ff43..5cdf431 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -1,3 +1,4 @@ +use std::str::FromStr; use std::time::{Duration, Instant}; use async_trait::async_trait; @@ -7,7 +8,7 @@ use hook_common::webhook::WebhookJobError; use rdkafka::error::KafkaError; use rdkafka::producer::{FutureProducer, FutureRecord}; use serde_json::error::Error as SerdeError; -use sqlx::postgres::{PgPool, PgPoolOptions, Postgres}; +use sqlx::postgres::{PgConnectOptions, PgPool, PgPoolOptions, Postgres}; use sqlx::types::{chrono, Uuid}; use sqlx::{Row, Transaction}; use thiserror::Error; @@ -158,10 +159,12 @@ impl WebhookCleaner { kafka_producer: FutureProducer, app_metrics_topic: String, ) -> Result { + let options = PgConnectOptions::from_str(database_url) + .map_err(|error| WebhookCleanerError::PoolCreationError { error })? 
+ .application_name("hook-janitor"); let pg_pool = PgPoolOptions::new() .acquire_timeout(Duration::from_secs(10)) - .connect_lazy(database_url) - .map_err(|error| WebhookCleanerError::PoolCreationError { error })?; + .connect_lazy_with(options); Ok(Self { pg_pool, diff --git a/hook-worker/src/config.rs b/hook-worker/src/config.rs index 74342f7..477ff74 100644 --- a/hook-worker/src/config.rs +++ b/hook-worker/src/config.rs @@ -29,6 +29,9 @@ pub struct Config { #[envconfig(default = "1024")] pub max_concurrent_jobs: usize, + #[envconfig(default = "100")] + pub max_pg_connections: u32, + #[envconfig(nested = true)] pub retry_policy: RetryPolicyConfig, diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index 345fa3d..6cad3fd 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -35,9 +35,14 @@ async fn main() -> Result<(), WorkerError> { retry_policy_builder }; - let queue = PgQueue::new(config.queue_name.as_str(), &config.database_url) - .await - .expect("failed to initialize queue"); + let queue = PgQueue::new( + config.queue_name.as_str(), + &config.database_url, + config.max_pg_connections, + "hook-worker", + ) + .await + .expect("failed to initialize queue"); let worker = WebhookWorker::new( &config.worker_name, From 615c61d9f617028ae6ab1fd3f34d40536f886363 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Mon, 5 Feb 2024 10:31:56 -0700 Subject: [PATCH 127/130] Add proper e2e histrogram based on metadata created_at (#64) --- Cargo.lock | 1 + hook-api/Cargo.toml | 1 + hook-api/src/handlers/webhook.rs | 11 ++++--- hook-common/src/kafka_messages/mod.rs | 9 ++++-- hook-common/src/webhook.rs | 7 +++++ hook-janitor/src/fixtures/webhook_cleanup.sql | 30 +++++++++---------- hook-janitor/src/webhooks.rs | 2 ++ hook-worker/src/worker.rs | 8 +++++ 8 files changed, 47 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17b608c..836810e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -768,6 +768,7 @@ name = "hook-api" version = "0.1.0" dependencies = [ "axum", + "chrono", "envconfig", "eyre", "hook-common", diff --git a/hook-api/Cargo.toml b/hook-api/Cargo.toml index 96c897c..5e3530e 100644 --- a/hook-api/Cargo.toml +++ b/hook-api/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" axum = { workspace = true } envconfig = { workspace = true } eyre = { workspace = true } +chrono = { workspace = true } hook-common = { path = "../hook-common" } http-body-util = { workspace = true } metrics = { workspace = true } diff --git a/hook-api/src/handlers/webhook.rs b/hook-api/src/handlers/webhook.rs index 16ebc6d..3712aa2 100644 --- a/hook-api/src/handlers/webhook.rs +++ b/hook-api/src/handlers/webhook.rs @@ -1,13 +1,12 @@ use std::time::Instant; use axum::{extract::State, http::StatusCode, Json}; -use hook_common::webhook::{WebhookJobMetadata, WebhookJobParameters}; -use serde_derive::Deserialize; -use url::Url; - use hook_common::pgqueue::{NewJob, PgQueue}; +use hook_common::webhook::{WebhookJobMetadata, WebhookJobParameters}; use serde::Serialize; +use serde_derive::Deserialize; use tracing::{debug, error}; +use url::Url; const MAX_BODY_SIZE: usize = 1_000_000; @@ -116,6 +115,7 @@ mod tests { http::{self, Request, StatusCode}, Router, }; + use chrono::Utc; use hook_common::pgqueue::PgQueue; use hook_common::webhook::{HttpMethod, WebhookJobParameters}; use http_body_util::BodyExt; @@ -153,6 +153,7 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, + created_at: Utc::now(), }, max_attempts: 1, }) @@ -195,6 +196,7 @@ mod tests { team_id: 1, plugin_id: 2, 
plugin_config_id: 3,
+                    created_at: Utc::now(),
                },
                max_attempts: 1,
            })
@@ -283,6 +285,7 @@ mod tests {
                    team_id: 1,
                    plugin_id: 2,
                    plugin_config_id: 3,
+                    created_at: Utc::now(),
                },
                max_attempts: 1,
            })
diff --git a/hook-common/src/kafka_messages/mod.rs b/hook-common/src/kafka_messages/mod.rs
index f548563..a1de9d5 100644
--- a/hook-common/src/kafka_messages/mod.rs
+++ b/hook-common/src/kafka_messages/mod.rs
@@ -16,9 +16,12 @@ where
    D: Deserializer<'de>,
{
    let formatted: String = Deserialize::deserialize(deserializer)?;
-    let datetime = match NaiveDateTime::parse_from_str(&formatted, "%Y-%m-%d %H:%M:%S") {
-        Ok(d) => d.and_utc(),
-        Err(_) => return Err(serde::de::Error::custom("Invalid datetime format")),
+    let datetime = match DateTime::parse_from_rfc3339(&formatted) {
+        Ok(d) => d.with_timezone(&Utc),
+        Err(_) => match NaiveDateTime::parse_from_str(&formatted, "%Y-%m-%d %H:%M:%S") {
+            Ok(d) => d.and_utc(),
+            Err(_) => return Err(serde::de::Error::custom("Invalid datetime format")),
+        },
    };

    Ok(datetime)
diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs
index 11e0285..4122c20 100644
--- a/hook-common/src/webhook.rs
+++ b/hook-common/src/webhook.rs
@@ -3,9 +3,11 @@ use std::convert::From;
use std::fmt;
use std::str::FromStr;

+use chrono::{DateTime, Utc};
use serde::{de::Visitor, Deserialize, Serialize};

use crate::kafka_messages::app_metrics;
+use crate::kafka_messages::{deserialize_datetime, serialize_datetime};
use crate::pgqueue::PgQueueError;

/// Supported HTTP methods for webhooks.
@@ -135,6 +137,11 @@ pub struct WebhookJobMetadata {
    pub team_id: u32,
    pub plugin_id: i32,
    pub plugin_config_id: i32,
+    #[serde(
+        serialize_with = "serialize_datetime",
+        deserialize_with = "deserialize_datetime"
+    )]
+    pub created_at: DateTime<Utc>,
}

/// An error originating during a Webhook Job invocation.
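The deserializer above now tries RFC 3339 first and falls back to the legacy "%Y-%m-%d %H:%M:%S" format, so timestamps written before this patch keep deserializing. A minimal standalone sketch of the same fallback (assuming only the chrono crate; `parse_datetime` is an illustrative helper name, not part of the patch):

    fn parse_datetime(formatted: &str) -> Result<chrono::DateTime<chrono::Utc>, chrono::ParseError> {
        use chrono::{DateTime, NaiveDateTime, Utc};
        // Try RFC 3339 first, e.g. "2023-02-05T16:35:06.650Z"...
        DateTime::parse_from_rfc3339(formatted)
            .map(|d| d.with_timezone(&Utc))
            // ...then fall back to the legacy format, e.g. "2023-02-05 16:35:06".
            .or_else(|_| {
                NaiveDateTime::parse_from_str(formatted, "%Y-%m-%d %H:%M:%S").map(|d| d.and_utc())
            })
    }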
diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql index 5dfa827..5f2f6c1 100644 --- a/hook-janitor/src/fixtures/webhook_cleanup.sql +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -13,7 +13,7 @@ VALUES -- team:1, plugin_config:2, completed in hour 20 ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -24,7 +24,7 @@ VALUES -- team:1, plugin_config:2, completed in hour 20 (purposeful duplicate) ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -35,7 +35,7 @@ VALUES -- team:1, plugin_config:2, completed in hour 21 (different hour) ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 21:01:18.799371+00', '2023-12-19 21:01:18.799371+00', '{}', @@ -46,7 +46,7 @@ VALUES -- team:1, plugin_config:3, completed in hour 20 (different plugin_config) ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.80335+00', '2023-12-19 20:01:18.80335+00', '{}', @@ -57,7 +57,7 @@ VALUES -- team:1, plugin_config:2, completed but in a different queue ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -68,7 +68,7 @@ VALUES -- team:2, plugin_config:4, completed in hour 20 (different team) ( NULL, - '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', + '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -79,7 +79,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 20 ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -90,7 +90,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 20 (purposeful duplicate) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -101,7 +101,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 20 (different error) ( ARRAY ['{"type":"ConnectionError","details":{"error":{"name":"Connection Error"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -112,7 +112,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 21 (different hour) ( ARRAY 
['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 21:01:18.799371+00', '2023-12-19 21:01:18.799371+00', '{}', @@ -123,7 +123,7 @@ VALUES -- team:1, plugin_config:3, failed in hour 20 (different plugin_config) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -134,7 +134,7 @@ VALUES -- team:1, plugin_config:2, failed but in a different queue ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -145,7 +145,7 @@ VALUES -- team:2, plugin_config:4, failed in hour 20 (purposeful duplicate) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', + '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -156,7 +156,7 @@ VALUES -- team:1, plugin_config:2, available ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{"body": "hello world", "headers": {}, "method": "POST", "url": "https://myhost/endpoint"}', @@ -167,7 +167,7 @@ VALUES -- team:1, plugin_config:2, running ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', '2023-12-19 20:01:18.799371+00', now() - '1 hour' :: interval, '{}', diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 5cdf431..c1390a7 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -892,6 +892,7 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, + created_at: Utc::now(), }; let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); queue.enqueue(new_job).await.expect("failed to enqueue job"); @@ -918,6 +919,7 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, + created_at: Utc::now(), }; let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); queue.enqueue(new_job).await.expect("failed to enqueue job"); diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index c526c3f..14edea8 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -2,6 +2,7 @@ use std::collections; use std::sync::Arc; use std::time; +use chrono::Utc; use hook_common::health::HealthHandle; use hook_common::{ pgqueue::{Job, PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, @@ -264,6 +265,10 @@ async fn process_webhook_job( match send_result { Ok(_) => { + let end_to_end_duration = Utc::now() - webhook_job.metadata().created_at; + metrics::histogram!("webhook_jobs_end_to_end_duration_seconds", &labels) + 
.record((end_to_end_duration.num_milliseconds() as f64) / 1_000_f64); + webhook_job .complete() .await @@ -450,6 +455,8 @@ mod tests { // This is due to a long-standing cargo bug that reports imports and helper functions as unused. // See: https://github.com/rust-lang/rust/issues/46379. #[allow(unused_imports)] + use chrono::Utc; + #[allow(unused_imports)] use hook_common::health::HealthRegistry; #[allow(unused_imports)] use hook_common::pgqueue::{JobStatus, NewJob}; @@ -523,6 +530,7 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, + created_at: Utc::now(), }; let registry = HealthRegistry::new("liveness"); let liveness = registry From 304852cf2b92626e382a43af5076ca4e9a225729 Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Mon, 5 Feb 2024 11:48:49 -0700 Subject: [PATCH 128/130] Revert "Add proper e2e histrogram based on metadata created_at (#64)" (#66) --- Cargo.lock | 1 - hook-api/Cargo.toml | 1 - hook-api/src/handlers/webhook.rs | 11 +++---- hook-common/src/kafka_messages/mod.rs | 9 ++---- hook-common/src/webhook.rs | 7 ----- hook-janitor/src/fixtures/webhook_cleanup.sql | 30 +++++++++---------- hook-janitor/src/webhooks.rs | 2 -- hook-worker/src/worker.rs | 8 ----- 8 files changed, 22 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 836810e..17b608c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -768,7 +768,6 @@ name = "hook-api" version = "0.1.0" dependencies = [ "axum", - "chrono", "envconfig", "eyre", "hook-common", diff --git a/hook-api/Cargo.toml b/hook-api/Cargo.toml index 5e3530e..96c897c 100644 --- a/hook-api/Cargo.toml +++ b/hook-api/Cargo.toml @@ -9,7 +9,6 @@ edition = "2021" axum = { workspace = true } envconfig = { workspace = true } eyre = { workspace = true } -chrono = { workspace = true } hook-common = { path = "../hook-common" } http-body-util = { workspace = true } metrics = { workspace = true } diff --git a/hook-api/src/handlers/webhook.rs b/hook-api/src/handlers/webhook.rs index 3712aa2..16ebc6d 100644 --- a/hook-api/src/handlers/webhook.rs +++ b/hook-api/src/handlers/webhook.rs @@ -1,13 +1,14 @@ use std::time::Instant; use axum::{extract::State, http::StatusCode, Json}; -use hook_common::pgqueue::{NewJob, PgQueue}; use hook_common::webhook::{WebhookJobMetadata, WebhookJobParameters}; -use serde::Serialize; use serde_derive::Deserialize; -use tracing::{debug, error}; use url::Url; +use hook_common::pgqueue::{NewJob, PgQueue}; +use serde::Serialize; +use tracing::{debug, error}; + const MAX_BODY_SIZE: usize = 1_000_000; #[derive(Serialize, Deserialize)] @@ -115,7 +116,6 @@ mod tests { http::{self, Request, StatusCode}, Router, }; - use chrono::Utc; use hook_common::pgqueue::PgQueue; use hook_common::webhook::{HttpMethod, WebhookJobParameters}; use http_body_util::BodyExt; @@ -153,7 +153,6 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, - created_at: Utc::now(), }, max_attempts: 1, }) @@ -196,7 +195,6 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, - created_at: Utc::now(), }, max_attempts: 1, }) @@ -285,7 +283,6 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, - created_at: Utc::now(), }, max_attempts: 1, }) diff --git a/hook-common/src/kafka_messages/mod.rs b/hook-common/src/kafka_messages/mod.rs index a1de9d5..f548563 100644 --- a/hook-common/src/kafka_messages/mod.rs +++ b/hook-common/src/kafka_messages/mod.rs @@ -16,12 +16,9 @@ where D: Deserializer<'de>, { let formatted: String = Deserialize::deserialize(deserializer)?; - let datetime = match DateTime::parse_from_rfc3339(&formatted) { - 
Ok(d) => d.with_timezone(&Utc), - Err(_) => match NaiveDateTime::parse_from_str(&formatted, "%Y-%m-%d %H:%M:%S") { - Ok(d) => d.and_utc(), - Err(_) => return Err(serde::de::Error::custom("Invalid datetime format")), - }, + let datetime = match NaiveDateTime::parse_from_str(&formatted, "%Y-%m-%d %H:%M:%S") { + Ok(d) => d.and_utc(), + Err(_) => return Err(serde::de::Error::custom("Invalid datetime format")), }; Ok(datetime) diff --git a/hook-common/src/webhook.rs b/hook-common/src/webhook.rs index 4122c20..11e0285 100644 --- a/hook-common/src/webhook.rs +++ b/hook-common/src/webhook.rs @@ -3,11 +3,9 @@ use std::convert::From; use std::fmt; use std::str::FromStr; -use chrono::{DateTime, Utc}; use serde::{de::Visitor, Deserialize, Serialize}; use crate::kafka_messages::app_metrics; -use crate::kafka_messages::{deserialize_datetime, serialize_datetime}; use crate::pgqueue::PgQueueError; /// Supported HTTP methods for webhooks. @@ -137,11 +135,6 @@ pub struct WebhookJobMetadata { pub team_id: u32, pub plugin_id: i32, pub plugin_config_id: i32, - #[serde( - serialize_with = "serialize_datetime", - deserialize_with = "deserialize_datetime" - )] - pub created_at: DateTime, } /// An error originating during a Webhook Job invocation. diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql index 5f2f6c1..5dfa827 100644 --- a/hook-janitor/src/fixtures/webhook_cleanup.sql +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -13,7 +13,7 @@ VALUES -- team:1, plugin_config:2, completed in hour 20 ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -24,7 +24,7 @@ VALUES -- team:1, plugin_config:2, completed in hour 20 (purposeful duplicate) ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -35,7 +35,7 @@ VALUES -- team:1, plugin_config:2, completed in hour 21 (different hour) ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 21:01:18.799371+00', '2023-12-19 21:01:18.799371+00', '{}', @@ -46,7 +46,7 @@ VALUES -- team:1, plugin_config:3, completed in hour 20 (different plugin_config) ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', '2023-12-19 20:01:18.80335+00', '2023-12-19 20:01:18.80335+00', '{}', @@ -57,7 +57,7 @@ VALUES -- team:1, plugin_config:2, completed but in a different queue ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -68,7 +68,7 @@ VALUES -- team:2, plugin_config:4, completed in hour 20 (different team) ( NULL, - '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -79,7 +79,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 20 ( ARRAY 
['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -90,7 +90,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 20 (purposeful duplicate) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -101,7 +101,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 20 (different error) ( ARRAY ['{"type":"ConnectionError","details":{"error":{"name":"Connection Error"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -112,7 +112,7 @@ VALUES -- team:1, plugin_config:2, failed in hour 21 (different hour) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 21:01:18.799371+00', '2023-12-19 21:01:18.799371+00', '{}', @@ -123,7 +123,7 @@ VALUES -- team:1, plugin_config:3, failed in hour 20 (different plugin_config) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 3}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -134,7 +134,7 @@ VALUES -- team:1, plugin_config:2, failed but in a different queue ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -145,7 +145,7 @@ VALUES -- team:2, plugin_config:4, failed in hour 20 (purposeful duplicate) ( ARRAY ['{"type":"TimeoutError","details":{"error":{"name":"Timeout"}}}'::jsonb], - '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 2, "plugin_id": 99, "plugin_config_id": 4}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{}', @@ -156,7 +156,7 @@ VALUES -- team:1, plugin_config:2, available ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', '2023-12-19 20:01:18.799371+00', '{"body": "hello world", "headers": {}, "method": "POST", "url": "https://myhost/endpoint"}', @@ -167,7 +167,7 @@ VALUES -- team:1, plugin_config:2, running ( NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2, "created_at": "2023-02-05T16:35:06.650Z"}', + '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', '2023-12-19 20:01:18.799371+00', now() - '1 hour' :: interval, '{}', diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs 
index c1390a7..5cdf431 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -892,7 +892,6 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, - created_at: Utc::now(), }; let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); queue.enqueue(new_job).await.expect("failed to enqueue job"); @@ -919,7 +918,6 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, - created_at: Utc::now(), }; let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); queue.enqueue(new_job).await.expect("failed to enqueue job"); diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index 14edea8..c526c3f 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -2,7 +2,6 @@ use std::collections; use std::sync::Arc; use std::time; -use chrono::Utc; use hook_common::health::HealthHandle; use hook_common::{ pgqueue::{Job, PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, @@ -265,10 +264,6 @@ async fn process_webhook_job( match send_result { Ok(_) => { - let end_to_end_duration = Utc::now() - webhook_job.metadata().created_at; - metrics::histogram!("webhook_jobs_end_to_end_duration_seconds", &labels) - .record((end_to_end_duration.num_milliseconds() as f64) / 1_000_f64); - webhook_job .complete() .await @@ -455,8 +450,6 @@ mod tests { // This is due to a long-standing cargo bug that reports imports and helper functions as unused. // See: https://github.com/rust-lang/rust/issues/46379. #[allow(unused_imports)] - use chrono::Utc; - #[allow(unused_imports)] use hook_common::health::HealthRegistry; #[allow(unused_imports)] use hook_common::pgqueue::{JobStatus, NewJob}; @@ -530,7 +523,6 @@ mod tests { team_id: 1, plugin_id: 2, plugin_config_id: 3, - created_at: Utc::now(), }; let registry = HealthRegistry::new("liveness"); let liveness = registry From 54bf761a35333d2ffb915922d2abf52bdac89f3f Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 6 Feb 2024 08:27:34 -0700 Subject: [PATCH 129/130] Dequeue multiple items at a time (#60) --- hook-common/src/pgqueue.rs | 428 ++++++++++++++++++++++++++--------- hook-janitor/src/webhooks.rs | 14 +- hook-worker/src/config.rs | 3 + hook-worker/src/main.rs | 1 + hook-worker/src/worker.rs | 175 +++++++++----- 5 files changed, 453 insertions(+), 168 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index 4dab918..af91fbd 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -1,14 +1,17 @@ //! # PgQueue //! //! A job queue implementation backed by a PostgreSQL table. -use std::str::FromStr; use std::time; +use std::{str::FromStr, sync::Arc}; use async_trait::async_trait; use chrono; use serde; +use sqlx::postgres::any::AnyConnectionBackend; use sqlx::postgres::{PgConnectOptions, PgPool, PgPoolOptions}; use thiserror::Error; +use tokio::sync::Mutex; +use tracing::error; /// Enumeration of errors for operations with PgQueue. /// Errors that can originate from sqlx and are wrapped by us to provide additional context. 
@@ -24,16 +27,22 @@ pub enum PgQueueError {
    ParseJobStatusError(String),
    #[error("{0} is not a valid HttpMethod")]
    ParseHttpMethodError(String),
+    #[error("transaction was already closed")]
+    TransactionAlreadyClosedError,
}

#[derive(Error, Debug)]
pub enum PgJobError<T> {
    #[error("retry is an invalid state for this PgJob: {error}")]
    RetryInvalidError { job: T, error: String },
+    #[error("connection failed with: {error}")]
+    ConnectionError { error: sqlx::Error },
    #[error("{command} query failed with: {error}")]
    QueryError { command: String, error: sqlx::Error },
    #[error("transaction {command} failed with: {error}")]
    TransactionError { command: String, error: sqlx::Error },
+    #[error("transaction was already closed")]
+    TransactionAlreadyClosedError,
}

/// Enumeration of possible statuses for a Job.
@@ -217,20 +226,39 @@ pub trait PgQueueJob {
#[derive(Debug)]
pub struct PgJob<J, M> {
    pub job: Job<J, M>,
-    pub connection: sqlx::pool::PoolConnection<sqlx::postgres::Postgres>,
+    pub pool: PgPool,
+}
+
+// Container struct for a batch of PgJobs.
+pub struct PgBatch<J, M> {
+    pub jobs: Vec<PgJob<J, M>>,
+}
+
+impl<J, M> PgJob<J, M> {
+    async fn acquire_conn(
+        &mut self,
+    ) -> Result<sqlx::pool::PoolConnection<sqlx::postgres::Postgres>, PgJobError<Box<PgJob<J, M>>>>
+    {
+        self.pool
+            .acquire()
+            .await
+            .map_err(|error| PgJobError::ConnectionError { error })
+    }
}

#[async_trait]
impl<J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgJob<J, M> {
    async fn complete(mut self) -> Result<CompletedJob, PgJobError<Box<PgJob<J, M>>>> {
-        let completed_job = self
-            .job
-            .complete(&mut *self.connection)
-            .await
-            .map_err(|error| PgJobError::QueryError {
-                command: "UPDATE".to_owned(),
-                error,
-            })?;
+        let mut connection = self.acquire_conn().await?;
+
+        let completed_job =
+            self.job
+                .complete(&mut *connection)
+                .await
+                .map_err(|error| PgJobError::QueryError {
+                    command: "UPDATE".to_owned(),
+                    error,
+                })?;

        Ok(completed_job)
    }

@@ -239,9 +267,11 @@ impl<J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgJob<J, M> {
        mut self,
        error: E,
    ) -> Result<FailedJob<E>, PgJobError<Box<PgJob<J, M>>>> {
+        let mut connection = self.acquire_conn().await?;
+
        let failed_job = self
            .job
-            .fail(error, &mut *self.connection)
+            .fail(error, &mut *connection)
            .await
            .map_err(|error| PgJobError::QueryError {
                command: "UPDATE".to_owned(),
@@ -264,11 +294,13 @@ impl<J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgJob<J, M> {
            });
        }

+        let mut connection = self.acquire_conn().await?;
+
        let retried_job = self
            .job
            .retryable()
            .queue(queue)
-            .retry(error, retry_interval, &mut *self.connection)
+            .retry(error, retry_interval, &mut *connection)
            .await
            .map_err(|error| PgJobError::QueryError {
                command: "UPDATE".to_owned(),
                error,
@@ -284,7 +316,39 @@ impl<J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgJob<J, M> {
#[derive(Debug)]
pub struct PgTransactionJob<'c, J, M> {
    pub job: Job<J, M>,
-    pub transaction: sqlx::Transaction<'c, sqlx::postgres::Postgres>,
+
+    /// The open transaction this job came from. If multiple jobs were queried at once, then this
+    /// transaction will be shared between them (across async tasks and threads as necessary). See
+    /// below for more information.
+    shared_txn: Arc<Mutex<Option<sqlx::Transaction<'c, sqlx::postgres::Postgres>>>>,
+}
+
+// Container struct for a batch of PgTransactionJob. Includes a reference to the shared transaction
+// for committing the work when all of the jobs are finished.
+pub struct PgTransactionBatch<'c, J, M> {
+    pub jobs: Vec<PgTransactionJob<'c, J, M>>,
+
+    /// The open transaction the jobs in the Vec came from. This should be used to commit or
+    /// rollback when all of the work is finished.
+    shared_txn: Arc<Mutex<Option<sqlx::Transaction<'c, sqlx::postgres::Postgres>>>>,
+}
+
+impl<'c, J, M> PgTransactionBatch<'_, J, M> {
+    pub async fn commit(self) -> PgQueueResult<()> {
+        let mut txn_guard = self.shared_txn.lock().await;
+
+        txn_guard
+            .as_deref_mut()
+            .ok_or(PgQueueError::TransactionAlreadyClosedError)?
+            .commit()
+            .await
+            .map_err(|e| PgQueueError::QueryError {
+                command: "COMMIT".to_owned(),
+                error: e,
+            })?;
+
+        Ok(())
+    }
}

#[async_trait]
@@ -292,22 +356,20 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactionJob<'c, J, M>
    async fn complete(
        mut self,
    ) -> Result<CompletedJob, PgJobError<Box<PgTransactionJob<'c, J, M>>>> {
-        let completed_job = self
-            .job
-            .complete(&mut *self.transaction)
-            .await
-            .map_err(|error| PgJobError::QueryError {
-                command: "UPDATE".to_owned(),
-                error,
-            })?;
+        let mut txn_guard = self.shared_txn.lock().await;

-        self.transaction
-            .commit()
-            .await
-            .map_err(|error| PgJobError::TransactionError {
-                command: "COMMIT".to_owned(),
-                error,
-            })?;
+        let txn_ref = txn_guard
+            .as_deref_mut()
+            .ok_or(PgJobError::TransactionAlreadyClosedError)?;
+
+        let completed_job =
+            self.job
+                .complete(txn_ref)
+                .await
+                .map_err(|error| PgJobError::QueryError {
+                    command: "UPDATE".to_owned(),
+                    error,
+                })?;

        Ok(completed_job)
    }

@@ -316,22 +378,20 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactionJob<'c, J, M>
        mut self,
        error: S,
    ) -> Result<FailedJob<S>, PgJobError<Box<PgTransactionJob<'c, J, M>>>> {
-        let failed_job = self
-            .job
-            .fail(error, &mut *self.transaction)
-            .await
-            .map_err(|error| PgJobError::QueryError {
-                command: "UPDATE".to_owned(),
-                error,
-            })?;
+        let mut txn_guard = self.shared_txn.lock().await;

-        self.transaction
-            .commit()
-            .await
-            .map_err(|error| PgJobError::TransactionError {
-                command: "COMMIT".to_owned(),
-                error,
-            })?;
+        let txn_ref = txn_guard
+            .as_deref_mut()
+            .ok_or(PgJobError::TransactionAlreadyClosedError)?;
+
+        let failed_job =
+            self.job
+                .fail(error, txn_ref)
+                .await
+                .map_err(|error| PgJobError::QueryError {
+                    command: "UPDATE".to_owned(),
+                    error,
+                })?;

        Ok(failed_job)
    }

@@ -351,25 +411,23 @@ impl<'c, J: std::marker::Send, M: std::marker::Send> PgQueueJob for PgTransactionJob<'c, J, M>
            });
        }

+        let mut txn_guard = self.shared_txn.lock().await;
+
+        let txn_ref = txn_guard
+            .as_deref_mut()
+            .ok_or(PgJobError::TransactionAlreadyClosedError)?;
+
        let retried_job = self
            .job
            .retryable()
            .queue(queue)
-            .retry(error, retry_interval, &mut *self.transaction)
+            .retry(error, retry_interval, txn_ref)
            .await
            .map_err(|error| PgJobError::QueryError {
                command: "UPDATE".to_owned(),
                error,
            })?;

-        self.transaction
-            .commit()
-            .await
-            .map_err(|error| PgJobError::TransactionError {
-                command: "COMMIT".to_owned(),
-                error,
-            })?;
-
        Ok(retried_job)
    }
}
@@ -553,15 +611,16 @@ impl PgQueue {
        Ok(Self { name, pool })
    }

-    /// Dequeue a `Job` from this `PgQueue`.
-    /// The `Job` will be updated to `'running'` status, so any other `dequeue` calls will skip it.
+    /// Dequeue up to `limit` `Job`s from this `PgQueue`.
+    /// The `Job`s will be updated to `'running'` status, so any other `dequeue` calls will skip it.
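// A minimal usage sketch of the new batch API (an editorial example, not a line of
// this patch): assuming an initialized `queue` and placeholder `Params`/`Meta`
// types that implement `serde::Deserialize`, a worker loop could do:
//
//     if let Some(batch) = queue.dequeue::<Params, Meta>("worker-1", 10).await? {
//         for job in batch.jobs {
//             // Each non-transactional job re-acquires a connection from the
//             // shared pool to update its own row.
//             job.complete().await?;
//         }
//     }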
    pub async fn dequeue<
        J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static,
        M: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static,
    >(
        &self,
        attempted_by: &str,
-    ) -> PgQueueResult<Option<PgJob<J, M>>> {
+        limit: u32,
+    ) -> PgQueueResult<Option<PgBatch<J, M>>> {
        let mut connection = self
            .pool
            .acquire()
            .await
@@ -583,7 +642,7 @@ WITH available_in_queue AS (
    ORDER BY
        attempt,
        scheduled_at
-    LIMIT 1
+    LIMIT $2
    FOR UPDATE SKIP LOCKED
)
UPDATE
@@ -592,7 +651,7 @@ SET
    attempted_at = NOW(),
    status = 'running'::job_status,
    attempt = attempt + 1,
-    attempted_by = array_append(attempted_by, $2::text)
+    attempted_by = array_append(attempted_by, $3::text)
FROM
    available_in_queue
WHERE
@@ -601,14 +660,29 @@ RETURNING
    job_queue.*
"#;

-        let query_result: Result<Job<J, M>, sqlx::Error> = sqlx::query_as(base_query)
+        let query_result: Result<Vec<Job<J, M>>, sqlx::Error> = sqlx::query_as(base_query)
            .bind(&self.name)
+            .bind(limit as i64)
            .bind(attempted_by)
-            .fetch_one(&mut *connection)
+            .fetch_all(&mut *connection)
            .await;

        match query_result {
-            Ok(job) => Ok(Some(PgJob { job, connection })),
+            Ok(jobs) => {
+                if jobs.is_empty() {
+                    return Ok(None);
+                }
+
+                let pg_jobs: Vec<PgJob<J, M>> = jobs
+                    .into_iter()
+                    .map(|job| PgJob {
+                        job,
+                        pool: self.pool.clone(),
+                    })
+                    .collect();
+
+                Ok(Some(PgBatch { jobs: pg_jobs }))
+            }

            // Although connection would be closed once it goes out of scope, sqlx recommends explicitly calling close().
            // See: https://docs.rs/sqlx/latest/sqlx/postgres/any/trait.AnyConnectionBackend.html#tymethod.close.
@@ -616,6 +690,7 @@ RETURNING
                let _ = connection.close().await;
                Ok(None)
            }
+
            Err(e) => {
                let _ = connection.close().await;
                Err(PgQueueError::QueryError {
@@ -626,9 +701,10 @@ RETURNING
        }
    }

-    /// Dequeue a `Job` from this `PgQueue` and hold the transaction.
-    /// Any other `dequeue_tx` calls will skip rows locked, so by holding a transaction we ensure only one worker can dequeue a job.
-    /// Holding a transaction open can have performance implications, but it means no `'running'` state is required.
+    /// Dequeue up to `limit` `Job`s from this `PgQueue` and hold the transaction. Any other
+    /// `dequeue_tx` calls will skip rows locked, so by holding a transaction we ensure only one
+    /// worker can dequeue a job. Holding a transaction open can have performance implications, but
+    /// it means no `'running'` state is required.
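// Likewise for the transactional path (editorial example, same assumptions as the
// sketch above): every job in the batch shares one transaction, so finish all of
// the jobs and then commit exactly once.
//
//     if let Some(mut batch) = queue.dequeue_tx::<Params, Meta>("worker-1", 10).await? {
//         for job in std::mem::take(&mut batch.jobs) {
//             job.complete().await?;
//         }
//         batch.commit().await?;
//     }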
pub async fn dequeue_tx< 'a, J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, @@ -636,7 +712,8 @@ RETURNING >( &self, attempted_by: &str, - ) -> PgQueueResult>> { + limit: u32, + ) -> PgQueueResult>> { let mut tx = self .pool .begin() @@ -658,7 +735,7 @@ WITH available_in_queue AS ( ORDER BY attempt, scheduled_at - LIMIT 1 + LIMIT $2 FOR UPDATE SKIP LOCKED ) UPDATE @@ -667,7 +744,7 @@ SET attempted_at = NOW(), status = 'running'::job_status, attempt = attempt + 1, - attempted_by = array_append(attempted_by, $2::text) + attempted_by = array_append(attempted_by, $3::text) FROM available_in_queue WHERE @@ -676,20 +753,38 @@ RETURNING job_queue.* "#; - let query_result: Result, sqlx::Error> = sqlx::query_as(base_query) + let query_result: Result>, sqlx::Error> = sqlx::query_as(base_query) .bind(&self.name) + .bind(limit as i64) .bind(attempted_by) - .fetch_one(&mut *tx) + .fetch_all(&mut *tx) .await; match query_result { - Ok(job) => Ok(Some(PgTransactionJob { - job, - transaction: tx, - })), + Ok(jobs) => { + if jobs.is_empty() { + return Ok(None); + } + + let shared_txn = Arc::new(Mutex::new(Some(tx))); + + let pg_jobs: Vec> = jobs + .into_iter() + .map(|job| PgTransactionJob { + job, + shared_txn: shared_txn.clone(), + }) + .collect(); + + Ok(Some(PgTransactionBatch { + jobs: pg_jobs, + shared_txn: shared_txn.clone(), + })) + } - // Transaction is rolledback on drop. + // Transaction is rolled back on drop. Err(sqlx::Error::RowNotFound) => Ok(None), + Err(e) => Err(PgQueueError::QueryError { command: "UPDATE".to_owned(), error: e, @@ -736,7 +831,7 @@ mod tests { use super::*; use crate::retry::RetryPolicy; - #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] + #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug, Clone)] struct JobMetadata { team_id: u32, plugin_config_id: i32, @@ -753,7 +848,7 @@ mod tests { } } - #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] + #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug, Clone)] struct JobParameters { method: String, body: String, @@ -795,10 +890,13 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); let pg_job: PgJob = queue - .dequeue(&worker_id) + .dequeue(&worker_id, 1) .await - .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("failed to dequeue jobs") + .expect("didn't find any jobs to dequeue") + .jobs + .pop() + .unwrap(); assert_eq!(pg_job.job.attempt, 1); assert!(pg_job.job.attempted_by.contains(&worker_id)); @@ -816,12 +914,62 @@ mod tests { .await .expect("failed to connect to local test postgresql database"); - let pg_job: Option> = queue - .dequeue(&worker_id) + let pg_jobs: Option> = queue + .dequeue(&worker_id, 1) .await - .expect("failed to dequeue job"); + .expect("failed to dequeue jobs"); + + assert!(pg_jobs.is_none()); + } + + #[sqlx::test(migrations = "../migrations")] + async fn test_can_dequeue_multiple_jobs(db: PgPool) { + let job_target = job_target(); + let job_metadata = JobMetadata::default(); + let job_parameters = JobParameters::default(); + let worker_id = worker_id(); - assert!(pg_job.is_none()); + let queue = PgQueue::new_from_pool("test_can_dequeue_multiple_jobs", db) + .await + .expect("failed to connect to local test postgresql database"); + + for _ in 0..5 { + queue + .enqueue(NewJob::new( + 1, + job_metadata.clone(), + job_parameters.clone(), + &job_target, + )) + .await + .expect("failed to enqueue job"); + } + + // Only get 4 jobs, leaving one in the 
queue. + let limit = 4; + let batch: PgBatch = queue + .dequeue(&worker_id, limit) + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + + // Complete those 4. + assert_eq!(batch.jobs.len(), limit as usize); + for job in batch.jobs { + job.complete().await.expect("failed to complete job"); + } + + // Try to get up to 4 jobs, but only 1 remains. + let batch: PgBatch = queue + .dequeue(&worker_id, limit) + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + + assert_eq!(batch.jobs.len(), 1); // Only one job should have been left in the queue. + for job in batch.jobs { + job.complete().await.expect("failed to complete job"); + } } #[sqlx::test(migrations = "../migrations")] @@ -830,19 +978,21 @@ mod tests { let job_metadata = JobMetadata::default(); let job_parameters = JobParameters::default(); let worker_id = worker_id(); - let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); let queue = PgQueue::new_from_pool("test_can_dequeue_tx_job", db) .await .expect("failed to connect to local test postgresql database"); + let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let tx_job: PgTransactionJob<'_, JobParameters, JobMetadata> = queue - .dequeue_tx(&worker_id) + let mut batch: PgTransactionBatch<'_, JobParameters, JobMetadata> = queue + .dequeue_tx(&worker_id, 1) .await - .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("failed to dequeue jobs") + .expect("didn't find any jobs to dequeue"); + + let tx_job = batch.jobs.pop().unwrap(); assert_eq!(tx_job.job.attempt, 1); assert!(tx_job.job.attempted_by.contains(&worker_id)); @@ -852,6 +1002,65 @@ mod tests { assert_eq!(*tx_job.job.parameters.as_ref(), JobParameters::default()); assert_eq!(tx_job.job.status, JobStatus::Running); assert_eq!(tx_job.job.target, job_target); + + // Transactional jobs must be completed, failed or retried before being dropped. This is + // to prevent logic bugs when using the shared txn. + tx_job.complete().await.expect("failed to complete job"); + + batch.commit().await.expect("failed to commit transaction"); + } + + #[sqlx::test(migrations = "../migrations")] + async fn test_can_dequeue_multiple_tx_jobs(db: PgPool) { + let job_target = job_target(); + let job_metadata = JobMetadata::default(); + let job_parameters = JobParameters::default(); + let worker_id = worker_id(); + + let queue = PgQueue::new_from_pool("test_can_dequeue_multiple_tx_jobs", db) + .await + .expect("failed to connect to local test postgresql database"); + + for _ in 0..5 { + queue + .enqueue(NewJob::new( + 1, + job_metadata.clone(), + job_parameters.clone(), + &job_target, + )) + .await + .expect("failed to enqueue job"); + } + + // Only get 4 jobs, leaving one in the queue. + let limit = 4; + let mut batch: PgTransactionBatch<'_, JobParameters, JobMetadata> = queue + .dequeue_tx(&worker_id, limit) + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + + assert_eq!(batch.jobs.len(), limit as usize); + + // Complete those 4 and commit. + for job in std::mem::take(&mut batch.jobs) { + job.complete().await.expect("failed to complete job"); + } + batch.commit().await.expect("failed to commit transaction"); + + // Try to get up to 4 jobs, but only 1 remains. 
+ let mut batch: PgTransactionBatch<'_, JobParameters, JobMetadata> = queue + .dequeue_tx(&worker_id, limit) + .await + .expect("failed to dequeue job") + .expect("didn't find a job to dequeue"); + assert_eq!(batch.jobs.len(), 1); // Only one job should have been left in the queue. + + for job in std::mem::take(&mut batch.jobs) { + job.complete().await.expect("failed to complete job"); + } + batch.commit().await.expect("failed to commit transaction"); } #[sqlx::test(migrations = "../migrations")] @@ -861,12 +1070,12 @@ mod tests { .await .expect("failed to connect to local test postgresql database"); - let tx_job: Option> = queue - .dequeue_tx(&worker_id) + let batch: Option> = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job"); - assert!(tx_job.is_none()); + assert!(batch.is_none()); } #[sqlx::test(migrations = "../migrations")] @@ -888,10 +1097,13 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); let job: PgJob = queue - .dequeue(&worker_id) + .dequeue(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("didn't find a job to dequeue") + .jobs + .pop() + .unwrap(); let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); let retry_queue = retry_policy.retry_queue(&job.job.queue).to_owned(); @@ -905,10 +1117,13 @@ mod tests { .expect("failed to retry job"); let retried_job: PgJob = queue - .dequeue(&worker_id) + .dequeue(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("didn't find retried job to dequeue"); + .expect("didn't find retried job to dequeue") + .jobs + .pop() + .unwrap(); assert_eq!(retried_job.job.attempt, 2); assert!(retried_job.job.attempted_by.contains(&worker_id)); @@ -942,10 +1157,13 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); let job: PgJob = queue - .dequeue(&worker_id) + .dequeue(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("didn't find a job to dequeue") + .jobs + .pop() + .unwrap(); let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); let retry_queue = retry_policy.retry_queue(&job.job.queue).to_owned(); @@ -958,8 +1176,8 @@ mod tests { .await .expect("failed to retry job"); - let retried_job_not_found: Option> = queue - .dequeue(&worker_id) + let retried_job_not_found: Option> = queue + .dequeue(&worker_id, 1) .await .expect("failed to dequeue job"); @@ -970,10 +1188,13 @@ mod tests { .expect("failed to connect to retry queue in local test postgresql database"); let retried_job: PgJob = queue - .dequeue(&worker_id) + .dequeue(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("job not found in retry queue"); + .expect("job not found in retry queue") + .jobs + .pop() + .unwrap(); assert_eq!(retried_job.job.attempt, 2); assert!(retried_job.job.attempted_by.contains(&worker_id)); @@ -1004,10 +1225,13 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); let job: PgJob = queue - .dequeue(&worker_id) + .dequeue(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("didn't find a job to dequeue") + .jobs + .pop() + .unwrap(); let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index 5cdf431..e3b137c 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -870,10 +870,13 @@ mod tests { { // The 
fixtures include an available job, so let's complete it while the txn is open. let webhook_job: PgJob = queue - .dequeue(&"worker_id") + .dequeue(&"worker_id", 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("didn't find a job to dequeue") + .jobs + .pop() + .unwrap(); webhook_job .complete() .await @@ -896,10 +899,13 @@ mod tests { let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); queue.enqueue(new_job).await.expect("failed to enqueue job"); let webhook_job: PgJob = queue - .dequeue(&"worker_id") + .dequeue(&"worker_id", 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); + .expect("didn't find a job to dequeue") + .jobs + .pop() + .unwrap(); webhook_job .complete() .await diff --git a/hook-worker/src/config.rs b/hook-worker/src/config.rs index 477ff74..32e49f7 100644 --- a/hook-worker/src/config.rs +++ b/hook-worker/src/config.rs @@ -37,6 +37,9 @@ pub struct Config { #[envconfig(default = "true")] pub transactional: bool, + + #[envconfig(default = "1")] + pub dequeue_batch_size: u32, } impl Config { diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index 6cad3fd..fede7d2 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -47,6 +47,7 @@ async fn main() -> Result<(), WorkerError> { let worker = WebhookWorker::new( &config.worker_name, &queue, + config.dequeue_batch_size, config.poll_interval.0, config.request_timeout.0, config.max_concurrent_jobs, diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index c526c3f..437a1d3 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -2,7 +2,9 @@ use std::collections; use std::sync::Arc; use std::time; +use futures::future::join_all; use hook_common::health::HealthHandle; +use hook_common::pgqueue::{PgBatch, PgTransactionBatch}; use hook_common::{ pgqueue::{Job, PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, retry::RetryPolicy, @@ -68,6 +70,8 @@ pub struct WebhookWorker<'p> { name: String, /// The queue we will be dequeuing jobs from. queue: &'p PgQueue, + /// The maximum number of jobs to dequeue in one query. + dequeue_batch_size: u32, /// The interval for polling the queue. poll_interval: time::Duration, /// The client used for HTTP requests. @@ -81,9 +85,11 @@ pub struct WebhookWorker<'p> { } impl<'p> WebhookWorker<'p> { + #[allow(clippy::too_many_arguments)] pub fn new( name: &str, queue: &'p PgQueue, + dequeue_batch_size: u32, poll_interval: time::Duration, request_timeout: time::Duration, max_concurrent_jobs: usize, @@ -106,6 +112,7 @@ impl<'p> WebhookWorker<'p> { Self { name: name.to_owned(), queue, + dequeue_batch_size, poll_interval, client, max_concurrent_jobs, @@ -114,16 +121,20 @@ impl<'p> WebhookWorker<'p> { } } - /// Wait until a job becomes available in our queue. - async fn wait_for_job<'a>(&self) -> PgJob { + /// Wait until at least one job becomes available in our queue. 
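Before the `wait_for_jobs` body that follows: both the plain and the transactional wait methods share the same shape, a `tokio::time::interval` tick, a liveness report, then a dequeue attempt that returns on the first non-empty batch. A generic sketch of that loop under assumed names (none of these are the crate's own):

```rust
use std::future::Future;
use std::time::Duration;

/// Poll `dequeue` on a fixed interval until it yields a batch.
async fn poll_until_some<B, F, Fut>(poll_interval: Duration, mut dequeue: F) -> B
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<Option<B>, String>>,
{
    let mut interval = tokio::time::interval(poll_interval);

    loop {
        // The first tick completes immediately; later ticks pace the polling.
        interval.tick().await;

        match dequeue().await {
            Ok(Some(batch)) => return batch,
            Ok(None) => continue,
            Err(error) => {
                // The real worker logs via tracing's error! macro.
                eprintln!("error while trying to dequeue job: {error}");
                continue;
            }
        }
    }
}
```

The error arm continues rather than returning, so transient database failures degrade into extra polling instead of killing the worker.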
+ async fn wait_for_jobs<'a>(&self) -> PgBatch { let mut interval = tokio::time::interval(self.poll_interval); loop { interval.tick().await; self.liveness.report_healthy().await; - match self.queue.dequeue(&self.name).await { - Ok(Some(job)) => return job, + match self + .queue + .dequeue(&self.name, self.dequeue_batch_size) + .await + { + Ok(Some(batch)) => return batch, Ok(None) => continue, Err(error) => { error!("error while trying to dequeue job: {}", error); @@ -133,18 +144,22 @@ impl<'p> WebhookWorker<'p> { } } - /// Wait until a job becomes available in our queue in transactional mode. - async fn wait_for_job_tx<'a>( + /// Wait until at least one job becomes available in our queue in transactional mode. + async fn wait_for_jobs_tx<'a>( &self, - ) -> PgTransactionJob<'a, WebhookJobParameters, WebhookJobMetadata> { + ) -> PgTransactionBatch<'a, WebhookJobParameters, WebhookJobMetadata> { let mut interval = tokio::time::interval(self.poll_interval); loop { interval.tick().await; self.liveness.report_healthy().await; - match self.queue.dequeue_tx(&self.name).await { - Ok(Some(job)) => return job, + match self + .queue + .dequeue_tx(&self.name, self.dequeue_batch_size) + .await + { + Ok(Some(batch)) => return batch, Ok(None) => continue, Err(error) => { error!("error while trying to dequeue_tx job: {}", error); @@ -162,70 +177,104 @@ impl<'p> WebhookWorker<'p> { .set(1f64 - semaphore.available_permits() as f64 / self.max_concurrent_jobs as f64); }; + let dequeue_batch_size_histogram = metrics::histogram!("webhook_dequeue_batch_size"); + if transactional { loop { report_semaphore_utilization(); - let webhook_job = self.wait_for_job_tx().await; - spawn_webhook_job_processing_task( - self.client.clone(), - semaphore.clone(), - self.retry_policy.clone(), - webhook_job, - ) - .await; + // TODO: We could grab semaphore permits here using something like: + // `min(semaphore.available_permits(), dequeue_batch_size)` + // And then dequeue only up to that many jobs. We'd then need to hand back the + // difference in permits based on how many jobs were dequeued. + let mut batch = self.wait_for_jobs_tx().await; + dequeue_batch_size_histogram.record(batch.jobs.len() as f64); + + // Get enough permits for the jobs before spawning a task. + let permits = semaphore + .clone() + .acquire_many_owned(batch.jobs.len() as u32) + .await + .expect("semaphore has been closed"); + + let client = self.client.clone(); + let retry_policy = self.retry_policy.clone(); + + tokio::spawn(async move { + let mut futures = Vec::new(); + + // We have to `take` the Vec of jobs from the batch to avoid a borrow checker + // error below when we commit. 
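On the `std::mem::take` in the loop below: iterating `batch.jobs` by value would partially move `batch`, and the later `batch.commit()` would then fail to compile. Taking the `Vec` swaps an empty one into place so `batch` stays whole. A minimal reproduction with mock types (not the crate's own):

```rust
struct Batch {
    jobs: Vec<u32>,
}

impl Batch {
    fn commit(self) {}
}

fn drain(mut batch: Batch) {
    // `for job in batch.jobs` would move the field out of `batch`, making
    // the commit below a use of a partially moved value. `take` leaves an
    // empty Vec behind instead.
    for job in std::mem::take(&mut batch.jobs) {
        let _ = job; // stand-in for process_webhook_job(...)
    }

    batch.commit();
}
```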
+ for job in std::mem::take(&mut batch.jobs) { + let client = client.clone(); + let retry_policy = retry_policy.clone(); + + let future = + async move { process_webhook_job(client, job, &retry_policy).await }; + + futures.push(future); + } + + let results = join_all(futures).await; + for result in results { + if let Err(e) = result { + error!("error processing webhook job: {}", e); + } + } + + let _ = batch.commit().await.map_err(|e| { + error!("error committing transactional batch: {}", e); + }); + + drop(permits); + }); } } else { loop { report_semaphore_utilization(); - let webhook_job = self.wait_for_job().await; - spawn_webhook_job_processing_task( - self.client.clone(), - semaphore.clone(), - self.retry_policy.clone(), - webhook_job, - ) - .await; + // TODO: We could grab semaphore permits here using something like: + // `min(semaphore.available_permits(), dequeue_batch_size)` + // And then dequeue only up to that many jobs. We'd then need to hand back the + // difference in permits based on how many jobs were dequeued. + let batch = self.wait_for_jobs().await; + dequeue_batch_size_histogram.record(batch.jobs.len() as f64); + + // Get enough permits for the jobs before spawning a task. + let permits = semaphore + .clone() + .acquire_many_owned(batch.jobs.len() as u32) + .await + .expect("semaphore has been closed"); + + let client = self.client.clone(); + let retry_policy = self.retry_policy.clone(); + + tokio::spawn(async move { + let mut futures = Vec::new(); + + for job in batch.jobs { + let client = client.clone(); + let retry_policy = retry_policy.clone(); + + let future = + async move { process_webhook_job(client, job, &retry_policy).await }; + + futures.push(future); + } + + let results = join_all(futures).await; + for result in results { + if let Err(e) = result { + error!("error processing webhook job: {}", e); + } + } + + drop(permits); + }); } } } } -/// Spawn a Tokio task to process a Webhook Job once we successfully acquire a permit. -/// -/// # Arguments -/// -/// * `client`: An HTTP client to execute the webhook job request. -/// * `semaphore`: A semaphore used for rate limiting purposes. This function will panic if this semaphore is closed. -/// * `retry_policy`: The retry policy used to set retry parameters if a job fails and has remaining attempts. -/// * `webhook_job`: The webhook job to process as dequeued from `hook_common::pgqueue::PgQueue`. -async fn spawn_webhook_job_processing_task( - client: reqwest::Client, - semaphore: Arc, - retry_policy: RetryPolicy, - webhook_job: W, -) -> tokio::task::JoinHandle> { - let permit = semaphore - .acquire_owned() - .await - .expect("semaphore has been closed"); - - let labels = [("queue", webhook_job.queue())]; - - metrics::counter!("webhook_jobs_total", &labels).increment(1); - - tokio::spawn(async move { - let result = process_webhook_job(client, webhook_job, &retry_policy).await; - drop(permit); - match result { - Ok(_) => Ok(()), - Err(error) => { - error!("failed to process webhook job: {}", error); - Err(error) - } - } - }) -} - /// Process a webhook job by transitioning it to its appropriate state after its request is sent. 
/// After we finish, the webhook job will be set as completed (if the request was successful), retryable (if the request /// was unsuccessful but we can still attempt a retry), or failed (if the request was unsuccessful and no more retries @@ -248,6 +297,7 @@ async fn process_webhook_job( let parameters = webhook_job.parameters(); let labels = [("queue", webhook_job.queue())]; + metrics::counter!("webhook_jobs_total", &labels).increment(1); let now = tokio::time::Instant::now(); @@ -543,6 +593,7 @@ mod tests { let worker = WebhookWorker::new( &worker_id, &queue, + 1, time::Duration::from_millis(100), time::Duration::from_millis(5000), 10, @@ -550,7 +601,7 @@ mod tests { liveness, ); - let consumed_job = worker.wait_for_job().await; + let consumed_job = worker.wait_for_jobs().await.jobs.pop().unwrap(); assert_eq!(consumed_job.job.attempt, 1); assert!(consumed_job.job.attempted_by.contains(&worker_id)); From 26672aeb2c8d114e3f82c41916e935a3d753ee8a Mon Sep 17 00:00:00 2001 From: Brett Hoerner Date: Tue, 6 Feb 2024 08:36:10 -0700 Subject: [PATCH 130/130] Remove non-transactional mode (#65) --- hook-common/src/pgqueue.rs | 321 +----------------- hook-janitor/src/fixtures/webhook_cleanup.sql | 11 - hook-janitor/src/webhooks.rs | 78 +---- hook-worker/src/config.rs | 3 - hook-worker/src/main.rs | 2 +- hook-worker/src/worker.rs | 173 +++------- 6 files changed, 77 insertions(+), 511 deletions(-) diff --git a/hook-common/src/pgqueue.rs b/hook-common/src/pgqueue.rs index af91fbd..4a8b489 100644 --- a/hook-common/src/pgqueue.rs +++ b/hook-common/src/pgqueue.rs @@ -60,8 +60,6 @@ pub enum JobStatus { Discarded, /// A job that was unsuccessfully completed by a worker. Failed, - /// A job that was picked up by a worker and it's currentlly being run. - Running, } /// Allow casting JobStatus from strings. @@ -73,7 +71,6 @@ impl FromStr for JobStatus { "available" => Ok(JobStatus::Available), "completed" => Ok(JobStatus::Completed), "failed" => Ok(JobStatus::Failed), - "running" => Ok(JobStatus::Running), invalid => Err(PgQueueError::ParseJobStatusError(invalid.to_owned())), } } @@ -222,95 +219,6 @@ pub trait PgQueueJob { ) -> Result>>; } -/// A Job that can be updated in PostgreSQL. -#[derive(Debug)] -pub struct PgJob { - pub job: Job, - pub pool: PgPool, -} - -// Container struct for a batch of PgJobs. 
-pub struct PgBatch { - pub jobs: Vec>, -} - -impl PgJob { - async fn acquire_conn( - &mut self, - ) -> Result, PgJobError>>> - { - self.pool - .acquire() - .await - .map_err(|error| PgJobError::ConnectionError { error }) - } -} - -#[async_trait] -impl PgQueueJob for PgJob { - async fn complete(mut self) -> Result>>> { - let mut connection = self.acquire_conn().await?; - - let completed_job = - self.job - .complete(&mut *connection) - .await - .map_err(|error| PgJobError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; - - Ok(completed_job) - } - - async fn fail( - mut self, - error: E, - ) -> Result, PgJobError>>> { - let mut connection = self.acquire_conn().await?; - - let failed_job = self - .job - .fail(error, &mut *connection) - .await - .map_err(|error| PgJobError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; - - Ok(failed_job) - } - - async fn retry( - mut self, - error: E, - retry_interval: time::Duration, - queue: &str, - ) -> Result>>> { - if self.job.is_gte_max_attempts() { - return Err(PgJobError::RetryInvalidError { - job: Box::new(self), - error: "Maximum attempts reached".to_owned(), - }); - } - - let mut connection = self.acquire_conn().await?; - - let retried_job = self - .job - .retryable() - .queue(queue) - .retry(error, retry_interval, &mut *connection) - .await - .map_err(|error| PgJobError::QueryError { - command: "UPDATE".to_owned(), - error, - })?; - - Ok(retried_job) - } -} - /// A Job within an open PostgreSQL transaction. /// This implementation allows 'hiding' the job from any other workers running SKIP LOCKED queries. #[derive(Debug)] @@ -611,96 +519,6 @@ impl PgQueue { Ok(Self { name, pool }) } - /// Dequeue up to `limit` `Job`s from this `PgQueue`. - /// The `Job`s will be updated to `'running'` status, so any other `dequeue` calls will skip it. - pub async fn dequeue< - J: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, - M: for<'d> serde::Deserialize<'d> + std::marker::Send + std::marker::Unpin + 'static, - >( - &self, - attempted_by: &str, - limit: u32, - ) -> PgQueueResult>> { - let mut connection = self - .pool - .acquire() - .await - .map_err(|error| PgQueueError::ConnectionError { error })?; - - // The query that follows uses a FOR UPDATE SKIP LOCKED clause. - // For more details on this see: 2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5. - let base_query = r#" -WITH available_in_queue AS ( - SELECT - id - FROM - job_queue - WHERE - status = 'available' - AND scheduled_at <= NOW() - AND queue = $1 - ORDER BY - attempt, - scheduled_at - LIMIT $2 - FOR UPDATE SKIP LOCKED -) -UPDATE - job_queue -SET - attempted_at = NOW(), - status = 'running'::job_status, - attempt = attempt + 1, - attempted_by = array_append(attempted_by, $3::text) -FROM - available_in_queue -WHERE - job_queue.id = available_in_queue.id -RETURNING - job_queue.* - "#; - - let query_result: Result>, sqlx::Error> = sqlx::query_as(base_query) - .bind(&self.name) - .bind(limit as i64) - .bind(attempted_by) - .fetch_all(&mut *connection) - .await; - - match query_result { - Ok(jobs) => { - if jobs.is_empty() { - return Ok(None); - } - - let pg_jobs: Vec> = jobs - .into_iter() - .map(|job| PgJob { - job, - pool: self.pool.clone(), - }) - .collect(); - - Ok(Some(PgBatch { jobs: pg_jobs })) - } - - // Although connection would be closed once it goes out of scope, sqlx recommends explicitly calling close(). 
- // See: https://docs.rs/sqlx/latest/sqlx/postgres/any/trait.AnyConnectionBackend.html#tymethod.close. - Err(sqlx::Error::RowNotFound) => { - let _ = connection.close().await; - Ok(None) - } - - Err(e) => { - let _ = connection.close().await; - Err(PgQueueError::QueryError { - command: "UPDATE".to_owned(), - error: e, - }) - } - } - } - /// Dequeue up to `limit` `Job`s from this `PgQueue` and hold the transaction. Any other /// `dequeue_tx` calls will skip rows locked, so by holding a transaction we ensure only one /// worker can dequeue a job. Holding a transaction open can have performance implications, but @@ -742,7 +560,6 @@ UPDATE job_queue SET attempted_at = NOW(), - status = 'running'::job_status, attempt = attempt + 1, attempted_by = array_append(attempted_by, $3::text) FROM @@ -875,103 +692,6 @@ mod tests { "https://myhost/endpoint".to_owned() } - #[sqlx::test(migrations = "../migrations")] - async fn test_can_dequeue_job(db: PgPool) { - let job_target = job_target(); - let job_parameters = JobParameters::default(); - let job_metadata = JobMetadata::default(); - let worker_id = worker_id(); - let new_job = NewJob::new(1, job_metadata, job_parameters, &job_target); - - let queue = PgQueue::new_from_pool("test_can_dequeue_job", db) - .await - .expect("failed to connect to local test postgresql database"); - - queue.enqueue(new_job).await.expect("failed to enqueue job"); - - let pg_job: PgJob = queue - .dequeue(&worker_id, 1) - .await - .expect("failed to dequeue jobs") - .expect("didn't find any jobs to dequeue") - .jobs - .pop() - .unwrap(); - - assert_eq!(pg_job.job.attempt, 1); - assert!(pg_job.job.attempted_by.contains(&worker_id)); - assert_eq!(pg_job.job.attempted_by.len(), 1); - assert_eq!(pg_job.job.max_attempts, 1); - assert_eq!(*pg_job.job.parameters.as_ref(), JobParameters::default()); - assert_eq!(pg_job.job.status, JobStatus::Running); - assert_eq!(pg_job.job.target, job_target); - } - - #[sqlx::test(migrations = "../migrations")] - async fn test_dequeue_returns_none_on_no_jobs(db: PgPool) { - let worker_id = worker_id(); - let queue = PgQueue::new_from_pool("test_dequeue_returns_none_on_no_jobs", db) - .await - .expect("failed to connect to local test postgresql database"); - - let pg_jobs: Option> = queue - .dequeue(&worker_id, 1) - .await - .expect("failed to dequeue jobs"); - - assert!(pg_jobs.is_none()); - } - - #[sqlx::test(migrations = "../migrations")] - async fn test_can_dequeue_multiple_jobs(db: PgPool) { - let job_target = job_target(); - let job_metadata = JobMetadata::default(); - let job_parameters = JobParameters::default(); - let worker_id = worker_id(); - - let queue = PgQueue::new_from_pool("test_can_dequeue_multiple_jobs", db) - .await - .expect("failed to connect to local test postgresql database"); - - for _ in 0..5 { - queue - .enqueue(NewJob::new( - 1, - job_metadata.clone(), - job_parameters.clone(), - &job_target, - )) - .await - .expect("failed to enqueue job"); - } - - // Only get 4 jobs, leaving one in the queue. - let limit = 4; - let batch: PgBatch = queue - .dequeue(&worker_id, limit) - .await - .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); - - // Complete those 4. - assert_eq!(batch.jobs.len(), limit as usize); - for job in batch.jobs { - job.complete().await.expect("failed to complete job"); - } - - // Try to get up to 4 jobs, but only 1 remains. 
- let batch: PgBatch = queue - .dequeue(&worker_id, limit) - .await - .expect("failed to dequeue job") - .expect("didn't find a job to dequeue"); - - assert_eq!(batch.jobs.len(), 1); // Only one job should have been left in the queue. - for job in batch.jobs { - job.complete().await.expect("failed to complete job"); - } - } - #[sqlx::test(migrations = "../migrations")] async fn test_can_dequeue_tx_job(db: PgPool) { let job_target = job_target(); @@ -1000,7 +720,6 @@ mod tests { assert_eq!(tx_job.job.max_attempts, 1); assert_eq!(*tx_job.job.metadata.as_ref(), JobMetadata::default()); assert_eq!(*tx_job.job.parameters.as_ref(), JobParameters::default()); - assert_eq!(tx_job.job.status, JobStatus::Running); assert_eq!(tx_job.job.target, job_target); // Transactional jobs must be completed, failed or retried before being dropped. This is @@ -1096,14 +815,12 @@ mod tests { .expect("failed to connect to local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: PgJob = queue - .dequeue(&worker_id, 1) + let mut batch: PgTransactionBatch<'_, JobParameters, JobMetadata> = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue") - .jobs - .pop() - .unwrap(); + .expect("didn't find a job to dequeue"); + let job = batch.jobs.pop().unwrap(); let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); let retry_queue = retry_policy.retry_queue(&job.job.queue).to_owned(); @@ -1115,9 +832,10 @@ mod tests { ) .await .expect("failed to retry job"); + batch.commit().await.expect("failed to commit transaction"); - let retried_job: PgJob = queue - .dequeue(&worker_id, 1) + let retried_job: PgTransactionJob = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job") .expect("didn't find retried job to dequeue") @@ -1133,7 +851,6 @@ mod tests { *retried_job.job.parameters.as_ref(), JobParameters::default() ); - assert_eq!(retried_job.job.status, JobStatus::Running); assert_eq!(retried_job.job.target, job_target); } @@ -1156,14 +873,12 @@ mod tests { .expect("failed to connect to queue in local test postgresql database"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: PgJob = queue - .dequeue(&worker_id, 1) + let mut batch: PgTransactionBatch = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue") - .jobs - .pop() - .unwrap(); + .expect("didn't find a job to dequeue"); + let job = batch.jobs.pop().unwrap(); let retry_interval = retry_policy.retry_interval(job.job.attempt as u32, None); let retry_queue = retry_policy.retry_queue(&job.job.queue).to_owned(); @@ -1175,9 +890,10 @@ mod tests { ) .await .expect("failed to retry job"); + batch.commit().await.expect("failed to commit transaction"); - let retried_job_not_found: Option> = queue - .dequeue(&worker_id, 1) + let retried_job_not_found: Option> = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job"); @@ -1187,8 +903,8 @@ mod tests { .await .expect("failed to connect to retry queue in local test postgresql database"); - let retried_job: PgJob = queue - .dequeue(&worker_id, 1) + let retried_job: PgTransactionJob = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job") .expect("job not found in retry queue") @@ -1204,7 +920,6 @@ mod tests { *retried_job.job.parameters.as_ref(), JobParameters::default() ); - assert_eq!(retried_job.job.status, JobStatus::Running); 
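On the reworked retry tests around this hunk: once everything goes through `dequeue_tx`, a retried job only becomes visible to other workers when the batch's transaction commits, which is why the tests now call `batch.commit()` before dequeuing again. A sketch of that visibility rule in plain sqlx (hypothetical helper over a simplified query, assuming the repo's `job_status` enum):

```rust
use sqlx::PgPool;

async fn retry_then_commit(pool: &PgPool, job_id: i64) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;

    sqlx::query(
        "UPDATE job_queue SET status = 'available'::job_status, scheduled_at = NOW() WHERE id = $1",
    )
    .bind(job_id)
    .execute(&mut *tx)
    .await?;

    // Until this commit, other SKIP LOCKED readers cannot see the
    // rescheduled row; dropping `tx` instead would roll the retry back.
    tx.commit().await
}
```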
assert_eq!(retried_job.job.target, job_target); } @@ -1224,8 +939,8 @@ mod tests { queue.enqueue(new_job).await.expect("failed to enqueue job"); - let job: PgJob = queue - .dequeue(&worker_id, 1) + let job: PgTransactionJob = queue + .dequeue_tx(&worker_id, 1) .await .expect("failed to dequeue job") .expect("didn't find a job to dequeue") diff --git a/hook-janitor/src/fixtures/webhook_cleanup.sql b/hook-janitor/src/fixtures/webhook_cleanup.sql index 5dfa827..e0b9a7a 100644 --- a/hook-janitor/src/fixtures/webhook_cleanup.sql +++ b/hook-janitor/src/fixtures/webhook_cleanup.sql @@ -163,15 +163,4 @@ VALUES 'webhooks', 'available', 'https://myhost/endpoint' - ), - -- team:1, plugin_config:2, running - ( - NULL, - '{"team_id": 1, "plugin_id": 99, "plugin_config_id": 2}', - '2023-12-19 20:01:18.799371+00', - now() - '1 hour' :: interval, - '{}', - 'webhooks', - 'running', - 'https://myhost/endpoint' ); \ No newline at end of file diff --git a/hook-janitor/src/webhooks.rs b/hook-janitor/src/webhooks.rs index e3b137c..7f7fadd 100644 --- a/hook-janitor/src/webhooks.rs +++ b/hook-janitor/src/webhooks.rs @@ -28,8 +28,6 @@ pub enum WebhookCleanerError { AcquireConnError { error: sqlx::Error }, #[error("failed to acquire conn and start txn: {error}")] StartTxnError { error: sqlx::Error }, - #[error("failed to reschedule stuck jobs: {error}")] - RescheduleStuckJobsError { error: sqlx::Error }, #[error("failed to get queue depth: {error}")] GetQueueDepthError { error: sqlx::Error }, #[error("failed to get row count: {error}")] @@ -145,7 +143,6 @@ impl From for AppMetric { struct SerializableTxn<'a>(Transaction<'a, Postgres>); struct CleanupStats { - jobs_unstuck_count: u64, rows_processed: u64, completed_row_count: u64, completed_agg_row_count: u64, @@ -186,45 +183,6 @@ impl WebhookCleaner { }) } - async fn reschedule_stuck_jobs(&self) -> Result { - let mut conn = self - .pg_pool - .acquire() - .await - .map_err(|e| WebhookCleanerError::AcquireConnError { error: e })?; - - // The "non-transactional" worker runs the risk of crashing and leaving jobs permanently in - // the `running` state. This query will reschedule any jobs that have been in the running - // state for more than 2 minutes (which is *much* longer than we expect any Webhook job to - // take). - // - // We don't need to increment the `attempt` counter here because the worker already did that - // when it moved the job into `running`. - // - // If the previous worker was somehow stalled for 2 minutes and completes the task, that - // will mean we sent duplicate Webhooks. Success stats should not be affected, since both - // will update the same job row, which will only be processed once by the janitor. 
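Context for the deletion that follows: the comment above described rescheduling jobs stranded in `'running'` when a non-transactional worker crashed. With only transactional dequeue left, a crashed worker's open transaction is rolled back by Postgres as soon as the connection goes away, so no row can be stranded mid-flight and the whole unstick pass becomes dead code. A small sketch of the guarantee being relied on (hypothetical function, sqlx 0.7 style):

```rust
use sqlx::PgPool;

async fn claim_then_crash(pool: &PgPool) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;

    sqlx::query("UPDATE job_queue SET attempt = attempt + 1 WHERE id = 1")
        .execute(&mut *tx)
        .await?;

    // `tx` is dropped here without a commit: sqlx issues a rollback, and the
    // UPDATE (along with the row locks) evaporates.
    Ok(())
}
```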
- - let base_query = r#" - UPDATE - job_queue - SET - status = 'available'::job_status, - last_attempt_finished_at = NOW(), - scheduled_at = NOW() - WHERE - status = 'running'::job_status - AND attempted_at < NOW() - INTERVAL '2 minutes' - "#; - - let result = sqlx::query(base_query) - .execute(&mut *conn) - .await - .map_err(|e| WebhookCleanerError::RescheduleStuckJobsError { error: e })?; - - Ok(result.rows_affected()) - } - async fn get_queue_depth(&self) -> Result { let mut conn = self .pg_pool @@ -424,8 +382,6 @@ impl WebhookCleaner { let untried_status = [("status", "untried")]; let retries_status = [("status", "retries")]; - let jobs_unstuck_count = self.reschedule_stuck_jobs().await?; - let queue_depth = self.get_queue_depth().await?; metrics::gauge!("queue_depth_oldest_scheduled", &untried_status) .set(queue_depth.oldest_scheduled_at_untried.timestamp() as f64); @@ -479,7 +435,6 @@ impl WebhookCleaner { } Ok(CleanupStats { - jobs_unstuck_count, rows_processed: rows_deleted, completed_row_count, completed_agg_row_count, @@ -500,8 +455,6 @@ impl Cleaner for WebhookCleaner { metrics::counter!("webhook_cleanup_success",).increment(1); metrics::gauge!("webhook_cleanup_last_success_timestamp",) .set(get_current_timestamp_seconds()); - metrics::counter!("webhook_cleanup_jobs_unstuck") - .increment(stats.jobs_unstuck_count); if stats.rows_processed > 0 { let elapsed_time = start_time.elapsed().as_secs_f64(); @@ -546,7 +499,8 @@ mod tests { use hook_common::kafka_messages::app_metrics::{ Error as WebhookError, ErrorDetails, ErrorType, }; - use hook_common::pgqueue::{NewJob, PgJob, PgQueue, PgQueueJob}; + use hook_common::pgqueue::PgQueueJob; + use hook_common::pgqueue::{NewJob, PgQueue, PgTransactionBatch}; use hook_common::webhook::{HttpMethod, WebhookJobMetadata, WebhookJobParameters}; use rdkafka::consumer::{Consumer, StreamConsumer}; use rdkafka::mocking::MockCluster; @@ -624,9 +578,6 @@ mod tests { .await .expect("webbook cleanup_impl failed"); - // The one 'running' job is transitioned to 'available'. - assert_eq!(cleanup_stats.jobs_unstuck_count, 1); - // Rows that are not 'completed' or 'failed' should not be processed. assert_eq!(cleanup_stats.rows_processed, 13); @@ -821,7 +772,6 @@ mod tests { .expect("webbook cleanup_impl failed"); // Reported metrics are all zeroes - assert_eq!(cleanup_stats.jobs_unstuck_count, 0); assert_eq!(cleanup_stats.rows_processed, 0); assert_eq!(cleanup_stats.completed_row_count, 0); assert_eq!(cleanup_stats.completed_agg_row_count, 0); @@ -865,22 +815,20 @@ mod tests { assert_eq!(get_count_from_new_conn(&db, "completed").await, 6); assert_eq!(get_count_from_new_conn(&db, "failed").await, 7); assert_eq!(get_count_from_new_conn(&db, "available").await, 1); - assert_eq!(get_count_from_new_conn(&db, "running").await, 1); { // The fixtures include an available job, so let's complete it while the txn is open. 
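Ahead of the janitor-test hunk below: the flow patch 130 standardizes on is dequeue a batch, complete every job, then commit the shared transaction exactly once. A mock-typed sketch of that discipline (`MockJob`/`MockBatch` are stand-ins, not the crate's `PgTransactionJob`/`PgTransactionBatch`):

```rust
struct MockJob;

impl MockJob {
    async fn complete(self) -> Result<(), ()> {
        Ok(())
    }
}

struct MockBatch {
    jobs: Vec<MockJob>,
}

impl MockBatch {
    async fn commit(self) -> Result<(), ()> {
        Ok(())
    }
}

async fn drain(mut batch: MockBatch) -> Result<(), ()> {
    while let Some(job) = batch.jobs.pop() {
        job.complete().await?; // settle every job first...
    }
    batch.commit().await // ...then commit the shared transaction once
}
```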
- let webhook_job: PgJob = queue - .dequeue(&"worker_id", 1) + let mut batch: PgTransactionBatch<'_, WebhookJobParameters, WebhookJobMetadata> = queue + .dequeue_tx(&"worker_id", 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue") - .jobs - .pop() - .unwrap(); + .expect("didn't find a job to dequeue"); + let webhook_job = batch.jobs.pop().unwrap(); webhook_job .complete() .await .expect("failed to complete job"); + batch.commit().await.expect("failed to commit batch"); } { @@ -898,18 +846,17 @@ mod tests { }; let new_job = NewJob::new(1, job_metadata, job_parameters, &"target"); queue.enqueue(new_job).await.expect("failed to enqueue job"); - let webhook_job: PgJob = queue - .dequeue(&"worker_id", 1) + let mut batch: PgTransactionBatch<'_, WebhookJobParameters, WebhookJobMetadata> = queue + .dequeue_tx(&"worker_id", 1) .await .expect("failed to dequeue job") - .expect("didn't find a job to dequeue") - .jobs - .pop() - .unwrap(); + .expect("didn't find a job to dequeue"); + let webhook_job = batch.jobs.pop().unwrap(); webhook_job .complete() .await .expect("failed to complete job"); + batch.commit().await.expect("failed to commit batch"); } { @@ -950,6 +897,5 @@ mod tests { assert_eq!(get_count_from_new_conn(&db, "completed").await, 2); assert_eq!(get_count_from_new_conn(&db, "failed").await, 0); assert_eq!(get_count_from_new_conn(&db, "available").await, 1); - assert_eq!(get_count_from_new_conn(&db, "running").await, 1); } } diff --git a/hook-worker/src/config.rs b/hook-worker/src/config.rs index 32e49f7..ceb690f 100644 --- a/hook-worker/src/config.rs +++ b/hook-worker/src/config.rs @@ -35,9 +35,6 @@ pub struct Config { #[envconfig(nested = true)] pub retry_policy: RetryPolicyConfig, - #[envconfig(default = "true")] - pub transactional: bool, - #[envconfig(default = "1")] pub dequeue_batch_size: u32, } diff --git a/hook-worker/src/main.rs b/hook-worker/src/main.rs index fede7d2..2997dfc 100644 --- a/hook-worker/src/main.rs +++ b/hook-worker/src/main.rs @@ -67,7 +67,7 @@ async fn main() -> Result<(), WorkerError> { .expect("failed to start serving metrics"); }); - worker.run(config.transactional).await; + worker.run().await; Ok(()) } diff --git a/hook-worker/src/worker.rs b/hook-worker/src/worker.rs index 437a1d3..b83c909 100644 --- a/hook-worker/src/worker.rs +++ b/hook-worker/src/worker.rs @@ -4,9 +4,9 @@ use std::time; use futures::future::join_all; use hook_common::health::HealthHandle; -use hook_common::pgqueue::{PgBatch, PgTransactionBatch}; +use hook_common::pgqueue::PgTransactionBatch; use hook_common::{ - pgqueue::{Job, PgJob, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, + pgqueue::{Job, PgJobError, PgQueue, PgQueueError, PgQueueJob, PgTransactionJob}, retry::RetryPolicy, webhook::{HttpMethod, WebhookJobError, WebhookJobMetadata, WebhookJobParameters}, }; @@ -50,20 +50,6 @@ impl WebhookJob for PgTransactionJob<'_, WebhookJobParameters, WebhookJobMetadat } } -impl WebhookJob for PgJob { - fn parameters(&self) -> &WebhookJobParameters { - &self.job.parameters - } - - fn metadata(&self) -> &WebhookJobMetadata { - &self.job.metadata - } - - fn job(&self) -> &Job { - &self.job - } -} - /// A worker to poll `PgQueue` and spawn tasks to process webhooks when a job becomes available. pub struct WebhookWorker<'p> { /// An identifier for this worker. Used to mark jobs we have consumed. @@ -121,29 +107,6 @@ impl<'p> WebhookWorker<'p> { } } - /// Wait until at least one job becomes available in our queue. 
- async fn wait_for_jobs<'a>(&self) -> PgBatch { - let mut interval = tokio::time::interval(self.poll_interval); - - loop { - interval.tick().await; - self.liveness.report_healthy().await; - - match self - .queue - .dequeue(&self.name, self.dequeue_batch_size) - .await - { - Ok(Some(batch)) => return batch, - Ok(None) => continue, - Err(error) => { - error!("error while trying to dequeue job: {}", error); - continue; - } - } - } - } - /// Wait until at least one job becomes available in our queue in transactional mode. async fn wait_for_jobs_tx<'a>( &self, @@ -170,7 +133,7 @@ impl<'p> WebhookWorker<'p> { } /// Run this worker to continuously process any jobs that become available. - pub async fn run(&self, transactional: bool) { + pub async fn run(&self) { let semaphore = Arc::new(sync::Semaphore::new(self.max_concurrent_jobs)); let report_semaphore_utilization = || { metrics::gauge!("webhook_worker_saturation_percent") @@ -179,98 +142,53 @@ impl<'p> WebhookWorker<'p> { let dequeue_batch_size_histogram = metrics::histogram!("webhook_dequeue_batch_size"); - if transactional { - loop { - report_semaphore_utilization(); - // TODO: We could grab semaphore permits here using something like: - // `min(semaphore.available_permits(), dequeue_batch_size)` - // And then dequeue only up to that many jobs. We'd then need to hand back the - // difference in permits based on how many jobs were dequeued. - let mut batch = self.wait_for_jobs_tx().await; - dequeue_batch_size_histogram.record(batch.jobs.len() as f64); - - // Get enough permits for the jobs before spawning a task. - let permits = semaphore - .clone() - .acquire_many_owned(batch.jobs.len() as u32) - .await - .expect("semaphore has been closed"); - - let client = self.client.clone(); - let retry_policy = self.retry_policy.clone(); - - tokio::spawn(async move { - let mut futures = Vec::new(); - - // We have to `take` the Vec of jobs from the batch to avoid a borrow checker - // error below when we commit. - for job in std::mem::take(&mut batch.jobs) { - let client = client.clone(); - let retry_policy = retry_policy.clone(); - - let future = - async move { process_webhook_job(client, job, &retry_policy).await }; - - futures.push(future); - } + loop { + report_semaphore_utilization(); + // TODO: We could grab semaphore permits here using something like: + // `min(semaphore.available_permits(), dequeue_batch_size)` + // And then dequeue only up to that many jobs. We'd then need to hand back the + // difference in permits based on how many jobs were dequeued. + let mut batch = self.wait_for_jobs_tx().await; + dequeue_batch_size_histogram.record(batch.jobs.len() as f64); + + // Get enough permits for the jobs before spawning a task. + let permits = semaphore + .clone() + .acquire_many_owned(batch.jobs.len() as u32) + .await + .expect("semaphore has been closed"); - let results = join_all(futures).await; - for result in results { - if let Err(e) = result { - error!("error processing webhook job: {}", e); - } - } + let client = self.client.clone(); + let retry_policy = self.retry_policy.clone(); - let _ = batch.commit().await.map_err(|e| { - error!("error committing transactional batch: {}", e); - }); + tokio::spawn(async move { + let mut futures = Vec::new(); - drop(permits); - }); - } - } else { - loop { - report_semaphore_utilization(); - // TODO: We could grab semaphore permits here using something like: - // `min(semaphore.available_permits(), dequeue_batch_size)` - // And then dequeue only up to that many jobs. 
We'd then need to hand back the - // difference in permits based on how many jobs were dequeued. - let batch = self.wait_for_jobs().await; - dequeue_batch_size_histogram.record(batch.jobs.len() as f64); - - // Get enough permits for the jobs before spawning a task. - let permits = semaphore - .clone() - .acquire_many_owned(batch.jobs.len() as u32) - .await - .expect("semaphore has been closed"); - - let client = self.client.clone(); - let retry_policy = self.retry_policy.clone(); - - tokio::spawn(async move { - let mut futures = Vec::new(); - - for job in batch.jobs { - let client = client.clone(); - let retry_policy = retry_policy.clone(); - - let future = - async move { process_webhook_job(client, job, &retry_policy).await }; - - futures.push(future); - } + // We have to `take` the Vec of jobs from the batch to avoid a borrow checker + // error below when we commit. + for job in std::mem::take(&mut batch.jobs) { + let client = client.clone(); + let retry_policy = retry_policy.clone(); + + let future = + async move { process_webhook_job(client, job, &retry_policy).await }; - let results = join_all(futures).await; - for result in results { - if let Err(e) = result { - error!("error processing webhook job: {}", e); - } + futures.push(future); + } + + let results = join_all(futures).await; + for result in results { + if let Err(e) = result { + error!("error processing webhook job: {}", e); } + } - drop(permits); + let _ = batch.commit().await.map_err(|e| { + error!("error committing transactional batch: {}", e); }); - } + + drop(permits); + }); } } } @@ -601,7 +519,8 @@ mod tests { liveness, ); - let consumed_job = worker.wait_for_jobs().await.jobs.pop().unwrap(); + let mut batch = worker.wait_for_jobs_tx().await; + let consumed_job = batch.jobs.pop().unwrap(); assert_eq!(consumed_job.job.attempt, 1); assert!(consumed_job.job.attempted_by.contains(&worker_id)); @@ -611,13 +530,13 @@ mod tests { *consumed_job.job.parameters.as_ref(), webhook_job_parameters ); - assert_eq!(consumed_job.job.status, JobStatus::Running); assert_eq!(consumed_job.job.target, webhook_job_parameters.url); consumed_job .complete() .await .expect("job not successfully completed"); + batch.commit().await.expect("failed to commit batch"); assert!(registry.get_status().healthy) }
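A closing note on the unified `run()` loop above: it sizes concurrency by acquiring one semaphore permit per dequeued job before spawning the batch task, then releases the whole bundle by dropping it once every job has been processed and the batch committed. A minimal sketch of that pattern with tokio (names hypothetical):

```rust
use std::sync::Arc;

use tokio::sync::Semaphore;

async fn spawn_batch(semaphore: Arc<Semaphore>, jobs: Vec<u64>) {
    // Wait here until the whole batch fits under the concurrency cap.
    let permits = semaphore
        .acquire_many_owned(jobs.len() as u32)
        .await
        .expect("semaphore has been closed");

    tokio::spawn(async move {
        let futures = jobs.into_iter().map(|job| async move {
            let _ = job; // stand-in for process_webhook_job(...)
        });

        futures::future::join_all(futures).await;

        drop(permits); // hand all of the batch's capacity back at once
    });
}
```

Acquiring all permits up front, rather than one per spawned task as the deleted `spawn_webhook_job_processing_task` did, keeps the dequeue loop from racing ahead of available capacity while still letting the jobs inside a batch run concurrently.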