From 31495cbbb60afdf8a1877fb7b950a66741dfe1b3 Mon Sep 17 00:00:00 2001 From: teebz <5day4cast@protonmail.com> Date: Sun, 7 Jul 2024 14:58:15 -0400 Subject: [PATCH] * adds instructions on running with duckdb library * re-works weather data to be easier to use with oracle event related code * adds oracle database and queries for CRUD data operations * adds scoring and signing ETL job via /update route --- .gitignore | 5 +- Cargo.lock | 965 +++++-- Cargo.toml | 4 +- daemon/Cargo.toml | 8 +- .../observations/download_observations.rs | 11 +- .../domains/observations/xml_observation.rs | 2 - oracle/Cargo.toml | 67 +- oracle/README.md | 6 +- oracle/Settings.toml | 6 +- oracle/build.sh | 4 +- oracle/src/app_error.rs | 42 + oracle/src/db/event_data.rs | 2301 +++++++++++++++++ oracle/src/db/event_db_migrations.rs | 119 + oracle/src/db/mod.rs | 7 + oracle/src/db/weather_data.rs | 496 ++++ oracle/src/file_access.rs | 176 ++ oracle/src/lib.rs | 11 +- oracle/src/main.rs | 44 +- oracle/src/oracle.rs | 699 +++++ oracle/src/routes/events/mod.rs | 3 + oracle/src/routes/events/oracle_routes.rs | 218 ++ oracle/src/routes/files/download.rs | 35 +- oracle/src/routes/files/get_names.rs | 194 +- oracle/src/routes/files/mod.rs | 6 +- oracle/src/routes/files/upload.rs | 38 +- oracle/src/routes/mod.rs | 6 +- oracle/src/routes/stations/forecasts.rs | 222 -- oracle/src/routes/stations/get_stations.rs | 108 - oracle/src/routes/stations/mod.rs | 8 +- oracle/src/routes/stations/observations.rs | 203 -- oracle/src/routes/stations/weather_routes.rs | 134 + oracle/src/ser/mod.rs | 2 + oracle/src/ser/utc_datetime.rs | 19 + oracle/src/ser/utc_option_datetime.rs | 31 + oracle/src/startup.rs | 133 +- oracle/src/utils.rs | 81 +- oracle/tests/api/create_event.rs | 164 ++ oracle/tests/api/create_event_entry.rs | 167 ++ oracle/tests/api/etl_workflow.rs | 341 +++ oracle/tests/api/get_events.rs | 114 + oracle/tests/api/helpers.rs | 84 + oracle/tests/api/main.rs | 5 + 42 files changed, 6200 insertions(+), 1089 deletions(-) create mode 100644 oracle/src/app_error.rs create mode 100644 oracle/src/db/event_data.rs create mode 100644 oracle/src/db/event_db_migrations.rs create mode 100644 oracle/src/db/mod.rs create mode 100644 oracle/src/db/weather_data.rs create mode 100644 oracle/src/file_access.rs create mode 100644 oracle/src/oracle.rs create mode 100644 oracle/src/routes/events/mod.rs create mode 100644 oracle/src/routes/events/oracle_routes.rs delete mode 100644 oracle/src/routes/stations/forecasts.rs delete mode 100644 oracle/src/routes/stations/get_stations.rs delete mode 100644 oracle/src/routes/stations/observations.rs create mode 100644 oracle/src/routes/stations/weather_routes.rs create mode 100644 oracle/src/ser/mod.rs create mode 100644 oracle/src/ser/utc_datetime.rs create mode 100644 oracle/src/ser/utc_option_datetime.rs create mode 100644 oracle/tests/api/create_event.rs create mode 100644 oracle/tests/api/create_event_entry.rs create mode 100644 oracle/tests/api/etl_workflow.rs create mode 100644 oracle/tests/api/get_events.rs create mode 100644 oracle/tests/api/helpers.rs create mode 100644 oracle/tests/api/main.rs diff --git a/.gitignore b/.gitignore index b1164c1..694b72a 100644 --- a/.gitignore +++ b/.gitignore @@ -15,7 +15,10 @@ stations.xml data/ weather_data/ +event_data/ */.env .env duckdb_lib -*/duckdb_lib \ No newline at end of file +*/duckdb_lib +*.pem +*/test_data/* diff --git a/Cargo.lock b/Cargo.lock index c5e8135..de6f11d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,27 @@ version = "1.0.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + [[package]] name = "ahash" version = "0.7.8" @@ -147,58 +168,25 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "arrow" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219d05930b81663fd3b32e3bde8ce5bff3c4d23052a99f11a8fa50a3b47b2658" -dependencies = [ - "arrow-arith 51.0.0", - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", - "arrow-cast 51.0.0", - "arrow-data 51.0.0", - "arrow-ord 51.0.0", - "arrow-row 51.0.0", - "arrow-schema 51.0.0", - "arrow-select 51.0.0", - "arrow-string 51.0.0", -] - [[package]] name = "arrow" version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6127ea5e585a12ec9f742232442828ebaf264dfa5eefdd71282376c599562b77" dependencies = [ - "arrow-arith 52.1.0", - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-cast 52.1.0", + "arrow-arith", + "arrow-array", + "arrow-buffer", + "arrow-cast", "arrow-csv", - "arrow-data 52.1.0", + "arrow-data", "arrow-ipc", "arrow-json", - "arrow-ord 52.1.0", - "arrow-row 52.1.0", - "arrow-schema 52.1.0", - "arrow-select 52.1.0", - "arrow-string 52.1.0", -] - -[[package]] -name = "arrow-arith" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0272150200c07a86a390be651abdd320a2d12e84535f0837566ca87ecd8f95e0" -dependencies = [ - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", - "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "chrono", - "half", - "num", + "arrow-ord", + "arrow-row", + "arrow-schema", + "arrow-select", + "arrow-string", ] [[package]] @@ -207,31 +195,15 @@ version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7add7f39210b7d726e2a8efc0083e7bf06e8f2d15bdb4896b564dce4410fbf5d" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "chrono", "half", "num", ] -[[package]] -name = "arrow-array" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8010572cf8c745e242d1b632bd97bd6d4f40fefed5ed1290a8f433abaa686fea" -dependencies = [ - "ahash 0.8.9", - "arrow-buffer 51.0.0", - "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "chrono", - "half", - "hashbrown 0.14.3", - "num", -] - [[package]] name = "arrow-array" version = "52.1.0" @@ -239,26 +211,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81c16ec702d3898c2f5cfdc148443c6cd7dbe5bac28399859eb0a3d38f072827" dependencies = [ "ahash 0.8.9", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-buffer", + "arrow-data", + "arrow-schema", "chrono", "half", "hashbrown 0.14.3", "num", ] -[[package]] -name = 
"arrow-buffer" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0a2432f0cba5692bf4cb757469c66791394bac9ec7ce63c1afe74744c37b27" -dependencies = [ - "bytes", - "half", - "num", -] - [[package]] name = "arrow-buffer" version = "52.1.0" @@ -270,41 +231,21 @@ dependencies = [ "num", ] -[[package]] -name = "arrow-cast" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9abc10cd7995e83505cc290df9384d6e5412b207b79ce6bdff89a10505ed2cba" -dependencies = [ - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", - "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "arrow-select 51.0.0", - "atoi", - "base64 0.22.1", - "chrono", - "comfy-table", - "half", - "lexical-core", - "num", - "ryu", -] - [[package]] name = "arrow-cast" version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c7ef44f26ef4f8edc392a048324ed5d757ad09135eff6d5509e6450d39e0398" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", - "arrow-select 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", "atoi", "base64 0.22.1", "chrono", + "comfy-table", "half", "lexical-core", "num", @@ -317,11 +258,11 @@ version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f843490bd258c5182b66e888161bb6f198f49f3792f7c7f98198b924ae0f564" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-cast 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", "chrono", "csv", "csv-core", @@ -330,26 +271,14 @@ dependencies = [ "regex", ] -[[package]] -name = "arrow-data" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2742ac1f6650696ab08c88f6dd3f0eb68ce10f8c253958a18c943a68cd04aec5" -dependencies = [ - "arrow-buffer 51.0.0", - "arrow-schema 51.0.0", - "half", - "num", -] - [[package]] name = "arrow-data" version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a769666ffac256dd301006faca1ca553d0ae7cffcf4cd07095f73f95eb226514" dependencies = [ - "arrow-buffer 52.1.0", - "arrow-schema 52.1.0", + "arrow-buffer", + "arrow-schema", "half", "num", ] @@ -360,11 +289,11 @@ version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf9c3fb57390a1af0b7bb3b5558c1ee1f63905f3eccf49ae7676a8d1e6e5a72" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-cast 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", "flatbuffers", ] @@ -374,11 +303,11 @@ version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "654e7f3724176b66ddfacba31af397c48e106fbe4d281c8144e7d237df5acfd7" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-cast 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", "chrono", "half", "indexmap", @@ -388,51 +317,21 @@ dependencies = [ "serde_json", ] -[[package]] -name = "arrow-ord" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3e6b61e3dc468f503181dccc2fc705bdcc5f2f146755fa5b56d0a6c5943f412" -dependencies = [ - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", 
- "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "arrow-select 51.0.0", - "half", - "num", -] - [[package]] name = "arrow-ord" version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8008370e624e8e3c68174faaf793540287106cfda8ad1da862fdc53d8e096b4" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", - "arrow-select 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", "half", "num", ] -[[package]] -name = "arrow-row" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848ee52bb92eb459b811fb471175ea3afcf620157674c8794f539838920f9228" -dependencies = [ - "ahash 0.8.9", - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", - "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "half", - "hashbrown 0.14.3", -] - [[package]] name = "arrow-row" version = "52.1.0" @@ -440,41 +339,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca5e3a6b7fda8d9fe03f3b18a2d946354ea7f3c8e4076dbdb502ad50d9d44824" dependencies = [ "ahash 0.8.9", - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "half", "hashbrown 0.14.3", ] -[[package]] -name = "arrow-schema" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d9483aaabe910c4781153ae1b6ae0393f72d9ef757d38d09d450070cf2e528" -dependencies = [ - "bitflags 2.4.2", -] - [[package]] name = "arrow-schema" version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dab1c12b40e29d9f3b699e0203c2a73ba558444c05e388a4377208f8f9c97eee" - -[[package]] -name = "arrow-select" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "849524fa70e0e3c5ab58394c770cb8f514d0122d20de08475f7b472ed8075830" dependencies = [ - "ahash 0.8.9", - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", - "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "num", + "bitflags 2.4.2", ] [[package]] @@ -484,41 +363,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e80159088ffe8c48965cb9b1a7c968b2729f29f37363df7eca177fc3281fe7c3" dependencies = [ "ahash 0.8.9", - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "num", ] -[[package]] -name = "arrow-string" -version = "51.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9373cb5a021aee58863498c37eb484998ef13377f69989c6c5ccfbd258236cdb" -dependencies = [ - "arrow-array 51.0.0", - "arrow-buffer 51.0.0", - "arrow-data 51.0.0", - "arrow-schema 51.0.0", - "arrow-select 51.0.0", - "memchr", - "num", - "regex", - "regex-syntax", -] - [[package]] name = "arrow-string" version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fd04a6ea7de183648edbcb7a6dd925bbd04c210895f6384c780e27a9b54afcd" dependencies = [ - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-data 52.1.0", - "arrow-schema 52.1.0", - "arrow-select 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", "memchr", "num", "regex", @@ -569,6 +431,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -679,6 +547,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.21.7" @@ -691,6 +565,79 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bech32" +version = "0.10.0-beta" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bip39" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +dependencies = [ + "bitcoin_hashes 0.11.0", + "serde", + "unicode-normalization", +] + +[[package]] +name = "bitcoin" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" +dependencies = [ + "bech32", + "bitcoin-internals", + "bitcoin_hashes 0.13.0", + "hex-conservative", + "hex_lit", + "secp256k1", + "serde", +] + +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" +dependencies = [ + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" + +[[package]] +name = "bitcoin_hashes" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", + "serde", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -727,6 +674,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-padding" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array", +] + [[package]] name = "borsh" version = "1.3.1" @@ -818,6 +774,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher", +] + [[package]] name = "cc" version = "1.0.87" @@ -839,6 +804,30 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + [[package]] name = "chrono" version = "0.4.34" @@ -851,6 +840,17 @@ dependencies = [ "windows-targets 0.52.3", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", +] + [[package]] name = "clap" version = "4.5.1" @@ -897,6 +897,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "colored" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f741c91823341bebf717d4c71bda820630ce065443b58bd1b7451af008355" +dependencies = [ + "is-terminal", + "lazy_static", + "winapi", +] + [[package]] name = "comfy-table" version = "7.1.0" @@ -1019,6 +1030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core", "typenum", ] @@ -1048,7 +1060,7 @@ name = "daemon" version = "0.4.0" dependencies = [ "anyhow", - "arrow 52.1.0", + "arrow", "async-compression", "async-throttle", "clap", @@ -1103,6 +1115,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", + "subtle", ] [[package]] @@ -1126,6 +1139,23 @@ dependencies = [ "winapi", ] +[[package]] +name = "dlctix" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddf463d1b82d740c8f7b60702f0b89547c5ee2783f33e603accf5e7c8b4308fa" +dependencies = [ + "bitcoin", + "hex", + "musig2", + "rand", + "secp", + "secp256k1", + "serde", + "serdect", + "sha2", +] + [[package]] name = "dlv-list" version = "0.5.2" @@ -1135,24 +1165,37 @@ dependencies = [ "const-random", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "duckdb" -version = "0.10.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "424ede399a5d1084e65c0888fda71e407e5809400c92ff2cf510bfd1697b9c76" +checksum = "626373a331b49f94b24edc4e53a59b0b354f085ac3b339d43d31da7a9b145004" dependencies = [ - "arrow 51.0.0", + "arrow", "cast", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libduckdb-sys", "memchr", + "num-integer", "rust_decimal", "smallvec", "strum", ] +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + [[package]] name = "encoding_rs" version = "0.8.33" @@ -1196,6 +1239,16 @@ version = "2.0.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +[[package]] +name = "fern" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" +dependencies = [ + "colored", + "log", +] + [[package]] name = "filetime" version = "0.2.23" @@ -1258,6 +1311,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "funty" version = "2.0.0" @@ -1458,6 +1517,33 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-conservative" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" + +[[package]] +name = "hex_lit" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + [[package]] name = "http" version = "1.1.0" @@ -1546,6 +1632,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", + "webpki-roots", ] [[package]] @@ -1621,10 +1708,21 @@ dependencies = [ name = "indexmap" version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", + "serde", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "equivalent", - "hashbrown 0.14.3", + "block-padding", + "generic-array", ] [[package]] @@ -1662,6 +1760,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.10" @@ -1766,12 +1873,11 @@ checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libduckdb-sys" -version = "0.10.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b3f02cecc430f61561bde538d42af4be2d9d5a8b058f74883e460bc1055461" +checksum = "fa48143af4679c674db9ad7951ff1d3ce67b8b55578e523d96af54152df6c13b" dependencies = [ "autocfg", - "cc", "flate2", "pkg-config", "serde", @@ -1887,6 +1993,32 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mockall" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.50", +] + [[package]] name = "multer" version = "3.1.0" @@ -1904,6 +2036,24 @@ dependencies = [ "version_check", ] +[[package]] +name = "musig2" +version = "0.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed08befaac75bfb31ca5e87678c4e8490bcd21d0c98ccb4f12f4065a7567e83" +dependencies = [ + "base16ct", + "hmac", + "once_cell", + "rand", + "secp", + "secp256k1", + "serde", + "serdect", + "sha2", + "subtle", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -1922,6 +2072,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "negentropy" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e664971378a3987224f7a0e10059782035e89899ae403718ee07de85bec42afe" + [[package]] name = "nom" version = "7.1.3" @@ -1932,6 +2088,36 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nostr" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f08db214560a34bf7c4c1fea09a8461b9412bae58ba06e99ce3177d89fa1e0a6" +dependencies = [ + "aes", + "base64 0.21.7", + "bip39", + "bitcoin", + "cbc", + "chacha20", + "chacha20poly1305", + "getrandom", + "instant", + "js-sys", + "negentropy", + "once_cell", + "reqwest", + "scrypt", + "serde", + "serde_json", + "tracing", + "unicode-normalization", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "num" version = "0.4.1" @@ -2039,6 +2225,12 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + [[package]] name = "openssl" version = "0.10.64" @@ -2099,29 +2291,39 @@ version = "0.4.0" dependencies = [ "anyhow", "axum", + "base64 0.22.1", + "bincode", "clap", "config", + "dlctix", "duckdb", + "fern", "futures", "h2", "hyper", + "itertools", "log", "mime", + "mockall", + "nostr", "num_cpus", "openssl", + "pem-rfc7468", + "rand", "regex", "rustix", "scooby", "serde", - "slog", - "slog-async", - "slog-term", + "serde_json", + "thiserror", "time", "tokio", "tokio-util", "toml", "tower", "tower-http", + "utoipa", + "utoipa-scalar", "uuid", ] @@ -2199,13 +2401,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f22ba0d95db56dde8685e3fadcb915cdaadda31ab8abbe3ff7f0ad1ef333267" dependencies = [ "ahash 0.8.9", - "arrow-array 52.1.0", - "arrow-buffer 52.1.0", - "arrow-cast 52.1.0", - "arrow-data 52.1.0", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", "arrow-ipc", - "arrow-schema 52.1.0", - "arrow-select 52.1.0", + "arrow-schema", + "arrow-select", "base64 0.22.1", "brotli", "bytes", @@ -2237,6 +2439,17 @@ dependencies = [ "syn 2.0.50", ] +[[package]] +name = "password-hash" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core", + "subtle", +] + [[package]] name = "paste" version = "1.0.14" @@ -2249,6 +2462,25 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest", + "hmac", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -2338,6 +2570,17 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2350,6 +2593,32 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" + +[[package]] +name = "predicates-tree" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -2368,6 +2637,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", + "syn 1.0.109", "version_check", ] @@ -2411,6 +2681,52 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quinn" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" +dependencies = [ + "bytes", + "rand", + "ring", + "rustc-hash", + "rustls", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +dependencies = [ + "libc", + "once_cell", + "socket2", + 
"windows-sys 0.52.0", +] + [[package]] name = "quote" version = "1.0.35" @@ -2551,7 +2867,10 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", @@ -2559,6 +2878,8 @@ dependencies = [ "system-configuration", "tokio", "tokio-native-tls", + "tokio-rustls", + "tokio-socks", "tokio-util", "tower-service", "url", @@ -2566,6 +2887,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", + "webpki-roots", "winreg", ] @@ -2703,6 +3025,12 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.4.0" @@ -2732,6 +3060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e" dependencies = [ "once_cell", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -2777,6 +3106,15 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + [[package]] name = "schannel" version = "0.1.23" @@ -2798,12 +3136,60 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scrypt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" +dependencies = [ + "password-hash", + "pbkdf2", + "salsa20", + "sha2", +] + [[package]] name = "seahash" version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" +[[package]] +name = "secp" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c4754628ff9006f80c6abd1cd1e88c5ca6f5a60eab151ad2e16268aab3514d0" +dependencies = [ + "base16ct", + "once_cell", + "rand", + "secp256k1", + "serde", + "serdect", + "subtle", +] + +[[package]] +name = "secp256k1" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" +dependencies = [ + "bitcoin_hashes 0.13.0", + "rand", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" +dependencies = [ + "cc", +] + [[package]] name = "security-framework" version = "2.9.2" @@ -2873,10 +3259,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = 
"4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ + "indexmap", "itoa", "ryu", "serde", @@ -2913,6 +3300,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha2" version = "0.10.8" @@ -3160,20 +3557,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" dependencies = [ "proc-macro2", "quote", @@ -3307,6 +3710,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-socks" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" +dependencies = [ + "either", + "futures-util", + "thiserror", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.10" @@ -3503,9 +3918,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] @@ -3522,6 +3937,16 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "untrusted" version = "0.9.0" @@ -3537,6 +3962,7 @@ dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] @@ -3545,13 +3971,53 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "utoipa" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" +dependencies = [ + "indexmap", + "serde", + "serde_json", + "utoipa-gen", +] + +[[package]] +name = "utoipa-gen" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bf0e16c02bc4bf5322ab65f10ab1149bdbcaa782cba66dc7057370a3f8190be" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "regex", + "syn 2.0.50", + 
"uuid", +] + +[[package]] +name = "utoipa-scalar" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3ab4b7269d14d93626b0bfedf212f1b0995cb7d13d35daba21d579511e7fae8" +dependencies = [ + "axum", + "serde", + "serde_json", + "utoipa", +] + [[package]] name = "uuid" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ + "atomic", "getrandom", + "serde", ] [[package]] @@ -3685,6 +4151,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index bd0b051..a2ea7ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,10 +11,10 @@ slog = "2.7.0" slog-term = "2.9.0" slog-async = "2.7.0" clap = { version = "4.3.23", features = ["derive", "env"] } -reqwest = {version = "0.12.5", features=["stream"]} +reqwest = { version = "0.12.5", features = ["stream"] } tokio = { version = "1.35.1", features = ["full"] } tokio-util = "0.7.10" -time = "0.3.31" +time = { version = "0.3.36", features = ["parsing", "formatting", "macros"] } openssl = { version = "0.10.60", features = ["vendored"] } config = "0.14.0" rustix = "0.38.19" diff --git a/daemon/Cargo.toml b/daemon/Cargo.toml index 40f0441..3c9abf9 100644 --- a/daemon/Cargo.toml +++ b/daemon/Cargo.toml @@ -8,23 +8,23 @@ repository = "https://github.com/tee8z/noaa-data-pipeline" [dependencies] anyhow = "1.0.75" -async-compression = { version= "0.4.11", features = ["tokio", "gzip"]} +async-compression = { version = "0.4.11", features = ["tokio", "gzip"] } parquet = "52.1.0" log = "0.4.18" slog = "2.7.0" slog-term = "2.9.0" slog-async = "2.7.0" clap = { version = "4.3.23", features = ["derive", "env"] } -reqwest = { version = "0.12.5", features = ["stream","multipart", "json"] } +reqwest = { version = "0.12.5", features = ["stream", "multipart", "json"] } serde = { version = "1.0.195", features = ["derive"] } tokio = { version = "1.35.1", features = ["full", "signal"] } -tokio-util = { version = "0.7.10",features = ["codec", "compat"] } +tokio-util = { version = "0.7.10", features = ["codec", "compat"] } serde_json = "1.0.107" async-throttle = "0.3.2" futures = "0.3.28" arrow = "52.1.0" parquet_derive = "52.1.0" -time = { version = "0.3.29", features = ["parsing"] } +time = { version = "0.3.36", features = ["parsing"] } reqwest-retry = "0.6.0" reqwest-middleware = "0.3.2" serde-xml-rs = "0.6.0" diff --git a/daemon/src/domains/observations/download_observations.rs b/daemon/src/domains/observations/download_observations.rs index 360ff78..fe01b78 100644 --- a/daemon/src/domains/observations/download_observations.rs +++ b/daemon/src/domains/observations/download_observations.rs @@ -34,13 +34,18 @@ impl TryFrom for CurrentWeather { latitude: val.latitude.unwrap_or(String::from("")).parse::()?, longitude: val.longitude.unwrap_or(String::from("")).parse::()?, generated_at: OffsetDateTime::parse( - &&val - .observation_time + &val.observation_time .clone() .unwrap_or(OffsetDateTime::now_utc().to_string()), &Rfc3339, ) - .map_err(|e| anyhow!("error parsing observation_time time: {} {:?}", e, val.observation_time))?, + .map_err(|e| { + anyhow!( + "error parsing observation_time time: {} {:?}", + e, + 
val.observation_time + ) + })?, temperature_value: val .temp_c .unwrap_or(String::from("")) diff --git a/daemon/src/domains/observations/xml_observation.rs b/daemon/src/domains/observations/xml_observation.rs index 56e7ec1..56a251d 100644 --- a/daemon/src/domains/observations/xml_observation.rs +++ b/daemon/src/domains/observations/xml_observation.rs @@ -25,7 +25,6 @@ pub struct ObservationData { #[serde(rename = "data")] pub data: CurrentData, - } #[derive(Serialize, Deserialize)] @@ -86,4 +85,3 @@ pub struct QualityControlFlags { #[serde(rename = "no_signal")] pub no_signal: Option, } - diff --git a/oracle/Cargo.toml b/oracle/Cargo.toml index 8b6b93f..242c552 100644 --- a/oracle/Cargo.toml +++ b/oracle/Cargo.toml @@ -7,32 +7,57 @@ repository = "https://github.com/tee8z/noaa-data-pipeline" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tokio = { version = "1.35.1", features = ["macros", "rt-multi-thread"] } -tokio-util = "0.7.10" -time = { version = "0.3.25", features = ["parsing", "formatting"] } -uuid = { version = "1.4.1", features = ["v4"] } anyhow = "1.0.72" -mime = "0.3.17" +axum = { version = "0.7.5", features = [ + "macros", + "tokio", + "tracing", + "multipart", + "http1", +] } +base64 = "0.22.1" +bincode = "1.3.3" +clap = { version = "4.3.23", features = ["derive", "env"] } +config = "0.14.0" +dlctix = "0.0.6" +nostr = "0.33.0" +duckdb = { version = "1.0.0" } +fern = { version = "0.6.2", features = ["colored"] } +futures = "0.3.28" hyper = "1.4.0" h2 = "0.4.5" -axum = { version = "0.7.5", features = ["macros", "tracing", "multipart"] } -tower-http = { version= "0.5.2", features = ["fs", "cors"] } -futures = "0.3.28" -serde = { version="1.0.188", features= ["derive"]} -openssl = { version = "0.10.60", features = ["vendored"] } +itertools = "0.13.0" +log = "0.4.21" +mime = "0.3.17" num_cpus = "1.16.0" -config = "0.14.0" -log = "0.4.18" -slog = "2.7.0" -slog-term = "2.9.0" -slog-async = "2.7.0" -clap = { version = "4.3.23", features = ["derive", "env"] } -toml = "0.8.10" +openssl = { version = "0.10.60", features = ["vendored"] } +pem-rfc7468 = { version = "0.7.0", features = ["alloc"] } +rand = "0.8.5" +regex = "1.10.3" rustix = "0.38.19" scooby = "0.5.0" -duckdb = { version = "0.10.2", features = ["bundled"] } -regex = "1.10.3" +serde = { version = "1.0.188", features = ["derive"] } +serde_json = "1.0.117" +time = { version = "0.3.36", features = ["parsing", "formatting", "macros"] } +thiserror = "1.0.62" +tokio = { version = "1.35.1", features = [ + "macros", + "rt-multi-thread", + "signal", +] } +tokio-util = "0.7.10" +toml = "0.8.10" +tower-http = { version = "0.5.2", features = ["fs", "cors"] } +utoipa = { version = "4.2.3", features = [ + "axum_extras", + "time", + "uuid", + "preserve_order", + "preserve_path_order", +] } +utoipa-scalar = { version = "0.1.0", features = ["axum"] } +uuid = { version = "1.4.1", features = ["v7", "serde"] } [dev-dependencies] -duckdb = { version = "0.10.2" } -tower = "0.4.13" \ No newline at end of file +tower = "0.4.13" +mockall = "0.13.0" diff --git a/oracle/README.md b/oracle/README.md index 9a1661d..c383591 100644 --- a/oracle/README.md +++ b/oracle/README.md @@ -6,6 +6,8 @@ When linking against a DuckDB library already on the system (so not using any of wget https://github.com/duckdb/duckdb/releases/download/v1.0.0/libduckdb-linux-amd64.zip mkdir duckdb_lib unzip libduckdb-linux-amd64.zip -d duckdb_lib +cp duckdb_lib/lib*.so* /usr/local/lib/ +ldconfig ``` * Then set the 
DUCKDB_LIB_DIR var to (or whatever the full path to the folder holding the library is at) ``` @@ -79,7 +81,7 @@ curl -H "Content-Type: multipart/form-data" -F "file=@/home/tee8z/repos/noaa-dat > Accept: */* > Content-Length: 36281 > Content-Type: multipart/form-data; boundary=------------------------d8b9fa0cf983802b -> +> * We are completely uploaded and fine * Mark bundle as not supporting multiuse < HTTP/1.1 200 OK @@ -89,7 +91,7 @@ curl -H "Content-Type: multipart/form-data" -F "file=@/home/tee8z/repos/noaa-dat < vary: access-control-request-headers < content-length: 0 < date: Sun, 14 Jan 2024 14:43:23 GMT -< +< * Connection #0 to host localhost left intact ``` diff --git a/oracle/Settings.toml b/oracle/Settings.toml index 82d0310..6e149bc 100644 --- a/oracle/Settings.toml +++ b/oracle/Settings.toml @@ -1,6 +1,8 @@ -level = "info" +level = "debug" data_dir = "./weather_data" ui_dir = "./oracle/ui" host = "127.0.0.1" port = "9100" -remote_url = "http://127.0.0.1:9100" \ No newline at end of file +remote_url = "http://127.0.0.1:9100" +event_db = "event_data" +private_key_path = "./key" diff --git a/oracle/build.sh b/oracle/build.sh index 40523c7..82c7ee3 100755 --- a/oracle/build.sh +++ b/oracle/build.sh @@ -17,6 +17,8 @@ fi wget "https://github.com/duckdb/duckdb/releases/download/v1.0.0/libduckdb-linux-amd64.zip" unzip libduckdb-linux-amd64.zip -d duckdb_lib +cp duckdb_lib/lib*.so* /usr/local/lib/ +ldconfig rm libduckdb-linux-amd64.zip echo "$(pwd)/duckdb_lib" -DUCKDB_LIB_DIR="$(pwd)/duckdb_lib" cargo build \ No newline at end of file +DUCKDB_LIB_DIR="$(pwd)/duckdb_lib" cargo build diff --git a/oracle/src/app_error.rs b/oracle/src/app_error.rs new file mode 100644 index 0000000..83fb07b --- /dev/null +++ b/oracle/src/app_error.rs @@ -0,0 +1,42 @@ +use crate::{file_access, weather_data}; +use axum::{ + response::{IntoResponse, Response}, + Json, +}; +use hyper::StatusCode; +use log::error; +use serde_json::json; +use std::borrow::Borrow; + +#[derive(thiserror::Error, Debug)] +pub enum AppError { + #[error("Failed to validate request: {0}")] + Request(#[from] anyhow::Error), + #[error("Failed to get weather data: {0}")] + WeatherData(#[from] weather_data::Error), + #[error("Failed to parse times for file data: {0}")] + FileAccess(#[from] file_access::Error), +} + +impl IntoResponse for AppError { + fn into_response(self) -> Response { + error!("error handling request: {}", self.to_string()); + + let (status, error_message) = match self.borrow() { + AppError::Request(_) => (StatusCode::BAD_REQUEST, self.to_string()), + AppError::WeatherData(e) => match e { + weather_data::Error::Query(_) | &weather_data::Error::FileAccess(_) => ( + StatusCode::INTERNAL_SERVER_ERROR, + String::from("internal error"), + ), + _ => (StatusCode::BAD_REQUEST, self.to_string()), + }, + AppError::FileAccess(_) => (StatusCode::BAD_REQUEST, self.to_string()), + }; + + let body = Json(json!({ + "error": error_message, + })); + (status, body).into_response() + } +} diff --git a/oracle/src/db/event_data.rs b/oracle/src/db/event_data.rs new file mode 100644 index 0000000..a30fd76 --- /dev/null +++ b/oracle/src/db/event_data.rs @@ -0,0 +1,2301 @@ +use super::{run_migrations, weather_data, Forecast, Observation}; +use crate::oracle::Oracle; +use crate::utc_datetime; +use anyhow::anyhow; +use dlctix::bitcoin::XOnlyPublicKey; +use dlctix::secp::{MaybeScalar, Scalar}; +use dlctix::EventAnnouncement; +use duckdb::arrow::datatypes::ToByteSlice; +use duckdb::types::{OrderedMap, ToSqlOutput, Type, Value}; +use duckdb::{ + ffi, 
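+    // `ffi` exposes DuckDB's raw C error struct; the connection-retry helpers
+    // below use it to hand back a synthetic DatabaseLocked failure when an
+    // open attempt times out.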
params, params_from_iter, AccessMode, Config, Connection, ErrorCode, Row, ToSql,
+};
+use itertools::Itertools;
+use log::{debug, info};
+use regex::Regex;
+use scooby::postgres::{insert_into, select, update, with, Aliasable, Joinable, Parameters};
+use serde::{Deserialize, Serialize};
+use serde_json::to_vec;
+use std::collections::HashMap;
+use std::time::Duration as StdDuration;
+use time::format_description::well_known::Rfc3339;
+use time::macros::format_description;
+use time::{Date, Duration, OffsetDateTime, UtcOffset};
+use tokio::time::timeout;
+use utoipa::{IntoParams, ToSchema};
+use uuid::Uuid;
+
+pub struct EventData {
+    connection_path: String,
+    retry_duration: StdDuration,
+    retry_max_attemps: i32,
+}
+
+impl EventData {
+    pub fn new(path: &str) -> Result<Self, duckdb::Error> {
+        let connection_path = format!("{}/events.db3", path);
+        let mut conn = Connection::open(connection_path.clone())?;
+        run_migrations(&mut conn)?;
+        Ok(Self {
+            connection_path,
+            retry_duration: StdDuration::from_millis(100),
+            retry_max_attemps: 5,
+        })
+    }
+
+    async fn new_readonly_connection(&self) -> Result<Connection, duckdb::Error> {
+        let config = Config::default().access_mode(AccessMode::ReadOnly)?;
+        Connection::open_with_flags(self.connection_path.clone(), config)
+    }
+
+    pub async fn new_readonly_connection_retry(&self) -> Result<Connection, duckdb::Error> {
+        let mut attempt = 0;
+        loop {
+            match timeout(self.retry_duration, self.new_readonly_connection()).await {
+                Ok(Ok(connection)) => return Ok(connection),
+                Ok(Err(e)) => {
+                    if attempt >= self.retry_max_attemps
+                        || !e.to_string().contains("Could not set lock on file")
+                    {
+                        return Err(e);
+                    }
+                    info!("Retrying: {}", e);
+                    attempt += 1;
+                }
+                Err(_) => {
+                    return Err(duckdb::Error::DuckDBFailure(
+                        duckdb::ffi::Error {
+                            code: duckdb::ErrorCode::DatabaseLocked,
+                            extended_code: 0,
+                        },
+                        None,
+                    ));
+                }
+            }
+        }
+    }
+
+    async fn new_write_connection(&self) -> Result<Connection, duckdb::Error> {
+        let config = Config::default().access_mode(AccessMode::ReadWrite)?;
+        Connection::open_with_flags(self.connection_path.clone(), config)
+    }
+
+    pub async fn new_write_connection_retry(&self) -> Result<Connection, duckdb::Error> {
+        let mut attempt = 0;
+        loop {
+            match timeout(self.retry_duration, self.new_write_connection()).await {
+                Ok(Ok(connection)) => return Ok(connection),
+                Ok(Err(e)) => {
+                    if attempt >= self.retry_max_attemps
+                        || !e.to_string().contains("Could not set lock on file")
+                    {
+                        return Err(e);
+                    }
+                    info!("Retrying: {}", e);
+                    attempt += 1;
+                }
+                Err(_) => {
+                    return Err(duckdb::Error::DuckDBFailure(
+                        duckdb::ffi::Error {
+                            code: duckdb::ErrorCode::DatabaseLocked,
+                            extended_code: 0,
+                        },
+                        None,
+                    ));
+                }
+            }
+        }
+    }
+
+    pub async fn get_stored_public_key(&self) -> Result<XOnlyPublicKey, duckdb::Error> {
+        let select = select("pubkey").from("oracle_metadata");
+        let conn = self.new_readonly_connection_retry().await?;
+        let mut stmt = conn.prepare(&select.to_string())?;
+        let key: Vec<u8> = stmt.query_row([], |row| row.get(0))?;
+        //TODO: use a custom error here so we don't need to panic
+        let converted_key = XOnlyPublicKey::from_slice(&key).expect("invalid pubkey");
+        Ok(converted_key)
+    }
+
+    pub async fn add_oracle_metadata(&self, pubkey: XOnlyPublicKey) -> Result<(), duckdb::Error> {
+        let pubkey_raw = pubkey.serialize().to_vec();
+        //TODO: Add the ability to change the name via config
+        let name = String::from("4casttruth");
+        let conn = self.new_write_connection_retry().await?;
+        let mut stmt = conn.prepare("INSERT INTO oracle_metadata (pubkey,name) VALUES(?,?)")?;
+        stmt.execute([pubkey_raw, name.into()])?;
+        Ok(())
+    }
+
+    // Call as an ETL process to update the weather for
running events + pub async fn update_weather_station_data( + &self, + event_id: Uuid, + weather: Vec, + ) -> Result<(), duckdb::Error> { + //1) grab events that are using this weather data + //2) add new weather data to table + let weather_ids = self.add_weather_readings(weather).await?; + + //3) create join between weather and events + self.batch_add_weather_to_event(event_id, weather_ids) + .await?; + + Ok(()) + } + + pub async fn add_weather_readings( + &self, + weather: Vec, + ) -> Result, duckdb::Error> { + let params: Vec<(Uuid, Value, Forecasted, Option)> = weather + .iter() + .map(|weather| { + let weather_id = Uuid::now_v7(); + ( + weather_id, + Value::Text(weather.station_id.clone()), + weather.forecasted.clone(), + weather.observed.clone(), + ) + }) + .collect(); + let weather_ids: Vec = params.iter().map(|row| row.0).collect(); + let mut param_placeholders = Parameters::new(); + let params_values: Vec<(String, String, String, String)> = params + .iter() + .map(|vals| { + ( + param_placeholders.next(), + param_placeholders.next(), + vals.2.to_raw_sql(), + vals.3 + .clone() + .map_or("Null".to_string(), |x| x.to_raw_sql()), + ) + }) + .collect(); + + let insert_weather = insert_into("weather") + .columns(("id", "station_id", "forecasted", "observed")) + .values(params_values); + let query_str = self.prepare_query(insert_weather.to_string()); + debug!("query_str: {}", query_str); + let insert_values: Vec = params + .into_iter() + .flat_map(|(a, b, _, _)| vec![Value::Text(a.to_string()), b]) + .collect(); + debug!("insert values: {:?}", insert_values); + + let conn = self.new_write_connection_retry().await?; + let mut weather_stmt = conn.prepare(&query_str)?; + weather_stmt.execute(params_from_iter(insert_values.iter()))?; + Ok(weather_ids) + } + + pub async fn batch_add_weather_to_event( + &self, + event_id: Uuid, + weather_ids: Vec, + ) -> Result<(), duckdb::Error> { + let params: Vec<(String, Uuid, String)> = weather_ids + .iter() + .map(|weather_id| { + let event_weather_id = Uuid::now_v7().to_string(); + (event_weather_id, event_id, weather_id.to_string()) + }) + .collect(); + let mut param_placeholders = Parameters::new(); + let params_values: Vec<(String, String, String)> = params + .iter() + .map(|_| { + ( + param_placeholders.next(), + param_placeholders.next(), + param_placeholders.next(), + ) + }) + .collect(); + + let insert_event_weather = insert_into("events_weather") + .columns(("id", "event_id", "weather_id")) + .values(params_values); + let query_str = self.prepare_query(insert_event_weather.to_string()); + debug!("query_str: {}", query_str); + let insert_values: Vec = params + .into_iter() + .flat_map(|(a, b, c)| vec![a, b.to_string(), c]) + .collect(); + + info!("insert values: {:?}", insert_values); + + let conn = self.new_write_connection_retry().await?; + let mut weather_stmt = conn.prepare(&query_str)?; + weather_stmt.execute(params_from_iter(insert_values.iter()))?; + Ok(()) + } + pub async fn add_event(&self, event: CreateEventData) -> Result { + let locations_sql = format!("[{}]", event.locations.join(",")); + + let signing_date = OffsetDateTime::format(event.signing_date, &Rfc3339) + .map_err(|e| duckdb::Error::ToSqlConversionFailure(Box::new(e)))?; + let observation_date = OffsetDateTime::format(event.observation_date, &Rfc3339) + .map_err(|e| duckdb::Error::ToSqlConversionFailure(Box::new(e)))?; + let nonce = to_vec(&event.nonce).unwrap(); + let annoucement_bytes = to_vec(&event.event_annoucement).unwrap(); + let conn = 
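+        // Writers take the DuckDB file lock, so the write connection goes through
+        // the retry wrapper above rather than a bare Connection::open.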
self.new_write_connection_retry().await?; + let mut stmt = conn.prepare( + "INSERT INTO events ( + id, + total_allowed_entries, + number_of_places_win, + number_of_values_per_entry, + nonce, + signing_date, + observation_date, + locations, + event_annoucement) VALUES(?,?,?,?,?,?,?,?,?)", + )?; + stmt.execute(params![ + event.id.to_string(), + event.total_allowed_entries, + event.number_of_places_win, + event.number_of_values_per_entry, + nonce, + signing_date, + observation_date, + locations_sql, + annoucement_bytes, + ])?; + + Ok(event.into()) + } + + pub async fn add_event_entry( + &self, + entry: WeatherEntry, + ) -> Result { + //TODO: have these transactions happen in the same transaction + self.add_entry(entry.clone()).await?; + self.add_entry_choices(entry.clone()).await?; + Ok(entry) + } + + pub async fn add_entry(&self, entry: WeatherEntry) -> Result<(), duckdb::Error> { + let conn = self.new_write_connection_retry().await?; + // TODO: add check on the INSERT transaction to verify we never go over number of allowed entries in an event + // (current worse case is just a couple of extra entries made it into the event, doesn't change how we sign the result) + let insert_query = "INSERT INTO events_entries (id, event_id) VALUES(?,?)"; + let mut event_stmt = conn.prepare(insert_query)?; + + debug!("query_str: {}", insert_query); + let insert_values = params![entry.id.to_string(), entry.event_id.to_string()]; + + event_stmt.execute(insert_values)?; + Ok(()) + } + + pub async fn add_entry_choices(&self, entry: WeatherEntry) -> Result<(), duckdb::Error> { + #[allow(clippy::type_complexity)] + let params: Vec<( + Uuid, + String, + Option, + Option, + Option, + )> = entry + .expected_observations + .iter() + .map(|weather_choices| { + ( + entry.id, + weather_choices.stations.clone(), + weather_choices.temp_low.clone(), + weather_choices.temp_high.clone(), + weather_choices.wind_speed.clone(), + ) + }) + .collect(); + + let mut param_placeholders = Parameters::new(); + let params_values: Vec<(String, String, String, String, String)> = params + .iter() + .map(|_| { + ( + param_placeholders.next(), + param_placeholders.next(), + param_placeholders.next(), + param_placeholders.next(), + param_placeholders.next(), + ) + }) + .collect(); + + let insert_event_weather = insert_into("expected_observations") + .columns(("entry_id", "station", "temp_low", "temp_high", "wind_speed")) + .values(params_values); + let query_str = self.prepare_query(insert_event_weather.to_string()); + debug!("query_str: {}", query_str); + let insert_values: Vec = params + .into_iter() + .flat_map(|(a, b, c, d, e)| { + let temp_low = match c { + Some(c) => Value::Text(c.to_string()), + None => Value::Null, + }; + let temp_high = match d { + Some(d) => Value::Text(d.to_string()), + None => Value::Null, + }; + let wind_speed = match e { + Some(e) => Value::Text(e.to_string()), + None => Value::Null, + }; + vec![ + Value::Text(a.to_string()), + Value::Text(b), + temp_low, + temp_high, + wind_speed, + ] + }) + .collect(); + + info!("insert values: {:?}", insert_values); + + let conn = self.new_write_connection_retry().await?; + let mut weather_stmt = conn.prepare(&query_str)?; + weather_stmt.execute(params_from_iter(insert_values.iter()))?; + Ok(()) + } + pub async fn update_event_attestation(&self, event: &SignEvent) -> Result<(), duckdb::Error> { + let entry_score_update_query = update("events") + .set("attestation_signature", "$1") + .where_("events.id = $2"); + + let query_str = 
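+        // `prepare_query` is an EventData helper outside this hunk; judging by its
+        // use here, it adapts the `$1`-style placeholders scooby emits for the driver.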
+        debug!("query_str: {}", query_str);
+
+        let conn = self.new_write_connection_retry().await?;
+        let mut stmt = conn.prepare(&query_str)?;
+
+        let Some(attestation) = event.attestation else {
+            return Err(duckdb::Error::InvalidParameterCount(1, 2));
+        };
+        let attestation_bytes = to_vec(&attestation).unwrap();
+        stmt.execute(params![attestation_bytes, event.id.to_string()])?;
+        Ok(())
+    }
+
+    /// Danger: a raw SQL query is used, input is not escaped with '?'
+    pub async fn update_entry_scores(
+        &self,
+        entry_scores: Vec<(Uuid, i64)>,
+    ) -> Result<(), duckdb::Error> {
+        let number_entry_scores = entry_scores.len();
+        info!("number_entry_scores: {:?}", number_entry_scores);
+
+        let mut entry_score_values = String::new();
+        entry_score_values.push_str("VALUES");
+        for (index, val) in entry_scores.iter().enumerate() {
+            entry_score_values.push_str(&format!("('{}',{})", val.0, val.1));
+            if index + 1 < number_entry_scores {
+                entry_score_values.push(',');
+            }
+        }
+
+        info!("entry_score_values: {}", entry_score_values);
+
+        let mut entry_ids = String::new();
+        entry_ids.push('(');
+        for (index, val) in entry_scores.iter().enumerate() {
+            entry_ids.push_str(&format!("'{}'", &val.0.to_string()));
+            if index + 1 < number_entry_scores {
+                entry_ids.push(',');
+            }
+        }
+        entry_ids.push(')');
+        info!("entry_ids: {}", entry_ids);
+        let scores_temp_select = select("score")
+            .from((entry_score_values).as_("scores(entry_id, score)"))
+            .where_("scores.entry_id = events_entries.id::TEXT")
+            .to_string();
+        let entry_score_update_query = update("events_entries")
+            .set("score", format!("({})", scores_temp_select))
+            .where_(format!("events_entries.id::TEXT IN {}", entry_ids));
+
+        let query_str = entry_score_update_query.to_string();
+        debug!("query_str: {}", query_str);
+
+        let conn = self.new_write_connection_retry().await?;
+        let mut stmt = conn.prepare(&query_str)?;
+        stmt.execute([])?;
+        Ok(())
+    }
+
+    pub async fn get_event_weather(&self, event_id: Uuid) -> Result<Vec<Weather>, duckdb::Error> {
+        let event_weather = select(("station_id", "observed", "forecasted"))
+            .from(
+                "events_weather"
+                    .join("events")
+                    .on("events_weather.event_id = events.id")
+                    .join("weather")
+                    .on("weather.id = events_weather.weather_id"),
+            )
+            .where_("event_id = ?");
+        let query_str = event_weather.to_string();
+        debug!("query_str: {}", query_str);
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let mut stmt = conn.prepare(&query_str)?;
+        let mut event_weather_rows = stmt.query([event_id.to_string()])?;
+        let mut event_weather = vec![];
+        while let Some(row) = event_weather_rows.next()? {
+            let data: Weather = row.try_into()?;
+            event_weather.push(data);
+        }
+        Ok(event_weather)
+    }
+
+    pub async fn get_event_weather_entries(
+        &self,
+        event_id: &Uuid,
+    ) -> Result<Vec<WeatherEntry>, duckdb::Error> {
+        // Query 1
+        let event_entries_select =
+            select(("events_entries.id", "events_entries.event_id", "score"))
+                .from(
+                    "events_entries"
+                        .join("events")
+                        .on("events_entries.event_id = events.id"),
+                )
+                .where_("events_entries.event_id = ?")
+                .group_by(("events_entries.id", "events_entries.event_id", "score"));
+
+        let query_str = event_entries_select.to_string();
+        debug!("query_str: {}", query_str);
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let mut stmt = conn.prepare(&query_str)?;
+        let mut weather_entry_rows = stmt.query([event_id.to_string()])?;
+        let mut weather_entries = vec![];
+        while let Some(row) = weather_entry_rows.next()? {
+            let data: WeatherEntry = row.try_into()?;
+            weather_entries.push(data);
+        }
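+
+        // Entries and their expected_observations live in separate tables;
+        // they are fetched with two queries and stitched together in memory below.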
+        // Query 2
+        let entry_choices = select((
+            "entry_id",
+            "station",
+            "temp_low::TEXT",
+            "temp_high::TEXT",
+            "wind_speed::TEXT",
+        ))
+        .from(
+            "expected_observations"
+                .join("events_entries")
+                .on("events_entries.id = expected_observations.entry_id"),
+        )
+        .where_("events_entries.event_id = $1");
+        let entry_choices_query_str = self.prepare_query(entry_choices.to_string());
+        debug!("query_str: {}", entry_choices_query_str);
+        let mut stmt_choices = conn.prepare(&entry_choices_query_str)?;
+        let mut rows = stmt_choices.query([event_id.to_string()])?;
+
+        // Combine query results
+        let mut weather_choices: HashMap<Uuid, Vec<WeatherChoices>> = HashMap::new();
+        while let Some(row) = rows.next()? {
+            let data: WeatherChoicesWithEntry = row.try_into()?;
+            if let Some(entry_choices) = weather_choices.get_mut(&data.entry_id) {
+                entry_choices.push(data.into());
+            } else {
+                weather_choices.insert(data.entry_id, vec![data.into()]);
+            }
+        }
+
+        for weather_entry in weather_entries.iter_mut() {
+            if let Some(choices) = weather_choices.get(&weather_entry.id) {
+                weather_entry.expected_observations = choices.clone();
+            }
+        }
+
+        Ok(weather_entries)
+    }
+
+    pub async fn get_weather_entry(
+        &self,
+        event_id: &Uuid,
+        entry_id: &Uuid,
+    ) -> Result<WeatherEntry, duckdb::Error> {
+        // Query 1
+        let event_entry = select((
+            "events_entries.id as id",
+            "events_entries.event_id as event_id",
+            "score",
+        ))
+        .from("events_entries")
+        .where_("events_entries.id = $1 AND events_entries.event_id = $2");
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let query_str = self.prepare_query(event_entry.to_string());
+        debug!("query_str: {}", query_str);
+
+        let mut stmt = conn.prepare(&query_str)?;
+        let sql_params_entry = params_from_iter(vec![entry_id.to_string(), event_id.to_string()]);
+        let mut weather_entry: WeatherEntry =
+            stmt.query_row(sql_params_entry, |row| row.try_into())?;
+
+        // Query 2
+        let entry_choices = select((
+            "station",
+            "temp_low::TEXT",
+            "temp_high::TEXT",
+            "wind_speed::TEXT",
+        ))
+        .from("expected_observations")
+        .where_("expected_observations.entry_id = $1");
+        let entry_choices_query_str = self.prepare_query(entry_choices.to_string());
+        debug!("query_str: {}", entry_choices_query_str);
+        let sql_params = params_from_iter(vec![entry_id.to_string()]);
+
+        let mut stmt_choices = conn.prepare(&entry_choices_query_str)?;
+        let mut rows = stmt_choices.query(sql_params)?;
+        let mut weather_choices: Vec<WeatherChoices> = vec![];
+        while let Some(row) = rows.next()? {
+            let data: WeatherChoices = row.try_into()?;
+            weather_choices.push(data);
+        }
+
+        weather_entry.expected_observations = weather_choices;
+        Ok(weather_entry)
+    }
+
+    pub async fn filtered_list_events(
+        &self,
+        filter: EventFilter,
+    ) -> Result<Vec<EventSummary>, duckdb::Error> {
+        let mut events = self.get_filtered_event_summarys(filter).await?;
+        for event in events.iter_mut() {
+            event.weather = self.get_event_weather(event.id).await?;
+        }
+        Ok(events)
+    }
+
+    async fn get_filtered_event_summarys(
+        &self,
+        filter: EventFilter,
+    ) -> Result<Vec<EventSummary>, duckdb::Error> {
+        let event_entries_select = select(("Count(id) as total_entries", "event_id"))
+            .from("events_entries")
+            .group_by("event_id");
+
+        let mut event_select = with("event_entries")
+            .as_(event_entries_select)
+            .select((
+                "id",
+                "signing_date::TEXT",
+                "observation_date::TEXT",
+                "locations",
+                "total_allowed_entries",
+                "event_entries.total_entries as total_entries",
+                "number_of_places_win",
+                "number_of_values_per_entry",
+                "attestation_signature",
+            ))
+            .from(
+                "events"
+                    .join("event_entries")
+                    .on("event_entries.event_id = events.id"),
+            );
+        if let Some(ids) = filter.event_ids.clone() {
+            let mut event_ids_val = String::new();
+            event_ids_val.push('(');
+            for (index, _) in ids.iter().enumerate() {
+                event_ids_val.push('?');
+                if index + 1 < ids.len() {
+                    event_ids_val.push(',');
+                }
+            }
+            event_ids_val.push(')');
+            let where_clause = format!("events.id IN {}", event_ids_val);
+            event_select = event_select.clone().where_(where_clause);
+        }
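+        // Note: only `?` placeholders are interpolated into the SQL string
+        // here; the ids themselves are bound as parameters when the query runs.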
+        if let Some(limit) = filter.limit {
+            event_select = event_select.clone().limit(limit);
+        }
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let query_str = self.prepare_query(event_select.to_string());
+        debug!("query_str: {}", query_str);
+        let mut stmt = conn.prepare(&query_str)?;
+        let mut rows = if let Some(ids) = filter.event_ids {
+            let params: Vec<Value> = ids
+                .iter()
+                .map(|event_id| Value::Text(event_id.to_string()))
+                .collect();
+            stmt.query(params_from_iter(params.iter()))
+        } else {
+            stmt.query([])
+        }?;
+        let mut event_data: Vec<EventSummary> = vec![];
+        while let Some(row) = rows.next()? {
+            let data: EventSummary = row.try_into()?;
+            event_data.push(data);
+        }
+
+        Ok(event_data)
+    }
+
+    pub async fn get_event(&self, id: &Uuid) -> Result<Event, duckdb::Error> {
+        let mut event = self.get_basic_event(id).await?;
+        info!("event: {:?}", event);
+        let weather_entries: Vec<WeatherEntry> = self.get_event_weather_entries(id).await?;
+        event.entries = weather_entries.clone();
+        event.entry_ids = weather_entries.iter().map(|val| val.id).collect();
+        let event_weather: Vec<Weather> = self.get_event_weather(event.id).await?;
+        event.weather = event_weather;
+        info!("event: {:?}", event);
+        Ok(event)
+    }
+
+    async fn get_basic_event(&self, id: &Uuid) -> Result<Event, duckdb::Error> {
+        let event_select = select((
+            "id",
+            "signing_date::TEXT",
+            "observation_date::TEXT",
+            "event_annoucement",
+            "locations",
+            "total_allowed_entries",
+            "number_of_places_win",
+            "number_of_values_per_entry",
+            "attestation_signature",
+            "nonce",
+        ))
+        .from("events")
+        .where_("id = $1");
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let query_str = self.prepare_query(event_select.to_string());
+        debug!("query_str: {}", query_str);
+        let mut stmt = conn.prepare(&query_str)?;
+        let sql_params = params_from_iter(vec![id.to_string()]);
+        stmt.query_row(sql_params, |row| row.try_into())
+    }
+
+    pub async fn get_active_events(&self) -> Result<Vec<ActiveEvent>, duckdb::Error> {
+        let event_entries_select = select(("Count(id) as total_entries", "event_id"))
+            .from("events_entries")
+            .group_by("event_id");
+
+        let event_select = with("event_entries")
+            .as_(event_entries_select)
+            .select((
+                "id",
+                "signing_date::TEXT",
+                "observation_date::TEXT",
+                "locations",
+                "total_allowed_entries",
+                "total_entries",
+                "number_of_places_win",
+                "number_of_values_per_entry",
+                "attestation_signature",
+            ))
+            .from(
+                "events"
+                    .join("event_entries")
+                    .on("event_entries.event_id = events.id"),
+            )
+            .where_("attestation_signature IS NULL"); // only filter out events that have already been signed
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let query_str = self.prepare_query(event_select.to_string());
+        debug!("query_str: {}", query_str);
+        let mut stmt = conn.prepare(&query_str)?;
+
+        let mut rows = stmt.query([])?;
+        let mut event_data: Vec<ActiveEvent> = vec![];
+        while let Some(row) = rows.next()? {
+            let data: ActiveEvent = row.try_into()?;
+            event_data.push(data);
+        }
+
+        Ok(event_data)
+    }
+
+    pub async fn get_events_to_sign(
+        &self,
+        event_ids: Vec<Uuid>,
+    ) -> Result<Vec<SignEvent>, duckdb::Error> {
+        let mut event_ids_val = String::new();
+        event_ids_val.push('(');
+        for (index, _) in event_ids.iter().enumerate() {
+            event_ids_val.push('?');
+            if index + 1 < event_ids.len() {
+                event_ids_val.push(',');
+            }
+        }
+        event_ids_val.push(')');
+        let where_clause = format!(
+            "attestation_signature IS NULL AND events.id IN {}",
+            event_ids_val
+        );
+
+        let event_select = select((
+            "id",
+            "signing_date::TEXT",
+            "observation_date::TEXT",
+            "number_of_places_win",
+            "number_of_values_per_entry",
+            "attestation_signature",
+            "nonce",
+            "event_annoucement",
+        ))
+        .from("events")
+        .where_(where_clause);
+
+        let params: Vec<Value> = event_ids
+            .iter()
+            .map(|event_id| Value::Text(event_id.to_string()))
+            .collect();
+
+        let conn = self.new_readonly_connection_retry().await?;
+        let query_str = self.prepare_query(event_select.to_string());
+        debug!("query_str: {}", query_str);
+        let mut stmt = conn.prepare(&query_str)?;
+
+        let mut rows = stmt.query(params_from_iter(params.iter()))?;
+        let mut event_data: Vec<SignEvent> = vec![];
+        while let Some(row) = rows.next()? {
+            let data: SignEvent = row.try_into()?;
+            event_data.push(data);
+        }
+
+        Ok(event_data)
+    }
+
+    fn prepare_query(&self, query: String) -> String {
+        let re = Regex::new(r"\$(\d+)").unwrap();
+        let fixed_params = re.replace_all(&query, "?");
+        fixed_params.to_string()
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+pub struct CreateEvent {
+    /// Client needs to provide a valid UUIDv7
+    pub id: Uuid,
+    #[serde(with = "utc_datetime")]
+    /// Time at which the attestation will be added to the event, needs to be after the observation date
+    pub signing_date: OffsetDateTime,
+    #[serde(with = "utc_datetime")]
+    /// Date of when the weather observations occurred (midnight UTC), all entries must be made before this time
+    pub observation_date: OffsetDateTime,
+    /// NOAA observation stations used in this event
+    pub locations: Vec<String>,
+    /// The number of values that can be selected per entry in the event (defaults to number_of_locations * 3: temp_low, temp_high, wind_speed)
+    pub number_of_values_per_entry: usize,
+    /// Total number of allowed entries into the event
+    pub total_allowed_entries: usize,
+    /// Total number of places that are part of the winnings split
+    pub number_of_places_win: usize,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CreateEventData {
+    /// Provide UUIDv7 to use for looking up the event
+    pub id: Uuid,
+    #[serde(with = "utc_datetime")]
+    /// Time at which the attestation will be added to the event
+    pub signing_date: OffsetDateTime,
+    #[serde(with = "utc_datetime")]
+    /// Date of when the weather observations occurred (midnight UTC), all entries must be made before this time
+    pub observation_date: OffsetDateTime,
+    /// NOAA observation stations used in this event
+    pub locations: Vec<String>,
+    /// The number of values that can be selected per entry in the event (defaults to number_of_locations * 3: temp_low, temp_high, wind_speed)
+    pub number_of_values_per_entry: i64,
+    pub total_allowed_entries: i64,
+    pub number_of_places_win: i64,
+    /// Used to sign the result of the event being watched
+    pub nonce: Scalar,
+    /// Used in constructing the dlctix transactions
+    pub event_annoucement: EventAnnouncement,
+}
+
+impl CreateEventData {
+    //TODO: use the builder pattern
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        oracle: &Oracle,
+        event_id: Uuid,
+        observation_date: OffsetDateTime,
+        signing_date: OffsetDateTime,
+        locations: Vec<String>,
+        total_allowed_entries: usize,
+        number_of_places_win: usize,
+        number_of_values_per_entry: usize,
+    ) -> Result<Self, anyhow::Error> {
+        if event_id.get_version_num() != 7 {
+            return Err(anyhow!(
+                "Client needs to provide a valid UUIDv7 for event id {}",
+                event_id
+            ));
+        }
+        if observation_date > signing_date {
+            return Err(anyhow::anyhow!(
+                "Signing date {} needs to be after observation date {}",
+                signing_date.format(&Rfc3339).unwrap(),
+                observation_date.format(&Rfc3339).unwrap()
+            ));
+        }
+
+        let public_key = oracle.raw_public_key();
+        // number_of_values_per_entry * 2 == max score; create the array from max score down to 0
+        // to determine all possible messages that we might sign
+        let max_number_of_points_per_value_in_entry = 2;
+        let possible_scores: Vec<i64> = (0..=(number_of_values_per_entry
+            * max_number_of_points_per_value_in_entry))
+            .map(|val| val as i64)
+            .collect();
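+
+        // The oracle must commit up front to every outcome it could ever sign,
+        // so every score from 0 to number_of_values_per_entry * 2 is enumerated
+        // and expanded into the possible winner rankings below.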
+        // allows us to have comps where, say, the top 3 scores split the pot
+        let possible_outcome_rankings: Vec<Vec<i64>> = possible_scores
+            .iter()
+            .combinations(number_of_places_win)
+            .filter(|combination| {
+                // Check if the combination is sorted in descending order; if not, filter it out of the possible outcomes
+                combination.windows(2).all(|window| window[0] >= window[1])
+            })
+            .map(|combination| combination.into_iter().cloned().collect())
+            .collect();
+        info!("outcomes: {:?}", possible_outcome_rankings);
+        // holds all possible scoring results of the event
+        let outcome_messages: Vec<Vec<u8>> = possible_outcome_rankings
+            .into_iter()
+            .map(|inner_vec| {
+                inner_vec
+                    .into_iter()
+                    .flat_map(|num| num.to_be_bytes())
+                    .collect::<Vec<u8>>()
+            })
+            .collect();
+
+        let mut rng = rand::thread_rng();
+        let nonce = Scalar::random(&mut rng);
+        let nonce_point = nonce.base_point_mul();
+        // Manually set expiry to 7 days after the signature should have been provided so users can get their funds back
+        let expiry = signing_date
+            .saturating_add(Duration::DAY * 7)
+            .unix_timestamp() as u32;
+
+        // The actual announcement the oracle is going to attest the outcome of
+        let event_annoucement = EventAnnouncement {
+            oracle_pubkey: public_key.into(),
+            nonce_point,
+            outcome_messages,
+            expiry: Some(expiry),
+        };
+
+        Ok(Self {
+            id: event_id,
+            observation_date,
+            signing_date,
+            nonce,
+            total_allowed_entries: total_allowed_entries as i64,
+            number_of_places_win: number_of_places_win as i64,
+            number_of_values_per_entry: number_of_values_per_entry as i64,
+            locations,
+            event_annoucement,
+        })
+    }
+}
+
+impl From<CreateEventData> for Event {
+    fn from(value: CreateEventData) -> Self {
+        Self {
+            id: value.id,
+            signing_date: value.signing_date,
+            observation_date: value.observation_date,
+            locations: value.locations,
+            total_allowed_entries: value.total_allowed_entries,
+            number_of_places_win: value.number_of_places_win,
+            number_of_values_per_entry: value.number_of_values_per_entry,
+            event_annoucement: value.event_annoucement,
+            nonce: value.nonce,
+            status: EventStatus::default(),
+            entry_ids: vec![],
+            entries: vec![],
+            weather: vec![],
+            attestation: None,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, IntoParams)]
+pub struct EventFilter {
+    // TODO: add more options, proper pagination and search
+    pub limit: Option<usize>,
+    pub event_ids: Option<Vec<Uuid>>,
+}
+
+impl Default for EventFilter {
+    fn default() -> Self {
+        Self {
+            limit: Some(100_usize),
+            event_ids: None,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct SignEvent {
+    pub id: Uuid,
+    #[serde(with = "utc_datetime")]
+    pub signing_date: OffsetDateTime,
+    #[serde(with = "utc_datetime")]
+    pub observation_date: OffsetDateTime,
+    pub status: EventStatus,
+    pub nonce: Scalar,
+    pub event_annoucement: EventAnnouncement,
+    pub number_of_places_win: i64,
+    pub number_of_values_per_entry: i64,
+    pub attestation: Option<MaybeScalar>,
+}
+
+impl SignEvent {
+    pub fn update_status(&mut self) {
+        self.status = get_status(self.observation_date, self.attestation)
+    }
+}
+
+impl<'a> TryFrom<&Row<'a>> for SignEvent {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        //raw date format: 2024-08-11 00:27:39.013046-04
+        let sql_time_format = format_description!(
+            "[year]-[month]-[day] [hour]:[minute]:[second][optional [.[subsecond]]][offset_hour]"
+        );
+        let mut sign_events = SignEvent {
+            id: row
+                .get::<_, String>(0)
+                .map(|val| Uuid::parse_str(&val))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            signing_date: row
+                .get::<_, String>(1)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            observation_date: row
+                .get::<_, String>(2)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(2, Type::Any, Box::new(e)))?,
+            status: EventStatus::default(),
+            number_of_places_win: row.get::<_, i64>(3)?,
+            number_of_values_per_entry: row.get::<_, i64>(4)?,
+            attestation: row
+                .get::<_, Value>(5)
+                .map(|v| {
+                    let blob_attestation = match v {
+                        Value::Blob(raw) => raw,
+                        _ => vec![],
+                    };
+                    if !blob_attestation.is_empty() {
+                        //TODO: handle the conversion more gracefully than unwrap
+                        Some(MaybeScalar::from_slice(blob_attestation.to_byte_slice()).unwrap())
+                    } else {
+                        None
+                    }
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(5, Type::Any, Box::new(e)))?,
+            nonce: row
+                .get::<_, Value>(6)
+                .map(|raw| {
+                    let blob = match raw {
+                        Value::Blob(val) => val,
+                        _ => vec![],
+                    };
+                    serde_json::from_slice(&blob)
+                })?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(6, Type::Any, Box::new(e)))?,
+            event_annoucement: row
+                .get::<_, Value>(7)
+                .map(|raw| {
+                    let blob = match raw {
+                        Value::Blob(val) => val,
+                        _ => vec![],
+                    };
+                    serde_json::from_slice(&blob)
+                })?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(7, Type::Any, Box::new(e)))?,
+        };
+        sign_events.update_status();
+        Ok(sign_events)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct ActiveEvent {
+    pub id: Uuid,
+    pub locations: Vec<String>,
+    #[serde(with = "utc_datetime")]
+    pub signing_date: OffsetDateTime,
+    #[serde(with = "utc_datetime")]
+    pub observation_date: OffsetDateTime,
+    pub status: EventStatus,
+    pub total_allowed_entries: i64,
+    pub total_entries: i64,
+    pub number_of_values_per_entry: i64,
+    pub number_of_places_win: i64,
+    pub attestation: Option<MaybeScalar>,
+}
+
+impl ActiveEvent {
+    pub fn update_status(&mut self) {
+        self.status = get_status(self.observation_date, self.attestation)
+    }
+}
+
+#[derive(Debug, Default, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub enum EventStatus {
+    /// Observation date has not passed yet and entries can be added
+    #[default]
+    Live,
+    /// Currently in the observation window, entries can no longer be added
+    Running,
+    /// Observation window has finished but the event is not yet signed
+    Completed,
+    /// Event has completed and been signed by the oracle
+    Signed,
+}
+
+impl std::fmt::Display for EventStatus {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Live => write!(f, "live"),
+            Self::Running => write!(f, "running"),
+            Self::Completed => write!(f, "completed"),
+            Self::Signed => write!(f, "signed"),
+        }
+    }
+}
+
+impl TryFrom<&str> for EventStatus {
+    type Error = anyhow::Error;
+
+    fn try_from(s: &str) -> Result<Self, Self::Error> {
+        match s {
+            "live" => Ok(EventStatus::Live),
+            "running" => Ok(EventStatus::Running),
+            "completed" => Ok(EventStatus::Completed),
+            "signed" => Ok(EventStatus::Signed),
+            val => Err(anyhow!("invalid status: {}", val)),
+        }
+    }
+}
+
+impl TryFrom<String> for EventStatus {
+    type Error = anyhow::Error;
+
+    fn try_from(s: String) -> Result<Self, Self::Error> {
+        match s.as_str() {
+            "live" => Ok(EventStatus::Live),
+            "running" => Ok(EventStatus::Running),
+            "completed" => Ok(EventStatus::Completed),
+            "signed" => Ok(EventStatus::Signed),
+            val => Err(anyhow!("invalid status: {}", val)),
+        }
+    }
+}
+
+impl<'a> TryFrom<&Row<'a>> for ActiveEvent {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        //raw date format: 2024-08-11 00:27:39.013046-04
+        let sql_time_format = format_description!(
+            "[year]-[month]-[day] [hour]:[minute]:[second][optional [.[subsecond]]][offset_hour]"
+        );
+        let mut active_events = ActiveEvent {
+            id: row
+                .get::<_, String>(0)
+                .map(|val| Uuid::parse_str(&val))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            signing_date: row
+                .get::<_, String>(1)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            observation_date: row
+                .get::<_, String>(2)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(2, Type::Any, Box::new(e)))?,
+            locations: row
+                .get::<_, Value>(3)
+                .map(|locations| {
+                    let list_locations = match locations {
+                        Value::List(list) => list,
+                        _ => vec![],
+                    };
+                    let mut locations_conv = vec![];
+                    for value in list_locations.iter() {
+                        if let Value::Text(location) = value {
+                            locations_conv.push(location.clone())
+                        }
+                    }
+                    locations_conv
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(3, Type::Any, Box::new(e)))?,
+            total_allowed_entries: row.get::<_, i64>(4)?,
+            status: EventStatus::default(),
+            total_entries: row.get::<_, i64>(5)?,
+            number_of_places_win: row.get::<_, i64>(6)?,
+            number_of_values_per_entry: row.get::<_, i64>(7)?,
+            attestation: row
+                .get::<_, Value>(8)
+                .map(|v| {
+                    let blob_attestation = match v {
+                        Value::Blob(raw) => raw,
+                        _ => vec![],
+                    };
+                    if !blob_attestation.is_empty() {
+                        //TODO: handle the conversion more gracefully than unwrap
+                        Some(MaybeScalar::from_slice(blob_attestation.to_byte_slice()).unwrap())
+                    } else {
+                        None
+                    }
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(8, Type::Any, Box::new(e)))?,
+        };
+        active_events.update_status();
+        Ok(active_events)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct EventSummary {
+    pub id: Uuid,
+    #[serde(with = "utc_datetime")]
+    /// Time at which the attestation will be added to the event
+    pub signing_date: OffsetDateTime,
+    #[serde(with = "utc_datetime")]
+    /// Date of when the weather observations occurred
+    pub observation_date: OffsetDateTime,
+    /// NOAA observation stations used in this event
+    pub locations: Vec<String>,
+    /// The number of values that can be selected per entry in the event (defaults to number_of_locations * 3: temp_low, temp_high, wind_speed)
+    pub number_of_values_per_entry: i64,
+    /// Current status of the event, where in the lifecycle we are (LIVE, RUNNING, COMPLETED, SIGNED; defaults to LIVE)
+    pub status: EventStatus,
+    /// Knowing the total number of entries and how many can place,
+    /// the dlctix coordinator can determine how many transactions to create
+    pub total_allowed_entries: i64,
+    /// Needs to all be generated at the start
+    pub total_entries: i64,
+    pub number_of_places_win: i64,
+    /// The forecasted and observed values for each station on the event date
+    pub weather: Vec<Weather>,
+    /// When added it means the oracle has signed that the current data is the final result
+    pub attestation: Option<MaybeScalar>,
+}
+
+impl EventSummary {
+    pub fn update_status(&mut self) {
+        self.status = get_status(self.observation_date, self.attestation)
+    }
+}
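+
+/// Computes the lifecycle stage: Live until the observation date arrives,
+/// Running during the one-day observation window, Completed once that window
+/// has passed unsigned, and Signed as soon as an attestation exists.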
+pub fn get_status(
+    observation_date: OffsetDateTime,
+    attestation: Option<MaybeScalar>,
+) -> EventStatus {
+    //always have the events run for a single day for now
+    if observation_date < OffsetDateTime::now_utc()
+        && observation_date.saturating_add(Duration::days(1)) > OffsetDateTime::now_utc()
+        && attestation.is_none()
+    {
+        return EventStatus::Running;
+    }
+
+    if observation_date < OffsetDateTime::now_utc()
+        && observation_date.saturating_add(Duration::days(1)) < OffsetDateTime::now_utc()
+        && attestation.is_none()
+    {
+        return EventStatus::Completed;
+    }
+
+    if attestation.is_some() {
+        return EventStatus::Signed;
+    }
+    //default to live
+    EventStatus::Live
+}
+
+impl<'a> TryFrom<&Row<'a>> for EventSummary {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        //raw date format: 2024-08-11 00:27:39.013046-04
+        let sql_time_format = format_description!(
+            "[year]-[month]-[day] [hour]:[minute]:[second][optional [.[subsecond]]][offset_hour]"
+        );
+        let mut event_summary = EventSummary {
+            id: row
+                .get::<_, String>(0)
+                .map(|val| Uuid::parse_str(&val))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            signing_date: row
+                .get::<_, String>(1)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            observation_date: row
+                .get::<_, String>(2)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(2, Type::Any, Box::new(e)))?,
+            status: EventStatus::default(),
+            locations: row
+                .get::<_, Value>(3)
+                .map(|locations| {
+                    let list_locations = match locations {
+                        Value::List(list) => list,
+                        _ => vec![],
+                    };
+                    let mut locations_conv = vec![];
+                    for value in list_locations.iter() {
+                        if let Value::Text(location) = value {
+                            locations_conv.push(location.clone())
+                        }
+                    }
+                    locations_conv
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(3, Type::Any, Box::new(e)))?,
+            total_allowed_entries: row.get::<_, i64>(4)?,
+            total_entries: row.get::<_, i64>(5)?,
+            number_of_places_win: row.get::<_, i64>(6)?,
+            number_of_values_per_entry: row.get::<_, i64>(7)?,
+            attestation: row
+                .get::<_, Value>(8)
+                .map(|v| {
+                    let blob_attestation = match v {
+                        Value::Blob(raw) => raw,
+                        _ => vec![],
+                    };
+                    if !blob_attestation.is_empty() {
+                        //TODO: handle the conversion more gracefully than unwrap
+                        Some(MaybeScalar::from_slice(blob_attestation.to_byte_slice()).unwrap())
+                    } else {
+                        None
+                    }
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(8, Type::Any, Box::new(e)))?,
+            weather: row
+                .get::<_, Value>(9)
+                .map(|raw| {
+                    let list_weather = match raw {
+                        Value::List(list) => list,
+                        _ => vec![],
+                    };
+                    let mut weather_data = vec![];
+                    for value in list_weather.iter() {
+                        if let Value::Struct(data) = value {
+                            let weather: Weather = match data.try_into() {
+                                Ok(val) => val,
+                                Err(e) => return Err(e),
+                            };
+                            weather_data.push(weather)
+                        }
+                    }
+                    Ok(weather_data)
+                })?
+                .map_err(|e| {
+                    duckdb::Error::DuckDBFailure(
+                        ffi::Error {
+                            code: ErrorCode::TypeMismatch,
+                            extended_code: 0,
+                        },
+                        Some(e.to_string()),
+                    )
+                })?,
+        };
+        event_summary.update_status();
+        Ok(event_summary)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct Event {
+    pub id: Uuid,
+    #[serde(with = "utc_datetime")]
+    /// Time at which the attestation will be added to the event
+    pub signing_date: OffsetDateTime,
+    #[serde(with = "utc_datetime")]
+    /// Date of when the weather observations occurred
+    pub observation_date: OffsetDateTime,
+    /// NOAA observation stations used in this event
+    pub locations: Vec<String>,
+    /// The number of values that can be selected per entry in the event (defaults to number_of_locations * 3: temp_low, temp_high, wind_speed)
+    pub number_of_values_per_entry: i64,
+    /// Current status of the event, where in the lifecycle we are (LIVE, RUNNING, COMPLETED, SIGNED)
+    pub status: EventStatus,
+    /// Knowing the total number of entries and how many can place,
+    /// the dlctix coordinator can determine how many transactions to create
+    pub total_allowed_entries: i64,
+    /// Needs to all be generated at the start
+    pub entry_ids: Vec<Uuid>,
+    pub number_of_places_win: i64,
+    /// All entries into this event; won't be returned until the observation date begins, then ranked by score
+    pub entries: Vec<WeatherEntry>,
+    /// The forecasted and observed values for each station on the event date
+    pub weather: Vec<Weather>,
+    /// Nonce the oracle committed to use as part of signing final results
+    pub nonce: Scalar,
+    /// Holds the predefined outcomes the oracle will attest to at event completion
+    pub event_annoucement: EventAnnouncement,
+    /// When added it means the oracle has signed that the current data is the final result
+    pub attestation: Option<MaybeScalar>,
+}
+
+impl Event {
+    pub fn update_status(&mut self) {
+        self.status = get_status(self.observation_date, self.attestation)
+    }
+}
+
+impl<'a> TryFrom<&Row<'a>> for Event {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        //raw date format: 2024-08-11 00:27:39.013046-04
+        let sql_time_format = format_description!(
+            "[year]-[month]-[day] [hour]:[minute]:[second][optional [.[subsecond]]][offset_hour]"
+        );
+        let mut oracle_event_data = Event {
+            id: row
+                .get::<_, String>(0)
+                .map(|val| {
+                    debug!("{}", val.to_string());
+                    Uuid::parse_str(&val)
+                })?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            signing_date: row
+                .get::<_, String>(1)
+                .map(|val| {
+                    debug!("{}", val.to_string());
+                    OffsetDateTime::parse(&val, &sql_time_format)
+                })?
+                .map(|val| {
+                    debug!("{}", val.to_string());
+                    val.to_offset(UtcOffset::UTC)
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            observation_date: row
+                .get::<_, String>(2)
+                .map(|val| OffsetDateTime::parse(&val, &sql_time_format))?
+                .map(|val| val.to_offset(UtcOffset::UTC))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(2, Type::Any, Box::new(e)))?,
+            event_annoucement: row
+                .get::<_, Value>(3)
+                .map(|raw| {
+                    let blob = match raw {
+                        Value::Blob(val) => val,
+                        _ => vec![],
+                    };
+                    serde_json::from_slice(&blob)
+                })?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(3, Type::Any, Box::new(e)))?,
+            locations: row
+                .get::<_, Value>(4)
+                .map(|locations| {
+                    let list_locations = match locations {
+                        Value::List(list) => list,
+                        _ => vec![],
+                    };
+                    let mut locations_conv = vec![];
+                    for value in list_locations.iter() {
+                        if let Value::Text(location) = value {
+                            locations_conv.push(location.clone())
+                        }
+                    }
+                    info!("locations: {:?}", locations_conv);
+                    locations_conv
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(4, Type::Any, Box::new(e)))?,
+            total_allowed_entries: row.get::<_, i64>(5)?,
+            number_of_places_win: row.get::<_, i64>(6)?,
+            number_of_values_per_entry: row.get::<_, i64>(7)?,
+            attestation: row
+                .get::<_, Value>(8)
+                .map(|v| {
+                    info!("val: {:?}", v);
+                    let blob_attestation = match v {
+                        Value::Blob(raw) => raw,
+                        _ => vec![],
+                    };
+                    if !blob_attestation.is_empty() {
+                        //TODO: handle the conversion more gracefully than unwrap
+                        let converted: MaybeScalar =
+                            serde_json::from_slice(&blob_attestation).unwrap();
+                        Some(converted)
+                    } else {
+                        None
+                    }
+                })
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(8, Type::Any, Box::new(e)))?,
+            nonce: row
+                .get::<_, Value>(9)
+                .map(|raw| {
+                    let blob = match raw {
+                        Value::Blob(val) => val,
+                        _ => vec![],
+                    };
+                    serde_json::from_slice(&blob)
+                })?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(9, Type::Any, Box::new(e)))?,
+            status: EventStatus::default(),
+            //These nested values have to be filled in by further queries
+            entry_ids: vec![],
+            entries: vec![],
+            weather: vec![],
+        };
+        oracle_event_data.update_status();
+        Ok(oracle_event_data)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct Weather {
+    pub station_id: String,
+    pub observed: Option<Observed>,
+    pub forecasted: Forecasted,
+}
+
+impl<'a> TryFrom<&Row<'a>> for Weather {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        let observed: Option<Observed> = row
+            .get::<_, Value>(1)
+            .map(|raw_observed| match raw_observed.clone() {
+                Value::Struct(observed) => Some(observed.try_into().map_err(|e: anyhow::Error| {
+                    duckdb::Error::DuckDBFailure(
+                        ffi::Error {
+                            code: ErrorCode::TypeMismatch,
+                            extended_code: 0,
+                        },
+                        Some(format!(
+                            "error formatting observed: {:?} {}",
+                            raw_observed, e
+                        )),
+                    )
+                })),
+                _ => None,
+            })
+            .and_then(|option_inner_result| match option_inner_result {
+                Some(inner_result) => inner_result.map(Some),
+                None => Ok(None),
+            })?;
+
+        let forecasted: Forecasted =
+            row.get::<_, Value>(2)
+                .map(|raw_forecasted| match raw_forecasted.clone() {
+                    Value::Struct(forecasted) => {
+                        forecasted.try_into().map_err(|e: anyhow::Error| {
+                            duckdb::Error::DuckDBFailure(
+                                ffi::Error {
+                                    code: ErrorCode::TypeMismatch,
+                                    extended_code: 0,
+                                },
+                                Some(format!(
+                                    "error formatting forecast: {:?} {}",
+                                    raw_forecasted, e
+                                )),
+                            )
+                        })
+                    }
+                    _ => Err(duckdb::Error::DuckDBFailure(
+                        ffi::Error {
+                            code: ErrorCode::TypeMismatch,
+                            extended_code: 0,
+                        },
+                        None,
+                    )),
+                })??;
+        Ok(Weather {
+            station_id: row.get::<_, String>(0)?,
+            forecasted,
+            observed,
+        })
+    }
+}
+
+impl TryFrom<&Forecast> for Forecasted {
+    type Error = weather_data::Error;
+    fn try_from(value: &Forecast) -> Result<Self, Self::Error> {
+        let format = format_description!("[year]-[month]-[day]");
+        let date = Date::parse(&value.date, format)?;
+        let datetime = date.with_hms(0, 0, 0).unwrap();
+        let datetime_off = datetime.assume_offset(UtcOffset::from_hms(0, 0, 0).unwrap());
+        Ok(Self {
+            date: datetime_off,
+            temp_low: value.temp_low,
+            temp_high: value.temp_high,
+            wind_speed: value.wind_speed,
+        })
+    }
+}
+
+impl TryInto<Weather> for &OrderedMap<String, Value> {
+    type Error = duckdb::Error;
+
+    fn try_into(self) -> Result<Weather, Self::Error> {
+        let values: Vec<&Value> = self.values().collect();
+
+        let station_id = values
+            .first()
+            .ok_or_else(|| {
+                duckdb::Error::DuckDBFailure(
+                    ffi::Error {
+                        code: ErrorCode::TypeMismatch,
+                        extended_code: 0,
+                    },
+                    Some(String::from("unable to convert station_id")),
+                )
+            })
+            .and_then(|raw_station| match raw_station {
+                Value::Text(station) => Ok(station.clone()),
+                _ => Err(duckdb::Error::DuckDBFailure(
+                    ffi::Error {
+                        code: ErrorCode::TypeMismatch,
+                        extended_code: 0,
+                    },
+                    Some(format!(
+                        "error converting station id into string: {:?}",
+                        raw_station
+                    )),
+                )),
+            })?;
+        let observed: Option<Observed> = if let Some(Value::Struct(observed)) = values.get(1) {
+            let observed_converted = observed.try_into().map_err(|e| {
+                duckdb::Error::DuckDBFailure(
+                    ffi::Error {
+                        code: ErrorCode::TypeMismatch,
+                        extended_code: 0,
+                    },
+                    Some(format!("error converting observed: {}", e)),
+                )
+            })?;
+            Some(observed_converted)
+        } else {
+            None
+        };
+        let forecasted = values
+            .get(2)
+            .ok_or_else(|| anyhow!("forecasted not found in the map"))
+            .and_then(|raw_forecasted| match raw_forecasted {
+                Value::Struct(forecasted) => forecasted.try_into(),
+                _ => Err(anyhow!(
+                    "error converting forecasted into struct: {:?}",
+                    raw_forecasted
+                )),
+            })
+            .map_err(|e| {
+                duckdb::Error::DuckDBFailure(
+                    ffi::Error {
+                        code: ErrorCode::TypeMismatch,
+                        extended_code: 0,
+                    },
+                    Some(e.to_string()),
+                )
+            })?;
+        Ok(Weather {
+            station_id,
+            observed,
+            forecasted,
+        })
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct Observed {
+    #[serde(with = "utc_datetime")]
+    pub date: OffsetDateTime,
+    pub temp_low: i64,
+    pub temp_high: i64,
+    pub wind_speed: i64,
+}
+
+impl TryFrom<&Observation> for Observed {
+    type Error = weather_data::Error;
+    fn try_from(value: &Observation) -> Result<Self, Self::Error> {
+        Ok(Self {
+            date: OffsetDateTime::parse(&value.start_time, &Rfc3339)?,
+            temp_low: value.temp_low.round() as i64,
+            temp_high: value.temp_high.round() as i64,
+            wind_speed: value.wind_speed,
+        })
+    }
+}
+
+impl TryInto<Observed> for &OrderedMap<String, Value> {
+    type Error = anyhow::Error;
+
+    fn try_into(self) -> Result<Observed, Self::Error> {
+        debug!("raw observed: {:?}", self);
+        let values: Vec<&Value> = self.values().collect();
+        let date = values
+            .first()
+            .ok_or_else(|| anyhow!("date not found in the map"))
+            .and_then(|raw_date| match raw_date {
+                Value::Timestamp(duckdb::types::TimeUnit::Microsecond, raw_date) => Ok(raw_date),
+                v => Err(anyhow!(
+                    "error converting observed date into OffsetDatetime: {:?}, {:?}",
+                    raw_date,
+                    v
+                )),
+            })
+            .and_then(|timestamp| {
+                OffsetDateTime::from_unix_timestamp_nanos((*timestamp as i128) * 1000_i128).map_err(
+                    |e| {
+                        anyhow!(
+                            "error parsing observed date into offsetdatetime: {} {}",
+                            timestamp,
+                            e
+                        )
+                    },
+                )
+            })
+            .map(|val| val.to_offset(UtcOffset::UTC))?;
+
+        let temp_low = values
+            .get(1)
+            .ok_or_else(|| anyhow!("temp_low not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let temp_high = values
+            .get(2)
+            .ok_or_else(|| anyhow!("temp_high not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let wind_speed = values
+            .get(3)
+            .ok_or_else(|| anyhow!("wind_speed not found in the map"))
+            .and_then(|raw_speed| match raw_speed {
+                Value::Int(speed) => Ok(*speed as i64),
+                _ => Err(anyhow!(
+                    "error converting wind_speed into int: {:?}",
+                    raw_speed
+                )),
+            })?;
+
+        Ok(Observed {
+            date,
+            temp_low,
+            temp_high,
+            wind_speed,
+        })
+    }
+}
+
+impl TryInto<Observed> for OrderedMap<String, Value> {
+    type Error = anyhow::Error;
+
+    fn try_into(self) -> Result<Observed, Self::Error> {
+        debug!("raw observed: {:?}", self);
+        let values: Vec<&Value> = self.values().collect();
+        let date = values
+            .first()
+            .ok_or_else(|| anyhow!("date not found in the map"))
+            .and_then(|raw_date| match raw_date {
+                Value::Timestamp(duckdb::types::TimeUnit::Microsecond, raw_date) => Ok(raw_date),
+                v => Err(anyhow!(
+                    "error converting observed date into OffsetDatetime: {:?}, {:?}",
+                    raw_date,
+                    v
+                )),
+            })
+            .and_then(|timestamp| {
+                OffsetDateTime::from_unix_timestamp_nanos((*timestamp as i128) * 1000_i128).map_err(
+                    |e| {
+                        anyhow!(
+                            "error parsing observed date into offsetdatetime: {} {}",
+                            timestamp,
+                            e
+                        )
+                    },
+                )
+            })
+            .map(|val| val.to_offset(UtcOffset::UTC))?;
+
+        let temp_low = values
+            .get(1)
+            .ok_or_else(|| anyhow!("temp_low not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let temp_high = values
+            .get(2)
+            .ok_or_else(|| anyhow!("temp_high not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let wind_speed = values
+            .get(3)
+            .ok_or_else(|| anyhow!("wind_speed not found in the map"))
+            .and_then(|raw_speed| match raw_speed {
+                Value::Int(speed) => Ok(*speed as i64),
+                _ => Err(anyhow!(
+                    "error converting wind_speed into int: {:?}",
+                    raw_speed
+                )),
+            })?;
+
+        Ok(Observed {
+            date,
+            temp_low,
+            temp_high,
+            wind_speed,
+        })
+    }
+}
+
+impl ToSql for Observed {
+    fn to_sql(&self) -> duckdb::Result<ToSqlOutput<'_>> {
+        let ordered_struct: OrderedMap<String, Value> = OrderedMap::from(vec![
+            (
+                String::from("date"),
+                Value::Text(self.date.format(&Rfc3339).unwrap()),
+            ),
+            (String::from("temp_low"), Value::Int(self.temp_low as i32)),
+            (String::from("temp_high"), Value::Int(self.temp_high as i32)),
+            (
+                String::from("wind_speed"),
+                Value::Int(self.wind_speed as i32),
+            ),
+        ]);
+        Ok(ToSqlOutput::Owned(Value::Struct(ordered_struct)))
+    }
+}
+
+impl ToRawSql for Observed {
+    fn to_raw_sql(&self) -> String {
+        // Done because the rust library doesn't natively support writing structs to the db just yet;
+        // eventually we should be able to delete this code.
+        // Example of how to write a struct to duckdb: `INSERT INTO t1 VALUES (ROW('a', 42));`
+        let mut vals = String::new();
+        vals.push_str("ROW('");
+        let data_str = self.date.format(&Rfc3339).unwrap();
+        vals.push_str(&data_str);
+        vals.push_str(r#"',"#);
+        vals.push_str(&format!("{}", self.temp_low));
+        vals.push(',');
+        vals.push_str(&format!("{}", self.temp_high));
+        vals.push(',');
+        vals.push_str(&format!("{}", self.wind_speed));
+        vals.push(')');
+        vals
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct Forecasted {
+    #[serde(with = "utc_datetime")]
+    pub date: OffsetDateTime,
+    pub temp_low: i64,
+    pub temp_high: i64,
+    pub wind_speed: i64,
+}
+
+impl TryInto<Forecasted> for &OrderedMap<String, Value> {
+    type Error = anyhow::Error;
+
+    fn try_into(self) -> Result<Forecasted, Self::Error> {
+        let values: Vec<&Value> = self.values().collect();
+        let date = values
+            .first()
+            .ok_or_else(|| anyhow!("date not found in the map"))
+            .and_then(|raw_date| match raw_date {
+                Value::Timestamp(duckdb::types::TimeUnit::Microsecond, raw_date) => Ok(raw_date),
+                _ => Err(anyhow!(
+                    "error converting date into OffsetDatetime: {:?}",
+                    raw_date
+                )),
+            })
+            .and_then(|timestamp| {
+                OffsetDateTime::from_unix_timestamp_nanos((*timestamp as i128) * 1000_i128).map_err(
+                    |e| {
+                        anyhow!(
+                            "error parsing forecast date into offsetdatetime: {} {}",
+                            timestamp,
+                            e
+                        )
+                    },
+                )
+            })
+            .map(|val| val.to_offset(UtcOffset::UTC))?;
+
+        let temp_low = values
+            .get(1)
+            .ok_or_else(|| anyhow!("temp_low not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let temp_high = values
+            .get(2)
+            .ok_or_else(|| anyhow!("temp_high not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let wind_speed = values
+            .get(3)
+            .ok_or_else(|| anyhow!("wind_speed not found in the map"))
+            .and_then(|raw_speed| match raw_speed {
+                Value::Int(speed) => Ok(*speed as i64),
+                _ => Err(anyhow!(
+                    "error converting wind_speed into int: {:?}",
+                    raw_speed
+                )),
+            })?;
+
+        Ok(Forecasted {
+            date,
+            temp_low,
+            temp_high,
+            wind_speed,
+        })
+    }
+}
+
+impl TryInto<Forecasted> for OrderedMap<String, Value> {
+    type Error = anyhow::Error;
+
+    fn try_into(self) -> Result<Forecasted, Self::Error> {
+        let values: Vec<&Value> = self.values().collect();
+        let date = values
+            .first()
+            .ok_or_else(|| anyhow!("date not found in the map"))
+            .and_then(|raw_date| match raw_date {
+                Value::Timestamp(duckdb::types::TimeUnit::Microsecond, raw_date) => Ok(raw_date),
+                _ => Err(anyhow!(
+                    "error converting date into OffsetDatetime: {:?}",
+                    raw_date
+                )),
+            })
+            .and_then(|timestamp| {
+                OffsetDateTime::from_unix_timestamp_nanos((*timestamp as i128) * 1000_i128).map_err(
+                    |e| {
+                        anyhow!(
+                            "error parsing forecast date into offsetdatetime: {} {}",
+                            timestamp,
+                            e
+                        )
+                    },
+                )
+            })
+            .map(|val| val.to_offset(UtcOffset::UTC))?;
+
+        let temp_low = values
+            .get(1)
+            .ok_or_else(|| anyhow!("temp_low not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let temp_high = values
+            .get(2)
+            .ok_or_else(|| anyhow!("temp_high not found in the map"))
+            .and_then(|raw_temp| match raw_temp {
+                Value::Int(temp) => Ok(*temp as i64),
+                _ => Err(anyhow!("error converting temp into int: {:?}", raw_temp)),
+            })?;
+
+        let wind_speed = values
+            .get(3)
+            .ok_or_else(|| anyhow!("wind_speed not found in the map"))
+            .and_then(|raw_speed| match raw_speed {
+                Value::Int(speed) => Ok(*speed as i64),
+                _ => Err(anyhow!(
+                    "error converting wind_speed into int: {:?}",
+                    raw_speed
+                )),
+            })?;
+
+        Ok(Forecasted {
+            date,
+            temp_low,
+            temp_high,
+            wind_speed,
+        })
+    }
+}
+
+pub trait ToRawSql {
+    /// Converts a Rust value to a raw, valid DuckDB SQL string (if it is user input, make sure to validate it before adding to the db)
+    fn to_raw_sql(&self) -> String;
+}
+
+impl ToRawSql for Forecasted {
+    fn to_raw_sql(&self) -> String {
+        // Done because the rust library doesn't natively support writing structs to the db just yet;
+        // eventually we should be able to delete this code.
+        // Example of how to write a struct to duckdb: `INSERT INTO t1 VALUES (ROW('a', 42));`
+        let mut vals = String::new();
+        vals.push_str("ROW('");
+        let data_str = self.date.format(&Rfc3339).unwrap();
+        vals.push_str(&data_str);
+        vals.push_str(r#"',"#);
+        vals.push_str(&format!("{}", self.temp_low));
+        vals.push(',');
+        vals.push_str(&format!("{}", self.temp_high));
+        vals.push(',');
+        vals.push_str(&format!("{}", self.wind_speed));
+        vals.push(')');
+        vals
+    }
+}
+
+impl ToSql for Forecasted {
+    fn to_sql(&self) -> duckdb::Result<ToSqlOutput<'_>> {
+        let ordered_struct: OrderedMap<String, Value> = OrderedMap::from(vec![
+            (
+                String::from("date"),
+                Value::Text(self.date.format(&Rfc3339).unwrap()),
+            ),
+            (String::from("temp_low"), Value::Int(self.temp_low as i32)),
+            (String::from("temp_high"), Value::Int(self.temp_high as i32)),
+            (
+                String::from("wind_speed"),
+                Value::Int(self.wind_speed as i32),
+            ),
+        ]);
+        Ok(ToSqlOutput::Owned(Value::Struct(ordered_struct)))
+    }
+}
+
+// Once submitted, entries cannot currently be changed.
+// TODO: decide if we want to add a pubkey for who submitted the entry
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+pub struct AddEventEntry {
+    /// Client needs to provide a valid UUIDv7
+    pub id: Uuid,
+    pub event_id: Uuid,
+    pub expected_observations: Vec<WeatherChoices>,
+}
+
+impl From<AddEventEntry> for WeatherEntry {
+    fn from(value: AddEventEntry) -> Self {
+        WeatherEntry {
+            id: value.id,
+            event_id: value.event_id,
+            expected_observations: value.expected_observations,
+            score: None,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct WeatherEntry {
+    pub id: Uuid,
+    pub event_id: Uuid,
+    pub expected_observations: Vec<WeatherChoices>,
+    /// A score won't appear until the observation_date has begun
+    pub score: Option<i64>,
+}
+
+impl TryInto<WeatherEntry> for &OrderedMap<String, Value> {
+    type Error = anyhow::Error;
+    fn try_into(self) -> Result<WeatherEntry, Self::Error> {
+        debug!("raw weather entry: {:?}", self);
+        let values: Vec<&Value> = self.values().collect();
+        let id = values
+            .first()
+            .ok_or_else(|| anyhow!("id not found in the map"))
+            .and_then(|raw_id| match raw_id {
+                Value::Text(id) => Ok(id),
+                _ => Err(anyhow!(
+                    "error converting weather entry id into string: {:?}",
+                    raw_id
+                )),
+            })
+            .and_then(|id| {
+                Uuid::parse_str(id)
+                    .map_err(|e| anyhow!("error converting weather entry id into uuid: {}", e))
+            })?;
+
+        let event_id = values
+            .get(1)
+            .ok_or_else(|| anyhow!("event_id not found in the map"))
+            .and_then(|raw_id| match raw_id {
+                Value::Text(id) => Ok(id),
+                _ => Err(anyhow!(
+                    "error converting weather event id into string: {:?}",
+                    raw_id
+                )),
+            })
+            .and_then(|id| {
+                Uuid::parse_str(id)
+                    .map_err(|e| anyhow!("error converting weather event id into uuid: {}", e))
+            })?;
+
+        let expected_observations = values
+            .get(2)
+            .ok_or_else(|| anyhow!("expected_observations not found in the map"))
+            .and_then(|raw| match raw {
+                Value::List(expected_observations) => Ok(expected_observations),
+                _ => Err(anyhow!(
+                    "error converting expected_observations into struct: {:?}",
+                    raw
+                )),
+            })
+            .and_then(|weather_choices| {
+                let mut converted = vec![];
+                for weather_choice in weather_choices {
+                    let weather_struct_choice = match weather_choice {
+                        Value::Struct(weather_choice_struct) => weather_choice_struct.try_into()?,
+                        _ => {
+                            return Err(anyhow!(
+                                "error converting weather_choice into struct: {:?}",
+                                weather_choice
+                            ))
+                        }
+                    };
+                    converted.push(weather_struct_choice);
+                }
+                Ok(converted)
+            })?;
+
+        let score = values.get(3).and_then(|raw_id| match raw_id {
+            Value::Int(id) => Some(*id as i64),
+            _ => None,
+        });
+
+        Ok(WeatherEntry {
+            id,
+            event_id,
+            score,
+            expected_observations,
+        })
+    }
+}
+
+impl<'a> TryFrom<&Row<'a>> for WeatherEntry {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        Ok(WeatherEntry {
+            id: row
+                .get::<_, String>(0)
+                .map(|val| Uuid::parse_str(&val))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            event_id: row
+                .get::<_, String>(1)
+                .map(|val| Uuid::parse_str(&val))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            score: row
+                .get::<_, Option<i64>>(2)
+                .map(|val| val.filter(|&val| val != 0))?,
+            expected_observations: vec![],
+        })
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct WeatherChoicesWithEntry {
+    pub entry_id: Uuid,
+    // NOAA weather stations we're using
+    pub stations: String,
+    pub temp_high: Option<ValueOptions>,
+    pub temp_low: Option<ValueOptions>,
+    pub wind_speed: Option<ValueOptions>,
+}
+
+impl<'a> TryFrom<&Row<'a>> for WeatherChoicesWithEntry {
+    type Error = duckdb::Error;
+    fn try_from(row: &Row<'a>) -> Result<Self, Self::Error> {
+        Ok(WeatherChoicesWithEntry {
+            entry_id: row
+                .get::<_, String>(0)
+                .map(|val| Uuid::parse_str(&val))?
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            stations: row
+                .get::<_, String>(1)
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            temp_low: row
+                .get::<_, Option<String>>(2)
+                .map(|raw| raw.and_then(|inner| ValueOptions::try_from(inner).ok()))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(2, Type::Any, Box::new(e)))?,
+            temp_high: row
+                .get::<_, Option<String>>(3)
+                .map(|raw| raw.and_then(|inner| ValueOptions::try_from(inner).ok()))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(3, Type::Any, Box::new(e)))?,
+            wind_speed: row
+                .get::<_, Option<String>>(4)
+                .map(|raw| raw.and_then(|inner| ValueOptions::try_from(inner).ok()))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(4, Type::Any, Box::new(e)))?,
+        })
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub struct WeatherChoices {
+    // NOAA weather stations we're using
+    pub stations: String,
+    pub temp_high: Option<ValueOptions>,
+    pub temp_low: Option<ValueOptions>,
+    pub wind_speed: Option<ValueOptions>,
+}
+
+impl From<WeatherChoicesWithEntry> for WeatherChoices {
+    fn from(value: WeatherChoicesWithEntry) -> Self {
+        Self {
+            stations: value.stations,
+            temp_high: value.temp_high,
+            temp_low: value.temp_low,
+            wind_speed: value.wind_speed,
+        }
+    }
+}
+
+impl<'a> TryFrom<&Row<'a>> for WeatherChoices {
+    type Error = duckdb::Error;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        Ok(WeatherChoices {
+            stations: row
+                .get::<_, String>(0)
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(0, Type::Any, Box::new(e)))?,
+            temp_low: row
+                .get::<_, Option<String>>(1)
+                .map(|raw| raw.and_then(|inner| ValueOptions::try_from(inner).ok()))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(1, Type::Any, Box::new(e)))?,
+            temp_high: row
+                .get::<_, Option<String>>(2)
+                .map(|raw| raw.and_then(|inner| ValueOptions::try_from(inner).ok()))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(2, Type::Any, Box::new(e)))?,
+            wind_speed: row
+                .get::<_, Option<String>>(3)
+                .map(|raw| raw.and_then(|inner| ValueOptions::try_from(inner).ok()))
+                .map_err(|e| duckdb::Error::FromSqlConversionFailure(3, Type::Any, Box::new(e)))?,
+        })
+    }
+}
+
+impl TryInto<WeatherChoices> for &OrderedMap<String, Value> {
+    type Error = anyhow::Error;
+    fn try_into(self) -> Result<WeatherChoices, Self::Error> {
+        debug!("raw weather choices: {:?}", self);
+        let values: Vec<&Value> = self.values().collect();
+        let stations = values
+            .first()
+            .ok_or_else(|| anyhow!("stations not found in the map"))
+            .and_then(|raw_station| match raw_station {
+                Value::Text(station) => Ok(station.clone()),
+                _ => Err(anyhow!(
+                    "error converting station id into string: {:?}",
+                    raw_station
+                )),
+            })?;
+        let temp_low = values.get(1).and_then(|raw_temp| match raw_temp {
+            Value::Text(temp) => ValueOptions::try_from(temp.clone()).ok(),
+            _ => None,
+        });
+        let temp_high = values.get(2).and_then(|raw_temp| match raw_temp {
+            Value::Text(temp) => ValueOptions::try_from(temp.clone()).ok(),
+            _ => None,
+        });
+        let wind_speed = values
+            .get(3)
+            .and_then(|raw_wind_speed| match raw_wind_speed {
+                Value::Text(wind_speed) => ValueOptions::try_from(wind_speed.clone()).ok(),
+                _ => None,
+            });
+        Ok(WeatherChoices {
+            stations,
+            temp_low,
+            temp_high,
+            wind_speed,
+        })
+    }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<Value> for &WeatherChoices {
+    fn into(self) -> Value {
+        let temp_low = match self.temp_low.clone() {
+            Some(val) => Value::Text(val.to_string()),
+            None => Value::Null,
+        };
+        let temp_high = match self.temp_high.clone() {
+            Some(val) => Value::Text(val.to_string()),
+            None => Value::Null,
+        };
+        let wind_speed = match self.wind_speed.clone() {
+            Some(val) => Value::Text(val.to_string()),
+            None => Value::Null,
+        };
+        let ordered_struct: OrderedMap<String, Value> = OrderedMap::from(vec![
+            (String::from("stations"), Value::Text(self.stations.clone())),
+            (String::from("temp_low"), temp_low),
+            (String::from("temp_high"), temp_high),
+            (String::from("wind_speed"), wind_speed),
+        ]);
+        Value::Struct(ordered_struct)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
+pub enum ValueOptions {
+    Over,
+    // Par is what was forecasted for this value
+    Par,
+    Under,
+}
+
+impl std::fmt::Display for ValueOptions {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Over => write!(f, "over"),
+            Self::Par => write!(f, "par"),
+            Self::Under => write!(f, "under"),
+        }
+    }
+}
+
+impl TryFrom<&str> for ValueOptions {
+    type Error = anyhow::Error;
+
+    fn try_from(s: &str) -> Result<Self, Self::Error> {
+        match s {
+            "over" => Ok(ValueOptions::Over),
+            "par" => Ok(ValueOptions::Par),
+            "under" => Ok(ValueOptions::Under),
+            val => Err(anyhow!("invalid option: {}", val)),
+        }
+    }
+}
+
+impl TryFrom<String> for ValueOptions {
+    type Error = anyhow::Error;
+
+    fn try_from(s: String) -> Result<Self, Self::Error> {
+        match s.as_str() {
+            "over" => Ok(ValueOptions::Over),
+            "par" => Ok(ValueOptions::Par),
+            "under" => Ok(ValueOptions::Under),
+            val => Err(anyhow!("invalid option: {}", val)),
+        }
+    }
+}
diff --git a/oracle/src/db/event_db_migrations.rs b/oracle/src/db/event_db_migrations.rs
new file mode 100644
index 0000000..4df7aae
--- /dev/null
+++ b/oracle/src/db/event_db_migrations.rs
@@ -0,0 +1,119 @@
+use duckdb::Connection;
+use log::info;
+
+pub fn run_migrations(conn: &mut Connection) -> Result<(), duckdb::Error> {
+    create_version_table(conn)?;
+    let mut stmt = conn.prepare("SELECT version FROM db_version")?;
+    let mut rows = stmt.query([])?;
+
+    let current_version = if let Some(row) = rows.next()? {
+        row.get(0)?
+    } else {
+        0
+    };
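+
+    // Version 0 means a fresh database: build the full schema. Later versions
+    // get their own migration arm (see the commented-out template at the
+    // bottom of this file).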
+    match current_version {
+        0 => {
+            create_initial_schema(conn)?;
+        }
+        /*1 => {
+            migrate_to_version_2(conn)?;
+        }*/
+        _ => info!("database is up-to-date."),
+    }
+
+    Ok(())
+}
+
+pub fn create_version_table(conn: &mut Connection) -> Result<(), duckdb::Error> {
+    conn.execute(
+        "CREATE TABLE IF NOT EXISTS db_version ( version INTEGER PRIMARY KEY);",
+        [],
+    )?;
+    Ok(())
+}
+
+pub fn create_initial_schema(conn: &mut Connection) -> Result<(), duckdb::Error> {
+    let initial_schema = r#"
+    -- Table of information about the oracle, mostly to prevent multiple keys from being used with the same database
+    -- singleton_constant is a dummy column to ensure there is only one row
+    CREATE TABLE IF NOT EXISTS oracle_metadata
+    (
+        pubkey BLOB NOT NULL UNIQUE PRIMARY KEY,
+        name TEXT NOT NULL UNIQUE,
+        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        singleton_constant BOOLEAN NOT NULL DEFAULT TRUE, -- make sure there is only one row
+        CONSTRAINT one_row_check UNIQUE (singleton_constant)
+    );
+
+    CREATE TABLE IF NOT EXISTS events (
+        id UUID PRIMARY KEY,
+        total_allowed_entries INTEGER NOT NULL,
+        number_of_places_win INTEGER NOT NULL,
+        number_of_values_per_entry INTEGER NOT NULL,
+        signing_date TIMESTAMPTZ NOT NULL,
+        observation_date TIMESTAMPTZ NOT NULL,
+        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        nonce BLOB NOT NULL,
+        event_annoucement BLOB NOT NULL,
+        locations TEXT[] NOT NULL,
+        attestation_signature BLOB
+    );
+
+    CREATE TYPE options AS ENUM ('over', 'par', 'under');
+
+    CREATE TABLE IF NOT EXISTS events_entries
+    (
+        id UUID PRIMARY KEY,
+        event_id UUID NOT NULL REFERENCES events (id),
+        score INTEGER NOT NULL DEFAULT 0,
+        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+    );
+
+    CREATE SEQUENCE id_sequence START 1000;
+    CREATE TABLE IF NOT EXISTS expected_observations
+    (
+        id BIGINT DEFAULT nextval('id_sequence') PRIMARY KEY,
+        entry_id UUID NOT NULL REFERENCES events_entries(id),
+        station TEXT NOT NULL,
+        temp_low options,
+        temp_high options,
+        wind_speed options
+    );
+
+    CREATE TABLE IF NOT EXISTS weather
+    (
+        id UUID PRIMARY KEY,
+        station_id TEXT NOT NULL,
+        observed STRUCT(reading_date TIMESTAMPTZ, temp_low INTEGER, temp_high INTEGER, wind_speed INTEGER),
+        forecasted STRUCT(reading_date TIMESTAMPTZ, temp_low INTEGER, temp_high INTEGER, wind_speed INTEGER),
+        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+    );
+
+    CREATE TABLE IF NOT EXISTS events_weather
+    (
+        id UUID PRIMARY KEY,
+        event_id UUID NOT NULL REFERENCES events (id),
+        weather_id UUID NOT NULL REFERENCES weather (id),
+        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+        updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+    );
+
+    INSERT INTO db_version (version) VALUES (1);
+    "#;
+    conn.execute_batch(initial_schema)?;
+    Ok(())
+}
+
+/* how to add the next sql migration:
+pub fn migrate_to_version_2(conn: &mut Connection) -> Result<(), duckdb::Error> {
+    let migration_2 = r#"UPDATE db_version SET version = 2;"#;
+    conn.execute_batch(migration_2)?;
+    Ok(())
+}
+*/
diff --git a/oracle/src/db/mod.rs b/oracle/src/db/mod.rs
new file mode 100644
index 0000000..65bb4b6
--- /dev/null
+++ b/oracle/src/db/mod.rs
@@ -0,0 +1,7 @@
+pub mod event_data;
+pub mod event_db_migrations;
+pub mod weather_data;
+
+pub use event_data::*;
+pub use event_db_migrations::*;
+pub use weather_data::{Forecast, Observation, Station, WeatherData};
WeatherData}; diff --git a/oracle/src/db/weather_data.rs b/oracle/src/db/weather_data.rs new file mode 100644 index 0000000..615c0cc --- /dev/null +++ b/oracle/src/db/weather_data.rs @@ -0,0 +1,496 @@ +use crate::{file_access, FileAccess, FileData, FileParams, ForecastRequest, ObservationRequest}; +use axum::async_trait; +use duckdb::{ + arrow::array::{Float64Array, Int64Array, RecordBatch, StringArray}, + params_from_iter, Connection, +}; +use regex::Regex; +use scooby::postgres::{select, with, Aliasable, Parameters, Select}; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use time::{format_description::well_known::Rfc3339, Duration, OffsetDateTime}; + +pub struct WeatherAccess { + file_access: Arc, +} + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to query duckdb: {0}")] + Query(#[from] duckdb::Error), + #[error("Failed to format time string: {0}")] + TimeFormat(#[from] time::error::Format), + #[error("Failed to parse time string: {0}")] + TimeParse(#[from] time::error::Parse), + #[error("Failed to access files: {0}")] + FileAccess(#[from] file_access::Error), +} + +#[async_trait] +pub trait WeatherData: Sync + Send { + async fn forecasts_data( + &self, + req: &ForecastRequest, + station_ids: Vec, + ) -> Result, Error>; + async fn observation_data( + &self, + req: &ObservationRequest, + station_ids: Vec, + ) -> Result, Error>; + async fn stations(&self) -> Result, Error>; +} + +impl WeatherAccess { + pub fn new(file_access: Arc) -> Result { + Ok(Self { file_access }) + } + + /// Creates new in-memory connection, making it so we always start with a fresh slate and no possible locking issues + pub fn open_connection(&self) -> Result { + let conn = Connection::open_in_memory()?; + conn.execute_batch("INSTALL parquet; LOAD parquet;")?; + Ok(conn) + } + + pub async fn query( + &self, + select: Select, + params: Vec, + ) -> Result, duckdb::Error> { + let re = Regex::new(r"\$(\d+)").unwrap(); + let binding = select.to_string(); + let fixed_params = re.replace_all(&binding, "?"); + let conn = self.open_connection()?; + let mut stmt = conn.prepare(&fixed_params)?; + let sql_params = params_from_iter(params.iter()); + Ok(stmt.query_arrow(sql_params)?.collect()) + } +} +#[async_trait] +impl WeatherData for WeatherAccess { + async fn forecasts_data( + &self, + req: &ForecastRequest, + station_ids: Vec, + ) -> Result, Error> { + let start_back_one_day = if let Some(start_date) = req.start { + start_date.saturating_sub(Duration::days(1)) + } else { + OffsetDateTime::now_utc().saturating_sub(Duration::days(1)) + }; + let mut file_params: FileParams = req.into(); + file_params.start = Some(start_back_one_day); + let parquet_files = self.file_access.grab_file_names(file_params).await?; + let file_paths = self.file_access.build_file_paths(parquet_files); + if file_paths.is_empty() { + return Ok(vec![]); + } + let mut placeholders = Parameters::new(); + + let mut daily_forecasts = select(( + "station_id", + "DATE_TRUNC('day', begin_time::TIMESTAMP)::TEXT".as_("date"), + "MIN(begin_time)".as_("start_time"), + "MAX(end_time)".as_("end_time"), + "MIN(min_temp)".as_("temp_low"), + "MAX(max_temp)".as_("temp_high"), + "MAX(wind_speed)".as_("wind_speed"), + )) + .from(format!( + "read_parquet(['{}'], union_by_name = true)", + file_paths.join("', '") + )); + + let mut values: Vec = vec![]; + if !station_ids.is_empty() { + daily_forecasts = daily_forecasts.where_(format!( + "station_id IN ({})", + placeholders.next_n(station_ids.len()) + )); + + for station_id in station_ids { 
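+ // What the regex rewrite in `WeatherAccess::query` above does, in isolation:
+ // `scooby` renders Postgres-style `$1` placeholders while duckdb-rs binds
+ // positional `?` parameters (sketch; assumes only the `regex` crate):
+ //
+ //     let re = regex::Regex::new(r"\$(\d+)").unwrap();
+ //     let sql = "SELECT * FROM t WHERE a = $1 AND b = $2";
+ //     assert_eq!(re.replace_all(sql, "?"), "SELECT * FROM t WHERE a = ? AND b = ?");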
+ values.push(station_id); + } + } + if let Some(start) = &req.start { + daily_forecasts = daily_forecasts.where_(format!( + "(DATE_TRUNC('day', begin_time::TIMESTAMP)::TIMESTAMPTZ) >= {}::TIMESTAMPTZ", + placeholders.next() + )); + values.push(start.format(&Rfc3339)?.to_owned()); + } + + if let Some(end) = &req.end { + daily_forecasts = daily_forecasts.where_(format!( + "(DATE_TRUNC('day', end_time::TIMESTAMP)::TIMESTAMPTZ) <= {}::TIMESTAMPTZ", + placeholders.next() + )); + values.push(end.format(&Rfc3339)?.to_owned()); + } + daily_forecasts = daily_forecasts.group_by(("station_id", "begin_time")); + + let query = with("daily_forecasts") + .as_(daily_forecasts) + .select(( + "station_id", + "date", + "MIN(start_time)".as_("start_time"), + "MAX(end_time)".as_("end_time"), + "MIN(temp_low)".as_("temp_low"), + "MAX(temp_high)".as_("temp_high"), + "MAX(wind_speed)".as_("wind_speed"), + )) + .from("daily_forecasts") + .group_by(("station_id", "date")); + + let records = self.query(query, values).await?; + let forecasts: Forecasts = + records + .iter() + .map(|record| record.into()) + .fold(Forecasts::new(), |mut acc, obs| { + acc.merge(obs); + acc + }); + + Ok(forecasts.values) + } + + async fn observation_data( + &self, + req: &ObservationRequest, + station_ids: Vec, + ) -> Result, Error> { + let parquet_files = self.file_access.grab_file_names(req.into()).await?; + let file_paths = self.file_access.build_file_paths(parquet_files); + if file_paths.is_empty() { + return Ok(vec![]); + } + let mut placeholders = Parameters::new(); + let mut query = select(( + "station_id", + "min(generated_at)".as_("start_time"), + "max(generated_at)".as_("end_time"), + "min(temperature_value)".as_("temp_low"), + "max(temperature_value)".as_("temp_high"), + "max(wind_speed)".as_("wind_speed"), + )) + .from(format!( + "read_parquet(['{}'], union_by_name = true)", + file_paths.join("', '") + )); + + let mut values: Vec = vec![]; + if !station_ids.is_empty() { + query = query.where_(format!( + "station_id IN ({})", + placeholders.next_n(station_ids.len()) + )); + + for station_id in station_ids { + values.push(station_id); + } + } + if let Some(start) = &req.start { + query = query.where_(format!( + "generated_at::TIMESTAMPTZ >= {}::TIMESTAMPTZ", + placeholders.next() + )); + values.push(start.format(&Rfc3339)?.to_owned()); + } + + if let Some(end) = &req.end { + query = query.where_(format!( + "generated_at::TIMESTAMPTZ <= {}::TIMESTAMPTZ", + placeholders.next() + )); + values.push(end.format(&Rfc3339)?.to_owned()); + } + query = query.group_by("station_id"); + let records = self.query(query, values).await?; + let observations: Observations = + records + .iter() + .map(|record| record.into()) + .fold(Observations::new(), |mut acc, obs| { + acc.merge(obs); + acc + }); + Ok(observations.values) + } + + async fn stations(&self) -> Result, Error> { + let now = OffsetDateTime::now_utc(); + let start = now.saturating_sub(Duration::hours(4_i64)); + let parquet_files = self + .file_access + .grab_file_names(FileParams { + start: Some(start), + end: Some(now), + observations: Some(true), + forecasts: Some(false), + }) + .await?; + let file_paths = self.file_access.build_file_paths(parquet_files); + if file_paths.is_empty() { + return Ok(vec![]); + } + let mut query = + select(("station_id", "station_name", "latitude", "longitude")).from(format!( + "read_parquet(['{}'], union_by_name = true)", + file_paths.join("', '") + )); + query = query.group_by(("station_id", "station_name", "latitude", "longitude")); + + let records = 
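+ // Roughly the SQL shape the forecast builder above emits (illustrative only;
+ // the parquet file list and WHERE clauses vary with the request):
+ //
+ //     WITH daily_forecasts AS (
+ //         SELECT station_id, DATE_TRUNC('day', begin_time::TIMESTAMP)::TEXT AS date,
+ //                MIN(begin_time) AS start_time, MAX(end_time) AS end_time,
+ //                MIN(min_temp) AS temp_low, MAX(max_temp) AS temp_high,
+ //                MAX(wind_speed) AS wind_speed
+ //         FROM read_parquet(['...'], union_by_name = true)
+ //         WHERE station_id IN (?) AND ...
+ //         GROUP BY station_id, begin_time
+ //     )
+ //     SELECT station_id, date, MIN(start_time) AS start_time, MAX(end_time) AS end_time,
+ //            MIN(temp_low) AS temp_low, MAX(temp_high) AS temp_high,
+ //            MAX(wind_speed) AS wind_speed
+ //     FROM daily_forecasts
+ //     GROUP BY station_id, date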
self.query(query, vec![]).await?; + + let stations: Stations = + records + .iter() + .map(|record| record.into()) + .fold(Stations::new(), |mut acc, obs| { + acc.merge(obs); + acc + }); + + Ok(stations.values) + } +} + +struct Forecasts { + values: Vec, +} + +impl Forecasts { + pub fn new() -> Self { + Forecasts { values: Vec::new() } + } + + pub fn merge(&mut self, forecasts: Forecasts) -> &Forecasts { + self.values.extend(forecasts.values); + self + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Forecast { + pub station_id: String, + pub date: String, + pub start_time: String, + pub end_time: String, + pub temp_low: i64, + pub temp_high: i64, + pub wind_speed: i64, +} + +impl From<&RecordBatch> for Forecasts { + fn from(record_batch: &RecordBatch) -> Self { + let mut forecasts = Vec::new(); + let station_id_arr = record_batch + .column(0) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 0"); + let date_arr = record_batch + .column(1) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 1"); + let start_time_arr = record_batch + .column(2) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 2"); + let end_time_arr = record_batch + .column(3) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 3"); + let temp_low_arr = record_batch + .column(4) + .as_any() + .downcast_ref::() + .expect("Expected Int64Array in column 4"); + let temp_high_arr = record_batch + .column(5) + .as_any() + .downcast_ref::() + .expect("Expected Int64Array in column 5"); + let wind_speed_arr = record_batch + .column(6) + .as_any() + .downcast_ref::() + .expect("Expected Int64Array in column 6"); + + for row_index in 0..record_batch.num_rows() { + let station_id = station_id_arr.value(row_index).to_owned(); + let date = date_arr.value(row_index).to_owned(); + let start_time = start_time_arr.value(row_index).to_owned(); + let end_time = end_time_arr.value(row_index).to_owned(); + let temp_low = temp_low_arr.value(row_index); + let temp_high = temp_high_arr.value(row_index); + let wind_speed = wind_speed_arr.value(row_index); + + forecasts.push(Forecast { + station_id, + date, + start_time, + end_time, + temp_low, + temp_high, + wind_speed, + }); + } + + Self { values: forecasts } + } +} + +struct Observations { + values: Vec, +} + +impl Observations { + pub fn new() -> Self { + Observations { values: Vec::new() } + } + + pub fn merge(&mut self, observations: Observations) -> &Observations { + self.values.extend(observations.values); + self + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Observation { + pub station_id: String, + pub start_time: String, + pub end_time: String, + pub temp_low: f64, + pub temp_high: f64, + pub wind_speed: i64, +} + +impl From<&RecordBatch> for Observations { + fn from(record_batch: &RecordBatch) -> Self { + let mut observations = Vec::new(); + let station_id_arr = record_batch + .column(0) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 0"); + let start_time_arr = record_batch + .column(1) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 1"); + let end_time_arr = record_batch + .column(2) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 2"); + let temp_low_arr = record_batch + .column(3) + .as_any() + .downcast_ref::() + .expect("Expected Float64Array in column 3"); + let temp_high_arr = record_batch + .column(4) + .as_any() + .downcast_ref::() + .expect("Expected Float64Array in column 4"); + let 
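+ // The repeated Arrow downcasts in these `From<&RecordBatch>` impls could be
+ // folded into one helper; a hypothetical sketch of the same pattern:
+ //
+ //     fn column_as<T: std::any::Any>(batch: &RecordBatch, idx: usize) -> &T {
+ //         batch
+ //             .column(idx)
+ //             .as_any()
+ //             .downcast_ref::<T>()
+ //             .unwrap_or_else(|| panic!("unexpected Arrow type in column {}", idx))
+ //     }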
wind_speed_arr = record_batch + .column(5) + .as_any() + .downcast_ref::() + .expect("Expected Int64Array in column 4"); + + for row_index in 0..record_batch.num_rows() { + let station_id = station_id_arr.value(row_index).to_owned(); + let start_time = start_time_arr.value(row_index).to_owned(); + let end_time = end_time_arr.value(row_index).to_owned(); + let temp_low = temp_low_arr.value(row_index); + let temp_high = temp_high_arr.value(row_index); + let wind_speed = wind_speed_arr.value(row_index); + + observations.push(Observation { + station_id, + start_time, + end_time, + temp_low, + temp_high, + wind_speed, + }); + } + + Self { + values: observations, + } + } +} + +struct Stations { + values: Vec, +} + +impl Stations { + pub fn new() -> Self { + Stations { values: Vec::new() } + } + + pub fn merge(&mut self, stations: Stations) -> &Stations { + self.values.extend(stations.values); + self + } +} + +impl From<&RecordBatch> for Stations { + fn from(record_batch: &RecordBatch) -> Self { + let mut stations = Vec::new(); + let station_id_arr = record_batch + .column(0) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 0"); + let station_name_arr = record_batch + .column(1) + .as_any() + .downcast_ref::() + .expect("Expected StringArray in column 1"); + let latitude_arr = record_batch + .column(2) + .as_any() + .downcast_ref::() + .expect("Expected Float64Array in column 2"); + let longitude_arr = record_batch + .column(3) + .as_any() + .downcast_ref::() + .expect("Expected Float64Array in column 3"); + + for row_index in 0..record_batch.num_rows() { + let station_id = station_id_arr.value(row_index).to_owned(); + let station_name = station_name_arr.value(row_index).to_owned(); + let latitude = latitude_arr.value(row_index); + let longitude = longitude_arr.value(row_index); + + stations.push(Station { + station_id, + station_name, + latitude, + longitude, + }); + } + + Self { values: stations } + } +} + +#[derive(Serialize, Deserialize)] +pub struct Station { + pub station_id: String, + pub station_name: String, + pub latitude: f64, + pub longitude: f64, +} diff --git a/oracle/src/file_access.rs b/oracle/src/file_access.rs new file mode 100644 index 0000000..2394d86 --- /dev/null +++ b/oracle/src/file_access.rs @@ -0,0 +1,176 @@ +use axum::async_trait; +use log::trace; +use serde::{Deserialize, Serialize}; +use time::{ + format_description::well_known::Rfc3339, macros::format_description, Date, OffsetDateTime, +}; +use tokio::fs; +use utoipa::IntoParams; + +use crate::{create_folder, subfolder_exists, utc_option_datetime}; + +#[derive(Clone, Deserialize, Serialize, IntoParams)] +pub struct FileParams { + #[serde(with = "utc_option_datetime")] + pub start: Option, + #[serde(with = "utc_option_datetime")] + pub end: Option, + pub observations: Option, + pub forecasts: Option, +} + +pub struct FileAccess { + data_dir: String, +} + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to format time string: {0}")] + TimeFormat(#[from] time::error::Format), + #[error("Failed to parse time string: {0}")] + TimeParse(#[from] time::error::Parse), +} + +#[async_trait] +pub trait FileData: Send + Sync { + async fn grab_file_names(&self, params: FileParams) -> Result, Error>; + fn current_folder(&self) -> String; + fn build_file_paths(&self, file_names: Vec) -> Vec; + fn build_file_path(&self, filename: &str, file_generated_at: OffsetDateTime) -> String; +} + +impl FileAccess { + pub fn new(data_dir: String) -> Self { + Self { data_dir } + } + + fn 
add_filename( + &self, + entry: tokio::fs::DirEntry, + params: &FileParams, + ) -> Result, Error> { + if let Some(filename) = entry.file_name().to_str() { + let file_pieces: Vec = filename.split('_').map(|f| f.to_owned()).collect(); + let created_time = drop_suffix(file_pieces.last().unwrap(), ".parquet"); + trace!("parsed file time:{}", created_time); + + let file_generated_at = OffsetDateTime::parse(&created_time, &Rfc3339)?; + let valid_time_range = is_time_in_range(file_generated_at, params); + let file_data_type = file_pieces.first().unwrap(); + trace!("parsed file type:{}", file_data_type); + + if let Some(observations) = params.observations { + if observations && file_data_type.eq("observations") && valid_time_range { + return Ok(Some(filename.to_owned())); + } + } + + if let Some(forecasts) = params.forecasts { + if forecasts && file_data_type.eq("forecasts") && valid_time_range { + return Ok(Some(filename.to_owned())); + } + } + + if params.forecasts.is_none() && params.observations.is_none() && valid_time_range { + return Ok(Some(filename.to_owned())); + } + } + Ok(None) + } +} + +#[async_trait] +impl FileData for FileAccess { + fn build_file_paths(&self, file_names: Vec) -> Vec { + file_names + .iter() + .map(|file_name| { + let file_pieces: Vec = file_name.split('_').map(|f| f.to_owned()).collect(); + let created_time = drop_suffix(file_pieces.last().unwrap(), ".parquet"); + let file_generated_at = OffsetDateTime::parse(&created_time, &Rfc3339).unwrap(); + format!( + "{}/{}/{}", + self.data_dir, + file_generated_at.date(), + file_name + ) + }) + .collect() + } + + fn current_folder(&self) -> String { + let current_date = OffsetDateTime::now_utc().date(); + let subfolder = format!("{}/{}", self.data_dir, current_date); + if !subfolder_exists(&subfolder) { + create_folder(&subfolder) + } + subfolder + } + + fn build_file_path(&self, filename: &str, file_generated_at: OffsetDateTime) -> String { + format!( + "{}/{}/{}", + self.data_dir, + file_generated_at.date(), + filename + ) + } + + async fn grab_file_names(&self, params: FileParams) -> Result, Error> { + let mut files_names = vec![]; + if let Ok(mut entries) = fs::read_dir(self.data_dir.clone()).await { + while let Ok(Some(entry)) = entries.next_entry().await { + let path = entry.path(); + if !path.is_dir() { + continue; + } + if let Some(date) = entry.file_name().to_str() { + let format = format_description!("[year]-[month]-[day]"); + let directory_date = Date::parse(date, &format)?; + if !is_date_in_range(directory_date, ¶ms) { + continue; + } + + if let Ok(mut subentries) = fs::read_dir(path).await { + while let Ok(Some(subentries)) = subentries.next_entry().await { + if let Some(filename) = self.add_filename(subentries, ¶ms)? 
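+ // The file-naming convention these helpers assume, sketched with a
+ // hypothetical file name of the `{kind}_{RFC 3339 timestamp}.parquet` form:
+ //
+ //     let name = "forecasts_2024-07-07T14:00:00Z.parquet";
+ //     let stamp = drop_suffix(name.split('_').last().unwrap(), ".parquet");
+ //     let generated_at = OffsetDateTime::parse(&stamp, &Rfc3339).unwrap();
+ //     assert_eq!(name.split('_').next().unwrap(), "forecasts");
+ //     assert_eq!(generated_at.date().to_string(), "2024-07-07");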
{ + files_names.push(filename); + } + } + } + } + } + } + Ok(files_names) + } +} + +pub fn drop_suffix(input: &str, suffix: &str) -> String { + if let Some(stripped) = input.strip_suffix(suffix) { + stripped.to_string() + } else { + input.to_string() + } +} + +fn is_date_in_range(compare_to: Date, params: &FileParams) -> bool { + if let Some(start) = params.start { + return compare_to >= start.date(); + } + + if let Some(end) = params.end { + return compare_to <= end.date(); + } + true +} + +fn is_time_in_range(compare_to: OffsetDateTime, params: &FileParams) -> bool { + if let Some(start) = params.start { + return compare_to >= start; + } + + if let Some(end) = params.end { + return compare_to <= end; + } + true +} diff --git a/oracle/src/lib.rs b/oracle/src/lib.rs index 6989d43..2e567aa 100644 --- a/oracle/src/lib.rs +++ b/oracle/src/lib.rs @@ -1,7 +1,16 @@ -mod routes; +mod app_error; +mod db; +mod file_access; +pub mod oracle; +pub mod routes; +mod ser; mod startup; mod utils; +pub use app_error::AppError; +pub use db::*; +pub use file_access::{drop_suffix, Error, FileAccess, FileData, FileParams}; pub use routes::*; +pub use ser::*; pub use startup::*; pub use utils::*; diff --git a/oracle/src/main.rs b/oracle/src/main.rs index cc549af..d8f0f95 100644 --- a/oracle/src/main.rs +++ b/oracle/src/main.rs @@ -1,15 +1,26 @@ +use anyhow::anyhow; use axum::serve; -use oracle::{app, create_folder, get_config_info, setup_logger}; -use slog::info; +use futures::TryFutureExt; +use log::{error, info}; +use oracle::{app, build_app_state, create_folder, get_config_info, get_log_level, setup_logger}; use std::{net::SocketAddr, str::FromStr}; use tokio::{net::TcpListener, signal}; #[tokio::main] async fn main() -> anyhow::Result<()> { let cli: oracle::Cli = get_config_info(); - let logger = setup_logger(&cli); + let log_level = get_log_level(&cli); + setup_logger() + .level(log_level) + .level_for("duckdb", log_level) + .level_for("oracle", log_level) + .level_for("http_response", log_level) + .level_for("http_request", log_level) + .apply()?; let weather_data = cli.weather_dir.unwrap_or(String::from("./weather_data")); - create_folder(&logger, &weather_data.clone()); + create_folder(&weather_data.clone()); + let event_data = cli.event_db.unwrap_or(String::from("./event_data")); + create_folder(&event_data.clone()); let socket_addr = SocketAddr::from_str(&format!( "{}:{}", cli.domain.unwrap_or(String::from("127.0.0.1")), @@ -17,19 +28,30 @@ async fn main() -> anyhow::Result<()> { )) .unwrap(); - let std_listener = std::net::TcpListener::bind(socket_addr)?; - std_listener.set_nonblocking(true)?; - let listener = TcpListener::from_std(std_listener)?; + let listener = TcpListener::bind(socket_addr) + .map_err(|e| anyhow!("error binding to IO socket: {}", e.to_string())) + .await?; - info!(logger, "listening on http://{}", socket_addr); + info!("listening on http://{}", socket_addr); + info!("docs hosted @ http://{}/docs", socket_addr); - let app = app( - logger, + let app_state = build_app_state( cli.remote_url .unwrap_or(String::from("http://127.0.0.1:9100")), cli.ui_dir.unwrap_or(String::from("./ui")), weather_data, - ); + event_data, + cli.oracle_private_key + .unwrap_or(String::from("./oracle_private_key.pem")), + ) + .await + .map_err(|e| { + error!("error building app: {}", e); + e + })?; + + let app = app(app_state.clone()); + serve( listener, app.into_make_service_with_connect_info::(), diff --git a/oracle/src/oracle.rs b/oracle/src/oracle.rs new file mode 100644 index 0000000..2443c48 --- 
/dev/null +++ b/oracle/src/oracle.rs @@ -0,0 +1,699 @@ +use crate::{ + weather_data, ActiveEvent, AddEventEntry, CreateEvent, CreateEventData, Event, EventData, + EventFilter, EventStatus, EventSummary, Forecast, ForecastRequest, Observation, + ObservationRequest, SignEvent, ValueOptions, Weather, WeatherData, WeatherEntry, +}; +use anyhow::anyhow; +use base64::{engine::general_purpose, Engine}; +use dlctix::{ + bitcoin::key::Secp256k1, + musig2::secp256k1::{rand, PublicKey, SecretKey}, + secp::Point, +}; +use log::{debug, error, info, warn}; +use nostr::{key::Keys, nips::nip19::ToBech32}; +use pem_rfc7468::{decode_vec, encode_string}; +use std::{ + cmp, + fs::{metadata, File}, + io::{Read, Write}, + path::Path, + sync::Arc, +}; +use thiserror::Error; +use time::{Duration, OffsetDateTime}; +use utoipa::ToSchema; +use uuid::Uuid; + +#[derive(Error, Debug, ToSchema)] +pub enum Error { + #[error("{0}")] + NotFound(String), + #[error("Failed to get private key: {0}")] + PrivateKey(#[from] anyhow::Error), + #[error("Must have at least one outcome: {0}")] + MinOutcome(String), + #[error("Event maturity epoch must be in the future: {0}")] + EventMaturity(String), + #[error("Failed to convert private key into nostr keys: {0}")] + ConvertKey(#[from] nostr::key::Error), + #[error("Failed to convert public key into nostr base32 format: {0}")] + Base32Key(#[from] nostr::nips::nip19::Error), + #[error("Failed to query datasource: {0}")] + DataQuery(#[from] duckdb::Error), + #[error("Pubkeys in DB doesn't match with .pem")] + MismatchPubkey(String), + #[error("Invalid entry: {0}")] + BadEntry(String), + #[error("Invalid event: {0}")] + BadEvent(anyhow::Error), + #[error("{0}")] + WeatherData(#[from] weather_data::Error), + #[error("Failed to find winning outcome: {0}")] + OutcomeNotFound(String), +} + +pub struct Oracle { + event_data: Arc, + weather_data: Arc, //need this to be a trait so I can mock the weather data + private_key: SecretKey, + public_key: PublicKey, +} + +impl Oracle { + pub async fn new( + event_data: Arc, + weather_data: Arc, + private_key_file_path: &String, + ) -> Result { + let secret_key = get_key(private_key_file_path)?; + let secp = Secp256k1::new(); + let public_key = secret_key.public_key(&secp); + let oracle = Self { + event_data, + weather_data, + private_key: secret_key, + public_key, + }; + oracle.validate_oracle_metadata().await?; + Ok(oracle) + } + + pub async fn validate_oracle_metadata(&self) -> Result<(), Error> { + let stored_public_key = match self.event_data.get_stored_public_key().await { + Ok(key) => key, + Err(duckdb::Error::QueryReturnedNoRows) => { + self.add_meta_data().await?; + return Ok(()); + } + Err(e) => return Err(Error::DataQuery(e)), + }; + if stored_public_key != self.public_key.x_only_public_key().0 { + return Err(Error::MismatchPubkey(format!( + "stored_pubkey: {:?} pem_pubkey: {:?}", + stored_public_key, + self.public_key() + ))); + } + Ok(()) + } + + async fn add_meta_data(&self) -> Result<(), Error> { + self.event_data + .add_oracle_metadata(self.public_key.x_only_public_key().0) + .await + .map_err(Error::DataQuery) + } + + pub fn raw_public_key(&self) -> PublicKey { + self.public_key + } + + pub fn raw_private_key(&self) -> SecretKey { + self.private_key + } + + pub fn public_key(&self) -> String { + let key = Point::from(self.public_key).serialize(); + general_purpose::STANDARD.encode(key) + } + + pub fn npub(&self) -> Result { + let secret_key = self.private_key.display_secret().to_string(); + let keys = Keys::parse(secret_key)?; + + 
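+ // How a client might decode the base64 key served by `public_key()` above
+ // (sketch; assumes the same `base64` engine used in this file):
+ //
+ //     let bytes = general_purpose::STANDARD.decode(oracle.public_key()).unwrap();
+ //     assert_eq!(bytes.len(), 33); // parity byte + 32-byte big-endian X coordinate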
Ok(keys.public_key().to_bech32()?)
+ }
+
+ pub async fn list_events(&self, filter: EventFilter) -> Result<Vec<EventSummary>, Error> {
+ // TODO: add filter/pagination etc.
+ // filter on active event/completed event/time range of event
+ // if we're not careful, this endpoint might bring down the whole server
+ // just due to the amount of data that can come out of it
+ self.event_data
+ .filtered_list_events(filter)
+ .await
+ .map_err(Error::DataQuery)
+ }
+
+ pub async fn get_event(&self, id: &Uuid) -> Result<Event, Error> {
+ match self.event_data.get_event(id).await {
+ Ok(event_data) => Ok(event_data),
+ Err(duckdb::Error::QueryReturnedNoRows) => {
+ Err(Error::NotFound(format!("event with id {} not found", id)))
+ }
+ Err(e) => Err(Error::DataQuery(e)),
+ }
+ }
+
+ pub async fn create_event(&self, event: CreateEvent) -> Result<Event, Error> {
+ let oracle_event = CreateEventData::new(
+ self,
+ event.id,
+ event.observation_date,
+ event.signing_date,
+ event.locations,
+ event.total_allowed_entries,
+ event.number_of_places_win,
+ event.number_of_values_per_entry,
+ )
+ .map_err(Error::BadEvent)?;
+ self.event_data
+ .add_event(oracle_event)
+ .await
+ .map_err(Error::DataQuery)
+ }
+
+ pub async fn add_event_entry(&self, entry: AddEventEntry) -> Result<WeatherEntry, Error> {
+ //TODO: use the builder pattern on WeatherEntry and add the validation there
+ if entry.id.get_version_num() != 7 {
+ return Err(Error::BadEntry(format!(
+ "Client needs to provide a valid UUIDv7 for entry id {}",
+ entry.id
+ )));
+ }
+ let event = match self.event_data.get_event(&entry.event_id).await {
+ Ok(event_data) => Ok(event_data),
+ Err(duckdb::Error::QueryReturnedNoRows) => Err(Error::NotFound(format!(
+ "event with id {} not found",
+ &entry.event_id
+ ))),
+ Err(e) => Err(Error::DataQuery(e)),
+ }?;
+ debug!("event: {:?}", event);
+ // NOTE: It's not the end of the world if we do go over the allowed number of entries,
+ // worst case it just means more people in the event, and it doesn't change our scoring mechanism
+ if event.total_allowed_entries < event.entry_ids.len() as i64 {
+ return Err(Error::BadEntry(format!(
+ "event {} is full, no more entries are allowed",
+ event.id
+ )));
+ }
+ let num_choose_vals = entry.expected_observations.len() as i64;
+ if num_choose_vals > event.number_of_values_per_entry {
+ return Err(Error::BadEntry(format!(
+ "entry_id {0} not valid, too many values chosen: max allowed {1}, chose {2}",
+ entry.id, event.number_of_values_per_entry, num_choose_vals
+ )));
+ }
+ let locations_choose: Vec<String> = entry
+ .expected_observations
+ .clone()
+ .iter()
+ .map(|weather_vals| weather_vals.stations.clone())
+ .collect();
+ let all_valid_locations = locations_choose
+ .iter()
+ .all(|choose| event.locations.contains(choose));
+ if !all_valid_locations {
+ return Err(Error::BadEntry(format!(
+ "entry_id {0} not valid, chosen locations are not in the event",
+ entry.id
+ )));
+ }
+ self.event_data
+ .add_event_entry(entry.into())
+ .await
+ .map_err(Error::DataQuery)
+ }
+
+ pub async fn get_running_events(&self) -> Result<Vec<ActiveEvent>, Error> {
+ match self.event_data.get_active_events().await {
+ Ok(event_data) => Ok(event_data),
+ Err(duckdb::Error::QueryReturnedNoRows) => Ok(vec![]),
+ Err(e) => Err(Error::DataQuery(e)),
+ }
+ }
+
+ pub async fn get_event_entry(
+ &self,
+ event_id: &Uuid,
+ entry_id: &Uuid,
+ ) -> Result<WeatherEntry, Error> {
+ match self.event_data.get_weather_entry(event_id, entry_id).await {
+ Ok(event_data) => Ok(event_data),
+ Err(duckdb::Error::QueryReturnedNoRows) => Err(Error::NotFound(format!(
+ "entry with id {} not found for event {}",
+ &entry_id, &event_id
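+ // A sketch of an entry that passes the `add_event_entry` validation above:
+ // a UUIDv7 id, at most `number_of_values_per_entry` choices, and only
+ // stations from `event.locations` (hypothetical ids and station; assumes the
+ // uuid crate's v7 feature, and omits any AddEventEntry fields not used here):
+ //
+ //     let entry = AddEventEntry {
+ //         id: Uuid::now_v7(),
+ //         event_id,
+ //         expected_observations: vec![WeatherChoices {
+ //             stations: String::from("KPDX"),
+ //             temp_low: Some(ValueOptions::Par),
+ //             temp_high: Some(ValueOptions::Over),
+ //             wind_speed: None,
+ //         }],
+ //     };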
+ ))),
+ Err(e) => Err(Error::DataQuery(e)),
+ }
+ }
+
+ pub async fn etl_data(&self, etl_process_id: usize) -> Result<(), Error> {
+ // NOTE: Making the assumption that the number of active events will remain small, maybe 10 at most for now.
+ // Also assuming it's okay to have duplicate location weather reading rows for now (if this becomes a problem we will need to de-dup).
+ info!("etl_process_id {}, starting etl process", etl_process_id);
+ debug!("etl_process_id {}, getting running events", etl_process_id);
+ let events_to_update = self.get_running_events().await?;
+ debug!(
+ "etl_process_id {}, completed getting running events",
+ etl_process_id
+ );
+ // 1) update weather readings
+ debug!(
+ "etl_process_id {}, updating weather readings",
+ etl_process_id
+ );
+ self.update_event_weather_data(etl_process_id, events_to_update.clone())
+ .await?;
+ debug!(
+ "etl_process_id {}, completed updating weather readings",
+ etl_process_id
+ );
+ debug!("etl_process_id {}, getting active events", etl_process_id);
+ // 2) update entry scores for running & completed events
+ let events: Vec<ActiveEvent> = events_to_update
+ .iter()
+ .filter(|entry| {
+ (entry.status == EventStatus::Running || entry.status == EventStatus::Completed)
+ && entry.attestation.is_none()
+ })
+ .cloned()
+ .collect();
+ debug!(
+ "etl_process_id {}, completed getting active events",
+ etl_process_id
+ );
+ debug!(
+ "etl_process_id {}, updating entry scores for active events",
+ etl_process_id
+ );
+ self.update_active_events_entry_scores(etl_process_id, events)
+ .await?;
+ debug!(
+ "etl_process_id {}, completed entry scores for active events",
+ etl_process_id
+ );
+ debug!("etl_process_id {}, getting events to sign", etl_process_id);
+ // 3) sign results for events that are completed and need it
+ let events_to_sign: Vec<Uuid> = events_to_update
+ .iter()
+ .filter(|event| event.status == EventStatus::Completed && event.attestation.is_none())
+ .map(|event| event.id)
+ .collect();
+ debug!(
+ "etl_process_id {}, completed getting events to sign",
+ etl_process_id
+ );
+ debug!(
+ "etl_process_id {}, adding oracle signature to events",
+ etl_process_id
+ );
+ self.add_oracle_signature(etl_process_id, events_to_sign)
+ .await?;
+ debug!(
+ "etl_process_id {}, completed adding oracle signature to events",
+ etl_process_id
+ );
+ info!("etl_process_id {}, completed etl process", etl_process_id);
+ Ok(())
+ }
+
+ async fn update_event_weather_data(
+ &self,
+ etl_process_id: usize,
+ events_to_update: Vec<ActiveEvent>,
+ ) -> Result<(), Error> {
+ for event in events_to_update {
+ info!(
+ "updating weather data for event {} (status {}) in process {}",
+ event.id, event.status, etl_process_id
+ );
+ let forecast_data = self.event_forecast_data(&event).await?;
+ let weather = if event.observation_date > OffsetDateTime::now_utc() {
+ add_only_forecast_data(&event, forecast_data).await?
+ } else {
+ let observation_data = self.event_observation_data(&event).await?;
+ add_forecast_data_and_observation_data(&event, forecast_data, observation_data)
+ .await?
+ };
+ self.event_data
+ .update_weather_station_data(event.id, weather)
+ .await?;
+ info!(
+ "completed weather data update for event {} (status {}) in process {}",
+ event.id, event.status, etl_process_id
+ );
+ }
+ info!(
+ "completed updating all event weather data in etl process {}",
+ etl_process_id
+ );
+ Ok(())
+ }
+
+ async fn update_active_events_entry_scores(
+ &self,
+ etl_process_id: usize,
+ events: Vec<ActiveEvent>,
+ ) -> Result<(), Error> {
+ info!(
+ "starting to update all event entry scores in etl process {}",
+ etl_process_id
+ );
+ for event in events {
+ self.update_entry_scores(etl_process_id, event).await?;
+ }
+ info!(
+ "completed updating all event entry scores in etl process {}",
+ etl_process_id
+ );
+ Ok(())
+ }
+
+ async fn update_entry_scores(
+ &self,
+ etl_process_id: usize,
+ event: ActiveEvent,
+ ) -> Result<(), Error> {
+ let entries: Vec<WeatherEntry> =
+ self.event_data.get_event_weather_entries(&event.id).await?;
+
+ let observation_data = self.event_observation_data(&event).await?;
+ let forecast_data = self.event_forecast_data(&event).await?;
+ let mut entry_scores: Vec<(Uuid, i64)> = vec![];
+
+ for entry in entries {
+ if entry.event_id != event.id {
+ warn!("entry {} not in this event {}", entry.id, event.id);
+ continue;
+ }
+
+ // Scoring logic: a match on Par is worth 2 pts, on Over 1 pt, on Under 1 pt
+ let mut total_score = 0;
+ let expected_observations = entry.expected_observations.clone();
+ let locations = event.locations.clone();
+ for location in locations {
+ let Some(choice) = expected_observations
+ .iter()
+ .find(|expected| expected.stations == location)
+ else {
+ continue;
+ };
+
+ let Some(forecast) = forecast_data
+ .iter()
+ .find(|forecast| forecast.station_id == location)
+ else {
+ warn!("no forecast found for: {}", location);
+ continue;
+ };
+
+ let Some(observation) = observation_data
+ .iter()
+ .find(|observation| observation.station_id == location)
+ else {
+ warn!("no observation found for: {}", location);
+ continue;
+ };
+
+ if let Some(high_temp) = choice.temp_high.clone() {
+ match high_temp {
+ ValueOptions::Over => {
+ if forecast.temp_high < observation.temp_high.round() as i64 {
+ total_score += 1;
+ }
+ }
+ ValueOptions::Par => {
+ if forecast.temp_high == observation.temp_high.round() as i64 {
+ total_score += 2;
+ }
+ }
+ ValueOptions::Under => {
+ if forecast.temp_high > observation.temp_high.round() as i64 {
+ total_score += 1;
+ }
+ }
+ }
+ }
+
+ if let Some(temp_low) = choice.temp_low.clone() {
+ match temp_low {
+ ValueOptions::Over => {
+ if forecast.temp_low < observation.temp_low.round() as i64 {
+ total_score += 1;
+ }
+ }
+ ValueOptions::Par => {
+ if forecast.temp_low == observation.temp_low.round() as i64 {
+ total_score += 2;
+ }
+ }
+ ValueOptions::Under => {
+ if forecast.temp_low > observation.temp_low.round() as i64 {
+ total_score += 1;
+ }
+ }
+ }
+ }
+
+ if let Some(wind_speed) = choice.wind_speed.clone() {
+ match wind_speed {
+ ValueOptions::Over => {
+ if forecast.wind_speed < observation.wind_speed {
+ total_score += 1;
+ }
+ }
+ ValueOptions::Par => {
+ if forecast.wind_speed == observation.wind_speed {
+ total_score += 2;
+ }
+ }
+ ValueOptions::Under => {
+ if forecast.wind_speed > observation.wind_speed {
+ total_score += 1;
+ }
+ }
+ }
+ }
+ }
+
+ info!(
+ "updating entry {} for event {} to score {} in etl process {}",
+ entry.id, event.id, total_score, etl_process_id
+ );
+
+ entry_scores.push((entry.id, total_score));
+ }
+
+ self.event_data.update_entry_scores(entry_scores).await?;
+
+ Ok(())
+ }
+
+ 
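+ // Worked example of the scoring above (hypothetical numbers): forecast
+ // temp_high 80, observed temp_high 82.4 (rounds to 82). An entry that picked
+ // Over scores 1 point (80 < 82), Par scores 0 (80 != 82), Under scores 0;
+ // Par only pays its 2 points when the forecast equals the rounded observation.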
async fn add_oracle_signature(
+ &self,
+ etl_process_id: usize,
+ event_ids: Vec<Uuid>,
+ ) -> Result<(), Error> {
+ let mut events: Vec<SignEvent> = self.event_data.get_events_to_sign(event_ids).await?;
+ debug!("events: {:?}", events);
+ for event in events.iter_mut() {
+ let mut entries = self.event_data.get_event_weather_entries(&event.id).await?;
+ entries.sort_by_key(|entry| cmp::Reverse(entry.score));
+ let winners: Vec<i64> = entries
+ .iter()
+ .take(event.number_of_places_win as usize)
+ .map(|entry| entry.score.unwrap_or_default()) // default means '0' was winning score
+ .collect();
+
+ let winner_bytes: Vec<u8> = winners
+ .clone()
+ .into_iter()
+ .flat_map(|num| num.to_be_bytes())
+ .collect();
+
+ if event.signing_date < OffsetDateTime::now_utc() {
+ let outcome_index = event
+ .event_annoucement
+ .outcome_messages
+ .iter()
+ .position(|outcome| *outcome == winner_bytes);
+
+ let winners_str = winners
+ .iter()
+ .map(|score| score.to_string())
+ .collect::<Vec<String>>()
+ .join(",");
+
+ let Some(index) = outcome_index else {
+ // Something went horribly wrong; use the info from this log line to track refunding users based on DLC expiry (we set it to 1 week)
+ error!("final result doesn't match any of the possible outcomes: event_id {} winners {} expiry {:?}", event.id, winners_str, event.event_annoucement.expiry);
+
+ return Err(Error::OutcomeNotFound(format!(
+ "event_id {} outcome winning scores {} expiry {:?}",
+ event.id, winners_str, event.event_annoucement.expiry
+ )));
+ };
+
+ info!(
+ "winners: event_id {} winning scores {}",
+ event.id, winners_str
+ );
+
+ event.attestation = event.event_annoucement.attestation_secret(
+ index,
+ self.private_key,
+ event.nonce,
+ );
+ self.event_data.update_event_attestation(event).await?;
+ }
+ }
+ info!(
+ "completed adding oracle signature to all events that need it in etl process {}",
+ etl_process_id
+ );
+ Ok(())
+ }
+
+ async fn event_forecast_data(&self, event: &ActiveEvent) -> Result<Vec<Forecast>, Error> {
+ let start_date = event.observation_date;
+ // Assumes all events are only a day long, may change in the future
+ let end_date = event.observation_date.saturating_add(Duration::days(1));
+ // Assumes locations have been sanitized when the event was created
+ let station_ids = event.locations.join(",");
+ let forecast_requests = ForecastRequest {
+ start: Some(start_date),
+ end: Some(end_date),
+ station_ids: station_ids.clone(),
+ };
+ self.weather_data
+ .forecasts_data(&forecast_requests, event.locations.clone())
+ .await
+ .map_err(Error::WeatherData)
+ }
+
+ async fn event_observation_data(&self, event: &ActiveEvent) -> Result<Vec<Observation>, Error> {
+ let start_date = event.observation_date;
+ // Assumes all events are only a day long, may change in the future
+ let end_date = event.observation_date.saturating_add(Duration::days(1));
+ let observation_requests = ObservationRequest {
+ start: Some(start_date),
+ end: Some(end_date),
+ station_ids: event.locations.join(","),
+ };
+ self.weather_data
+ .observation_data(&observation_requests, event.locations.clone())
+ .await
+ .map_err(Error::WeatherData)
+ }
+}
+
+async fn add_only_forecast_data(
+ event: &ActiveEvent,
+ forecast_data: Vec<Forecast>,
+) -> Result<Vec<Weather>, Error> {
+ let mut all_weather: Vec<Weather> = vec![];
+
+ for station_id in event.locations.clone() {
+ if let Some(forecast) = forecast_data
+ .iter()
+ .find(|forecast| forecast.station_id == station_id.clone())
+ {
+ let weather = Weather {
+ station_id: station_id.clone(),
+ observed: None,
+ forecasted: forecast.try_into().map_err(Error::WeatherData)?,
+ };
+ 
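+ // Sketch of the outcome encoding used by `add_oracle_signature` above: the
+ // top-N scores are flattened to big-endian bytes and matched byte-for-byte
+ // against the announcement's outcome messages (illustrative values):
+ //
+ //     let winners: Vec<i64> = vec![5, 3];
+ //     let winner_bytes: Vec<u8> = winners.iter().flat_map(|n| n.to_be_bytes()).collect();
+ //     assert_eq!(winner_bytes.len(), 16); // two i64 scores, 8 bytes each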
all_weather.push(weather); + } + } + Ok(all_weather) +} + +async fn add_forecast_data_and_observation_data( + event: &ActiveEvent, + forecast_data: Vec, + observation_data: Vec, +) -> Result, Error> { + let mut all_weather: Vec = vec![]; + + for station_id in event.locations.clone() { + if let Some(forecast) = forecast_data + .iter() + .find(|forecast| forecast.station_id == station_id.clone()) + { + let weather = if let Some(observation) = observation_data + .iter() + .find(|observation| observation.station_id == station_id.clone()) + { + Weather { + station_id: station_id.clone(), + observed: observation + .try_into() + .map(Some) + .map_err(Error::WeatherData)?, + forecasted: forecast.try_into().map_err(Error::WeatherData)?, + } + } else { + Weather { + station_id: station_id.clone(), + observed: None, + forecasted: forecast.try_into().map_err(Error::WeatherData)?, + } + }; + all_weather.push(weather); + } + } + Ok(all_weather) +} + +fn get_key(file_path: &String) -> Result { + if !is_pem_file(file_path) { + return Err(anyhow!("not a '.pem' file extension")); + } + + if metadata(file_path).is_ok() { + read_key(file_path) + } else { + let key = generate_new_key(); + save_key(file_path, key)?; + Ok(key) + } +} + +fn generate_new_key() -> SecretKey { + SecretKey::new(&mut rand::thread_rng()) +} + +fn is_pem_file(file_path: &String) -> bool { + Path::new(file_path) + .extension() + .and_then(|s| s.to_str()) + .map_or(false, |ext| ext == "pem") +} + +fn read_key(file_path: &String) -> Result { + let mut file = File::open(file_path)?; + let mut pem_data = String::new(); + file.read_to_string(&mut pem_data)?; + + // Decode the PEM content + let (label, decoded_key) = decode_vec(pem_data.as_bytes()).map_err(|e| anyhow!(e))?; + + // Verify the label + if label != "EC PRIVATE KEY" { + return Err(anyhow!("Invalid key format")); + } + + // Parse the private key + let secret_key = SecretKey::from_slice(&decoded_key)?; + Ok(secret_key) +} + +fn save_key(file_path: &String, key: SecretKey) -> Result<(), anyhow::Error> { + let pem = encode_string( + "EC PRIVATE KEY", + pem_rfc7468::LineEnding::LF, + &key.secret_bytes(), + ) + .map_err(|e| anyhow!("Failed to encode key: {}", e))?; + + // Private key file path needs to end in ".pem" + let mut file = File::create(file_path)?; + file.write_all(pem.as_bytes())?; + Ok(()) +} diff --git a/oracle/src/routes/events/mod.rs b/oracle/src/routes/events/mod.rs new file mode 100644 index 0000000..5880d5d --- /dev/null +++ b/oracle/src/routes/events/mod.rs @@ -0,0 +1,3 @@ +pub mod oracle_routes; + +pub use oracle_routes::*; diff --git a/oracle/src/routes/events/oracle_routes.rs b/oracle/src/routes/events/oracle_routes.rs new file mode 100644 index 0000000..64da8ae --- /dev/null +++ b/oracle/src/routes/events/oracle_routes.rs @@ -0,0 +1,218 @@ +use crate::{ + oracle, AddEventEntry, AppState, CreateEvent, Event, EventFilter, EventSummary, WeatherEntry, +}; +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + response::{ErrorResponse, IntoResponse, Response}, + Json, +}; +use log::{error, info}; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::{borrow::Borrow, sync::Arc}; +use tokio::task; +use utoipa::ToSchema; +use uuid::Uuid; + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct Base64Pubkey { + /// base64 representation of the compressed DER encoding of the publickey. 
This consists of a parity + /// byte at the beginning, which is either `0x02` (even parity) or `0x03` (odd parity), + /// followed by the big-endian encoding of the point's X-coordinate. + pub key: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +pub struct Pubkey { + /// nostr npub in string format + pub key: String, +} + +#[utoipa::path( + get, + path = "/oracle/pubkey", + responses( + (status = OK, description = "Successfully retrieved oracle's pubkey data", body = Base64Pubkey), + ))] +pub async fn get_pubkey( + State(state): State>, +) -> Result, ErrorResponse> { + Ok(Json(Base64Pubkey { + key: state.oracle.public_key(), + })) +} + +#[utoipa::path( + get, + path = "/oracle/npub", + responses( + (status = OK, description = "Successfully retrieved oracle's nostr npub", body = Pubkey), + ))] +pub async fn get_npub(State(state): State>) -> Result, ErrorResponse> { + Ok(Json(Pubkey { + key: state.oracle.npub()?, + })) +} + +#[utoipa::path( + get, + path = "/oracle/events", + params(EventFilter), + responses( + (status = OK, description = "Successfully retrieved oracle events", body = Vec), + ))] +pub async fn list_events( + State(state): State>, + Query(filter): Query, +) -> Result>, ErrorResponse> { + state + .oracle + .list_events(filter) + .await + .map(Json) + .map_err(|e| { + error!("error retrieving event data: {}", e); + e.into() + }) +} +#[utoipa::path( + post, + path = "/oracle/events", + request_body = CreateEvent, + responses( + (status = OK, description = "Successfully created oracle weather event", body = Event), + (status = BAD_REQUEST, description = "Invalid event to be created"), + ))] +pub async fn create_event( + State(state): State>, + Json(body): Json, +) -> Result, ErrorResponse> { + state + .oracle + .create_event(body) + .await + .map(Json) + .map_err(|e| { + error!("error saving event data: {}", e); + e.into() + }) +} + +#[utoipa::path( + get, + path = "/oracle/events/{event_id}", + params( + ("event_id" = Uuid, Path, description = "ID of a weather event the oracle is tracking"), + ), + responses( + (status = OK, description = "Successfully retrieved event data", body = Event), + (status = NOT_FOUND, description = "Event not found for the provided ID"), + ))] +pub async fn get_event( + State(state): State>, + Path(event_id): Path, +) -> Result, ErrorResponse> { + state + .oracle + .get_event(&event_id) + .await + .map(Json) + .map_err(|e| { + error!("error event data: {}", e); + e.into() + }) +} + +#[utoipa::path( + post, + path = "/oracle/events/{event_id}/entry", + request_body = AddEventEntry, + responses( + (status = OK, description = "Successfully add entry into oracle weather event", body = WeatherEntry), + (status = BAD_REQUEST, description = "Invalid entry to be created"), + ))] +pub async fn add_event_entry( + State(state): State>, + Path(_event_id): Path, + Json(body): Json, +) -> Result, ErrorResponse> { + state + .oracle + .add_event_entry(body) + .await + .map(Json) + .map_err(|e| { + error!("error adding entry to event: {}", e); + e.into() + }) +} + +#[utoipa::path( + get, + path = "/oracle/events/{event_id}/entry/{entry_id}", + params( + ("event_id" = Uuid, Path, description = "ID of a weather event the oracle is tracking"), + ("entry_id" = Uuid, Path, description = "ID of a entry into weather event the oracle is tracking"), + ), + responses( + (status = OK, description = "Successfully retrieved event entry", body = WeatherEntry), + (status = NOT_FOUND, description = "Event entry not found for the provided ID"), + ))] +pub 
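+ // Illustrative request against the event-creation route above. The host,
+ // port, and field values are hypothetical; the body fields follow the
+ // `CreateEvent` usage in oracle.rs, and the exact wire format depends on the
+ // serde derives for that type:
+ //
+ //     curl -X POST http://<host>:<port>/oracle/events \
+ //       -H 'Content-Type: application/json' \
+ //       -d '{"id": "<uuidv7>", "signing_date": "2024-07-09T00:00:00Z",
+ //            "observation_date": "2024-07-08T00:00:00Z", "locations": ["KPDX"],
+ //            "total_allowed_entries": 100, "number_of_places_win": 3,
+ //            "number_of_values_per_entry": 6}'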
async fn get_event_entry( + State(state): State>, + Path((event_id, entry_id)): Path<(Uuid, Uuid)>, +) -> Result, ErrorResponse> { + state + .oracle + .get_event_entry(&event_id, &entry_id) + .await + .map(Json) + .map_err(|e| { + error!("error weather entry data: {}", e); + e.into() + }) +} + +#[utoipa::path( + post, + path = "/oracle/update", + responses( + (status = OK, description = "Successfully kicked off oracle data update"), + (status = INTERNAL_SERVER_ERROR, description = "Failed to kick off oracle data update"), + ))] +pub async fn update_data(State(state): State>) -> Result { + let mut rng = rand::thread_rng(); + let etl_process_id: usize = rng.gen(); + let oracle_cpy = state.oracle.clone(); + // Kick off etl job, note when shutting down we don't do anything to wait for the task to complete at the moment + task::spawn(async move { + info!("starting etl process: {}", etl_process_id); + match oracle_cpy.etl_data(etl_process_id).await { + Ok(()) => info!("completed etl process: {}", etl_process_id), + Err(e) => error!("failed etl process: {} {}", etl_process_id, e), + } + }); + Ok(StatusCode::OK) +} + +impl IntoResponse for oracle::Error { + fn into_response(self) -> Response { + let (status, error_message) = match self.borrow() { + oracle::Error::NotFound(_) => (StatusCode::NOT_FOUND, self.to_string()), + oracle::Error::MinOutcome(_) => (StatusCode::BAD_REQUEST, self.to_string()), + oracle::Error::EventMaturity(_) => (StatusCode::BAD_REQUEST, self.to_string()), + oracle::Error::BadEntry(_) => (StatusCode::BAD_REQUEST, self.to_string()), + oracle::Error::BadEvent(_) => (StatusCode::BAD_REQUEST, self.to_string()), + _ => ( + StatusCode::INTERNAL_SERVER_ERROR, + String::from("internal server error"), + ), + }; + let body = Json(json!({ + "error": error_message, + })); + (status, body).into_response() + } +} diff --git a/oracle/src/routes/files/download.rs b/oracle/src/routes/files/download.rs index ef24e31..3630560 100644 --- a/oracle/src/routes/files/download.rs +++ b/oracle/src/routes/files/download.rs @@ -1,10 +1,13 @@ use axum::{ - body::Body, extract::{Path, State}, http::{HeaderValue, Request, StatusCode} + body::Body, + extract::{Path, State}, + http::{HeaderValue, Request, StatusCode}, }; use hyper::{ - header::{CONTENT_DISPOSITION, CONTENT_TYPE}, HeaderMap, + header::{CONTENT_DISPOSITION, CONTENT_TYPE}, + HeaderMap, }; -use slog::error; +use log::error; use std::sync::Arc; use time::{format_description::well_known::Rfc3339, OffsetDateTime}; use tokio::fs::File; @@ -12,6 +15,17 @@ use tokio_util::io::ReaderStream; use crate::{drop_suffix, AppState}; +#[utoipa::path( + get, + path = "file/{filename}", + params( + ("filename" = String, Path, description = "Name of file to download"), + ), + responses( + (status = OK, description = "Successfully retrieved file", body = Body), + (status = BAD_REQUEST, description = "Invalid file name"), + (status = INTERNAL_SERVER_ERROR, description = "Failed to retrieve file by name") + ))] pub async fn download( State(state): State>, Path(filename): Path, @@ -21,8 +35,8 @@ pub async fn download( let created_time = drop_suffix(file_pieces.last().unwrap(), ".parquet"); let file_generated_at = OffsetDateTime::parse(&created_time, &Rfc3339).map_err(|e| { error!( - state.logger, - "error stored filename does not have a valid rfc3339 datetime in name: {}", e + "error stored filename does not have a valid rfc3339 datetime in name: {}", + e ); ( StatusCode::BAD_REQUEST, @@ -33,15 +47,12 @@ pub async fn download( ) })?; // split filename for the date, 
add that to the path - let file_path = format!( - "{}/{}/{}", - state.data_dir, - file_generated_at.date(), - filename - ); + let file_path = state + .file_access + .build_file_path(&filename, file_generated_at); let file = File::open(file_path).await.map_err(|err| { - error!(state.logger, "error opening file: {}", err); + error!("error opening file: {}", err); (StatusCode::NOT_FOUND, format!("File not found: {}", err)) })?; diff --git a/oracle/src/routes/files/get_names.rs b/oracle/src/routes/files/get_names.rs index 1fddf08..793dd7a 100644 --- a/oracle/src/routes/files/get_names.rs +++ b/oracle/src/routes/files/get_names.rs @@ -1,197 +1,41 @@ -use crate::AppState; -use anyhow::{anyhow, Error}; +use crate::{AppError, AppState, FileParams}; use axum::{ extract::{Query, State}, - response::{IntoResponse, Response}, Json, }; -use hyper::StatusCode; +use log::error; use serde::{Deserialize, Serialize}; -use slog::{error, trace, Logger}; use std::sync::Arc; -use time::{ - format_description::well_known::Rfc3339, macros::format_description, Date, OffsetDateTime, -}; -use tokio::fs; - -// Make our own error that wraps `anyhow::Error`. -pub struct AppError(anyhow::Error); - -impl IntoResponse for AppError { - fn into_response(self) -> Response { - ( - StatusCode::INTERNAL_SERVER_ERROR, - format!("Error getting file names: {}", self.0), - ) - .into_response() - } -} +use utoipa::ToSchema; -impl From for AppError -where - E: Into, -{ - fn from(err: E) -> Self { - Self(err.into()) - } -} - -#[derive(Serialize)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct Files { pub file_names: Vec, } -#[derive(Clone, Deserialize)] -pub struct FileParams { - pub start: Option, - pub end: Option, - pub observations: Option, - pub forecasts: Option, -} - +#[utoipa::path( + get, + path = "file/{filename}", + params( + FileParams + ), + responses( + (status = OK, description = "Successfully retrieved file names", body = Files), + (status = BAD_REQUEST, description = "Invalid file params"), + (status = INTERNAL_SERVER_ERROR, description = "Failed to retrieve file names") + ))] pub async fn files( State(state): State>, Query(params): Query, ) -> Result, AppError> { - validate_params(¶ms)?; - let file_names = grab_file_names(&state.logger, &state.data_dir, params) + let file_names = state + .file_access + .grab_file_names(params) .await .map_err(|e| { - error!(state.logger, "error getting filenames: {}", e); + error!("error getting filenames: {}", e); e })?; let files = Files { file_names }; Ok(Json(files)) } - -fn validate_params(params: &FileParams) -> Result<(), anyhow::Error> { - if let Some(start) = params.start.clone() { - OffsetDateTime::parse(&start, &Rfc3339) - .map_err(|_| anyhow!("start param value is not a value Rfc3339 datetime"))?; - } - - if let Some(end) = params.end.clone() { - OffsetDateTime::parse(&end, &Rfc3339) - .map_err(|_| anyhow!("end param value is not a value Rfc3339 datetime"))?; - } - - Ok(()) -} -//Body::from_stream -pub async fn grab_file_names( - logger: &Logger, - data_dir: &str, - params: FileParams, -) -> Result, Error> { - let mut files_names = vec![]; - if let Ok(mut entries) = fs::read_dir(data_dir).await { - while let Ok(Some(entry)) = entries.next_entry().await { - let path = entry.path(); - if !path.is_dir() { - continue; - } - if let Some(date) = entry.file_name().to_str() { - let format = format_description!("[year]-[month]-[day]"); - let directory_date = Date::parse(date, &format).map_err(|e| { - anyhow!( - "error stored directory name does not have a valid date in 
name: {}", - e - ) - })?; - if !is_date_in_range(directory_date, ¶ms) { - continue; - } - - if let Ok(mut subentries) = fs::read_dir(path).await { - while let Ok(Some(subentries)) = subentries.next_entry().await { - if let Some(filename) = add_filename(logger, subentries, ¶ms)? { - files_names.push(filename); - } - } - } - } - } - } - Ok(files_names) -} - -fn add_filename( - logger: &Logger, - entry: tokio::fs::DirEntry, - params: &FileParams, -) -> Result, Error> { - if let Some(filename) = entry.file_name().to_str() { - let file_pieces: Vec = filename.split('_').map(|f| f.to_owned()).collect(); - let created_time = drop_suffix(file_pieces.last().unwrap(), ".parquet"); - trace!(logger, "parsed file time:{}", created_time); - - let file_generated_at = OffsetDateTime::parse(&created_time, &Rfc3339).map_err(|e| { - anyhow!( - "error stored filename does not have a valid rfc3339 datetime in name: {}", - e - ) - })?; - let valid_time_range = is_time_in_range(file_generated_at, params); - let file_data_type = file_pieces.first().unwrap(); - trace!(logger, "parsed file type:{}", file_data_type); - - if let Some(observations) = params.observations { - if observations && file_data_type.eq("observations") && valid_time_range { - return Ok(Some(filename.to_owned())); - } - } - - if let Some(forecasts) = params.forecasts { - if forecasts && file_data_type.eq("forecasts") && valid_time_range { - return Ok(Some(filename.to_owned())); - } - } - - if params.forecasts.is_none() && params.observations.is_none() && valid_time_range { - return Ok(Some(filename.to_owned())); - } - } - Ok(None) -} - -pub fn drop_suffix(input: &str, suffix: &str) -> String { - if let Some(stripped) = input.strip_suffix(suffix) { - stripped.to_string() - } else { - input.to_string() - } -} - -fn is_date_in_range(compare_to: Date, params: &FileParams) -> bool { - if let Some(start) = params.start.clone() { - return match OffsetDateTime::parse(&start, &Rfc3339) { - Ok(start) => compare_to >= start.date(), - Err(_) => false, - }; - } - - if let Some(end) = params.end.clone() { - return match OffsetDateTime::parse(&end, &Rfc3339) { - Ok(end) => compare_to <= end.date(), - Err(_) => false, - }; - } - true -} - -fn is_time_in_range(compare_to: OffsetDateTime, params: &FileParams) -> bool { - if let Some(start) = params.start.clone() { - return match OffsetDateTime::parse(&start, &Rfc3339) { - Ok(start) => compare_to >= start, - Err(_) => false, - }; - } - - if let Some(end) = params.end.clone() { - return match OffsetDateTime::parse(&end, &Rfc3339) { - Ok(end) => compare_to <= end, - Err(_) => false, - }; - } - true -} diff --git a/oracle/src/routes/files/mod.rs b/oracle/src/routes/files/mod.rs index 4728755..0dafc30 100644 --- a/oracle/src/routes/files/mod.rs +++ b/oracle/src/routes/files/mod.rs @@ -1,6 +1,6 @@ -mod download; -mod get_names; -mod upload; +pub mod download; +pub mod get_names; +pub mod upload; pub use download::*; pub use get_names::*; diff --git a/oracle/src/routes/files/upload.rs b/oracle/src/routes/files/upload.rs index 1b15d87..47f4584 100644 --- a/oracle/src/routes/files/upload.rs +++ b/oracle/src/routes/files/upload.rs @@ -1,15 +1,24 @@ -use std::sync::Arc; - use axum::{ extract::{Multipart, Path, State}, http::StatusCode, }; -use slog::{error, info, Logger}; -use time::OffsetDateTime; +use log::{error, info}; +use std::sync::Arc; use tokio::{fs::File, io::AsyncWriteExt}; -use crate::{create_folder, subfolder_exists, AppState}; +use crate::AppState; +#[utoipa::path( + post, + path = "file/{file_name}", + params( 
+ ("file_name" = String, Path, description = "Name of file to upload"), + ), + responses( + (status = OK, description = "Successfully uploaded weather data file"), + (status = BAD_REQUEST, description = "Invalid file"), + (status = INTERNAL_SERVER_ERROR, description = "Failed to save file") + ))] pub async fn upload( State(state): State>, Path(file_name): Path, @@ -20,7 +29,7 @@ pub async fn upload( } while let Some(field) = multipart.next_field().await.unwrap() { let data = field.bytes().await.map_err(|err| { - error!(state.logger, "error getting file's bytes: {}", err); + error!("error getting file's bytes: {}", err); ( StatusCode::INTERNAL_SERVER_ERROR, format!("Failed to get file's bytes: {}", err), @@ -28,29 +37,29 @@ pub async fn upload( })?; info!( - state.logger, "length of `{}` is {} mb", file_name, bytes_to_mb(data.len()) ); - let current_folder = current_folder(&state.logger, &state.data_dir); + let current_folder = state.file_access.current_folder(); let path = std::path::Path::new(¤t_folder).join(&file_name); // Create a new file and write the data to it let mut file = File::create(&path).await.map_err(|err| { - error!(state.logger, "error creating file: {}", err); + error!("error creating file: {}", err); ( StatusCode::INTERNAL_SERVER_ERROR, format!("Failed to create file: {}", err), ) })?; file.write_all(&data).await.map_err(|err| { - error!(state.logger, "error creating file: {}", err); + error!("error creating file: {}", err); ( StatusCode::INTERNAL_SERVER_ERROR, format!("Failed to write to file: {}", err), ) })?; } + Ok(()) } @@ -58,15 +67,6 @@ fn bytes_to_mb(bytes: usize) -> f64 { bytes as f64 / 1_048_576.0 } -fn current_folder(logger: &Logger, root_path: &str) -> String { - let current_date = OffsetDateTime::now_utc().date(); - let subfolder = format!("{}/{}", root_path, current_date); - if !subfolder_exists(&subfolder) { - create_folder(logger, &subfolder) - } - subfolder -} - // to prevent directory traversal attacks we ensure the path consists of exactly one normal component fn path_is_valid(path: &str) -> bool { let path = std::path::Path::new(path); diff --git a/oracle/src/routes/mod.rs b/oracle/src/routes/mod.rs index 4122cbc..1bc7a80 100644 --- a/oracle/src/routes/mod.rs +++ b/oracle/src/routes/mod.rs @@ -1,7 +1,9 @@ -mod files; +pub mod events; +pub mod files; mod home; -mod stations; +pub mod stations; +pub use events::*; pub use files::*; pub use home::*; pub use stations::*; diff --git a/oracle/src/routes/stations/forecasts.rs b/oracle/src/routes/stations/forecasts.rs deleted file mode 100644 index 7c4fd5b..0000000 --- a/oracle/src/routes/stations/forecasts.rs +++ /dev/null @@ -1,222 +0,0 @@ -use anyhow::anyhow; -use axum::{ - extract::{Query, State}, - Json, -}; -use duckdb::{ - arrow::array::{Int64Array, RecordBatch, StringArray}, - params_from_iter, Connection, Error, Result, -}; -use regex::Regex; -use scooby::postgres::{select, with, Aliasable, Parameters}; -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use time::{format_description::well_known::Rfc3339, Duration, OffsetDateTime}; - -use crate::{build_file_paths, grab_file_names, AppError, AppState, FileParams}; - -#[derive(Clone, Deserialize)] -pub struct ForecastRequest { - pub start: Option, - pub end: Option, - pub station_ids: String, -} - -#[derive(Serialize, Deserialize)] -pub struct Forecast { - pub station_id: String, - pub date: String, - pub start_time: String, - pub end_time: String, - pub temp_low: i64, - pub temp_high: i64, - pub wind_speed: i64, -} - -impl ForecastRequest { - 
fn validate(&self) -> Result<(), anyhow::Error> { - if let Some(start) = self.start.clone() { - OffsetDateTime::parse(&start, &Rfc3339) - .map_err(|_| anyhow!("start param value is not a value Rfc3339 datetime"))?; - } - - if let Some(end) = self.end.clone() { - OffsetDateTime::parse(&end, &Rfc3339) - .map_err(|_| anyhow!("end param value is not a value Rfc3339 datetime"))?; - } - - Ok(()) - } -} - -pub async fn forecasts( - State(state): State>, - Query(req): Query, -) -> Result>, AppError> { - req.validate()?; - let start_back_one_day = if let Some(start) = req.start.clone() { - let start_date = OffsetDateTime::parse(&start, &Rfc3339)?; - start_date - .saturating_sub(Duration::days(1)) - .format(&Rfc3339)? - } else { - OffsetDateTime::now_utc() - .saturating_sub(Duration::days(1)) - .format(&Rfc3339)? - }; - let parquet_files = grab_file_names( - &state.logger, - &state.data_dir, - FileParams { - start: Some(start_back_one_day), - end: req.end.clone(), - observations: Some(false), - forecasts: Some(true), - }, - ) - .await?; - let file_paths = build_file_paths(state.data_dir.clone(), parquet_files); - let station_ids: Vec = req.station_ids.split(',').map(|id| id.to_owned()).collect(); - let conn = Connection::open_in_memory()?; - conn.execute_batch("INSTALL parquet; LOAD parquet;")?; - - let rbs = run_forecasts_query(&conn, file_paths, &req, station_ids)?; - - let forecasts: Vec = rbs.iter().flat_map(record_batch_to_vec).collect(); - Ok(Json(forecasts)) -} - -fn run_forecasts_query( - conn: &Connection, - file_paths: Vec, - req: &ForecastRequest, - station_ids: Vec, -) -> Result, Error> { - let mut placeholders = Parameters::new(); - - let mut daily_forecasts = select(( - "station_id", - "DATE_TRUNC('day', begin_time::TIMESTAMP)::TEXT".as_("date"), - "MIN(begin_time)".as_("start_time"), - "MAX(end_time)".as_("end_time"), - "MIN(min_temp)".as_("temp_low"), - "MAX(max_temp)".as_("temp_high"), - "MAX(wind_speed)".as_("wind_speed"), - )) - .from(format!( - "read_parquet(['{}'], union_by_name = true)", - file_paths.join("', '") - )); - - let mut values: Vec = vec![]; - if !station_ids.is_empty() { - daily_forecasts = daily_forecasts.where_(format!( - "station_id IN ({})", - placeholders.next_n(station_ids.len()) - )); - - for station_id in station_ids { - values.push(station_id); - } - } - if let Some(start) = &req.start { - daily_forecasts = daily_forecasts.where_(format!( - "(DATE_TRUNC('day', begin_time::TIMESTAMP)::TIMESTAMPTZ) >= {}::TIMESTAMPTZ", - placeholders.next() - )); - values.push(start.to_owned()); - } - - if let Some(end) = &req.end { - daily_forecasts = daily_forecasts.where_(format!( - "(DATE_TRUNC('day', end_time::TIMESTAMP)::TIMESTAMPTZ) <= {}::TIMESTAMPTZ", - placeholders.next() - )); - values.push(end.to_owned()); - } - daily_forecasts = daily_forecasts.group_by(("station_id", "begin_time")); - - let query = with("daily_forecasts") - .as_(daily_forecasts) - .select(( - "station_id", - "date", - "MIN(start_time)".as_("start_time"), - "MAX(end_time)".as_("end_time"), - "MIN(temp_low)".as_("temp_low"), - "MAX(temp_high)".as_("temp_high"), - "MAX(wind_speed)".as_("wind_speed"), - )) - .from("daily_forecasts") - .group_by(("station_id", "date")) - .to_string(); - - let re = Regex::new(r"\$(\d+)").unwrap(); - let binding = query.to_string(); - let fixed_params = re.replace_all(&binding, "?"); - let mut stmt = conn.prepare(&fixed_params)?; - let sql_params = params_from_iter(values.iter()); - let records: Vec = stmt.query_arrow(sql_params)?.collect(); - Ok(records) -} - -fn 
record_batch_to_vec(record_batch: &RecordBatch) -> Vec { - let mut forecasts = Vec::new(); - let station_id_arr = record_batch - .column(0) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 0"); - let date_arr = record_batch - .column(1) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 1"); - let start_time_arr = record_batch - .column(2) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 2"); - let end_time_arr = record_batch - .column(3) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 3"); - let temp_low_arr = record_batch - .column(4) - .as_any() - .downcast_ref::() - .expect("Expected Int64Array in column 4"); - let temp_high_arr = record_batch - .column(5) - .as_any() - .downcast_ref::() - .expect("Expected Int64Array in column 5"); - let wind_speed_arr = record_batch - .column(6) - .as_any() - .downcast_ref::() - .expect("Expected Int64Array in column 6"); - - for row_index in 0..record_batch.num_rows() { - let station_id = station_id_arr.value(row_index).to_owned(); - let date = date_arr.value(row_index).to_owned(); - let start_time = start_time_arr.value(row_index).to_owned(); - let end_time = end_time_arr.value(row_index).to_owned(); - let temp_low = temp_low_arr.value(row_index); - let temp_high = temp_high_arr.value(row_index); - let wind_speed = wind_speed_arr.value(row_index); - - forecasts.push(Forecast { - station_id, - date, - start_time, - end_time, - temp_low, - temp_high, - wind_speed, - }); - } - - forecasts -} diff --git a/oracle/src/routes/stations/get_stations.rs b/oracle/src/routes/stations/get_stations.rs deleted file mode 100644 index 0abf40c..0000000 --- a/oracle/src/routes/stations/get_stations.rs +++ /dev/null @@ -1,108 +0,0 @@ -use axum::{extract::State, Json}; -use duckdb::{ - arrow::{ - array::{Float64Array, StringArray}, - record_batch::RecordBatch, - }, - Connection, Error, Result, -}; -use scooby::postgres::select; -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use time::{format_description::well_known::Rfc3339, Duration, OffsetDateTime}; - -use super::build_file_paths; -use crate::{grab_file_names, AppError, AppState, FileParams}; - -#[derive(Serialize, Deserialize)] -pub struct Station { - pub station_id: String, - pub station_name: String, - pub latitude: f64, - pub longitude: f64, -} - -pub async fn get_stations( - State(state): State>, -) -> Result>, AppError> { - let now = OffsetDateTime::now_utc(); - let end = now.format(&Rfc3339)?; - let start = now - .saturating_sub(Duration::hours(4_i64)) - .format(&Rfc3339)?; - let parquet_files = grab_file_names( - &state.logger, - &state.data_dir.clone(), - FileParams { - start: Some(start), - end: Some(end), - observations: Some(true), - forecasts: Some(false), - }, - ) - .await?; - let file_paths = build_file_paths(state.data_dir.clone(), parquet_files); - - let conn = Connection::open_in_memory()?; - conn.execute_batch("INSTALL parquet; LOAD parquet;")?; - - let rbs = run_stations_query(&conn, file_paths)?; - - let stations: Vec = rbs.iter().flat_map(record_batch_to_vec).collect(); - Ok(Json(stations)) -} - -fn run_stations_query( - conn: &Connection, - file_paths: Vec, -) -> Result, Error> { - let mut query = select(("station_id", "station_name", "latitude", "longitude")).from(format!( - "read_parquet(['{}'], union_by_name = true)", - file_paths.join("', '") - )); - query = query.group_by(("station_id", "station_name", "latitude", "longitude")); - - let mut stmt = 
conn.prepare(&query.to_string())?; - let records: Vec = stmt.query_arrow([])?.collect(); - Ok(records) -} - -fn record_batch_to_vec(record_batch: &RecordBatch) -> Vec { - let mut stations = Vec::new(); - let station_id_arr = record_batch - .column(0) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 0"); - let station_name_arr = record_batch - .column(1) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 1"); - let latitude_arr = record_batch - .column(2) - .as_any() - .downcast_ref::() - .expect("Expected Float64Array in column 2"); - let longitude_arr = record_batch - .column(3) - .as_any() - .downcast_ref::() - .expect("Expected Float64Array in column 3"); - - for row_index in 0..record_batch.num_rows() { - let station_id = station_id_arr.value(row_index).to_owned(); - let station_name = station_name_arr.value(row_index).to_owned(); - let latitude = latitude_arr.value(row_index); - let longitude = longitude_arr.value(row_index); - - stations.push(Station { - station_id, - station_name, - latitude, - longitude, - }); - } - - stations -} diff --git a/oracle/src/routes/stations/mod.rs b/oracle/src/routes/stations/mod.rs index ec04c70..51438d7 100644 --- a/oracle/src/routes/stations/mod.rs +++ b/oracle/src/routes/stations/mod.rs @@ -1,7 +1,3 @@ -mod forecasts; -mod get_stations; -mod observations; +pub mod weather_routes; -pub use forecasts::*; -pub use get_stations::*; -pub use observations::*; +pub use weather_routes::*; diff --git a/oracle/src/routes/stations/observations.rs b/oracle/src/routes/stations/observations.rs deleted file mode 100644 index 5ef3f2d..0000000 --- a/oracle/src/routes/stations/observations.rs +++ /dev/null @@ -1,203 +0,0 @@ -use anyhow::anyhow; -use axum::{ - extract::{Query, State}, - Json, -}; -use duckdb::{ - arrow::{ - array::{Float64Array, Int64Array, StringArray}, - record_batch::RecordBatch, - }, - params_from_iter, Connection, Error, Result, -}; -use regex::Regex; -use scooby::postgres::{select, Aliasable, Parameters}; -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use time::{format_description::well_known::Rfc3339, OffsetDateTime}; - -use crate::{drop_suffix, grab_file_names, AppError, AppState, FileParams}; - -#[derive(Clone, Deserialize)] -pub struct ObservationRequest { - pub start: Option, - pub end: Option, - pub station_ids: String, -} - -impl ObservationRequest { - fn validate(&self) -> Result<(), anyhow::Error> { - if let Some(start) = self.start.clone() { - OffsetDateTime::parse(&start, &Rfc3339) - .map_err(|_| anyhow!("start param value is not a value Rfc3339 datetime"))?; - } - - if let Some(end) = self.end.clone() { - OffsetDateTime::parse(&end, &Rfc3339) - .map_err(|_| anyhow!("end param value is not a value Rfc3339 datetime"))?; - } - - Ok(()) - } -} - -#[derive(Serialize, Deserialize)] -pub struct Observation { - pub station_id: String, - pub start_time: String, - pub end_time: String, - pub temp_low: f64, - pub temp_high: f64, - pub wind_speed: i64, -} - -pub async fn observations( - State(state): State>, - Query(req): Query, -) -> Result>, AppError> { - req.validate()?; - - let parquet_files = grab_file_names( - &state.logger, - &state.data_dir.clone(), - FileParams { - start: req.start.clone(), - end: req.end.clone(), - observations: Some(true), - forecasts: Some(false), - }, - ) - .await?; - let file_paths = build_file_paths(state.data_dir.clone(), parquet_files); - let station_ids: Vec = req.station_ids.split(',').map(|id| id.to_owned()).collect(); - let conn = 
Connection::open_in_memory()?; - conn.execute_batch("INSTALL parquet; LOAD parquet;")?; - - let rbs = run_observations_query(&conn, file_paths, &req, station_ids)?; - - let observations: Vec = rbs.iter().flat_map(record_batch_to_vec).collect(); - Ok(Json(observations)) -} - -pub fn build_file_paths(data_dir: String, file_names: Vec) -> Vec { - file_names - .iter() - .map(|file_name| { - let file_pieces: Vec = file_name.split('_').map(|f| f.to_owned()).collect(); - let created_time = drop_suffix(file_pieces.last().unwrap(), ".parquet"); - let file_generated_at = OffsetDateTime::parse(&created_time, &Rfc3339).unwrap(); - format!("{}/{}/{}", data_dir, file_generated_at.date(), file_name) - }) - .collect() -} - -fn run_observations_query( - conn: &Connection, - file_paths: Vec, - req: &ObservationRequest, - station_ids: Vec, -) -> Result, Error> { - let mut placeholders = Parameters::new(); - let mut query = select(( - "station_id", - "min(generated_at)".as_("start_time"), - "max(generated_at)".as_("end_time"), - "min(temperature_value)".as_("temp_low"), - "max(temperature_value)".as_("temp_high"), - "max(wind_speed)".as_("wind_speed"), - )) - .from(format!( - "read_parquet(['{}'], union_by_name = true)", - file_paths.join("', '") - )); - - let mut values: Vec = vec![]; - if !station_ids.is_empty() { - query = query.where_(format!( - "station_id IN ({})", - placeholders.next_n(station_ids.len()) - )); - - for station_id in station_ids { - values.push(station_id); - } - } - if let Some(start) = &req.start { - query = query.where_(format!( - "generated_at::TIMESTAMPTZ >= {}::TIMESTAMPTZ", - placeholders.next() - )); - values.push(start.to_owned()); - } - - if let Some(end) = &req.end { - query = query.where_(format!( - "generated_at::TIMESTAMPTZ <= {}::TIMESTAMPTZ", - placeholders.next() - )); - values.push(end.to_owned()); - } - query = query.group_by("station_id"); - - let re = Regex::new(r"\$(\d+)").unwrap(); - let binding = query.to_string(); - let fixed_params = re.replace_all(&binding, "?"); - let mut stmt = conn.prepare(&fixed_params)?; - let sql_params = params_from_iter(values.iter()); - let records: Vec = stmt.query_arrow(sql_params)?.collect(); - Ok(records) -} - -fn record_batch_to_vec(record_batch: &RecordBatch) -> Vec { - let mut observations = Vec::new(); - let station_id_arr = record_batch - .column(0) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 0"); - let start_time_arr = record_batch - .column(1) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 1"); - let end_time_arr = record_batch - .column(2) - .as_any() - .downcast_ref::() - .expect("Expected StringArray in column 2"); - let temp_low_arr = record_batch - .column(3) - .as_any() - .downcast_ref::() - .expect("Expected Float64Array in column 3"); - let temp_high_arr = record_batch - .column(4) - .as_any() - .downcast_ref::() - .expect("Expected Float64Array in column 4"); - let wind_speed_arr = record_batch - .column(5) - .as_any() - .downcast_ref::() - .expect("Expected Int64Array in column 4"); - - for row_index in 0..record_batch.num_rows() { - let station_id = station_id_arr.value(row_index).to_owned(); - let start_time = start_time_arr.value(row_index).to_owned(); - let end_time = end_time_arr.value(row_index).to_owned(); - let temp_low = temp_low_arr.value(row_index); - let temp_high = temp_high_arr.value(row_index); - let wind_speed = wind_speed_arr.value(row_index); - - observations.push(Observation { - station_id, - start_time, - end_time, - temp_low, - temp_high, 
-            wind_speed,
-        });
-    }
-
-    observations
-}
diff --git a/oracle/src/routes/stations/weather_routes.rs b/oracle/src/routes/stations/weather_routes.rs
new file mode 100644
index 0000000..415add0
--- /dev/null
+++ b/oracle/src/routes/stations/weather_routes.rs
@@ -0,0 +1,134 @@
+use std::sync::Arc;
+
+use ::serde::Deserialize;
+use axum::{
+    extract::{Query, State},
+    Json,
+};
+use serde::Serialize;
+use time::OffsetDateTime;
+use utoipa::IntoParams;
+
+use crate::{utc_option_datetime, AppError, AppState, FileParams, Forecast, Observation, Station};
+
+#[utoipa::path(
+    get,
+    path = "stations/forecasts",
+    params(
+        ForecastRequest
+    ),
+    responses(
+        (status = OK, description = "Successfully retrieved forecast data", body = Vec<Forecast>),
+        (status = BAD_REQUEST, description = "Times are not in RFC3339 format"),
+        (status = INTERNAL_SERVER_ERROR, description = "Failed to retrieve weather data")
+    ))]
+pub async fn forecasts(
+    State(state): State<Arc<AppState>>,
+    Query(req): Query<ForecastRequest>,
+) -> Result<Json<Vec<Forecast>>, AppError> {
+    let forecasts = state
+        .weather_db
+        .forecasts_data(&req, req.station_ids())
+        .await?;
+
+    Ok(Json(forecasts))
+}
+
+#[derive(Clone, Serialize, Deserialize, IntoParams)]
+pub struct ForecastRequest {
+    #[serde(with = "utc_option_datetime")]
+    #[serde(default)]
+    pub start: Option<OffsetDateTime>,
+    #[serde(with = "utc_option_datetime")]
+    #[serde(default)]
+    pub end: Option<OffsetDateTime>,
+    pub station_ids: String,
+}
+
+impl ForecastRequest {
+    pub fn station_ids(&self) -> Vec<String> {
+        self.station_ids
+            .split(',')
+            .map(|id| id.to_owned())
+            .collect()
+    }
+}
+
+impl From<&ForecastRequest> for FileParams {
+    fn from(value: &ForecastRequest) -> Self {
+        FileParams {
+            start: value.start,
+            end: value.end,
+            observations: Some(false),
+            forecasts: Some(true),
+        }
+    }
+}
+
+#[derive(Clone, Serialize, Deserialize, IntoParams)]
+pub struct ObservationRequest {
+    #[serde(with = "utc_option_datetime")]
+    #[serde(default)]
+    pub start: Option<OffsetDateTime>,
+    #[serde(with = "utc_option_datetime")]
+    #[serde(default)]
+    pub end: Option<OffsetDateTime>,
+    pub station_ids: String,
+}
+
+impl ObservationRequest {
+    pub fn station_ids(&self) -> Vec<String> {
+        self.station_ids
+            .split(',')
+            .map(|id| id.to_owned())
+            .collect()
+    }
+}
+
+impl From<&ObservationRequest> for FileParams {
+    fn from(value: &ObservationRequest) -> Self {
+        FileParams {
+            start: value.start,
+            end: value.end,
+            observations: Some(true),
+            forecasts: Some(false),
+        }
+    }
+}
+
+#[utoipa::path(
+    get,
+    path = "stations/observations",
+    params(
+        ObservationRequest
+    ),
+    responses(
+        (status = OK, description = "Successfully retrieved observation data", body = Vec<Observation>),
+        (status = BAD_REQUEST, description = "Times are not in RFC3339 format"),
+        (status = INTERNAL_SERVER_ERROR, description = "Failed to retrieve weather data")
+    ))]
+pub async fn observations(
+    State(state): State<Arc<AppState>>,
+    Query(req): Query<ObservationRequest>,
+) -> Result<Json<Vec<Observation>>, AppError> {
+    let observations = state
+        .weather_db
+        .observation_data(&req, req.station_ids())
+        .await?;
+
+    Ok(Json(observations))
+}
+
+#[utoipa::path(
+    get,
+    path = "stations",
+    responses(
+        (status = OK, description = "Successfully retrieved weather stations", body = Vec<Station>),
+        (status = INTERNAL_SERVER_ERROR, description = "Failed to retrieve weather stations from data")
+    ))]
+pub async fn get_stations(
+    State(state): State<Arc<AppState>>,
+) -> Result<Json<Vec<Station>>, AppError> {
+    let stations: Vec<Station> = state.weather_db.stations().await?;
+    Ok(Json(stations))
+}
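// A minimal, standalone sketch (not part of the patch) of the comma-splitting
// both station_ids() helpers above perform; the free function here is
// illustrative. One behavior worth noting: splitting an empty string still
// yields a single empty id, so an empty `station_ids` query parameter does not
// mean "no station filter".
fn station_ids(raw: &str) -> Vec<String> {
    raw.split(',').map(|id| id.to_owned()).collect()
}

fn main() {
    assert_eq!(
        station_ids("PFNO,KSAW"),
        vec!["PFNO".to_string(), "KSAW".to_string()]
    );
    // One empty entry, not an empty vec:
    assert_eq!(station_ids(""), vec![String::new()]);
}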
diff --git a/oracle/src/ser/mod.rs b/oracle/src/ser/mod.rs
new file mode 100644
index 0000000..631def6
--- /dev/null
+++ b/oracle/src/ser/mod.rs
@@ -0,0 +1,2 @@
+pub mod utc_datetime;
+pub mod utc_option_datetime;
diff --git a/oracle/src/ser/utc_datetime.rs b/oracle/src/ser/utc_datetime.rs
new file mode 100644
index 0000000..0c4203a
--- /dev/null
+++ b/oracle/src/ser/utc_datetime.rs
@@ -0,0 +1,19 @@
+use serde::{Deserialize, Deserializer, Serializer};
+use time::{format_description::well_known::Rfc3339, OffsetDateTime};
+
+pub fn serialize<S>(value: &OffsetDateTime, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    let s = value.format(&Rfc3339).map_err(serde::ser::Error::custom)?;
+    serializer.serialize_str(&s)
+}
+
+#[allow(dead_code)]
+pub fn deserialize<'de, D>(deserializer: D) -> Result<OffsetDateTime, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let s = String::deserialize(deserializer)?;
+    OffsetDateTime::parse(&s, &Rfc3339).map_err(serde::de::Error::custom)
+}
diff --git a/oracle/src/ser/utc_option_datetime.rs b/oracle/src/ser/utc_option_datetime.rs
new file mode 100644
index 0000000..ac73062
--- /dev/null
+++ b/oracle/src/ser/utc_option_datetime.rs
@@ -0,0 +1,31 @@
+use serde::{Deserialize, Deserializer, Serializer};
+use time::{format_description::well_known::Rfc3339, OffsetDateTime};
+
+pub fn serialize<S>(value: &Option<OffsetDateTime>, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    if let Some(datetime) = value {
+        let s = datetime
+            .format(&Rfc3339)
+            .map_err(serde::ser::Error::custom)?;
+        serializer.serialize_str(&s)
+    } else {
+        serializer.serialize_str("null")
+    }
+}
+
+#[allow(dead_code)]
+pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<OffsetDateTime>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let s = String::deserialize(deserializer)?;
+    if s == "null" {
+        Ok(None)
+    } else {
+        OffsetDateTime::parse(&s, &Rfc3339)
+            .map(Some)
+            .map_err(serde::de::Error::custom)
+    }
+}
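// A hedged usage sketch of the serde adapter defined above: a field opts into
// the RFC 3339 codec with #[serde(with = ...)], exactly as ForecastRequest and
// ObservationRequest do in weather_routes.rs. The struct here is illustrative,
// not part of the patch. Note the Option codec round-trips a missing value as
// the *string* "null" rather than JSON null; the deserializer mirrors that
// choice, so both sides must agree on it.
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

use crate::utc_option_datetime;

#[derive(Serialize, Deserialize)]
struct Window {
    #[serde(with = "utc_option_datetime")]
    #[serde(default)]
    start: Option<OffsetDateTime>,
}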
diff --git a/oracle/src/startup.rs b/oracle/src/startup.rs
index dd37b02..37d84da 100644
--- a/oracle/src/startup.rs
+++ b/oracle/src/startup.rs
@@ -1,41 +1,111 @@
-use std::sync::Arc;
-
-use crate::{download, files, forecasts, get_stations, index_handler, observations, upload};
+use crate::{
+    add_event_entry, create_event, db, download, files, forecasts, get_event, get_event_entry,
+    get_npub, get_pubkey, get_stations, index_handler, list_events, observations,
+    oracle::{self, Oracle},
+    routes, update_data, upload,
+    weather_data::WeatherAccess,
+    EventData, FileAccess, FileData, WeatherData,
+};
+use anyhow::anyhow;
 use axum::{
-    extract::DefaultBodyLimit,
+    body::Body,
+    extract::{DefaultBodyLimit, Request},
+    middleware::{self, Next},
+    response::IntoResponse,
     routing::{get, post},
     Router,
 };
 use hyper::Method;
-use slog::Logger;
+use log::info;
+use std::sync::Arc;
 use tower_http::{
     cors::{Any, CorsLayer},
     services::{ServeDir, ServeFile},
 };
+use utoipa::OpenApi;
+use utoipa_scalar::{Scalar, Servable};
 
 #[derive(Clone)]
 pub struct AppState {
-    pub logger: Logger,
-    pub data_dir: String,
     pub ui_dir: String,
     pub remote_url: String,
+    pub file_access: Arc<dyn FileData + Send + Sync>,
+    pub weather_db: Arc<dyn WeatherData + Send + Sync>,
+    pub oracle: Arc<Oracle>,
+}
+
+#[derive(OpenApi)]
+#[openapi(
+    paths(
+        routes::events::oracle_routes::get_npub,
+        routes::events::oracle_routes::get_pubkey,
+        routes::events::oracle_routes::list_events,
+        routes::events::oracle_routes::create_event,
+        routes::events::oracle_routes::get_event,
+        routes::events::oracle_routes::add_event_entry,
+        routes::events::oracle_routes::get_event_entry,
+        routes::events::oracle_routes::update_data,
+        routes::stations::weather_routes::forecasts,
+        routes::stations::weather_routes::observations,
+        routes::stations::weather_routes::get_stations,
+        routes::files::download::download,
+        routes::files::get_names::files,
+        routes::files::upload::upload,
+    ),
+    components(
+        schemas(
+            routes::files::get_names::Files,
+            oracle::Error,
+            db::event_data::Event,
+            db::event_data::WeatherEntry,
+            db::event_data::AddEventEntry,
+            db::event_data::CreateEvent,
+            routes::events::oracle_routes::Pubkey,
+            routes::events::oracle_routes::Base64Pubkey
+        )
+    ),
+    tags(
+        (name = "noaa data oracle api", description = "a RESTful api that acts as an oracle for NOAA forecast and observation data")
+    )
+)]
+struct ApiDoc;
+
+pub async fn build_app_state(
+    remote_url: String,
+    ui_dir: String,
+    data_dir: String,
+    event_dir: String,
+    private_key_file_path: String,
+) -> Result<AppState, anyhow::Error> {
+    let file_access = Arc::new(FileAccess::new(data_dir));
+    let weather_db = Arc::new(
+        WeatherAccess::new(file_access.clone())
+            .map_err(|e| anyhow!("error setting up weather data: {}", e))?,
+    );
+
+    let event_db = Arc::new(
+        EventData::new(&event_dir).map_err(|e| anyhow!("error setting up event data: {}", e))?,
+    );
+    let oracle = Arc::new(Oracle::new(event_db, weather_db.clone(), &private_key_file_path).await?);
+
+    Ok(AppState {
+        ui_dir,
+        remote_url,
+        weather_db,
+        file_access,
+        oracle,
+    })
+}
 
-pub fn app(logger: Logger, remote_url: String, ui_dir: String, data_dir: String) -> Router {
+pub fn app(app_state: AppState) -> Router {
+    let api_docs = ApiDoc::openapi();
+    // The ui folder needs to be generated and have this relative path from where the binary is being run
+    let serve_dir = ServeDir::new("ui").not_found_service(ServeFile::new(app_state.ui_dir.clone()));
     let cors = CorsLayer::new()
         // allow `GET` and `POST` when accessing the resource
         .allow_methods([Method::GET, Method::POST])
         // allow requests from any origin
         .allow_origin(Any);
-
-    // The ui folder needs to be generated and have this relative path from where the binary is being run
-    let serve_dir = ServeDir::new("ui").not_found_service(ServeFile::new(ui_dir.clone()));
-    let app_state = AppState {
-        logger,
-        data_dir,
-        ui_dir,
-        remote_url,
-    };
     Router::new()
         .route("/files", get(files))
         .route("/file/:file_name", get(download))
@@ -43,10 +113,39 @@
         .route("/stations", get(get_stations))
         .route("/stations/forecasts", get(forecasts))
         .route("/stations/observations", get(observations))
+        .route("/oracle/npub", get(get_npub))
+        .route("/oracle/pubkey", get(get_pubkey))
+        .route("/oracle/update", post(update_data))
+        .route("/oracle/events", get(list_events))
+        .route("/oracle/events", post(create_event))
+        .route("/oracle/events/:event_id", get(get_event))
+        .route("/oracle/events/:event_id/entry", post(add_event_entry))
+        .route(
+            "/oracle/events/:event_id/entry/:entry_id",
+            get(get_event_entry),
+        )
+        .layer(middleware::from_fn(log_request))
         .layer(DefaultBodyLimit::max(30 * 1024 * 1024)) // max is in bytes
         .route("/", get(index_handler))
         .with_state(Arc::new(app_state))
+        .merge(Scalar::with_url("/docs", api_docs))
         .nest_service("/ui", serve_dir.clone())
         .fallback_service(serve_dir)
         .layer(cors)
 }
+
+async fn log_request(request: Request, next: Next) -> impl IntoResponse {
+    let now = time::OffsetDateTime::now_utc();
+    let path = request
+        .uri()
+        .path_and_query()
+        .map(|p| p.as_str())
+        .unwrap_or_default();
+    info!(target: "http_request", "new request, {} {}", request.method().as_str(), path);
+
+    let response = next.run(request).await;
+    let response_time = time::OffsetDateTime::now_utc() - now;
+    info!(target: "http_response", "response, code: {}, time: {}", response.status().as_str(), response_time);
+
+    response
+}
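// A sketch of one payoff of tagging records with targets ("http_request" /
// "http_response") in log_request above: a fern Dispatch, as configured in
// utils.rs below, can then filter HTTP noise independently of application
// logs via level_for. The levels chosen here are illustrative assumptions.
fn quiet_http_logger() -> fern::Dispatch {
    fern::Dispatch::new()
        .level(log::LevelFilter::Info)
        .level_for("http_request", log::LevelFilter::Warn)
        .level_for("http_response", log::LevelFilter::Warn)
        .chain(std::io::stdout())
}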
{}", response.status().as_str(), response_time); + + response +} diff --git a/oracle/src/utils.rs b/oracle/src/utils.rs index 4da85cd..6c389a7 100644 --- a/oracle/src/utils.rs +++ b/oracle/src/utils.rs @@ -1,25 +1,29 @@ +use clap::{command, Parser}; +use fern::{ + colors::{Color, ColoredLevelConfig}, + Dispatch, +}; +use log::{error, info, LevelFilter}; use std::{ env, fs::{self, File}, io::Read, path::Path, }; +use time::{format_description::well_known::Iso8601, OffsetDateTime}; -use clap::{command, Parser}; -use slog::{error, info, o, Drain, Level, Logger}; - -pub fn create_folder(logger: &Logger, root_path: &str) { +pub fn create_folder(root_path: &str) { let path = Path::new(root_path); if !path.exists() || !path.is_dir() { // Create the folder if it doesn't exist if let Err(err) = fs::create_dir(path) { - error!(logger, "error creating folder: {}", err); + error!("error creating folder: {}", err); } else { - info!(logger, "folder created: {}", root_path); + info!("folder created: {}", root_path); } } else { - info!(logger, "folder already exists: {}", root_path); + info!("folder already exists: {}", root_path); } } @@ -54,9 +58,17 @@ pub struct Cli { #[arg(short, long)] pub weather_dir: Option, + /// Path to db holding dlc event data (default: event_data) + #[arg(short, long)] + pub event_db: Option, + /// Path to files used to make the browser UI (default: ./ui) #[arg(short, long)] pub ui_dir: Option, + + /// Path to oracle private key (default: ./oracle_private_key.pem) + #[arg(short, long)] + pub oracle_private_key: Option, } pub fn get_config_info() -> Cli { @@ -73,32 +85,47 @@ pub fn get_config_info() -> Cli { cli } -pub fn setup_logger(cli: &Cli) -> Logger { - let log_level = if cli.level.is_some() { +pub fn get_log_level(cli: &Cli) -> LevelFilter { + if cli.level.is_some() { let level = cli.level.as_ref().unwrap(); match level.as_ref() { - "trace" => Level::Trace, - "debug" => Level::Debug, - "info" => Level::Info, - "warn" => Level::Warning, - "error" => Level::Error, - _ => Level::Info, + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => LevelFilter::Info, } } else { let rust_log = env::var("RUST_LOG").unwrap_or_else(|_| String::from("")); match rust_log.to_lowercase().as_str() { - "trace" => Level::Trace, - "debug" => Level::Debug, - "info" => Level::Info, - "warn" => Level::Warning, - "error" => Level::Error, - _ => Level::Info, + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => LevelFilter::Info, } - }; + } +} + +pub fn setup_logger() -> Dispatch { + let colors = ColoredLevelConfig::new() + .trace(Color::White) + .debug(Color::Cyan) + .info(Color::Blue) + .warn(Color::Yellow) + .error(Color::Magenta); - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::CompactFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - let drain = drain.filter_level(log_level).fuse(); - slog::Logger::root(drain, o!("version" => "0.5")) + fern::Dispatch::new() + .format(move |out, message, record| { + out.finish(format_args!( + "[{} {}] {}: {}", + OffsetDateTime::now_utc().format(&Iso8601::DEFAULT).unwrap(), + colors.color(record.level()), + record.target(), + message + )); + }) + .chain(std::io::stdout()) } diff --git a/oracle/tests/api/create_event.rs b/oracle/tests/api/create_event.rs new 
file mode 100644 index 0000000..a76e291 --- /dev/null +++ b/oracle/tests/api/create_event.rs @@ -0,0 +1,164 @@ +use std::sync::Arc; + +use crate::helpers::{spawn_app, MockWeatherAccess}; +use axum::{ + body::{to_bytes, Body}, + http::Request, +}; +use dlctix::Outcome; +use hyper::{header, Method}; +use oracle::{CreateEvent, Event}; +use serde_json::{from_slice, to_string}; +use time::OffsetDateTime; +use tower::ServiceExt; +use uuid::Uuid; + +#[tokio::test] +async fn can_create_oracle_event() { + let uri = String::from("/oracle/events"); + let test_app = spawn_app(Arc::new(MockWeatherAccess::new())).await; + + let new_event = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("PFNO"), + String::from("KSAW"), + String::from("PAPG"), + String::from("KWMC"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let body_json = to_string(&new_event).unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri(uri) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::from(body_json)) + .unwrap(); + + let response = test_app + .app + .oneshot(request) + .await + .expect("Failed to execute request."); + assert!(response.status().is_success()); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + let res: Event = from_slice(&body).unwrap(); + assert_eq!(res.signing_date, new_event.signing_date); + assert_eq!(res.locations, new_event.locations); + assert_eq!( + res.total_allowed_entries, + new_event.total_allowed_entries as i64 + ); + assert_eq!(res.entry_ids.len(), 0); + assert_eq!( + res.number_of_places_win, + new_event.number_of_places_win as i64 + ); + assert_eq!( + res.number_of_values_per_entry, + new_event.number_of_values_per_entry as i64 + ); + assert!(res.weather.is_empty()); + assert!(res.nonce.serialize().len() > 0); + assert!(res.attestation.is_none()); + assert!(res + .event_annoucement + .is_valid_outcome(&Outcome::Attestation(1))); +} + +#[tokio::test] +async fn can_create_and_get_oracle_event() { + let uri = String::from("/oracle/events"); + let test_app = spawn_app(Arc::new(MockWeatherAccess::new())).await; + + let new_event = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("PFNO"), + String::from("KSAW"), + String::from("PAPG"), + String::from("KWMC"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let body_json = to_string(&new_event).unwrap(); + let request_post = Request::builder() + .method(Method::POST) + .uri(uri) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::from(body_json)) + .unwrap(); + + let response_post = test_app + .app + .clone() + .oneshot(request_post) + .await + .expect("Failed to execute request."); + assert!(response_post.status().is_success()); + let body = to_bytes(response_post.into_body(), usize::MAX) + .await + .unwrap(); + let res_post: Event = from_slice(&body).unwrap(); + + let request_get = Request::builder() + .method(Method::GET) + .uri(format!("/oracle/events/{}", res_post.id)) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::empty()) + .unwrap(); + + let response_get = test_app + .app + .oneshot(request_get) + .await + .expect("Failed to execute request."); + assert!(response_get.status().is_success()); + let body = 
to_bytes(response_get.into_body(), usize::MAX) + .await + .unwrap(); + let res: Event = from_slice(&body).unwrap(); + assert_eq!( + res.signing_date, + new_event + .signing_date + .replace_nanosecond(new_event.signing_date.nanosecond() / 1_000 * 1_000) + .unwrap() + ); + assert_eq!( + res.observation_date, + new_event + .observation_date + .replace_nanosecond(new_event.observation_date.nanosecond() / 1_000 * 1_000) + .unwrap() + ); + assert_eq!(res.locations, new_event.locations); + assert_eq!( + res.total_allowed_entries, + new_event.total_allowed_entries as i64 + ); + assert_eq!(res.entry_ids.len(), 0); + assert_eq!( + res.number_of_places_win, + new_event.number_of_places_win as i64 + ); + assert_eq!( + res.number_of_values_per_entry, + new_event.number_of_values_per_entry as i64 + ); + assert!(res.weather.is_empty()); + assert!(res.nonce.serialize().len() > 0); + assert!(res.attestation.is_none()); + assert!(res + .event_annoucement + .is_valid_outcome(&Outcome::Attestation(1))); +} diff --git a/oracle/tests/api/create_event_entry.rs b/oracle/tests/api/create_event_entry.rs new file mode 100644 index 0000000..e8a5e2b --- /dev/null +++ b/oracle/tests/api/create_event_entry.rs @@ -0,0 +1,167 @@ +use crate::helpers::{spawn_app, MockWeatherAccess}; +use axum::{ + body::{to_bytes, Body}, + http::Request, +}; +use hyper::{header, Method}; +use log::info; +use oracle::{AddEventEntry, CreateEvent, WeatherChoices, WeatherEntry}; +use serde_json::{from_slice, to_string}; +use std::sync::Arc; +use time::OffsetDateTime; +use tower::ServiceExt; +use uuid::Uuid; + +#[tokio::test] +async fn can_create_entry_into_event() { + let test_app = spawn_app(Arc::new(MockWeatherAccess::new())).await; + + let new_event = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("PFNO"), + String::from("KSAW"), + String::from("PAPG"), + String::from("KWMC"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let oracle_event = test_app.oracle.create_event(new_event).await.unwrap(); + let new_entry = AddEventEntry { + id: Uuid::now_v7(), + event_id: oracle_event.id, + expected_observations: vec![ + WeatherChoices { + stations: String::from("PFNO"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: None, + }, + WeatherChoices { + stations: String::from("KSAW"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KWMC"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: Some(oracle::ValueOptions::Under), + wind_speed: None, + }, + ], + }; + let body_json = to_string(&new_entry).unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri(format!("/oracle/events/{}/entry", oracle_event.id)) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::from(body_json)) + .unwrap(); + + let response = test_app + .app + .oneshot(request) + .await + .expect("Failed to execute request."); + info!("response status: {}", response.status()); + assert!(response.status().is_success()); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + let res: WeatherEntry = from_slice(&body).unwrap(); + assert_eq!(res.event_id, new_entry.event_id); + assert_eq!(res.id, new_entry.id); + assert_eq!(res.expected_observations, new_entry.expected_observations); +} + +#[tokio::test] +async fn 
can_create_and_get_event_entry() { + let test_app = spawn_app(Arc::new(MockWeatherAccess::new())).await; + + let new_event = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("PFNO"), + String::from("KSAW"), + String::from("PAPG"), + String::from("KWMC"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let oracle_event = test_app.oracle.create_event(new_event).await.unwrap(); + let new_entry = AddEventEntry { + id: Uuid::now_v7(), + event_id: oracle_event.id, + expected_observations: vec![ + WeatherChoices { + stations: String::from("PFNO"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: None, + }, + WeatherChoices { + stations: String::from("KSAW"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KWMC"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: Some(oracle::ValueOptions::Under), + wind_speed: None, + }, + ], + }; + let body_json = to_string(&new_entry).unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri(format!("/oracle/events/{}/entry", oracle_event.id)) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::from(body_json)) + .unwrap(); + + let response = test_app + .app + .clone() + .oneshot(request) + .await + .expect("Failed to execute request."); + info!("response status: {:?}", response); + assert!(response.status().is_success()); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + let res_post: WeatherEntry = from_slice(&body).unwrap(); + + let request_get = Request::builder() + .method(Method::GET) + .uri(format!( + "/oracle/events/{}/entry/{}", + oracle_event.id, res_post.id + )) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::empty()) + .unwrap(); + + let response_get = test_app + .app + .oneshot(request_get) + .await + .expect("Failed to execute request."); + + assert!(response_get.status().is_success()); + let body = to_bytes(response_get.into_body(), usize::MAX) + .await + .unwrap(); + let res: WeatherEntry = from_slice(&body).unwrap(); + assert_eq!(res_post.id, res.id); + assert_eq!(res_post.event_id, res.event_id); + assert_eq!(res_post.score, res.score); + assert_eq!(res_post.expected_observations, res.expected_observations); +} diff --git a/oracle/tests/api/etl_workflow.rs b/oracle/tests/api/etl_workflow.rs new file mode 100644 index 0000000..082523e --- /dev/null +++ b/oracle/tests/api/etl_workflow.rs @@ -0,0 +1,341 @@ +use std::{cmp, sync::Arc}; + +use crate::helpers::{spawn_app, MockWeatherAccess}; +use axum::{ + body::{to_bytes, Body}, + http::Request, +}; +use hyper::{header, Method}; +use log::info; +use oracle::{ + AddEventEntry, CreateEvent, Event, EventStatus, Forecast, Observation, WeatherChoices, +}; +use serde_json::from_slice; +use time::{format_description::well_known::Rfc3339, OffsetDateTime}; +use tokio::time::sleep; +use tower::ServiceExt; +use uuid::Uuid; + +#[tokio::test] +async fn can_get_event_run_etl_and_see_it_signed() { + let mut weather_data = MockWeatherAccess::new(); + //called twice per ETL process + weather_data + .expect_forecasts_data() + .times(2) + .returning(|_, _| Ok(mock_forecast_data())); + weather_data + .expect_observation_data() + .times(2) + .returning(|_, _| Ok(mock_observation_data())); + + let test_app = 
spawn_app(Arc::new(weather_data)).await; + + // This makes the event window 1 day (what is used by the oracle) + let observation_date = OffsetDateTime::parse("2024-08-12T00:00:00+00:00", &Rfc3339).unwrap(); + let signing_date = OffsetDateTime::parse("2024-08-13T00:00:00+00:00", &Rfc3339).unwrap(); + + let new_event_1 = CreateEvent { + id: Uuid::now_v7(), + observation_date, + signing_date, + locations: vec![ + String::from("PFNO"), + String::from("KSAW"), + String::from("PAPG"), + String::from("KWMC"), + ], + total_allowed_entries: 4, + number_of_places_win: 1, + number_of_values_per_entry: 6, + }; + info!("above create event"); + let event = test_app.oracle.create_event(new_event_1).await.unwrap(); + let entry_1 = AddEventEntry { + id: Uuid::now_v7(), + event_id: event.id, + expected_observations: vec![ + WeatherChoices { + stations: String::from("PFNO"), + temp_low: Some(oracle::ValueOptions::Under), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KSAW"), + temp_low: None, + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KWMC"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: Some(oracle::ValueOptions::Under), + wind_speed: Some(oracle::ValueOptions::Par), + }, + ], + }; + let entry_2 = AddEventEntry { + id: Uuid::now_v7(), + event_id: event.id, + expected_observations: vec![ + WeatherChoices { + stations: String::from("PFNO"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Par), + }, + WeatherChoices { + stations: String::from("KSAW"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KWMC"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: Some(oracle::ValueOptions::Under), + wind_speed: None, + }, + ], + }; + let entry_3 = AddEventEntry { + id: Uuid::now_v7(), + event_id: event.id, + expected_observations: vec![ + WeatherChoices { + stations: String::from("PFNO"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Under), + }, + WeatherChoices { + stations: String::from("KSAW"), + temp_low: Some(oracle::ValueOptions::Over), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KWMC"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Under), + }, + ], + }; + let entry_4 = AddEventEntry { + id: Uuid::now_v7(), + event_id: event.id, + expected_observations: vec![ + WeatherChoices { + stations: String::from("PFNO"), + temp_low: Some(oracle::ValueOptions::Over), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Par), + }, + WeatherChoices { + stations: String::from("KSAW"), + temp_low: None, + temp_high: Some(oracle::ValueOptions::Under), + wind_speed: Some(oracle::ValueOptions::Over), + }, + WeatherChoices { + stations: String::from("KWMC"), + temp_low: Some(oracle::ValueOptions::Par), + temp_high: None, + wind_speed: Some(oracle::ValueOptions::Under), + }, + ], + }; + test_app + .oracle + .add_event_entry(entry_1.clone()) + .await + .unwrap(); + test_app + .oracle + .add_event_entry(entry_2.clone()) + .await + .unwrap(); + test_app + .oracle + .add_event_entry(entry_3.clone()) + .await + .unwrap(); + test_app + .oracle + .add_event_entry(entry_4.clone()) + .await + .unwrap(); + + // 1) 
get event before etl + let request = Request::builder() + .method(Method::GET) + .uri(format!("/oracle/events/{}", event.id)) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::empty()) + .unwrap(); + + let response = test_app + .app + .clone() + .oneshot(request) + .await + .expect("Failed to execute request."); + assert!(response.status().is_success()); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + let res: Event = from_slice(&body).unwrap(); + assert_eq!(res.status, EventStatus::Completed); + assert!(res.attestation.is_none()); + + // 2) request etl to run + let request = Request::builder() + .method(Method::POST) + .uri(String::from("/oracle/update")) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::empty()) + .unwrap(); + + let response = test_app + .app + .clone() + .oneshot(request) + .await + .expect("Failed to execute request."); + assert!(response.status().is_success()); + + // wait for etl to run in background + sleep(std::time::Duration::from_secs(1)).await; + + // 3) get event after etl + let request = Request::builder() + .method(Method::GET) + .uri(format!("/oracle/events/{}", event.id)) + .header(header::CONTENT_TYPE, "application/json") + .body(Body::empty()) + .unwrap(); + + let response = test_app + .app + .oneshot(request) + .await + .expect("Failed to execute request."); + assert!(response.status().is_success()); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + let res: Event = from_slice(&body).unwrap(); + + // Verify the event was signed and status changed + assert_eq!(res.status, EventStatus::Signed); + assert!(res.attestation.is_some()); + + let mut entries = res.entries; + entries.sort_by_key(|entry| cmp::Reverse(entry.score)); + info!("entries: {:?}", entries); + //Make sure the expected entries won and calculated the correct score for each + // We expect a tie between entry_1 and entry_3 with 4 pts + let entry_1_res = entries.iter().find(|entry| entry.id == entry_1.id).unwrap(); + assert_eq!(entry_1_res.score.unwrap(), 4); + let entry_2_res = entries.iter().find(|entry| entry.id == entry_2.id).unwrap(); + assert_eq!(entry_2_res.score.unwrap(), 3); + let entry_3_res = entries.iter().find(|entry| entry.id == entry_3.id).unwrap(); + assert_eq!(entry_3_res.score.unwrap(), 4); + let entry_4_res = entries.iter().find(|entry| entry.id == entry_4.id).unwrap(); + assert_eq!(entry_4_res.score.unwrap(), 1); + + let mut winning_score_bytes = (4_i64).to_le_bytes().to_vec(); + winning_score_bytes.reverse(); + + let outcome_index = event + .event_annoucement + .outcome_messages + .iter() + .position(|outcome| *outcome == winning_score_bytes) + .unwrap(); + + let attested_outcome = res.event_annoucement.attestation_secret( + outcome_index, + test_app.oracle.raw_private_key(), + res.nonce, + ); + + // Verify the attestation matches what we calculate in the test + assert_eq!(attested_outcome, res.attestation); +} + +fn mock_forecast_data() -> Vec { + vec![ + Forecast { + station_id: String::from("PFNO"), + date: String::from("2024-08-12"), + start_time: String::from("2024-08-11T00:00:00+00:00"), + end_time: String::from("2024-08-12T00:00:00+00:00"), + temp_low: 9, + temp_high: 35, + wind_speed: 8, + }, + Forecast { + station_id: String::from("KSAW"), + date: String::from("2024-08-12"), + start_time: String::from("2024-08-11T00:00:00+00:00"), + end_time: String::from("2024-08-12T00:00:00+00:00"), + temp_low: 17, + temp_high: 25, + wind_speed: 3, + }, + Forecast { + station_id: 
String::from("PAPG"), + date: String::from("2024-08-12"), + start_time: String::from("2024-08-11T00:00:00+00:00"), + end_time: String::from("2024-08-12T00:00:00+00:00"), + temp_low: 14, + temp_high: 17, + wind_speed: 6, + }, + Forecast { + station_id: String::from("KWMC"), + date: String::from("2024-08-12"), + start_time: String::from("2024-08-11T00:00:00+00:00"), + end_time: String::from("2024-08-12T00:00:00+00:00"), + temp_low: 31, + temp_high: 33, + wind_speed: 11, + }, + ] +} + +fn mock_observation_data() -> Vec { + vec![ + Observation { + station_id: String::from("PFNO"), + start_time: String::from("2024-08-12T00:00:00+00:00"), + end_time: String::from("2024-08-13T00:00:00+00:00"), + temp_low: 9.4, + temp_high: 35 as f64, + wind_speed: 11, + }, + Observation { + station_id: String::from("KSAW"), + start_time: String::from("2024-08-12T00:00:00+00:00"), + end_time: String::from("2024-08-13T00:00:00+00:00"), + temp_low: 22 as f64, + temp_high: 25 as f64, + wind_speed: 10, + }, + Observation { + station_id: String::from("PAPG"), + start_time: String::from("2024-08-12T00:00:00+00:00"), + end_time: String::from("2024-08-13T00:00:00+00:00"), + temp_low: 15 as f64, + temp_high: 16 as f64, + wind_speed: 6, + }, + Observation { + station_id: String::from("KWMC"), + start_time: String::from("2024-08-12T00:00:00+00:00"), + end_time: String::from("2024-08-13T00:00:00+00:00"), + temp_low: 32.8, + temp_high: 34.4, + wind_speed: 11, + }, + ] +} diff --git a/oracle/tests/api/get_events.rs b/oracle/tests/api/get_events.rs new file mode 100644 index 0000000..5142979 --- /dev/null +++ b/oracle/tests/api/get_events.rs @@ -0,0 +1,114 @@ +use std::sync::Arc; + +use crate::helpers::{spawn_app, MockWeatherAccess}; +use axum::{ + body::{to_bytes, Body}, + http::Request, +}; +use hyper::{header, Method}; +use oracle::{CreateEvent, EventSummary}; +use serde_json::from_slice; +use time::OffsetDateTime; +use tower::ServiceExt; +use uuid::Uuid; + +#[tokio::test] +async fn can_get_all_events() { + let uri = String::from("/oracle/events"); + let test_app = spawn_app(Arc::new(MockWeatherAccess::new())).await; + + let new_event_1 = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("PFNO"), + String::from("KSAW"), + String::from("PAPG"), + String::from("KWMC"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let new_event_2 = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("KITH"), + String::from("KMCD"), + String::from("PAPG"), + String::from("KJAN"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let new_event_3 = CreateEvent { + id: Uuid::now_v7(), + observation_date: OffsetDateTime::now_utc(), + signing_date: OffsetDateTime::now_utc(), + locations: vec![ + String::from("KCQW"), + String::from("KCSM"), + String::from("KCRW"), + String::from("KDED"), + ], + total_allowed_entries: 100, + number_of_places_win: 3, + number_of_values_per_entry: 6, + }; + let expected = vec![ + new_event_1.clone(), + new_event_2.clone(), + new_event_3.clone(), + ]; + test_app.oracle.create_event(new_event_1).await.unwrap(); + test_app.oracle.create_event(new_event_2).await.unwrap(); + test_app.oracle.create_event(new_event_3).await.unwrap(); + + let request = Request::builder() + .method(Method::GET) + .uri(uri) + 
        .header(header::CONTENT_TYPE, "application/json")
+        .body(Body::empty())
+        .unwrap();
+
+    let response = test_app
+        .app
+        .oneshot(request)
+        .await
+        .expect("Failed to execute request.");
+    assert!(response.status().is_success());
+    let body = to_bytes(response.into_body(), usize::MAX).await.unwrap();
+    let res: Vec<EventSummary> = from_slice(&body).unwrap();
+    for (index, event_summary) in res.iter().enumerate() {
+        let cur_expect = expected.get(index).unwrap();
+        assert_eq!(
+            event_summary.signing_date,
+            cur_expect
+                .signing_date
+                .replace_nanosecond(cur_expect.signing_date.nanosecond() / 1_000 * 1_000)
+                .unwrap()
+        );
+        assert_eq!(
+            event_summary.observation_date,
+            cur_expect
+                .observation_date
+                .replace_nanosecond(cur_expect.observation_date.nanosecond() / 1_000 * 1_000)
+                .unwrap()
+        );
+        assert_eq!(
+            event_summary.total_allowed_entries,
+            cur_expect.total_allowed_entries as i64
+        );
+        assert_eq!(event_summary.total_entries, 0);
+        assert_eq!(
+            event_summary.number_of_places_win,
+            cur_expect.number_of_places_win as i64
+        );
+        assert!(event_summary.weather.is_empty());
+        assert!(event_summary.attestation.is_none());
+    }
+}
diff --git a/oracle/tests/api/helpers.rs b/oracle/tests/api/helpers.rs
new file mode 100644
index 0000000..f9c4c68
--- /dev/null
+++ b/oracle/tests/api/helpers.rs
@@ -0,0 +1,84 @@
+use std::sync::{Arc, Once};
+
+use axum::{async_trait, Router};
+use log::{info, LevelFilter};
+use mockall::mock;
+use oracle::{
+    app, create_folder, oracle::Oracle, setup_logger, AppState, EventData, FileData, WeatherData,
+};
+use rand::Rng;
+
+pub struct TestApp {
+    pub app: Router,
+    pub oracle: Arc<Oracle>,
+}
+
+static INIT_LOGGER: Once = Once::new();
+
+fn init_logger() {
+    INIT_LOGGER.call_once(|| {
+        setup_logger().level(LevelFilter::Debug).apply().unwrap();
+    });
+}
+
+pub fn random_test_number() -> i32 {
+    let mut rng = rand::thread_rng();
+    rng.gen_range(10000..99999)
+}
+
+pub async fn spawn_app(weather_db: Arc<dyn WeatherData + Send + Sync>) -> TestApp {
+    init_logger();
+    create_folder("./test_data");
+    let random_test_number = random_test_number();
+    info!("test number: {}", random_test_number);
+    let test_folder = format!("./test_data/{}", random_test_number);
+    create_folder(&test_folder.clone());
+    let event_data = format!("{}/event_data", test_folder);
+    create_folder(&event_data.clone());
+
+    let event_db = Arc::new(EventData::new(&event_data).unwrap());
+    let private_key_file_path = String::from("./oracle_private_key.pem");
+    let oracle = Arc::new(
+        Oracle::new(event_db, weather_db.clone(), &private_key_file_path)
+            .await
+            .unwrap(),
+    );
+
+    let app_state = AppState {
+        ui_dir: String::from("./ui"),
+        remote_url: String::from("http://127.0.0.1:9100"),
+        weather_db,
+        file_access: Arc::new(MockFileAccess::new()),
+        oracle: oracle.clone(),
+    };
+    let app = app(app_state);
+
+    TestApp { app, oracle }
+}
+
+mock! {
+    pub FileAccess {}
+    #[async_trait]
+    impl FileData for FileAccess {
+        async fn grab_file_names(&self, params: oracle::FileParams) -> Result<Vec<String>, oracle::Error>;
+        fn current_folder(&self) -> String;
+        fn build_file_paths(&self, file_names: Vec<String>) -> Vec<String>;
+        fn build_file_path(&self, filename: &str, file_generated_at: time::OffsetDateTime) -> String;
+    }
+}
+
+mock! {
+    pub WeatherAccess {}
+    #[async_trait]
+    impl WeatherData for WeatherAccess {
+        async fn forecasts_data(
+            &self,
+            req: &oracle::ForecastRequest,
+            station_ids: Vec<String>,
+        ) -> Result<Vec<oracle::Forecast>, oracle::weather_data::Error>;
+        async fn observation_data(
+            &self,
+            req: &oracle::ObservationRequest,
+            station_ids: Vec<String>,
+        ) -> Result<Vec<oracle::Observation>, oracle::weather_data::Error>;
+        async fn stations(&self) -> Result<Vec<oracle::Station>, oracle::weather_data::Error>;
+    }
+}
diff --git a/oracle/tests/api/main.rs b/oracle/tests/api/main.rs
new file mode 100644
index 0000000..2941db0
--- /dev/null
+++ b/oracle/tests/api/main.rs
@@ -0,0 +1,5 @@
+mod create_event;
+mod create_event_entry;
+mod etl_workflow;
+mod get_events;
+mod helpers;
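// A hedged sketch of how a new integration test would wire up the mock defined
// above: expect_stations() is generated by mockall's mock! macro, and spawn_app
// already accepts any WeatherData implementation (the other tests pass
// MockWeatherAccess the same way). The empty station list is illustrative.
use std::sync::Arc;

use crate::helpers::{spawn_app, MockWeatherAccess};

#[tokio::test]
async fn stations_route_returns_mocked_data() {
    let mut weather = MockWeatherAccess::new();
    weather.expect_stations().returning(|| Ok(vec![]));
    let test_app = spawn_app(Arc::new(weather)).await;
    // Drive test_app.app with tower::ServiceExt::oneshot against GET /stations,
    // as the tests above do, and assert on the (empty) mocked body.
    let _ = test_app.app;
}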