diff --git a/.github/workflows/build-api.yml b/.github/workflows/build-api.yml
index 15684355a..f8e7fd198 100644
--- a/.github/workflows/build-api.yml
+++ b/.github/workflows/build-api.yml
@@ -12,10 +12,10 @@ on:
  workflow_dispatch:
  push:
    branches:
-      - 'main'
+      - "main"

env:
-  CARGO_TERM_COLOR: always
+  CARGO_TERM_COLOR: always

jobs:
  build-api:
@@ -33,15 +33,15 @@ jobs:
      - name: set build cache
        uses: actions/cache@v3
        with:
-          path: |
-            ~/.cargo/bin/
-            ~/.cargo/registry/index/
-            ~/.cargo/registry/cache/
-            ~/.cargo/git/db/
-            digital-asset-rpc-infrastructure/target/
-          key: ${{ matrix.os }}_digital-asset-rpc-infrastructure_${{ hashFiles('digital-asset-rpc-infrastructure/Cargo.lock') }}
-          restore-keys: |
-            ${{ matrix.os }}_digital-asset-rpc-infrastructure
+          path: |
+            ~/.cargo/bin/
+            ~/.cargo/registry/index/
+            ~/.cargo/registry/cache/
+            ~/.cargo/git/db/
+            digital-asset-rpc-infrastructure/target/
+          key: ${{ matrix.os }}_digital-asset-rpc-infrastructure_${{ hashFiles('digital-asset-rpc-infrastructure/Cargo.lock') }}
+          restore-keys: |
+            ${{ matrix.os }}_digital-asset-rpc-infrastructure

      - name: build digital asset rpc infra
        run: cargo build --verbose --release
@@ -54,7 +54,7 @@ jobs:
          mv target/release/migration target/release/migration22
          mv target/release/das_api target/release/das_api22

-      # These steps can be omitted to save space; they are mostly in place to validate binaries (manually) and the path to them
+      # These steps can be omitted to save space; they are mostly in place to validate binaries (manually) and the path to them
      # Omitting this will save on storage consumption on the account
      - name: Publish artifact
        if: matrix.os == 'ubuntu-22.04'
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 4ced2cd37..60f1b69ab 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -8,11 +8,11 @@ on:
  pull_request:
  push:
    branches:
-      - 'main'
+      - "main"
  workflow_dispatch:

env:
-  CARGO_TERM_COLOR: always
+  CARGO_TERM_COLOR: always

jobs:
  test:
@@ -25,20 +25,20 @@ jobs:
      - name: set build cache
        uses: actions/cache@v3
        with:
-          path: |
-            ~/.cargo/bin/
-            ~/.cargo/registry/index/
-            ~/.cargo/registry/cache/
-            ~/.cargo/git/db/
-            digital-asset-rpc-infrastructure/target/
-          key: cargo-${{ hashFiles('**/Cargo.lock') }}-0001
+          path: |
+            ~/.cargo/bin/
+            ~/.cargo/registry/index/
+            ~/.cargo/registry/cache/
+            ~/.cargo/git/db/
+            digital-asset-rpc-infrastructure/target/
+          key: cargo-${{ hashFiles('**/Cargo.lock') }}-0001

      # Cargo.lock
      - name: Check lock file
        run: |
          cargo tree
          git checkout Cargo.lock
-          cargo tree --frozen
+          cargo tree

      # fmt
      - name: Check fmt
diff --git a/Builder.Dockerfile b/Builder.Dockerfile
index 6b2b4b1bb..6ee7cbaed 100644
--- a/Builder.Dockerfile
+++ b/Builder.Dockerfile
@@ -1,11 +1,12 @@
-FROM rust:1.76-bullseye AS builder
+FROM rust:1.75-bullseye AS builder
RUN apt-get update -y && \
-    apt-get install -y build-essential make git
-
+    apt-get install -y build-essential make git
+
RUN mkdir /rust
RUN mkdir /rust/bins
COPY Cargo.toml /rust
COPY core /rust/core
+COPY backfill /rust/backfill
COPY das_api /rust/das_api
COPY digital_asset_types /rust/digital_asset_types
COPY integration_tests /rust/integration_tests
@@ -19,7 +20,7 @@ COPY blockbuster rust/blockbuster
WORKDIR /rust
RUN --mount=type=cache,target=/rust/target,id=das-rust \
    cargo build --release --bins && cp `find /rust/target/release -maxdepth 1 -type f | sed 's/^\.\///' | grep -v "\." ` /rust/bins
-
+
FROM rust:1.75-slim-bullseye as final
COPY --from=builder /rust/bins /das/
CMD echo "Built the DAS API bins!"
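A note on the "Check lock file" step changed above, before the Cargo.lock hunks below. Its original form is a common pattern for verifying that the committed Cargo.lock is in sync with the workspace manifests; a minimal local sketch of that pattern, using the same commands as the workflow, run from the repository root:

cargo tree                 # resolve the dependency graph; this may rewrite a stale Cargo.lock
git checkout Cargo.lock    # restore the committed lock file
cargo tree --frozen        # --frozen errors out instead of rewriting, so a stale lock file fails here

The hunk above replaces the final "cargo tree --frozen" with a plain "cargo tree", which still exercises dependency resolution but no longer fails the job when Cargo.lock is out of date.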
diff --git a/Cargo.lock b/Cargo.lock
index fc7779580..86beb67db 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -652,7 +652,7 @@ checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776"
dependencies = [
 "async-channel 1.9.0",
 "async-executor",
- "async-io 1.13.0",
+ "async-io",
 "async-lock 2.8.0",
 "blocking",
 "futures-lite 1.13.0",
@@ -673,32 +673,13 @@ dependencies = [
 "futures-lite 1.13.0",
 "log",
 "parking",
- "polling 2.8.0",
+ "polling",
 "rustix 0.37.27",
 "slab",
 "socket2 0.4.9",
 "waker-fn",
]

-[[package]]
-name = "async-io"
-version = "2.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884"
-dependencies = [
- "async-lock 3.1.1",
- "cfg-if",
- "concurrent-queue",
- "futures-io",
- "futures-lite 2.0.1",
- "parking",
- "polling 3.4.0",
- "rustix 0.38.18",
- "slab",
- "tracing",
- "windows-sys 0.52.0",
-]
-
[[package]]
name = "async-lock"
version = "2.8.0"
@@ -737,7 +718,7 @@ dependencies = [
 "async-attributes",
 "async-channel 1.9.0",
 "async-global-executor",
- "async-io 1.13.0",
+ "async-io",
 "async-lock 2.8.0",
 "crossbeam-utils",
 "futures-channel",
@@ -774,7 +755,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -785,13 +766,13 @@ checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1"

[[package]]
name = "async-trait"
-version = "0.1.83"
+version = "0.1.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
+checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -835,6 +816,60 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

+[[package]]
+name = "autotools"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef941527c41b0fc0dd48511a8154cd5fc7e29200a0ff8b7203c5d777dbc795cf"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "axum"
+version = "0.6.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bitflags 1.3.2",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "mime",
+ "rustversion",
+ "tower-layer",
+ "tower-service",
+]
+
[[package]]
name = "backon"
version = "0.4.1"
@@ -1134,10 +1169,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b"
dependencies = [
 "once_cell",
- "proc-macro-crate 3.2.0",
+ "proc-macro-crate 3.1.0",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
 "syn_derive",
]
@@ -1233,9 +1268,9 @@ dependencies = [

[[package]]
name = "bumpalo"
-version = "3.14.0"
+version = "3.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
+checksum = "d32a994c2b3ca201d9b263612a374263f05e7adde37c4707f693dcd375076d1f"

[[package]]
name = "bv"
@@ -1286,7 +1321,7 @@ checksum = "965ab7eb5f8f97d2a083c799f3a1b994fc397b2fe2da5d1da1626ce15a39f2b1"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -1329,6 +1364,18 @@ dependencies = [
 "thiserror",
]

+[[package]]
+name = "cargo-lock"
+version = "9.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e11c675378efb449ed3ce8de78d75d0d80542fc98487c26aba28eb3b82feac72"
+dependencies = [
+ "semver",
+ "serde",
+ "toml 0.7.8",
+ "url",
+]
+
[[package]]
name = "cc"
version = "1.0.83"
@@ -1451,7 +1498,7 @@ dependencies = [
 "heck 0.4.1",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -1556,6 +1603,12 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"

+[[package]]
+name = "convert_case"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
+
[[package]]
name = "core-foundation"
version = "0.9.3"
@@ -1598,9 +1651,9 @@ checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484"

[[package]]
name = "crc32fast"
-version = "1.3.2"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
dependencies = [
 "cfg-if",
]
@@ -1616,35 +1669,29 @@ dependencies = [

[[package]]
name = "crossbeam-deque"
-version = "0.8.3"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
- "cfg-if",
 "crossbeam-epoch",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-epoch"
-version = "0.9.15"
+version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
- "autocfg",
- "cfg-if",
 "crossbeam-utils",
- "memoffset 0.9.0",
- "scopeguard",
]

[[package]]
name = "crossbeam-queue"
-version = "0.3.8"
+version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
+checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
- "cfg-if",
 "crossbeam-utils",
]

@@ -1724,7 +1771,7 @@ dependencies = [
 "proc-macro2",
 "quote",
 "strsim 0.10.0",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -1735,7 +1782,40 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5"
dependencies = [
 "darling_core",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "das-bubblegum"
+version = "0.7.2"
+dependencies = [
+ "anchor-client",
+ "anyhow",
+ "blockbuster",
+ "borsh 0.10.3",
+ "bs58 0.4.0",
+ "clap 4.4.8",
+ "das-core",
+ "digital_asset_types",
+ "futures",
+ "heck 0.5.0",
+ "log",
+ "mpl-bubblegum",
+ "num-traits",
+ "program_transformers",
+ "sea-orm",
+ "serde_json",
+ "sha3 0.10.8",
+ "solana-client",
+ "solana-program",
+ "solana-sdk",
+ "solana-transaction-status",
+ "spl-account-compression",
+ "spl-token",
+ "sqlx",
+ "thiserror",
+ "tokio",
+ "tracing",
]

[[package]]
@@ -1744,18 +1824,110 @@ version = "0.7.2"
dependencies = [
 "anyhow",
 "backon",
+ "borsh 0.10.3",
+ "bs58 0.4.0",
 "cadence",
 "cadence-macros",
 "clap 4.4.8",
+ "derive_more",
+ "digital_asset_types",
 "figment",
+ "futures",
+ "indicatif",
+ "log",
 "plerkle_messenger",
+ "reqwest",
+ "sea-orm",
+ "serde",
+ "serde_json",
 "solana-account-decoder",
 "solana-client",
 "solana-sdk",
 "solana-transaction-status",
+ "spl-account-compression",
+ "sqlx",
+ "thiserror",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "das-grpc-ingest"
+version = "0.7.2"
+dependencies = [
+ "anyhow",
+ "async-stream",
+ "atty",
+ "cargo-lock",
+ "chrono",
+ "clap 4.4.8",
+ "das-bubblegum",
+ "das-core",
+ "digital_asset_types",
+ "futures",
+ "git-version",
+ "hex",
+ "hyper",
+ "json5",
+ "lazy_static",
+ "lru",
+ "opentelemetry",
+ "opentelemetry-jaeger",
+ "opentelemetry_sdk",
+ "program_transformers",
+ "prometheus",
+ "redis 0.25.4",
+ "reqwest",
+ "rust-crypto",
+ "sea-orm",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "solana-sdk",
+ "sqlx",
+ "thiserror",
+ "tokio",
+ "tracing",
+ "tracing-opentelemetry",
+ "tracing-subscriber",
+ "vergen",
+ "yellowstone-grpc-client",
+ "yellowstone-grpc-proto",
+ "yellowstone-grpc-tools",
+]
+
+[[package]]
+name = "das-metadata-json"
+version = "0.7.2"
+dependencies = [
+ "anyhow",
+ "backon",
+ "bs58 0.4.0",
+ "cadence",
+ "cadence-macros",
+ "chrono",
+ "clap 4.4.8",
+ "das-core",
+ "derive_more",
+ "digital_asset_types",
+ "env_logger 0.10.0",
+ "figment",
+ "futures",
+ "indicatif",
+ "log",
+ "plerkle_messenger",
+ "rand 0.8.5",
+ "redis 0.25.4",
+ "reqwest",
+ "sea-orm",
+ "sea-query 0.28.5",
+ "serde",
+ "serde_json",
 "sqlx",
 "thiserror",
 "tokio",
+ "tokio-stream",
+ "url",
]

[[package]]
@@ -1764,30 +1936,34 @@ version = "0.7.2"
dependencies = [
 "anchor-client",
 "anyhow",
- "backon",
 "borsh 0.10.3",
+ "bs58 0.4.0",
 "cadence",
 "cadence-macros",
 "clap 4.4.8",
+ "das-bubblegum",
 "das-core",
 "digital_asset_types",
 "env_logger 0.10.0",
 "figment",
- "flatbuffers",
 "futures",
 "indicatif",
 "log",
 "mpl-bubblegum",
- "plerkle_messenger",
- "plerkle_serialization",
+ "mpl-token-metadata",
+ "program_transformers",
 "sea-orm",
+ "serde_json",
 "solana-account-decoder",
 "solana-client",
+ "solana-program",
 "solana-sdk",
 "solana-transaction-status",
 "spl-account-compression",
+ "sqlx",
 "thiserror",
 "tokio",
+ "tracing",
]

[[package]]
@@ -1894,6 +2070,19 @@ dependencies = [
 "syn 1.0.109",
]

+[[package]]
+name = "derive_more"
+version = "0.99.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
+dependencies = [
+ "convert_case",
+ "proc-macro2",
+ "quote",
+ "rustc_version",
+ "syn 2.0.90",
+]
+
[[package]]
name = "deunicode"
version = "1.4.3"
@@ -1946,6 +2135,7 @@ dependencies = [
 "jsonpath_lib",
 "log",
 "mime_guess",
+ "mpl-token-metadata",
 "num-derive 0.3.3",
 "num-traits",
 "schemars",
@@ -1989,7 +2179,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -2012,7 +2202,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -2070,9 +2260,9 @@ dependencies = [

[[package]]
name = "either"
-version = "1.9.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
+checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
dependencies = [
 "serde",
]

@@ -2109,7 +2299,7 @@ checksum = "a1ab991c1362ac86c61ab6f556cff143daa22e5a15e4e189df818b2fd19fe65b"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -2171,19 +2361,6 @@ dependencies = [
 "pin-project-lite",
]

-[[package]]
-name = "event-listener"
-version = "5.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite",
- "portable-atomic 1.6.0",
- "portable-atomic-util",
-]
-
[[package]]
name = "event-listener-strategy"
version = "0.3.0"
@@ -2262,6 +2439,12 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6"

+[[package]]
+name = "fixedbitset"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+
[[package]]
name = "flatbuffers"
version = "23.5.26"
@@ -2341,9 +2524,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"

[[package]]
name = "futures"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
dependencies = [
 "futures-channel",
 "futures-core",
@@ -2356,9 +2539,9 @@ dependencies = [

[[package]]
name = "futures-channel"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
+checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
dependencies = [
 "futures-core",
 "futures-sink",
]

[[package]]
name = "futures-core"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"

[[package]]
name = "futures-executor"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
+checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
dependencies = [
 "futures-core",
 "futures-task",
@@ -2394,9 +2577,9 @@ dependencies = [

[[package]]
name = "futures-io"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"

[[package]]
name = "futures-lite"
@@ -2429,32 +2612,32 @@ dependencies = [

[[package]]
name = "futures-macro"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
+checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
name = "futures-sink"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
+checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"

[[package]]
name = "futures-task"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"

[[package]]
name = "futures-util"
-version = "0.3.31"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
 "futures-channel",
 "futures-core",
@@ -2527,6 +2710,32 @@ version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"

+[[package]]
+name = "git-version"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19"
+dependencies = [
+ "git-version-macro",
+]
+
+[[package]]
+name = "git-version-macro"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "glob"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+
[[package]]
name = "globset"
version = "0.4.13"
@@ -2628,12 +2837,6 @@ dependencies = [
 "allocator-api2",
]

-[[package]]
-name = "hashbrown"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
-
[[package]]
name = "hashlink"
version = "0.8.4"
@@ -2678,9 +2881,9 @@ dependencies = [

[[package]]
name = "hermit-abi"
-version = "0.3.3"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"

[[package]]
name = "hex"
@@ -2817,6 +3020,18 @@ dependencies = [
 "tokio-rustls 0.24.1",
]

+[[package]]
+name = "hyper-timeout"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
+dependencies = [
+ "hyper",
+ "pin-project-lite",
+ "tokio",
+ "tokio-io-timeout",
+]
+
[[package]]
name = "hyper-tls"
version = "0.5.0"
@@ -2898,20 +3113,20 @@ dependencies = [

[[package]]
name = "indexmap"
-version = "2.6.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
+checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0"
dependencies = [
 "equivalent",
- "hashbrown 0.15.0",
+ "hashbrown 0.14.1",
 "serde",
]

[[package]]
name = "indicatif"
-version = "0.17.7"
+version = "0.17.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25"
+checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3"
dependencies = [
 "console",
 "instant",
@@ -2949,6 +3164,12 @@ dependencies = [
 "cfg-if",
]

+[[package]]
+name = "integer-encoding"
+version = "3.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02"
+
[[package]]
name = "integration_tests"
version = "0.1.0"
@@ -2989,7 +3210,7 @@ version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
- "hermit-abi 0.3.3",
+ "hermit-abi 0.3.9",
 "libc",
 "windows-sys 0.48.0",
]
@@ -3006,7 +3227,7 @@ version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
- "hermit-abi 0.3.3",
+ "hermit-abi 0.3.9",
 "rustix 0.38.18",
 "windows-sys 0.48.0",
]
@@ -3046,13 +3267,24 @@ dependencies = [

[[package]]
name = "js-sys"
-version = "0.3.72"
+version = "0.3.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
+checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
dependencies = [
 "wasm-bindgen",
]

+[[package]]
+name = "json5"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1"
+dependencies = [
+ "pest",
+ "pest_derive",
+ "serde",
+]
+
[[package]]
name = "jsonpath_lib"
version = "0.3.0"
@@ -3202,9 +3434,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
-version = "0.2.161"
+version = "0.2.158"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"

[[package]]
name = "libsecp256k1"
@@ -3318,6 +3550,15 @@ dependencies = [
 "value-bag",
]

+[[package]]
+name = "lru"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904"
+dependencies = [
+ "hashbrown 0.14.1",
+]
+
[[package]]
name = "matchers"
version = "0.1.0"
@@ -3327,6 +3568,12 @@ dependencies = [
 "regex-automata 0.1.10",
]

+[[package]]
+name = "matchit"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
+
[[package]]
name = "md-5"
version = "0.10.6"
@@ -3459,13 +3706,14 @@ dependencies = [

[[package]]
name = "mio"
-version = "0.8.8"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
+checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4"
dependencies = [
+ "hermit-abi 0.3.9",
 "libc",
 "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]

[[package]]
@@ -3504,9 +3752,9 @@ dependencies = [

[[package]]
name = "mpl-bubblegum"
-version = "1.2.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3cbca5deb859e66a1a21ada94f2eaab3eb5caa4584c0c8ade0efac29a5414b8"
+checksum = "a9eff5ae5cafd1acdf7e7c93359da1eec91dcaede318470d9f68b78e8b7469f4"
dependencies = [
 "borsh 0.10.3",
 "kaigan",
@@ -3559,6 +3807,12 @@ dependencies = [
 "thiserror",
]

+[[package]]
+name = "multimap"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
+
[[package]]
name = "native-tls"
version = "0.2.11"
@@ -3589,6 +3843,8 @@ dependencies = [
 "cadence-macros",
 "chrono",
 "clap 4.4.8",
+ "das-core",
+ "das-metadata-json",
 "digital_asset_types",
 "figment",
 "flatbuffers",
@@ -3599,6 +3855,7 @@ dependencies = [
 "plerkle_serialization",
 "program_transformers",
 "rand 0.8.5",
+ "regex",
 "reqwest",
 "rust-crypto",
 "sea-orm",
@@ -3610,7 +3867,6 @@ dependencies = [
 "solana-transaction-status",
 "spl-account-compression",
 "sqlx",
- "stretto",
 "thiserror",
 "tokio",
 "tracing-subscriber",
@@ -3715,7 +3971,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -3753,9 +4009,9 @@ dependencies = [

[[package]]
name = "num-traits"
-version = "0.2.17"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
dependencies = [
 "autocfg",
]

@@ -3766,7 +4022,7 @@ version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
- "hermit-abi 0.3.3",
+ "hermit-abi 0.3.9",
 "libc",
]

@@ -3797,7 +4053,7 @@ dependencies = [
 "proc-macro-crate 1.3.1",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -3806,10 +4062,19 @@ version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
dependencies = [
- "proc-macro-crate 3.2.0",
+ "proc-macro-crate 3.1.0",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "num_threads"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9"
+dependencies = [
+ "libc",
]

[[package]]
@@ -3895,7 +4160,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -3916,6 +4181,87 @@ dependencies = [
 "vcpkg",
]

+[[package]]
+name = "opentelemetry"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "indexmap 2.3.0",
+ "js-sys",
+ "once_cell",
+ "pin-project-lite",
+ "thiserror",
+ "urlencoding",
+]
+
+[[package]]
+name = "opentelemetry-jaeger"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e617c66fd588e40e0dbbd66932fdc87393095b125d4459b1a3a10feb1712f8a1"
+dependencies = [
+ "async-trait",
+ "futures-core",
+ "futures-util",
+ "opentelemetry",
+ "opentelemetry-semantic-conventions",
+ "opentelemetry_sdk",
+ "thrift",
+ "tokio",
+]
+
+[[package]]
+name = "opentelemetry-semantic-conventions"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f5774f1ef1f982ef2a447f6ee04ec383981a3ab99c8e77a1a7b30182e65bbc84"
+dependencies = [
+ "opentelemetry",
+]
+
+[[package]]
+name = "opentelemetry_sdk"
+version = "0.21.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f16aec8a98a457a52664d69e0091bac3a0abd18ead9b641cb00202ba4e0efe4"
+dependencies = [
+ "async-trait",
+ "crossbeam-channel",
+ "futures-channel",
+ "futures-executor",
+ "futures-util",
+ "glob",
+ "once_cell",
+ "opentelemetry",
+ "ordered-float 4.2.2",
+ "percent-encoding",
+ "rand 0.8.5",
+ "thiserror",
+ "tokio",
+ "tokio-stream",
+]
+
+[[package]]
+name = "ordered-float"
+version = "2.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "ordered-float"
+version = "4.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6"
+dependencies = [
+ "num-traits",
+]
+
[[package]]
name = "os_str_bytes"
version = "6.6.1"
@@ -4049,7 +4395,7 @@ dependencies = [
 "proc-macro2",
 "proc-macro2-diagnostics",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -4076,24 +4422,79 @@ dependencies = [
 "num",
]

+[[package]]
+name = "pest"
+version = "2.7.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95"
+dependencies = [
+ "memchr",
+ "thiserror",
+ "ucd-trie",
+]
+
+[[package]]
+name = "pest_derive"
+version = "2.7.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a"
+dependencies = [
+ "pest",
+ "pest_generator",
+]
+
+[[package]]
+name = "pest_generator"
+version = "2.7.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183"
+dependencies = [
+ "pest",
+ "pest_meta",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "pest_meta"
+version = "2.7.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f"
+dependencies = [
+ "once_cell",
+ "pest",
+ "sha2 0.10.8",
+]
+
+[[package]]
+name = "petgraph"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
+dependencies = [
+ "fixedbitset",
+ "indexmap 2.3.0",
+]
+
[[package]]
name = "pin-project"
-version = "1.1.4"
+version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0"
+checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
dependencies = [
 "pin-project-internal",
]

[[package]]
name = "pin-project-internal"
-version = "1.1.4"
+version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690"
+checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -4156,7 +4557,7 @@ dependencies = [
 "figment",
 "futures",
 "log",
- "redis",
+ "redis 0.22.3",
 "serde",
 "thiserror",
]

@@ -4192,20 +4593,6 @@ dependencies = [
 "windows-sys 0.48.0",
]

-[[package]]
-name = "polling"
-version = "3.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14"
-dependencies = [
- "cfg-if",
- "concurrent-queue",
- "pin-project-lite",
- "rustix 0.38.18",
- "tracing",
- "windows-sys 0.52.0",
-]
-
[[package]]
name = "polyval"
version = "0.5.3"
@@ -4233,21 +4620,22 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"

-[[package]]
-name = "portable-atomic-util"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1a7411625b38d51b41421c6333976adffd4674a925a978856734a2dc853449b"
-dependencies = [
- "portable-atomic 1.6.0",
-]
-
[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"

+[[package]]
+name = "prettyplease"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
+dependencies = [
+ "proc-macro2",
+ "syn 2.0.90",
+]
+
[[package]]
name = "proc-macro-crate"
version = "0.1.5"
@@ -4269,11 +4657,11 @@ dependencies = [

[[package]]
name = "proc-macro-crate"
-version = "3.2.0"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b"
+checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284"
dependencies = [
- "toml_edit 0.22.22",
+ "toml_edit 0.21.0",
]

[[package]]
@@ -4302,9 +4690,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
-version = "1.0.83"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
 "unicode-ident",
]

@@ -4317,7 +4705,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
 "version_check",
 "yansi",
]

@@ -4330,17 +4718,20 @@ dependencies = [
 "bs58 0.4.0",
 "cadence",
 "cadence-macros",
+ "das-core",
 "digital_asset_types",
 "futures",
 "heck 0.5.0",
 "mpl-bubblegum",
 "num-traits",
 "sea-orm",
+ "serde",
 "serde_json",
 "solana-sdk",
 "solana-transaction-status",
 "spl-account-compression",
 "spl-token",
+ "spl-token-2022",
 "sqlx",
 "thiserror",
 "tokio",
@@ -4348,18 +4739,77 @@ dependencies = [
]

[[package]]
-name = "prometheus"
-version = "0.13.3"
+name = "project-root"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bccbff07d5ed689c4087d20d7307a52ab6141edeedf487c3876a55b86cf63df"
+
+[[package]]
+name = "prometheus"
+version = "0.13.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c"
+dependencies = [
+ "cfg-if",
+ "fnv",
+ "lazy_static",
+ "memchr",
+ "parking_lot 0.12.1",
+ "protobuf",
+ "thiserror",
+]
+
+[[package]]
+name = "prost"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29"
+dependencies = [
+ "bytes",
+ "prost-derive",
+]
+
+[[package]]
+name = "prost-build"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4"
+dependencies = [
+ "bytes",
+ "heck 0.5.0",
+ "itertools 0.11.0",
+ "log",
+ "multimap",
+ "once_cell",
+ "petgraph",
+ "prettyplease",
+ "prost",
+ "prost-types",
+ "regex",
+ "syn 2.0.90",
+ "tempfile",
+]
+
+[[package]]
+name = "prost-derive"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1"
+dependencies = [
+ "anyhow",
+ "itertools 0.11.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "prost-types"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c"
+checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0"
dependencies = [
- "cfg-if",
- "fnv",
- "lazy_static",
- "memchr",
- "parking_lot 0.12.1",
- "protobuf",
- "thiserror",
+ "prost",
]

[[package]]
@@ -4368,6 +4818,15 @@ version = "2.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"

+[[package]]
+name = "protobuf-src"
+version = "1.1.0+21.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7ac8852baeb3cc6fb83b93646fb93c0ffe5d14bf138c945ceb4b9948ee0e3c1"
+dependencies = [
+ "autotools",
+]
+
[[package]]
name = "proxy-wasm"
version = "0.2.1"
@@ -4415,7 +4874,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -4461,7 +4920,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7"
dependencies = [
 "bytes",
 "libc",
- "socket2 0.5.7",
+ "socket2 0.5.5",
 "tracing",
 "windows-sys 0.48.0",
]

@@ -4664,6 +5123,29 @@ dependencies = [
 "url",
]

+[[package]]
+name = "redis"
+version = "0.25.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0d7a6955c7511f60f3ba9e86c6d02b3c3f144f8c24b288d1f4e18074ab8bbec"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "combine 4.6.6",
+ "futures-util",
+ "itoa",
+ "native-tls",
+ "percent-encoding",
+ "pin-project-lite",
+ "ryu",
+ "sha1_smol",
+ "socket2 0.5.5",
+ "tokio",
+ "tokio-native-tls",
+ "tokio-util",
+ "url",
+]
+
[[package]]
name = "redox_syscall"
version = "0.2.16"
@@ -5107,7 +5589,7 @@ checksum = "1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -5336,6 +5818,9 @@ name = "semver"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
+dependencies = [
+ "serde",
+]

[[package]]
name = "serde"
@@ -5363,7 +5848,7 @@ checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -5379,11 +5864,11 @@ dependencies = [

[[package]]
name = "serde_json"
-version = "1.0.132"
+version = "1.0.125"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
+checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed"
dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "itoa",
 "memchr",
 "ryu",
@@ -5431,7 +5916,7 @@ dependencies = [
 "chrono",
 "hex",
 "indexmap 1.9.3",
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "serde",
 "serde_derive",
 "serde_json",
@@ -5448,7 +5933,7 @@ dependencies = [
 "darling",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -5460,16 +5945,16 @@ dependencies = [
 "darling",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
name = "serde_yaml"
-version = "0.9.25"
+version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574"
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "itoa",
 "ryu",
 "serde",
@@ -5498,7 +5983,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -5662,12 +6147,12 @@ dependencies = [

[[package]]
name = "socket2"
-version = "0.5.7"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
+checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
dependencies = [
 "libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.48.0",
]

[[package]]
@@ -5688,9 +6173,9 @@ dependencies = [

[[package]]
name = "solana-account-decoder"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b109fd3a106e079005167e5b0e6f6d2c88bbedec32530837b584791a8b5abf36"
+checksum = "b4185d569c062983fc2a618ae4ee6fe1a139b36bce7a25045647c49bf0020a53"
dependencies = [
 "Inflector",
 "base64 0.21.7",
@@ -5713,9 +6198,9 @@ dependencies = [

[[package]]
name = "solana-clap-utils"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "074ef478856a45d5627270fbc6b331f91de9aae7128242d9e423931013fb8a2a"
+checksum = "c817832e71886dbea877d1aa911c9ce2e984a39081bb56ee30d4c835567827a6"
dependencies = [
 "chrono",
 "clap 2.34.0",
@@ -5730,16 +6215,16 @@ dependencies = [

[[package]]
name = "solana-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24a9f32c42402c4b9484d5868ac74b7e0a746e3905d8bfd756e1203e50cbb87e"
+checksum = "7fa9cc6e8e59adf70acbf5cac21342ae8b5e41cbf05519fe5f6287e84ab40f63"
dependencies = [
 "async-trait",
 "bincode",
 "dashmap",
 "futures",
 "futures-util",
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "indicatif",
 "log",
 "quinn",
@@ -5763,9 +6248,9 @@ dependencies = [

[[package]]
name = "solana-config-program"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d75b803860c0098e021a26f0624129007c15badd5b0bc2fbd9f0e1a73060d3b"
+checksum = "d02fb29934427f1487d2149fe8bcb405306729b2f22a2ad616bb8ffd024cee7b"
dependencies = [
 "bincode",
 "chrono",
@@ -5777,15 +6262,15 @@ dependencies = [

[[package]]
name = "solana-connection-cache"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9306ede13e8ceeab8a096bcf5fa7126731e44c201ca1721ea3c38d89bcd4111"
+checksum = "d8e5a2e26448b3e04ce673794994ff27f3972ec8a806c224eccc02e09f751ca5"
dependencies = [
 "async-trait",
 "bincode",
 "crossbeam-channel",
 "futures-util",
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "log",
 "rand 0.8.5",
 "rayon",
@@ -5799,9 +6284,9 @@ dependencies = [

[[package]]
name = "solana-frozen-abi"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03ab2c30c15311b511c0d1151e4ab6bc9a3e080a37e7c6e7c2d96f5784cf9434"
+checksum = "20a6ef2db80dceb124b7bf81cca3300804bf427d2711973fc3df450ed7dfb26d"
dependencies = [
 "block-buffer 0.10.4",
 "bs58 0.4.0",
@@ -5824,21 +6309,21 @@ dependencies = [

[[package]]
name = "solana-frozen-abi-macro"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c142f779c3633ac83c84d04ff06c70e1f558c876f13358bed77ba629c7417932"
+checksum = "70088de7d4067d19a7455609e2b393e6086bd847bb39c4d2bf234fc14827ef9e"
dependencies = [
 "proc-macro2",
 "quote",
 "rustc_version",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
name = "solana-geyser-plugin-interface"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4023dfd15b33053dd7193f58a267345d776601c3c7e37ad667e2ba43c7b8fadd"
+checksum = "63f1835fe954e305097c83b4ce8548e675647eef6a3ea9ec13dbfe2d6b869398"
dependencies = [
 "log",
 "solana-sdk",
@@ -5848,9 +6333,9 @@ dependencies = [

[[package]]
name = "solana-logger"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "121d36ffb3c6b958763312cbc697fbccba46ee837d3a0aa4fc0e90fcb3b884f3"
+checksum = "b129da15193f26db62d62ae6bb9f72361f361bcdc36054be3ab8bc04cc7a4f31"
dependencies = [
 "env_logger 0.9.3",
 "lazy_static",
@@ -5859,9 +6344,9 @@ dependencies = [

[[package]]
name = "solana-measure"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c01a7f9cdc9d9d37a3d5651b2fe7ec9d433c2a3470b9f35897e373b421f0737"
+checksum = "6d195b73093a4964ba6b5943418054a5fcbba23eafdd0842fd973fcceac1a967"
dependencies = [
 "log",
 "solana-sdk",
]

@@ -5869,9 +6354,9 @@ dependencies = [

[[package]]
name = "solana-metrics"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71e36052aff6be1536bdf6f737c6e69aca9dbb6a2f3f582e14ecb0ddc0cd66ce"
+checksum = "fe7b06860ffbf4cf4714182e1b7eb00eb3ff0bcc9cff615d05e01e488923883c"
dependencies = [
 "crossbeam-channel",
 "gethostname",
@@ -5884,9 +6369,9 @@ dependencies = [

[[package]]
name = "solana-net-utils"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a1f5c6be9c5b272866673741e1ebc64b2ea2118e5c6301babbce526fdfb15f4"
+checksum = "9400b50b8439868a99b5fa2d961d74e37b7a6c1d5865759d0b1c906c2ad6b2a9"
dependencies = [
 "bincode",
 "clap 3.2.25",
@@ -5896,7 +6381,7 @@ dependencies = [
 "rand 0.8.5",
 "serde",
 "serde_derive",
- "socket2 0.5.7",
+ "socket2 0.5.5",
 "solana-logger",
 "solana-sdk",
 "solana-version",
@@ -5906,9 +6391,9 @@ dependencies = [

[[package]]
name = "solana-perf"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28acaf22477566a0fbddd67249ea5d859b39bacdb624aff3fadd3c5745e2643c"
+checksum = "b01a386e852df67031195094628851b8d239dd71fe17b721c3993277e68cb3ab"
dependencies = [
 "ahash 0.8.11",
 "bincode",
@@ -5935,9 +6420,9 @@ dependencies = [

[[package]]
name = "solana-program"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c10f4588cefd716b24a1a40dd32c278e43a560ab8ce4de6b5805c9d113afdfa1"
+checksum = "fb2b2c8babfae4cace1a25b6efa00418f3acd852cf55d7cecc0360d3c5050479"
dependencies = [
 "ark-bn254",
 "ark-ec",
@@ -5990,9 +6475,9 @@ dependencies = [

[[package]]
name = "solana-program-runtime"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbf0c3eab2a80f514289af1f422c121defb030937643c43b117959d6f1932fb5"
+checksum = "0444f9440f4459d377c41470b2eb48b527def81f3052b7a121f6aa8c7350cc52"
dependencies = [
 "base64 0.21.7",
 "bincode",
@@ -6018,9 +6503,9 @@ dependencies = [

[[package]]
name = "solana-pubsub-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b064e76909d33821b80fdd826e6757251934a52958220c92639f634bea90366d"
+checksum = "0ee4a39e41e789b6f100c97d9f40c1d08381bf6e3d0e351065e542091cddb039"
dependencies = [
 "crossbeam-channel",
 "futures-util",
@@ -6043,9 +6528,9 @@ dependencies = [

[[package]]
name = "solana-quic-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a90e40ee593f6e9ddd722d296df56743514ae804975a76d47e7afed4e3da244"
+checksum = "baad755c76ee0aab8890f0ef873e61b8b3012c523d33bfa5b062fe9be8cef370"
dependencies = [
 "async-mutex",
 "async-trait",
@@ -6070,9 +6555,9 @@ dependencies = [

[[package]]
name = "solana-rayon-threadlimit"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66468f9c014992167de10cc68aad6ac8919a8c8ff428dc88c0d2b4da8c02b8b7"
+checksum = "c1c2a0ccb0be7ca79e8ff0d7c786bce586433a5687ffbea522453d0b41c4bf4a"
dependencies = [
 "lazy_static",
 "num_cpus",
]

@@ -6080,9 +6565,9 @@ dependencies = [

[[package]]
name = "solana-remote-wallet"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c191019f4d4f84281a6d0dd9a43181146b33019627fc394e42e08ade8976b431"
+checksum = "3d042a812537e3507e1c163c7573fc04c96e12d3eba512e3fe74c7393229fa39"
dependencies = [
 "console",
 "dialoguer",
@@ -6099,9 +6584,9 @@ dependencies = [

[[package]]
name = "solana-rpc-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36ed4628e338077c195ddbf790693d410123d17dec0a319b5accb4aaee3fb15c"
+checksum = "3c6f5560283bd0a6833d1bd816299785058a870fff51b0df399fdb3ce92c8484"
dependencies = [
 "async-trait",
 "base64 0.21.7",
@@ -6125,9 +6610,9 @@ dependencies = [

[[package]]
name = "solana-rpc-client-api"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83c913551faa4a1ae4bbfef6af19f3a5cf847285c05b4409e37c8993b3444229"
+checksum = "2e4ca77f89caa9071acadb1eed19c28a6691fd63d0563ed927c96bf734cf1c9c"
dependencies = [
 "base64 0.21.7",
 "bs58 0.4.0",
@@ -6147,9 +6632,9 @@ dependencies = [

[[package]]
name = "solana-rpc-client-nonce-utils"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a47b6bb1834e6141a799db62bbdcf80d17a7d58d7bc1684c614e01a7293d7cf"
+checksum = "42a6ea9ad81d63f18fb8b3a9b39643cc43eaf909199d67037e724562301d1df7"
dependencies = [
 "clap 2.34.0",
 "solana-clap-utils",
@@ -6160,9 +6645,9 @@ dependencies = [

[[package]]
name = "solana-sdk"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "580ad66c2f7a4c3cb3244fe21440546bd500f5ecb955ad9826e92a78dded8009"
+checksum = "b5e0f0def5c5af07f53d321cea7b104487b522cfff77c3cae3da361bfe956e9e"
dependencies = [
 "assert_matches",
 "base64 0.21.7",
@@ -6215,15 +6700,15 @@ dependencies = [

[[package]]
name = "solana-sdk-macro"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b75d0f193a27719257af19144fdaebec0415d1c9e9226ae4bd29b791be5e9bd"
+checksum = "c55c196c8050834c391a34b58e3c9fd86b15452ef1feeeafa1dbeb9d2291dfec"
dependencies = [
 "bs58 0.4.0",
 "proc-macro2",
 "quote",
 "rustversion",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -6234,16 +6719,16 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183"

[[package]]
name = "solana-streamer"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8476e41ad94fe492e8c06697ee35912cf3080aae0c9e9ac6430835256ccf056"
+checksum = "749720d82c5f31f7ec326da1e0baac098201de70f0874719172a55309433b449"
dependencies = [
 "async-channel 1.9.0",
 "bytes",
 "crossbeam-channel",
 "futures-util",
 "histogram",
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "itertools 0.10.5",
 "libc",
 "log",
@@ -6267,9 +6752,9 @@ dependencies = [

[[package]]
name = "solana-thin-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8c02245d0d232430e79dc0d624aa42d50006097c3aec99ac82ac299eaa3a73f"
+checksum = "84535de1253afb6ccc4ae6852eb013ca734c439a902ec5e4684b90ed649a37c2"
dependencies = [
 "bincode",
 "log",
@@ -6282,14 +6767,14 @@ dependencies = [

[[package]]
name = "solana-tpu-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67251506ed03de15f1347b46636b45c47da6be75015b4a13f0620b21beb00566"
+checksum = "3ff514462bb715aaea9bc5c0ee60f83ab3f91e04279337c6b07d054153b616dc"
dependencies = [
 "async-trait",
 "bincode",
 "futures-util",
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "indicatif",
 "log",
 "rayon",
@@ -6306,9 +6791,9 @@ dependencies = [

[[package]]
name = "solana-transaction-status"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d3d36db1b2ab2801afd5482aad9fb15ed7959f774c81a77299fdd0ddcf839d4"
+checksum = "670e387049812d42bdc8fcc4ff75452ff3cb00657af979a90f55f6d37dba9dd9"
dependencies = [
 "Inflector",
 "base64 0.21.7",
@@ -6331,9 +6816,9 @@ dependencies = [

[[package]]
name = "solana-udp-client"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a754a3c2265eb02e0c35aeaca96643951f03cee6b376afe12e0cf8860ffccd1"
+checksum = "11183dae826f942ebd0401712c8a52367a4a6312f1cd325f304cd9551226fc8b"
dependencies = [
 "async-trait",
 "solana-connection-cache",
@@ -6346,9 +6831,9 @@ dependencies = [

[[package]]
name = "solana-version"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f44776bd685cc02e67ba264384acc12ef2931d01d1a9f851cb8cdbd3ce455b9e"
+checksum = "8e8d518e61ce22c812df23d9c61ab9bcbef4df3e3d3dcaa74a999625f11bcf07"
dependencies = [
 "log",
 "rustc_version",
@@ -6362,9 +6847,9 @@ dependencies = [

[[package]]
name = "solana-vote-program"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25810970c91feb579bd3f67dca215fce971522e42bfd59696af89c5dfebd997c"
+checksum = "5743503143fb2259c41a973a78e9aeeb8e21f1b03543c3bb85449926ea692719"
dependencies = [
 "bincode",
 "log",
@@ -6384,9 +6869,9 @@ dependencies = [

[[package]]
name = "solana-zk-token-sdk"
-version = "1.18.26"
+version = "1.18.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cbdf4249b6dfcbba7d84e2b53313698043f60f8e22ce48286e6fbe8a17c8d16"
+checksum = "57ee07fa523b4cfcff68de774db7aa87d2da2c4357155a90bacd9a0a0af70a99"
dependencies = [
 "aes-gcm-siv",
 "base64 0.21.7",
@@ -6511,7 +6996,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b"
dependencies = [
 "quote",
 "spl-discriminator-syn",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -6523,7 +7008,7 @@ dependencies = [
 "proc-macro2",
 "quote",
 "sha2 0.10.8",
- "syn 2.0.66",
+ "syn 2.0.90",
 "thiserror",
]

@@ -6582,7 +7067,7 @@ dependencies = [
 "proc-macro2",
 "quote",
 "sha2 0.10.8",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -6806,39 +7291,12 @@ dependencies = [
 "tokio-rustls 0.23.4",
]

-[[package]]
-name = "stable_deref_trait"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
-
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"

-[[package]]
-name = "stretto"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70a313e115c2cd9a88d99d60386bc88641c853d468b2c3bc454c294f385fc084"
-dependencies = [
- "async-channel 2.1.0",
- "async-io 2.3.2",
- "atomic",
- "crossbeam-channel",
- "futures",
- "getrandom 0.2.10",
- "parking_lot 0.12.1",
- "rand 0.8.5",
- "seahash",
- "thiserror",
- "tracing",
- "wg",
- "xxhash-rust",
-]
-
[[package]]
name = "stringprep"
version = "0.1.4"
@@ -6881,9 +7339,9 @@ dependencies = [

[[package]]
name = "syn"
-version = "2.0.66"
+version = "2.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5"
+checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
dependencies = [
 "proc-macro2",
 "quote",
@@ -6899,7 +7357,7 @@ dependencies = [
 "proc-macro-error",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -6986,22 +7444,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"

[[package]]
name = "thiserror"
-version = "1.0.65"
+version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5"
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
 "thiserror-impl",
]

[[package]]
name = "thiserror-impl"
-version = "1.0.65"
+version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602"
+checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -7014,6 +7472,28 @@ dependencies = [
 "once_cell",
]

+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
+[[package]]
+name = "thrift"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09"
+dependencies = [
+ "byteorder",
+ "integer-encoding",
+ "log",
+ "ordered-float 2.10.1",
+ "threadpool",
+]
+
[[package]]
name = "time"
version = "0.1.45"
@@ -7033,6 +7513,8 @@ checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe"
dependencies = [
 "deranged",
 "itoa",
+ "libc",
+ "num_threads",
 "serde",
 "time-core",
 "time-macros",
@@ -7089,33 +7571,42 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

[[package]]
name = "tokio"
-version = "1.33.0"
+version = "1.39.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653"
+checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1"
dependencies = [
 "backtrace",
 "bytes",
 "libc",
 "mio",
- "num_cpus",
 "parking_lot 0.12.1",
 "pin-project-lite",
 "signal-hook-registry",
- "socket2 0.5.7",
+ "socket2 0.5.5",
 "tokio-macros",
 "tracing",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "tokio-io-timeout"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf"
+dependencies = [
+ "pin-project-lite",
+ "tokio",
]

[[package]]
name = "tokio-macros"
-version = "2.1.0"
+version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
+checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -7199,6 +7690,18 @@ dependencies = [
 "serde",
]

+[[package]]
+name = "toml"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit 0.19.15",
+]
+
[[package]]
name = "toml"
version = "0.8.8"
@@ -7213,9 +7716,9 @@ dependencies = [

[[package]]
name = "toml_datetime"
-version = "0.6.8"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
+checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
dependencies = [
 "serde",
]

@@ -7226,9 +7729,11 @@ version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
+ "serde",
+ "serde_spanned",
 "toml_datetime",
- "winnow 0.5.16",
+ "winnow",
]

[[package]]
@@ -7237,22 +7742,69 @@ version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03"
dependencies = [
- "indexmap 2.6.0",
+ "indexmap 2.3.0",
 "serde",
 "serde_spanned",
 "toml_datetime",
- "winnow 0.5.16",
+ "winnow",
]

[[package]]
-name = "toml_edit"
-version = "0.22.22"
+name = "tonic"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
+checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
dependencies = [
- "indexmap 2.6.0",
- "toml_datetime",
- "winnow 0.6.20",
+ "async-stream",
+ "async-trait",
+ "axum",
+ "base64 0.21.7",
+ "bytes",
+ "flate2",
+ "h2",
+ "http",
+ "http-body",
+ "hyper",
+ "hyper-timeout",
+ "percent-encoding",
+ "pin-project",
+ "prost",
+ "rustls 0.21.12",
+ "rustls-native-certs",
+ "rustls-pemfile",
+ "tokio",
+ "tokio-rustls 0.24.1",
+ "tokio-stream",
+ "tower",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "tonic-build"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
+dependencies = [
+ "prettyplease",
+ "proc-macro2",
+ "prost-build",
+ "quote",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "tonic-health"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f80db390246dfb46553481f6024f0082ba00178ea495dbb99e70ba9a4fafb5e1"
+dependencies = [
+ "async-stream",
+ "prost",
+ "tokio",
+ "tokio-stream",
+ "tonic",
]

[[package]]
@@ -7261,6 +7813,15 @@ version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
dependencies = [
+ "futures-core",
+ "futures-util",
+ "indexmap 1.9.3",
+ "pin-project",
+ "pin-project-lite",
+ "rand 0.8.5",
+ "slab",
+ "tokio",
+ "tokio-util",
 "tower-layer",
 "tower-service",
 "tracing",
@@ -7317,7 +7878,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
]

[[package]]
@@ -7341,6 +7902,24 @@ dependencies = [
 "tracing-core",
]

+[[package]]
+name = "tracing-opentelemetry"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c67ac25c5407e7b961fafc6f7e9aa5958fd297aada2d20fa2ae1737357e55596"
+dependencies = [
+ "js-sys",
+ "once_cell",
+ "opentelemetry",
+ "opentelemetry_sdk",
+ "smallvec",
+ "tracing",
+ "tracing-core",
+ "tracing-log",
+ "tracing-subscriber",
+ "web-time",
+]
+
[[package]]
name = "tracing-serde"
version = "0.1.3"
@@ -7401,16 +7980,6 @@ dependencies = [
 "txn_forwarder",
]

-[[package]]
-name = "triomphe"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f"
-dependencies = [
- "serde",
- "stable_deref_trait",
-]
-
[[package]]
name = "try-lock"
version = "0.2.4"
@@ -7469,6 +8038,12 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"

+[[package]]
+name = "ucd-trie"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9"
+
[[package]]
name = "uncased"
version = "0.9.9"
@@ -7510,9 +8085,9 @@ dependencies = [

[[package]]
name = "unicode-segmentation"
-version = "1.10.1"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
+checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"

[[package]]
name = "unicode-width"
@@ -7553,9 +8128,9 @@ dependencies = [

[[package]]
name = "unsafe-libyaml"
-version = "0.2.9"
+version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa"
+checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"

[[package]]
name = "untrusted"
@@ -7590,6 +8165,12 @@ dependencies = [
 "percent-encoding",
]

+[[package]]
+name = "urlencoding"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
+
[[package]]
name = "utf-8"
version = "0.7.6"
@@ -7636,6 +8217,18 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"

+[[package]]
+name = "vergen"
+version = "8.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566"
+dependencies = [
+ "anyhow",
+ "rustc_version",
+ "rustversion",
+ "time 0.3.29",
+]
+
[[package]]
name = "version_check"
version = "0.9.4"
@@ -7689,9 +8282,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"

[[package]]
name = "wasm-bindgen"
-version = "0.2.95"
+version = "0.2.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
+checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
dependencies = [
 "cfg-if",
 "once_cell",
@@ -7700,16 +8293,16 @@ dependencies = [

[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.95"
+version = "0.2.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
+checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
dependencies = [
 "bumpalo",
 "log",
 "once_cell",
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
 "wasm-bindgen-shared",
]

@@ -7727,9 +8320,9 @@ dependencies = [

[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.95"
+version = "0.2.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
+checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
dependencies = [
 "quote",
 "wasm-bindgen-macro-support",
]

[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.95"
+version = "0.2.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
+checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.66",
+ "syn 2.0.90",
 "wasm-bindgen-backend",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.95"
+version = "0.2.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
+checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"

[[package]]
name = "web-sys"
@@ -7764,6 +8357,16 @@ dependencies = [
 "wasm-bindgen",
]

+[[package]]
+name = "web-time"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum =
"aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki" version = "0.22.4" @@ -7798,19 +8401,6 @@ version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" -[[package]] -name = "wg" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dadf90865f15d5c2d87f126a56ce3715b3a233641acdd10f59200aa7f4c81fb9" -dependencies = [ - "event-listener 5.3.0", - "futures-core", - "parking_lot 0.12.1", - "pin-project-lite", - "triomphe", -] - [[package]] name = "whoami" version = "1.4.1" @@ -8009,15 +8599,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winnow" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.50.0" @@ -8055,12 +8636,6 @@ dependencies = [ "time 0.3.29", ] -[[package]] -name = "xxhash-rust" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9828b178da53440fa9c766a3d2f73f7cf5d0ac1fe3980c1e5018d899fd19e07b" - [[package]] name = "yaml-rust" version = "0.4.5" @@ -8085,6 +8660,66 @@ dependencies = [ "time 0.3.29", ] +[[package]] +name = "yellowstone-grpc-client" +version = "1.15.3+solana.1.18.22" +source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.15.1+solana.1.18.22#0bdedb5841d2eea663b8f2f441d37fea83e65933" +dependencies = [ + "bytes", + "futures", + "thiserror", + "tonic", + "tonic-health", + "yellowstone-grpc-proto", +] + +[[package]] +name = "yellowstone-grpc-proto" +version = "1.14.2+solana.1.18.22" +source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.15.1+solana.1.18.22#0bdedb5841d2eea663b8f2f441d37fea83e65933" +dependencies = [ + "anyhow", + "bincode", + "prost", + "protobuf-src", + "solana-account-decoder", + "solana-sdk", + "solana-transaction-status", + "tonic", + "tonic-build", +] + +[[package]] +name = "yellowstone-grpc-tools" +version = "1.0.0-rc.11+solana.1.18.22" +source = "git+https://github.com/rpcpool/yellowstone-grpc.git?tag=v1.15.1+solana.1.18.22#0bdedb5841d2eea663b8f2f441d37fea83e65933" +dependencies = [ + "anyhow", + "async-trait", + "atty", + "cargo-lock", + "clap 4.4.8", + "futures", + "git-version", + "hyper", + "json5", + "lazy_static", + "project-root", + "prometheus", + "serde", + "serde_json", + "serde_yaml", + "tokio", + "tokio-stream", + "tonic", + "tonic-health", + "tracing", + "tracing-subscriber", + "vergen", + "yellowstone-grpc-client", + "yellowstone-grpc-proto", +] + [[package]] name = "zerocopy" version = "0.7.32" @@ -8102,7 +8737,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.90", ] [[package]] @@ -8122,7 +8757,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 101452cee..c0c8708e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,14 +1,17 @@ [workspace] members = [ + "bubblegum", "blockbuster", "core", "das_api", "digital_asset_types", "integration_tests", "metaplex-rpc-proxy", + "metadata_json", "migration", "nft_ingester", "ops", + "grpc-ingest", "program_transformers", 
"tools/acc_forwarder", "tools/bgtask_creator", @@ -26,24 +29,31 @@ repository = "https://github.com/metaplex-foundation/digital-asset-rpc-infrastru version = "0.7.2" [workspace.dependencies] + anchor-client = "0.29.0" anchor-lang = "0.29.0" anyhow = "1.0.75" async-std = "1.0.0" +async-stream = "0.3.5" async-trait = "0.1.60" +atty = "0.2.14" backon = "0.4.1" -blockbuster = {path = "blockbuster"} +blockbuster = { path = "blockbuster" } borsh = "~0.10.3" borsh-derive = "~0.10.3" bs58 = "0.4.0" -bytemuck = {version = "1.14.0", features = ["derive"]} +bytemuck = { version = "1.14.0", features = ["derive"] } cadence = "0.29.0" cadence-macros = "0.29.0" +cargo-lock = "9.0.0" chrono = "0.4.19" clap = "4.2.2" -das-core = {path = "core"} -das_api = {path = "das_api"} -digital_asset_types = {path = "digital_asset_types"} +das-core = { path = "core" } +das-metadata-json = { path = "metadata_json" } +das-bubblegum = { path = "bubblegum" } +das_api = { path = "das_api" } +derive_more = { version = "0.99.17" } +digital_asset_types = { path = "digital_asset_types" } enum-iterator = "1.2.0" enum-iterator-derive = "1.1.0" env_logger = "0.10.0" @@ -52,6 +62,7 @@ figment = "0.10.8" flatbuffers = "23.1.21" function_name = "0.3.0" futures = "0.3.28" +git-version = "0.3.5" heck = "0.5.0" hex = "0.4.3" hyper = "0.14.23" @@ -59,31 +70,37 @@ indexmap = "1.9.3" indicatif = "0.17.5" insta = "1.34.0" itertools = "0.10.1" +json5 = "0.4.1" jsonpath_lib = "0.3.0" jsonrpsee = "0.16.2" jsonrpsee-core = "0.16.2" lazy_static = "1.4.0" log = "0.4.17" +lru = "0.12.3" metrics = "0.20.1" -migration = {path = "migration"} +migration = { path = "migration" } mime_guess = "2.0.4" mpl-bubblegum = "1.2.0" mpl-account-compression = "0.4.2" -mpl-core = {version = "0.8.0-beta.1", features = ["serde"]} +mpl-core = { version = "0.8.0-beta.1", features = ["serde"] } mpl-noop = "0.2.1" mpl-token-metadata = "4.1.1" -nft_ingester = {path = "nft_ingester"} +nft_ingester = { path = "nft_ingester" } num-derive = "0.3.3" num-traits = "0.2.15" once_cell = "1.19.0" open-rpc-derive = "0.0.4" open-rpc-schema = "0.0.4" +opentelemetry = "0.21.0" +opentelemetry-jaeger = "0.20.0" +opentelemetry_sdk = "0.21.1" plerkle_messenger = "1.6.0" plerkle_serialization = "1.8.0" -program_transformers = {path = "program_transformers"} +program_transformers = { path = "program_transformers" } prometheus = "0.13.3" proxy-wasm = "0.2.0" rand = "0.8.5" +redis = "0.25.3" regex = "1.6.0" reqwest = "0.11.13" rust-crypto = "0.2.36" @@ -94,6 +111,7 @@ sea-orm-migration = "0.10.6" sea-query = "0.28.1" serde = "1.0.137" serde_json = "1.0.81" +serde_yaml = "0.9.34" serial_test = "2.0.0" solana-account-decoder = "~1.18.15" solana-client = "~1.18.15" @@ -106,11 +124,12 @@ spl-account-compression = "0.4.2" spl-associated-token-account = ">= 1.1.3, < 3.0" spl-concurrent-merkle-tree = "0.4.1" spl-noop = "0.2.0" -spl-pod = {version = "0.1.0", features = ["serde-traits"]} +spl-pod = { version = "0.1.0", features = ["serde-traits"] } spl-token = ">= 3.5.0, < 5.0" -spl-token-2022 = {version = "1.0", features = ["no-entrypoint"]} +spl-token-2022 = { version = "1.0", features = ["no-entrypoint"] } spl-token-group-interface = "0.1.0" spl-token-metadata-interface = "0.2.0" +sha3 = "0.10.8" sqlx = "0.6.2" stretto = "0.8.4" thiserror = "1.0.31" @@ -119,11 +138,16 @@ tokio-stream = "0.1.14" tower = "0.4.13" tower-http = "0.3.5" tracing = "0.1.35" +tracing-opentelemetry = "0.22.0" tracing-subscriber = "0.3.16" -txn_forwarder = {path = "tools/txn_forwarder"} +txn_forwarder = { path = 
"tools/txn_forwarder" } url = "2.3.1" +vergen = "8.2.1" wasi = "0.7.0" wasm-bindgen = "0.2.83" +yellowstone-grpc-client = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.15.1+solana.1.18.22" } # tag is geyser plugin +yellowstone-grpc-proto = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.15.1+solana.1.18.22" } # tag is geyser plugin +yellowstone-grpc-tools = { git = "https://github.com/rpcpool/yellowstone-grpc.git", tag = "v1.15.1+solana.1.18.22", default-features = false } # tag is geyser plugin [workspace.lints.clippy] clone_on_ref_ptr = "deny" diff --git a/Migrator.Dockerfile b/Migrator.Dockerfile index 63c72bc3d..d275aa7d6 100644 --- a/Migrator.Dockerfile +++ b/Migrator.Dockerfile @@ -1,6 +1,6 @@ FROM das-api/builder AS files -FROM rust:1.76-bullseye +FROM rust:1.75-bullseye COPY init.sql /init.sql ENV INIT_FILE_PATH=/init.sql COPY --from=files /das/migration /bins/migration diff --git a/Proxy.Dockerfile b/Proxy.Dockerfile index 52e35de55..add959fab 100644 --- a/Proxy.Dockerfile +++ b/Proxy.Dockerfile @@ -1,9 +1,10 @@ -FROM rust:1.76-bullseye AS builder +FROM rust:1.75-bullseye AS builder RUN cargo install wasm-pack RUN mkdir /rust COPY ./Cargo.toml /rust COPY ./core /rust/core +COPY ./backfill /rust/backfill COPY ./das_api /rust/das_api COPY ./digital_asset_types /rust/digital_asset_types COPY ./integration_tests /rust/integration_tests diff --git a/README.md b/README.md index 87d87aa79..ce7af8e5e 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,17 @@ super-fast querying and searching, as well as serves the merkle proofs needed to The API specification is located here https://github.com/metaplex-foundation/api-specifications This spec is what providers of this api must implement against. +#### GRPC-INGEST [/grpc-ingest/README.md](/grpc-ingest/README.md) + +This repo also houses the GRPC-INGEST component. This is a reimplementation of nft-ingester +This components separates grpc and ingester into two separate components. This is to allow for more flexibility in the future also increases the performance of the system. + +The two components are: + +- grpc2redis - This component listens to a gRPC stream (triton's Dragon's Mouth gRPC) for account and transaction updates and pushes them to a pipeline that flushes data to redis at regular intervals in real time. + +- ingester - This component listens to the redis stream and processes the data using program transformers. It also downloads token metadata json and stores them in a postgres db using sea-orm. + ### Infrastructure and Deployment Examples Along with the above rust binaries, this repo also maintains examples and best practice settings for running the entire infrastructure. 
diff --git a/backfiller.yaml b/backfiller.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/blockbuster/src/lib.rs b/blockbuster/src/lib.rs index 983c72897..e8e6eeedf 100644 --- a/blockbuster/src/lib.rs +++ b/blockbuster/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(deprecated)] pub mod error; pub mod instruction; pub mod program_handler; diff --git a/blockbuster/src/programs/mod.rs b/blockbuster/src/programs/mod.rs index 8da2feed0..474bce179 100644 --- a/blockbuster/src/programs/mod.rs +++ b/blockbuster/src/programs/mod.rs @@ -2,12 +2,14 @@ use bubblegum::BubblegumInstruction; use mpl_core_program::MplCoreAccountState; use token_account::TokenProgramAccount; use token_extensions::TokenExtensionsProgramAccount; +use token_inscriptions::TokenInscriptionAccount; use token_metadata::TokenMetadataAccountState; pub mod bubblegum; pub mod mpl_core_program; pub mod token_account; pub mod token_extensions; +pub mod token_inscriptions; pub mod token_metadata; // Note: `ProgramParseResult` used to contain the following variants that have been deprecated and @@ -30,5 +32,6 @@ pub enum ProgramParseResult<'a> { TokenMetadata(&'a TokenMetadataAccountState), TokenProgramAccount(&'a TokenProgramAccount), TokenExtensionsProgramAccount(&'a TokenExtensionsProgramAccount), + TokenInscriptionAccount(&'a TokenInscriptionAccount), Unknown, } diff --git a/blockbuster/src/programs/token_extensions/extension.rs b/blockbuster/src/programs/token_extensions/extension.rs index c9a0b9b4a..6dba2b57c 100644 --- a/blockbuster/src/programs/token_extensions/extension.rs +++ b/blockbuster/src/programs/token_extensions/extension.rs @@ -13,7 +13,6 @@ use spl_token_2022::extension::{ default_account_state::DefaultAccountState, group_member_pointer::GroupMemberPointer, group_pointer::GroupPointer, - immutable_owner::ImmutableOwner, interest_bearing_mint::{BasisPoints, InterestBearingConfig}, memo_transfer::MemoTransfer, metadata_pointer::MetadataPointer, @@ -77,9 +76,6 @@ pub struct ShadowDefaultAccountState { pub state: PodAccountState, } -#[derive(Clone, Copy, Debug, Default, PartialEq, Zeroable, Serialize, Deserialize)] -pub struct ShadowImmutableOwner; - #[derive(Clone, Copy, Debug, Default, PartialEq, Zeroable, Serialize, Deserialize)] pub struct ShadowInterestBearingConfig { pub rate_authority: OptionalNonZeroPubkey, @@ -97,20 +93,16 @@ pub struct ShadowMemoTransfer { #[derive(Clone, Copy, Debug, Default, PartialEq, Zeroable, Serialize, Deserialize)] pub struct ShadowMetadataPointer { - pub authority: OptionalNonZeroPubkey, pub metadata_address: OptionalNonZeroPubkey, } #[derive(Clone, Copy, Debug, Default, PartialEq, Zeroable, Serialize, Deserialize)] pub struct ShadowGroupMemberPointer { - pub authority: OptionalNonZeroPubkey, pub member_address: OptionalNonZeroPubkey, } #[derive(Clone, Copy, Debug, Default, PartialEq, Zeroable, Serialize, Deserialize)] pub struct ShadowGroupPointer { - /// Authority that can set the group address - pub authority: OptionalNonZeroPubkey, /// Account address that holds the group pub group_address: OptionalNonZeroPubkey, } @@ -348,12 +340,6 @@ impl From for ShadowDefaultAccountState { } } -impl From for ShadowImmutableOwner { - fn from(_: ImmutableOwner) -> Self { - ShadowImmutableOwner - } -} - impl From for ShadowConfidentialTransferFeeAmount { fn from(original: ConfidentialTransferFeeAmount) -> Self { ShadowConfidentialTransferFeeAmount { @@ -381,7 +367,6 @@ impl From for ShadowMemoTransfer { impl From for ShadowMetadataPointer { fn from(original: MetadataPointer) -> Self { 
ShadowMetadataPointer { - authority: original.authority, metadata_address: original.metadata_address, } } @@ -390,7 +375,6 @@ impl From for ShadowMetadataPointer { impl From for ShadowGroupPointer { fn from(original: GroupPointer) -> Self { ShadowGroupPointer { - authority: original.authority, group_address: original.group_address, } } @@ -420,7 +404,6 @@ impl From for ShadowTokenGroupMember { impl From for ShadowGroupMemberPointer { fn from(original: GroupMemberPointer) -> Self { ShadowGroupMemberPointer { - authority: original.authority, member_address: original.member_address, } } diff --git a/blockbuster/src/programs/token_extensions/mod.rs b/blockbuster/src/programs/token_extensions/mod.rs index ec1633c9c..6272f96b5 100644 --- a/blockbuster/src/programs/token_extensions/mod.rs +++ b/blockbuster/src/programs/token_extensions/mod.rs @@ -4,6 +4,7 @@ use crate::{ program_handler::{ParseResult, ProgramParser}, programs::ProgramParseResult, }; + use serde::{Deserialize, Serialize}; use solana_sdk::{pubkey::Pubkey, pubkeys}; use spl_token_2022::{ @@ -14,10 +15,12 @@ use spl_token_2022::{ default_account_state::DefaultAccountState, group_member_pointer::GroupMemberPointer, group_pointer::GroupPointer, + immutable_owner::ImmutableOwner, interest_bearing_mint::InterestBearingConfig, memo_transfer::MemoTransfer, metadata_pointer::MetadataPointer, mint_close_authority::MintCloseAuthority, + non_transferable::{NonTransferable, NonTransferableAccount}, permanent_delegate::PermanentDelegate, transfer_fee::{TransferFeeAmount, TransferFeeConfig}, transfer_hook::TransferHook, @@ -41,7 +44,6 @@ use self::extension::{ pub struct MintAccountExtensions { pub default_account_state: Option, pub confidential_transfer_mint: Option, - pub confidential_transfer_account: Option, pub confidential_transfer_fee_config: Option, pub interest_bearing_config: Option, pub transfer_fee_config: Option, @@ -54,6 +56,29 @@ pub struct MintAccountExtensions { pub token_group: Option, pub group_member_pointer: Option, pub token_group_member: Option, + // TODO : add this when spl-token-2022 is updated + // pub scaled_ui_amount: Option, + pub non_transferable: Option, + pub immutable_owner: Option, +} + +impl MintAccountExtensions { + pub fn is_some(&self) -> bool { + self.default_account_state.is_some() + || self.confidential_transfer_mint.is_some() + || self.confidential_transfer_fee_config.is_some() + || self.interest_bearing_config.is_some() + || self.transfer_fee_config.is_some() + || self.mint_close_authority.is_some() + || self.permanent_delegate.is_some() + || self.metadata_pointer.is_some() + || self.metadata.is_some() + || self.transfer_hook.is_some() + || self.group_pointer.is_some() + || self.token_group.is_some() + || self.group_member_pointer.is_some() + || self.token_group_member.is_some() + } } #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] @@ -62,6 +87,17 @@ pub struct TokenAccountExtensions { pub cpi_guard: Option, pub memo_transfer: Option, pub transfer_fee_amount: Option, + pub immutable_owner: Option, + pub non_transferable_account: Option, +} + +impl TokenAccountExtensions { + pub fn is_some(&self) -> bool { + self.confidential_transfer.is_some() + || self.cpi_guard.is_some() + || self.memo_transfer.is_some() + || self.transfer_fee_amount.is_some() + } } #[derive(Debug, PartialEq)] pub struct TokenAccount { @@ -134,6 +170,16 @@ impl ProgramParser for Token2022AccountParser { let cpi_guard = account.get_extension::().ok().copied(); let memo_transfer = 
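+            // Probe each known extension: get_extension::<T>() returns Err when the
+            // extension is absent, which .ok().copied() collapses into None.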
account.get_extension::().ok().copied(); let transfer_fee_amount = account.get_extension::().ok().copied(); + let immutable_owner = account + .get_extension::() + .ok() + .copied() + .map(|_| true); + let non_transferable_account = account + .get_extension::() + .ok() + .copied() + .map(|_| true); // Create a structured account with extensions let structured_account = TokenAccount { @@ -144,6 +190,8 @@ impl ProgramParser for Token2022AccountParser { cpi_guard: cpi_guard.map(ShadowCpiGuard::from), memo_transfer: memo_transfer.map(ShadowMemoTransfer::from), transfer_fee_amount: transfer_fee_amount.map(ShadowTransferFeeAmount::from), + immutable_owner, + non_transferable_account, }, }; @@ -153,10 +201,7 @@ impl ProgramParser for Token2022AccountParser { .get_extension::() .ok() .copied(); - let confidential_transfer_account = mint - .get_extension::() - .ok() - .copied(); + let confidential_transfer_fee_config = mint .get_extension::() .ok() @@ -174,14 +219,23 @@ impl ProgramParser for Token2022AccountParser { let group_member_pointer = mint.get_extension::().ok().copied(); let token_group_member = mint.get_extension::().ok().copied(); let transfer_hook = mint.get_extension::().ok().copied(); + let non_transferable = mint + .get_extension::() + .ok() + .copied() + .map(|_| true); + + let immutable_owner = mint + .get_extension::() + .ok() + .copied() + .map(|_| true); let structured_mint = MintAccount { account: mint.base, extensions: MintAccountExtensions { confidential_transfer_mint: confidential_transfer_mint .map(ShadowConfidentialTransferMint::from), - confidential_transfer_account: confidential_transfer_account - .map(ShadowConfidentialTransferAccount::from), confidential_transfer_fee_config: confidential_transfer_fee_config .map(ShadowConfidentialTransferFeeConfig::from), default_account_state: default_account_state @@ -198,6 +252,8 @@ impl ProgramParser for Token2022AccountParser { token_group: token_group.map(ShadowTokenGroup::from), group_member_pointer: group_member_pointer.map(ShadowGroupMemberPointer::from), token_group_member: token_group_member.map(ShadowTokenGroupMember::from), + non_transferable, + immutable_owner, }, }; result = TokenExtensionsProgramAccount::MintAccount(structured_mint); diff --git a/blockbuster/src/programs/token_inscriptions/mod.rs b/blockbuster/src/programs/token_inscriptions/mod.rs new file mode 100644 index 000000000..7e40f5e07 --- /dev/null +++ b/blockbuster/src/programs/token_inscriptions/mod.rs @@ -0,0 +1,141 @@ +use serde::{Deserialize, Serialize}; +use solana_sdk::{pubkey::Pubkey, pubkeys}; + +use crate::{ + error::BlockbusterError, + program_handler::{ParseResult, ProgramParser}, +}; + +use super::ProgramParseResult; + +pubkeys!( + inscription_program_id, + "inscokhJarcjaEs59QbQ7hYjrKz25LEPRfCbP8EmdUp" +); + +pub struct TokenInscriptionParser; + +#[derive(Debug, Serialize, Deserialize)] +pub struct InscriptionData { + pub authority: String, + pub root: String, + pub content: String, + pub encoding: String, + pub inscription_data: String, + pub order: u64, + pub size: u32, + pub validation_hash: Option, +} + +impl InscriptionData { + pub const BASE_SIZE: usize = 121; + pub const INSCRIPTION_ACC_DATA_DISC: [u8; 8] = [232, 120, 205, 47, 153, 239, 229, 224]; + + pub fn try_unpack_data(data: &[u8]) -> Result { + let acc_disc = &data[0..8]; + + if acc_disc != Self::INSCRIPTION_ACC_DATA_DISC { + return Err(BlockbusterError::InvalidAccountType); + } + + if data.len() < Self::BASE_SIZE { + return Err(BlockbusterError::CustomDeserializationError( + 
"Inscription Data is too short".to_string(), + )); + } + + let authority = Pubkey::try_from(&data[8..40]).unwrap(); + let mint = Pubkey::try_from(&data[40..72]).unwrap(); + let inscription_data = Pubkey::try_from(&data[72..104]).unwrap(); + let order = u64::from_le_bytes(data[104..112].try_into().unwrap()); + let size = u32::from_le_bytes(data[112..116].try_into().unwrap()); + let content_type_len = u32::from_le_bytes(data[116..120].try_into().unwrap()) as usize; + let content = String::from_utf8(data[120..120 + content_type_len].to_vec()).unwrap(); + let encoding_len = u32::from_le_bytes( + data[120 + content_type_len..124 + content_type_len] + .try_into() + .unwrap(), + ) as usize; + + let encoding = String::from_utf8( + data[124 + content_type_len..124 + content_type_len + encoding_len].to_vec(), + ) + .unwrap(); + + let validation_exists = u8::from_le_bytes( + data[124 + content_type_len + encoding_len..124 + content_type_len + encoding_len + 1] + .try_into() + .unwrap(), + ); + + let validation_hash = if validation_exists == 1 { + let validation_hash_len = u32::from_le_bytes( + data[124 + content_type_len + encoding_len + 1 + ..128 + content_type_len + encoding_len + 1] + .try_into() + .unwrap(), + ) as usize; + Some( + String::from_utf8( + data[128 + content_type_len + encoding_len + 1 + ..128 + content_type_len + encoding_len + 1 + validation_hash_len] + .to_vec(), + ) + .unwrap(), + ) + } else { + None + }; + Ok(InscriptionData { + authority: authority.to_string(), + root: mint.to_string(), + content, + encoding, + inscription_data: inscription_data.to_string(), + order, + size, + validation_hash, + }) + } +} + +pub struct TokenInscriptionAccount { + pub data: InscriptionData, +} + +impl ParseResult for TokenInscriptionAccount { + fn result(&self) -> &Self + where + Self: Sized, + { + self + } + fn result_type(&self) -> ProgramParseResult { + ProgramParseResult::TokenInscriptionAccount(self) + } +} + +impl ProgramParser for TokenInscriptionParser { + fn key(&self) -> Pubkey { + inscription_program_id() + } + fn key_match(&self, key: &Pubkey) -> bool { + key == &inscription_program_id() + } + + fn handles_account_updates(&self) -> bool { + true + } + + fn handles_instructions(&self) -> bool { + false + } + + fn handle_account( + &self, + account_data: &[u8], + ) -> Result, BlockbusterError> { + let data = InscriptionData::try_unpack_data(account_data)?; + Ok(Box::new(TokenInscriptionAccount { data })) + } +} diff --git a/bubblegum/Cargo.toml b/bubblegum/Cargo.toml new file mode 100644 index 000000000..0ecd4704a --- /dev/null +++ b/bubblegum/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "das-bubblegum" +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +publish = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +blockbuster = { workspace = true } +bs58 = { workspace = true } +das-core = { workspace = true } +solana-client = { workspace = true } +borsh = { workspace = true } +digital_asset_types = { workspace = true, features = [ + "json_types", + "sql_types", +] } +anchor-client = { workspace = true } +futures = { workspace = true } +clap = { workspace = true } +log = { workspace = true } +solana-program = { workspace = true } +program_transformers = { workspace = true } +heck = { workspace = true } +mpl-bubblegum = { workspace = true } +num-traits = { workspace = true } +sea-orm = { workspace = true } +serde_json = { workspace = true } +solana-sdk = { workspace = true } +sha3 = { workspace = true } 
+solana-transaction-status = { workspace = true }
+spl-account-compression = { workspace = true, features = ["no-entrypoint"] }
+spl-token = { workspace = true, features = ["no-entrypoint"] }
+sqlx = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["time"] }
+tracing = { workspace = true }
+
+[lints]
+workspace = true

diff --git a/bubblegum/README.md b/bubblegum/README.md
new file mode 100644
index 000000000..e98983303
--- /dev/null
+++ b/bubblegum/README.md
@@ -0,0 +1,26 @@
+## DAS Backfill
+
+The DAS Backfill library facilitates the initial setup and data backfilling for DAS, focusing on the bubblegum program. This program's indexing relies heavily on transaction data. While the library supports parallel backfilling across different trees, it ensures that transactions within each tree are processed sequentially. This approach guarantees an accurate representation of every modification to the merkle tree within DAS.
+
+## Usage
+
+```rust
+use clap::Parser;
+use das_bubblegum::{start_backfill, BackfillArgs, BubblegumContext};
+use das_core::Rpc;
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let database_pool = sqlx::PgPool::connect("your_database_url").await?;
+    let solana_rpc = Rpc::new("your_solana_rpc_url");
+
+    let context = BubblegumContext::new(database_pool, solana_rpc);
+    let args = BackfillArgs::parse(); // Parses args from CLI
+
+    start_backfill(context, args).await
+}
+```

diff --git a/bubblegum/src/backfill/gap.rs b/bubblegum/src/backfill/gap.rs
new file mode 100644
index 000000000..e167e888d
--- /dev/null
+++ b/bubblegum/src/backfill/gap.rs
@@ -0,0 +1,139 @@
+use crate::{error::ErrorKind, Rpc};
+use anyhow::Result;
+use clap::Args;
+use sea_orm::{DatabaseConnection, DbBackend, FromQueryResult, Statement, Value};
+use solana_client::rpc_response::RpcConfirmedTransactionStatusWithSignature;
+use solana_sdk::{pubkey::Pubkey, signature::Signature};
+use std::str::FromStr;
+use tokio::sync::mpsc::Sender;
+
+const GET_SIGNATURES_FOR_ADDRESS_LIMIT: usize = 1000;
+
+#[derive(Debug, Clone, Args)]
+pub struct ConfigBackfiller {
+    /// Solana RPC URL
+    #[arg(long, env)]
+    pub solana_rpc_url: String,
+}
+
+const TREE_GAP_SQL: &str = r#"
+WITH sequenced_data AS (
+    SELECT
+        tree,
+        seq,
+        LEAD(seq) OVER (ORDER BY seq ASC) AS next_seq,
+        tx AS current_tx,
+        LEAD(tx) OVER (ORDER BY seq ASC) AS next_tx
+    FROM
+        cl_audits_v2
+    WHERE
+        tree = $1
+),
+gaps AS (
+    SELECT
+        tree,
+        seq AS gap_start_seq,
+        next_seq AS gap_end_seq,
+        current_tx AS lower_bound_tx,
+        next_tx AS upper_bound_tx
+    FROM
+        sequenced_data
+    WHERE
+        next_seq IS NOT NULL AND
+        next_seq - seq > 1
+)
+SELECT
+    tree,
+    gap_start_seq,
+    gap_end_seq,
+    lower_bound_tx,
+    upper_bound_tx
+FROM
+    gaps
+ORDER BY
+    gap_start_seq;
+"#;
+
+#[derive(Debug, FromQueryResult, PartialEq, Clone)]
+pub struct TreeGapModel {
+    pub tree: Vec<u8>,
+    pub gap_start_seq: i64,
+    pub gap_end_seq: i64,
+    pub lower_bound_tx: Vec<u8>,
+    pub upper_bound_tx: Vec<u8>,
+}
+
+impl TreeGapModel {
+    pub async fn find(conn: &DatabaseConnection, tree: Pubkey) -> Result<Vec<Self>, ErrorKind> {
+        let statement = Statement::from_sql_and_values(
+            DbBackend::Postgres,
+            TREE_GAP_SQL,
+            vec![Value::Bytes(Some(Box::new(tree.as_ref().to_vec())))],
+        );
+
+        TreeGapModel::find_by_statement(statement)
+            .all(conn)
+            .await
+            .map_err(Into::into)
+    }
+}
+
+impl TryFrom<TreeGapModel> for TreeGapFill {
+    type Error = ErrorKind;
+
+    fn try_from(model: TreeGapModel) -> Result<Self, Self::Error> {
+        let tree = Pubkey::try_from(model.tree).map_err(|_|
ErrorKind::TryFromPubkey)?; + let upper = + Signature::try_from(model.upper_bound_tx).map_err(|_| ErrorKind::TryFromSignature)?; + let lower = + Signature::try_from(model.lower_bound_tx).map_err(|_| ErrorKind::TryFromSignature)?; + + Ok(Self::new(tree, Some(upper), Some(lower))) + } +} + +pub struct TreeGapFill { + tree: Pubkey, + before: Option, + until: Option, +} + +impl TreeGapFill { + pub const fn new(tree: Pubkey, before: Option, until: Option) -> Self { + Self { + tree, + before, + until, + } + } + + pub async fn crawl(&self, client: Rpc, sender: Sender) -> Result<()> { + let mut before = self.before; + + loop { + let sigs = client + .get_signatures_for_address(&self.tree, before, self.until) + .await?; + let sig_count = sigs.len(); + + let successful_transactions = sigs + .into_iter() + .filter(|transaction| transaction.err.is_none()) + .collect::>(); + + for sig in successful_transactions.iter() { + let sig = Signature::from_str(&sig.signature)?; + + sender.send(sig).await?; + + before = Some(sig); + } + + if sig_count < GET_SIGNATURES_FOR_ADDRESS_LIMIT { + break; + } + } + + Ok(()) + } +} diff --git a/bubblegum/src/backfill/mod.rs b/bubblegum/src/backfill/mod.rs new file mode 100644 index 000000000..5a4a874d6 --- /dev/null +++ b/bubblegum/src/backfill/mod.rs @@ -0,0 +1,2 @@ +pub mod gap; +pub mod worker; diff --git a/bubblegum/src/backfill/worker/gap.rs b/bubblegum/src/backfill/worker/gap.rs new file mode 100644 index 000000000..68523fdbe --- /dev/null +++ b/bubblegum/src/backfill/worker/gap.rs @@ -0,0 +1,64 @@ +use anyhow::Result; +use clap::Parser; +use das_core::Rpc; +use futures::{stream::FuturesUnordered, StreamExt}; +use log::error; +use solana_sdk::signature::Signature; +use tokio::{ + sync::mpsc::{channel, Sender}, + task::JoinHandle, +}; + +use crate::{backfill::gap::TreeGapFill, BubblegumContext}; + +#[derive(Parser, Debug, Clone)] +pub struct GapWorkerArgs { + /// The size of the signature channel. + #[arg(long, env, default_value = "1000")] + pub gap_channel_size: usize, + + /// The number of gap workers. 
+ #[arg(long, env, default_value = "25")] + pub gap_worker_count: usize, +} + +impl GapWorkerArgs { + pub fn start( + &self, + context: BubblegumContext, + forward: Sender, + ) -> Result<(JoinHandle<()>, Sender)> { + let (gap_sender, mut gap_receiver) = channel::(self.gap_channel_size); + let gap_worker_count = self.gap_worker_count; + + let handler = tokio::spawn(async move { + let mut handlers = FuturesUnordered::new(); + let sender = forward.clone(); + + while let Some(gap) = gap_receiver.recv().await { + if handlers.len() >= gap_worker_count { + handlers.next().await; + } + + let client = context.solana_rpc.clone(); + let sender = sender.clone(); + + let handle = spawn_crawl_worker(client, sender, gap); + + handlers.push(handle); + } + + futures::future::join_all(handlers).await; + }); + + Ok((handler, gap_sender)) + } +} + +fn spawn_crawl_worker(client: Rpc, sender: Sender, gap: TreeGapFill) -> JoinHandle<()> { + tokio::spawn(async move { + if let Err(e) = gap.crawl(client, sender).await { + error!("tree transaction: {:?}", e); + } + }) +} diff --git a/bubblegum/src/backfill/worker/mod.rs b/bubblegum/src/backfill/worker/mod.rs new file mode 100644 index 000000000..0bc7f9e36 --- /dev/null +++ b/bubblegum/src/backfill/worker/mod.rs @@ -0,0 +1,9 @@ +mod gap; +mod program_transformer; +mod transaction; +mod tree; + +pub use gap::GapWorkerArgs; +pub use program_transformer::ProgramTransformerWorkerArgs; +pub use transaction::SignatureWorkerArgs; +pub use tree::TreeWorkerArgs; diff --git a/bubblegum/src/backfill/worker/program_transformer.rs b/bubblegum/src/backfill/worker/program_transformer.rs new file mode 100644 index 000000000..75f75c0a7 --- /dev/null +++ b/bubblegum/src/backfill/worker/program_transformer.rs @@ -0,0 +1,70 @@ +use anyhow::Result; +use clap::Parser; +use das_core::{create_download_metadata_notifier, DownloadMetadataInfo}; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use log::error; +use program_transformers::{ProgramTransformer, TransactionInfo}; +use std::sync::Arc; +use tokio::sync::mpsc::{channel, Sender, UnboundedSender}; +use tokio::task::JoinHandle; + +use crate::BubblegumContext; + +#[derive(Parser, Debug, Clone)] +pub struct ProgramTransformerWorkerArgs { + #[arg(long, env, default_value = "100000")] + pub program_transformer_channel_size: usize, + #[arg(long, env, default_value = "50")] + pub program_transformer_worker_count: usize, +} + +impl ProgramTransformerWorkerArgs { + pub fn start( + &self, + context: BubblegumContext, + forwarder: UnboundedSender, + ) -> Result<(JoinHandle<()>, Sender)> { + let (sender, mut receiver) = + channel::(self.program_transformer_channel_size); + + let worker_forwarder = forwarder.clone(); + let worker_pool = context.database_pool.clone(); + let worker_count = self.program_transformer_worker_count; + let handle = tokio::spawn(async move { + let download_metadata_notifier = + create_download_metadata_notifier(worker_forwarder.clone()).await; + let program_transformer = Arc::new(ProgramTransformer::new( + worker_pool.clone(), + download_metadata_notifier, + )); + + let mut handlers = FuturesUnordered::new(); + + while let Some(transaction) = receiver.recv().await { + if handlers.len() >= worker_count { + handlers.next().await; + } + + let program_transformer_clone = Arc::clone(&program_transformer); + let handle = tokio::spawn(async move { + if let Err(err) = program_transformer_clone + .handle_transaction(&transaction) + .await + { + error!( + "Failed to handle bubblegum instruction for txn {:?}: {:?}", + 
transaction.signature, err + ); + } + }); + + handlers.push(handle); + } + + futures::future::join_all(handlers).await; + }); + + Ok((handle, sender)) + } +} diff --git a/bubblegum/src/backfill/worker/transaction.rs b/bubblegum/src/backfill/worker/transaction.rs new file mode 100644 index 000000000..910b79a92 --- /dev/null +++ b/bubblegum/src/backfill/worker/transaction.rs @@ -0,0 +1,205 @@ +use crate::{error::ErrorKind, BubblegumContext}; +use anyhow::Result; +use clap::Parser; +use das_core::Rpc; +use futures::{stream::FuturesUnordered, StreamExt}; +use log::error; +use program_transformers::TransactionInfo; +use solana_program::pubkey::Pubkey; +use solana_sdk::instruction::CompiledInstruction; +use solana_sdk::signature::Signature; +use solana_sdk::transaction::VersionedTransaction; +use solana_transaction_status::{ + option_serializer::OptionSerializer, EncodedConfirmedTransactionWithStatusMeta, + InnerInstruction, InnerInstructions, UiInstruction, +}; +use tokio::{ + sync::mpsc::{channel, Sender}, + task::JoinHandle, +}; + +pub struct PubkeyString(pub String); + +impl TryFrom for Pubkey { + type Error = ErrorKind; + + fn try_from(value: PubkeyString) -> Result { + let decoded_bytes = bs58::decode(value.0) + .into_vec() + .map_err(|e| ErrorKind::Generic(e.to_string()))?; + + Pubkey::try_from(decoded_bytes) + .map_err(|_| ErrorKind::Generic("unable to convert pubkey".to_string())) + } +} + +#[derive(Debug)] +pub struct FetchedEncodedTransactionWithStatusMeta(pub EncodedConfirmedTransactionWithStatusMeta); + +impl TryFrom for TransactionInfo { + type Error = ErrorKind; + + fn try_from( + fetched_transaction: FetchedEncodedTransactionWithStatusMeta, + ) -> Result { + let mut account_keys = Vec::new(); + let encoded_transaction_with_status_meta = fetched_transaction.0; + + let ui_transaction: VersionedTransaction = encoded_transaction_with_status_meta + .transaction + .transaction + .decode() + .ok_or(ErrorKind::Generic( + "unable to decode transaction".to_string(), + ))?; + + let signature = ui_transaction.signatures[0]; + + let msg = ui_transaction.message; + + let meta = encoded_transaction_with_status_meta + .transaction + .meta + .ok_or(ErrorKind::Generic( + "transaction metadata is missing".to_string(), + ))?; + + for address in msg.static_account_keys().iter().copied() { + account_keys.push(address); + } + + let ui_loaded_addresses = match meta.loaded_addresses { + OptionSerializer::Some(addresses) => addresses, + OptionSerializer::None => { + return Err(ErrorKind::Generic( + "loaded addresses data is missing".to_string(), + )) + } + OptionSerializer::Skip => { + return Err(ErrorKind::Generic( + "loaded addresses are skipped".to_string(), + )); + } + }; + + let writtable_loaded_addresses = ui_loaded_addresses.writable; + let readable_loaded_addresses = ui_loaded_addresses.readonly; + + if msg.address_table_lookups().is_some() { + for address in writtable_loaded_addresses { + account_keys.push(PubkeyString(address).try_into()?); + } + + for address in readable_loaded_addresses { + account_keys.push(PubkeyString(address).try_into()?); + } + } + + let mut meta_inner_instructions = Vec::new(); + + if let OptionSerializer::Some(inner_instructions) = meta.inner_instructions { + for ix in inner_instructions { + let mut instructions = Vec::new(); + + for inner in ix.instructions { + if let UiInstruction::Compiled(compiled) = inner { + instructions.push(InnerInstruction { + stack_height: compiled.stack_height, + instruction: CompiledInstruction { + program_id_index: 
compiled.program_id_index, + accounts: compiled.accounts, + data: bs58::decode(compiled.data).into_vec().map_err(|e| { + ErrorKind::Generic(format!("Error decoding data: {}", e)) + })?, + }, + }); + } + } + + meta_inner_instructions.push(InnerInstructions { + index: ix.index, + instructions, + }); + } + } + + Ok(Self { + slot: encoded_transaction_with_status_meta.slot, + account_keys, + signature, + message_instructions: msg.instructions().to_vec(), + meta_inner_instructions, + }) + } +} + +#[derive(Parser, Clone, Debug)] +pub struct SignatureWorkerArgs { + /// The size of the signature channel. + #[arg(long, env, default_value = "100000")] + pub signature_channel_size: usize, + /// The number of transaction workers. + #[arg(long, env, default_value = "50")] + pub signature_worker_count: usize, +} + +type TransactionSender = Sender; + +impl SignatureWorkerArgs { + pub fn start( + &self, + context: BubblegumContext, + forwarder: TransactionSender, + ) -> Result<(JoinHandle<()>, Sender)> { + let (sig_sender, mut sig_receiver) = channel::(self.signature_channel_size); + let worker_count = self.signature_worker_count; + + let handle = tokio::spawn(async move { + let mut handlers = FuturesUnordered::new(); + + while let Some(signature) = sig_receiver.recv().await { + if handlers.len() >= worker_count { + handlers.next().await; + } + + let solana_rpc = context.solana_rpc.clone(); + let transaction_sender = forwarder.clone(); + + let handle = spawn_transaction_worker(solana_rpc, transaction_sender, signature); + + handlers.push(handle); + } + + futures::future::join_all(handlers).await; + }); + + Ok((handle, sig_sender)) + } +} + +async fn queue_transaction<'a>( + client: Rpc, + sender: Sender, + signature: Signature, +) -> Result<(), ErrorKind> { + let transaction = client.get_transaction(&signature).await?; + + sender + .send(FetchedEncodedTransactionWithStatusMeta(transaction).try_into()?) 
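+        // The channel is bounded, so send().await blocks here when downstream
+        // program transformers lag, applying backpressure to this worker.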
+ .await + .map_err(|e| ErrorKind::Generic(e.to_string()))?; + + Ok(()) +} + +fn spawn_transaction_worker( + client: Rpc, + sender: Sender, + signature: Signature, +) -> JoinHandle<()> { + tokio::spawn(async move { + if let Err(e) = queue_transaction(client, sender, signature).await { + error!("queue transaction: {:?}", e); + } + }) +} diff --git a/bubblegum/src/backfill/worker/tree.rs b/bubblegum/src/backfill/worker/tree.rs new file mode 100644 index 000000000..e02c21305 --- /dev/null +++ b/bubblegum/src/backfill/worker/tree.rs @@ -0,0 +1,124 @@ +use crate::{ + backfill::gap::{TreeGapFill, TreeGapModel}, + tree::TreeResponse, + BubblegumContext, +}; +use anyhow::Result; +use clap::Parser; +use das_core::MetadataJsonDownloadWorkerArgs; +use digital_asset_types::dao::cl_audits_v2; +use log::error; +use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, QueryOrder, SqlxPostgresConnector}; +use solana_sdk::signature::Signature; +use tokio::task::JoinHandle; + +use super::{GapWorkerArgs, ProgramTransformerWorkerArgs, SignatureWorkerArgs}; + +#[derive(Debug, Clone, Parser)] +pub struct TreeWorkerArgs { + #[clap(flatten)] + pub metadata_json_download_worker: MetadataJsonDownloadWorkerArgs, + + #[clap(flatten)] + pub signature_worker: SignatureWorkerArgs, + + #[clap(flatten)] + pub gap_worker: GapWorkerArgs, + + #[clap(flatten)] + pub program_transformer_worker: ProgramTransformerWorkerArgs, + + #[clap(long, env, default_value = "false")] + pub force: bool, +} +impl TreeWorkerArgs { + pub fn start(&self, context: BubblegumContext, tree: TreeResponse) -> JoinHandle> { + let db_pool = context.database_pool.clone(); + let metadata_json_download_db_pool = context.database_pool.clone(); + + let program_transformer_context = context.clone(); + let signature_context = context.clone(); + + let metadata_json_download_worker_args = self.metadata_json_download_worker.clone(); + let program_transformer_worker_args = self.program_transformer_worker.clone(); + let signature_worker_args = self.signature_worker.clone(); + let gap_worker_args = self.gap_worker.clone(); + let force = self.force; + + tokio::spawn(async move { + let (metadata_json_download_worker, metadata_json_download_sender) = + metadata_json_download_worker_args.start(metadata_json_download_db_pool)?; + + let (program_transformer_worker, transaction_info_sender) = + program_transformer_worker_args + .start(program_transformer_context, metadata_json_download_sender)?; + + let (signature_worker, signature_sender) = + signature_worker_args.start(signature_context, transaction_info_sender)?; + + let (gap_worker, tree_gap_sender) = gap_worker_args.start(context, signature_sender)?; + + { + let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(db_pool); + + let mut gaps = TreeGapModel::find(&conn, tree.pubkey) + .await? + .into_iter() + .map(TryInto::try_into) + .collect::, _>>()?; + + let upper_known_seq = if force { + None + } else { + cl_audits_v2::Entity::find() + .filter(cl_audits_v2::Column::Tree.eq(tree.pubkey.as_ref().to_vec())) + .order_by_desc(cl_audits_v2::Column::Seq) + .one(&conn) + .await? + }; + + let lower_known_seq = if force { + None + } else { + cl_audits_v2::Entity::find() + .filter(cl_audits_v2::Column::Tree.eq(tree.pubkey.as_ref().to_vec())) + .order_by_asc(cl_audits_v2::Column::Seq) + .one(&conn) + .await? 
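+                // The lowest and highest indexed seqs bound what is already in
+                // cl_audits_v2; anything outside that range is queued as a gap below.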
+ }; + + if let Some(upper_seq) = upper_known_seq { + let signature = Signature::try_from(upper_seq.tx.as_ref())?; + gaps.push(TreeGapFill::new(tree.pubkey, None, Some(signature))); + // Reprocess the entire tree if force is true or if the tree has a seq of 0 to keep the current behavior + } else if force || tree.seq > 0 { + gaps.push(TreeGapFill::new(tree.pubkey, None, None)); + } + + if let Some(lower_seq) = lower_known_seq.filter(|seq| seq.seq > 1) { + let signature = Signature::try_from(lower_seq.tx.as_ref())?; + + gaps.push(TreeGapFill::new(tree.pubkey, Some(signature), None)); + } + + for gap in gaps { + if let Err(e) = tree_gap_sender.send(gap).await { + error!("send gap: {:?}", e); + } + } + } + + drop(tree_gap_sender); + + futures::future::try_join4( + gap_worker, + signature_worker, + program_transformer_worker, + metadata_json_download_worker, + ) + .await?; + + Ok(()) + }) + } +} diff --git a/bubblegum/src/error.rs b/bubblegum/src/error.rs new file mode 100644 index 000000000..420a15a52 --- /dev/null +++ b/bubblegum/src/error.rs @@ -0,0 +1,19 @@ +#[derive(Debug, thiserror::Error)] +pub enum ErrorKind { + #[error("anchor")] + Anchor(#[from] anchor_client::anchor_lang::error::Error), + #[error("solana rpc")] + Rpc(#[from] solana_client::client_error::ClientError), + #[error("parse pubkey")] + ParsePubkey(#[from] solana_sdk::pubkey::ParsePubkeyError), + #[error("serialize tree response")] + SerializeTreeResponse, + #[error("sea orm")] + Database(#[from] sea_orm::DbErr), + #[error("try from pubkey")] + TryFromPubkey, + #[error("try from signature")] + TryFromSignature, + #[error("generic error: {0}")] + Generic(String), +} diff --git a/bubblegum/src/lib.rs b/bubblegum/src/lib.rs new file mode 100644 index 000000000..463526337 --- /dev/null +++ b/bubblegum/src/lib.rs @@ -0,0 +1,188 @@ +mod backfill; +mod error; +mod tree; + +use das_core::{MetadataJsonDownloadWorkerArgs, Rpc}; +pub use error::ErrorKind; +mod verify; +pub use verify::ProofReport; + +use anyhow::Result; +use backfill::worker::{ProgramTransformerWorkerArgs, SignatureWorkerArgs, TreeWorkerArgs}; +use clap::Parser; +use digital_asset_types::dao::cl_audits_v2; +use futures::{stream::FuturesUnordered, StreamExt}; +use sea_orm::ColumnTrait; +use sea_orm::QueryOrder; +use sea_orm::SqlxPostgresConnector; +use sea_orm::{EntityTrait, QueryFilter}; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::signature::Signature; +use std::str::FromStr; +use tracing::error; +use tree::TreeResponse; + +#[derive(Clone)] +pub struct BubblegumContext { + pub database_pool: sqlx::PgPool, + pub solana_rpc: Rpc, +} + +impl BubblegumContext { + pub const fn new(database_pool: sqlx::PgPool, solana_rpc: Rpc) -> Self { + Self { + database_pool, + solana_rpc, + } + } +} + +#[derive(Debug, Parser, Clone)] +pub struct BackfillArgs { + /// Number of tree crawler workers + #[arg(long, env, default_value = "20")] + pub tree_crawler_count: usize, + + /// The list of trees to crawl. If not specified, all trees will be crawled. + #[arg(long, env, use_value_delimiter = true)] + pub only_trees: Option>, + + #[clap(flatten)] + pub tree_worker: TreeWorkerArgs, +} + +pub async fn start_backfill(context: BubblegumContext, args: BackfillArgs) -> Result<()> { + let trees = if let Some(ref only_trees) = args.only_trees { + TreeResponse::find(&context.solana_rpc, only_trees.clone()).await? + } else { + TreeResponse::all(&context.solana_rpc).await? 
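+        // No trees were specified, so discover every account-compression tree on-chain.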
+ }; + + let mut crawl_handles = FuturesUnordered::new(); + + for tree in trees { + if crawl_handles.len() >= args.tree_crawler_count { + crawl_handles.next().await; + } + let context = context.clone(); + let handle = args.tree_worker.start(context, tree); + + crawl_handles.push(handle); + } + + futures::future::try_join_all(crawl_handles).await?; + + Ok(()) +} + +#[derive(Debug, Parser, Clone)] +pub struct BubblegumReplayArgs { + /// The tree to replay. + #[arg(long, env)] + pub tree: String, + + /// The list of sequences to replay. If not specified, all sequences will be replayed. + #[arg(long, env, use_value_delimiter = true)] + pub only_sequences: Option>, + + #[clap(flatten)] + pub signature_worker: SignatureWorkerArgs, + + #[clap(flatten)] + pub program_transformer_worker: ProgramTransformerWorkerArgs, + + #[clap(flatten)] + pub metadata_json_download_worker: MetadataJsonDownloadWorkerArgs, +} + +pub async fn start_bubblegum_replay( + context: BubblegumContext, + args: BubblegumReplayArgs, +) -> Result<()> { + let pubkey = Pubkey::from_str(&args.tree) + .map(|pubkey| pubkey.to_bytes().to_vec()) + .map_err(|e| anyhow::anyhow!("Invalid tree pubkey: {:?}", e))?; + + let database_pool = context.database_pool.clone(); + let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(database_pool); + + let mut query = cl_audits_v2::Entity::find() + .filter(cl_audits_v2::Column::Tree.eq(pubkey)) + .order_by_asc(cl_audits_v2::Column::Seq); + + if let Some(sequences) = args.only_sequences { + query = query.filter(cl_audits_v2::Column::Seq.is_in(sequences)); + } + + let cl_audits = query.all(&conn).await?; + + let metadata_json_download_worker_args = args.metadata_json_download_worker.clone(); + let program_transformer_worker_args = args.program_transformer_worker.clone(); + let signature_worker_args = args.signature_worker.clone(); + + let metadata_json_download_db_pool = context.database_pool.clone(); + let program_transformer_context = context.clone(); + let signature_context = context.clone(); + + let (metadata_json_download_worker, metadata_json_download_sender) = + metadata_json_download_worker_args.start(metadata_json_download_db_pool)?; + + let (program_transformer_worker, transaction_info_sender) = program_transformer_worker_args + .start(program_transformer_context, metadata_json_download_sender)?; + + let (signature_worker, signature_sender) = + signature_worker_args.start(signature_context, transaction_info_sender)?; + + for audit in cl_audits { + let signature = Signature::try_from(audit.tx.as_ref())?; + if let Err(e) = signature_sender.send(signature).await { + error!("send signature: {:?}", e); + } + } + + drop(signature_sender); + + futures::future::try_join3( + signature_worker, + program_transformer_worker, + metadata_json_download_worker, + ) + .await?; + + Ok(()) +} + +#[derive(Debug, Parser, Clone)] +pub struct VerifyArgs { + /// The list of trees to verify. If not specified, all trees will be crawled. + #[arg(long, env, use_value_delimiter = true)] + pub only_trees: Option>, + + #[arg(long, env, default_value = "20")] + pub max_concurrency: usize, +} + +pub async fn verify_bubblegum( + context: BubblegumContext, + args: VerifyArgs, +) -> Result> { + let trees = if let Some(ref only_trees) = args.only_trees { + TreeResponse::find(&context.solana_rpc, only_trees.clone()).await? + } else { + TreeResponse::all(&context.solana_rpc).await? 
+ }; + + let (sender, receiver) = tokio::sync::mpsc::channel(trees.len()); + + tokio::spawn(async move { + for tree in trees { + if let Ok(report) = verify::check(context.clone(), tree, args.max_concurrency).await { + if sender.send(report).await.is_err() { + error!("Failed to send report"); + } + } + } + }); + + Ok(receiver) +} diff --git a/bubblegum/src/tree.rs b/bubblegum/src/tree.rs new file mode 100644 index 000000000..405c90037 --- /dev/null +++ b/bubblegum/src/tree.rs @@ -0,0 +1,117 @@ +use super::error::ErrorKind; +use anyhow::Result; +use borsh::BorshDeserialize; +use das_core::Rpc; +use solana_client::rpc_filter::{Memcmp, RpcFilterType}; +use solana_sdk::{account::Account, pubkey::Pubkey}; +use spl_account_compression::id; +use spl_account_compression::state::{ + merkle_tree_get_size, ConcurrentMerkleTreeHeader, CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1, +}; +use std::str::FromStr; + +#[derive(Clone)] +pub struct TreeHeaderResponse { + pub max_depth: u32, + pub max_buffer_size: u32, + pub creation_slot: u64, + pub size: usize, +} + +impl TryFrom for TreeHeaderResponse { + type Error = ErrorKind; + + fn try_from(payload: ConcurrentMerkleTreeHeader) -> Result { + let size = merkle_tree_get_size(&payload)?; + + Ok(Self { + max_depth: payload.get_max_depth(), + max_buffer_size: payload.get_max_buffer_size(), + creation_slot: payload.get_creation_slot(), + size, + }) + } +} + +pub struct TreeResponse { + pub pubkey: Pubkey, + pub tree_header: TreeHeaderResponse, + pub seq: u64, +} + +impl TreeResponse { + pub fn try_from_rpc(pubkey: Pubkey, account: Account) -> Result { + let bytes = account.data.as_slice(); + + let (header_bytes, rest) = bytes.split_at(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); + let header: ConcurrentMerkleTreeHeader = + ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?; + + let merkle_tree_size = merkle_tree_get_size(&header)?; + let (tree_bytes, _canopy_bytes) = rest.split_at(merkle_tree_size); + + let seq_bytes = tree_bytes[0..8].try_into()?; + let seq = u64::from_le_bytes(seq_bytes); + + let (auth, _) = Pubkey::find_program_address(&[pubkey.as_ref()], &mpl_bubblegum::ID); + + header.assert_valid_authority(&auth)?; + + let tree_header = header.try_into()?; + + Ok(Self { + pubkey, + tree_header, + seq, + }) + } + + pub async fn all(client: &Rpc) -> Result, ErrorKind> { + Ok(client + .get_program_accounts( + &id(), + Some(vec![RpcFilterType::Memcmp(Memcmp::new_raw_bytes( + 0, + vec![1u8], + ))]), + ) + .await? 
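+            // The memcmp filter above matches the ConcurrentMerkleTree account-type
+            // byte; accounts that fail to parse as valid trees are skipped below.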
+            .into_iter()
+            .filter_map(|(pubkey, account)| Self::try_from_rpc(pubkey, account).ok())
+            .collect())
+    }
+
+    pub async fn find(client: &Rpc, pubkeys: Vec<String>) -> Result<Vec<Self>, ErrorKind> {
+        let pubkeys: Vec<Pubkey> = pubkeys
+            .into_iter()
+            .map(|p| Pubkey::from_str(&p))
+            .collect::<Result<Vec<Pubkey>, _>>()?;
+        let pubkey_batches = pubkeys.chunks(100);
+        let pubkey_batches_count = pubkey_batches.len();
+
+        let mut gma_handles = Vec::with_capacity(pubkey_batches_count);
+
+        for batch in pubkey_batches {
+            gma_handles.push(async move {
+                let accounts = client.get_multiple_accounts(batch).await?;
+
+                let results: Vec<(&Pubkey, Option<Account>)> = batch.iter().zip(accounts).collect();
+
+                Ok::<_, ErrorKind>(results)
+            })
+        }
+
+        let result = futures::future::try_join_all(gma_handles).await?;
+
+        let trees = result
+            .into_iter()
+            .flatten()
+            .filter_map(|(pubkey, account)| {
+                account.map(|account| Self::try_from_rpc(*pubkey, account))
+            })
+            .collect::<Result<Vec<Self>, _>>()
+            .map_err(|_| ErrorKind::SerializeTreeResponse)?;
+
+        Ok(trees)
+    }
+}
diff --git a/bubblegum/src/verify.rs b/bubblegum/src/verify.rs
new file mode 100644
index 000000000..e99972ce0
--- /dev/null
+++ b/bubblegum/src/verify.rs
@@ -0,0 +1,182 @@
+use super::BubblegumContext;
+use crate::error::ErrorKind;
+use crate::tree::TreeResponse;
+use anyhow::{anyhow, Result};
+use digital_asset_types::dapi::get_proof_for_asset;
+use digital_asset_types::rpc::AssetProof;
+use futures::stream::{FuturesUnordered, StreamExt};
+use mpl_bubblegum::accounts::TreeConfig;
+use sea_orm::SqlxPostgresConnector;
+use sha3::{Digest, Keccak256};
+use solana_sdk::pubkey::Pubkey;
+use spl_account_compression::concurrent_tree_wrapper::ProveLeafArgs;
+use std::fmt;
+use std::sync::Arc;
+use tokio::sync::Mutex;
+use tracing::debug;
+
+trait TryFromAssetProof {
+    fn try_from_asset_proof(proof: AssetProof) -> Result<Self>
+    where
+        Self: Sized;
+}
+
+impl TryFromAssetProof for ProveLeafArgs {
+    fn try_from_asset_proof(proof: AssetProof) -> Result<Self> {
+        Ok(ProveLeafArgs {
+            current_root: bs58::decode(&proof.root)
+                .into_vec()
+                .map_err(|e| anyhow!(e))?
+                .try_into()
+                .map_err(|_| anyhow!("Invalid root length"))?,
+            leaf: bs58::decode(&proof.leaf)
+                .into_vec()
+                .map_err(|e| anyhow!(e))?
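`find` fans out over `get_multiple_accounts` in chunks of 100 keys (the RPC method's batch cap) and joins the batches concurrently with `try_join_all`. A self-contained sketch of that chunk-and-join shape, with stand-in types in place of real pubkeys and accounts:

```rust
// Stand-in for a getMultipleAccounts call on one batch of keys.
async fn fetch_batch(batch: &[u32]) -> Result<Vec<(u32, Option<String>)>, String> {
    Ok(batch
        .iter()
        .map(|k| (*k, Some(format!("account-{k}"))))
        .collect())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let keys: Vec<u32> = (0..250).collect();

    // 250 keys become three batches of at most 100, fetched concurrently.
    let batches = keys.chunks(100).map(fetch_batch);
    let results = futures::future::try_join_all(batches).await?;

    let fetched: usize = results.into_iter().map(|r| r.len()).sum();
    assert_eq!(fetched, 250);
    Ok(())
}
```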
+                .try_into()
+                .map_err(|_| anyhow!("Invalid leaf length"))?,
+            proof_vec: proof
+                .proof
+                .iter()
+                .map(|p| {
+                    bs58::decode(p)
+                        .into_vec()
+                        .map_err(|e| anyhow!(e))
+                        .and_then(|v| v.try_into().map_err(|_| anyhow!("Invalid proof length")))
+                })
+                .collect::<Result<Vec<[u8; 32]>>>()?,
+            index: proof.node_index as u32,
+        })
+    }
+}
+
+fn hash(left: &[u8], right: &[u8]) -> [u8; 32] {
+    let mut hasher = Keccak256::new();
+    hasher.update(left);
+    hasher.update(right);
+    let result = hasher.finalize();
+    let mut hash = [0u8; 32];
+    hash.copy_from_slice(&result);
+    hash
+}
+
+fn verify_merkle_proof(proof: &ProveLeafArgs) -> bool {
+    let mut node = proof.leaf;
+    for (i, sibling) in proof.proof_vec.iter().enumerate() {
+        if (proof.index >> i) & 1 == 0 {
+            node = hash(&node, sibling);
+        } else {
+            node = hash(sibling, &node);
+        }
+    }
+    node == proof.current_root
+}
+
+fn leaf_proof_result(proof: AssetProof) -> Result<ProofResult> {
+    match ProveLeafArgs::try_from_asset_proof(proof) {
+        Ok(proof) if verify_merkle_proof(&proof) => Ok(ProofResult::Correct),
+        Ok(_) => Ok(ProofResult::Incorrect),
+        Err(_) => Ok(ProofResult::Corrupt),
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct ProofReport {
+    pub tree_pubkey: Pubkey,
+    pub total_leaves: usize,
+    pub incorrect_proofs: usize,
+    pub not_found_proofs: usize,
+    pub correct_proofs: usize,
+    pub corrupt_proofs: usize,
+}
+
+#[derive(Debug)]
+enum ProofResult {
+    Correct,
+    Incorrect,
+    NotFound,
+    Corrupt,
+}
+
+impl fmt::Display for ProofResult {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ProofResult::Correct => write!(f, "Correct proof found"),
+            ProofResult::Incorrect => write!(f, "Incorrect proof found"),
+            ProofResult::NotFound => write!(f, "Proof not found"),
+            ProofResult::Corrupt => write!(f, "Corrupt proof found"),
+        }
+    }
+}
+
+pub async fn check(
+    context: BubblegumContext,
+    tree: TreeResponse,
+    max_concurrency: usize,
+) -> Result<ProofReport> {
+    let (tree_config_pubkey, _) = TreeConfig::find_pda(&tree.pubkey);
+
+    let pool = context.database_pool.clone();
+
+    let account = context.solana_rpc.get_account(&tree_config_pubkey).await?;
+    let account = account
+        .value
+        .ok_or_else(|| ErrorKind::Generic("Account not found".to_string()))?;
+
+    let tree_config = TreeConfig::from_bytes(account.data.as_slice())?;
+
+    let report = Arc::new(Mutex::new(ProofReport {
+        tree_pubkey: tree.pubkey,
+        total_leaves: tree_config.num_minted as usize,
+        ..ProofReport::default()
+    }));
+
+    let mut tasks = FuturesUnordered::new();
+
+    for i in 0..tree_config.num_minted {
+        if tasks.len() >= max_concurrency {
+            tasks.next().await;
+        }
+
+        let db = SqlxPostgresConnector::from_sqlx_postgres_pool(pool.clone());
+        let tree_pubkey = tree.pubkey;
+        let report = Arc::clone(&report);
+
+        tasks.push(tokio::spawn(async move {
+            let (asset, _) = Pubkey::find_program_address(
+                &[b"asset", &tree_pubkey.to_bytes(), &i.to_le_bytes()],
+                &mpl_bubblegum::ID,
+            );
+            let proof_lookup: Result<ProofResult> =
+                get_proof_for_asset(&db, asset.to_bytes().to_vec())
+                    .await
+                    .map_or_else(|_| Ok(ProofResult::NotFound), leaf_proof_result);
+
+            if let Ok(proof_result) = proof_lookup {
+                let mut report = report.lock().await;
+
+                match proof_result {
+                    ProofResult::Correct => report.correct_proofs += 1,
+                    ProofResult::Incorrect => report.incorrect_proofs += 1,
+                    ProofResult::NotFound => report.not_found_proofs += 1,
+                    ProofResult::Corrupt => report.corrupt_proofs += 1,
+                }
+
+                debug!(
+                    tree = %tree_pubkey,
+                    leaf_index = i,
+                    asset = %asset,
+                    result = ?proof_result,
+                    "Proof result for asset"
+                );
+            }
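A worked example of the bit-indexed walk in `verify_merkle_proof` above: the i-th bit of the leaf index says whether the current node is a left or right child at level i, which fixes the argument order of each pair hash. This sketch builds a depth-2 tree from fabricated leaves and assumes only the `sha3` crate:

```rust
use sha3::{Digest, Keccak256};

fn hash(left: &[u8], right: &[u8]) -> [u8; 32] {
    let mut hasher = Keccak256::new();
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

fn main() {
    let leaves: Vec<[u8; 32]> = (0u8..4).map(|i| [i; 32]).collect();
    let n01 = hash(&leaves[0], &leaves[1]);
    let n23 = hash(&leaves[2], &leaves[3]);
    let root = hash(&n01, &n23);

    // Prove leaf 2: sibling leaf 3 at level 0, sibling node n01 at level 1.
    let index: u32 = 2;
    let proof = [leaves[3], n01];

    let mut node = leaves[2];
    for (i, sibling) in proof.iter().enumerate() {
        // Bit i of the index: 0 means current node is the left child.
        node = if (index >> i) & 1 == 0 {
            hash(&node, sibling)
        } else {
            hash(sibling, &node)
        };
    }
    assert_eq!(node, root);
}
```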
+        }));
+    }
+
+    while tasks.next().await.is_some() {}
+
+    let final_report = Arc::try_unwrap(report)
+        .expect("Failed to unwrap Arc")
+        .into_inner();
+
+    Ok(final_report)
+}
diff --git a/core/Cargo.toml b/core/Cargo.toml
index d0532684f..b97e38235 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -8,18 +8,35 @@ publish.workspace = true

 [dependencies]
 anyhow = { workspace = true }
 backon = { workspace = true }
+borsh = { workspace = true }
+bs58 = { workspace = true }
+cadence = { workspace = true }
+cadence-macros = { workspace = true }
+clap = { workspace = true, features = ["derive", "cargo", "env"] }
+derive_more = { workspace = true }
+digital_asset_types = { workspace = true }
+figment = { workspace = true }
+futures = { workspace = true }
+indicatif = { workspace = true }
+log = { workspace = true }
+plerkle_messenger = { workspace = true }
+reqwest = { workspace = true }
+sea-orm = { workspace = true, features = [
+    "sqlx-postgres",
+    "with-chrono",
+    "runtime-tokio-rustls",
+] }
+serde = { workspace = true }
+serde_json = { workspace = true }
+spl-account-compression = { workspace = true }
 solana-account-decoder = { workspace = true }
 solana-client = { workspace = true }
 solana-sdk = { workspace = true }
 solana-transaction-status = { workspace = true }
-cadence = { workspace = true }
-cadence-macros = { workspace = true }
+sqlx = { workspace = true, features = ["runtime-tokio-rustls", "postgres"] }
 thiserror = { workspace = true }
-figment = { workspace = true }
-plerkle_messenger = { workspace = true }
 tokio = { workspace = true }
-clap = { workspace = true, features = ["derive", "cargo", "env"] }
-sqlx = { workspace = true, features = ["runtime-tokio-rustls", "postgres"] }
+url = { workspace = true }

 [lints]
 workspace = true
diff --git a/core/src/db.rs b/core/src/db.rs
index 3037f670d..5c211a877 100644
--- a/core/src/db.rs
+++ b/core/src/db.rs
@@ -27,7 +27,7 @@ pub struct PoolArgs {
 ///// # Returns
 /////
 ///// * `Result<DatabaseConnection, DbErr>` - On success, returns a `DatabaseConnection`. On failure, returns a `DbErr`.
-pub async fn connect_db(config: PoolArgs) -> Result { +pub async fn connect_db(config: &PoolArgs) -> Result { let options: PgConnectOptions = config.database_url.parse()?; PgPoolOptions::new() diff --git a/core/src/lib.rs b/core/src/lib.rs index da6bb050e..341c54817 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -1,9 +1,11 @@ mod db; +mod metadata_json; mod metrics; mod plerkle_messenger_queue; mod solana_rpc; pub use db::*; +pub use metadata_json::*; pub use metrics::*; pub use plerkle_messenger_queue::*; pub use solana_rpc::*; diff --git a/core/src/metadata_json.rs b/core/src/metadata_json.rs new file mode 100644 index 000000000..1c87d6825 --- /dev/null +++ b/core/src/metadata_json.rs @@ -0,0 +1,250 @@ +use { + backon::{ExponentialBuilder, Retryable}, + clap::Parser, + digital_asset_types::dao::asset_data, + futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt}, + indicatif::HumanDuration, + log::{debug, error}, + reqwest::{Client, Url as ReqwestUrl}, + sea_orm::{entity::*, SqlxPostgresConnector}, + serde::{Deserialize, Serialize}, + tokio::{ + sync::mpsc::{error::SendError, unbounded_channel, UnboundedSender}, + task::JoinHandle, + time::Instant, + }, +}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DownloadMetadataInfo { + asset_data_id: Vec, + uri: String, + slot: i64, +} + +impl DownloadMetadataInfo { + pub fn new(asset_data_id: Vec, uri: String, slot: i64) -> Self { + Self { + asset_data_id, + uri: uri.trim().replace('\0', ""), + slot, + } + } + + pub fn into_inner(self) -> (Vec, String, i64) { + (self.asset_data_id, self.uri, self.slot) + } +} + +pub type DownloadMetadataNotifier = Box< + dyn Fn( + DownloadMetadataInfo, + ) -> BoxFuture<'static, Result<(), Box>> + + Sync + + Send, +>; + +pub async fn create_download_metadata_notifier( + download_metadata_json_sender: UnboundedSender, +) -> DownloadMetadataNotifier { + Box::new(move |info: DownloadMetadataInfo| -> BoxFuture<'static, Result<(), Box>> + { + let task = download_metadata_json_sender.send(info).map_err(Into::into); + + Box::pin(async move { task }) + }) +} + +#[derive(Parser, Clone, Debug, PartialEq, Eq)] +pub struct MetadataJsonDownloadWorkerArgs { + /// The number of worker threads + #[arg(long, env, default_value = "25")] + metadata_json_download_worker_count: usize, + /// The request timeout in milliseconds + #[arg(long, env, default_value = "1000")] + metadata_json_download_worker_request_timeout: u64, +} + +impl MetadataJsonDownloadWorkerArgs { + pub fn start( + &self, + pool: sqlx::PgPool, + ) -> Result< + (JoinHandle<()>, UnboundedSender), + MetadataJsonDownloadWorkerError, + > { + let (sender, mut rx) = unbounded_channel::(); + let worker_count = self.metadata_json_download_worker_count; + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_millis( + self.metadata_json_download_worker_request_timeout, + )) + .build()?; + + let handle = tokio::spawn(async move { + let mut handlers = FuturesUnordered::new(); + + while let Some(download_metadata_info) = rx.recv().await { + if handlers.len() >= worker_count { + handlers.next().await; + } + + let pool = pool.clone(); + let client = client.clone(); + + handlers.push(spawn_task(client, pool, download_metadata_info)); + } + + while handlers.next().await.is_some() {} + }); + + Ok((handle, sender)) + } +} + +#[derive(thiserror::Error, Debug)] +pub enum MetadataJsonDownloadWorkerError { + #[error("send error: {0}")] + Send(#[from] SendError), + #[error("join error: {0}")] + Join(#[from] 
tokio::task::JoinError), + #[error("reqwest: {0}")] + Reqwest(#[from] reqwest::Error), +} + +fn spawn_task( + client: Client, + pool: sqlx::PgPool, + download_metadata_info: DownloadMetadataInfo, +) -> JoinHandle<()> { + tokio::spawn(async move { + let timing = Instant::now(); + let asset_data_id = + bs58::encode(download_metadata_info.asset_data_id.clone()).into_string(); + + if let Err(e) = perform_metadata_json_task(client, pool, &download_metadata_info).await { + error!("Asset {} failed: {}", asset_data_id, e); + } + + debug!( + "Asset {} finished in {}", + asset_data_id, + HumanDuration(timing.elapsed()) + ); + }) +} + +#[derive(thiserror::Error, Debug)] +pub enum FetchMetadataJsonError { + #[error("reqwest: {0}")] + GenericReqwest(#[from] reqwest::Error), + #[error("json parse for url({url}) with {source}")] + Parse { + source: reqwest::Error, + url: ReqwestUrl, + }, + #[error("response {status} for url ({url}) with {source}")] + Response { + source: reqwest::Error, + url: ReqwestUrl, + status: StatusCode, + }, + #[error("url parse: {0}")] + Url(#[from] url::ParseError), +} + +#[derive(Debug, derive_more::Display)] +pub enum StatusCode { + Unknown, + Code(reqwest::StatusCode), +} + +async fn fetch_metadata_json( + client: Client, + metadata_json_url: &str, +) -> Result { + (|| async { + let url = ReqwestUrl::parse(metadata_json_url)?; + + let response = client.get(url.clone()).send().await?; + + match response.error_for_status() { + Ok(res) => res + .json::() + .await + .map_err(|source| FetchMetadataJsonError::Parse { source, url }), + Err(source) => { + let status = source + .status() + .map(StatusCode::Code) + .unwrap_or(StatusCode::Unknown); + + Err(FetchMetadataJsonError::Response { + source, + url, + status, + }) + } + } + }) + .retry(&ExponentialBuilder::default()) + .await +} + +#[derive(thiserror::Error, Debug)] +pub enum MetadataJsonTaskError { + #[error("sea orm: {0}")] + SeaOrm(#[from] sea_orm::DbErr), + #[error("metadata json: {0}")] + Fetch(#[from] FetchMetadataJsonError), + #[error("asset not found in the db")] + AssetNotFound, +} + +pub async fn perform_metadata_json_task( + client: Client, + pool: sqlx::PgPool, + download_metadata_info: &DownloadMetadataInfo, +) -> Result { + match fetch_metadata_json(client, &download_metadata_info.uri).await { + Ok(metadata) => { + let active_model = asset_data::ActiveModel { + id: Set(download_metadata_info.asset_data_id.clone()), + metadata: Set(metadata), + reindex: Set(Some(false)), + ..Default::default() + }; + + let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool); + + let model = active_model.update(&conn).await?; + + Ok(model) + } + Err(e) => Err(MetadataJsonTaskError::Fetch(e)), + } +} + +pub struct DownloadMetadata { + client: Client, + pool: sqlx::PgPool, +} + +impl DownloadMetadata { + pub const fn new(client: Client, pool: sqlx::PgPool) -> Self { + Self { client, pool } + } + + pub async fn handle_download( + &self, + download_metadata_info: &DownloadMetadataInfo, + ) -> Result<(), MetadataJsonTaskError> { + perform_metadata_json_task( + self.client.clone(), + self.pool.clone(), + download_metadata_info, + ) + .await + .map(|_| ()) + } +} diff --git a/core/src/metrics.rs b/core/src/metrics.rs index 9c0d3c531..13bf4e261 100644 --- a/core/src/metrics.rs +++ b/core/src/metrics.rs @@ -14,8 +14,8 @@ pub struct MetricsArgs { pub metrics_prefix: String, } -pub fn setup_metrics(config: MetricsArgs) -> Result<()> { - let host = (config.metrics_host, config.metrics_port); +pub fn setup_metrics(config: &MetricsArgs) -> 
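Two details of the metadata-JSON worker above are easy to miss: `DownloadMetadataInfo::new` strips NUL padding from on-chain URIs before they are ever fetched, and `fetch_metadata_json` wraps the whole request in `backon`'s exponential-backoff retry. A runnable sketch of both, with an in-memory closure standing in for the HTTP call (the attempt counts and strings are made up):

```rust
use backon::{ExponentialBuilder, Retryable};
use std::sync::atomic::{AtomicU32, Ordering};

// On-chain metadata URIs are fixed-width and NUL-padded; strip the
// padding before parsing, as DownloadMetadataInfo::new does.
fn clean_uri(raw: &str) -> String {
    raw.trim().replace('\0', "")
}

#[tokio::main]
async fn main() -> Result<(), &'static str> {
    assert_eq!(
        clean_uri("https://example.com/1.json\0\0"),
        "https://example.com/1.json"
    );

    // backon re-invokes the closure once per attempt, backing off
    // exponentially; here the "request" fails twice, then succeeds.
    let attempts = AtomicU32::new(0);
    let body = (|| async {
        if attempts.fetch_add(1, Ordering::SeqCst) < 2 {
            Err("transient network error")
        } else {
            Ok("{\"name\":\"asset\"}")
        }
    })
    .retry(&ExponentialBuilder::default())
    .await?;

    assert_eq!(attempts.load(Ordering::SeqCst), 3);
    println!("fetched: {body}");
    Ok(())
}
```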
Result<()> {
+    let host = (config.metrics_host.clone(), config.metrics_port);
     let socket = UdpSocket::bind("0.0.0.0:0")?;
     socket.set_nonblocking(true)?;
diff --git a/core/src/plerkle_messenger_queue.rs b/core/src/plerkle_messenger_queue.rs
index 8c30d01c0..11b1166ed 100644
--- a/core/src/plerkle_messenger_queue.rs
+++ b/core/src/plerkle_messenger_queue.rs
@@ -65,7 +65,7 @@ pub struct QueuePool {
 }

 impl QueuePool {
-    pub async fn try_from_config(config: QueueArgs) -> anyhow::Result<Self> {
+    pub async fn try_from_config(config: &QueueArgs) -> anyhow::Result<Self> {
         let size = usize::try_from(config.messenger_queue_connections)?;
         let (tx, rx) = mpsc::channel(size);
diff --git a/core/src/solana_rpc.rs b/core/src/solana_rpc.rs
index 71f86f8a6..26e9e1f05 100644
--- a/core/src/solana_rpc.rs
+++ b/core/src/solana_rpc.rs
@@ -3,13 +3,16 @@ use backon::ExponentialBuilder;
 use backon::Retryable;
 use clap::Parser;
 use solana_account_decoder::UiAccountEncoding;
-use solana_client::rpc_response::RpcConfirmedTransactionStatusWithSignature;
+use solana_client::rpc_response::RpcTokenAccountBalance;
 use solana_client::{
     client_error::ClientError,
     nonblocking::rpc_client::RpcClient,
     rpc_client::GetConfirmedSignaturesForAddress2Config,
     rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcTransactionConfig},
     rpc_filter::RpcFilterType,
+    rpc_request::RpcRequest,
+    rpc_response::Response as RpcResponse,
+    rpc_response::RpcConfirmedTransactionStatusWithSignature,
 };
 use solana_sdk::{
     account::Account,
@@ -31,8 +34,8 @@ pub struct SolanaRpcArgs {
 pub struct Rpc(Arc<RpcClient>);

 impl Rpc {
-    pub fn from_config(config: SolanaRpcArgs) -> Self {
-        Rpc(Arc::new(RpcClient::new(config.solana_rpc_url)))
+    pub fn from_config(config: &SolanaRpcArgs) -> Self {
+        Rpc(Arc::new(RpcClient::new(config.solana_rpc_url.clone())))
     }

     pub async fn get_transaction(
@@ -157,4 +160,26 @@ impl Rpc {
             .await?
             .value)
     }
+
+    pub async fn get_token_largest_account(&self, mint: Pubkey) -> anyhow::Result<Pubkey> {
+        Ok((|| async {
+            self.0
+                .send::<RpcResponse<Vec<RpcTokenAccountBalance>>>(
+                    RpcRequest::Custom {
+                        method: "getTokenLargestAccounts",
+                    },
+                    serde_json::json!([mint.to_string(),]),
+                )
+                .await
+        })
+        .retry(&ExponentialBuilder::default())
+        .await?
+        .value
+        .first()
+        .ok_or(anyhow::anyhow!(format!(
+            "no token accounts for mint {mint}: burned nft?"
+        )))?
+        .address
+        .parse::<Pubkey>()?)
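`get_token_largest_account` issues `getTokenLargestAccounts` as a custom request and takes the first (largest) entry's address. A sketch of the JSON-RPC response shape it relies on, following Solana's documented format; the address and amounts below are fabricated:

```rust
use serde_json::{json, Value};

// Pull the address of the largest holder out of a raw response body.
fn largest_address(response: &Value) -> Option<&str> {
    response["result"]["value"].as_array()?.first()?["address"].as_str()
}

fn main() {
    let response = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "result": {
            "context": { "slot": 1 },
            "value": [
                // Entries are sorted largest-first by the RPC node.
                {
                    "address": "TokenAccount1111111111111111111111111111111",
                    "amount": "1",
                    "decimals": 0,
                    "uiAmount": 1.0
                }
            ]
        }
    });
    assert_eq!(
        largest_address(&response),
        Some("TokenAccount1111111111111111111111111111111")
    );
}
```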
+ } } diff --git a/das_api/src/api/api_impl.rs b/das_api/src/api/api_impl.rs index 4b55f43d8..36d0bd6c7 100644 --- a/das_api/src/api/api_impl.rs +++ b/das_api/src/api/api_impl.rs @@ -1,21 +1,21 @@ use digital_asset_types::{ dao::{ - scopes::asset::get_grouping, + scopes::asset::{get_grouping, get_nft_editions}, sea_orm_active_enums::{ OwnerType, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions, }, Cursor, PageOptions, SearchAssetsQuery, }, dapi::{ - get_asset, get_asset_proofs, get_asset_signatures, get_assets, get_assets_by_authority, - get_assets_by_creator, get_assets_by_group, get_assets_by_owner, get_proof_for_asset, - search_assets, + common::create_pagination, get_asset, get_asset_proofs, get_asset_signatures, get_assets, + get_assets_by_authority, get_assets_by_creator, get_assets_by_group, get_assets_by_owner, + get_proof_for_asset, get_token_accounts, search_assets, }, rpc::{ - filter::{AssetSortBy, SearchConditionType}, - response::GetGroupingResponse, + filter::SearchConditionType, + response::{GetGroupingResponse, TokenAccountList}, + OwnershipModel, RoyaltyModel, }, - rpc::{OwnershipModel, RoyaltyModel}, }; use open_rpc_derive::document_rpc; use sea_orm::{sea_query::ConditionType, ConnectionTrait, DbBackend, Statement}; @@ -23,6 +23,7 @@ use sea_orm::{sea_query::ConditionType, ConnectionTrait, DbBackend, Statement}; use crate::error::DasApiError; use crate::validation::{validate_opt_pubkey, validate_search_with_name}; use open_rpc_schema::document::OpenrpcDocument; +use std::collections::HashSet; use { crate::api::*, crate::config::Config, @@ -40,7 +41,7 @@ pub struct DasApi { impl DasApi { pub async fn from_config(config: Config) -> Result { let pool = PgPoolOptions::new() - .max_connections(250) + .max_connections(config.max_database_connections.unwrap_or(250)) .connect(&config.database_url) .await?; @@ -101,11 +102,6 @@ impl DasApi { if cursor.is_some() { return Err(DasApiError::PaginationError); } - if let Some(sort) = &sorting { - if sort.sort_by != AssetSortBy::Id { - return Err(DasApiError::PaginationSortingValidationError); - } - } validate_pubkey(before.clone())?; is_cursor_enabled = false; } @@ -114,21 +110,14 @@ impl DasApi { if cursor.is_some() { return Err(DasApiError::PaginationError); } - if let Some(sort) = &sorting { - if sort.sort_by != AssetSortBy::Id { - return Err(DasApiError::PaginationSortingValidationError); - } - } + validate_pubkey(after.clone())?; is_cursor_enabled = false; } page_opt.limit = limit.map(|x| x as u64).unwrap_or(1000); if is_cursor_enabled { - if let Some(sort) = &sorting { - if sort.sort_by != AssetSortBy::Id { - return Err(DasApiError::PaginationSortingValidationError); - } + if let Some(_) = &sorting { page_opt.cursor = Some(self.get_cursor(cursor)?); } } else { @@ -148,6 +137,13 @@ pub fn not_found(asset_id: &String) -> DbErr { DbErr::RecordNotFound(format!("Asset Proof for {} Not Found", asset_id)) } +pub fn remove_duplicates_ids(ids: Vec) -> Vec { + let mut hash_set = HashSet::new(); + ids.into_iter() + .filter(|id| hash_set.insert(id.clone())) + .collect() +} + #[document_rpc] #[async_trait] impl ApiContract for DasApi { @@ -218,6 +214,7 @@ impl ApiContract for DasApi { ) -> Result>, DasApiError> { let GetAssets { ids, options } = payload; + let ids = remove_duplicates_ids(ids); let batch_size = ids.len(); if batch_size > 1000 { return Err(DasApiError::BatchSizeExceededError); @@ -284,6 +281,7 @@ impl ApiContract for DasApi { options, cursor, } = payload; + validate_pubkey(group_value.clone())?; let before: 
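`remove_duplicates_ids` above dedupes the `getAssets` id list before the batch-size check, keeping first occurrences in their original order because `HashSet::insert` returns `false` for repeats. The same function in isolation, with a small check:

```rust
use std::collections::HashSet;

fn remove_duplicates_ids(ids: Vec<String>) -> Vec<String> {
    let mut hash_set = HashSet::new();
    // filter keeps an id only the first time insert succeeds.
    ids.into_iter()
        .filter(|id| hash_set.insert(id.clone()))
        .collect()
}

fn main() {
    let ids: Vec<String> = ["a", "b", "a", "c", "b"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    assert_eq!(remove_duplicates_ids(ids), ["a", "b", "c"]);
}
```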
Option = before.filter(|before| !before.is_empty()); let after: Option = after.filter(|after| !after.is_empty()); let sort_by = sort_by.unwrap_or_default(); @@ -374,6 +372,7 @@ impl ApiContract for DasApi { negate, condition_type, interface, + token_type, owner_address, owner_type, creator_address, @@ -403,7 +402,7 @@ impl ApiContract for DasApi { // Deserialize search assets query let spec: Option<(SpecificationVersions, SpecificationAssetClass)> = - interface.map(|x| x.into()); + interface.clone().map(|x| x.into()); let specification_version = spec.clone().map(|x| x.0); let specification_asset_class = spec.map(|x| x.1); let condition_type = condition_type.map(|x| match x { @@ -431,8 +430,10 @@ impl ApiContract for DasApi { let saq = SearchAssetsQuery { negate, condition_type, + interface, specification_version, specification_asset_class, + token_type, owner_address, owner_type, creator_address, @@ -501,6 +502,7 @@ impl ApiContract for DasApi { .await .map_err(Into::into) } + async fn get_grouping( self: &DasApi, payload: GetGrouping, @@ -516,4 +518,59 @@ impl ApiContract for DasApi { group_size: gs.size, }) } + + async fn get_nft_editions( + self: &DasApi, + payload: GetNftEditions, + ) -> Result { + let GetNftEditions { + mint_address, + page, + limit, + before, + after, + cursor, + } = payload; + + let page_options = self.validate_pagination(limit, page, &before, &after, &cursor, None)?; + let mint_address = validate_pubkey(mint_address.clone())?; + let pagination = create_pagination(&page_options)?; + get_nft_editions( + &self.db_connection, + mint_address, + &pagination, + page_options.limit, + ) + .await + .map_err(Into::into) + } + + async fn get_token_accounts( + self: &DasApi, + payload: GetTokenAccounts, + ) -> Result { + let GetTokenAccounts { + owner_address, + mint_address, + limit, + page, + before, + after, + options, + cursor, + } = payload; + let owner_address = validate_opt_pubkey(&owner_address)?; + let mint_address = validate_opt_pubkey(&mint_address)?; + let options = options.unwrap_or_default(); + let page_options = self.validate_pagination(limit, page, &before, &after, &cursor, None)?; + get_token_accounts( + &self.db_connection, + owner_address, + mint_address, + &page_options, + &options, + ) + .await + .map_err(Into::into) + } } diff --git a/das_api/src/api/mod.rs b/das_api/src/api/mod.rs index 2e1da73ca..6d1bb299f 100644 --- a/das_api/src/api/mod.rs +++ b/das_api/src/api/mod.rs @@ -1,8 +1,10 @@ use crate::error::DasApiError; use async_trait::async_trait; -use digital_asset_types::rpc::filter::{AssetSortDirection, SearchConditionType}; +use digital_asset_types::rpc::filter::{AssetSortDirection, SearchConditionType, TokenTypeClass}; use digital_asset_types::rpc::options::Options; -use digital_asset_types::rpc::response::{AssetList, TransactionSignatureList}; +use digital_asset_types::rpc::response::{ + AssetList, NftEditions, TokenAccountList, TransactionSignatureList, +}; use digital_asset_types::rpc::{filter::AssetSorting, response::GetGroupingResponse}; use digital_asset_types::rpc::{Asset, AssetProof, Interface, OwnershipModel, RoyaltyModel}; use open_rpc_derive::{document_rpc, rpc}; @@ -94,6 +96,7 @@ pub struct SearchAssets { pub negate: Option, pub condition_type: Option, pub interface: Option, + pub token_type: Option, pub owner_address: Option, pub owner_type: Option, pub creator_address: Option, @@ -147,6 +150,18 @@ pub struct GetGrouping { pub group_value: String, } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] 
+#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct GetNftEditions { + pub mint_address: String, + pub page: Option, + pub limit: Option, + pub before: Option, + pub after: Option, + #[serde(default)] + pub cursor: Option, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, Default)] #[serde(deny_unknown_fields, rename_all = "camelCase")] pub struct GetAssetSignatures { @@ -163,6 +178,21 @@ pub struct GetAssetSignatures { pub sort_direction: Option, } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct GetTokenAccounts { + pub owner_address: Option, + pub mint_address: Option, + pub limit: Option, + pub page: Option, + pub before: Option, + pub after: Option, + #[serde(default, alias = "displayOptions")] + pub options: Option, + #[serde(default)] + pub cursor: Option, +} + #[document_rpc] #[async_trait] pub trait ApiContract: Send + Sync + 'static { @@ -251,4 +281,21 @@ pub trait ApiContract: Send + Sync + 'static { summary = "Get a list of assets grouped by a specific authority" )] async fn get_grouping(&self, payload: GetGrouping) -> Result; + + #[rpc( + name = "getTokenAccounts", + params = "named", + summary = "Get a list of token accounts by owner or mint" + )] + async fn get_token_accounts( + &self, + payload: GetTokenAccounts, + ) -> Result; + + #[rpc( + name = "getNftEditions", + params = "named", + summary = "Get all printable editions for a master edition NFT mint" + )] + async fn get_nft_editions(&self, payload: GetNftEditions) -> Result; } diff --git a/das_api/src/builder.rs b/das_api/src/builder.rs index 25edb0aa2..2b81da370 100644 --- a/das_api/src/builder.rs +++ b/das_api/src/builder.rs @@ -118,6 +118,29 @@ impl RpcApiBuilder { Ok(rpc_context.schema()) })?; + module.register_async_method( + "get_token_accounts", + |rpc_params, rpc_context| async move { + let payload = rpc_params.parse::()?; + rpc_context + .get_token_accounts(payload) + .await + .map_err(Into::into) + }, + )?; + + module.register_alias("getTokenAccounts", "get_token_accounts")?; + + module.register_async_method("get_nft_editions", |rpc_params, rpc_context| async move { + let payload = rpc_params.parse::()?; + rpc_context + .get_nft_editions(payload) + .await + .map_err(Into::into) + })?; + + module.register_alias("getNftEditions", "get_nft_editions")?; + Ok(module) } } diff --git a/das_api/src/config.rs b/das_api/src/config.rs index ae08346e4..ae3d6251e 100644 --- a/das_api/src/config.rs +++ b/das_api/src/config.rs @@ -7,6 +7,8 @@ use { #[derive(Deserialize, Default)] pub struct Config { pub database_url: String, + pub max_database_connections: Option, + pub max_request_connections: Option, pub metrics_port: Option, pub metrics_host: Option, pub server_port: u16, diff --git a/das_api/src/main.rs b/das_api/src/main.rs index a8105358a..f910cc118 100644 --- a/das_api/src/main.rs +++ b/das_api/src/main.rs @@ -134,6 +134,7 @@ async fn main() -> Result<(), DasApiError> { let server = ServerBuilder::default() .set_middleware(middleware) + .max_connections(config.max_request_connections.unwrap_or(100)) .set_logger(MetricMiddleware) .build(addr) .await?; diff --git a/digital_asset_types/Cargo.toml b/digital_asset_types/Cargo.toml index 449f6e78d..53d7bc0d5 100644 --- a/digital_asset_types/Cargo.toml +++ b/digital_asset_types/Cargo.toml @@ -29,6 +29,7 @@ spl-concurrent-merkle-tree = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = 
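`GetTokenAccounts` accepts the legacy `displayOptions` key through a serde alias, so old and new payloads deserialize identically even with `deny_unknown_fields` in force. A minimal sketch of that mechanism on a stand-in struct (field set reduced for brevity; assumes `serde` with the derive feature and `serde_json`):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
struct Params {
    owner_address: Option<String>,
    // Accepts both "options" and the legacy "displayOptions" key.
    #[serde(default, alias = "displayOptions")]
    options: Option<bool>,
}

fn main() {
    let new_style: Params =
        serde_json::from_str(r#"{"ownerAddress":"o","options":true}"#).unwrap();
    let old_style: Params =
        serde_json::from_str(r#"{"ownerAddress":"o","displayOptions":true}"#).unwrap();
    assert_eq!(new_style, old_style);
}
```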
["macros"] } url = { workspace = true } +mpl-token-metadata = { workspace = true } [features] default = ["json_types", "sql_types"] diff --git a/digital_asset_types/src/dao/extensions/asset.rs b/digital_asset_types/src/dao/extensions/asset.rs index 18525e39e..1d8e7f1d3 100644 --- a/digital_asset_types/src/dao/extensions/asset.rs +++ b/digital_asset_types/src/dao/extensions/asset.rs @@ -4,6 +4,7 @@ use crate::dao::{ asset, asset_authority, asset_creators, asset_data, asset_grouping, asset_v1_account_attachments, sea_orm_active_enums::{OwnerType, RoyaltyTargetType}, + token_accounts, }; #[derive(Copy, Clone, Debug, EnumIter)] @@ -13,6 +14,7 @@ pub enum Relation { AssetAuthority, AssetCreators, AssetGrouping, + TokenAccounts, } impl RelationTrait for Relation { @@ -22,6 +24,10 @@ impl RelationTrait for Relation { .from(asset::Column::AssetData) .to(asset_data::Column::Id) .into(), + Self::TokenAccounts => asset::Entity::belongs_to(token_accounts::Entity) + .from(asset::Column::Id) + .to(token_accounts::Column::Mint) + .into(), Self::AssetV1AccountAttachments => { asset::Entity::has_many(asset_v1_account_attachments::Entity).into() } @@ -62,6 +68,12 @@ impl Related for asset::Entity { } } +impl Related for asset::Entity { + fn to() -> RelationDef { + Relation::TokenAccounts.def() + } +} + impl Default for RoyaltyTargetType { fn default() -> Self { Self::Creators @@ -76,7 +88,7 @@ impl Default for asset::Model { specification_version: None, specification_asset_class: None, owner: None, - owner_type: OwnerType::Single, + owner_type: OwnerType::Unknown, delegate: None, frozen: Default::default(), supply: Default::default(), @@ -103,6 +115,7 @@ impl Default for asset::Model { owner_delegate_seq: None, leaf_seq: None, base_info_seq: None, + mint_extensions: None, mpl_core_plugins: None, mpl_core_unknown_plugins: None, mpl_core_collection_current_size: None, diff --git a/digital_asset_types/src/dao/extensions/asset_grouping.rs b/digital_asset_types/src/dao/extensions/asset_grouping.rs index 49b091efb..d474c63bb 100644 --- a/digital_asset_types/src/dao/extensions/asset_grouping.rs +++ b/digital_asset_types/src/dao/extensions/asset_grouping.rs @@ -1,11 +1,12 @@ use sea_orm::{EntityTrait, EnumIter, Related, RelationDef, RelationTrait}; -use crate::dao::{asset, asset_authority, asset_grouping}; +use crate::dao::{asset, asset_authority, asset_data, asset_grouping}; #[derive(Copy, Clone, Debug, EnumIter)] pub enum Relation { Asset, AssetAuthority, + AssetData, } impl RelationTrait for Relation { @@ -19,6 +20,10 @@ impl RelationTrait for Relation { .from(asset_grouping::Column::AssetId) .to(asset_authority::Column::Id) .into(), + Self::AssetData => asset_grouping::Entity::belongs_to(asset_data::Entity) + .from(asset_grouping::Column::AssetId) + .to(asset_data::Column::Id) + .into(), } } } @@ -34,3 +39,9 @@ impl Related for asset_grouping::Entity { Relation::AssetAuthority.def() } } + +impl Related for asset_grouping::Entity { + fn to() -> RelationDef { + Relation::AssetData.def() + } +} diff --git a/digital_asset_types/src/dao/extensions/mod.rs b/digital_asset_types/src/dao/extensions/mod.rs index bcfc3e130..af3baad66 100644 --- a/digital_asset_types/src/dao/extensions/mod.rs +++ b/digital_asset_types/src/dao/extensions/mod.rs @@ -5,3 +5,4 @@ pub mod asset_data; pub mod asset_grouping; pub mod asset_v1_account_attachment; pub mod instruction; +pub mod token_accounts; diff --git a/digital_asset_types/src/dao/extensions/token_accounts.rs b/digital_asset_types/src/dao/extensions/token_accounts.rs new file 
mode 100644 index 000000000..a8c8f60eb --- /dev/null +++ b/digital_asset_types/src/dao/extensions/token_accounts.rs @@ -0,0 +1,26 @@ +use sea_orm::{EntityTrait, EnumIter, Related, RelationDef, RelationTrait}; + +use crate::dao::{asset, token_accounts}; + +#[derive(Copy, Clone, Debug, EnumIter)] + +pub enum Relation { + Asset, +} + +impl RelationTrait for Relation { + fn def(&self) -> RelationDef { + match self { + Self::Asset => token_accounts::Entity::belongs_to(asset::Entity) + .from(token_accounts::Column::Mint) + .to(asset::Column::Id) + .into(), + } + } +} + +impl Related for token_accounts::Entity { + fn to() -> RelationDef { + Relation::Asset.def() + } +} diff --git a/digital_asset_types/src/dao/full_asset.rs b/digital_asset_types/src/dao/full_asset.rs index fda932d21..05e67b3ae 100644 --- a/digital_asset_types/src/dao/full_asset.rs +++ b/digital_asset_types/src/dao/full_asset.rs @@ -1,12 +1,28 @@ use crate::dao::{asset, asset_authority, asset_creators, asset_data, asset_grouping}; +use super::asset_v1_account_attachments; + +use super::tokens; + +pub struct FullAssetGroup { + pub id: i64, + pub asset_id: Vec, + pub group_key: String, + pub group_value: Option, + pub seq: Option, + pub slot_updated: Option, + pub verified: bool, + pub group_info_seq: Option, +} #[derive(Clone, Debug, PartialEq)] pub struct FullAsset { pub asset: asset::Model, - pub data: asset_data::Model, + pub data: Option, + pub token_info: Option, pub authorities: Vec, pub creators: Vec, - pub groups: Vec, + pub groups: Vec<(asset_grouping::Model, Option)>, + pub inscription: Option, } #[derive(Clone, Debug, PartialEq)] pub struct AssetRelated { diff --git a/digital_asset_types/src/dao/generated/asset.rs b/digital_asset_types/src/dao/generated/asset.rs index f70e0b383..f2f74d6a9 100644 --- a/digital_asset_types/src/dao/generated/asset.rs +++ b/digital_asset_types/src/dao/generated/asset.rs @@ -50,6 +50,7 @@ pub struct Model { pub owner_delegate_seq: Option, pub leaf_seq: Option, pub base_info_seq: Option, + pub mint_extensions: Option, pub mpl_core_plugins: Option, pub mpl_core_unknown_plugins: Option, pub mpl_core_collection_num_minted: Option, @@ -93,6 +94,7 @@ pub enum Column { OwnerDelegateSeq, LeafSeq, BaseInfoSeq, + MintExtensions, MplCorePlugins, MplCoreUnknownPlugins, MplCoreCollectionNumMinted, @@ -153,6 +155,7 @@ impl ColumnTrait for Column { Self::OwnerDelegateSeq => ColumnType::BigInteger.def().null(), Self::LeafSeq => ColumnType::BigInteger.def().null(), Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), + Self::MintExtensions => ColumnType::JsonBinary.def().null(), Self::MplCorePlugins => ColumnType::JsonBinary.def().null(), Self::MplCoreUnknownPlugins => ColumnType::JsonBinary.def().null(), Self::MplCoreCollectionNumMinted => ColumnType::Integer.def().null(), diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index e4d0e012d..cf7470c6f 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -66,6 +66,8 @@ pub enum V1AccountAttachments { MasterEditionV1, #[sea_orm(string_value = "master_edition_v2")] MasterEditionV2, + #[sea_orm(string_value = "token_inscription")] + TokenInscription, #[sea_orm(string_value = "unknown")] Unknown, } diff --git a/digital_asset_types/src/dao/generated/token_accounts.rs b/digital_asset_types/src/dao/generated/token_accounts.rs index 380e29b42..d758403b8 100644 --- 
a/digital_asset_types/src/dao/generated/token_accounts.rs +++ b/digital_asset_types/src/dao/generated/token_accounts.rs @@ -24,6 +24,7 @@ pub struct Model { pub delegated_amount: i64, pub slot_updated: i64, pub token_program: Vec, + pub extensions: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -38,6 +39,7 @@ pub enum Column { DelegatedAmount, SlotUpdated, TokenProgram, + Extensions, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -69,6 +71,7 @@ impl ColumnTrait for Column { Self::DelegatedAmount => ColumnType::BigInteger.def(), Self::SlotUpdated => ColumnType::BigInteger.def(), Self::TokenProgram => ColumnType::Binary.def(), + Self::Extensions => ColumnType::Json.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/tokens.rs b/digital_asset_types/src/dao/generated/tokens.rs index 326b8d968..ed1e5598d 100644 --- a/digital_asset_types/src/dao/generated/tokens.rs +++ b/digital_asset_types/src/dao/generated/tokens.rs @@ -23,6 +23,7 @@ pub struct Model { pub close_authority: Option>, pub extension_data: Option>, pub slot_updated: i64, + pub extensions: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -36,6 +37,7 @@ pub enum Column { CloseAuthority, ExtensionData, SlotUpdated, + Extensions, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -66,6 +68,7 @@ impl ColumnTrait for Column { Self::CloseAuthority => ColumnType::Binary.def().null(), Self::ExtensionData => ColumnType::Binary.def().null(), Self::SlotUpdated => ColumnType::BigInteger.def(), + Self::Extensions => ColumnType::JsonBinary.def().null(), } } } @@ -77,3 +80,13 @@ impl RelationTrait for Relation { } impl ActiveModelBehavior for ActiveModel {} + +pub trait IsNonFungible { + fn is_non_fungible(&self) -> bool; +} + +impl IsNonFungible for Model { + fn is_non_fungible(&self) -> bool { + self.decimals == 0 && self.supply == 1.into() + } +} diff --git a/digital_asset_types/src/dao/generated/tree_transactions.rs b/digital_asset_types/src/dao/generated/tree_transactions.rs new file mode 100644 index 000000000..65fd65b27 --- /dev/null +++ b/digital_asset_types/src/dao/generated/tree_transactions.rs @@ -0,0 +1,67 @@ +//! `SeaORM` Entity. 
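The `IsNonFungible` helper added to the `tokens` model classifies a mint as non-fungible when it has zero decimals and a supply of exactly one. A stripped-down version using plain integers (the real model's `supply` is a `Decimal`, hence the `1.into()` comparison in the patch):

```rust
struct Token {
    decimals: i32,
    supply: u64,
}

trait IsNonFungible {
    fn is_non_fungible(&self) -> bool;
}

impl IsNonFungible for Token {
    // An NFT mint: indivisible (0 decimals) with exactly one unit minted.
    fn is_non_fungible(&self) -> bool {
        self.decimals == 0 && self.supply == 1
    }
}

fn main() {
    assert!(Token { decimals: 0, supply: 1 }.is_non_fungible());
    assert!(!Token { decimals: 6, supply: 1_000_000 }.is_non_fungible());
}
```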
Generated by sea-orm-codegen 0.10.5 + +use sea_orm::entity::prelude::*; +use serde::{Deserialize, Serialize}; + +#[derive(Copy, Clone, Default, Debug, DeriveEntity)] +pub struct Entity; + +impl EntityName for Entity { + fn table_name(&self) -> &str { + "tree_transactions" + } +} + +#[derive(Clone, Debug, PartialEq, DeriveModel, DeriveActiveModel, Eq, Serialize, Deserialize)] +pub struct Model { + pub signature: String, + pub tree: String, + pub slot: i64, + pub created_at: Option, + pub processed_at: Option, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] +pub enum Column { + Signature, + Tree, + Slot, + CreatedAt, + ProcessedAt, +} + +#[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] +pub enum PrimaryKey { + Signature, +} + +impl PrimaryKeyTrait for PrimaryKey { + type ValueType = String; + fn auto_increment() -> bool { + false + } +} + +#[derive(Copy, Clone, Debug, EnumIter)] +pub enum Relation {} + +impl ColumnTrait for Column { + type EntityName = Entity; + fn def(&self) -> ColumnDef { + match self { + Self::Signature => ColumnType::Text.def(), + Self::Tree => ColumnType::Text.def(), + Self::Slot => ColumnType::BigInteger.def(), + Self::CreatedAt => ColumnType::TimestampWithTimeZone.def().null(), + Self::ProcessedAt => ColumnType::TimestampWithTimeZone.def().null(), + } + } +} + +impl RelationTrait for Relation { + fn def(&self) -> RelationDef { + panic!("No RelationDef") + } +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/digital_asset_types/src/dao/mod.rs b/digital_asset_types/src/dao/mod.rs index bf1e540ab..4cde6c8b3 100644 --- a/digital_asset_types/src/dao/mod.rs +++ b/digital_asset_types/src/dao/mod.rs @@ -2,6 +2,8 @@ mod full_asset; mod generated; pub mod scopes; +use crate::rpc::{filter::TokenTypeClass, Interface}; + use self::sea_orm_active_enums::{ OwnerType, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions, }; @@ -52,8 +54,10 @@ pub struct SearchAssetsQuery { pub negate: Option, /// Defaults to [ConditionType::All] pub condition_type: Option, + pub interface: Option, pub specification_version: Option, pub specification_asset_class: Option, + pub token_type: Option, pub owner_address: Option>, pub owner_type: Option, pub creator_address: Option>, @@ -75,6 +79,33 @@ pub struct SearchAssetsQuery { } impl SearchAssetsQuery { + pub fn check_for_onwer_type_and_token_type(&self) -> Result<(), DbErr> { + if self.token_type.is_some() && self.owner_type.is_some() { + return Err(DbErr::Custom( + "`owner_type` is not supported when using `token_type` field".to_string(), + )); + } + Ok(()) + } + + pub fn check_for_owner_address_and_token_type(&self) -> Result<(), DbErr> { + if self.owner_address.is_none() && self.token_type.is_some() { + return Err(DbErr::Custom( + "Must provide `owner_address` when using `token_type` field".to_string(), + )); + } + Ok(()) + } + pub fn check_for_token_type_and_interface(&self) -> Result<(), DbErr> { + if self.token_type.is_some() && self.interface.is_some() { + return Err(DbErr::Custom( + "`specification_asset_class` is not supported when using `token_type` field" + .to_string(), + )); + } + Ok(()) + } + pub fn conditions(&self) -> Result<(Condition, Vec), DbErr> { let mut conditions = match self.condition_type { // None --> default to all when no option is provided @@ -88,16 +119,38 @@ impl SearchAssetsQuery { .clone() .map(|x| asset::Column::SpecificationVersion.eq(x)), ) - .add_option( + .add_option({ + self.check_for_owner_address_and_token_type()?; + self.check_for_onwer_type_and_token_type()?; + 
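The three `check_for_*` guards on `SearchAssetsQuery` enforce that `token_type` excludes `owner_type` and `interface` and requires `owner_address`. A condensed, dependency-free model of those rules (error strings shortened; note the patch phrases the interface error in terms of `specification_asset_class`):

```rust
struct SearchQuery {
    token_type: Option<String>,
    owner_type: Option<String>,
    interface: Option<String>,
    owner_address: Option<Vec<u8>>,
}

impl SearchQuery {
    fn validate(&self) -> Result<(), String> {
        if self.token_type.is_some() {
            if self.owner_type.is_some() {
                return Err("`owner_type` is not supported when using `token_type` field".into());
            }
            if self.interface.is_some() {
                return Err("`interface` is not supported when using `token_type` field".into());
            }
            if self.owner_address.is_none() {
                return Err("Must provide `owner_address` when using `token_type` field".into());
            }
        }
        Ok(())
    }
}

fn main() {
    let q = SearchQuery {
        token_type: Some("fungible".into()),
        owner_type: None,
        interface: None,
        owner_address: None, // missing, so validation must fail
    };
    assert!(q.validate().is_err());
}
```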
self.token_type.as_ref().map(|x| match x { + TokenTypeClass::Compressed => asset::Column::TreeId.is_not_null(), + TokenTypeClass::Nft => asset::Column::TreeId.is_null().and( + asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::Nft) + .or(asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::MplCoreAsset)) + .or(asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::ProgrammableNft)), + ), + TokenTypeClass::NonFungible => asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::Nft) + .or(asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::ProgrammableNft)) + .or(asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::MplCoreAsset)), + TokenTypeClass::Fungible => asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::FungibleAsset) + .or(asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::FungibleToken)), + TokenTypeClass::All => asset::Column::SpecificationAssetClass.is_not_null(), + }) + }) + .add_option({ + self.check_for_token_type_and_interface()?; self.specification_asset_class .clone() - .map(|x| asset::Column::SpecificationAssetClass.eq(x)), - ) - .add_option( - self.owner_address - .to_owned() - .map(|x| asset::Column::Owner.eq(x)), - ) + .map(|x| asset::Column::SpecificationAssetClass.eq(x)) + }) .add_option( self.delegate .to_owned() @@ -145,16 +198,34 @@ impl SearchAssetsQuery { if let Some(o) = self.owner_type.clone() { conditions = conditions.add(asset::Column::OwnerType.eq(o)); } else { - // Default to NFTs - // - // In theory, the owner_type=single check should be sufficient, - // however there is an old bug that has marked some non-NFTs as "single" with supply > 1. - // The supply check guarentees we do not include those. - conditions = conditions.add_option(Some( - asset::Column::OwnerType - .eq(OwnerType::Single) - .and(asset::Column::Supply.lte(1)), - )); + match self.token_type { + Some(TokenTypeClass::Fungible) => { + conditions = conditions.add_option(Some( + asset::Column::OwnerType.eq(OwnerType::Token).and( + (asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::FungibleToken)) + .or(asset::Column::SpecificationAssetClass + .eq(SpecificationAssetClass::FungibleAsset)), + ), + )); + } + Some(TokenTypeClass::All) => { + conditions = conditions + .add_option(Some(asset::Column::SpecificationAssetClass.is_not_null())); + } + _ => { + // Default to NFTs + // + // In theory, the owner_type=single check should be sufficient, + // however there is an old bug that has marked some non-NFTs as "single" with supply > 1. + // The supply check guarentees we do not include those. 
+ conditions = conditions.add( + asset::Column::OwnerType + .eq(OwnerType::Single) + .and(asset::Column::Supply.lte(1)), + ); + } + } } if let Some(c) = self.creator_address.to_owned() { @@ -194,6 +265,25 @@ impl SearchAssetsQuery { joins.push(rel); } + if let Some(o) = self.owner_address.to_owned() { + if self.token_type == Some(TokenTypeClass::Fungible) + || self.token_type == Some(TokenTypeClass::All) + { + conditions = conditions.add(token_accounts::Column::Owner.eq(o)); + let rel = extensions::token_accounts::Relation::Asset + .def() + .rev() + .on_condition(|left, right| { + Expr::tbl(right, token_accounts::Column::Mint) + .eq(Expr::tbl(left, asset::Column::Id)) + .into_condition() + }); + joins.push(rel); + } else { + conditions = conditions.add(asset::Column::Owner.eq(o)); + } + } + if let Some(g) = self.grouping.to_owned() { let cond = Condition::all() .add(asset_grouping::Column::GroupKey.eq(g.0)) diff --git a/digital_asset_types/src/dao/scopes/asset.rs b/digital_asset_types/src/dao/scopes/asset.rs index 58e4cbbaf..825a26859 100644 --- a/digital_asset_types/src/dao/scopes/asset.rs +++ b/digital_asset_types/src/dao/scopes/asset.rs @@ -1,15 +1,24 @@ use crate::{ dao::{ asset::{self}, - asset_authority, asset_creators, asset_data, asset_grouping, cl_audits_v2, + asset_authority, asset_creators, asset_data, asset_grouping, asset_v1_account_attachments, + cl_audits_v2, extensions::{self, instruction::PascalCase}, - sea_orm_active_enums::Instruction, - Cursor, FullAsset, GroupingSize, Pagination, + sea_orm_active_enums::{Instruction, V1AccountAttachments}, + token_accounts, tokens, Cursor, FullAsset, GroupingSize, Pagination, + }, + rpc::{ + filter::AssetSortDirection, + options::Options, + response::{NftEdition, NftEditions}, }, - rpc::filter::AssetSortDirection, }; use indexmap::IndexMap; -use sea_orm::{entity::*, query::*, ConnectionTrait, DbErr, Order}; +use mpl_token_metadata::accounts::{Edition, MasterEdition}; +use sea_orm::{entity::*, query::*, sea_query::Expr, ConnectionTrait, DbErr, Order}; +use serde::de::DeserializeOwned; +use serde_json::Value; +use solana_sdk::pubkey::Pubkey; use std::collections::HashMap; pub fn paginate( @@ -60,7 +69,7 @@ pub async fn get_by_creator( sort_direction: Order, pagination: &Pagination, limit: u64, - show_unverified_collections: bool, + options: &Options, ) -> Result, DbErr> { let mut condition = Condition::all() .add(asset_creators::Column::Creator.eq(creator.clone())) @@ -76,7 +85,7 @@ pub async fn get_by_creator( sort_direction, pagination, limit, - show_unverified_collections, + options, Some(creator), ) .await @@ -112,13 +121,13 @@ pub async fn get_by_grouping( sort_direction: Order, pagination: &Pagination, limit: u64, - show_unverified_collections: bool, + options: &Options, ) -> Result, DbErr> { let mut condition = asset_grouping::Column::GroupKey .eq(group_key) .and(asset_grouping::Column::GroupValue.eq(group_value)); - if !show_unverified_collections { + if !options.show_unverified_collections { condition = condition.and( asset_grouping::Column::Verified .eq(true) @@ -136,7 +145,7 @@ pub async fn get_by_grouping( sort_direction, pagination, limit, - show_unverified_collections, + options, None, ) .await @@ -149,11 +158,12 @@ pub async fn get_assets_by_owner( sort_direction: Order, pagination: &Pagination, limit: u64, - show_unverified_collections: bool, + options: &Options, ) -> Result, DbErr> { let cond = Condition::all() .add(asset::Column::Owner.eq(owner)) .add(asset::Column::Supply.gt(0)); + get_assets_by_condition( conn, cond, 
@@ -162,7 +172,7 @@ pub async fn get_assets_by_owner( sort_direction, pagination, limit, - show_unverified_collections, + options, ) .await } @@ -172,10 +182,12 @@ pub async fn get_assets( asset_ids: Vec>, pagination: &Pagination, limit: u64, + options: &Options, ) -> Result, DbErr> { let cond = Condition::all() .add(asset::Column::Id.is_in(asset_ids)) .add(asset::Column::Supply.gt(0)); + get_assets_by_condition( conn, cond, @@ -185,7 +197,7 @@ pub async fn get_assets( Order::Asc, pagination, limit, - false, + options, ) .await } @@ -197,7 +209,7 @@ pub async fn get_by_authority( sort_direction: Order, pagination: &Pagination, limit: u64, - show_unverified_collections: bool, + options: &Options, ) -> Result, DbErr> { let cond = Condition::all() .add(asset_authority::Column::Authority.eq(authority)) @@ -210,7 +222,7 @@ pub async fn get_by_authority( sort_direction, pagination, limit, - show_unverified_collections, + options, None, ) .await @@ -225,7 +237,7 @@ async fn get_by_related_condition( sort_direction: Order, pagination: &Pagination, limit: u64, - show_unverified_collections: bool, + options: &Options, required_creator: Option>, ) -> Result, DbErr> where @@ -236,27 +248,25 @@ where .join(JoinType::LeftJoin, relation.def()); if let Some(col) = sort_by { - stmt = stmt - .order_by(col, sort_direction.clone()) - .order_by(asset::Column::Id, sort_direction.clone()); + stmt = stmt.order_by(col, sort_direction.clone()) } let assets = paginate(pagination, limit, stmt, sort_direction, asset::Column::Id) .all(conn) .await?; - get_related_for_assets(conn, assets, show_unverified_collections, required_creator).await + get_related_for_assets(conn, assets, options, required_creator).await } pub async fn get_related_for_assets( conn: &impl ConnectionTrait, assets: Vec, - show_unverified_collections: bool, + options: &Options, required_creator: Option>, ) -> Result, DbErr> { let asset_ids = assets.iter().map(|a| a.id.clone()).collect::>(); let asset_data: Vec = asset_data::Entity::find() - .filter(asset_data::Column::Id.is_in(asset_ids)) + .filter(asset_data::Column::Id.is_in(asset_ids.clone())) .all(conn) .await?; let asset_data_map = asset_data.into_iter().fold(HashMap::new(), |mut acc, ad| { @@ -274,10 +284,12 @@ pub async fn get_related_for_assets( let id = asset.id.clone(); let fa = FullAsset { asset, - data: ad.clone(), + data: Some(ad.clone()), authorities: vec![], creators: vec![], groups: vec![], + token_info: None, + inscription: None, }; acc.insert(id, fa); }; @@ -288,7 +300,6 @@ pub async fn get_related_for_assets( // Get all creators for all assets in `assets_map``. 
let creators = asset_creators::Entity::find() .filter(asset_creators::Column::AssetId.is_in(ids.clone())) - .order_by_asc(asset_creators::Column::AssetId) .order_by_asc(asset_creators::Column::Position) .all(conn) .await?; @@ -316,7 +327,6 @@ pub async fn get_related_for_assets( let ids = assets_map.keys().cloned().collect::>(); let authorities = asset_authority::Entity::find() .filter(asset_authority::Column::AssetId.is_in(ids.clone())) - .order_by_asc(asset_authority::Column::AssetId) .all(conn) .await?; for a in authorities.into_iter() { @@ -325,7 +335,15 @@ pub async fn get_related_for_assets( } } - let cond = if show_unverified_collections { + for id in ids.clone() { + if let Ok(t) = get_token_by_id(conn, id.clone()).await { + if let Some(asset) = assets_map.get_mut(&id) { + asset.token_info = Some(t); + } + } + } + + let cond = if options.show_unverified_collections { Condition::all() } else { Condition::any() @@ -335,16 +353,41 @@ pub async fn get_related_for_assets( .add(asset_grouping::Column::Verified.is_null()) }; - let grouping = asset_grouping::Entity::find() + let grouping_base_query = asset_grouping::Entity::find() .filter(asset_grouping::Column::AssetId.is_in(ids.clone())) .filter(asset_grouping::Column::GroupValue.is_not_null()) - .filter(cond) - .order_by_asc(asset_grouping::Column::AssetId) - .all(conn) - .await?; - for g in grouping.into_iter() { - if let Some(asset) = assets_map.get_mut(&g.asset_id) { - asset.groups.push(g); + .filter(cond); + + if options.show_collection_metadata { + let combined_group_query = grouping_base_query + .find_also_related(asset_data::Entity) + .all(conn) + .await?; + + if options.show_inscription { + let attachments = asset_v1_account_attachments::Entity::find() + .filter(asset_v1_account_attachments::Column::AssetId.is_in(asset_ids)) + .all(conn) + .await?; + + for a in attachments.into_iter() { + if let Some(asset) = assets_map.get_mut(&a.id) { + asset.inscription = Some(a); + } + } + } + + for (g, a) in combined_group_query.into_iter() { + if let Some(asset) = assets_map.get_mut(&g.asset_id) { + asset.groups.push((g, a)); + } + } + } else { + let single_group_query = grouping_base_query.all(conn).await?; + for g in single_group_query.into_iter() { + if let Some(asset) = assets_map.get_mut(&g.asset_id) { + asset.groups.push((g, None)); + } } } @@ -360,7 +403,7 @@ pub async fn get_assets_by_condition( sort_direction: Order, pagination: &Pagination, limit: u64, - show_unverified_collections: bool, + options: &Options, ) -> Result, DbErr> { let mut stmt = asset::Entity::find(); for def in joins { @@ -368,16 +411,13 @@ pub async fn get_assets_by_condition( } stmt = stmt.filter(condition); if let Some(col) = sort_by { - stmt = stmt - .order_by(col, sort_direction.clone()) - .order_by(asset::Column::Id, sort_direction.clone()); + stmt = stmt.order_by(col, sort_direction.clone()) } let assets = paginate(pagination, limit, stmt, sort_direction, asset::Column::Id) .all(conn) .await?; - let full_assets = - get_related_for_assets(conn, assets, show_unverified_collections, None).await?; + let full_assets = get_related_for_assets(conn, assets, options, None).await?; Ok(full_assets) } @@ -385,22 +425,35 @@ pub async fn get_by_id( conn: &impl ConnectionTrait, asset_id: Vec, include_no_supply: bool, + options: &Options, ) -> Result { let mut asset_data = asset::Entity::find_by_id(asset_id.clone()).find_also_related(asset_data::Entity); if !include_no_supply { asset_data = asset_data.filter(Condition::all().add(asset::Column::Supply.gt(0))); } - let 
asset_data: (asset::Model, asset_data::Model) = + + let token_info = if options.show_fungible { + get_token_by_id(conn, asset_id.clone()).await.ok() + } else { + None + }; + + let inscription = if options.show_inscription { + get_inscription_by_mint(conn, asset_id.clone()).await.ok() + } else { + None + }; + + let asset_data: (asset::Model, Option) = asset_data.one(conn).await.and_then(|o| match o { - Some((a, Some(d))) => Ok((a, d)), + Some((a, d)) => Ok((a, d)), _ => Err(DbErr::RecordNotFound("Asset Not Found".to_string())), })?; let (asset, data) = asset_data; let authorities: Vec = asset_authority::Entity::find() .filter(asset_authority::Column::AssetId.eq(asset.id.clone())) - .order_by_asc(asset_authority::Column::AssetId) .all(conn) .await?; let mut creators: Vec = asset_creators::Entity::find() @@ -411,7 +464,7 @@ pub async fn get_by_id( filter_out_stale_creators(&mut creators); - let grouping: Vec = asset_grouping::Entity::find() + let grouping_query = asset_grouping::Entity::find() .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) .filter(asset_grouping::Column::GroupValue.is_not_null()) .filter( @@ -420,16 +473,30 @@ pub async fn get_by_id( // Older versions of the indexer did not have the verified flag. A group would be present if and only if it was verified. // Therefore if verified is null, we can assume that the group is verified. .add(asset_grouping::Column::Verified.is_null()), - ) - .order_by_asc(asset_grouping::Column::AssetId) - .all(conn) - .await?; + ); + + let groups = if options.show_collection_metadata { + grouping_query + .find_also_related(asset_data::Entity) + .all(conn) + .await? + } else { + grouping_query + .all(conn) + .await? + .into_iter() + .map(|g| (g, None)) + .collect::>() + }; + Ok(FullAsset { asset, data, authorities, creators, - groups: grouping, + groups, + token_info, + inscription, }) } @@ -500,7 +567,6 @@ pub async fn get_asset_signatures( let stmt = asset::Entity::find() .distinct_on([(asset::Entity, asset::Column::Id)]) .filter(asset::Column::Id.eq(asset_id)) - .order_by(asset::Column::Id, Order::Desc) .limit(1); let asset = stmt.one(conn).await?; if let Some(asset) = asset { @@ -553,3 +619,185 @@ fn filter_out_stale_creators(creators: &mut Vec) { } } } +pub async fn get_token_accounts( + conn: &impl ConnectionTrait, + owner_address: Option>, + mint_address: Option>, + pagination: &Pagination, + limit: u64, + options: &Options, +) -> Result, DbErr> { + let mut condition = Condition::all(); + + if owner_address.is_none() && mint_address.is_none() { + return Err(DbErr::Custom( + "Either 'owner_address' or 'mint_address' must be provided".to_string(), + )); + } + + if options.show_zero_balance { + condition = condition.add(token_accounts::Column::Amount.gte(0)); + } else { + condition = condition.add(token_accounts::Column::Amount.gt(0)); + } + + if let Some(owner) = owner_address { + condition = condition.add(token_accounts::Column::Owner.eq(owner)); + } + if let Some(mint) = mint_address { + condition = condition.add(token_accounts::Column::Mint.eq(mint)); + } + + let token_accounts = paginate( + pagination, + limit, + token_accounts::Entity::find().filter(condition), + Order::Asc, + token_accounts::Column::Pubkey, + ) + .all(conn) + .await?; + + Ok(token_accounts) +} + +pub fn get_edition_data_from_json(data: Value) -> Result { + serde_json::from_value(data).map_err(|e| DbErr::Custom(e.to_string())) +} + +pub fn attachment_to_nft_edition( + attachment: asset_v1_account_attachments::Model, +) -> Result { + let data: Edition = 
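The scope-level `get_token_accounts` above requires at least one of owner or mint and widens the amount filter from `> 0` to `>= 0` when `show_zero_balance` is set. A dependency-free model of that filter assembly, with strings standing in for the sea-orm conditions:

```rust
fn build_filters(
    owner: Option<&str>,
    mint: Option<&str>,
    show_zero_balance: bool,
) -> Result<Vec<String>, String> {
    if owner.is_none() && mint.is_none() {
        return Err("Either 'owner_address' or 'mint_address' must be provided".into());
    }

    // Zero-balance accounts are excluded unless explicitly requested.
    let amount = if show_zero_balance { "amount >= 0" } else { "amount > 0" };
    let mut filters = vec![amount.to_string()];

    if let Some(o) = owner {
        filters.push(format!("owner = '{o}'"));
    }
    if let Some(m) = mint {
        filters.push(format!("mint = '{m}'"));
    }
    Ok(filters)
}

fn main() {
    assert!(build_filters(None, None, false).is_err());
    assert_eq!(build_filters(Some("alice"), None, true).unwrap().len(), 2);
}
```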
attachment + .data + .clone() + .ok_or(DbErr::RecordNotFound("Edition data not found".to_string())) + .map(get_edition_data_from_json)??; + + Ok(NftEdition { + mint_address: attachment + .asset_id + .clone() + .map(|id| bs58::encode(id).into_string()) + .unwrap_or("".to_string()), + edition_number: data.edition, + edition_address: bs58::encode(attachment.id.clone()).into_string(), + }) +} + +pub async fn get_nft_editions( + conn: &impl ConnectionTrait, + mint_address: Pubkey, + pagination: &Pagination, + limit: u64, +) -> Result { + let master_edition_pubkey = MasterEdition::find_pda(&mint_address).0; + + // to fetch nft editions associated with a mint we need to fetch the master edition first + let master_edition = + asset_v1_account_attachments::Entity::find_by_id(master_edition_pubkey.to_bytes().to_vec()) + .one(conn) + .await? + .ok_or(DbErr::RecordNotFound( + "Master Edition not found".to_string(), + ))?; + + let master_edition_data: MasterEdition = master_edition + .data + .clone() + .ok_or(DbErr::RecordNotFound( + "Master Edition data not found".to_string(), + )) + .map(get_edition_data_from_json)??; + + let mut stmt = asset_v1_account_attachments::Entity::find(); + + stmt = stmt.filter( + asset_v1_account_attachments::Column::AttachmentType + .eq(V1AccountAttachments::Edition) + // The data field is a JSON field that contains the edition data. + .and(asset_v1_account_attachments::Column::Data.is_not_null()) + // The parent field is a string field that contains the master edition pubkey ( mapping edition to master edition ) + .and(Expr::cust(&format!( + "data->>'parent' = '{}'", + master_edition_pubkey + ))), + ); + + let nft_editions = paginate( + pagination, + limit, + stmt, + Order::Asc, + asset_v1_account_attachments::Column::Id, + ) + .all(conn) + .await? 
+ .into_iter() + .map(attachment_to_nft_edition) + .collect::, _>>()?; + + let (page, before, after, cursor) = match pagination { + Pagination::Keyset { before, after } => { + let bef = before.clone().and_then(|x| String::from_utf8(x).ok()); + let aft = after.clone().and_then(|x| String::from_utf8(x).ok()); + (None, bef, aft, None) + } + Pagination::Page { page } => (Some(*page as u32), None, None, None), + Pagination::Cursor(_) => { + if let Some(last_asset) = nft_editions.last() { + let cursor_str = bs58::encode(last_asset.edition_address.clone()).into_string(); + (None, None, None, Some(cursor_str)) + } else { + (None, None, None, None) + } + } + }; + + Ok(NftEditions { + total: nft_editions.len() as u32, + master_edition_address: master_edition_pubkey.to_string(), + supply: master_edition_data.supply, + max_supply: master_edition_data.max_supply, + editions: nft_editions, + limit: limit as u32, + page, + before, + after, + cursor, + }) +} + +pub async fn get_token_by_id( + conn: &impl ConnectionTrait, + id: Vec, +) -> Result { + tokens::Entity::find_by_id(id) + .one(conn) + .await + .and_then(|o| match o { + Some(t) => Ok(t), + _ => Err(DbErr::RecordNotFound("Token Not Found".to_string())), + }) +} + +pub async fn get_inscription_by_mint( + conn: &impl ConnectionTrait, + mint: Vec, +) -> Result { + asset_v1_account_attachments::Entity::find() + .filter( + asset_v1_account_attachments::Column::Data + .is_not_null() + .and(Expr::cust(&format!( + "data->>'root' = '{}'", + bs58::encode(mint).into_string() + ))), + ) + .one(conn) + .await + .and_then(|o| match o { + Some(t) => Ok(t), + _ => Err(DbErr::RecordNotFound("Inscription Not Found".to_string())), + }) +} diff --git a/digital_asset_types/src/dapi/assets_by_authority.rs b/digital_asset_types/src/dapi/assets_by_authority.rs index 59404f3e0..b52891062 100644 --- a/digital_asset_types/src/dapi/assets_by_authority.rs +++ b/digital_asset_types/src/dapi/assets_by_authority.rs @@ -24,7 +24,7 @@ pub async fn get_assets_by_authority( sort_direction, &pagination, page_options.limit, - options.show_unverified_collections, + options, ) .await?; Ok(build_asset_response( diff --git a/digital_asset_types/src/dapi/assets_by_creator.rs b/digital_asset_types/src/dapi/assets_by_creator.rs index 9ce5de591..3a2b1e53e 100644 --- a/digital_asset_types/src/dapi/assets_by_creator.rs +++ b/digital_asset_types/src/dapi/assets_by_creator.rs @@ -27,7 +27,7 @@ pub async fn get_assets_by_creator( sort_direction, &pagination, page_options.limit, - options.show_unverified_collections, + options, ) .await?; Ok(build_asset_response( diff --git a/digital_asset_types/src/dapi/assets_by_group.rs b/digital_asset_types/src/dapi/assets_by_group.rs index 68784b9f4..36f4ae534 100644 --- a/digital_asset_types/src/dapi/assets_by_group.rs +++ b/digital_asset_types/src/dapi/assets_by_group.rs @@ -27,7 +27,7 @@ pub async fn get_assets_by_group( sort_direction, &pagination, page_options.limit, - options.show_unverified_collections, + options, ) .await?; Ok(build_asset_response( diff --git a/digital_asset_types/src/dapi/assets_by_owner.rs b/digital_asset_types/src/dapi/assets_by_owner.rs index c3c4da3a5..5f342fe9a 100644 --- a/digital_asset_types/src/dapi/assets_by_owner.rs +++ b/digital_asset_types/src/dapi/assets_by_owner.rs @@ -24,7 +24,7 @@ pub async fn get_assets_by_owner( sort_direction, &pagination, page_options.limit, - options.show_unverified_collections, + options, ) .await?; Ok(build_asset_response( diff --git a/digital_asset_types/src/dapi/change_logs.rs 
b/digital_asset_types/src/dapi/change_logs.rs index 7023ad12f..fdd19ddf9 100644 --- a/digital_asset_types/src/dapi/change_logs.rs +++ b/digital_asset_types/src/dapi/change_logs.rs @@ -200,7 +200,7 @@ fn build_asset_proof( tree_id: Vec, leaf_node_idx: i64, leaf_hash: Vec, - req_indexes: &Vec, + req_indexes: &[i64], required_nodes: &[SimpleChangeLog], ) -> AssetProof { let mut final_node_list = vec![SimpleChangeLog::default(); req_indexes.len()]; @@ -211,7 +211,7 @@ fn build_asset_proof( } for (i, (n, nin)) in final_node_list .iter_mut() - .zip(req_indexes.clone()) + .zip(req_indexes.to_owned()) .enumerate() { if *n == SimpleChangeLog::default() { diff --git a/digital_asset_types/src/dapi/common/asset.rs b/digital_asset_types/src/dapi/common/asset.rs index c9a9dd486..cb766ba49 100644 --- a/digital_asset_types/src/dapi/common/asset.rs +++ b/digital_asset_types/src/dapi/common/asset.rs @@ -1,16 +1,21 @@ -use crate::dao::sea_orm_active_enums::SpecificationVersions; +use crate::dao::token_accounts; use crate::dao::FullAsset; use crate::dao::PageOptions; use crate::dao::Pagination; use crate::dao::{asset, asset_authority, asset_creators, asset_data, asset_grouping}; use crate::rpc::filter::{AssetSortBy, AssetSortDirection, AssetSorting}; use crate::rpc::options::Options; +use crate::rpc::response::TokenAccountList; use crate::rpc::response::TransactionSignatureList; -use crate::rpc::response::{AssetError, AssetList}; +use crate::rpc::response::{AssetList, DasError}; +use crate::rpc::TokenInfo; +use crate::rpc::TokenInscriptionInfo; use crate::rpc::{ Asset as RpcAsset, Authority, Compression, Content, Creator, File, Group, Interface, - MetadataMap, MplCoreInfo, Ownership, Royalty, Scope, Supply, Uses, + MetadataMap, MplCoreInfo, Ownership, Royalty, Scope, Supply, TokenAccount as RpcTokenAccount, + Uses, }; +use blockbuster::programs::token_inscriptions::InscriptionData; use jsonpath_lib::JsonPathError; use log::warn; use mime_guess::Mime; @@ -71,16 +76,27 @@ pub fn build_asset_response( } }; - let (items, errors) = asset_list_to_rpc(assets, options); - AssetList { + let base_asset_list = AssetList { total, limit: limit as u32, page: page.map(|x| x as u32), before, after, + items: vec![], + errors: vec![], + cursor, + }; + + if assets.is_empty() { + return base_asset_list; + } + + let (items, errors) = asset_list_to_rpc(assets, options); + + AssetList { items, errors, - cursor, + ..base_asset_list } } @@ -111,7 +127,7 @@ pub fn build_transaction_signatures_response( pub fn create_sorting(sorting: AssetSorting) -> (sea_orm::query::Order, Option) { let sort_column = match sorting.sort_by { - AssetSortBy::Id => Some(asset::Column::Id), + // AssetSortBy::Id => Some(asset::Column::Id), AssetSortBy::Created => Some(asset::Column::CreatedAt), AssetSortBy::Updated => Some(asset::Column::SlotUpdated), AssetSortBy::RecentAction => Some(asset::Column::SlotUpdated), @@ -278,14 +294,8 @@ pub fn v1_content_from_json(asset_data: &asset_data::Model) -> Result Result { - match asset.specification_version { - Some(SpecificationVersions::V1) | Some(SpecificationVersions::V0) => { - v1_content_from_json(data) - } - Some(_) => Err(DbErr::Custom("Version Not Implemented".to_string())), - None => Err(DbErr::Custom("Specification version not found".to_string())), - } +pub fn get_content(data: &asset_data::Model) -> Option { + v1_content_from_json(data).ok() } pub fn to_authority(authority: Vec) -> Vec { @@ -310,34 +320,56 @@ pub fn to_creators(creators: Vec) -> Vec { } pub fn to_grouping( - groups: Vec, + groups: 
Vec<(asset_grouping::Model, Option)>, options: &Options, ) -> Result, DbErr> { let result: Vec = groups .iter() - .filter_map(|model| { + .filter_map(|(asset_group, asset_data)| { let verified = match options.show_unverified_collections { // Null verified indicates legacy data, meaning it is verified. - true => Some(model.verified), + true => Some(asset_group.verified), false => None, }; // Filter out items where group_value is None. - model.group_value.clone().map(|group_value| Group { - group_key: model.group_key.clone(), - group_value: Some(group_value), - verified, + asset_group.group_value.clone().map(|group_value| { + let collection_metadata = asset_data.as_ref().map(|data| { + let mut metadata_selector_fn = jsonpath_lib::selector(&data.metadata); + let metadata_selector = &mut metadata_selector_fn; + let mut meta: MetadataMap = MetadataMap::new(); + + if let Some(name) = safe_select(metadata_selector, "$.name") { + meta.set_item("name", name.clone()); + } + if let Some(symbol) = safe_select(metadata_selector, "$.symbol") { + meta.set_item("symbol", symbol.clone()); + } + if let Some(image) = safe_select(metadata_selector, "$.image") { + meta.set_item("image", image.clone()); + } + if let Some(external_url) = safe_select(metadata_selector, "$.external_url") { + meta.set_item("external_url", external_url.clone()); + } + + meta + }); + + Group { + group_key: asset_group.group_key.clone(), + group_value: Some(group_value), + verified, + collection_metadata, + } }) }) .collect(); + Ok(result) } pub fn get_interface(asset: &asset::Model) -> Result { Ok(Interface::from(( - asset - .specification_version - .as_ref() - .ok_or(DbErr::Custom("Specification version not found".to_string()))?, + asset.specification_version.as_ref(), asset .specification_asset_class .as_ref() @@ -355,19 +387,39 @@ pub fn asset_to_rpc(asset: FullAsset, options: &Options) -> Result Some(MplCoreInfo { num_minted: asset.mpl_core_collection_num_minted, @@ -377,12 +429,53 @@ pub fn asset_to_rpc(asset: FullAsset, options: &Options) -> Result None, }; + let token_info = if options.show_fungible { + token_info.map(|token_info| TokenInfo { + supply: token_info.supply.try_into().unwrap_or(0), + decimals: token_info.decimals as u8, + mint_authority: token_info + .mint_authority + .map(|s| bs58::encode(s).into_string()), + freeze_authority: token_info + .freeze_authority + .map(|s| bs58::encode(s).into_string()), + token_program: bs58::encode(token_info.token_program).into_string(), + }) + } else { + None + }; + + let inscription = if options.show_inscription { + inscription + .and_then(|i| { + i.data.map(|d| -> Result { + let deserialized_data: InscriptionData = + serde_json::from_value(d).map_err(|e| { + DbErr::Custom(format!("Failed to deserialize inscription data: {}", e)) + })?; + Ok(TokenInscriptionInfo { + authority: deserialized_data.authority, + root: deserialized_data.root, + content: deserialized_data.content, + encoding: deserialized_data.encoding, + inscription_data: deserialized_data.inscription_data, + order: deserialized_data.order, + size: deserialized_data.size, + validation_hash: deserialized_data.validation_hash, + }) + }) + }) + .and_then(|i| i.ok()) + } else { + None + }; + Ok(RpcAsset { interface: interface.clone(), id: bs58::encode(asset.id).into_string(), - content: Some(content), + content, authorities: Some(rpc_authorities), - mutable: data.chain_data_mutability.into(), + mutable, compression: Some(Compression { eligible: asset.compressible, compressed: asset.compressed, @@ -415,7 +508,7 @@ pub fn 
asset_to_rpc(asset: FullAsset, options: &Options) -> Result Result Some(Supply { edition_nonce, @@ -433,17 +526,11 @@ pub fn asset_to_rpc(asset: FullAsset, options: &Options) -> Result None, }, - uses: data.chain_data.get("uses").map(|u| Uses { - use_method: u - .get("use_method") - .and_then(|s| s.as_str()) - .unwrap_or("Single") - .to_string() - .into(), - total: u.get("total").and_then(|t| t.as_u64()).unwrap_or(0), - remaining: u.get("remaining").and_then(|t| t.as_u64()).unwrap_or(0), - }), + uses, burnt: asset.burnt, + token_info, + inscription, + mint_extensions: asset.mint_extensions, plugins: asset.mpl_core_plugins, unknown_plugins: asset.mpl_core_unknown_plugins, mpl_core_info, @@ -455,14 +542,14 @@ pub fn asset_to_rpc(asset: FullAsset, options: &Options) -> Result, options: &Options, -) -> (Vec, Vec) { +) -> (Vec, Vec) { asset_list .into_iter() .fold((vec![], vec![]), |(mut assets, mut errors), asset| { let id = bs58::encode(asset.asset.id.clone()).into_string(); match asset_to_rpc(asset, options) { Ok(rpc_asset) => assets.push(rpc_asset), - Err(e) => errors.push(AssetError { + Err(e) => errors.push(DasError { id, error: e.to_string(), }), @@ -470,3 +557,86 @@ pub fn asset_list_to_rpc( (assets, errors) }) } +pub fn token_account_to_rpc( + token_account: token_accounts::Model, + _options: &Options, +) -> Result { + let address = bs58::encode(token_account.pubkey.clone()).into_string(); + let mint = bs58::encode(token_account.mint.clone()).into_string(); + let owner = bs58::encode(token_account.owner.clone()).into_string(); + let delegate = token_account + .delegate + .map(|d| bs58::encode(d).into_string()); + let close_authority = token_account + .close_authority + .map(|d| bs58::encode(d).into_string()); + + Ok(RpcTokenAccount { + address, + mint, + amount: token_account.amount as u64, + owner, + frozen: token_account.frozen, + delegate, + delegated_amount: token_account.delegated_amount as u64, + close_authority, + extensions: None, + }) +} + +pub fn token_account_list_to_rpc( + token_accounts: Vec, + options: &Options, +) -> (Vec, Vec) { + token_accounts.into_iter().fold( + (vec![], vec![]), + |(mut accounts, mut errors), token_account| { + let id = bs58::encode(token_account.pubkey.clone()).into_string(); + match token_account_to_rpc(token_account, options) { + Ok(rpc_token_account) => accounts.push(rpc_token_account), + Err(e) => errors.push(DasError { + id, + error: e.to_string(), + }), + } + (accounts, errors) + }, + ) +} + +pub fn build_token_list_response( + token_accounts: Vec, + limit: u64, + pagination: &Pagination, + options: &Options, +) -> TokenAccountList { + let total = token_accounts.len() as u32; + let (page, before, after, cursor) = match pagination { + Pagination::Keyset { before, after } => { + let bef = before.clone().and_then(|x| String::from_utf8(x).ok()); + let aft = after.clone().and_then(|x| String::from_utf8(x).ok()); + (None, bef, aft, None) + } + Pagination::Page { page } => (Some(*page as u32), None, None, None), + Pagination::Cursor(_) => { + if let Some(last_token_account) = token_accounts.last() { + let cursor_str = bs58::encode(&last_token_account.pubkey.clone()).into_string(); + (None, None, None, Some(cursor_str)) + } else { + (None, None, None, None) + } + } + }; + + let (items, errors) = token_account_list_to_rpc(token_accounts, options); + TokenAccountList { + total, + limit: limit as u32, + page, + before, + after, + token_accounts: items, + cursor, + errors, + } +} diff --git a/digital_asset_types/src/dapi/get_asset.rs 
b/digital_asset_types/src/dapi/get_asset.rs index 3740562c3..2ec7d9006 100644 --- a/digital_asset_types/src/dapi/get_asset.rs +++ b/digital_asset_types/src/dapi/get_asset.rs @@ -11,7 +11,7 @@ pub async fn get_asset( id: Vec, options: &Options, ) -> Result { - let asset = scopes::asset::get_by_id(db, id, false).await?; + let asset = scopes::asset::get_by_id(db, id, false, options).await?; asset_to_rpc(asset, options) } @@ -22,7 +22,7 @@ pub async fn get_assets( options: &Options, ) -> Result, DbErr> { let pagination = Pagination::Page { page: 1 }; - let assets = scopes::asset::get_assets(db, ids, &pagination, limit).await?; + let assets = scopes::asset::get_assets(db, ids, &pagination, limit, options).await?; let asset_list = build_asset_response(assets, limit, &pagination, options); let asset_map = asset_list .items diff --git a/digital_asset_types/src/dapi/get_token_accounts.rs b/digital_asset_types/src/dapi/get_token_accounts.rs new file mode 100644 index 000000000..28dcfc503 --- /dev/null +++ b/digital_asset_types/src/dapi/get_token_accounts.rs @@ -0,0 +1,35 @@ +use sea_orm::{DatabaseConnection, DbErr}; + +use crate::{ + dao::PageOptions, + rpc::{options::Options, response::TokenAccountList}, +}; + +use super::common::{build_token_list_response, create_pagination}; + +pub async fn get_token_accounts( + db: &DatabaseConnection, + owner_address: Option>, + mint_address: Option>, + page_options: &PageOptions, + options: &Options, +) -> Result { + let pagination = create_pagination(page_options)?; + + let token_accounts = crate::dao::scopes::asset::get_token_accounts( + db, + owner_address, + mint_address, + &pagination, + page_options.limit, + options, + ) + .await?; + + Ok(build_token_list_response( + token_accounts, + page_options.limit, + &pagination, + options, + )) +} diff --git a/digital_asset_types/src/dapi/mod.rs b/digital_asset_types/src/dapi/mod.rs index e9481169a..488a474bd 100644 --- a/digital_asset_types/src/dapi/mod.rs +++ b/digital_asset_types/src/dapi/mod.rs @@ -5,6 +5,7 @@ mod assets_by_owner; mod change_logs; mod get_asset; mod get_asset_signatures; +mod get_token_accounts; mod search_assets; pub mod common; @@ -16,4 +17,5 @@ pub use assets_by_owner::*; pub use change_logs::*; pub use get_asset::*; pub use get_asset_signatures::*; +pub use get_token_accounts::*; pub use search_assets::*; diff --git a/digital_asset_types/src/dapi/search_assets.rs b/digital_asset_types/src/dapi/search_assets.rs index a7ee65509..85a0f190d 100644 --- a/digital_asset_types/src/dapi/search_assets.rs +++ b/digital_asset_types/src/dapi/search_assets.rs @@ -23,7 +23,7 @@ pub async fn search_assets( sort_direction, &pagination, page_options.limit, - options.show_unverified_collections, + options, ) .await?; Ok(build_asset_response( diff --git a/digital_asset_types/src/dapi/signatures_for_asset.rs b/digital_asset_types/src/dapi/signatures_for_asset.rs new file mode 100644 index 000000000..f34893800 --- /dev/null +++ b/digital_asset_types/src/dapi/signatures_for_asset.rs @@ -0,0 +1,36 @@ +use crate::dao::scopes; +use crate::dao::PageOptions; +use crate::rpc::filter::AssetSorting; +use crate::rpc::response::TransactionSignatureList; +use sea_orm::DatabaseConnection; +use sea_orm::DbErr; +use super::common::build_transaction_signatures_response; +use super::common::{build_asset_response, create_pagination, create_sorting}; + + +pub async fn get_signatures_for_asset( + db: &DatabaseConnection, + asset_id: Option>, + tree: Option>, + leaf_idx: Option, + sorting: AssetSorting, + page_options: 
&PageOptions, +) -> Result { + let pagination = create_pagination(&page_options)?; + let (sort_direction, sort_column) = create_sorting(sorting); + let transactions = scopes::asset::get_signatures_for_asset( + db, + asset_id, + tree, + leaf_idx, + sort_direction, + &pagination, + page_options.limit + ) + .await?; + Ok(build_transaction_signatures_response( + transactions, + page_options.limit, + &pagination, + )) +} diff --git a/digital_asset_types/src/rpc/asset.rs b/digital_asset_types/src/rpc/asset.rs index 751ef3841..ba27c4cad 100644 --- a/digital_asset_types/src/rpc/asset.rs +++ b/digital_asset_types/src/rpc/asset.rs @@ -21,22 +21,22 @@ pub struct AssetProof { pub tree_id: String, } -#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, JsonSchema)] +#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, JsonSchema, Default)] pub enum Interface { #[serde(rename = "V1_NFT")] V1NFT, #[serde(rename = "V1_PRINT")] V1PRINT, - #[serde(rename = "LEGACY_NFT")] + #[serde(rename = "V2_NFT")] + Nft, // TODO: change on version bump + #[serde(rename = "LEGACY_NFT")] #[allow(non_camel_case_types)] LEGACY_NFT, - #[serde(rename = "V2_NFT")] - Nft, #[serde(rename = "FungibleAsset")] FungibleAsset, - #[serde(rename = "Custom")] - Custom, + #[serde(rename = "FungibleToken")] + FungibleToken, #[serde(rename = "Identity")] Identity, #[serde(rename = "Executable")] @@ -47,19 +47,28 @@ pub enum Interface { MplCoreAsset, #[serde(rename = "MplCoreCollection")] MplCoreCollection, + #[default] + #[serde(rename = "Custom")] + Custom, } -impl From<(&SpecificationVersions, &SpecificationAssetClass)> for Interface { - fn from(i: (&SpecificationVersions, &SpecificationAssetClass)) -> Self { +impl From<(Option<&SpecificationVersions>, &SpecificationAssetClass)> for Interface { + fn from(i: (Option<&SpecificationVersions>, &SpecificationAssetClass)) -> Self { match i { - (SpecificationVersions::V1, SpecificationAssetClass::Nft) => Interface::V1NFT, - (SpecificationVersions::V1, SpecificationAssetClass::PrintableNft) => Interface::V1NFT, - (SpecificationVersions::V0, SpecificationAssetClass::Nft) => Interface::LEGACY_NFT, - (SpecificationVersions::V1, SpecificationAssetClass::ProgrammableNft) => { + (Some(SpecificationVersions::V1), SpecificationAssetClass::Nft) => Interface::V1NFT, + (Some(SpecificationVersions::V1), SpecificationAssetClass::PrintableNft) => { + Interface::V1NFT + } + (Some(SpecificationVersions::V0), SpecificationAssetClass::Nft) => { + Interface::LEGACY_NFT + } + (Some(SpecificationVersions::V1), SpecificationAssetClass::ProgrammableNft) => { Interface::ProgrammableNFT } (_, SpecificationAssetClass::MplCoreAsset) => Interface::MplCoreAsset, (_, SpecificationAssetClass::MplCoreCollection) => Interface::MplCoreCollection, + (_, SpecificationAssetClass::FungibleAsset) => Interface::FungibleAsset, + (_, SpecificationAssetClass::FungibleToken) => Interface::FungibleToken, _ => Interface::Custom, } } @@ -87,6 +96,10 @@ impl From for (SpecificationVersions, SpecificationAssetClass) { SpecificationVersions::V1, SpecificationAssetClass::MplCoreCollection, ), + Interface::FungibleToken => ( + SpecificationVersions::V1, + SpecificationAssetClass::FungibleToken, + ), _ => (SpecificationVersions::V1, SpecificationAssetClass::Unknown), } } @@ -219,6 +232,8 @@ pub struct Group { pub group_value: Option, #[serde(skip_serializing_if = "Option::is_none")] pub verified: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub collection_metadata: Option, } #[derive(Serialize, Deserialize, Clone, 
Debug, Eq, PartialEq, JsonSchema)] @@ -367,6 +382,30 @@ pub struct MplCoreInfo { } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] +pub struct TokenInfo { + pub supply: u64, + pub decimals: u8, + pub token_program: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub mint_authority: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub freeze_authority: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] +pub struct TokenInscriptionInfo { + pub authority: String, + pub root: String, + pub inscription_data: String, + pub content: String, + pub encoding: String, + pub order: u64, + pub size: u32, + pub validation_hash: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema, Default)] + pub struct Asset { pub interface: Interface, pub id: String, @@ -382,13 +421,21 @@ pub struct Asset { pub royalty: Option, #[serde(skip_serializing_if = "Option::is_none")] pub creators: Option>, - pub ownership: Ownership, + #[serde(skip_serializing_if = "Option::is_none")] + pub ownership: Option, #[serde(skip_serializing_if = "Option::is_none")] pub uses: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub supply: Option, pub mutable: bool, pub burnt: bool, #[serde(skip_serializing_if = "Option::is_none")] + pub token_info: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub inscription: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub mint_extensions: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub plugins: Option, #[serde(skip_serializing_if = "Option::is_none")] pub unknown_plugins: Option, @@ -399,3 +446,16 @@ pub struct Asset { #[serde(skip_serializing_if = "Option::is_none")] pub unknown_external_plugins: Option, } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default, JsonSchema)] +#[serde(default)] +pub struct TokenAccount { + pub address: String, + pub mint: String, + pub amount: u64, + pub owner: String, + pub frozen: bool, + pub delegate: Option, + pub delegated_amount: u64, + pub close_authority: Option, + pub extensions: Option, +} diff --git a/digital_asset_types/src/rpc/display_options.rs b/digital_asset_types/src/rpc/display_options.rs new file mode 100644 index 000000000..ac3efc6b6 --- /dev/null +++ b/digital_asset_types/src/rpc/display_options.rs @@ -0,0 +1,9 @@ +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, JsonSchema, Default)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct DisplayOptions { + #[serde(default)] + pub show_unverified_collections: bool, +} diff --git a/digital_asset_types/src/rpc/filter.rs b/digital_asset_types/src/rpc/filter.rs index a471f21fb..83416fb8f 100644 --- a/digital_asset_types/src/rpc/filter.rs +++ b/digital_asset_types/src/rpc/filter.rs @@ -1,4 +1,5 @@ use schemars::JsonSchema; +use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] @@ -11,7 +12,7 @@ pub struct AssetSorting { impl Default for AssetSorting { fn default() -> AssetSorting { AssetSorting { - sort_by: AssetSortBy::Id, + sort_by: AssetSortBy::None, sort_direction: Some(AssetSortDirection::default()), } } @@ -19,8 +20,6 @@ impl Default for AssetSorting { #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] pub enum AssetSortBy { - #[serde(rename = "id")] - Id, #[serde(rename = "created")] 
Created, #[serde(rename = "updated")] @@ -31,6 +30,15 @@ pub enum AssetSortBy { None, } +#[derive(Debug, Clone, PartialEq, Eq, EnumIter, Serialize, Deserialize, JsonSchema)] +pub enum TokenTypeClass { + Fungible, + NonFungible, + Compressed, + Nft, + All, +} + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] pub enum AssetSortDirection { #[serde(rename = "asc")] diff --git a/digital_asset_types/src/rpc/options.rs b/digital_asset_types/src/rpc/options.rs index a1fafa2c4..9102d015e 100644 --- a/digital_asset_types/src/rpc/options.rs +++ b/digital_asset_types/src/rpc/options.rs @@ -6,4 +6,12 @@ use serde::{Deserialize, Serialize}; pub struct Options { #[serde(default)] pub show_unverified_collections: bool, + #[serde(default)] + pub show_fungible: bool, + #[serde(default)] + pub show_collection_metadata: bool, + #[serde(default)] + pub show_inscription: bool, + #[serde(default)] + pub show_zero_balance: bool, } diff --git a/digital_asset_types/src/rpc/response.rs b/digital_asset_types/src/rpc/response.rs index 53076c8b2..641f39640 100644 --- a/digital_asset_types/src/rpc/response.rs +++ b/digital_asset_types/src/rpc/response.rs @@ -1,12 +1,13 @@ use schemars::JsonSchema; + use { - crate::rpc::Asset, + crate::rpc::{Asset, TokenAccount}, serde::{Deserialize, Serialize}, }; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default, JsonSchema)] #[serde(default)] -pub struct AssetError { +pub struct DasError { pub id: String, pub error: String, } @@ -34,7 +35,7 @@ pub struct AssetList { pub cursor: Option, pub items: Vec, #[serde(skip_serializing_if = "Vec::is_empty")] - pub errors: Vec, + pub errors: Vec, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default, JsonSchema)] @@ -50,3 +51,48 @@ pub struct TransactionSignatureList { pub after: Option, pub items: Vec<(String, String)>, } + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default, JsonSchema)] +#[serde(default)] +pub struct TokenAccountList { + pub total: u32, + pub limit: u32, + #[serde(skip_serializing_if = "Option::is_none")] + pub page: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub before: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + pub token_accounts: Vec, + pub cursor: Option, + pub errors: Vec, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default, JsonSchema)] +#[serde(default)] + +pub struct NftEdition { + pub mint_address: String, + pub edition_address: String, + pub edition_number: u64, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default, JsonSchema)] +#[serde(default)] +pub struct NftEditions { + pub total: u32, + pub limit: u32, + pub master_edition_address: String, + pub supply: u64, + pub max_supply: Option, + #[serde(skip_serializing_if = "Vec::is_empty")] + pub editions: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub page: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub before: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub cursor: Option, +} diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index 0dcad71ef..6f40f610e 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -169,6 +169,7 @@ pub fn create_asset( mpl_core_plugins_json_version: None, mpl_core_external_plugins: None, mpl_core_unknown_external_plugins: None, + mint_extensions: None, }, ) 
} diff --git a/docker-compose.yaml b/docker-compose.yaml index 292f1ead7..ef3b51052 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -87,8 +87,17 @@ services: ports: - "6379:6379" db: - image: 'postgres:14' - command: [ "postgres", "-c", "log_statement=all", "-c", "log_destination=stderr" ,"-c","max_connections=200" ] + image: "postgres:14" + command: + [ + "postgres", + "-c", + "log_statement=all", + "-c", + "log_destination=stderr", + "-c", + "max_connections=200", + ] ports: - 5432:5432 environment: @@ -112,6 +121,16 @@ services: - "8001:8001" - "8899:8899" - "9900:9900" + prometheus: + image: prom/prometheus:latest + container_name: prometheus + volumes: + - ./prometheus-config.yaml:/etc/prometheus/prometheus-config.yaml + command: ["--config.file=/etc/prometheus/prometheus-config.yaml"] + ports: + - "9090:9090" + extra_hosts: + - "host.docker.internal:host-gateway" volumes: - grafana_data: { } - graphite_data: { } + grafana_data: {} + graphite_data: {} diff --git a/grpc-ingest/.gitignore b/grpc-ingest/.gitignore new file mode 100644 index 000000000..191f3badd --- /dev/null +++ b/grpc-ingest/.gitignore @@ -0,0 +1,3 @@ +config-grpc2redis.yaml +config-ingester.yaml +config-monitor.yaml diff --git a/grpc-ingest/Cargo.toml b/grpc-ingest/Cargo.toml new file mode 100644 index 000000000..9cbe506e2 --- /dev/null +++ b/grpc-ingest/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "das-grpc-ingest" +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +publish = { workspace = true } + +[dependencies] +anyhow = { workspace = true } +async-stream = { workspace = true } +atty = { workspace = true } +das-bubblegum = { workspace = true } +sqlx = { workspace = true, features = [ + "macros", + "runtime-tokio-rustls", + "postgres", + "uuid", + "offline", + "json", +] } +chrono = { workspace = true } +clap = { workspace = true, features = ["cargo", "derive"] } +thiserror = { workspace = true } +das-core = { workspace = true } +digital_asset_types = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +hyper = { workspace = true, features = ["server"] } +json5 = { workspace = true } +lazy_static = { workspace = true } +lru = { workspace = true } +opentelemetry = { workspace = true } +opentelemetry-jaeger = { workspace = true, features = ["rt-tokio"] } +opentelemetry_sdk = { workspace = true, features = ["trace"] } +program_transformers = { workspace = true } +prometheus = { workspace = true } +redis = { workspace = true, features = ["tokio-comp", "tokio-native-tls-comp"] } +reqwest = { workspace = true } +rust-crypto = { workspace = true } +sea-orm = { workspace = true, features = ["sqlx-postgres"] } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +solana-sdk = { workspace = true } # only prom rn +tokio = { workspace = true, features = [ + "rt-multi-thread", + "macros", + "time", + "fs", + "tracing", +] } +tracing = { workspace = true } +tracing-opentelemetry = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } +yellowstone-grpc-client = { workspace = true } +yellowstone-grpc-proto = { workspace = true } +yellowstone-grpc-tools = { workspace = true } + +[build-dependencies] +anyhow = { workspace = true } +cargo-lock = { workspace = true } +git-version = { workspace = true } +vergen = { workspace = true, features = ["build", "rustc"] } + +[lints] +workspace = true diff --git a/grpc-ingest/README.md b/grpc-ingest/README.md new 
file mode 100644 index 000000000..1f3a46393 --- /dev/null +++ b/grpc-ingest/README.md @@ -0,0 +1,54 @@ +## Dev setup + +> **Note:** Run these commands from the root of the project + +### Run redis, postgres and prometheus docker containers + +```bash +docker compose up db redis prometheus +``` + +### Seed the database and run migrations + +```bash +INIT_FILE_PATH=./init.sql sea migrate up --database-url=postgres://solana:solana@localhost:5432/solana +``` + +### Configs + +Example config files are available at +- [./config-grpc2redis.example.yml](./config-grpc2redis.example.yml) +- [./config-ingester.example.yml](./config-ingester.example.yml) +- [./config-monitor.example.yml](./config-monitor.example.yml) + +Copy these files and modify them as needed to setup the project. + + +### Run grpc2redis service + +This service will listen to geyser gRPC account and transaction updates. It makes multiple subscriptions to the gRPC stream and filter the data based on the config. The data (vec of bytes) is pushed to a pipeline and then flushed to redis at regular intervals. + +> **Note:** Log level can be set to `info`, `debug`, `warn`, `error` + +```bash +RUST_LOG=info cargo run --bin das-grpc-ingest -- --config grpc-ingest/config-grpc2redis.yml grpc2redis +``` + +### Config for Ingester [./config-ingester.yml](./config-ingester.yml) + +### Run the Ingester service + +This service performs many concurrent tasks + +- Fetch account updates from redis and process them using using program_transformer +- Fetch transaction updates from redis and processe them +- Fetch snapshots from redis and process them +- download token metedata json and store them in postgres db + +```bash + RUST_LOG=debug,sqlx=warn cargo run --bin das-grpc-ingest -- --config grpc-ingest/config-ingester.yml ingester +``` + +### Metrics + +Both grpc2redis and ingester services expose prometheus metrics and can be accessed at `http://localhost:9090/metrics` diff --git a/grpc-ingest/build.rs b/grpc-ingest/build.rs new file mode 100644 index 000000000..92e1f4c7c --- /dev/null +++ b/grpc-ingest/build.rs @@ -0,0 +1,38 @@ +use {cargo_lock::Lockfile, std::collections::HashSet}; + +fn main() -> anyhow::Result<()> { + let mut envs = vergen::EmitBuilder::builder(); + envs.all_build().all_rustc(); + envs.emit()?; + + // vergen git version does not looks cool + println!( + "cargo:rustc-env=GIT_VERSION={}", + git_version::git_version!() + ); + + // Extract packages version + let lockfile = Lockfile::load("../Cargo.lock")?; + println!( + "cargo:rustc-env=SOLANA_SDK_VERSION={}", + get_pkg_version(&lockfile, "solana-sdk") + ); + println!( + "cargo:rustc-env=YELLOWSTONE_GRPC_PROTO_VERSION={}", + get_pkg_version(&lockfile, "yellowstone-grpc-proto") + ); + + Ok(()) +} + +fn get_pkg_version(lockfile: &Lockfile, pkg_name: &str) -> String { + lockfile + .packages + .iter() + .filter(|pkg| pkg.name.as_str() == pkg_name) + .map(|pkg| pkg.version.to_string()) + .collect::>() + .into_iter() + .collect::>() + .join(",") +} diff --git a/grpc-ingest/config-grpc2redis.example.yml b/grpc-ingest/config-grpc2redis.example.yml new file mode 100644 index 000000000..877b6cc0f --- /dev/null +++ b/grpc-ingest/config-grpc2redis.example.yml @@ -0,0 +1,71 @@ +# This file is used to configure the grpc2redis service. 
+ +# prometheus metrics are pushed to this endpoint +prometheus: 0.0.0.0:8873 + +# gRPC server configuration (change x_token and endpoint to the correct ones) +geyser: + # endpoint of the dragonmouth stream + endpoint: https://index.rpcpool.com/ + # x-token of the dragonmouth stream + x_token: b6e163d7-388a-49fe-bec2-63a6ea1a8f6b + # transaction commitment level: finalized, confirmed, processed + commitment: finalized + # connection timeout in seconds + connection_timeout: 10 + # request timeout in seconds + timeout: 10 + +# gRPC subscription configuration (each representing a separate stream processed concurrently) +# check account and transaction filters here (https://github.com/rpcpool/yellowstone-grpc/blob/master/README.md) +subscriptions: + metadata: + stream: + # stream name + name: ACCOUNTS + # maximum length of the stream. + max_len: 100_000_000 + # maximum concurrency for processing the stream. + max_concurrency: 2 + # filter accounts by owner + filter: + accounts: + owner: + - metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s + token: + stream: + name: ACCOUNTS + max_len: 100_000_000 + max_concurrency: 5 + filter: + accounts: + owner: + - TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA + core: + stream: + name: ACCOUNTS + max_len: 100_000_000 + max_concurrency: 2 + filter: + accounts: + owner: + - CoREENxT6tW1HoK8ypY1SxRMZTcVPm7R94rH4PZNhX7d + bubblegum: + stream: + name: TRANSACTIONS + max_len: 100_000_000 + max_concurrency: 2 + # filter transactions by accounts_included + filter: + transactions: + account_include: + - BGUMAp9Gq7iTEuizy4pqaxsTyUCBK68MDfK752saRPUY + +# Redis configuration +redis: + # redis connection url + url: redis://localhost:6379 + # maximum size (in bytes) of the Redis pipeline before it is flushed. + pipeline_max_size: 1_000 + # maximum idle time (in milliseconds) before the redis pipeline is flushed + pipeline_max_idle_ms: 100_000 diff --git a/grpc-ingest/config-ingester.example.yml b/grpc-ingest/config-ingester.example.yml new file mode 100644 index 000000000..37815047e --- /dev/null +++ b/grpc-ingest/config-ingester.example.yml @@ -0,0 +1,55 @@ +# This file is used to configure the ingester service. + +# prometheus metrics are pushed to this endpoint +prometheus: 0.0.0.0:8875 + +# redis connection url +redis: redis://localhost:6379 + +# postgres configuration +postgres: + # connection url + url: postgres://solana:solana@localhost/solana + # min connections + min_connections: 10 + # max connections + max_connections: 50 + +# snapshots configuration for the ingester +snapshots: + # stream name + name: SNAPSHOTS + # maximum number of concurrent tasks for processing. + max_concurrency: 10 + # maximum number of elements to return per redis stream + batch_size: 100 + # maximum idle time (in milliseconds) before processing a batch + xack_batch_max_idle_ms: 1_000 + # buffer size for redis acknowledgment messages + xack_buffer_size: 10_000 + # maximum size of a batch for redis acknowledgment processing. + xack_batch_max_size: 500 + # maximum number of redis messages to keep to buffer. 
+ messages_buffer_size: 200 + # redis stream consumer name must be unique per ingester process + consumer: consumer + +# accounts configuration for the ingester +accounts: + name: ACCOUNTS + max_concurrency: 10 + batch_size: 100 + xack_batch_max_idle_ms: 1_000 + xack_buffer_size: 10_000 + xack_batch_max_size: 500 + +# transactions configuration for the ingester +transactions: + name: TRANSACTIONS + +# download metadata configuration for the ingester +download_metadata: + # maximum number of attempts for downloading metadata JSON. + max_attempts: 1 + stream: + name: METADATA_JSON diff --git a/grpc-ingest/config-monitor.example.yml b/grpc-ingest/config-monitor.example.yml new file mode 100644 index 000000000..e2826a9e5 --- /dev/null +++ b/grpc-ingest/config-monitor.example.yml @@ -0,0 +1,20 @@ +# this file is used to configure the monitor service for bubblegum merkle trees. + +# prometheus metrics are pushed to this endpoint +prometheus: 0.0.0.0:8876 + +# rpc endpoint +rpc: http://127.0.0.1:8899 + +# postgres configuration +postgres: + # connection url + url: postgres://solana:solana@localhost/solana + # min connections + min_connections: 10 + # maximum allowed connections + max_connections: 50 + +# bubblegum merkle tree configuration +bubblegum: + only_trees: null diff --git a/grpc-ingest/src/config.rs b/grpc-ingest/src/config.rs new file mode 100644 index 000000000..b34ba8fe8 --- /dev/null +++ b/grpc-ingest/src/config.rs @@ -0,0 +1,386 @@ +use { + anyhow::Context, + serde::{de, Deserialize}, + std::{collections::HashMap, net::SocketAddr, path::Path, time::Duration}, + tokio::fs, + yellowstone_grpc_tools::config::{ + deserialize_usize_str, ConfigGrpcRequestAccounts, ConfigGrpcRequestCommitment, + ConfigGrpcRequestTransactions, + }, +}; + +pub const REDIS_STREAM_DATA_KEY: &str = "data"; + +pub async fn load(path: impl AsRef + Copy) -> anyhow::Result +where + T: de::DeserializeOwned, +{ + let text = fs::read_to_string(path) + .await + .context("failed to read config from file")?; + + match path.as_ref().extension().and_then(|e| e.to_str()) { + Some("yaml") | Some("yml") => { + serde_yaml::from_str(&text).context("failed to parse config from YAML file") + } + Some("json") => json5::from_str(&text).context("failed to parse config from JSON file"), + value => anyhow::bail!("unknown config extension: {value:?}"), + } +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigIngestStream { + pub name: String, + #[serde(default = "ConfigIngestStream::default_group")] + pub group: String, + #[serde(default = "ConfigIngestStream::default_consumer")] + pub consumer: String, + #[serde( + default = "ConfigIngestStream::default_xack_batch_max_size", + deserialize_with = "deserialize_usize_str" + )] + pub xack_batch_max_size: usize, + #[serde( + default = "ConfigIngestStream::default_xack_batch_max_idle", + deserialize_with = "deserialize_duration_str", + rename = "xack_batch_max_idle_ms" + )] + pub xack_batch_max_idle: Duration, + #[serde( + default = "ConfigIngestStream::default_batch_size", + deserialize_with = "deserialize_usize_str" + )] + pub batch_size: usize, + #[serde( + default = "ConfigIngestStream::default_max_concurrency", + deserialize_with = "deserialize_usize_str" + )] + pub max_concurrency: usize, + #[serde( + default = "ConfigIngestStream::default_ack_concurrency", + deserialize_with = "deserialize_usize_str" + )] + pub ack_concurrency: usize, + #[serde( + default = "ConfigIngestStream::default_xack_buffer_size", + deserialize_with = "deserialize_usize_str" + )] + pub 
xack_buffer_size: usize, + #[serde( + default = "ConfigIngestStream::default_message_buffer_size", + deserialize_with = "deserialize_usize_str" + )] + pub message_buffer_size: usize, +} + +impl ConfigIngestStream { + pub const fn default_xack_buffer_size() -> usize { + 1_000 + } + + pub const fn default_message_buffer_size() -> usize { + 100 + } + + pub const fn default_max_concurrency() -> usize { + 2 + } + + pub const fn default_ack_concurrency() -> usize { + 5 + } + + pub const fn default_xack_batch_max_idle() -> Duration { + Duration::from_millis(10_000) + } + + pub fn default_group() -> String { + "ingester".to_owned() + } + + pub fn default_consumer() -> String { + "consumer".to_owned() + } + + pub const fn default_xack_batch_max_size() -> usize { + 5 + } + + pub const fn default_batch_size() -> usize { + 5 + } +} + +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(default)] +pub struct ConfigTopograph { + #[serde(default = "ConfigTopograph::default_num_threads")] + pub num_threads: usize, +} + +impl ConfigTopograph { + pub const fn default_num_threads() -> usize { + 5 + } +} + +#[derive(Debug, Clone, Default, Deserialize)] +#[serde(default)] +pub struct ConfigPrometheus { + pub prometheus: Option, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigGeyser { + pub endpoint: String, + pub x_token: Option, + #[serde(default = "ConfigGeyser::default_commitment")] + pub commitment: ConfigGrpcRequestCommitment, + #[serde(default = "ConfigGeyser::default_connection_timeout")] + pub connect_timeout: u64, + #[serde(default = "ConfigGeyser::default_timeout")] + pub timeout: u64, +} + +impl ConfigGeyser { + pub const fn default_commitment() -> ConfigGrpcRequestCommitment { + ConfigGrpcRequestCommitment::Finalized + } + + pub const fn default_connection_timeout() -> u64 { + 10 + } + + pub const fn default_timeout() -> u64 { + 10 + } +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigStream { + pub name: String, + #[serde( + default = "ConfigStream::default_stream_maxlen", + deserialize_with = "deserialize_usize_str" + )] + pub max_len: usize, + #[serde( + default = "ConfigStream::default_max_concurrency", + deserialize_with = "deserialize_usize_str" + )] + pub max_concurrency: usize, +} + +impl ConfigStream { + pub const fn default_stream_maxlen() -> usize { + 10_000_000 + } + + pub const fn default_max_concurrency() -> usize { + 10 + } +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigGrpcRequestFilter { + pub accounts: Option, + pub transactions: Option, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigSubscription { + pub stream: ConfigStream, + pub filter: ConfigGrpcRequestFilter, +} + +pub type ConfigGrpcSubscriptions = HashMap; + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigGrpc { + pub geyser: ConfigGeyser, + + pub subscriptions: ConfigGrpcSubscriptions, + + pub redis: ConfigGrpcRedis, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct ConfigGrpcRedis { + pub url: String, + #[serde( + default = "ConfigGrpcRedis::default_pipeline_max_idle", + deserialize_with = "deserialize_duration_str" + )] + pub pipeline_max_idle: Duration, +} + +impl ConfigGrpcRedis { + pub const fn default_pipeline_max_idle() -> Duration { + Duration::from_millis(10) + } +} + +pub fn deserialize_duration_str<'de, D>(deserializer: D) -> Result +where + D: de::Deserializer<'de>, +{ + let ms = deserialize_usize_str(deserializer)?; + Ok(Duration::from_millis(ms as u64)) +} + +#[derive(Debug, Clone, 
Deserialize)] +pub struct ConfigIngester { + pub redis: String, + pub postgres: ConfigPostgres, + pub download_metadata: ConfigIngesterDownloadMetadata, + pub snapshots: ConfigIngestStream, + pub accounts: ConfigIngestStream, + pub transactions: ConfigIngestStream, +} + +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +pub enum ConfigIngesterRedisStreamType { + Account, + Transaction, + MetadataJson, + Snapshot, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ConfigPostgres { + pub url: String, + #[serde( + default = "ConfigPostgres::default_min_connections", + deserialize_with = "deserialize_usize_str" + )] + pub min_connections: usize, + #[serde( + default = "ConfigPostgres::default_max_connections", + deserialize_with = "deserialize_usize_str" + )] + pub max_connections: usize, + #[serde( + default = "ConfigPostgres::default_idle_timeout", + deserialize_with = "deserialize_duration_str" + )] + pub idle_timeout: Duration, + #[serde( + default = "ConfigPostgres::default_max_lifetime", + deserialize_with = "deserialize_duration_str" + )] + pub max_lifetime: Duration, +} + +impl ConfigPostgres { + pub const fn default_min_connections() -> usize { + 10 + } + + pub const fn default_max_connections() -> usize { + 50 + } + + pub const fn default_idle_timeout() -> Duration { + Duration::from_millis(75) + } + + pub const fn default_max_lifetime() -> Duration { + Duration::from_millis(125) + } +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct ConfigIngesterDownloadMetadata { + pub stream: ConfigIngestStream, + #[serde( + default = "ConfigIngesterDownloadMetadata::default_num_threads", + deserialize_with = "deserialize_usize_str" + )] + pub num_threads: usize, + #[serde( + default = "ConfigIngesterDownloadMetadata::default_max_attempts", + deserialize_with = "deserialize_usize_str" + )] + pub max_attempts: usize, + #[serde( + default = "ConfigIngesterDownloadMetadata::default_request_timeout", + deserialize_with = "deserialize_duration_str", + rename = "request_timeout_ms" + )] + pub request_timeout: Duration, + #[serde( + default = "ConfigIngesterDownloadMetadata::default_stream_maxlen", + deserialize_with = "deserialize_usize_str" + )] + pub stream_maxlen: usize, + #[serde( + default = "ConfigIngesterDownloadMetadata::default_stream_max_size", + deserialize_with = "deserialize_usize_str" + )] + pub pipeline_max_size: usize, + #[serde( + default = "ConfigIngesterDownloadMetadata::default_pipeline_max_idle", + deserialize_with = "deserialize_duration_str", + rename = "pipeline_max_idle_ms" + )] + pub pipeline_max_idle: Duration, +} + +impl ConfigIngesterDownloadMetadata { + pub const fn default_num_threads() -> usize { + 2 + } + + pub const fn default_pipeline_max_idle() -> Duration { + Duration::from_millis(10) + } + + pub const fn default_stream_max_size() -> usize { + 10 + } + + pub const fn default_stream_maxlen() -> usize { + 10_000_000 + } + + pub const fn default_max_attempts() -> usize { + 3 + } + + pub const fn default_request_timeout() -> Duration { + Duration::from_millis(3_000) + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ConfigMonitor { + pub postgres: ConfigPostgres, + pub rpc: String, + pub bubblegum: ConfigBubblegumVerify, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ConfigBubblegumVerify { + #[serde( + default = "ConfigBubblegumVerify::default_report_interval", + deserialize_with = "deserialize_duration_str" + )] + pub report_interval: Duration, + #[serde(default)] + pub only_trees: Option>, 
+ #[serde( + default = "ConfigBubblegumVerify::default_max_concurrency", + deserialize_with = "deserialize_usize_str" + )] + pub max_concurrency: usize, +} + +impl ConfigBubblegumVerify { + pub const fn default_report_interval() -> Duration { + Duration::from_millis(5 * 60 * 1000) + } + pub const fn default_max_concurrency() -> usize { + 20 + } +} diff --git a/grpc-ingest/src/grpc.rs b/grpc-ingest/src/grpc.rs new file mode 100644 index 000000000..ed6985dfa --- /dev/null +++ b/grpc-ingest/src/grpc.rs @@ -0,0 +1,322 @@ +use { + crate::{ + config::{ConfigGrpc, ConfigGrpcRequestFilter, ConfigSubscription}, + prom::{grpc_tasks_total_dec, grpc_tasks_total_inc, redis_xadd_status_inc}, + redis::TrackedPipeline, + util::create_shutdown, + }, + anyhow::Context, + futures::{ + stream::{FuturesUnordered, StreamExt}, + SinkExt, + }, + redis::streams::StreamMaxlen, + std::{collections::HashMap, sync::Arc, time::Duration}, + tokio::{sync::Mutex, time::sleep}, + tracing::{debug, error, warn}, + yellowstone_grpc_client::GeyserGrpcClient, + yellowstone_grpc_proto::{ + geyser::{SubscribeRequest, SubscribeRequestPing, SubscribeUpdate}, + prelude::subscribe_update::UpdateOneof, + prost::Message, + }, + yellowstone_grpc_tools::config::GrpcRequestToProto, +}; + +const PING_ID: i32 = 0; + +pub async fn run(config: ConfigGrpc) -> anyhow::Result<()> { + let redis_client = redis::Client::open(config.redis.url.clone())?; + let connection = redis_client.get_multiplexed_tokio_connection().await?; + + let mut shutdown = create_shutdown()?; + + let config = Arc::new(config); + + let subscriptions = config.subscriptions.clone(); + + let mut subscription_tasks = Vec::new(); + for (label, subscription_config) in subscriptions { + let subscription = Subscription { + label, + config: subscription_config, + }; + let task = SubscriptionTask::build() + .config(Arc::clone(&config)) + .connection(connection.clone()) + .subscription(subscription) + .start() + .await?; + + subscription_tasks.push(task); + } + + if let Some(signal) = shutdown.next().await { + warn!( + target: "grpc2redis", + action = "shutdown_signal_received", + message = "Shutdown signal received, waiting for spawned tasks to complete", + signal = ?signal + ); + } + + futures::future::join_all( + subscription_tasks + .into_iter() + .map(|task| task.stop()) + .collect::>(), + ) + .await + .into_iter() + .collect::>()?; + + Ok(()) +} + +pub struct Subscription { + pub label: String, + pub config: ConfigSubscription, +} + +#[derive(Default)] +pub struct SubscriptionTask { + pub config: Arc, + pub connection: Option, + pub subscription: Option, +} + +impl SubscriptionTask { + pub fn build() -> Self { + Self::default() + } + + pub fn config(mut self, config: Arc) -> Self { + self.config = config; + self + } + + pub fn subscription(mut self, subscription: Subscription) -> Self { + self.subscription = Some(subscription); + self + } + + pub fn connection(mut self, connection: redis::aio::MultiplexedConnection) -> Self { + self.connection = Some(connection); + self + } + + pub async fn start(mut self) -> anyhow::Result { + let config = Arc::clone(&self.config); + let connection = self + .connection + .take() + .expect("Redis Connection is required"); + + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); + let subscription = self.subscription.take().expect("Subscription is required"); + let label = subscription.label.clone(); + let subscription_config = Arc::new(subscription.config); + let connection = connection.clone(); + + let ConfigSubscription { 
stream, filter } = subscription_config.as_ref().clone(); + + let stream_config = Arc::new(stream.clone()); + let mut req_accounts = HashMap::with_capacity(1); + let mut req_transactions = HashMap::with_capacity(1); + + let ConfigGrpcRequestFilter { + accounts, + transactions, + } = filter; + + if let Some(accounts) = accounts { + req_accounts.insert(label.clone(), accounts.to_proto()); + } + + if let Some(transactions) = transactions { + req_transactions.insert(label.clone(), transactions.to_proto()); + } + + let request = SubscribeRequest { + accounts: req_accounts, + transactions: req_transactions, + commitment: Some(config.geyser.commitment.to_proto().into()), + ..Default::default() + }; + + let pipe = Arc::new(Mutex::new(TrackedPipeline::default())); + let mut tasks = FuturesUnordered::new(); + + let mut dragon_mouth_client = + GeyserGrpcClient::build_from_shared(config.geyser.endpoint.clone())? + .x_token(config.geyser.x_token.clone())? + .connect_timeout(Duration::from_secs(config.geyser.connect_timeout)) + .timeout(Duration::from_secs(config.geyser.timeout)) + .connect() + .await + .context("failed to connect to gRPC")?; + + let (mut subscribe_tx, stream) = dragon_mouth_client + .subscribe_with_request(Some(request)) + .await?; + + let deadline_config = Arc::clone(&config); + + let control = tokio::spawn({ + async move { + tokio::pin!(stream); + + let (flush_tx, mut flush_rx) = tokio::sync::mpsc::channel::<()>(1); + + let flush_handle = tokio::spawn({ + let pipe = Arc::clone(&pipe); + let stream_config = Arc::clone(&stream_config); + let label = label.clone(); + let mut connection = connection.clone(); + + async move { + while (flush_rx.recv().await).is_some() { + let mut pipe = pipe.lock().await; + let flush = pipe.flush(&mut connection).await; + + let status = flush.as_ref().map(|_| ()).map_err(|_| ()); + let count = flush.as_ref().unwrap_or_else(|count| count); + + debug!(target: "grpc2redis", action = "flush_redis_pipe", stream = ?stream_config.name, status = ?status, count = ?count); + redis_xadd_status_inc(&stream_config.name, &label, status, *count); + } + } + }); + + loop { + tokio::select! { + _ = sleep(deadline_config.redis.pipeline_max_idle) => { + let _ = flush_tx.send(()).await; + } + Some(Ok(msg)) = stream.next() => { + match msg.update_oneof { + Some(UpdateOneof::Account(_)) | Some(UpdateOneof::Transaction(_)) => { + if tasks.len() >= stream_config.max_concurrency { + tasks.next().await; + } + grpc_tasks_total_inc(&label, &stream_config.name); + + tasks.push(tokio::spawn({ + let pipe = Arc::clone(&pipe); + let label = label.clone(); + let stream_config = Arc::clone(&stream_config); + + async move { + let stream = stream_config.name.clone(); + let stream_maxlen = stream_config.max_len; + + let SubscribeUpdate { update_oneof, .. 
} = msg; + + let mut pipe = pipe.lock().await; + + if let Some(update) = update_oneof { + match update { + UpdateOneof::Account(account) => { + pipe.xadd_maxlen( + &stream.to_string(), + StreamMaxlen::Approx(stream_maxlen), + "*", + account.encode_to_vec(), + ); + debug!(target: "grpc2redis", action = "process_account_update",label = ?label, stream = ?stream, maxlen = ?stream_maxlen); + } + + UpdateOneof::Transaction(transaction) => { + pipe.xadd_maxlen( + &stream.to_string(), + StreamMaxlen::Approx(stream_maxlen), + "*", + transaction.encode_to_vec(), + ); + debug!(target: "grpc2redis", action = "process_transaction_update",label = ?label, stream = ?stream, maxlen = ?stream_maxlen); + } + _ => { + warn!(target: "grpc2redis", action = "unknown_update_variant",label = ?label, message = "Unknown update variant") + } + } + } + + grpc_tasks_total_dec(&label, &stream_config.name); + } + } + )) + } + Some(UpdateOneof::Ping(_)) => { + let ping = subscribe_tx + .send(SubscribeRequest { + ping: Some(SubscribeRequestPing { id: PING_ID }), + ..Default::default() + }) + .await; + + match ping { + Ok(_) => { + debug!(target: "grpc2redis", action = "send_ping", message = "Ping sent successfully", id = PING_ID) + } + Err(err) => { + warn!(target: "grpc2redis", action = "send_ping_failed", message = "Failed to send ping", ?err, id = PING_ID) + } + } + } + Some(UpdateOneof::Pong(pong)) => { + if pong.id == PING_ID { + debug!(target: "grpc2redis", action = "receive_pong", message = "Pong received", id = PING_ID); + } else { + warn!(target: "grpc2redis", action = "receive_unknown_pong", message = "Unknown pong id received", id = pong.id); + } + } + _ => { + warn!(target: "grpc2redis", action = "unknown_update_variant", message = "Unknown update variant", ?msg.update_oneof) + } + } + } + _ = &mut shutdown_rx => { + debug!(target: "grpc2redis", action = "shutdown_signal_received", message = "Shutdown signal received, stopping subscription task", ?label); + break; + } + } + } + + while (tasks.next().await).is_some() {} + + if let Err(err) = flush_tx.send(()).await { + error!(target: "grpc2redis", action = "flush_send_failed", message = "Failed to send flush signal", ?err); + } + + drop(flush_tx); + + if let Err(err) = flush_handle.await { + error!(target: "grpc2redis", action = "flush_failed", message = "Failed to flush", ?err); + } + } + }); + + Ok(SubscriptionTaskStop { + shutdown_tx, + control, + }) + } +} + +#[derive(Debug)] +pub struct SubscriptionTaskStop { + pub shutdown_tx: tokio::sync::oneshot::Sender<()>, + pub control: tokio::task::JoinHandle<()>, +} + +impl SubscriptionTaskStop { + pub async fn stop(self) -> anyhow::Result<()> { + self.shutdown_tx + .send(()) + .map_err(|_| anyhow::anyhow!("Failed to send shutdown signal"))?; + + self.control.await?; + + Ok(()) + } +} diff --git a/grpc-ingest/src/ingester.rs b/grpc-ingest/src/ingester.rs new file mode 100644 index 000000000..710415d5f --- /dev/null +++ b/grpc-ingest/src/ingester.rs @@ -0,0 +1,145 @@ +use { + crate::{ + config::{ConfigIngester, REDIS_STREAM_DATA_KEY}, + postgres::{create_pool as pg_create_pool, report_pgpool}, + prom::redis_xadd_status_inc, + redis::{AccountHandle, DownloadMetadataJsonHandle, IngestStream, TransactionHandle}, + util::create_shutdown, + }, + das_core::{DownloadMetadata, DownloadMetadataInfo, DownloadMetadataNotifier}, + futures::{future::BoxFuture, stream::StreamExt}, + program_transformers::ProgramTransformer, + redis::aio::MultiplexedConnection, + std::sync::Arc, + tokio::time::{sleep, Duration}, + tracing::warn, 
+fn download_metadata_notifier_v2(
+    connection: MultiplexedConnection,
+    stream: String,
+    stream_maxlen: usize,
+) -> anyhow::Result<DownloadMetadataNotifier> {
+    Ok(
+        Box::new(
+            move |info: DownloadMetadataInfo| -> BoxFuture<
+                'static,
+                Result<(), Box<dyn std::error::Error + Send + Sync + 'static>>,
+            > {
+                let mut connection = connection.clone();
+                let stream = stream.clone();
+                Box::pin(async move {
+                    let info_bytes = serde_json::to_vec(&info)?;
+
+                    let xadd = redis::cmd("XADD")
+                        .arg(&stream)
+                        .arg("MAXLEN")
+                        .arg("~")
+                        .arg(stream_maxlen)
+                        .arg("*")
+                        .arg(REDIS_STREAM_DATA_KEY)
+                        .arg(info_bytes)
+                        .query_async::<_, redis::Value>(&mut connection)
+                        .await;
+
+                    let status = xadd.map(|_| ()).map_err(|_| ());
+
+                    redis_xadd_status_inc(&stream, "metadata_notifier", status, 1);
+
+                    Ok(())
+                })
+            },
+        ),
+    )
+}
+
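+/// Wires the ingester together: one multiplexed Redis connection and one
+/// Postgres pool are shared by four `IngestStream` consumers (accounts,
+/// transactions, snapshots, and download-metadata JSON jobs), plus a small
+/// task that reports pool metrics every 100ms until a shutdown signal lands.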
tracing_init, + }, + anyhow::Context, + clap::{Parser, Subcommand}, + config::ConfigMonitor, + std::net::SocketAddr, +}; + +mod config; +mod grpc; +mod ingester; +mod monitor; +mod postgres; +mod prom; +mod redis; +mod tracing; +mod util; +mod version; + +#[derive(Debug, Parser)] +#[clap(author, version)] +struct Args { + /// Path to config file + #[clap(short, long)] + config: String, + + /// Prometheus listen address + #[clap(long)] + prometheus: Option, + + #[command(subcommand)] + action: ArgsAction, +} + +#[derive(Debug, Clone, Subcommand)] +enum ArgsAction { + /// Subscribe on Geyser events using gRPC and send them to Redis + #[command(name = "grpc2redis")] + Grpc, + /// Run ingester process (process events from Redis) + #[command(name = "ingester")] + Ingester, + #[command(name = "monitor")] + Monitor, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_init()?; + + let args = Args::parse(); + + // Run prometheus server + let config = config_load::(&args.config) + .await + .with_context(|| format!("failed to parse prometheus config from: {}", args.config))?; + if let Some(address) = args.prometheus.or(config.prometheus) { + prometheus_run_server(address)?; + } + + // Run grpc / ingester / download-metadata + match args.action { + ArgsAction::Grpc => { + let config = config_load::(&args.config) + .await + .with_context(|| format!("failed to parse config from: {}", args.config))?; + grpc::run(config).await + } + ArgsAction::Ingester => { + let config = config_load::(&args.config) + .await + .with_context(|| format!("failed to parse config from: {}", args.config))?; + ingester::run(config).await + } + ArgsAction::Monitor => { + let config = config_load::(&args.config) + .await + .with_context(|| format!("failed to parse config from: {}", args.config))?; + + monitor::run(config).await + } + } +} diff --git a/grpc-ingest/src/monitor.rs b/grpc-ingest/src/monitor.rs new file mode 100644 index 000000000..7c7e277f8 --- /dev/null +++ b/grpc-ingest/src/monitor.rs @@ -0,0 +1,50 @@ +use crate::postgres::create_pool; +use crate::util::create_shutdown; +use crate::{config::ConfigMonitor, prom::update_tree_proof_report}; +use das_bubblegum::{verify_bubblegum, BubblegumContext, VerifyArgs}; +use das_core::{Rpc, SolanaRpcArgs}; +use futures::stream::StreamExt; +use tracing::{error, info}; + +pub async fn run(config: ConfigMonitor) -> anyhow::Result<()> { + let mut shutdown = create_shutdown()?; + let database_pool = create_pool(config.postgres).await?; + let rpc = Rpc::from_config(&SolanaRpcArgs { + solana_rpc_url: config.rpc, + }); + + let bubblegum_verify = tokio::spawn(async move { + loop { + let bubblegum_context = BubblegumContext::new(database_pool.clone(), rpc.clone()); + let verify_args = VerifyArgs { + only_trees: config.bubblegum.only_trees.clone(), + max_concurrency: config.bubblegum.max_concurrency, + }; + + match verify_bubblegum(bubblegum_context, verify_args).await { + Ok(mut reports_receiver) => { + while let Some(report) = reports_receiver.recv().await { + info!( + report = ?report, + ); + update_tree_proof_report(&report); + } + + tokio::time::sleep(tokio::time::Duration::from_secs(600)).await; + } + Err(e) => { + error!( + message = "Error proof report recv", + error = ?e + ); + } + } + } + }); + + if let Some(_signal) = shutdown.next().await {} + + bubblegum_verify.abort(); + + Ok(()) +} diff --git a/grpc-ingest/src/postgres.rs b/grpc-ingest/src/postgres.rs new file mode 100644 index 000000000..d38c5250c --- /dev/null +++ b/grpc-ingest/src/postgres.rs @@ -0,0 
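+/// Entry point. Each subcommand re-parses the same config file into its own
+/// config type, so a single file can drive grpc2redis, ingester, and monitor.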
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    tracing_init()?;
+
+    let args = Args::parse();
+
+    // Run prometheus server
+    let config = config_load::<ConfigPrometheus>(&args.config)
+        .await
+        .with_context(|| format!("failed to parse prometheus config from: {}", args.config))?;
+    if let Some(address) = args.prometheus.or(config.prometheus) {
+        prometheus_run_server(address)?;
+    }
+
+    // Run grpc / ingester / download-metadata
+    match args.action {
+        ArgsAction::Grpc => {
+            let config = config_load::<ConfigGrpc>(&args.config)
+                .await
+                .with_context(|| format!("failed to parse config from: {}", args.config))?;
+            grpc::run(config).await
+        }
+        ArgsAction::Ingester => {
+            let config = config_load::<ConfigIngester>(&args.config)
+                .await
+                .with_context(|| format!("failed to parse config from: {}", args.config))?;
+            ingester::run(config).await
+        }
+        ArgsAction::Monitor => {
+            let config = config_load::<ConfigMonitor>(&args.config)
+                .await
+                .with_context(|| format!("failed to parse config from: {}", args.config))?;
+
+            monitor::run(config).await
+        }
+    }
+}
diff --git a/grpc-ingest/src/monitor.rs b/grpc-ingest/src/monitor.rs
new file mode 100644
index 000000000..7c7e277f8
--- /dev/null
+++ b/grpc-ingest/src/monitor.rs
@@ -0,0 +1,50 @@
+use crate::postgres::create_pool;
+use crate::util::create_shutdown;
+use crate::{config::ConfigMonitor, prom::update_tree_proof_report};
+use das_bubblegum::{verify_bubblegum, BubblegumContext, VerifyArgs};
+use das_core::{Rpc, SolanaRpcArgs};
+use futures::stream::StreamExt;
+use tracing::{error, info};
+
+pub async fn run(config: ConfigMonitor) -> anyhow::Result<()> {
+    let mut shutdown = create_shutdown()?;
+    let database_pool = create_pool(config.postgres).await?;
+    let rpc = Rpc::from_config(&SolanaRpcArgs {
+        solana_rpc_url: config.rpc,
+    });
+
+    let bubblegum_verify = tokio::spawn(async move {
+        loop {
+            let bubblegum_context = BubblegumContext::new(database_pool.clone(), rpc.clone());
+            let verify_args = VerifyArgs {
+                only_trees: config.bubblegum.only_trees.clone(),
+                max_concurrency: config.bubblegum.max_concurrency,
+            };
+
+            match verify_bubblegum(bubblegum_context, verify_args).await {
+                Ok(mut reports_receiver) => {
+                    while let Some(report) = reports_receiver.recv().await {
+                        info!(
+                            report = ?report,
+                        );
+                        update_tree_proof_report(&report);
+                    }
+
+                    tokio::time::sleep(tokio::time::Duration::from_secs(600)).await;
+                }
+                Err(e) => {
+                    error!(
+                        message = "Error proof report recv",
+                        error = ?e
+                    );
+                }
+            }
+        }
+    });
+
+    if let Some(_signal) = shutdown.next().await {}
+
+    bubblegum_verify.abort();
+
+    Ok(())
+}
diff --git a/grpc-ingest/src/postgres.rs b/grpc-ingest/src/postgres.rs
new file mode 100644
index 000000000..d38c5250c
--- /dev/null
+++ b/grpc-ingest/src/postgres.rs
@@ -0,0 +1,27 @@
+use {
+    crate::{
+        config::ConfigPostgres,
+        prom::{pgpool_connections_set, PgpoolConnectionsKind},
+    },
+    sqlx::{
+        postgres::{PgConnectOptions, PgPoolOptions},
+        PgPool,
+    },
+};
+
+pub async fn create_pool(config: ConfigPostgres) -> anyhow::Result<PgPool> {
+    let options: PgConnectOptions = config.url.parse()?;
+    PgPoolOptions::new()
+        .min_connections(config.min_connections.try_into()?)
+        .max_connections(config.max_connections.try_into()?)
+        .idle_timeout(config.idle_timeout)
+        .max_lifetime(config.max_lifetime)
+        .connect_with(options)
+        .await
+        .map_err(Into::into)
+}
+
+pub fn report_pgpool(pgpool: PgPool) {
+    pgpool_connections_set(PgpoolConnectionsKind::Total, pgpool.size() as usize);
+    pgpool_connections_set(PgpoolConnectionsKind::Idle, pgpool.num_idle());
+}
diff --git a/grpc-ingest/src/prom.rs b/grpc-ingest/src/prom.rs
new file mode 100644
index 000000000..1bb3a125a
--- /dev/null
+++ b/grpc-ingest/src/prom.rs
@@ -0,0 +1,395 @@
+use {
+    crate::{redis::RedisStreamMessageError, version::VERSION as VERSION_INFO},
+    das_bubblegum::ProofReport,
+    das_core::MetadataJsonTaskError,
+    hyper::{
+        server::conn::AddrStream,
+        service::{make_service_fn, service_fn},
+        Body, Request, Response, Server, StatusCode,
+    },
+    program_transformers::error::ProgramTransformerError,
+    prometheus::{
+        HistogramOpts, HistogramVec, IntCounter, IntCounterVec, IntGaugeVec, Opts, Registry,
+        TextEncoder,
+    },
+    std::{net::SocketAddr, sync::Once},
+    tracing::{error, info},
+};
+
+lazy_static::lazy_static! {
+    static ref REGISTRY: Registry = Registry::new();
+
+    static ref VERSION_INFO_METRIC: IntCounterVec = IntCounterVec::new(
+        Opts::new("version_info", "Plugin version info"),
+        &["buildts", "git", "package", "proto", "rustc", "solana", "version"]
+    ).unwrap();
+
+    static ref REDIS_STREAM_LENGTH: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("redis_stream_length", "Length of stream in Redis"),
+        &["stream"]
+    ).unwrap();
+
+    static ref REDIS_XADD_STATUS_COUNT: IntCounterVec = IntCounterVec::new(
+        Opts::new("redis_xadd_status_count", "Status of messages sent to Redis stream"),
+        &["stream", "label", "status"]
+    ).unwrap();
+
+    static ref REDIS_XREAD_COUNT: IntCounterVec = IntCounterVec::new(
+        Opts::new("redis_xread_count", "Count of messages seen"),
+        &["stream", "consumer"]
+    ).unwrap();
+
+    static ref REDIS_XACK_COUNT: IntCounterVec = IntCounterVec::new(
+        Opts::new("redis_xack_count", "Total number of processed messages"),
+        &["stream", "consumer"]
+    ).unwrap();
+
+    static ref PGPOOL_CONNECTIONS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("pgpool_connections", "Total number of connections in Postgres Pool"),
+        &["kind"]
+    ).unwrap();
+
+    static ref PROGRAM_TRANSFORMER_TASK_STATUS_COUNT: IntCounterVec = IntCounterVec::new(
+        Opts::new("program_transformer_task_status_count", "Status of processed messages"),
+        &["stream", "consumer", "status"],
+    ).unwrap();
+
+    static ref INGEST_JOB_TIME: HistogramVec = HistogramVec::new(
+        HistogramOpts::new("ingest_job_time", "Time taken for ingest jobs"),
+        &["stream", "consumer"]
+    ).unwrap();
+
+    static ref DOWNLOAD_METADATA_INSERTED_COUNT: IntCounter = IntCounter::new(
+        "download_metadata_inserted_count", "Total number of inserted tasks for download metadata"
+    ).unwrap();
+
+    static ref INGEST_TASKS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("ingest_tasks", "Number of tasks spawned for ingest"),
+        &["stream", "consumer"]
+    ).unwrap();
+
+    static ref ACK_TASKS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("ack_tasks", "Number of tasks spawned for ack redis messages"),
+        &["stream", "consumer"]
+    ).unwrap();
+
+    static ref GRPC_TASKS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("grpc_tasks", "Number of tasks spawned for writing grpc messages to redis"),
+        &["label", "stream"]
+    ).unwrap();
+
+    static ref BUBBLEGUM_TREE_TOTAL_LEAVES: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("bubblegum_tree_total_leaves", "Total number of leaves in the bubblegum tree"),
+        &["tree"]
+    ).unwrap();
+
+    static ref BUBBLEGUM_TREE_INCORRECT_PROOFS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("bubblegum_tree_incorrect_proofs", "Number of incorrect proofs in the bubblegum tree"),
+        &["tree"]
+    ).unwrap();
+
+    static ref BUBBLEGUM_TREE_NOT_FOUND_PROOFS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("bubblegum_tree_not_found_proofs", "Number of not found proofs in the bubblegum tree"),
+        &["tree"]
+    ).unwrap();
+
+    static ref BUBBLEGUM_TREE_CORRECT_PROOFS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("bubblegum_tree_correct_proofs", "Number of correct proofs in the bubblegum tree"),
+        &["tree"]
+    ).unwrap();
+
+    static ref BUBBLEGUM_TREE_CORRUPT_PROOFS: IntGaugeVec = IntGaugeVec::new(
+        Opts::new("bubblegum_tree_corrupt_proofs", "Number of corrupt proofs in the bubblegum tree"),
+        &["tree"]
+    ).unwrap();
+}
+
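+/// Registers every collector exactly once (guarded by `Once`), then serves the
+/// text-encoded registry on `/metrics`; any other path returns 404.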
messages"), + &["stream", "consumer"] + ).unwrap(); + + static ref GRPC_TASKS: IntGaugeVec = IntGaugeVec::new( + Opts::new("grpc_tasks", "Number of tasks spawned for writing grpc messages to redis "), + &["label","stream"] + ).unwrap(); + + static ref BUBBLEGUM_TREE_TOTAL_LEAVES: IntGaugeVec = IntGaugeVec::new( + Opts::new("bubblegum_tree_total_leaves", "Total number of leaves in the bubblegum tree"), + &["tree"] + ).unwrap(); + + static ref BUBBLEGUM_TREE_INCORRECT_PROOFS: IntGaugeVec = IntGaugeVec::new( + Opts::new("bubblegum_tree_incorrect_proofs", "Number of incorrect proofs in the bubblegum tree"), + &["tree"] + ).unwrap(); + + static ref BUBBLEGUM_TREE_NOT_FOUND_PROOFS: IntGaugeVec = IntGaugeVec::new( + Opts::new("bubblegum_tree_not_found_proofs", "Number of not found proofs in the bubblegum tree"), + &["tree"] + ).unwrap(); + + static ref BUBBLEGUM_TREE_CORRECT_PROOFS: IntGaugeVec = IntGaugeVec::new( + Opts::new("bubblegum_tree_correct_proofs", "Number of correct proofs in the bubblegum tree"), + &["tree"] + ).unwrap(); + + static ref BUBBLEGUM_TREE_CORRUPT_PROOFS: IntGaugeVec = IntGaugeVec::new( + Opts::new("bubblegum_tree_corrupt_proofs", "Number of corrupt proofs in the bubblegum tree"), + &["tree"] + ).unwrap(); +} + +pub fn run_server(address: SocketAddr) -> anyhow::Result<()> { + static REGISTER: Once = Once::new(); + REGISTER.call_once(|| { + macro_rules! register { + ($collector:ident) => { + REGISTRY + .register(Box::new($collector.clone())) + .expect("collector can't be registered"); + }; + } + + register!(VERSION_INFO_METRIC); + register!(REDIS_STREAM_LENGTH); + register!(REDIS_XADD_STATUS_COUNT); + register!(REDIS_XREAD_COUNT); + register!(REDIS_XACK_COUNT); + register!(PGPOOL_CONNECTIONS); + register!(PROGRAM_TRANSFORMER_TASK_STATUS_COUNT); + register!(INGEST_JOB_TIME); + register!(DOWNLOAD_METADATA_INSERTED_COUNT); + register!(INGEST_TASKS); + register!(ACK_TASKS); + register!(GRPC_TASKS); + register!(BUBBLEGUM_TREE_TOTAL_LEAVES); + register!(BUBBLEGUM_TREE_INCORRECT_PROOFS); + register!(BUBBLEGUM_TREE_NOT_FOUND_PROOFS); + register!(BUBBLEGUM_TREE_CORRECT_PROOFS); + register!(BUBBLEGUM_TREE_CORRUPT_PROOFS); + + VERSION_INFO_METRIC + .with_label_values(&[ + VERSION_INFO.buildts, + VERSION_INFO.git, + VERSION_INFO.package, + VERSION_INFO.proto, + VERSION_INFO.rustc, + VERSION_INFO.solana, + VERSION_INFO.version, + ]) + .inc(); + }); + + let make_service = make_service_fn(move |_: &AddrStream| async move { + Ok::<_, hyper::Error>(service_fn(move |req: Request| async move { + let response = match req.uri().path() { + "/metrics" => metrics_handler(), + _ => not_found_handler(), + }; + Ok::<_, hyper::Error>(response) + })) + }); + + let server = Server::try_bind(&address)?.serve(make_service); + info!("prometheus server started: http://{address:?}/metrics"); + + tokio::spawn(async move { + if let Err(error) = server.await { + error!("prometheus server failed: {error:?}"); + } + }); + + Ok(()) +} + +fn metrics_handler() -> Response { + let metrics = TextEncoder::new() + .encode_to_string(®ISTRY.gather()) + .unwrap_or_else(|error| { + error!("could not encode custom metrics: {}", error); + String::new() + }); + Response::builder().body(Body::from(metrics)).unwrap() +} + +fn not_found_handler() -> Response { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::empty()) + .unwrap() +} + +pub fn redis_xlen_set(stream: &str, len: usize) { + REDIS_STREAM_LENGTH + .with_label_values(&[stream]) + .set(len as i64); +} + +pub fn ingest_job_time_set(stream: &str, 
+pub fn ingest_job_time_set(stream: &str, consumer: &str, value: f64) {
+    INGEST_JOB_TIME
+        .with_label_values(&[stream, consumer])
+        .observe(value);
+}
+
+pub fn redis_xadd_status_inc(stream: &str, label: &str, status: Result<(), ()>, delta: usize) {
+    REDIS_XADD_STATUS_COUNT
+        .with_label_values(&[
+            stream,
+            label,
+            if status.is_ok() { "success" } else { "failed" },
+        ])
+        .inc_by(delta as u64);
+}
+
+pub fn redis_xread_inc(stream: &str, consumer: &str, delta: usize) {
+    REDIS_XREAD_COUNT
+        .with_label_values(&[stream, consumer])
+        .inc_by(delta as u64)
+}
+
+pub fn redis_xack_inc(stream: &str, consumer: &str, delta: usize) {
+    REDIS_XACK_COUNT
+        .with_label_values(&[stream, consumer])
+        .inc_by(delta as u64)
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum PgpoolConnectionsKind {
+    Total,
+    Idle,
+}
+
+pub fn pgpool_connections_set(kind: PgpoolConnectionsKind, size: usize) {
+    PGPOOL_CONNECTIONS
+        .with_label_values(&[match kind {
+            PgpoolConnectionsKind::Total => "total",
+            PgpoolConnectionsKind::Idle => "idle",
+        }])
+        .set(size as i64)
+}
+
+pub fn ingest_tasks_total_inc(stream: &str, consumer: &str) {
+    INGEST_TASKS.with_label_values(&[stream, consumer]).inc()
+}
+
+pub fn ingest_tasks_total_dec(stream: &str, consumer: &str) {
+    INGEST_TASKS.with_label_values(&[stream, consumer]).dec()
+}
+
+pub fn ack_tasks_total_inc(stream: &str, consumer: &str) {
+    ACK_TASKS.with_label_values(&[stream, consumer]).inc()
+}
+
+pub fn ack_tasks_total_dec(stream: &str, consumer: &str) {
+    ACK_TASKS.with_label_values(&[stream, consumer]).dec()
+}
+
+pub fn grpc_tasks_total_inc(label: &str, stream: &str) {
+    GRPC_TASKS.with_label_values(&[label, stream]).inc()
+}
+
+pub fn grpc_tasks_total_dec(label: &str, stream: &str) {
+    GRPC_TASKS.with_label_values(&[label, stream]).dec()
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum ProgramTransformerTaskStatusKind {
+    Success,
+    NotImplemented,
+    DeserializationError,
+    ParsingError,
+    ChangeLogEventMalformed,
+    StorageWriteError,
+    SerializatonError,
+    DatabaseError,
+    AssetIndexError,
+    DownloadMetadataNotify,
+    DownloadMetadataSeaOrmError,
+    DownloadMetadataFetchError,
+    DownloadMetadataAssetNotFound,
+    RedisMessageDeserializeError,
+}
+
+impl From<ProgramTransformerError> for ProgramTransformerTaskStatusKind {
+    fn from(error: ProgramTransformerError) -> Self {
+        match error {
+            ProgramTransformerError::ChangeLogEventMalformed => {
+                ProgramTransformerTaskStatusKind::ChangeLogEventMalformed
+            }
+            ProgramTransformerError::StorageWriteError(_) => {
+                ProgramTransformerTaskStatusKind::StorageWriteError
+            }
+            ProgramTransformerError::NotImplemented => {
+                ProgramTransformerTaskStatusKind::NotImplemented
+            }
+            ProgramTransformerError::DeserializationError(_) => {
+                ProgramTransformerTaskStatusKind::DeserializationError
+            }
+            ProgramTransformerError::SerializatonError(_) => {
+                ProgramTransformerTaskStatusKind::SerializatonError
+            }
+            ProgramTransformerError::ParsingError(_) => {
+                ProgramTransformerTaskStatusKind::ParsingError
+            }
+            ProgramTransformerError::DatabaseError(_) => {
+                ProgramTransformerTaskStatusKind::DatabaseError
+            }
+            ProgramTransformerError::AssetIndexError(_) => {
+                ProgramTransformerTaskStatusKind::AssetIndexError
+            }
+            ProgramTransformerError::DownloadMetadataNotify(_) => {
+                ProgramTransformerTaskStatusKind::DownloadMetadataNotify
+            }
+        }
+    }
+}
+
+impl From<MetadataJsonTaskError> for ProgramTransformerTaskStatusKind {
+    fn from(error: MetadataJsonTaskError) -> Self {
+        match error {
+            MetadataJsonTaskError::SeaOrm(_) => {
+                ProgramTransformerTaskStatusKind::DownloadMetadataSeaOrmError
+            }
+            MetadataJsonTaskError::Fetch(_) => {
+                ProgramTransformerTaskStatusKind::DownloadMetadataFetchError
+            }
+            MetadataJsonTaskError::AssetNotFound => {
+                ProgramTransformerTaskStatusKind::DownloadMetadataAssetNotFound
+            }
+        }
+    }
+}
+
+impl From<RedisStreamMessageError> for ProgramTransformerTaskStatusKind {
+    fn from(_: RedisStreamMessageError) -> Self {
+        ProgramTransformerTaskStatusKind::RedisMessageDeserializeError
+    }
+}
+
+impl ProgramTransformerTaskStatusKind {
+    pub const fn to_str(self) -> &'static str {
+        match self {
+            ProgramTransformerTaskStatusKind::Success => "success",
+            ProgramTransformerTaskStatusKind::NotImplemented => "not_implemented",
+            ProgramTransformerTaskStatusKind::DeserializationError => "deserialization_error",
+            ProgramTransformerTaskStatusKind::ParsingError => "parsing_error",
+            ProgramTransformerTaskStatusKind::ChangeLogEventMalformed => {
+                "changelog_event_malformed"
+            }
+            ProgramTransformerTaskStatusKind::StorageWriteError => "storage_write_error",
+            ProgramTransformerTaskStatusKind::SerializatonError => "serialization_error",
+            ProgramTransformerTaskStatusKind::DatabaseError => "database_error",
+            ProgramTransformerTaskStatusKind::AssetIndexError => "asset_index_error",
+            ProgramTransformerTaskStatusKind::DownloadMetadataNotify => "download_metadata_notify",
+            ProgramTransformerTaskStatusKind::DownloadMetadataSeaOrmError => {
+                "download_metadata_sea_orm_error"
+            }
+            ProgramTransformerTaskStatusKind::DownloadMetadataFetchError => {
+                "download_metadata_fetch_error"
+            }
+            ProgramTransformerTaskStatusKind::DownloadMetadataAssetNotFound => {
+                "download_metadata_asset_not_found"
+            }
+            ProgramTransformerTaskStatusKind::RedisMessageDeserializeError => {
+                "redis_message_deserialize_error"
+            }
+        }
+    }
+}
+
+pub fn program_transformer_task_status_inc(
+    stream: &str,
+    consumer: &str,
+    kind: ProgramTransformerTaskStatusKind,
+) {
+    PROGRAM_TRANSFORMER_TASK_STATUS_COUNT
+        .with_label_values(&[stream, consumer, kind.to_str()])
+        .inc()
+}
+
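+/// Copies one `ProofReport` into the per-tree bubblegum gauges.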
+pub fn update_tree_proof_report(report: &ProofReport) {
+    BUBBLEGUM_TREE_TOTAL_LEAVES
+        .with_label_values(&[&report.tree_pubkey.to_string()])
+        .set(report.total_leaves as i64);
+
+    BUBBLEGUM_TREE_INCORRECT_PROOFS
+        .with_label_values(&[&report.tree_pubkey.to_string()])
+        .set(report.incorrect_proofs as i64);
+
+    BUBBLEGUM_TREE_NOT_FOUND_PROOFS
+        .with_label_values(&[&report.tree_pubkey.to_string()])
+        .set(report.not_found_proofs as i64);
+
+    BUBBLEGUM_TREE_CORRECT_PROOFS
+        .with_label_values(&[&report.tree_pubkey.to_string()])
+        .set(report.correct_proofs as i64);
+
+    BUBBLEGUM_TREE_CORRUPT_PROOFS
+        .with_label_values(&[&report.tree_pubkey.to_string()])
+        .set(report.corrupt_proofs as i64);
+}
diff --git a/grpc-ingest/src/redis.rs b/grpc-ingest/src/redis.rs
new file mode 100644
index 000000000..9da08f0ab
--- /dev/null
+++ b/grpc-ingest/src/redis.rs
@@ -0,0 +1,731 @@
+use {
+    crate::{
+        config::{ConfigIngestStream, REDIS_STREAM_DATA_KEY},
+        prom::{
+            ack_tasks_total_dec, ack_tasks_total_inc, ingest_job_time_set, ingest_tasks_total_dec,
+            ingest_tasks_total_inc, program_transformer_task_status_inc, redis_xack_inc,
+            redis_xlen_set, redis_xread_inc, ProgramTransformerTaskStatusKind,
+        },
+    },
+    das_core::{DownloadMetadata, DownloadMetadataInfo},
+    futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt},
+    program_transformers::{AccountInfo, ProgramTransformer, TransactionInfo},
+    redis::{
+        aio::MultiplexedConnection,
+        streams::{StreamId, StreamKey, StreamMaxlen, StreamReadOptions, StreamReadReply},
+        AsyncCommands, ErrorKind as RedisErrorKind, RedisResult, Value as RedisValue,
+    },
+    solana_sdk::{pubkey::Pubkey, signature::Signature},
+    std::{collections::HashMap, marker::PhantomData, sync::Arc},
+    tokio::time::{sleep, Duration},
+    tracing::{debug, error, warn},
+    yellowstone_grpc_proto::{
+        convert_from::{
+            create_message_instructions, create_meta_inner_instructions, create_pubkey_vec,
+        },
+        prelude::{SubscribeUpdateAccount, SubscribeUpdateTransaction},
+        prost::Message,
+    },
+};
+
+#[derive(thiserror::Error, Debug)]
+pub enum RedisStreamMessageError {
+    #[error("failed to get data (key: {0}) from stream")]
+    MissingData(String),
+    #[error("invalid data (key: {0}) from stream")]
+    InvalidData(String),
+    #[error("failed to decode message")]
+    Decode(#[from] yellowstone_grpc_proto::prost::DecodeError),
+    #[error("received invalid SubscribeUpdateAccount")]
+    InvalidSubscribeUpdateAccount,
+    #[error("failed to convert pubkey")]
+    PubkeyConversion(#[from] std::array::TryFromSliceError),
+    #[error("JSON deserialization error: {0}")]
+    JsonDeserialization(#[from] serde_json::Error),
+}
+
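+/// Decoding of one Redis stream entry: implementors pull the payload out of
+/// the `REDIS_STREAM_DATA_KEY` field and parse it into their own type.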
+pub trait RedisStreamMessage {
+    fn try_parse_msg(msg: HashMap<String, RedisValue>) -> Result<Self, RedisStreamMessageError>
+    where
+        Self: Sized;
+
+    fn get_data_as_vec(
+        msg: &HashMap<String, RedisValue>,
+    ) -> Result<&Vec<u8>, RedisStreamMessageError> {
+        let data = msg.get(REDIS_STREAM_DATA_KEY).ok_or_else(|| {
+            RedisStreamMessageError::MissingData(REDIS_STREAM_DATA_KEY.to_string())
+        })?;
+
+        match data {
+            RedisValue::Data(data) => Ok(data),
+            _ => Err(RedisStreamMessageError::InvalidData(
+                REDIS_STREAM_DATA_KEY.to_string(),
+            )),
+        }
+    }
+}
+
+impl RedisStreamMessage for AccountInfo {
+    fn try_parse_msg(msg: HashMap<String, RedisValue>) -> Result<Self, RedisStreamMessageError> {
+        let account_data = Self::get_data_as_vec(&msg)?;
+
+        let SubscribeUpdateAccount { account, slot, .. } = Message::decode(account_data.as_ref())?;
+
+        let account =
+            account.ok_or_else(|| RedisStreamMessageError::InvalidSubscribeUpdateAccount)?;
+
+        Ok(Self {
+            slot,
+            pubkey: Pubkey::try_from(account.pubkey.as_slice())?,
+            owner: Pubkey::try_from(account.owner.as_slice())?,
+            data: account.data,
+        })
+    }
+}
+
+impl RedisStreamMessage for TransactionInfo {
+    fn try_parse_msg(msg: HashMap<String, RedisValue>) -> Result<Self, RedisStreamMessageError> {
+        let transaction_data = Self::get_data_as_vec(&msg)?;
+
+        let SubscribeUpdateTransaction { transaction, slot } =
+            Message::decode(transaction_data.as_ref())?;
+
+        let transaction = transaction.ok_or_else(|| {
+            RedisStreamMessageError::InvalidData(
+                "received invalid SubscribeUpdateTransaction".to_string(),
+            )
+        })?;
+        let tx = transaction.transaction.ok_or_else(|| {
+            RedisStreamMessageError::InvalidData(
+                "received invalid transaction in SubscribeUpdateTransaction".to_string(),
+            )
+        })?;
+        let message = tx.message.ok_or_else(|| {
+            RedisStreamMessageError::InvalidData(
+                "received invalid message in SubscribeUpdateTransaction".to_string(),
+            )
+        })?;
+        let meta = transaction.meta.ok_or_else(|| {
+            RedisStreamMessageError::InvalidData(
+                "received invalid meta in SubscribeUpdateTransaction".to_string(),
+            )
+        })?;
+
+        let mut account_keys = create_pubkey_vec(message.account_keys).map_err(|e| {
+            RedisStreamMessageError::Decode(yellowstone_grpc_proto::prost::DecodeError::new(e))
+        })?;
+        for pubkey in create_pubkey_vec(meta.loaded_writable_addresses).map_err(|e| {
+            RedisStreamMessageError::Decode(yellowstone_grpc_proto::prost::DecodeError::new(e))
+        })? {
+            account_keys.push(pubkey);
+        }
+        for pubkey in create_pubkey_vec(meta.loaded_readonly_addresses).map_err(|e| {
+            RedisStreamMessageError::Decode(yellowstone_grpc_proto::prost::DecodeError::new(e))
+        })? {
+            account_keys.push(pubkey);
+        }
+
+        Ok(Self {
+            slot,
+            signature: Signature::try_from(transaction.signature.as_slice())?,
+            account_keys,
+            message_instructions: create_message_instructions(message.instructions).map_err(
+                |e| {
+                    RedisStreamMessageError::Decode(
+                        yellowstone_grpc_proto::prost::DecodeError::new(e),
+                    )
+                },
+            )?,
+            meta_inner_instructions: create_meta_inner_instructions(meta.inner_instructions)
+                .map_err(|e| {
+                    RedisStreamMessageError::Decode(
+                        yellowstone_grpc_proto::prost::DecodeError::new(e),
+                    )
+                })?,
+        })
+    }
+}
+
+impl RedisStreamMessage for DownloadMetadataInfo {
+    fn try_parse_msg(msg: HashMap<String, RedisValue>) -> Result<Self, RedisStreamMessageError> {
+        let metadata_data = Self::get_data_as_vec(&msg)?;
+
+        let info: DownloadMetadataInfo = serde_json::from_slice(metadata_data.as_ref())?;
+
+        Ok(info)
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum IngestMessageError {
+    #[error("Redis stream message parse error: {0}")]
+    RedisStreamMessage(#[from] RedisStreamMessageError),
+    #[error("Program transformer error: {0}")]
+    ProgramTransformer(#[from] program_transformers::error::ProgramTransformerError),
+    #[error("Download metadata JSON task error: {0}")]
+    DownloadMetadataJson(#[from] das_core::MetadataJsonTaskError),
+}
+
+pub struct IngestStreamStop {
+    shutdown_tx: tokio::sync::oneshot::Sender<()>,
+    control: tokio::task::JoinHandle<()>,
+}
+
+impl IngestStreamStop {
+    pub async fn stop(self) -> anyhow::Result<()> {
+        self.shutdown_tx
+            .send(())
+            .map_err(|_| anyhow::anyhow!("Failed to send shutdown signal"))?;
+
+        self.control.await?;
+
+        Ok(())
+    }
+}
+
+pub trait MessageHandler: Send + Sync + Clone + 'static {
+    fn handle(
+        &self,
+        input: HashMap<String, RedisValue>,
+    ) -> BoxFuture<'static, Result<(), IngestMessageError>>;
+}
+
+pub struct DownloadMetadataJsonHandle(Arc<DownloadMetadata>);
+
+impl MessageHandler for DownloadMetadataJsonHandle {
+    fn handle(
+        &self,
+        input: HashMap<String, RedisValue>,
+    ) -> BoxFuture<'static, Result<(), IngestMessageError>> {
+        let download_metadata = Arc::clone(&self.0);
+
+        Box::pin(async move {
+            let info = DownloadMetadataInfo::try_parse_msg(input)?;
+            download_metadata
+                .handle_download(&info)
+                .await
+                .map_err(Into::into)
+        })
+    }
+}
+
+impl DownloadMetadataJsonHandle {
+    pub fn new(download_metadata: Arc<DownloadMetadata>) -> Self {
+        Self(download_metadata)
+    }
+}
+
+impl Clone for DownloadMetadataJsonHandle {
+    fn clone(&self) -> Self {
+        Self(Arc::clone(&self.0))
+    }
+}
+
+pub struct AccountHandle(Arc<ProgramTransformer>);
+
+impl AccountHandle {
+    pub fn new(program_transformer: Arc<ProgramTransformer>) -> Self {
+        Self(program_transformer)
+    }
+}
+
+impl MessageHandler for AccountHandle {
+    fn handle(
+        &self,
+        input: HashMap<String, RedisValue>,
+    ) -> BoxFuture<'static, Result<(), IngestMessageError>> {
+        let program_transformer = Arc::clone(&self.0);
+        Box::pin(async move {
+            let account = AccountInfo::try_parse_msg(input)?;
+            program_transformer
+                .handle_account_update(&account)
+                .await
+                .map_err(IngestMessageError::ProgramTransformer)
+        })
+    }
+}
+
+impl Clone for AccountHandle {
+    fn clone(&self) -> Self {
+        Self(Arc::clone(&self.0))
+    }
+}
+
+pub struct TransactionHandle(Arc<ProgramTransformer>);
+
+impl TransactionHandle {
+    pub fn new(program_transformer: Arc<ProgramTransformer>) -> Self {
+        Self(program_transformer)
+    }
+}
+
+impl MessageHandler for TransactionHandle {
+    fn handle(
+        &self,
+        input: HashMap<String, RedisValue>,
+    ) -> BoxFuture<'static, Result<(), IngestMessageError>> {
+        let program_transformer = Arc::clone(&self.0);
+
+        Box::pin(async move {
+            let transaction = TransactionInfo::try_parse_msg(input)?;
+            program_transformer
+                .handle_transaction(&transaction)
+                .await
+                .map_err(IngestMessageError::ProgramTransformer)
+        })
+    }
+}
+
+impl Clone for TransactionHandle {
+    fn clone(&self) -> Self {
+        Self(Arc::clone(&self.0))
+    }
+}
+
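+/// Batched acknowledgement: XACK and XDEL are issued in a single atomic
+/// pipeline, so acknowledged entries are also deleted in the same round trip.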
+#[derive(Clone)]
+pub struct Acknowledge {
+    config: Arc<ConfigIngestStream>,
+    connection: MultiplexedConnection,
+}
+
+impl Acknowledge {
+    pub fn new(config: Arc<ConfigIngestStream>, connection: MultiplexedConnection) -> Self {
+        Self { config, connection }
+    }
+}
+
+impl Acknowledge {
+    async fn handle(&self, ids: Vec<String>) {
+        let mut connection = self.connection.clone();
+        let config = &self.config;
+
+        let count = ids.len();
+
+        match redis::pipe()
+            .atomic()
+            .xack(&config.name, &config.group, &ids)
+            .xdel(&config.name, &ids)
+            .query_async::<_, redis::Value>(&mut connection)
+            .await
+        {
+            Ok(response) => {
+                debug!(
+                    target: "acknowledge_handler",
+                    "action=acknowledge_and_delete stream={} response={:?} expected={:?}",
+                    config.name, response, count
+                );
+
+                redis_xack_inc(&config.name, &config.consumer, count);
+            }
+            Err(e) => {
+                error!(
+                    target: "acknowledge_handler",
+                    "action=acknowledge_and_delete_failed stream={} error={:?}",
+                    config.name, e
+                );
+            }
+        }
+
+        ack_tasks_total_dec(&config.name, &config.consumer);
+    }
+}
+
+pub struct IngestStream<H: MessageHandler> {
+    config: Arc<ConfigIngestStream>,
+    connection: Option<MultiplexedConnection>,
+    handler: Option<H>,
+    _handler: PhantomData<H>,
+}
+
+impl<H: MessageHandler> IngestStream<H> {
+    pub fn build() -> Self {
+        Self {
+            config: Arc::new(ConfigIngestStream::default()),
+            connection: None,
+            handler: None,
+            _handler: PhantomData,
+        }
+    }
+
+    pub fn handler(mut self, handler: H) -> Self {
+        self.handler = Some(handler);
+        self
+    }
+
+    pub fn config(mut self, config: ConfigIngestStream) -> Self {
+        self.config = Arc::new(config);
+        self
+    }
+
+    pub fn connection(mut self, connection: MultiplexedConnection) -> Self {
+        self.connection = Some(connection);
+        self
+    }
+
+    async fn read(&self, connection: &mut MultiplexedConnection) -> RedisResult<StreamReadReply> {
+        let config = &self.config;
+
+        let opts = StreamReadOptions::default()
+            .group(&config.group, &config.consumer)
+            .count(config.batch_size)
+            .block(250);
+
+        connection
+            .xread_options(&[&config.name], &[">"], &opts)
+            .await
+    }
+
+    pub async fn start(mut self) -> anyhow::Result<IngestStreamStop> {
+        let config = Arc::clone(&self.config);
+        let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel();
+
+        let mut connection = self.connection.take().expect("Connection is required");
+        let handler = self.handler.take().expect("Handler is required");
+
+        xgroup_create(&mut connection, &config.name, &config.group).await?;
+
+        xgroup_delete_consumer(
+            &mut connection,
+            &config.name,
+            &config.group,
+            &config.consumer,
+        )
+        .await?;
+
+        xgroup_create_consumer(
+            &mut connection,
+            &config.name,
+            &config.group,
+            &config.consumer,
+        )
+        .await?;
+
+        let (ack_tx, mut ack_rx) = tokio::sync::mpsc::channel::<String>(config.xack_buffer_size);
+        let (ack_shutdown_tx, mut ack_shutdown_rx) = tokio::sync::oneshot::channel::<()>();
+
+        let (msg_tx, mut msg_rx) =
+            tokio::sync::mpsc::channel::<Vec<StreamId>>(config.message_buffer_size);
+        let (msg_shutdown_tx, mut msg_shutdown_rx) = tokio::sync::oneshot::channel::<()>();
+
+        let config_messages = Arc::clone(&config);
+
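+        // Message stage: drain batches of StreamIds, run the handler for each
+        // entry on its own task (bounded by max_concurrency), record the
+        // outcome, then forward the entry id to the ack stage.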
+        let messages = tokio::spawn(async move {
+            let mut tasks = FuturesUnordered::new();
+            let config = Arc::clone(&config_messages);
+            let handler = handler.clone();
+
+            loop {
+                tokio::select! {
+                    Some(ids) = msg_rx.recv() => {
+                        for StreamId { id, map } in ids {
+                            if tasks.len() >= config.max_concurrency {
+                                tasks.next().await;
+                            }
+
+                            let handler = handler.clone();
+                            let ack_tx = ack_tx.clone();
+                            let config = Arc::clone(&config);
+
+                            ingest_tasks_total_inc(&config.name, &config.consumer);
+
+                            tasks.push(tokio::spawn(async move {
+                                let start_time = tokio::time::Instant::now();
+                                let result = handler.handle(map).await.map_err(IngestMessageError::into);
+                                let elapsed_time = start_time.elapsed().as_secs_f64();
+
+                                ingest_job_time_set(&config.name, &config.consumer, elapsed_time);
+
+                                match result {
+                                    Ok(()) => {
+                                        program_transformer_task_status_inc(&config.name, &config.consumer, ProgramTransformerTaskStatusKind::Success);
+                                    }
+                                    Err(IngestMessageError::RedisStreamMessage(e)) => {
+                                        error!("Failed to process message: {:?}", e);
+                                        program_transformer_task_status_inc(&config.name, &config.consumer, e.into());
+                                    }
+                                    Err(IngestMessageError::DownloadMetadataJson(e)) => {
+                                        program_transformer_task_status_inc(&config.name, &config.consumer, e.into());
+                                    }
+                                    Err(IngestMessageError::ProgramTransformer(e)) => {
+                                        error!("Failed to process message: {:?}", e);
+                                        program_transformer_task_status_inc(&config.name, &config.consumer, e.into());
+                                    }
+                                }
+
+                                if let Err(e) = ack_tx.send(id).await {
+                                    error!(target: "ingest_stream", "action=send_ack stream={} error={:?}", &config.name, e);
+                                }
+
+                                ingest_tasks_total_dec(&config.name, &config.consumer);
+                            }));
+                        }
+                    }
+                    _ = &mut msg_shutdown_rx => {
+                        break;
+                    }
+                }
+            }
+
+            while (tasks.next().await).is_some() {}
+        });
+
+        let ack = tokio::spawn({
+            let config = Arc::clone(&config);
+            let mut pending = Vec::new();
+            let mut tasks = FuturesUnordered::new();
+            let handler = Arc::new(Acknowledge::new(Arc::clone(&config), connection.clone()));
+
+            async move {
+                let deadline = tokio::time::sleep(config.xack_batch_max_idle);
+                tokio::pin!(deadline);
+
+                loop {
+                    tokio::select! {
+                        Some(id) = ack_rx.recv() => {
+                            pending.push(id);
+
+                            if pending.len() >= config.xack_batch_max_size {
+                                if tasks.len() >= config.ack_concurrency {
+                                    tasks.next().await;
+                                }
+
+                                let ids = std::mem::take(&mut pending);
+                                let handler = Arc::clone(&handler);
+
+                                ack_tasks_total_inc(&config.name, &config.consumer);
+
+                                tasks.push(tokio::spawn(async move {
+                                    handler.handle(ids).await;
+                                }));
+
+                                deadline.as_mut().reset(tokio::time::Instant::now() + config.xack_batch_max_idle);
+                            }
+                        }
+                        _ = &mut deadline, if !pending.is_empty() => {
+                            if tasks.len() >= config.ack_concurrency {
+                                tasks.next().await;
+                            }
+                            let ids = std::mem::take(&mut pending);
+                            let handler = Arc::clone(&handler);
+
+                            ack_tasks_total_inc(&config.name, &config.consumer);
+
+                            tasks.push(tokio::spawn(async move {
+                                handler.handle(ids).await;
+                            }));
+
+                            deadline.as_mut().reset(tokio::time::Instant::now() + config.xack_batch_max_idle);
+                        }
+                        _ = &mut ack_shutdown_rx => {
+                            break;
+                        }
+                    }
+                }
+
+                if !pending.is_empty() {
+                    let handler = Arc::clone(&handler);
+                    handler.handle(std::mem::take(&mut pending)).await;
+                }
+
+                while (tasks.next().await).is_some() {}
+            }
+        });
+
+        let labels = vec![config.name.clone()];
+        tokio::spawn({
+            let connection = connection.clone();
+            let config = Arc::clone(&config);
+
+            async move {
+                let config = Arc::clone(&config);
+
+                loop {
+                    let connection = connection.clone();
+                    let labels = labels.clone();
+
+                    if let Err(e) = report_xlen(connection, labels).await {
+                        error!(target: "ingest_stream", "action=report_xlen stream={} error={:?}", &config.name, e);
+                    }
+
+                    sleep(Duration::from_millis(100)).await;
+                }
+            }
+        });
+
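+        // Control stage: XREADGROUP new entries (">") and fan them out to the
+        // message stage; on shutdown, stop the message stage first and the ack
+        // stage second, so handled entries still get acknowledged.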
+        let control = tokio::spawn({
+            let mut connection = connection.clone();
+
+            async move {
+                let config = Arc::clone(&config);
+
+                debug!(target: "ingest_stream", "action=read_stream_start stream={}", config.name);
+
+                loop {
+                    let config = Arc::clone(&config);
+
+                    tokio::select! {
+                        _ = &mut shutdown_rx => {
+                            if let Err(e) = msg_shutdown_tx.send(()) {
+                                error!(target: "ingest_stream", "action=msg_shutdown stream={} error={:?}", &config.name, e);
+                            }
+
+                            if let Err(e) = messages.await {
+                                error!(target: "ingest_stream", "action=await_messages stream={} error={:?}", &config.name, e);
+                            }
+
+                            if let Err(e) = ack_shutdown_tx.send(()) {
+                                error!(target: "ingest_stream", "action=ack_shutdown stream={} error={:?}", &config.name, e);
+                            }
+
+                            if let Err(e) = ack.await {
+                                error!(target: "ingest_stream", "action=ack_shutdown stream={} error={:?}", &config.name, e);
+                            }
+
+                            break;
+                        },
+                        result = self.read(&mut connection) => {
+                            match result {
+                                Ok(reply) => {
+                                    for StreamKey { key: _, ids } in reply.keys {
+                                        let config = Arc::clone(&config);
+                                        let count = ids.len();
+                                        debug!(target: "ingest_stream", "action=xread stream={} count={:?}", &config.name, count);
+
+                                        redis_xread_inc(&config.name, &config.consumer, count);
+
+                                        if let Err(e) = msg_tx.send(ids).await {
+                                            error!(target: "ingest_stream", "action=send_ids stream={} error={:?}", &config.name, e);
+                                        }
+                                    }
+                                }
+                                Err(err) => {
+                                    error!(target: "ingest_stream", "action=xread stream={} error={:?}", &config.name, err);
+                                }
+                            }
+                        }
+                    }
+                }
+
+                warn!(target: "ingest_stream", "action=stream_shutdown stream={} stream shutdown", config.name);
+            }
+        });
+
+        Ok(IngestStreamStop {
+            control,
+            shutdown_tx,
+        })
+    }
+}
+
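+/// A `redis::Pipeline` that counts queued XADDs so `flush` can report how many
+/// entries were sent (`Ok(count)`) or dropped (`Err(count)`) in one number.
+///
+/// Sketch of the intended use (this is how the gRPC subscription task in
+/// grpc.rs drives it):
+///
+/// ```ignore
+/// let mut pipe = TrackedPipeline::default();
+/// pipe.xadd_maxlen("stream", StreamMaxlen::Approx(10_000), "*", payload);
+/// let flushed = pipe.flush(&mut connection).await;
+/// ```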
redis::cmd("XGROUP") + .arg("DELCONSUMER") + .arg(name) + .arg(group) + .arg(consumer) + .query_async(connection) + .await; + + match result { + Ok(_) => Ok(()), + Err(error) => Err(error.into()), + } +} diff --git a/grpc-ingest/src/tracing.rs b/grpc-ingest/src/tracing.rs new file mode 100644 index 000000000..2d50f785c --- /dev/null +++ b/grpc-ingest/src/tracing.rs @@ -0,0 +1,33 @@ +use { + opentelemetry_sdk::trace::{self, Sampler}, + std::env, + tracing_subscriber::{filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}, +}; + +pub fn init() -> anyhow::Result<()> { + let open_tracer = opentelemetry_jaeger::new_agent_pipeline() + .with_service_name(env::var("CARGO_PKG_NAME")?) + .with_auto_split_batch(true) + .with_trace_config(trace::config().with_sampler(Sampler::TraceIdRatioBased(0.25))) + .install_batch(opentelemetry_sdk::runtime::Tokio)?; + let jeager_layer = tracing_opentelemetry::layer().with_tracer(open_tracer); + + let env_filter = EnvFilter::builder() + .parse(env::var(EnvFilter::DEFAULT_ENV).unwrap_or_else(|_| "info,sqlx=warn".to_owned()))?; + + let is_atty = atty::is(atty::Stream::Stdout) && atty::is(atty::Stream::Stderr); + let io_layer = tracing_subscriber::fmt::layer().with_ansi(is_atty); + + let registry = tracing_subscriber::registry() + .with(jeager_layer) + .with(env_filter) + .with(io_layer); + + if env::var_os("RUST_LOG_JSON").is_some() { + let json_layer = tracing_subscriber::fmt::layer().json().flatten_event(true); + registry.with(json_layer).try_init() + } else { + registry.try_init() + } + .map_err(Into::into) +} diff --git a/grpc-ingest/src/util.rs b/grpc-ingest/src/util.rs new file mode 100644 index 000000000..0a7800a12 --- /dev/null +++ b/grpc-ingest/src/util.rs @@ -0,0 +1,19 @@ +use { + async_stream::stream, + futures::stream::{BoxStream, StreamExt}, + tokio::signal::unix::{signal, SignalKind}, +}; + +pub fn create_shutdown() -> anyhow::Result> { + let mut sigint = signal(SignalKind::interrupt())?; + let mut sigterm = signal(SignalKind::terminate())?; + Ok(stream! { + loop { + yield tokio::select! 
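+/// Yields "SIGINT" or "SIGTERM" each time one of those signals arrives; the
+/// stream never terminates, so callers decide how many signals to wait for.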
+pub fn create_shutdown() -> anyhow::Result<BoxStream<'static, &'static str>> {
+    let mut sigint = signal(SignalKind::interrupt())?;
+    let mut sigterm = signal(SignalKind::terminate())?;
+    Ok(stream! {
+        loop {
+            yield tokio::select! {
+                _ = sigint.recv() => "SIGINT",
+                _ = sigterm.recv() => "SIGTERM",
+            };
+        }
+    }
+    .boxed())
+}
diff --git a/grpc-ingest/src/version.rs b/grpc-ingest/src/version.rs
new file mode 100644
index 000000000..b9da62845
--- /dev/null
+++ b/grpc-ingest/src/version.rs
@@ -0,0 +1,22 @@
+use {serde::Serialize, std::env};
+
+#[derive(Debug, Serialize)]
+pub struct Version {
+    pub package: &'static str,
+    pub version: &'static str,
+    pub proto: &'static str,
+    pub solana: &'static str,
+    pub git: &'static str,
+    pub rustc: &'static str,
+    pub buildts: &'static str,
+}
+
+pub const VERSION: Version = Version {
+    package: env!("CARGO_PKG_NAME"),
+    version: env!("CARGO_PKG_VERSION"),
+    proto: env!("YELLOWSTONE_GRPC_PROTO_VERSION"),
+    solana: env!("SOLANA_SDK_VERSION"),
+    git: env!("GIT_VERSION"),
+    rustc: env!("VERGEN_RUSTC_SEMVER"),
+    buildts: env!("VERGEN_BUILD_TIMESTAMP"),
+};
diff --git a/integration_tests/tests/data/accounts/fungible_token_get_asset/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq b/integration_tests/tests/data/accounts/fungible_token_get_asset/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq
new file mode 100644
index 000000000..a6006eeb7
Binary files /dev/null and b/integration_tests/tests/data/accounts/fungible_token_get_asset/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq differ
diff --git a/integration_tests/tests/data/accounts/fungible_token_get_asset/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v b/integration_tests/tests/data/accounts/fungible_token_get_asset/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v
new file mode 100644
index 000000000..182b80a9a
Binary files /dev/null and b/integration_tests/tests/data/accounts/fungible_token_get_asset/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v differ
diff --git a/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_1/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq b/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_1/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq
new file mode 100644
index 000000000..1326753e3
Binary files /dev/null and b/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_1/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq differ
diff --git a/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_1/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v b/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_1/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v
new file mode 100644
index 000000000..3ffad1cc0
Binary files /dev/null and b/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_1/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v differ
diff --git a/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_2/wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1 b/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_2/wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1
new file mode 100644
index 000000000..49c0c5373
Binary files /dev/null and b/integration_tests/tests/data/accounts/fungible_token_get_asset_scenario_2/wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1 differ
diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK
new file mode 100644
index 000000000..8825aa196
Binary files /dev/null and
b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti new file mode 100644 index 000000000..4ce4fb62c Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM new file mode 100644 index 000000000..1c60720cb Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk new file mode 100644 index 000000000..5999bdd44 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_collection_metadata_option/F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/7z6b5TE4WX4mgcQjuNBTDxK4SE75sbgEg5WWJwoUeie8 b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/7z6b5TE4WX4mgcQjuNBTDxK4SE75sbgEg5WWJwoUeie8 new file mode 100644 index 000000000..7670328be Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/7z6b5TE4WX4mgcQjuNBTDxK4SE75sbgEg5WWJwoUeie8 differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/8myaCN6KcKVkMqroXuLJq6QsqRcPbvme4wV5Ubfr5mDC b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/8myaCN6KcKVkMqroXuLJq6QsqRcPbvme4wV5Ubfr5mDC new file mode 100644 index 000000000..69f99d0d5 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/8myaCN6KcKVkMqroXuLJq6QsqRcPbvme4wV5Ubfr5mDC differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/Ca84nWhQu41DMRnjdhRrLZty1i9txepMhAhz5qLLGcBw b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/Ca84nWhQu41DMRnjdhRrLZty1i9txepMhAhz5qLLGcBw new file mode 100644 index 000000000..f273bd7c1 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_1/Ca84nWhQu41DMRnjdhRrLZty1i9txepMhAhz5qLLGcBw differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK new file mode 100644 index 000000000..de4e24d36 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK differ diff --git 
a/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti new file mode 100644 index 000000000..3ffe5ac43 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM new file mode 100644 index 000000000..4922084e3 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_fungible_scenario_2/AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/4Q18N6XrfJHgDbRTaHJR328jN9dixCLQAQhDsTsRzg3v b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/4Q18N6XrfJHgDbRTaHJR328jN9dixCLQAQhDsTsRzg3v new file mode 100644 index 000000000..7443f1f46 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/4Q18N6XrfJHgDbRTaHJR328jN9dixCLQAQhDsTsRzg3v differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ new file mode 100644 index 000000000..1478546cd Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/AKo9P7S8FE9NYeAcrtZEpimwQAXJMp8Lrt8p4dMkHkY2 b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/AKo9P7S8FE9NYeAcrtZEpimwQAXJMp8Lrt8p4dMkHkY2 new file mode 100644 index 000000000..b6b5149df Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/AKo9P7S8FE9NYeAcrtZEpimwQAXJMp8Lrt8p4dMkHkY2 differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/DarH4z6SmdVzPrt8krAygpLodhdjvNAstP3taj2tysN2 b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/DarH4z6SmdVzPrt8krAygpLodhdjvNAstP3taj2tysN2 new file mode 100644 index 000000000..cc574d2ce Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/DarH4z6SmdVzPrt8krAygpLodhdjvNAstP3taj2tysN2 differ diff --git a/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/HMixBLSkuhiGgVbcGhqJar476xzu1bC8wM7yHsc1iXwP b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/HMixBLSkuhiGgVbcGhqJar476xzu1bC8wM7yHsc1iXwP new file mode 100644 index 000000000..ee9399855 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_asset_with_show_inscription_scenario_1/HMixBLSkuhiGgVbcGhqJar476xzu1bC8wM7yHsc1iXwP differ diff --git a/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/2ecGsTKbj7FecLwxTHaodZRFwza7m7LamqDG4YjczZMj 
b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/2ecGsTKbj7FecLwxTHaodZRFwza7m7LamqDG4YjczZMj new file mode 100644 index 000000000..1ae41d2d6 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/2ecGsTKbj7FecLwxTHaodZRFwza7m7LamqDG4YjczZMj differ diff --git a/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/DZAZ3mGuq7nCYGzUyw4MiA74ysr15EfqLpzCzX2cRVng b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/DZAZ3mGuq7nCYGzUyw4MiA74ysr15EfqLpzCzX2cRVng new file mode 100644 index 000000000..3e817c9dc Binary files /dev/null and b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/DZAZ3mGuq7nCYGzUyw4MiA74ysr15EfqLpzCzX2cRVng differ diff --git a/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk new file mode 100644 index 000000000..4908e565b Binary files /dev/null and b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk differ diff --git a/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X new file mode 100644 index 000000000..eee1c3953 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_assets_with_multiple_same_ids/JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/4V9QuYLpiMu4ZQmhdEHmgATdgiHkDeJfvZi84BfkYcez b/integration_tests/tests/data/accounts/get_nft_editions/4V9QuYLpiMu4ZQmhdEHmgATdgiHkDeJfvZi84BfkYcez new file mode 100644 index 000000000..558488282 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/4V9QuYLpiMu4ZQmhdEHmgATdgiHkDeJfvZi84BfkYcez differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/8SHfqzJYABeGfiG1apwiEYt6TvfGQiL1pdwEjvTKsyiZ b/integration_tests/tests/data/accounts/get_nft_editions/8SHfqzJYABeGfiG1apwiEYt6TvfGQiL1pdwEjvTKsyiZ new file mode 100644 index 000000000..10eae717a Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/8SHfqzJYABeGfiG1apwiEYt6TvfGQiL1pdwEjvTKsyiZ differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/9ZmY7qCaq7WbrR7RZdHWCNS9FrFRPwRqU84wzWfmqLDz b/integration_tests/tests/data/accounts/get_nft_editions/9ZmY7qCaq7WbrR7RZdHWCNS9FrFRPwRqU84wzWfmqLDz new file mode 100644 index 000000000..c8244da85 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/9ZmY7qCaq7WbrR7RZdHWCNS9FrFRPwRqU84wzWfmqLDz differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/9yQecKKYSHxez7fFjJkUvkz42TLmkoXzhyZxEf2pw8pz b/integration_tests/tests/data/accounts/get_nft_editions/9yQecKKYSHxez7fFjJkUvkz42TLmkoXzhyZxEf2pw8pz new file mode 100644 index 000000000..75d190842 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/9yQecKKYSHxez7fFjJkUvkz42TLmkoXzhyZxEf2pw8pz differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/AoxgzXKEsJmUyF5pBb3djn9cJFA26zh2SQHvd9EYijZV b/integration_tests/tests/data/accounts/get_nft_editions/AoxgzXKEsJmUyF5pBb3djn9cJFA26zh2SQHvd9EYijZV new file mode 100644 index 000000000..31abb9fb6 Binary files /dev/null 
and b/integration_tests/tests/data/accounts/get_nft_editions/AoxgzXKEsJmUyF5pBb3djn9cJFA26zh2SQHvd9EYijZV differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/Ey2Qb8kLctbchQsMnhZs5DjY32To2QtPuXNwWvk4NosL b/integration_tests/tests/data/accounts/get_nft_editions/Ey2Qb8kLctbchQsMnhZs5DjY32To2QtPuXNwWvk4NosL new file mode 100644 index 000000000..cf8f217c8 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/Ey2Qb8kLctbchQsMnhZs5DjY32To2QtPuXNwWvk4NosL differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/GJvFDcBWf6aDncd1TBzx2ou1rgLFYaMBdbYLBa9oTAEw b/integration_tests/tests/data/accounts/get_nft_editions/GJvFDcBWf6aDncd1TBzx2ou1rgLFYaMBdbYLBa9oTAEw new file mode 100644 index 000000000..80ea336fb Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/GJvFDcBWf6aDncd1TBzx2ou1rgLFYaMBdbYLBa9oTAEw differ diff --git a/integration_tests/tests/data/accounts/get_nft_editions/giWoA4jqHFkodPJgtbRYRcYtiXbsVytnxnEao3QT2gg b/integration_tests/tests/data/accounts/get_nft_editions/giWoA4jqHFkodPJgtbRYRcYtiXbsVytnxnEao3QT2gg new file mode 100644 index 000000000..a63f177f0 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_nft_editions/giWoA4jqHFkodPJgtbRYRcYtiXbsVytnxnEao3QT2gg differ diff --git a/integration_tests/tests/data/accounts/get_token_accounts/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 b/integration_tests/tests/data/accounts/get_token_accounts/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 new file mode 100644 index 000000000..b4c5bef73 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_token_accounts/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 differ diff --git a/integration_tests/tests/data/accounts/get_token_accounts_by_mint/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 b/integration_tests/tests/data/accounts/get_token_accounts_by_mint/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 new file mode 100644 index 000000000..71c23c91d Binary files /dev/null and b/integration_tests/tests/data/accounts/get_token_accounts_by_mint/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 differ diff --git a/integration_tests/tests/data/accounts/get_token_accounts_by_owner/3Pv9H5UzU8T9BwgutXrcn2wLohS1JUZuk3x8paiRyzui b/integration_tests/tests/data/accounts/get_token_accounts_by_owner/3Pv9H5UzU8T9BwgutXrcn2wLohS1JUZuk3x8paiRyzui new file mode 100644 index 000000000..20e262776 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_token_accounts_by_owner/3Pv9H5UzU8T9BwgutXrcn2wLohS1JUZuk3x8paiRyzui differ diff --git a/integration_tests/tests/data/accounts/get_token_accounts_by_owner/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 b/integration_tests/tests/data/accounts/get_token_accounts_by_owner/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 new file mode 100644 index 000000000..868b38969 Binary files /dev/null and b/integration_tests/tests/data/accounts/get_token_accounts_by_owner/jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2 differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_all/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB new file mode 100644 index 000000000..17af7f79e Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB differ diff --git 
a/integration_tests/tests/data/accounts/search_asset_with_token_type_all/7oSzJpyztTuK124EyPAw2nbF4Vaj2P9MU9vww1QN1k8p b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/7oSzJpyztTuK124EyPAw2nbF4Vaj2P9MU9vww1QN1k8p new file mode 100644 index 000000000..118ca2a0c Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/7oSzJpyztTuK124EyPAw2nbF4Vaj2P9MU9vww1QN1k8p differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_all/7sVGLDmpnYqX5EvTg7i3tpRNEugeaUyDC9HtPSb3V3DS b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/7sVGLDmpnYqX5EvTg7i3tpRNEugeaUyDC9HtPSb3V3DS new file mode 100644 index 000000000..05f08bf57 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/7sVGLDmpnYqX5EvTg7i3tpRNEugeaUyDC9HtPSb3V3DS differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_all/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo new file mode 100644 index 000000000..36feef591 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_all/DVHQquD7pQFUsBoPpW816CU8zQrQCua9mw4Znh9FyKZJ b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/DVHQquD7pQFUsBoPpW816CU8zQrQCua9mw4Znh9FyKZJ new file mode 100644 index 000000000..450366f15 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/DVHQquD7pQFUsBoPpW816CU8zQrQCua9mw4Znh9FyKZJ differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_all/HboN9TsoMSKJAp388G752pSUscb8iZwgdH459KSJxbZT b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/HboN9TsoMSKJAp388G752pSUscb8iZwgdH459KSJxbZT new file mode 100644 index 000000000..db36302a3 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_all/HboN9TsoMSKJAp388G752pSUscb8iZwgdH459KSJxbZT differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/6BRNfDfdq1nKyU1TQiCEQLWyPtD8EwUH9Kt2ahsbidUx b/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/6BRNfDfdq1nKyU1TQiCEQLWyPtD8EwUH9Kt2ahsbidUx new file mode 100644 index 000000000..5ae2375a0 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/6BRNfDfdq1nKyU1TQiCEQLWyPtD8EwUH9Kt2ahsbidUx differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/7BajpcYgnxmWK91RhrfsdB3Tm83PcDwPvMC8ZinvtTY6 b/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/7BajpcYgnxmWK91RhrfsdB3Tm83PcDwPvMC8ZinvtTY6 new file mode 100644 index 000000000..2d30ad308 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/7BajpcYgnxmWK91RhrfsdB3Tm83PcDwPvMC8ZinvtTY6 differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/7EYnhQoR9YM3N7UoaKRoA44Uy8JeaZV3qyouov87awMs b/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/7EYnhQoR9YM3N7UoaKRoA44Uy8JeaZV3qyouov87awMs new file mode 100644 index 000000000..48f5f5651 Binary files /dev/null and 
b/integration_tests/tests/data/accounts/search_asset_with_token_type_fungible/7EYnhQoR9YM3N7UoaKRoA44Uy8JeaZV3qyouov87awMs differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/47ackukZJRBkQSufwFnhTkmTzB11Ww8375EDXTwY75wk b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/47ackukZJRBkQSufwFnhTkmTzB11Ww8375EDXTwY75wk new file mode 100644 index 000000000..d13dae66e Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/47ackukZJRBkQSufwFnhTkmTzB11Ww8375EDXTwY75wk differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo new file mode 100644 index 000000000..eac706841 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk new file mode 100644 index 000000000..47c32c19e Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/DVHQquD7pQFUsBoPpW816CU8zQrQCua9mw4Znh9FyKZJ b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/DVHQquD7pQFUsBoPpW816CU8zQrQCua9mw4Znh9FyKZJ new file mode 100644 index 000000000..8268e82ec Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/DVHQquD7pQFUsBoPpW816CU8zQrQCua9mw4Znh9FyKZJ differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/ELNshcVjEgQ6nSsogWEQjRTr9EaEHJzKcSenqe2kyx5J b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/ELNshcVjEgQ6nSsogWEQjRTr9EaEHJzKcSenqe2kyx5J new file mode 100644 index 000000000..6d8ed5e48 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/ELNshcVjEgQ6nSsogWEQjRTr9EaEHJzKcSenqe2kyx5J differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/HboN9TsoMSKJAp388G752pSUscb8iZwgdH459KSJxbZT b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/HboN9TsoMSKJAp388G752pSUscb8iZwgdH459KSJxbZT new file mode 100644 index 000000000..f930ec392 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_non_fungible/HboN9TsoMSKJAp388G752pSUscb8iZwgdH459KSJxbZT differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2 b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2 new file mode 100644 index 000000000..a7efd35f9 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2 differ diff --git 
a/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB new file mode 100644 index 000000000..9c7db2557 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/7oSzJpyztTuK124EyPAw2nbF4Vaj2P9MU9vww1QN1k8p b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/7oSzJpyztTuK124EyPAw2nbF4Vaj2P9MU9vww1QN1k8p new file mode 100644 index 000000000..22ba70ff4 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/7oSzJpyztTuK124EyPAw2nbF4Vaj2P9MU9vww1QN1k8p differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/7sVGLDmpnYqX5EvTg7i3tpRNEugeaUyDC9HtPSb3V3DS b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/7sVGLDmpnYqX5EvTg7i3tpRNEugeaUyDC9HtPSb3V3DS new file mode 100644 index 000000000..dc785fa4b Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/7sVGLDmpnYqX5EvTg7i3tpRNEugeaUyDC9HtPSb3V3DS differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/CyyZ43boZTaP4mJsbTVUpinFJGMQXSMstJvDeatpEo4S b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/CyyZ43boZTaP4mJsbTVUpinFJGMQXSMstJvDeatpEo4S new file mode 100644 index 000000000..a75763c95 Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/CyyZ43boZTaP4mJsbTVUpinFJGMQXSMstJvDeatpEo4S differ diff --git a/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/iRjBSW4tnw42qxf3tycTYKki1d4bcETt9AfjEwoHYgy b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/iRjBSW4tnw42qxf3tycTYKki1d4bcETt9AfjEwoHYgy new file mode 100644 index 000000000..dc89d878f Binary files /dev/null and b/integration_tests/tests/data/accounts/search_asset_with_token_type_regular_nft/iRjBSW4tnw42qxf3tycTYKki1d4bcETt9AfjEwoHYgy differ diff --git a/integration_tests/tests/data/accounts/show_zero_balance_filter/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS b/integration_tests/tests/data/accounts/show_zero_balance_filter/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS new file mode 100644 index 000000000..e6349a5bf Binary files /dev/null and b/integration_tests/tests/data/accounts/show_zero_balance_filter/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS differ diff --git a/integration_tests/tests/data/accounts/show_zero_balance_filter/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a b/integration_tests/tests/data/accounts/show_zero_balance_filter/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a new file mode 100644 index 000000000..53ce3c8dd Binary files /dev/null and b/integration_tests/tests/data/accounts/show_zero_balance_filter/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a differ diff --git a/integration_tests/tests/data/accounts/show_zero_balance_filter_being_disabled/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_disabled/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS new file mode 100644 index 000000000..9aee1db2c Binary files /dev/null and 
b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_disabled/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS differ diff --git a/integration_tests/tests/data/accounts/show_zero_balance_filter_being_disabled/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_disabled/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a new file mode 100644 index 000000000..100867304 Binary files /dev/null and b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_disabled/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a differ diff --git a/integration_tests/tests/data/accounts/show_zero_balance_filter_being_enabled/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_enabled/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS new file mode 100644 index 000000000..570a99c14 Binary files /dev/null and b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_enabled/BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS differ diff --git a/integration_tests/tests/data/accounts/show_zero_balance_filter_being_enabled/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_enabled/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a new file mode 100644 index 000000000..204f4d2ec Binary files /dev/null and b/integration_tests/tests/data/accounts/show_zero_balance_filter_being_enabled/CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a differ diff --git a/integration_tests/tests/data/accounts/token_extensions_get_asset/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y b/integration_tests/tests/data/accounts/token_extensions_get_asset/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y new file mode 100644 index 000000000..357411c17 Binary files /dev/null and b/integration_tests/tests/data/accounts/token_extensions_get_asset/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y differ diff --git a/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario1/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario1/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y new file mode 100644 index 000000000..c864c0089 Binary files /dev/null and b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario1/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y differ diff --git a/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario2/HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario2/HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM new file mode 100644 index 000000000..a4c739eb4 Binary files /dev/null and b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario2/HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM differ diff --git a/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_1/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_1/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y new file mode 100644 index 000000000..805898376 Binary files /dev/null and b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_1/BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y differ diff --git a/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_2/HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM 
b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_2/HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM new file mode 100644 index 000000000..951ce886c Binary files /dev/null and b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_2/HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM differ diff --git a/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_3/2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_3/2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo new file mode 100644 index 000000000..2f1cdbc62 Binary files /dev/null and b/integration_tests/tests/data/accounts/token_extensions_get_asset_scenario_3/2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo differ diff --git a/integration_tests/tests/data/largest_token_account_ids/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2 b/integration_tests/tests/data/largest_token_account_ids/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2 new file mode 100644 index 000000000..3dc3aa9c3 --- /dev/null +++ b/integration_tests/tests/data/largest_token_account_ids/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2/2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2 @@ -0,0 +1,2 @@ + +œÅ¸ëñ¡9”ž€ÆmR¨i\Ÿ¾âê ½ë]Ê \ No newline at end of file diff --git a/integration_tests/tests/data/largest_token_account_ids/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB b/integration_tests/tests/data/largest_token_account_ids/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB new file mode 100644 index 000000000..c9241fcba --- /dev/null +++ b/integration_tests/tests/data/largest_token_account_ids/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB/42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB @@ -0,0 +1,2 @@ +e 9;§:þ$fúÊEÁv½KA¶ +º”\«%.‰ò \ No newline at end of file diff --git a/integration_tests/tests/data/largest_token_account_ids/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo b/integration_tests/tests/data/largest_token_account_ids/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo new file mode 100644 index 000000000..eb66ffa10 Binary files /dev/null and b/integration_tests/tests/data/largest_token_account_ids/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo/8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo differ diff --git a/integration_tests/tests/data/largest_token_account_ids/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk b/integration_tests/tests/data/largest_token_account_ids/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk new file mode 100644 index 000000000..7bc1e9a00 --- /dev/null +++ b/integration_tests/tests/data/largest_token_account_ids/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk/AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk @@ -0,0 +1 @@ +.BŽQ–*pp-¥ëÊUÉìûîž·„gå›óîÏ>þ \ No newline at end of file diff --git a/integration_tests/tests/data/transactions/search_asset_with_token_type_compressed/4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC b/integration_tests/tests/data/transactions/search_asset_with_token_type_compressed/4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC new file mode 100644 index 000000000..a9f81afe7 Binary files /dev/null and 
b/integration_tests/tests/data/transactions/search_asset_with_token_type_compressed/4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC differ diff --git a/integration_tests/tests/data/transactions/search_asset_with_token_type_compressed/4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA b/integration_tests/tests/data/transactions/search_asset_with_token_type_compressed/4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA new file mode 100644 index 000000000..23b8337f4 Binary files /dev/null and b/integration_tests/tests/data/transactions/search_asset_with_token_type_compressed/4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA differ diff --git a/integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs b/integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs new file mode 100644 index 000000000..6115b3103 --- /dev/null +++ b/integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs @@ -0,0 +1,162 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_token_extensions_get_asset_scenario_1() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts(["BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y"]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y" + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_token_extensions_get_asset_scenario_2() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts(["HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM"]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM" + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_token_extensions_get_asset_scenario_3() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts(["2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo"]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo" + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + + 
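+ // Editor's note (sketch, not part of the original patch): every test in this
+ // file follows the same pattern -- seed the accounts, reset and migrate the
+ // database, index the seed events, issue a JSON-RPC request, and snapshot the
+ // response with insta. A hypothetical helper capturing that shape (the helper
+ // name is illustrative, not from this patch) could look like:
+ //
+ //     async fn assert_get_asset_snapshot(setup: &TestSetup, name: &str, id: &str) {
+ //         // Build the same wire-format request the tests below write by hand.
+ //         let request: api::GetAsset =
+ //             serde_json::from_str(&format!(r#"{{"id": "{id}"}}"#)).unwrap();
+ //         let response = setup.das_api.get_asset(request).await.unwrap();
+ //         insta::assert_json_snapshot!(name.to_owned(), response);
+ //     }
+ //
+ // The tests in this file keep those steps inline so each scenario stays
+ // self-contained.
+ 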
insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_fungible_token_get_asset_scenario_1() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts([ + "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_fungible_token_get_asset_scenario_2() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts(["wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1"]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1" + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/main.rs b/integration_tests/tests/integration_tests/main.rs index 7220ca96c..c14c435e9 100644 --- a/integration_tests/tests/integration_tests/main.rs +++ b/integration_tests/tests/integration_tests/main.rs @@ -1,6 +1,15 @@ mod account_update_tests; mod cnft_tests; mod common; +mod fungibles_and_token_extensions_tests; mod general_scenario_tests; mod mpl_core_tests; +mod nft_editions_tests; mod regular_nft_tests; +mod show_fungible_flag_tests; +mod show_inscription_flag_tests; +mod test_get_assets_with_multiple_same_ids; +mod test_show_collection_metadata_option; +mod test_show_zero_balance_filter; +mod token_accounts_tests; +mod token_type_test; diff --git a/integration_tests/tests/integration_tests/nft_editions_tests.rs b/integration_tests/tests/integration_tests/nft_editions_tests.rs new file mode 100644 index 000000000..61929f2f5 --- /dev/null +++ b/integration_tests/tests/integration_tests/nft_editions_tests.rs @@ -0,0 +1,50 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_get_nft_editions() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts([ + "Ey2Qb8kLctbchQsMnhZs5DjY32To2QtPuXNwWvk4NosL", + "9ZmY7qCaq7WbrR7RZdHWCNS9FrFRPwRqU84wzWfmqLDz", + "8SHfqzJYABeGfiG1apwiEYt6TvfGQiL1pdwEjvTKsyiZ", + "GJvFDcBWf6aDncd1TBzx2ou1rgLFYaMBdbYLBa9oTAEw", + "9ZmY7qCaq7WbrR7RZdHWCNS9FrFRPwRqU84wzWfmqLDz", + "AoxgzXKEsJmUyF5pBb3djn9cJFA26zh2SQHvd9EYijZV", + "9yQecKKYSHxez7fFjJkUvkz42TLmkoXzhyZxEf2pw8pz", + "4V9QuYLpiMu4ZQmhdEHmgATdgiHkDeJfvZi84BfkYcez", + "giWoA4jqHFkodPJgtbRYRcYtiXbsVytnxnEao3QT2gg", + ]); + + + 
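+ // Editor's note (sketch, not part of the original patch): getNftEditions takes
+ // the master edition mint plus paging options. Assuming api::GetNftEditions
+ // mirrors the JSON request used below (the field names here are an assumption,
+ // not confirmed by this patch), the request could equally be built directly:
+ //
+ //     let request = api::GetNftEditions {
+ //         mint_address: "Ey2Qb8kLctbchQsMnhZs5DjY32To2QtPuXNwWvk4NosL".to_string(),
+ //         limit: Some(10),
+ //         ..Default::default()
+ //     };
+ //
+ // The serde_json round-trip below exercises the same wire format that
+ // JSON-RPC clients actually send.
+ 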
apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "mintAddress": "Ey2Qb8kLctbchQsMnhZs5DjY32To2QtPuXNwWvk4NosL", + "limit":10 + } + "#; + + let request: api::GetNftEditions = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_nft_editions(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/show_fungible_flag_tests.rs b/integration_tests/tests/integration_tests/show_fungible_flag_tests.rs new file mode 100644 index 000000000..113e4ab22 --- /dev/null +++ b/integration_tests/tests/integration_tests/show_fungible_flag_tests.rs @@ -0,0 +1,84 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_get_asset_with_show_fungible_scenario_1() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts([ + "Ca84nWhQu41DMRnjdhRrLZty1i9txepMhAhz5qLLGcBw", + "7z6b5TE4WX4mgcQjuNBTDxK4SE75sbgEg5WWJwoUeie8", + "8myaCN6KcKVkMqroXuLJq6QsqRcPbvme4wV5Ubfr5mDC", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "Ca84nWhQu41DMRnjdhRrLZty1i9txepMhAhz5qLLGcBw", + "displayOptions": { + "showFungible": true + } + + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_get_asset_with_show_fungible_scenario_2() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts([ + "AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM", + "7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK", + "8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM", + "displayOptions": { + "showFungible": true + } + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/show_inscription_flag_tests.rs b/integration_tests/tests/integration_tests/show_inscription_flag_tests.rs new file mode 100644 index 000000000..7c5dffdff --- /dev/null +++ b/integration_tests/tests/integration_tests/show_inscription_flag_tests.rs @@ -0,0 +1,46 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_get_asset_with_show_inscription_scenario_1() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec<SeedEvent> = seed_accounts([ + 
"9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ", + "HMixBLSkuhiGgVbcGhqJar476xzu1bC8wM7yHsc1iXwP", + "DarH4z6SmdVzPrt8krAygpLodhdjvNAstP3taj2tysN2", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ", + "displayOptions": { + "showInscription": true + } + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap index fe306fab3..7ede46288 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/account_update_tests.rs expression: response_new_slot +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -62,7 +63,6 @@ expression: response_new_slot "ownership_model": "single", "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" }, - "supply": null, "mutable": false, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap index 5dac7346d..90985a04a 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/account_update_tests.rs expression: response_new_slot +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -62,7 +63,6 @@ expression: response_new_slot "ownership_model": "single", "owner": "1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap index d0978550e..4a314f738 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/account_update_tests.rs expression: setup.das_api.get_asset(request.clone()).await.unwrap() +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -62,7 +63,6 @@ expression: setup.das_api.get_asset(request.clone()).await.unwrap() "ownership_model": "single", "owner": "1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM" }, - "supply": null, "mutable": false, "burnt": false } diff --git 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates.snap index 2deeb3b43..e37b12286 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__account_update_tests__account_updates.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/account_update_tests.rs expression: response +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -62,7 +63,6 @@ expression: response "ownership_model": "single", "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__fungible_token_get_asset_scenario_1.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__fungible_token_get_asset_scenario_1.snap new file mode 100644 index 000000000..16be64a22 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__fungible_token_get_asset_scenario_1.snap @@ -0,0 +1,56 @@ +--- +source: integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "", + "files": [], + "metadata": { + "name": "USD Coin", + "symbol": "USDC" + }, + "links": {} + }, + "authorities": [ + { + "address": "2wmVCSfPxGPjrnMMn7rchp4uaeoTqN39mXFC2zhPdri9", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__fungible_token_get_asset_scenario_2.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__fungible_token_get_asset_scenario_2.snap new file mode 100644 index 000000000..2357ddf57 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__fungible_token_get_asset_scenario_2.snap @@ -0,0 +1,39 @@ +--- +source: integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1", + "authorities": [], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 
0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": false, + "burnt": false +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_1.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_1.snap new file mode 100644 index 000000000..e11162d34 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_1.snap @@ -0,0 +1,62 @@ +--- +source: integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://acme.com/demo.json", + "files": [], + "metadata": { + "name": "DAS Dev", + "symbol": "DAS" + }, + "links": {} + }, + "authorities": [], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false, + "mint_extensions": { + "metadata": { + "uri": "https://acme.com/demo.json", + "mint": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y", + "name": "DAS Dev", + "symbol": "DAS", + "update_authority": "Em34oqDQYQZ9b6ycPHD28K47mttrRsdNu1S1pgK6NtPL", + "additional_metadata": [] + }, + "metadata_pointer": { + "metadata_address": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y" + } + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_2.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_2.snap new file mode 100644 index 000000000..6df591f6c --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_2.snap @@ -0,0 +1,77 @@ +--- +source: integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://token-metadata.paxos.com/usdp_metadata/prod/solana/usdp_metadata.json", + "files": [], + "metadata": { + "name": "Pax Dollar", + "symbol": "USDP" + }, + "links": {} + }, + "authorities": [], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + 
"basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false, + "mint_extensions": { + "metadata": { + "uri": "https://token-metadata.paxos.com/usdp_metadata/prod/solana/usdp_metadata.json", + "mint": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM", + "name": "Pax Dollar", + "symbol": "USDP", + "update_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "additional_metadata": [] + }, + "transfer_hook": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "program_id": null + }, + "metadata_pointer": { + "metadata_address": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM" + }, + "permanent_delegate": { + "delegate": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "mint_close_authority": { + "close_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "confidential_transfer_mint": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "auditor_elgamal_pubkey": null, + "auto_approve_new_accounts": false + } + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_3.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_3.snap new file mode 100644 index 000000000..ba9c9e79f --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__fungibles_and_token_extensions_tests__token_extensions_get_asset_scenario_3.snap @@ -0,0 +1,196 @@ +--- +source: integration_tests/tests/integration_tests/fungibles_and_token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://token-metadata.paxos.com/pyusd_metadata/prod/solana/pyusd_metadata.json", + "files": [], + "metadata": { + "name": "PayPal USD", + "symbol": "PYUSD" + }, + "links": {} + }, + "authorities": [], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false, + "mint_extensions": { + "metadata": { + "uri": "https://token-metadata.paxos.com/pyusd_metadata/prod/solana/pyusd_metadata.json", + "mint": "2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo", + "name": "PayPal USD", + "symbol": "PYUSD", + "update_authority": "9nEfZqzTP3dfVWmzQy54TzsZqSQqDFVW4PhXdG9vYCVD", + "additional_metadata": [] + }, + "transfer_hook": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "program_id": null + }, + "metadata_pointer": { + "metadata_address": "2b1kV6DkPAnxd5ixfnxCpjxmKwqjjaYmCZfHsFu24GXo" + }, + "permanent_delegate": { + "delegate": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "transfer_fee_config": { + "withheld_amount": 0, + "newer_transfer_fee": { + "epoch": 605, + "maximum_fee": 0, + 
"transfer_fee_basis_points": 0 + }, + "older_transfer_fee": { + "epoch": 605, + "maximum_fee": 0, + "transfer_fee_basis_points": 0 + }, + "withdraw_withheld_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "transfer_fee_config_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "mint_close_authority": { + "close_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "confidential_transfer_mint": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "auditor_elgamal_pubkey": null, + "auto_approve_new_accounts": false + }, + "confidential_transfer_fee_config": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "withheld_amount": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "harvest_to_mint_enabled": true, + "withdraw_withheld_authority_elgamal_pubkey": [ + 28, + 55, + 230, + 67, + 59, + 115, + 4, + 221, + 130, + 115, + 122, + 228, + 13, + 155, + 139, + 243, + 196, + 159, + 91, + 14, + 108, + 73, + 168, + 213, + 51, + 40, + 179, + 229, + 6, + 144, + 28, + 87 + ] + } + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap index 27d495e3a..9843eb6b3 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/general_scenario_tests.rs expression: response +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -62,7 +63,6 @@ expression: response "ownership_model": "single", "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap index 2ed4bf732..7b50b3a93 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__general_scenario_tests__creators_reordering.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/general_scenario_tests.rs expression: response +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -77,7 +78,6 @@ expression: response "ownership_model": "single", "owner": "AZgXpkRSetUJHy6C3NBvG6jNe49MpgrkZ2RkdMkjCjkW" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap index 1a8cfd344..552549a56 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap +++ 
b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 487 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap index 4277369f1..2753ea8e2 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_autograph_plugin_with_signature.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 516 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "ACxrDWeCPic6voU6a8vyadpL8nSW15Un5vT76LDpxD4N" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap index 0b9a6039f..a72d1bfc8 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 37 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -62,7 +62,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap index b0d613582..e48244c90 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_binary_data_and_owner_is_data_authority.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 603 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap index ba7f6bca3..d7f66a390 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_json_data_and_update_authority_is_data_authority.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 633 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap index 2031b1797..38619bc7f 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_app_data_with_msg_pack_data_and_address_is_data_authority.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 662 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap index 85e72340b..3fe19c33d 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_binary_data.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 721 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -56,7 +56,6 @@ expression: response "ownership_model": "single", "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap 
b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap index 9524d637f..410235155 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_json_data.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 780 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -56,7 +56,6 @@ expression: response "ownership_model": "single", "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap index 840e75768..0294d65bf 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_data_section_with_msg_pack_data.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 839 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -56,7 +56,6 @@ expression: response "ownership_model": "single", "owner": "HKwwF4sPVYFPgqEgxth4GZRZjJ9o4E3wA8eu2QM5Vt3H" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap index 6333a1260..f86780213 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_edition.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 216 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap index 60e77e446..a941b9087 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_multiple_internal_and_external_plugins.snap @@ -1,7 +1,7 @@ --- source: 
integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 458 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -57,7 +57,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap index ac19d3d3a..08ad3c6dd 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_custom_offset_and_base_address_config.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 361 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap index 40802d2a6..520d97d9b 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_external_plugin_on_collection.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 274 expression: response +snapshot_kind: text --- { "interface": "MplCoreCollection", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap index f42fdf792..9bf617915 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_multiple_lifecycle_events.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 332 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap index 51d44c25c..fcbfab1d2 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_oracle_no_offset.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 390 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap index bfbe6538e..05acfc8ac 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_pubkey_in_rule_set.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 245 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -62,7 +62,6 @@ expression: response "ownership_model": "single", "owner": "8LsUNkb7bLaAcZ7NjRKPuvcyRGTWbm9BxzUpVKjqdV8o" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap index b9d26a436..1da1d1281 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_asset_with_two_oracle_external_plugins.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 274 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap index 5acbdeb5d..4fb1a6c7c 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_authority.snap @@ -1,7 +1,7 @@ --- source: 
integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 108 expression: response +snapshot_kind: text --- { "total": 2, @@ -9,14 +9,14 @@ expression: response "page": 1, "items": [ { - "interface": "MplCoreAsset", - "id": "4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz", + "interface": "MplCoreCollection", + "id": "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci", "content": { "$schema": "https://schema.metaplex.com/nft1.0.json", - "json_uri": "https://example.com/asset", + "json_uri": "https://example.com/collection", "files": [], "metadata": { - "name": "Test Asset", + "name": "Test Collection", "symbol": "" }, "links": {} @@ -39,12 +39,7 @@ expression: response "seq": 0, "leaf_id": 0 }, - "grouping": [ - { - "group_key": "collection", - "group_value": "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci" - } - ], + "grouping": [], "royalty": { "royalty_model": "creators", "target": null, @@ -59,26 +54,27 @@ expression: response "delegated": false, "delegate": null, "ownership_model": "single", - "owner": "GzYvuu9aUYXmnardj4svbAcCNmefiaGu2E3knGw9NJQQ" + "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, "mpl_core_info": { + "num_minted": 1, + "current_size": 1, "plugins_json_version": 1 }, "external_plugins": [] }, { - "interface": "MplCoreCollection", - "id": "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci", + "interface": "MplCoreAsset", + "id": "4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz", "content": { "$schema": "https://schema.metaplex.com/nft1.0.json", - "json_uri": "https://example.com/collection", + "json_uri": "https://example.com/asset", "files": [], "metadata": { - "name": "Test Collection", + "name": "Test Asset", "symbol": "" }, "links": {} @@ -101,7 +97,12 @@ expression: response "seq": 0, "leaf_id": 0 }, - "grouping": [], + "grouping": [ + { + "group_key": "collection", + "group_value": "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci" + } + ], "royalty": { "royalty_model": "creators", "target": null, @@ -116,15 +117,12 @@ expression: response "delegated": false, "delegate": null, "ownership_model": "single", - "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" + "owner": "GzYvuu9aUYXmnardj4svbAcCNmefiaGu2E3knGw9NJQQ" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, "mpl_core_info": { - "num_minted": 1, - "current_size": 1, "plugins_json_version": 1 }, "external_plugins": [] diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap index 431a64ec4..f338c6fb7 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 149 expression: response +snapshot_kind: text --- { "total": 3, @@ -61,7 +61,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, @@ -123,7 +122,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, @@ -185,7 
+183,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap index f9cea5683..a4236f53d 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_group_with_oracle_and_custom_pda_all_seeds.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 429 expression: response +snapshot_kind: text --- { "total": 1, @@ -61,7 +61,6 @@ expression: response "ownership_model": "single", "owner": "FAe4nM85BQ8b1nWEc5TTeogQGnNDWsuGYU84vuiPc7kE" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap index 7b190b8a9..946d8a9af 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_assets_by_owner.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 187 expression: response +snapshot_kind: text --- { "total": 1, @@ -61,7 +61,6 @@ expression: response "ownership_model": "single", "owner": "7uScVQiT4vArB88dHrZoeVKWbtsRJmNp9r5Gce5VQpXS" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap index 1d854ee47..14a95dbb2 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 66 expression: response +snapshot_kind: text --- { "interface": "MplCoreCollection", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap index 38d0e3771..4e3b7c0c2 100644 --- 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_binary_data_and_address_is_data_authority.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 692 expression: response +snapshot_kind: text --- { "interface": "MplCoreCollection", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap index b7ef57f77..2ed4afdc1 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_json_data_and_owner_is_data_authority.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 751 expression: response +snapshot_kind: text --- { "interface": "MplCoreCollection", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap index ff0cfbdbf..526a7a545 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_get_collection_with_linked_app_data_with_msg_pack_data_and_update_authority_is_data_authority.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 810 expression: response +snapshot_kind: text --- { "interface": "MplCoreCollection", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": {}, diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap index 6387d404c..cbd495062 100644 --- 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 545 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap index 96428d6a1..36b1cd541 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__mpl_core_tests__mpl_core_verified_creators_plugin_with_signature.snap @@ -1,7 +1,7 @@ --- source: integration_tests/tests/integration_tests/mpl_core_tests.rs -assertion_line: 574 expression: response +snapshot_kind: text --- { "interface": "MplCoreAsset", @@ -51,7 +51,6 @@ expression: response "ownership_model": "single", "owner": "D9SSaw4tz5AGpfWynYJ66jDCVNTsbLBkqT8rxQFenqj4" }, - "supply": null, "mutable": true, "burnt": false, "plugins": { diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__nft_editions_tests__get_nft_editions.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__nft_editions_tests__get_nft_editions.snap new file mode 100644 index 000000000..d67779b90 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__nft_editions_tests__get_nft_editions.snap @@ -0,0 +1,24 @@ +--- +source: integration_tests/tests/integration_tests/nft_editions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "total": 2, + "limit": 10, + "master_edition_address": "8SHfqzJYABeGfiG1apwiEYt6TvfGQiL1pdwEjvTKsyiZ", + "supply": 60, + "max_supply": 69, + "editions": [ + { + "mint_address": "GJvFDcBWf6aDncd1TBzx2ou1rgLFYaMBdbYLBa9oTAEw", + "edition_address": "AoxgzXKEsJmUyF5pBb3djn9cJFA26zh2SQHvd9EYijZV", + "edition_number": 1 + }, + { + "mint_address": "9yQecKKYSHxez7fFjJkUvkz42TLmkoXzhyZxEf2pw8pz", + "edition_address": "giWoA4jqHFkodPJgtbRYRcYtiXbsVytnxnEao3QT2gg", + "edition_number": 2 + } + ] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap index c62b3de1c..befc56b19 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/regular_nft_tests.rs expression: response +snapshot_kind: text --- { "interface": "ProgrammableNFT", @@ -67,7 +68,6 @@ expression: response "ownership_model": "single", "owner": "A59E2tNJEqNN9TDnzgGnmLmnTsdRDoPocGx3n1w2dqZw" }, - "supply": null, "mutable": true, "burnt": false } diff 
--git a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap index 0825ec1b2..2f859cbb6 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/regular_nft_tests.rs expression: response +snapshot_kind: text --- [ { @@ -68,7 +69,6 @@ expression: response "ownership_model": "single", "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" }, - "supply": null, "mutable": true, "burnt": false }, @@ -138,7 +138,6 @@ expression: response "ownership_model": "single", "owner": "3H3d3hfpZVVdVwuFAxDtDSFN2AdR7kwiDA3ynbnbkhc9" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap index 36c9961f9..565f843d4 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/regular_nft_tests.rs expression: response +snapshot_kind: text --- [ { @@ -68,7 +69,6 @@ expression: response "ownership_model": "single", "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" }, - "supply": null, "mutable": true, "burnt": false }, @@ -137,7 +137,6 @@ expression: response "ownership_model": "single", "owner": "3H3d3hfpZVVdVwuFAxDtDSFN2AdR7kwiDA3ynbnbkhc9" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap index f07052af1..7647ef26c 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/regular_nft_tests.rs expression: response +snapshot_kind: text --- [ { @@ -68,7 +69,6 @@ expression: response "ownership_model": "single", "owner": "BaBQKh34KrqZzd4ifSHQYMf86HiBGASN6TWUi1ZwfyKv" }, - "supply": null, "mutable": true, "burnt": false }, @@ -137,7 +137,6 @@ expression: response "ownership_model": "single", "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap index 87920f33b..ce44d4bc4 100644 --- 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap @@ -1,6 +1,7 @@ --- source: integration_tests/tests/integration_tests/regular_nft_tests.rs expression: response +snapshot_kind: text --- { "total": 1, @@ -77,7 +78,6 @@ expression: response "ownership_model": "single", "owner": "9qUcfdADyrrTSetFjNjF9Ro7LKAqzJkzZV6WKLHfv5MU" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap index bd321f698..415fdbb43 100644 --- a/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap @@ -72,7 +72,6 @@ expression: response "ownership_model": "single", "owner": "6Cr66AabRYymhZgYQSfTCo6FVpH18wXrMZswAbcErpyX" }, - "supply": null, "mutable": true, "burnt": false } diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__show_fungible_flag_tests__get_asset_with_show_fungible_scenario_1.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__show_fungible_flag_tests__get_asset_with_show_fungible_scenario_1.snap new file mode 100644 index 000000000..b0a8c7016 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__show_fungible_flag_tests__get_asset_with_show_fungible_scenario_1.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/tests/integration_tests/show_fungible_flag_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "Ca84nWhQu41DMRnjdhRrLZty1i9txepMhAhz5qLLGcBw", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://nftstorage.link/ipfs/bafkreibmdapcawep5fb77lvcuvoivft3w3wsnc4qworrntjbg6abc5vwti", + "files": [], + "metadata": { + "name": "Claynosaurz #1096", + "symbol": "DINO", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "B7B2g3WbdZMDV3YcDGRGhEt5KyWqDJZFwRR8zpWVEkUF", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "6mszaj17KSfVqADrQj3o4W3zoLMTykgmV37W4QadCczK" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "AoebZtN5iKpVyUBc82aouWhugVknLzjUmEEUezxviYNo", + "share": 0, + "verified": true + }, + { + "address": "36tfiBtaDGjAMKd6smPacHQhe4MXycLL6f9ww9CD1naT", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "J1ep1LizHMU3Bf1GKkWePGHU3Qwwzw6FvwW5ySFWdCkn" + }, + "mutable": true, + "burnt": false, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "EwSfdqoZPBW6JJN1SBkM2pPgpihDVbByuxKdmiXurxYF", + 
"freeze_authority": "EwSfdqoZPBW6JJN1SBkM2pPgpihDVbByuxKdmiXurxYF" + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__show_fungible_flag_tests__get_asset_with_show_fungible_scenario_2.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__show_fungible_flag_tests__get_asset_with_show_fungible_scenario_2.snap new file mode 100644 index 000000000..8b1ca31c0 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__show_fungible_flag_tests__get_asset_with_show_fungible_scenario_2.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/tests/integration_tests/show_fungible_flag_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/1983.json", + "files": [], + "metadata": { + "name": "Mad Lads #1983", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "BPB5idZgbA1DG4XEmnKs62AADRZFf3jY7Kr9mpMGyKPi", + "ownership_model": "single", + "owner": "GqPnSDXwp4JFtKS7YZ2HERgBbYLKpKVYy9TpVunzLRa9" + }, + "mutable": true, + "burnt": false, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "FAzbjKo66M3tKhkKqegmWFaYr93FB74B1ChEBdFyKcip", + "freeze_authority": "FAzbjKo66M3tKhkKqegmWFaYr93FB74B1ChEBdFyKcip" + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__show_inscription_flag_tests__get_asset_with_show_inscription_scenario_1.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__show_inscription_flag_tests__get_asset_with_show_inscription_scenario_1.snap new file mode 100644 index 000000000..62373341d --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__show_inscription_flag_tests__get_asset_with_show_inscription_scenario_1.snap @@ -0,0 +1,78 @@ +--- +source: integration_tests/tests/integration_tests/show_inscription_flag_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "V1_NFT", + "id": "9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://arweave.net/qJdjeP8XFfYIG6z5pJ-3RsZR2EcbgILX_ot-b2fEC0g", + "files": [], + "metadata": { + "name": "punk2491", + "symbol": "Symbol", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": 
"BVk6Bvxa9v6Y32o7KGPhYV4CU9pmG2K7nAYc7mDejsGM", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.08, + "basis_points": 800, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "BVk6Bvxa9v6Y32o7KGPhYV4CU9pmG2K7nAYc7mDejsGM", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 252 + }, + "mutable": true, + "burnt": false, + "inscription": { + "authority": "11111111111111111111111111111111", + "root": "9FkS3kZV4MoGps14tUSp7iVnizGbxcK4bDEhSoF5oYAZ", + "inscription_data": "4Q18N6XrfJHgDbRTaHJR328jN9dixCLQAQhDsTsRzg3v", + "content": "image/net/riupjyro3lsvkb_listajh0jdsrsjmnyhusxvznycqw", + "encoding": "base64", + "order": 1733, + "size": 202, + "validation_hash": "7fa0041483b92f5a0448067ecef9beca2192b13bfe86fbd53a0024e84fcea652" + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__test_get_assets_with_multiple_same_ids__get_assets_with_multiple_same_ids.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_get_assets_with_multiple_same_ids__get_assets_with_multiple_same_ids.snap new file mode 100644 index 000000000..e856e4c02 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_get_assets_with_multiple_same_ids__get_assets_with_multiple_same_ids.snap @@ -0,0 +1,138 @@ +--- +source: integration_tests/tests/integration_tests/test_get_assets_with_multiple_same_ids.rs +expression: response +snapshot_kind: text +--- +[ + { + "interface": "ProgrammableNFT", + "id": "F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/8420.json", + "files": [], + "metadata": { + "name": "Mad Lads #8420", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "" + }, + "mutable": true, + "burnt": false + }, + { + "interface": "V1_NFT", + "id": "JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": 
"https://nftstorage.link/ipfs/bafkreidgvjw2atmkw2jzkkfi56arfrzaicrebzw5xwfkz3b67fq5gbvlre", + "files": [], + "metadata": { + "name": "TURTLES", + "symbol": "TURTLES", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "J4tNLYMTegHE9nVjpRM17tf1EYwJnA9Crfn3KytRNcGv", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "J4tNLYMTegHE9nVjpRM17tf1EYwJnA9Crfn3KytRNcGv", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, + "mutable": false, + "burnt": false + } +] diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_collection_metadata_option__get_asset_with_show_collection_metadata_option.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_collection_metadata_option__get_asset_with_show_collection_metadata_option.snap new file mode 100644 index 000000000..e49fdf0cd --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_collection_metadata_option__get_asset_with_show_collection_metadata_option.snap @@ -0,0 +1,74 @@ +--- +source: integration_tests/tests/integration_tests/test_show_collection_metadata_option.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads.s3.us-west-2.amazonaws.com/json/1983.json", + "files": [], + "metadata": { + "name": "Mad Lads #1983", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "collection_metadata": {} + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.042, + "basis_points": 420, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "5XvhfmRjwXkGp3jHGmaKpqeerNYjkuZZBYLVQYdeVcRv", + "share": 0, + "verified": true + }, + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "BPB5idZgbA1DG4XEmnKs62AADRZFf3jY7Kr9mpMGyKPi", + "ownership_model": "single", + "owner": "GqPnSDXwp4JFtKS7YZ2HERgBbYLKpKVYy9TpVunzLRa9" + }, + "mutable": true, + "burnt": false +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_zero_balance_filter__show_zero_balance_filter_being_disabled.snap 
b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_zero_balance_filter__show_zero_balance_filter_being_disabled.snap new file mode 100644 index 000000000..f2a5d1a10 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_zero_balance_filter__show_zero_balance_filter_being_disabled.snap @@ -0,0 +1,24 @@ +--- +source: integration_tests/tests/integration_tests/test_show_zero_balance_filter.rs +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 1000, + "token_accounts": [ + { + "address": "CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a", + "mint": "7Y5WQ2e3FummR2DebrqP8caC64QvXzpnhSTNWjNabxWn", + "amount": 1, + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "frozen": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "delegated_amount": 1, + "close_authority": null, + "extensions": null + } + ], + "cursor": null, + "errors": [] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_zero_balance_filter__show_zero_balance_filter_being_enabled.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_zero_balance_filter__show_zero_balance_filter_being_enabled.snap new file mode 100644 index 000000000..c628cdebb --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__test_show_zero_balance_filter__show_zero_balance_filter_being_enabled.snap @@ -0,0 +1,35 @@ +--- +source: integration_tests/tests/integration_tests/test_show_zero_balance_filter.rs +expression: response +snapshot_kind: text +--- +{ + "total": 2, + "limit": 1000, + "token_accounts": [ + { + "address": "BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS", + "mint": "JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN", + "amount": 0, + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "frozen": false, + "delegate": null, + "delegated_amount": 0, + "close_authority": null, + "extensions": null + }, + { + "address": "CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a", + "mint": "7Y5WQ2e3FummR2DebrqP8caC64QvXzpnhSTNWjNabxWn", + "amount": 1, + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "frozen": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "delegated_amount": 1, + "close_authority": null, + "extensions": null + } + ], + "cursor": null, + "errors": [] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_accounts_tests__get_token_accounts_by_mint.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_accounts_tests__get_token_accounts_by_mint.snap new file mode 100644 index 000000000..461483ad2 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_accounts_tests__get_token_accounts_by_mint.snap @@ -0,0 +1,24 @@ +--- +source: integration_tests/tests/integration_tests/token_accounts_tests.rs +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 1000, + "token_accounts": [ + { + "address": "jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2", + "mint": "wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1", + "amount": 1000000000000, + "owner": "CeviT1DTQLuicEB7yLeFkkAGmam5GnJssbGb7CML4Tgx", + "frozen": false, + "delegate": null, + "delegated_amount": 0, + "close_authority": null, + "extensions": null + } + ], + "cursor": null, + "errors": [] +} diff --git 
a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_accounts_tests__get_token_accounts_by_owner.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_accounts_tests__get_token_accounts_by_owner.snap new file mode 100644 index 000000000..e99e05b5e --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_accounts_tests__get_token_accounts_by_owner.snap @@ -0,0 +1,35 @@ +--- +source: integration_tests/tests/integration_tests/token_accounts_tests.rs +expression: response +snapshot_kind: text +--- +{ + "total": 2, + "limit": 1000, + "token_accounts": [ + { + "address": "jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2", + "mint": "wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1", + "amount": 1000000000000, + "owner": "CeviT1DTQLuicEB7yLeFkkAGmam5GnJssbGb7CML4Tgx", + "frozen": false, + "delegate": null, + "delegated_amount": 0, + "close_authority": null, + "extensions": null + }, + { + "address": "3Pv9H5UzU8T9BwgutXrcn2wLohS1JUZuk3x8paiRyzui", + "mint": "F3D8Priw3BRecH36BuMubQHrTUn1QxmupLHEmmbZ4LXW", + "amount": 10000, + "owner": "CeviT1DTQLuicEB7yLeFkkAGmam5GnJssbGb7CML4Tgx", + "frozen": false, + "delegate": null, + "delegated_amount": 0, + "close_authority": null, + "extensions": null + } + ], + "cursor": null, + "errors": [] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_extensions_tests__token_extensions_get_asset_scenario1.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_extensions_tests__token_extensions_get_asset_scenario1.snap new file mode 100644 index 000000000..3e6f1d5a1 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_extensions_tests__token_extensions_get_asset_scenario1.snap @@ -0,0 +1,53 @@ +--- +source: integration_tests/tests/integration_tests/token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y", + "authorities": [], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false, + "mint_extensions": { + "metadata": { + "uri": "https://acme.com/demo.json", + "mint": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y", + "name": "DAS Dev", + "symbol": "DAS", + "update_authority": "Em34oqDQYQZ9b6ycPHD28K47mttrRsdNu1S1pgK6NtPL", + "additional_metadata": [] + }, + "metadata_pointer": { + "authority": "Em34oqDQYQZ9b6ycPHD28K47mttrRsdNu1S1pgK6NtPL", + "metadata_address": "BPU5vrAHafRuVeK33CgfdwTKSsmC4p6t3aqyav3cFF7Y" + } + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_extensions_tests__token_extensions_get_asset_scenario2.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_extensions_tests__token_extensions_get_asset_scenario2.snap new file mode 100644 index 000000000..d56a5b03f --- /dev/null +++ 
b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_extensions_tests__token_extensions_get_asset_scenario2.snap @@ -0,0 +1,68 @@ +--- +source: integration_tests/tests/integration_tests/token_extensions_tests.rs +expression: response +snapshot_kind: text +--- +{ + "interface": "FungibleToken", + "id": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM", + "authorities": [], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false, + "mint_extensions": { + "metadata": { + "uri": "https://token-metadata.paxos.com/usdp_metadata/prod/solana/usdp_metadata.json", + "mint": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM", + "name": "Pax Dollar", + "symbol": "USDP", + "update_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "additional_metadata": [] + }, + "transfer_hook": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "program_id": null + }, + "metadata_pointer": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "metadata_address": "HVbpJAQGNpkgBaYBZQBR1t7yFdvaYVp2vCQQfKKEN4tM" + }, + "permanent_delegate": { + "delegate": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "mint_close_authority": { + "close_authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk" + }, + "confidential_transfer_mint": { + "authority": "2apBGMsS6ti9RyF5TwQTDswXBWskiJP2LD4cUEDqYJjk", + "auditor_elgamal_pubkey": null, + "auto_approve_new_accounts": false + } + } +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_all.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_all.snap new file mode 100644 index 000000000..9a0a55094 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_all.snap @@ -0,0 +1,148 @@ +--- +source: integration_tests/tests/integration_tests/token_type_test.rs +expression: response +snapshot_kind: text +--- +{ + "total": 2, + "limit": 2, + "page": 1, + "items": [ + { + "interface": "ProgrammableNFT", + "id": "42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://cdn.hellomoon.io/public/silicons/metadata/2835.json", + "files": [], + "metadata": { + "name": "SILICON #2835", + "symbol": "SILI", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "A2QW89tFNDkkdvJv671tdknAyA21u6hvS7HTUyeMWnf3", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HS1oygRKNBG1nMqjSmaBXSQqQ7apWr14gUU4pW3aDMCP" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + 
"address": "8X2e7Lf3wmA9RPHpPH73kmTqqHHyZE9BcED6Y6TWaZCx", + "share": 0, + "verified": true + }, + { + "address": "5bTgyaCCRNCem3DZXxdRREyesduc6adqwks8rRWGXx8D", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "ownership_model": "single", + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g" + }, + "mutable": true, + "burnt": false + }, + { + "interface": "ProgrammableNFT", + "id": "8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://cdn.hellomoon.io/public/silicons/metadata/1466.json", + "files": [], + "metadata": { + "name": "SILICON #1466", + "symbol": "SILI", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "A2QW89tFNDkkdvJv671tdknAyA21u6hvS7HTUyeMWnf3", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HS1oygRKNBG1nMqjSmaBXSQqQ7apWr14gUU4pW3aDMCP" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8X2e7Lf3wmA9RPHpPH73kmTqqHHyZE9BcED6Y6TWaZCx", + "share": 0, + "verified": true + }, + { + "address": "5bTgyaCCRNCem3DZXxdRREyesduc6adqwks8rRWGXx8D", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "ownership_model": "single", + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g" + }, + "mutable": true, + "burnt": false + } + ] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_compressed.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_compressed.snap new file mode 100644 index 000000000..59c7dbe8d --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_compressed.snap @@ -0,0 +1,80 @@ +--- +source: integration_tests/tests/integration_tests/token_type_test.rs +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 2, + "page": 1, + "items": [ + { + "interface": "V1_NFT", + "id": "7myVr8fEG52mZ3jAwgz88iQRWsuzuVR2nfH8n2AXnBxE", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://arweave.net/S40vvBVuCvZhAWI3kvk3QreUqVAvR0AaUDObOhB8WIY", + "files": [], + "metadata": { + "name": "Golden Azurite", + "symbol": "OEAs", + "token_standard": "NonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "EDAR6p4AUbv9SpD1pDm3gxdSAivdqsHxsf6V9pBc532U", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": true, + "data_hash": "B2tu4duCUPequnXh7DxbMnLeLcACHbDCQn3g34s5Cvbx", + "creator_hash": "6UiSCAv4r66MALaqhNE7qdTK84qKk1yJqR4UYtT8qEQ1", + "asset_hash": "8EBnZHUKKB2Vdef34H4L3nxYKkf5RPwCkoHBVLAAM2zN", + "tree": "4r2zZHZvC4Se1KUcCcyCM4ZoFQNGZm2M5FMmUypFocAP", + "seq": 39, + "leaf_id": 5 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "BwwjnxTHeVWdFieDWmoezta19q1NiwcNNyoon9S38bkM" + } + ], + 
"royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "4gETqgEwFLkXX9yk6qBszA6LMjC2kRyyERXsAr2rwhwf", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "53VVFtLzzi3nL2p1QF591PAB8rbcbsirYepwUphtHU9Q" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": null + }, + "mutable": true, + "burnt": false + } + ] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_fungible.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_fungible.snap new file mode 100644 index 000000000..3a0362762 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_fungible.snap @@ -0,0 +1,64 @@ +--- +source: integration_tests/tests/integration_tests/token_type_test.rs +expression: response +snapshot_kind: text +--- +{ + "total": 1, + "limit": 1, + "page": 1, + "items": [ + { + "interface": "FungibleToken", + "id": "7EYnhQoR9YM3N7UoaKRoA44Uy8JeaZV3qyouov87awMs", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://gateway.irys.xyz/P8X64pGutyX5eyTpQmqZr3H4_Lqhm0IYxr5SyzFFNek", + "files": [], + "metadata": { + "name": "Silly Dragon", + "symbol": "SILLY", + "token_standard": "Fungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "38qZKCqcphT5wDrVNJGHYcuenjEtEFPitvrqvMFQkPu7", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": true, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "mutable": true, + "burnt": false + } + ] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_non_fungible.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_non_fungible.snap new file mode 100644 index 000000000..3b14b1809 --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_non_fungible.snap @@ -0,0 +1,148 @@ +--- +source: integration_tests/tests/integration_tests/token_type_test.rs +expression: response +snapshot_kind: text +--- +{ + "total": 2, + "limit": 2, + "page": 1, + "items": [ + { + "interface": "ProgrammableNFT", + "id": "AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://cdn.hellomoon.io/public/silicons/metadata/4515.json", + "files": [], + "metadata": { + "name": "SILICON #4515", + "symbol": "SILI", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "A2QW89tFNDkkdvJv671tdknAyA21u6hvS7HTUyeMWnf3", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + 
"compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HS1oygRKNBG1nMqjSmaBXSQqQ7apWr14gUU4pW3aDMCP" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8X2e7Lf3wmA9RPHpPH73kmTqqHHyZE9BcED6Y6TWaZCx", + "share": 0, + "verified": true + }, + { + "address": "5bTgyaCCRNCem3DZXxdRREyesduc6adqwks8rRWGXx8D", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "ownership_model": "single", + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g" + }, + "mutable": true, + "burnt": false + }, + { + "interface": "ProgrammableNFT", + "id": "8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://cdn.hellomoon.io/public/silicons/metadata/1466.json", + "files": [], + "metadata": { + "name": "SILICON #1466", + "symbol": "SILI", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "A2QW89tFNDkkdvJv671tdknAyA21u6hvS7HTUyeMWnf3", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HS1oygRKNBG1nMqjSmaBXSQqQ7apWr14gUU4pW3aDMCP" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8X2e7Lf3wmA9RPHpPH73kmTqqHHyZE9BcED6Y6TWaZCx", + "share": 0, + "verified": true + }, + { + "address": "5bTgyaCCRNCem3DZXxdRREyesduc6adqwks8rRWGXx8D", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "ownership_model": "single", + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g" + }, + "mutable": true, + "burnt": false + } + ] +} diff --git a/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_regular_nft.snap b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_regular_nft.snap new file mode 100644 index 000000000..d8ee5ff5b --- /dev/null +++ b/integration_tests/tests/integration_tests/snapshots/integration_tests__token_type_test__search_asset_with_token_type_regular_nft.snap @@ -0,0 +1,148 @@ +--- +source: integration_tests/tests/integration_tests/token_type_test.rs +expression: response +snapshot_kind: text +--- +{ + "total": 2, + "limit": 2, + "page": 1, + "items": [ + { + "interface": "ProgrammableNFT", + "id": "42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://cdn.hellomoon.io/public/silicons/metadata/2835.json", + "files": [], + "metadata": { + "name": "SILICON #2835", + "symbol": "SILI", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "A2QW89tFNDkkdvJv671tdknAyA21u6hvS7HTUyeMWnf3", + "scopes": [ + "full" + ] + } + ], + 
"compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HS1oygRKNBG1nMqjSmaBXSQqQ7apWr14gUU4pW3aDMCP" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8X2e7Lf3wmA9RPHpPH73kmTqqHHyZE9BcED6Y6TWaZCx", + "share": 0, + "verified": true + }, + { + "address": "5bTgyaCCRNCem3DZXxdRREyesduc6adqwks8rRWGXx8D", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "ownership_model": "single", + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g" + }, + "mutable": true, + "burnt": false + }, + { + "interface": "ProgrammableNFT", + "id": "2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://cdn.hellomoon.io/public/silicons/metadata/4685.json", + "files": [], + "metadata": { + "name": "SILICON #4685", + "symbol": "SILI", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "A2QW89tFNDkkdvJv671tdknAyA21u6hvS7HTUyeMWnf3", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [ + { + "group_key": "collection", + "group_value": "HS1oygRKNBG1nMqjSmaBXSQqQ7apWr14gUU4pW3aDMCP" + } + ], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": true, + "locked": false + }, + "creators": [ + { + "address": "8X2e7Lf3wmA9RPHpPH73kmTqqHHyZE9BcED6Y6TWaZCx", + "share": 0, + "verified": true + }, + { + "address": "5bTgyaCCRNCem3DZXxdRREyesduc6adqwks8rRWGXx8D", + "share": 100, + "verified": false + } + ], + "ownership": { + "frozen": true, + "delegated": true, + "delegate": "D98f1ebFe6kfZTcztLo1iPeKAwogbWHAgXzgSpdRDiu7", + "ownership_model": "single", + "owner": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g" + }, + "mutable": true, + "burnt": false + } + ] +} diff --git a/integration_tests/tests/integration_tests/test_get_assets_with_multiple_same_ids.rs b/integration_tests/tests/integration_tests/test_get_assets_with_multiple_same_ids.rs new file mode 100644 index 000000000..aebb1c8c2 --- /dev/null +++ b/integration_tests/tests/integration_tests/test_get_assets_with_multiple_same_ids.rs @@ -0,0 +1,42 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_get_assets_with_multiple_same_ids() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new(name.clone()).await; + + let seeds: Vec = seed_accounts([ + "F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk", + "DZAZ3mGuq7nCYGzUyw4MiA74ysr15EfqLpzCzX2cRVng", + "JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X", + "2ecGsTKbj7FecLwxTHaodZRFwza7m7LamqDG4YjczZMj", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ids": [ + "F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk", + 
"F9Lw3ki3hJ7PF9HQXsBzoY8GyE6sPoEZZdXJBsTTD2rk", + "JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X", + "JEKKtnGvjiZ8GtATnMVgadHU41AuTbFkMW8oD2tdyV9X" + ] + } + "#; + + let request: api::GetAssets = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_assets(request).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/test_show_collection_metadata_option.rs b/integration_tests/tests/integration_tests/test_show_collection_metadata_option.rs new file mode 100644 index 000000000..688448f51 --- /dev/null +++ b/integration_tests/tests/integration_tests/test_show_collection_metadata_option.rs @@ -0,0 +1,50 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] + +async fn test_get_asset_with_show_collection_metadata_option() { + let name = trim_test_name(function_name!()); + + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + "AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM", + "7fXKY9tPpvYsdbSNyesUqo27WYC6ZsBEULdtngGHqLCK", + "8Xv3SpX94HHf32Apg4TeSeS3i2p6wuXeE8FBZr168Hti", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "AH6wj7T8Ke5nbukjtcobjjs1CDWUcQxndtnLkKAdrSrM", + "displayOptions" : { + "showCollectionMetadata": true + } + } + "#; + + let request: api::GetAsset = serde_json::from_str(request).unwrap(); + + let response = setup.das_api.get_asset(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/test_show_zero_balance_filter.rs b/integration_tests/tests/integration_tests/test_show_zero_balance_filter.rs new file mode 100644 index 000000000..e19f18d71 --- /dev/null +++ b/integration_tests/tests/integration_tests/test_show_zero_balance_filter.rs @@ -0,0 +1,83 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_show_zero_balance_filter_being_enabled() { + let name = trim_test_name(function_name!()); + + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + "BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS", + "CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress":"2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "displayOptions": { + "showZeroBalance": true + } + } + "#; + + let request: api::GetTokenAccounts = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_token_accounts(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_show_zero_balance_filter_being_disabled() { + let name = trim_test_name(function_name!()); + + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + 
"BE1CkzRjLTXAWcSVCaqzycwXsZ18Yuk3jMDMnPUoHjjS", + "CyqarC6hyNYvb3EDueyeYrnGeAUjCDtMvWrbtdAnA53a", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress":"2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "displayOptions": { + "showZeroBalance": false + } + } + "#; + + let request: api::GetTokenAccounts = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_token_accounts(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/token_accounts_tests.rs b/integration_tests/tests/integration_tests/token_accounts_tests.rs new file mode 100644 index 000000000..8a465e914 --- /dev/null +++ b/integration_tests/tests/integration_tests/token_accounts_tests.rs @@ -0,0 +1,74 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_get_token_accounts_by_mint() { + let name = trim_test_name(function_name!()); + + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + }, + ) + .await; + + let seeds: Vec = seed_accounts(["jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2"]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "mintAddress":"wKocBVvHQoVaiwWoCs9JYSVye4YZRrv5Cucf7fDqnz1" + } + "#; + + let request: api::GetTokenAccounts = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_token_accounts(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_get_token_accounts_by_owner() { + let name = trim_test_name(function_name!()); + + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Devnet), + }, + ) + .await; + + let seeds: Vec = seed_accounts([ + "jKLTJu7nE1zLmC2J2xjVVBm4G7vJcKGCGQX36Jrsba2", + "3Pv9H5UzU8T9BwgutXrcn2wLohS1JUZuk3x8paiRyzui", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress":"CeviT1DTQLuicEB7yLeFkkAGmam5GnJssbGb7CML4Tgx" + } + "#; + + let request: api::GetTokenAccounts = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_token_accounts(request).await.unwrap(); + + insta::assert_json_snapshot!(name, response); +} diff --git a/integration_tests/tests/integration_tests/token_type_test.rs b/integration_tests/tests/integration_tests/token_type_test.rs new file mode 100644 index 000000000..260d92567 --- /dev/null +++ b/integration_tests/tests/integration_tests/token_type_test.rs @@ -0,0 +1,185 @@ +use function_name::named; + +use das_api::api::{self, ApiContract}; + +use itertools::Itertools; + +use serial_test::serial; + +use super::common::*; + +#[tokio::test] +#[serial] +#[named] +async fn test_search_asset_with_token_type_regular_nft() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_nfts([ + "42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB", + "2w81QrLYTwSDkNwXgCqKAwrC1Tu6R9mh9BHcxys2Bup2", + ]); + + 
apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "page": 1, + "limit": 2, + "tokenType": "Nft" + } + "#; + + let request: api::SearchAssets = serde_json::from_str(request).unwrap(); + let response = setup.das_api.search_assets(request).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_search_asset_with_token_type_non_fungible() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_nfts([ + "AH6VcoSbCGGv8BHeN7K766VUWMcdFRTaXpLvGTLSdAmk", + "8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "page": 1, + "limit": 2, + "tokenType": "NonFungible" + } + "#; + + let request: api::SearchAssets = serde_json::from_str(request).unwrap(); + let response = setup.das_api.search_assets(request).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_search_asset_with_token_type_compressed() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_txns([ + "4nKDSvw2kGpccZWLEPnfdP7J1SEexQFRP3xWc9NBtQ1qQeGu3bu5WnAdpcLbjQ4iyX6BQ5QGF69wevE8ZeeY5poA", + "4URwUGBjbsF7UBUYdSC546tnBy7nD67txsso8D9CR9kGLtbbYh9NkGw15tEp16LLasmJX5VQR4Seh8gDjTrtdpoC", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "53VVFtLzzi3nL2p1QF591PAB8rbcbsirYepwUphtHU9Q", + "page": 1, + "limit": 2, + "tokenType": "Compressed" + } + "#; + + let request: api::SearchAssets = serde_json::from_str(request).unwrap(); + let response = setup.das_api.search_assets(request).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_search_asset_with_token_type_all() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_nfts([ + "42AYryUGNmJMe9ycBXZekkYvdTehgbtECHs7SLu5JJTB", + "8t77ShMViat27Sjphvi1FVPaGrhFcttPAkEnLCFp49Bo", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "page": 1, + "limit": 2, + "tokenType": "All" + } + "#; + + let request: api::SearchAssets = serde_json::from_str(request).unwrap(); + let response = setup.das_api.search_assets(request).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} + +#[tokio::test] +#[serial] +#[named] +async fn test_search_asset_with_token_type_fungible() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { + network: Some(Network::Mainnet), + }, + ) + .await; + + let seeds: Vec = seed_accounts([ 
+ "7EYnhQoR9YM3N7UoaKRoA44Uy8JeaZV3qyouov87awMs", + "7BajpcYgnxmWK91RhrfsdB3Tm83PcDwPvMC8ZinvtTY6", + "6BRNfDfdq1nKyU1TQiCEQLWyPtD8EwUH9Kt2ahsbidUx", + ]); + + apply_migrations_and_delete_data(setup.db.clone()).await; + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "ownerAddress": "2oerfxddTpK5hWAmCMYB6fr9WvNrjEH54CHCWK8sAq7g", + "page": 1, + "limit": 1, + "tokenType": "Fungible" + } + "#; + + let request: api::SearchAssets = serde_json::from_str(request).unwrap(); + let response = setup.das_api.search_assets(request).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} diff --git a/metadata_json/Cargo.toml b/metadata_json/Cargo.toml new file mode 100644 index 000000000..81e5bd9d5 --- /dev/null +++ b/metadata_json/Cargo.toml @@ -0,0 +1,67 @@ +[package] +name = "das-metadata-json" +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +publish = { workspace = true } + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] + +[[bin]] +name = "das-metadata-json" + +[dependencies] + +anyhow = { workspace = true } +backon = "0.4.1" +bs58 = { workspace = true } +cadence = { workspace = true } +cadence-macros = { workspace = true } +chrono = { workspace = true } +clap = { workspace = true, features = ["derive", "cargo", "env"] } +das-core = { workspace = true } +digital_asset_types = { workspace = true, features = [ + "json_types", + "sql_types", +] } +derive_more = { version = "0.99.17" } +env_logger = { workspace = true } +figment = { workspace = true, features = ["env", "toml", "yaml"] } +futures = { workspace = true } +indicatif = "0.17.5" +log = { workspace = true } +plerkle_messenger = { workspace = true, features = ['redis'] } +rand = { workspace = true } +redis = { version = "0.25.2", features = [ + "aio", + "tokio-comp", + "streams", + "tokio-native-tls-comp", +] } +reqwest = { workspace = true } +sea-orm = { workspace = true, features = [ + "macros", + "runtime-tokio-rustls", + "sqlx-postgres", + "with-chrono", + "mock", +] } +sea-query = { workspace = true, features = ["postgres-array"] } +serde = { workspace = true } +serde_json = { workspace = true } +sqlx = { workspace = true, features = [ + "macros", + "runtime-tokio-rustls", + "postgres", + "uuid", + "offline", + "json", +] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full", "tracing"] } +tokio-stream = { workspace = true } +url = { workspace = true } + +[lints] +workspace = true diff --git a/metadata_json/README.md b/metadata_json/README.md new file mode 100644 index 000000000..5e5754297 --- /dev/null +++ b/metadata_json/README.md @@ -0,0 +1,102 @@ +# DAS Metadata JSON Indexer CLI + +## Overview +The DAS Metadata JSON Indexer CLI is a tool for indexing metadata JSON associated with tokens. It supports operations such as ingesting new metadata and backfilling missing metadata, as well as providing metrics and performance tuning options. + +## Features +- **Ingest**: Process and index new metadata JSON files with various configuration options. +- **Backfill**: Fill in missing metadata for previously indexed tokens with configurable parameters. +- **Metrics**: Collect and send metrics to a specified host and port. + +## Installation +Ensure you have Rust installed on your machine. If not, install it from [the official Rust website](https://www.rust-lang.org/). 
+ + +``` +cargo run --bin das-metadata-json -- --help +``` + +## Usage + +### Ingest Command + +To continuously process metadata JSON, the METADATA_JSON Redis stream is monitored. Upon reading an ID from the stream, the ingest loop lookups the corresponding asset_data using the ID within the DAS DB, fetches the metadata JSON, and then updates the asset_data record with the retrieved metadata. +``` +das-metadata-json ingest [OPTIONS] --messenger-redis-url --database-url +``` + +#### Options +- `--messenger-redis-url `: The Redis URL for the messenger service. +- `--messenger-redis-batch-size `: Batch size for Redis operations (default: 100). +- `--metrics-host `: Host for sending metrics (default: 127.0.0.1). +- `--metrics-port `: Port for sending metrics (default: 8125). +- `--metrics-prefix `: Prefix for metrics (default: das.backfiller). +- `--database-url `: The database URL. +- `--database-max-connections `: Maximum database connections (default: 125). +- `--database-min-connections `: Minimum database connections (default: 5). +- `--timeout `: Timeout for operations in milliseconds (default: 1000). +- `--queue-size `: Size of the job queue (default: 1000). +- `--worker-count `: Number of worker threads (default: 100). +- `-h, --help`: Print help information. + +### Backfill Command + +To backfill any `asset_data` marked for indexing with `reindex=true`: + +``` +das-metadata-json backfill [OPTIONS] --database-url +``` + +#### Options +- `--database-url `: The database URL. +- `--database-max-connections `: Maximum database connections (default: 125). +- `--database-min-connections `: Minimum database connections (default: 5). +- `--metrics-host `: Host for sending metrics (default: 127.0.0.1). +- `--metrics-port `: Port for sending metrics (default: 8125). +- `--metrics-prefix `: Prefix for metrics (default: das.backfiller). +- `--queue-size `: Size of the job queue (default: 1000). +- `--worker-count `: Number of worker threads (default: 100). +- `--timeout `: Timeout for operations in milliseconds (default: 1000). +- `--batch-size `: Number of records to process in a single batch (default: 1000). +- `-h, --help`: Print help information. + +## Lib + +The `das-metadata-json` crate provides a `sender` module which can be integrated in a third-party service (eg `nft_ingester`) to push asset data IDs for indexing. To configure follow the steps below: + +### Configuration + +1. **Set up the `SenderArgs`:** Ensure that the `nft_ingester` is configured with the necessary `SenderArgs`. These arguments include the Redis URL, batch size, and the number of queue connections. For example: + +```rust +let sender_args = SenderArgs { +messenger_redis_url: "redis://localhost:6379".to_string(), +messenger_redis_batch_size: "100".to_string(), +messenger_queue_connections: 5, +}; +``` + +2. **Initialize the `SenderPool`:** Use the `try_from_config` async function to create a `SenderPool` instance from the `SenderArgs`. This will set up the necessary channels and messengers for communication. + +```rust +let sender_pool = SenderPool::try_from_config(sender_args).await?; +``` + +3. **Push Asset Data IDs for Indexing:** With the `SenderPool` instance, you can now push asset data IDs to be indexed using the `push` method. The IDs should be serialized into a byte array before being sent. The `asset_data` record should be written to the database before pushing its ID. 
+
+### Backfill Command
+
+To backfill any `asset_data` marked for indexing with `reindex=true`:
+
+```
+das-metadata-json backfill [OPTIONS] --database-url <DATABASE_URL>
+```
+
+#### Options
+- `--database-url <DATABASE_URL>`: The database URL.
+- `--database-max-connections <DATABASE_MAX_CONNECTIONS>`: Maximum database connections (default: 125).
+- `--database-min-connections <DATABASE_MIN_CONNECTIONS>`: Minimum database connections (default: 5).
+- `--metrics-host <METRICS_HOST>`: Host for sending metrics (default: 127.0.0.1).
+- `--metrics-port <METRICS_PORT>`: Port for sending metrics (default: 8125).
+- `--metrics-prefix <METRICS_PREFIX>`: Prefix for metrics (default: das.backfiller).
+- `--queue-size <QUEUE_SIZE>`: Size of the job queue (default: 1000).
+- `--worker-count <WORKER_COUNT>`: Number of worker threads (default: 100).
+- `--timeout <TIMEOUT>`: Timeout for operations in milliseconds (default: 1000).
+- `--batch-size <BATCH_SIZE>`: Number of records to process in a single batch (default: 1000).
+- `-h, --help`: Print help information.
+
+## Lib
+
+The `das-metadata-json` crate provides a `sender` module which can be integrated into a third-party service (e.g., `nft_ingester`) to push asset data IDs for indexing. To configure it, follow the steps below:
+
+### Configuration
+
+1. **Set up the `SenderArgs`:** Ensure that the `nft_ingester` is configured with the necessary `SenderArgs`. These arguments include the Redis URL, batch size, and the number of queue connections. For example:
+
+```rust
+let sender_args = SenderArgs {
+    messenger_redis_url: "redis://localhost:6379".to_string(),
+    messenger_redis_batch_size: "100".to_string(),
+    messenger_queue_connections: 5,
+};
+```
+
+2. **Initialize the `SenderPool`:** Use the `try_from_config` async function to create a `SenderPool` instance from the `SenderArgs`. This will set up the necessary channels and messengers for communication.
+
+```rust
+let sender_pool = SenderPool::try_from_config(sender_args).await?;
+```
+
+3. **Push Asset Data IDs for Indexing:** With the `SenderPool` instance, you can now push asset data IDs to be indexed using the `push` method. The IDs should be serialized into a byte array before being sent, and the `asset_data` record should be written to the database before its ID is pushed.
+
+```rust
+let message = asset_data.id;
+
+sender_pool.push(&message).await?;
+```
+
+Within the `nft_ingester`, the `sender_pool` is orchestrated by the `TaskManager`. When configured appropriately, upon receiving a `DownloadMetadata` task, the `task_manager` will forgo the usual process of creating a task record and instead push the asset ID directly to the `METADATA_JSON` Redis stream. This queues the ID for processing by the `das-metadata-json` indexer, streamlining the workflow for indexing metadata JSON.
+
+## Configuration
+The CLI can be configured using command-line options or environment variables. For options that have an associated environment variable, you can set the variable instead of passing the option on the command line.
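+
+For example, clap's `env` support maps each env-enabled long flag to its uppercased name. A hypothetical invocation (assuming both `--messenger-redis-url` and `--database-url` are env-enabled, with placeholder values):
+
+```
+MESSENGER_REDIS_URL=redis://localhost:6379 DATABASE_URL=postgres://user:pass@localhost/das das-metadata-json ingest
+```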
+
+## Logging
+Logging is managed by `env_logger`. Set the `RUST_LOG` environment variable to control the logging level, e.g., `RUST_LOG=info`.
+
+## Error Handling
+The CLI provides error messages for any issues encountered during execution.
\ No newline at end of file
diff --git a/metadata_json/src/cmds/backfill.rs b/metadata_json/src/cmds/backfill.rs
new file mode 100644
index 000000000..bbbaf4b90
--- /dev/null
+++ b/metadata_json/src/cmds/backfill.rs
@@ -0,0 +1,84 @@
+use {
+    crate::worker::{Worker, WorkerArgs},
+    clap::Parser,
+    das_core::{connect_db, setup_metrics, MetricsArgs, PoolArgs},
+    digital_asset_types::dao::asset_data,
+    log::info,
+    reqwest::ClientBuilder,
+    sea_orm::{entity::*, prelude::*, query::*, EntityTrait, SqlxPostgresConnector},
+    tokio::time::Duration,
+};
+
+#[derive(Parser, Clone, Debug)]
+pub struct BackfillArgs {
+    #[clap(flatten)]
+    database: PoolArgs,
+
+    #[command(flatten)]
+    metrics: MetricsArgs,
+
+    #[command(flatten)]
+    worker: WorkerArgs,
+
+    #[arg(long, default_value = "1000")]
+    timeout: u64,
+
+    #[arg(long, default_value = "1000")]
+    batch_size: u64,
+}
+
+pub async fn run(args: BackfillArgs) -> Result<(), anyhow::Error> {
+    let batch_size = args.batch_size;
+
+    let pool = connect_db(&args.database).await?;
+
+    setup_metrics(&args.metrics)?;
+
+    let client = ClientBuilder::new()
+        .timeout(Duration::from_millis(args.timeout))
+        .build()?;
+
+    let worker = Worker::from(args.worker);
+
+    let (tx, handle) = worker.start(pool.clone(), client.clone());
+
+    let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool);
+
+    let mut condition = Condition::all();
+    condition = condition.add(asset_data::Column::Reindex.eq(true));
+    let query = asset_data::Entity::find()
+        .filter(condition)
+        .order_by(asset_data::Column::Id, Order::Asc);
+
+    let mut after = None;
+
+    loop {
+        let mut query = query.clone().cursor_by(asset_data::Column::Id);
+        let mut query = query.first(batch_size);
+
+        if let Some(after) = after {
+            query = query.after(after);
+        }
+
+        let assets = query.all(&conn).await?;
+        let assets_count = assets.len();
+
+        for asset in assets.clone() {
+            tx.send(asset.id).await?;
+        }
+
+        if u64::try_from(assets_count)? < batch_size {
+            break;
+        }
+
+        after = assets.last().cloned().map(|asset| asset.id);
+    }
+
+    drop(tx);
+
+    info!("Waiting for tasks to finish");
+    handle.await?;
+
+    info!("Tasks finished");
+    Ok(())
+}
diff --git a/metadata_json/src/cmds/ingest.rs b/metadata_json/src/cmds/ingest.rs
new file mode 100644
index 000000000..df801572b
--- /dev/null
+++ b/metadata_json/src/cmds/ingest.rs
@@ -0,0 +1,58 @@
+use crate::stream::{Receiver, ReceiverArgs};
+use crate::worker::{Worker, WorkerArgs};
+use clap::Parser;
+use das_core::{connect_db, setup_metrics, MetricsArgs, PoolArgs};
+use log::info;
+use reqwest::ClientBuilder;
+use tokio::time::Duration;
+
+#[derive(Parser, Clone, Debug)]
+pub struct IngestArgs {
+    #[clap(flatten)]
+    receiver: ReceiverArgs,
+
+    #[clap(flatten)]
+    metrics: MetricsArgs,
+
+    #[clap(flatten)]
+    database: PoolArgs,
+
+    #[arg(long, default_value = "1000")]
+    timeout: u64,
+
+    #[clap(flatten)]
+    worker: WorkerArgs,
+}
+
+pub async fn run(args: IngestArgs) -> Result<(), anyhow::Error> {
+    let rx = Receiver::try_from_config(args.receiver.into()).await?;
+
+    let pool = connect_db(&args.database).await?;
+
+    setup_metrics(&args.metrics)?;
+
+    let client = ClientBuilder::new()
+        .timeout(Duration::from_millis(args.timeout))
+        .build()?;
+
+    let worker = Worker::from(args.worker);
+
+    let (tx, handle) = worker.start(pool.clone(), client.clone());
+
+    while let Ok(messages) = rx.recv().await {
+        for message in messages.clone() {
+            tx.send(message.data).await?;
+        }
+
+        let ids: Vec<String> = messages.into_iter().map(|m| m.id).collect();
+        rx.ack(&ids).await?;
+    }
+
+    drop(tx);
+
+    handle.await?;
+
+    info!("Ingesting stopped");
+
+    Ok(())
+}
diff --git a/metadata_json/src/cmds/mod.rs b/metadata_json/src/cmds/mod.rs
new file mode 100644
index 000000000..249a6e6fd
--- /dev/null
+++ b/metadata_json/src/cmds/mod.rs
@@ -0,0 +1,4 @@
+pub mod backfill;
+pub mod ingest;
+pub mod report;
+pub mod single;
diff --git a/metadata_json/src/cmds/report.rs b/metadata_json/src/cmds/report.rs
new file mode 100644
index 000000000..d37bedc58
--- /dev/null
+++ b/metadata_json/src/cmds/report.rs
@@ -0,0 +1,52 @@
+use cadence_macros::statsd_gauge;
+use clap::Parser;
+use das_core::{connect_db, setup_metrics, MetricsArgs, PoolArgs};
+use digital_asset_types::dao::asset_data::{Column, Entity};
+use log::{error, info};
+use sea_orm::{ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, SqlxPostgresConnector};
+use std::thread;
+use std::time::Duration;
+
+#[derive(Parser, Clone, Debug)]
+pub struct ReportArgs {
+    #[clap(flatten)]
+    metrics: MetricsArgs,
+
+    #[clap(flatten)]
+    database: PoolArgs,
+
+    /// Interval in minutes to report the status
+    #[arg(long, default_value = "15")]
+    interval: u64,
+}
+
+pub async fn run(args: ReportArgs) -> Result<(), anyhow::Error> {
+    let pool = connect_db(&args.database).await?;
+
+    setup_metrics(&args.metrics)?;
+
+    loop {
+        {
+            let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool.clone());
+
+            match Entity::find()
+                .filter(Column::Reindex.eq(true))
+                .count(&conn)
+                .await
+            {
+                Ok(count) => {
+                    info!("Count of asset_data with reindex=true: {}", count);
+                    statsd_gauge!("report.status", 1);
+
+                    statsd_gauge!("download.pending", count);
+                }
+                Err(e) => {
+                    error!("Failed to count asset_data with reindex=true: {}", e);
+                    statsd_gauge!("report.status", 0);
+                }
+            };
+        }
+
+        thread::sleep(Duration::from_secs(args.interval * 60));
+    }
+}
diff --git a/metadata_json/src/cmds/single.rs b/metadata_json/src/cmds/single.rs
new file mode 100644
index 000000000..c50a8b0c8
--- /dev/null
+++ b/metadata_json/src/cmds/single.rs
@@ -0,0 +1,40 @@
+use crate::worker::perform_metadata_json_task;
+use clap::Parser;
+use das_core::{connect_db, setup_metrics, MetricsArgs, PoolArgs};
+use log::{debug, error};
+use reqwest::ClientBuilder;
+use tokio::time::Duration;
+
+#[derive(Parser, Clone, Debug)]
+pub struct SingleArgs {
+    #[clap(flatten)]
+    metrics: MetricsArgs,
+
+    #[clap(flatten)]
+    database: PoolArgs,
+
+    #[arg(long, default_value = "1000")]
+    timeout: u64,
+
+    mint: String, // Accept mint as an argument
+}
+
+pub async fn run(args: SingleArgs) -> Result<(), anyhow::Error> {
+    let pool = connect_db(&args.database).await?;
+
+    setup_metrics(&args.metrics)?;
+
+    let asset_data = bs58::decode(args.mint.as_str()).into_vec()?;
+
+    let client = ClientBuilder::new()
+        .timeout(Duration::from_millis(args.timeout))
+        .build()?;
+
+    if let Err(e) = perform_metadata_json_task(client, pool, asset_data).await {
+        error!("{}", e);
+    } else {
+        debug!("Success");
+    }
+
+    Ok(())
+}
diff --git a/metadata_json/src/lib.rs b/metadata_json/src/lib.rs
new file mode 100644
index 000000000..7ef1b944c
--- /dev/null
+++ b/metadata_json/src/lib.rs
@@ -0,0 +1,3 @@
+mod stream;
+
+pub use stream::*;
diff --git a/metadata_json/src/main.rs b/metadata_json/src/main.rs
new file mode 100644
index 000000000..ef24b4487
--- /dev/null
+++ b/metadata_json/src/main.rs
@@ -0,0 +1,36 @@
+use clap::{Parser, Subcommand};
+
+mod cmds;
+mod stream;
+mod worker;
+
+use cmds::{backfill, ingest, report, single};
+
+#[derive(Parser)]
+#[command(author, about, next_line_help = true)]
+struct Args {
+    #[command(subcommand)]
+    action: Action,
+}
+
+#[derive(Subcommand, Clone)]
+enum Action {
+    Ingest(ingest::IngestArgs),
+    Backfill(backfill::BackfillArgs),
+    Single(single::SingleArgs),
+    Report(report::ReportArgs),
+}
+
+#[tokio::main]
+async fn main() -> Result<(), anyhow::Error> {
+    env_logger::init();
+
+    let args = Args::parse();
+
+    match args.action {
+        Action::Ingest(args) => ingest::run(args).await,
+        Action::Backfill(args) => backfill::run(args).await,
+        Action::Single(args) => single::run(args).await,
+        Action::Report(args) => report::run(args).await,
+    }
+}
diff --git a/metadata_json/src/stream/mod.rs b/metadata_json/src/stream/mod.rs
new file mode 100644
index 000000000..d4de571e6
--- /dev/null
+++ b/metadata_json/src/stream/mod.rs
@@ -0,0 +1,6 @@
+pub mod receiver;
+pub mod sender;
+
+pub use receiver::*;
+
+pub const METADATA_JSON_STREAM: &str = "METADATA_JSON";
diff --git a/metadata_json/src/stream/receiver.rs b/metadata_json/src/stream/receiver.rs
new file mode 100644
index 000000000..3b3d53037
--- /dev/null
+++ b/metadata_json/src/stream/receiver.rs
@@ -0,0 +1,91 @@
+use super::METADATA_JSON_STREAM;
+use clap::Parser;
+use figment::value::{Dict, Value};
+use plerkle_messenger::{select_messenger, Messenger, MessengerConfig, MessengerType, RecvData};
+use rand::{distributions::Alphanumeric, thread_rng, Rng};
+use std::sync::Arc;
+use tokio::sync::Mutex;
+
+#[derive(Clone, Debug, Parser)]
+pub struct ReceiverArgs {
+    #[arg(long, env)]
+    pub messenger_redis_url: String,
+    #[arg(long, env, default_value = "100")]
+    pub messenger_redis_batch_size: String,
+}
+
+fn rand_string() -> String {
+    thread_rng()
+        .sample_iter(&Alphanumeric)
+        .take(30)
+        .map(char::from)
+        .collect()
+}
+
+impl From<ReceiverArgs> for MessengerConfig {
+    fn from(args: ReceiverArgs) -> Self {
+        let mut connection_config = Dict::new();
+
+        connection_config.insert(
+            "redis_connection_str".to_string(),
+            Value::from(args.messenger_redis_url),
+        );
+        connection_config.insert(
+            "batch_size".to_string(),
+            Value::from(args.messenger_redis_batch_size),
+        );
+        connection_config.insert(
+            "pipeline_size_bytes".to_string(),
+            Value::from(1u128.to_string()),
+        );
+        connection_config.insert("consumer_id".to_string(), Value::from(rand_string()));
+
+        Self {
+            messenger_type: MessengerType::Redis,
+            connection_config,
+        }
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum ReceiverError {
+    #[error("messenger: {0}")]
+    Messenger(#[from] plerkle_messenger::MessengerError),
+}
+
+#[derive(Clone)]
+pub struct Receiver(Arc<Mutex<Box<dyn Messenger>>>);
+
+impl Receiver {
+    pub async fn try_from_config(config: MessengerConfig) -> Result<Self, ReceiverError> {
+        let mut messenger = select_messenger(config).await?;
+
+        messenger.add_stream(METADATA_JSON_STREAM).await?;
+        messenger
+            .set_buffer_size(METADATA_JSON_STREAM, 10000000000000000)
+            .await;
+
+        Ok(Self(Arc::new(Mutex::new(messenger))))
+    }
+
+    pub async fn recv(&self) -> Result<Vec<RecvData>, ReceiverError> {
+        let mut messenger = self.0.lock().await;
+
+        messenger
+            .recv(
+                METADATA_JSON_STREAM,
+                plerkle_messenger::ConsumptionType::All,
+            )
+            .await
+            .map_err(Into::into)
+    }
+
+    pub async fn ack(&self, ids: &[String]) -> Result<(), ReceiverError> {
+        let mut messenger = self.0.lock().await;
+
+        messenger
+            .ack_msg(METADATA_JSON_STREAM, ids)
+            .await
+            .map_err(Into::into)
+    }
+}
diff --git a/metadata_json/src/stream/sender.rs b/metadata_json/src/stream/sender.rs
new file mode 100644
index 000000000..cd2ca1315
--- /dev/null
+++ b/metadata_json/src/stream/sender.rs
@@ -0,0 +1,112 @@
+use super::METADATA_JSON_STREAM;
+use anyhow::Result;
+use clap::Parser;
+use figment::value::{Dict, Value};
+use plerkle_messenger::{Messenger, MessengerConfig, MessengerType};
+use rand::{distributions::Alphanumeric, thread_rng, Rng};
+use serde::Deserialize;
+use std::num::TryFromIntError;
+use std::sync::Arc;
+use tokio::sync::mpsc;
+use tokio::sync::{mpsc::error::TrySendError, Mutex};
+
+#[derive(Clone, Debug, Parser, Deserialize, PartialEq)]
+pub struct SenderArgs {
+    #[arg(long, env)]
+    pub messenger_redis_url: String,
+    #[arg(long, env, default_value = "100")]
+    pub messenger_redis_batch_size: String,
+    #[arg(long, env, default_value = "5")]
+    pub messenger_queue_connections: u64,
+}
+
+fn rand_string() -> String {
+    thread_rng()
+        .sample_iter(&Alphanumeric)
+        .take(30)
+        .map(char::from)
+        .collect()
+}
+
+impl From<SenderArgs> for MessengerConfig {
+    fn from(args: SenderArgs) -> Self {
+        let mut connection_config = Dict::new();
+
+        connection_config.insert(
+            "redis_connection_str".to_string(),
+            Value::from(args.messenger_redis_url),
+        );
+        connection_config.insert(
+            "batch_size".to_string(),
+            Value::from(args.messenger_redis_batch_size),
+        );
+        connection_config.insert(
+            "pipeline_size_bytes".to_string(),
+            Value::from(1u128.to_string()),
+        );
+        connection_config.insert("consumer_id".to_string(), Value::from(rand_string()));
+
+        Self {
+            messenger_type: MessengerType::Redis,
+            connection_config,
+        }
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum SenderPoolError {
+    #[error("messenger")]
+    Messenger(#[from] plerkle_messenger::MessengerError),
+    #[error("tokio try send to channel")]
+    TrySendMessengerChannel(#[from] TrySendError<Box<dyn Messenger>>),
+    #[error("recv messenger connection")]
+    RecvMessengerConnection,
+    #[error("try from int")]
+    TryFromInt(#[from] TryFromIntError),
+    #[error("tokio send to channel")]
+    SendMessengerChannel(#[from] mpsc::error::SendError<Box<dyn Messenger>>),
+}
+
+#[derive(Debug, Clone)]
+pub struct SenderPool {
+    tx: mpsc::Sender<Box<dyn Messenger>>,
+    rx: Arc<Mutex<mpsc::Receiver<Box<dyn Messenger>>>>,
+}
+
+impl SenderPool {
+    #[allow(dead_code)]
+    pub async fn try_from_config(config: SenderArgs) -> anyhow::Result<Self> {
+        let size = usize::try_from(config.messenger_queue_connections)?;
+        let (tx, rx) = mpsc::channel(size);
+
+        for _ in 0..config.messenger_queue_connections {
+            let messenger_config: MessengerConfig = config.clone().into();
+            let mut messenger = plerkle_messenger::select_messenger(messenger_config).await?;
+            messenger.add_stream(METADATA_JSON_STREAM).await?;
+            messenger
+                .set_buffer_size(METADATA_JSON_STREAM, 10000000000000000)
+                .await;
+
+            tx.try_send(messenger)?;
+        }
+
+        Ok(Self {
+            tx,
+            rx: Arc::new(Mutex::new(rx)),
+        })
+    }
+    #[allow(dead_code)]
+    pub async fn push(&self, message: &[u8]) -> Result<(), SenderPoolError> {
+        let mut rx = self.rx.lock().await;
+        let mut messenger = rx
+            .recv()
+            .await
+            .ok_or(SenderPoolError::RecvMessengerConnection)?;
+
+        messenger.send(METADATA_JSON_STREAM, message).await?;
+
+        self.tx.send(messenger).await?;
+
+        Ok(())
+    }
+}
diff --git a/metadata_json/src/worker.rs b/metadata_json/src/worker.rs
new file mode 100644
index 000000000..d72b73eb7
--- /dev/null
+++ b/metadata_json/src/worker.rs
@@ -0,0 +1,195 @@
+use {
+    backon::{ExponentialBuilder, Retryable},
+    cadence_macros::{statsd_count, statsd_time},
+    clap::Parser,
+    digital_asset_types::dao::asset_data,
+    futures::{stream::FuturesUnordered, StreamExt},
+    indicatif::HumanDuration,
+    log::{debug, error},
+    reqwest::{Client, Url},
+    sea_orm::{entity::*, prelude::*, EntityTrait, SqlxPostgresConnector},
+    tokio::{sync::mpsc, task::JoinHandle, time::Instant},
+};
+
+#[derive(Parser, Clone, Debug)]
+pub struct WorkerArgs {
+    #[arg(long, env, default_value = "1000")]
+    queue_size: usize,
+    #[arg(long, env, default_value = "100")]
+    worker_count: usize,
+}
+
+pub struct Worker {
+    queue_size: usize,
+    worker_count: usize,
+}
+
+impl From<WorkerArgs> for Worker {
+    fn from(args: WorkerArgs) -> Self {
+        Self {
+            queue_size: args.queue_size,
+            worker_count: args.worker_count,
+        }
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum WorkerError {
+    #[error("send error: {0}")]
+    Send(#[from] mpsc::error::SendError<Vec<u8>>),
+    #[error("join error: {0}")]
+    Join(#[from] tokio::task::JoinError),
+}
+
+impl Worker {
+    pub fn start(
+        &self,
+        pool: sqlx::PgPool,
+        client: Client,
+    ) -> (mpsc::Sender<Vec<u8>>, JoinHandle<()>) {
+        let (tx, mut rx) = mpsc::channel::<Vec<u8>>(self.queue_size);
+        let worker_count = self.worker_count;
+
+        let handle = tokio::spawn(async move {
+            let mut handlers = FuturesUnordered::new();
+
+            while let Some(asset_data) = rx.recv().await {
+                if handlers.len() >= worker_count {
+                    handlers.next().await;
+                }
+
+                let pool = pool.clone();
+                let client = client.clone();
+
+                handlers.push(spawn_task(client, pool, asset_data));
+            }
+
+            while handlers.next().await.is_some() {}
+        });
+
+        (tx, handle)
+    }
+}
+
+fn spawn_task(client: Client, pool: sqlx::PgPool, asset_data: Vec<u8>) -> JoinHandle<()> {
+    tokio::spawn(async move {
+        let timing = Instant::now();
+
+        let asset_data_id = asset_data.clone();
+        let asset_data_id = bs58::encode(asset_data_id).into_string();
+
+        if let Err(e) = perform_metadata_json_task(client, pool, asset_data).await {
+            error!("Asset {} {}", asset_data_id, e);
+        } else {
+            debug!("Asset {} success", asset_data_id);
+        }
+
+        debug!(
+            "Asset {} finished in {}",
+            asset_data_id,
+            HumanDuration(timing.elapsed())
+        );
+
+        statsd_time!("ingester.bgtask.finished", timing.elapsed(), "type" => "DownloadMetadata");
+    })
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum MetadataJsonTaskError {
+    #[error("sea orm: {0}")]
+    SeaOrm(#[from] sea_orm::DbErr),
+    #[error("metadata json: {0}")]
+    Fetch(#[from] FetchMetadataJsonError),
+    #[error("asset not found in the db")]
+    AssetNotFound,
+}
+
+pub async fn perform_metadata_json_task(
+    client: Client,
+    pool: sqlx::PgPool,
+    asset_data: Vec<u8>,
+) -> Result<asset_data::Model, MetadataJsonTaskError> {
+    let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool);
+
+    let asset_data_model = asset_data::Entity::find()
+        .filter(asset_data::Column::Id.eq(asset_data))
+        .one(&conn)
+        .await?
+        .ok_or(MetadataJsonTaskError::AssetNotFound)?;
+
+    match fetch_metadata_json(client, &asset_data_model.metadata_url).await {
+        Ok(metadata) => {
+            let mut active_model: asset_data::ActiveModel = asset_data_model.into();
+            active_model.metadata = Set(metadata);
+            active_model.reindex = Set(Some(false));
+
+            active_model.update(&conn).await.map_err(Into::into)
+        }
+        Err(e) => {
+            let status = match &e {
+                FetchMetadataJsonError::Response { status, .. } => status.to_string(),
+                FetchMetadataJsonError::Parse { .. } => "parse".to_string(),
+                FetchMetadataJsonError::GenericReqwest(_) => "reqwest".to_string(),
+                _ => "unhandled".to_string(),
+            };
+
+            statsd_count!("ingester.bgtask.error", 1, "type" => "DownloadMetadata", "status" => &status);
+
+            Err(MetadataJsonTaskError::Fetch(e))
+        }
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum FetchMetadataJsonError {
+    #[error("reqwest: {0}")]
+    GenericReqwest(#[from] reqwest::Error),
+    #[error("json parse for url({url}) with {source}")]
+    Parse { source: reqwest::Error, url: Url },
+    #[error("response {status} for url ({url}) with {source}")]
+    Response {
+        source: reqwest::Error,
+        url: Url,
+        status: StatusCode,
+    },
+    #[error("url parse: {0}")]
+    Url(#[from] url::ParseError),
+}
+
+#[derive(Debug, derive_more::Display)]
+pub enum StatusCode {
+    Unknown,
+    Code(reqwest::StatusCode),
+}
+
+async fn fetch_metadata_json(
+    client: Client,
+    uri: &str,
+) -> Result<serde_json::Value, FetchMetadataJsonError> {
+    (|| async {
+        let url = Url::parse(uri)?;
+
+        let response = client.get(url.clone()).send().await?;
+
+        match response.error_for_status() {
+            Ok(res) => res
+                .json::<serde_json::Value>()
+                .await
+                .map_err(|source| FetchMetadataJsonError::Parse { source, url }),
+            Err(source) => {
+                let status = source
+                    .status()
+                    .map(StatusCode::Code)
+                    .unwrap_or(StatusCode::Unknown);
+
+                Err(FetchMetadataJsonError::Response {
+                    source,
+                    url,
+                    status,
+                })
+            }
+        }
+    })
+    .retry(&ExponentialBuilder::default())
+    .await
+}
diff --git a/migration/Cargo.toml b/migration/Cargo.toml
index c0202ffdd..6ce0612be 100644
--- a/migration/Cargo.toml
+++ b/migration/Cargo.toml
@@ -9,7 +9,10 @@ publish = { workspace = true }
 async-std = { workspace = true, features = ["attributes", "tokio1"] }
 enum-iterator = { workspace = true }
 enum-iterator-derive = { workspace = true }
-sea-orm-migration = { workspace = true, features = ["runtime-tokio-rustls", "sqlx-postgres"] }
+sea-orm-migration = { workspace = true, features = [
+    "runtime-tokio-rustls",
+    "sqlx-postgres",
+] }
 
 [lints]
 workspace = true
diff --git a/migration/src/lib.rs b/migration/src/lib.rs
index 7cee77f5e..a7468b864 100644
--- a/migration/src/lib.rs
+++ b/migration/src/lib.rs
@@ -38,11 +38,15 @@ mod m20240116_130744_add_update_metadata_ix;
 mod m20240117_120101_alter_creator_indices;
 mod m20240124_173104_add_tree_seq_index_to_cl_audits_v2;
 mod m20240124_181900_add_slot_updated_column_per_update_type;
+mod m20240219_115532_add_extensions_column;
 mod m20240313_120101_add_mpl_core_plugins_columns;
 mod m20240319_120101_add_mpl_core_enum_vals;
 mod
m20240320_120101_add_mpl_core_info_items; mod m20240520_120101_add_mpl_core_external_plugins_columns; mod m20240718_161232_change_supply_columns_to_numeric; +mod m20241119_060310_add_token_inscription_enum_variant; +mod m20241209_100813_add_unique_index_for_asset_owner_and_supply; +mod m20241209_111604_add_index_for_asset_id_group_value_verified; pub mod model; @@ -90,11 +94,15 @@ impl MigratorTrait for Migrator { Box::new(m20240117_120101_alter_creator_indices::Migration), Box::new(m20240124_173104_add_tree_seq_index_to_cl_audits_v2::Migration), Box::new(m20240124_181900_add_slot_updated_column_per_update_type::Migration), + Box::new(m20240219_115532_add_extensions_column::Migration), Box::new(m20240313_120101_add_mpl_core_plugins_columns::Migration), Box::new(m20240319_120101_add_mpl_core_enum_vals::Migration), Box::new(m20240320_120101_add_mpl_core_info_items::Migration), Box::new(m20240520_120101_add_mpl_core_external_plugins_columns::Migration), Box::new(m20240718_161232_change_supply_columns_to_numeric::Migration), + Box::new(m20241119_060310_add_token_inscription_enum_variant::Migration), + Box::new(m20241209_100813_add_unique_index_for_asset_owner_and_supply::Migration), + Box::new(m20241209_111604_add_index_for_asset_id_group_value_verified::Migration), ] } } diff --git a/migration/src/m20231101_120101_add_instruction_into_cl_audit.rs b/migration/src/m20231101_120101_add_instruction_into_cl_audit.rs new file mode 100644 index 000000000..37820db5d --- /dev/null +++ b/migration/src/m20231101_120101_add_instruction_into_cl_audit.rs @@ -0,0 +1,32 @@ +use digital_asset_types::dao::cl_audits; +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + sea_query::Table::alter() + .table(cl_audits::Entity) + .add_column(ColumnDef::new(Alias::new("Instruction")).string()) + .to_owned(), + ) + .await?; + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + sea_query::Table::alter() + .table(cl_audits::Entity) + .drop_column(Alias::new("Instruction")) + .to_owned(), + ) + .await?; + Ok(()) + } +} \ No newline at end of file diff --git a/migration/src/m20231101_120101_cl_audit_table_index.rs b/migration/src/m20231101_120101_cl_audit_table_index.rs new file mode 100644 index 000000000..fb93bda79 --- /dev/null +++ b/migration/src/m20231101_120101_cl_audit_table_index.rs @@ -0,0 +1,54 @@ +use digital_asset_types::dao::cl_audits; +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .create_index( + Index::create() + .name("idx_cl_audits_tree") + .col(cl_audits::Column::Tree) + .table(cl_audits::Entity) + .to_owned(), + ) + .await?; + + manager + .create_index( + Index::create() + .name("idx_cl_audits_leaf_id") + .col(cl_audits::Column::LeafIdx) + .table(cl_audits::Entity) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_index( + Index::drop() + .name("idx_cl_audits_tree") + .table(cl_audits::Entity) + .to_owned(), + ) + .await?; + + manager + .drop_index( + Index::drop() + .name("idx_cl_audits_leaf_id") + .table(cl_audits::Entity) + .to_owned(), + ) + .await?; + + Ok(()) + 
} +} diff --git a/migration/src/m20231222_110618_add_indices_to_cl_audits_v2.rs b/migration/src/m20231222_110618_add_indices_to_cl_audits_v2.rs new file mode 100644 index 000000000..66b524764 --- /dev/null +++ b/migration/src/m20231222_110618_add_indices_to_cl_audits_v2.rs @@ -0,0 +1,53 @@ +use digital_asset_types::dao::cl_audits_v2; +use sea_orm_migration::prelude::*; +use sea_orm::{ConnectionTrait, Statement, DatabaseBackend}; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let conn = manager.get_connection(); + + // manager + // .create_index( + // Index::create() + // .name("tree_idx") + // .table(cl_audits_v2::Entity) + // .col(cl_audits_v2::Column::Tree) + // .to_owned(), + // ) + // .await?; + + conn.execute(Statement::from_string( + DatabaseBackend::Postgres, + "CREATE INDEX tree_seq_idx ON cl_audits_v2 (tree, seq);".to_string(), + )) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_index( + Index::drop() + .name("tree_idx") + .table(cl_audits_v2::Entity) + .to_owned(), + ) + .await?; + + manager + .drop_index( + Index::drop() + .name("tree_seq_idx") + .table(cl_audits_v2::Entity) + .to_owned(), + ) + .await?; + + Ok(()) + } +} diff --git a/migration/src/m20240219_115532_add_extensions_column.rs b/migration/src/m20240219_115532_add_extensions_column.rs new file mode 100644 index 000000000..d0414ea18 --- /dev/null +++ b/migration/src/m20240219_115532_add_extensions_column.rs @@ -0,0 +1,60 @@ +use sea_orm_migration::{ + prelude::*, + sea_orm::{ConnectionTrait, DatabaseBackend, Statement}, +}; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let connection = manager.get_connection(); + + connection + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TABLE asset ADD COLUMN mint_extensions jsonb;".to_string(), + )) + .await?; + connection + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TABLE tokens ADD COLUMN extensions jsonb;".to_string(), + )) + .await?; + connection + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TABLE token_accounts ADD COLUMN extensions jsonb;".to_string(), + )) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let connection = manager.get_connection(); + + connection + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TABLE asset DROP COLUMN mint_extensions;".to_string(), + )) + .await?; + connection + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TABLE tokens DROP COLUMN extensions;".to_string(), + )) + .await?; + connection + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TABLE token_accounts DROP COLUMN extensions;".to_string(), + )) + .await?; + + Ok(()) + } +} diff --git a/migration/src/m20241119_060310_add_token_inscription_enum_variant.rs b/migration/src/m20241119_060310_add_token_inscription_enum_variant.rs new file mode 100644 index 000000000..1bdd6f8c3 --- /dev/null +++ b/migration/src/m20241119_060310_add_token_inscription_enum_variant.rs @@ -0,0 +1,25 @@ +use sea_orm::{ConnectionTrait, DatabaseBackend, Statement}; +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + 
+#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .get_connection() + .execute(Statement::from_string( + DatabaseBackend::Postgres, + "ALTER TYPE v1_account_attachments ADD VALUE IF NOT EXISTS 'token_inscription';" + .to_string(), + )) + .await?; + + Ok(()) + } + + async fn down(&self, _manager: &SchemaManager) -> Result<(), DbErr> { + Ok(()) + } +} diff --git a/migration/src/m20241209_100813_add_unique_index_for_asset_owner_and_supply.rs b/migration/src/m20241209_100813_add_unique_index_for_asset_owner_and_supply.rs new file mode 100644 index 000000000..d7ca68111 --- /dev/null +++ b/migration/src/m20241209_100813_add_unique_index_for_asset_owner_and_supply.rs @@ -0,0 +1,39 @@ +use sea_orm_migration::prelude::*; + +use crate::model::table::Asset; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .create_index( + Index::create() + .name("idx_asset_owner_supply") + .col(Asset::Owner) + .col(Asset::Supply) + .col(Asset::Burnt) + .col(Asset::OwnerType) + .table(Asset::Table) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_index( + Index::drop() + .name("idx_asset_owner_supply") + .table(Asset::Table) + .to_owned(), + ) + .await?; + + Ok(()) + } +} diff --git a/migration/src/m20241209_111604_add_index_for_asset_id_group_value_verified.rs b/migration/src/m20241209_111604_add_index_for_asset_id_group_value_verified.rs new file mode 100644 index 000000000..d23828077 --- /dev/null +++ b/migration/src/m20241209_111604_add_index_for_asset_id_group_value_verified.rs @@ -0,0 +1,38 @@ +use sea_orm_migration::prelude::*; + +use crate::model::table::AssetGrouping; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .create_index( + Index::create() + .unique() + .name("asset_grouping_id_value_verified_unique") + .col(AssetGrouping::AssetId) + .col(AssetGrouping::GroupValue) + .col(AssetGrouping::Verified) + .table(AssetGrouping::Table) + .to_owned(), + ) + .await?; + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_index( + sea_query::Index::drop() + .name("asset_grouping_id_value_verified_unique") + .table(AssetGrouping::Table) + .to_owned(), + ) + .await?; + + Ok(()) + } +} diff --git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index fd17a2bed..cb6e1cf01 100644 --- a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -14,6 +14,8 @@ cadence = { workspace = true } cadence-macros = { workspace = true } chrono = { workspace = true } clap = { workspace = true, features = ["derive", "cargo"] } +das-core = { workspace = true } +das-metadata-json = { workspace = true } digital_asset_types = { workspace = true, features = [ "json_types", "sql_types", @@ -27,6 +29,7 @@ plerkle_messenger = { workspace = true, features = ["redis"] } plerkle_serialization = { workspace = true } program_transformers = { workspace = true } rand = { workspace = true } +regex = { workspace = true } reqwest = { workspace = true } rust-crypto = { workspace = true } sea-orm = { workspace = true, features = [ @@ -51,7 +54,6 @@ sqlx = { workspace = true, features = [ "offline", "json", ] } 
-stretto = { workspace = true, features = ["async"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["tracing"] } tracing-subscriber = { workspace = true, features = [ diff --git a/nft_ingester/src/backfiller.rs b/nft_ingester/src/backfiller.rs deleted file mode 100644 index a8a4f1685..000000000 --- a/nft_ingester/src/backfiller.rs +++ /dev/null @@ -1,1082 +0,0 @@ -//! Backfiller that fills gaps in trees by detecting gaps in sequence numbers -//! in the `backfill_items` table. Inspired by backfiller.ts/backfill.ts. - -use borsh::BorshDeserialize; -use cadence_macros::{is_global_default_set, statsd_count, statsd_gauge}; -use chrono::Utc; -use digital_asset_types::dao::backfill_items; -use flatbuffers::FlatBufferBuilder; -use futures::{stream::FuturesUnordered, StreamExt}; -use log::{debug, error, info}; -use plerkle_messenger::{Messenger, TRANSACTION_BACKFILL_STREAM}; -use plerkle_serialization::serializer::seralize_encoded_transaction_with_status; - -use sea_orm::{ - entity::*, query::*, sea_query::Expr, DatabaseConnection, DbBackend, DbErr, FromQueryResult, - SqlxPostgresConnector, -}; -use solana_account_decoder::UiAccountEncoding; -use solana_client::{ - nonblocking::rpc_client::RpcClient, - rpc_client::GetConfirmedSignaturesForAddress2Config, - rpc_config::{RpcAccountInfoConfig, RpcBlockConfig, RpcProgramAccountsConfig}, - rpc_filter::{Memcmp, RpcFilterType}, -}; -use solana_sdk::{ - account::Account, - commitment_config::{CommitmentConfig, CommitmentLevel}, - pubkey::Pubkey, - signature::Signature, - slot_history::Slot, -}; -use solana_transaction_status::{ - option_serializer::OptionSerializer, EncodedConfirmedBlock, - EncodedConfirmedTransactionWithStatusMeta, UiTransactionEncoding, -}; -use spl_account_compression::state::{ - merkle_tree_get_size, ConcurrentMerkleTreeHeader, CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1, -}; -use sqlx::{self, Pool, Postgres}; -use std::{ - cmp, - collections::{HashMap, HashSet}, - str::FromStr, - sync::Arc, -}; -use stretto::{AsyncCache, AsyncCacheBuilder}; -use tokio::{ - sync::Semaphore, - task::JoinSet, - time::{self, sleep, Duration}, -}; - -use crate::{ - config::{IngesterConfig, DATABASE_LISTENER_CHANNEL_KEY, RPC_COMMITMENT_KEY, RPC_URL_KEY}, - error::IngesterError, - metric, -}; -// Number of tries to backfill a single tree before marking as "failed". -const NUM_TRIES: i32 = 5; -const TREE_SYNC_INTERVAL: u64 = 60; -const MAX_BACKFILL_CHECK_WAIT: u64 = 1000; -// Constants used for varying delays when failures occur. -const INITIAL_FAILURE_DELAY: u64 = 100; -const MAX_FAILURE_DELAY_MS: u64 = 10_000; -const BLOCK_CACHE_SIZE: usize = 300_000; -const MAX_CACHE_COST: i64 = 32; -const BLOCK_CACHE_DURATION: u64 = 172800; - -struct SlotSeq(u64, u64); -/// Main public entry point for backfiller task. 
-pub fn setup_backfiller( - pool: Pool, - config: IngesterConfig, -) -> tokio::task::JoinHandle<()> { - tokio::spawn(async move { - loop { - let pool_cloned = pool.clone(); - let config_cloned = config.clone(); - let block_cache = Arc::new( - AsyncCacheBuilder::new(BLOCK_CACHE_SIZE, MAX_CACHE_COST) - .set_ignore_internal_cost(true) - .finalize(tokio::spawn) - .expect("failed to create cache"), - ); - let mut tasks = JoinSet::new(); - let bc = Arc::clone(&block_cache); - tasks.spawn(async move { - info!("Backfiller filler running"); - let mut backfiller = Backfiller::::new(pool_cloned, config_cloned, &bc).await; - backfiller.run_filler().await; - }); - - let pool_cloned = pool.clone(); - let config_cloned = config.clone(); - let bc = Arc::clone(&block_cache); - tasks.spawn(async move { - info!("Backfiller finder running"); - let mut backfiller = Backfiller::::new(pool_cloned, config_cloned, &bc).await; - backfiller.run_finder().await; - }); - - while let Some(task) = tasks.join_next().await { - match task { - Ok(_) => break, - Err(err) if err.is_panic() => { - metric! { - statsd_count!("ingester.backfiller.task_panic", 1); - } - } - Err(err) => { - let err = err.to_string(); - metric! { - statsd_count!("ingester.backfiller.task_error", 1, "error" => &err); - } - } - } - } - } - }) -} - -/// Struct used when querying for unique trees. -#[derive(Debug, FromQueryResult)] -struct UniqueTree { - tree: Vec, -} - -/// Struct used when querying for unique trees. -#[derive(Debug, FromQueryResult)] -struct TreeWithSlot { - tree: Vec, - slot: i64, -} - -#[derive(Debug, Default, Clone)] -struct MissingTree { - tree: Pubkey, - slot: u64, -} - -/// Struct used when storing trees to backfill. -struct BackfillTree { - unique_tree: UniqueTree, - backfill_from_seq_1: bool, - #[allow(dead_code)] - slot: u64, -} - -impl BackfillTree { - const fn new(unique_tree: UniqueTree, backfill_from_seq_1: bool, slot: u64) -> Self { - Self { - unique_tree, - backfill_from_seq_1, - slot, - } - } -} - -/// Struct used when querying the max sequence number of a tree. -#[derive(Debug, FromQueryResult, Clone)] -struct MaxSeqItem { - seq: i64, -} - -/// Struct used when querying for items to backfill. -#[derive(Debug, FromQueryResult, Clone)] -struct SimpleBackfillItem { - seq: i64, - slot: i64, -} - -/// Struct used to store sequence number gap info for a given tree. -#[derive(Debug)] -struct GapInfo { - prev: SimpleBackfillItem, - curr: SimpleBackfillItem, -} - -impl GapInfo { - const fn new(prev: SimpleBackfillItem, curr: SimpleBackfillItem) -> Self { - Self { prev, curr } - } -} - -/// Main struct used for backfiller task. -struct Backfiller<'a, T: Messenger> { - config: IngesterConfig, - db: DatabaseConnection, - rpc_client: RpcClient, - rpc_block_config: RpcBlockConfig, - messenger: T, - failure_delay: u64, - cache: &'a AsyncCache, -} - -impl<'a, T: Messenger> Backfiller<'a, T> { - /// Create a new `Backfiller` struct. - async fn new( - pool: Pool, - config: IngesterConfig, - cache: &'a AsyncCache, - ) -> Backfiller<'a, T> { - // Create Sea ORM database connection used later for queries. - let db = SqlxPostgresConnector::from_sqlx_postgres_pool(pool.clone()); - - // Get database listener channel. - let _channel = config - .database_config - .get(DATABASE_LISTENER_CHANNEL_KEY) - .and_then(|u| u.clone().into_string()) - .ok_or(IngesterError::ConfigurationError { - msg: format!( - "Database listener channel missing: {}", - DATABASE_LISTENER_CHANNEL_KEY - ), - }) - .unwrap(); - - // Get RPC URL. 
- let rpc_url = config - .rpc_config - .get(RPC_URL_KEY) - .and_then(|u| u.clone().into_string()) - .ok_or(IngesterError::ConfigurationError { - msg: format!("RPC URL missing: {}", RPC_URL_KEY), - }) - .unwrap(); - - // Get RPC commitment level. - let rpc_commitment_level = config - .rpc_config - .get(RPC_COMMITMENT_KEY) - .and_then(|v| v.as_str()) - .ok_or(IngesterError::ConfigurationError { - msg: format!("RPC commitment level missing: {}", RPC_COMMITMENT_KEY), - }) - .unwrap(); - - // Check if commitment level is valid and create `CommitmentConfig`. - let rpc_commitment = CommitmentConfig { - commitment: CommitmentLevel::from_str(rpc_commitment_level) - .map_err(|_| IngesterError::ConfigurationError { - msg: format!("Invalid RPC commitment level: {}", rpc_commitment_level), - }) - .unwrap(), - }; - - // Create `RpcBlockConfig` used when getting blocks from RPC provider. - let rpc_block_config = RpcBlockConfig { - encoding: Some(UiTransactionEncoding::Base64), - commitment: Some(rpc_commitment), - max_supported_transaction_version: Some(0), - ..RpcBlockConfig::default() - }; - - // Instantiate RPC client. - let rpc_client = RpcClient::new_with_commitment(rpc_url, rpc_commitment); - - // Instantiate messenger. - let mut messenger = T::new(config.get_messneger_client_config()).await.unwrap(); - messenger - .add_stream(TRANSACTION_BACKFILL_STREAM) - .await - .unwrap(); - messenger - .set_buffer_size(TRANSACTION_BACKFILL_STREAM, 10_000_000) - .await; - - Self { - config, - db, - rpc_client, - rpc_block_config, - messenger, - failure_delay: INITIAL_FAILURE_DELAY, - cache, - } - } - - async fn run_finder(&mut self) { - let mut interval = time::interval(tokio::time::Duration::from_secs(TREE_SYNC_INTERVAL)); - let sem = Semaphore::new(1); - loop { - interval.tick().await; - let _permit = sem.acquire().await.unwrap(); - - debug!("Looking for missing trees..."); - - let missing = self.get_missing_trees(&self.db).await; - match missing { - Ok(missing_trees) => { - let txn = self.db.begin().await.unwrap(); - let len = missing_trees.len(); - metric! { - statsd_gauge!("ingester.backfiller.missing_trees", len as f64); - } - debug!("Found {} missing trees", len); - if len > 0 { - let res = self.force_backfill_missing_trees(missing_trees, &txn).await; - - let res2 = txn.commit().await; - match (res, res2) { - (Ok(_), Ok(_)) => { - debug!("Set {} trees to backfill from 0", len); - } - (Err(e), _) => { - error!("Error setting trees to backfill from 0: {}", e); - } - (_, Err(e)) => { - error!("Error setting trees to backfill from 0: error committing transaction: {}", e); - } - } - } - } - Err(e) => { - error!("Error getting missing trees: {}", e); - } - } - } - } - /// Run the backfiller task. - async fn run_filler(&mut self) { - let mut interval = - time::interval(tokio::time::Duration::from_millis(MAX_BACKFILL_CHECK_WAIT)); - loop { - interval.tick().await; - match self.get_trees_to_backfill().await { - Ok(backfill_trees) => { - if !backfill_trees.is_empty() { - for backfill_tree in backfill_trees { - for tries in 1..=NUM_TRIES { - // Get the tree out of nested structs. - let tree = &backfill_tree.unique_tree.tree; - let tree_string = bs58::encode(&tree).into_string(); - info!("Backfilling tree: {tree_string}"); - // Call different methods based on whether tree needs to be backfilled - // completely from seq number 1 or just have any gaps in seq number - // filled. 
- let result = if backfill_tree.backfill_from_seq_1 { - self.backfill_tree_from_seq_1(&backfill_tree).await - } else { - self.fetch_and_plug_gaps(tree).await - }; - - match result { - Ok(opt_max_seq) => { - // Successfully backfilled the tree. Now clean up database. - self.clean_up_backfilled_tree( - opt_max_seq, - tree, - &tree_string, - tries, - ) - .await; - self.reset_delay(); - break; - } - Err(err) => { - error!("Failed to fetch and plug gaps for {tree_string}, attempt {tries}"); - error!("{err}"); - } - } - - if tries == NUM_TRIES { - if let Err(err) = self.mark_tree_as_failed(tree).await { - error!("Error marking tree as failed to backfill: {err}"); - } - } else { - self.sleep_and_increase_delay().await; - } - } - } - } - } - Err(err) => { - // Print error but keep trying. - error!("Could not get trees to backfill from db: {err}"); - self.sleep_and_increase_delay().await; - } - } - } - } - - async fn force_backfill_missing_trees( - &mut self, - missing_trees: Vec<MissingTree>, - cn: &impl ConnectionTrait, - ) -> Result<(), IngesterError> { - let trees = missing_trees - .into_iter() - .map(|tree| backfill_items::ActiveModel { - tree: Set(tree.tree.as_ref().to_vec()), - seq: Set(0), - slot: Set(tree.slot as i64), - force_chk: Set(true), - backfilled: Set(false), - failed: Set(false), - ..Default::default() - }) - .collect::<Vec<_>>(); - - backfill_items::Entity::insert_many(trees).exec(cn).await?; - - Ok(()) - } - - async fn clean_up_backfilled_tree( - &mut self, - opt_max_seq: Option<i64>, - tree: &[u8], - tree_string: &String, - tries: i32, - ) { - match opt_max_seq { - Some(max_seq) => { - debug!("Successfully backfilled tree: {tree_string}, attempt {tries}"); - - // Delete extra rows and mark as backfilled. - match self - .delete_extra_rows_and_mark_as_backfilled(tree, max_seq) - .await - { - Ok(_) => { - // Debug. - debug!("Successfully deleted rows up to {max_seq}"); - } - Err(err) => { - error!("Error deleting rows and marking as backfilled: {err}"); - if let Err(err) = self.mark_tree_as_failed(tree).await { - error!("Error marking tree as failed to backfill: {err}"); - } - } - } - } - None => { - // Debug. - error!("Unexpected error, tree was in list, but no rows found for {tree_string}"); - if let Err(err) = self.mark_tree_as_failed(tree).await { - error!("Error marking tree as failed to backfill: {err}"); - } - } - } - } - - async fn sleep_and_increase_delay(&mut self) { - sleep(Duration::from_millis(self.failure_delay)).await; - - // Increase failure delay up to `MAX_FAILURE_DELAY_MS`.
- self.failure_delay = self.failure_delay.saturating_mul(2); - if self.failure_delay > MAX_FAILURE_DELAY_MS { - self.failure_delay = MAX_FAILURE_DELAY_MS; - } - } - - fn reset_delay(&mut self) { - self.failure_delay = INITIAL_FAILURE_DELAY; - } - - async fn get_missing_trees( - &self, - cn: &impl ConnectionTrait, - ) -> Result<Vec<MissingTree>, IngesterError> { - let mut all_trees: HashMap<Pubkey, SlotSeq> = self.fetch_trees_by_gpa().await?; - debug!("Number of Trees on Chain {}", all_trees.len()); - - if let Some(only_trees) = &self.config.backfiller_trees { - let mut trees = HashSet::with_capacity(only_trees.len()); - for tree in only_trees { - trees.insert(Pubkey::try_from(tree.as_str()).expect("backfiller tree is invalid")); - } - - all_trees.retain(|key, _value| trees.contains(key)); - info!( - "Number of Trees to backfill (with only filter): {}", - all_trees.len() - ); - } - let get_locked_or_failed_trees = Statement::from_string( - DbBackend::Postgres, - "SELECT DISTINCT tree FROM backfill_items WHERE failed = true\n\ - OR locked = true" - .to_string(), - ); - let locked_trees = cn.query_all(get_locked_or_failed_trees).await?; - for row in locked_trees.into_iter() { - let tree = UniqueTree::from_query_result(&row, "")?; - let key = Pubkey::try_from(tree.tree.as_slice()).unwrap(); - all_trees.remove(&key); - } - info!( - "Number of Trees to backfill (with failed/locked filter): {}", - all_trees.len() - ); - - // Get all the local trees already in cl_items and remove them - let get_all_local_trees = Statement::from_string( - DbBackend::Postgres, - "SELECT DISTINCT cl_items.tree FROM cl_items".to_string(), - ); - let force_chk_trees = cn.query_all(get_all_local_trees).await?; - for row in force_chk_trees.into_iter() { - let tree = UniqueTree::from_query_result(&row, "")?; - let key = Pubkey::try_from(tree.tree.as_slice()).unwrap(); - all_trees.remove(&key); - } - info!( - "Number of Trees to backfill (with cl_items existed filter): {}", - all_trees.len() - ); - - // After removing all the trees in backfill_items and the trees already in cl_items, return the list - // of missing trees - let missing_trees = all_trees - .into_iter() - .map(|(k, s)| MissingTree { tree: k, slot: s.0 }) - .collect::<Vec<_>>(); - if !missing_trees.is_empty() { - info!("Number of Missing local trees: {}", missing_trees.len()); - } else { - debug!("No missing trees"); - } - Ok(missing_trees) - } - - async fn get_trees_to_backfill(&self) -> Result<Vec<BackfillTree>, DbErr> { - // Start a db transaction. - let txn = self.db.begin().await?; - - // Get trees with the `force_chk` flag set to true (that have not failed and are not locked).
- let force_chk_trees = Statement::from_string( - DbBackend::Postgres, - "SELECT DISTINCT backfill_items.tree, backfill_items.slot FROM backfill_items\n\ - WHERE backfill_items.force_chk = TRUE\n\ - AND backfill_items.failed = FALSE\n\ - AND backfill_items.locked = FALSE" - .to_string(), - ); - - let force_chk_trees: Vec<TreeWithSlot> = - txn.query_all(force_chk_trees).await.map(|qr| { - qr.iter() - .map(|q| TreeWithSlot::from_query_result(q, "").unwrap()) - .collect() - })?; - - debug!( - "Number of force check trees to backfill: {} {}", - force_chk_trees.len(), - Utc::now() - ); - - for tree in force_chk_trees.iter() { - let stmt = backfill_items::Entity::update_many() - .col_expr(backfill_items::Column::Locked, Expr::value(true)) - .filter(backfill_items::Column::Tree.eq(&*tree.tree)) - .build(DbBackend::Postgres); - - if let Err(err) = txn.execute(stmt).await { - error!( - "Error marking tree {} as locked: {}", - bs58::encode(&tree.tree).into_string(), - err - ); - return Err(err); - } - } - - // Get trees with multiple rows from `backfill_items` table (that have not failed and are not locked). - let multi_row_trees = Statement::from_string( - DbBackend::Postgres, - "SELECT backfill_items.tree, max(backfill_items.slot) as slot FROM backfill_items\n\ - WHERE backfill_items.failed = FALSE - AND backfill_items.locked = FALSE\n\ - GROUP BY backfill_items.tree\n\ - HAVING COUNT(*) > 1" - .to_string(), - ); - - let multi_row_trees: Vec<TreeWithSlot> = - txn.query_all(multi_row_trees).await.map(|qr| { - qr.iter() - .map(|q| TreeWithSlot::from_query_result(q, "").unwrap()) - .collect() - })?; - - debug!( - "Number of multi-row trees to backfill {}", - multi_row_trees.len() - ); - - for tree in multi_row_trees.iter() { - let stmt = backfill_items::Entity::update_many() - .col_expr(backfill_items::Column::Locked, Expr::value(true)) - .filter(backfill_items::Column::Tree.eq(&*tree.tree)) - .build(DbBackend::Postgres); - - if let Err(err) = txn.execute(stmt).await { - error!( - "Error marking tree {} as locked: {}", - bs58::encode(&tree.tree).into_string(), - err - ); - return Err(err); - } - } - - // Close out transaction and relinquish the lock. - txn.commit().await?; - - // Convert force check trees Vec of `UniqueTree` to a Vec of `BackfillTree` (which contain extra info). - let mut trees: Vec<BackfillTree> = force_chk_trees - .into_iter() - .map(|tree| BackfillTree::new(UniqueTree { tree: tree.tree }, true, tree.slot as u64)) - .collect(); - - // Convert multi-row trees Vec of `UniqueTree` to a Vec of `BackfillTree` (which contain extra info).
- let mut multi_row_trees: Vec<BackfillTree> = multi_row_trees - .into_iter() - .map(|tree| BackfillTree::new(UniqueTree { tree: tree.tree }, false, tree.slot as u64)) - .collect(); - - trees.append(&mut multi_row_trees); - - Ok(trees) - } - - async fn backfill_tree_from_seq_1( - &mut self, - btree: &BackfillTree, - ) -> Result<Option<i64>, IngesterError> { - let address = match Pubkey::try_from(btree.unique_tree.tree.as_slice()) { - Ok(pubkey) => pubkey, - Err(error) => { - return Err(IngesterError::DeserializationError(format!( - "failed to parse pubkey: {error:?}" - ))) - } - }; - - let slots = self.find_slots_via_address(&address).await?; - let address = btree.unique_tree.tree.clone(); - for slot in slots { - let gap = GapInfo { - prev: SimpleBackfillItem { - seq: 0, - slot: slot as i64, - }, - curr: SimpleBackfillItem { - seq: 0, - slot: slot as i64, - }, - }; - self.plug_gap(&gap, &address).await?; - } - Ok(Some(0)) - } - - async fn find_slots_via_address(&self, address: &Pubkey) -> Result<Vec<u64>, IngesterError> { - let mut last_sig = None; - let mut slots = HashSet::new(); - // TODO: Any long-running function like this should actually be run in a way that supports re-entry. - // Usually we would break the tasks into smaller parallel tasks and not worry about it, but here we have several linearly dependent async tasks, - // and if one fails, it causes a chain reaction of failures, since each task depends on the one before it. Right now we are just naively looping and - // hoping for the best; what needs to happen is to start saving the state of each task, with the last signature that was returned, in durable storage. - // Then if the task fails, you can restart it from the last signature that was returned. - loop { - let before = last_sig; - let sigs = self - .rpc_client - .get_signatures_for_address_with_config( - address, - GetConfirmedSignaturesForAddress2Config { - before, - until: None, - ..GetConfirmedSignaturesForAddress2Config::default() - }, - ) - .await - .map_err(|e| { - IngesterError::RpcGetDataError(format!( - "GetSignaturesForAddressWithConfig failed {}", - e - )) - })?; - for sig in sigs.iter() { - let slot = sig.slot; - let sig = Signature::from_str(&sig.signature).map_err(|e| { - IngesterError::RpcDataUnsupportedFormat(format!( - "Failed to parse signature {}", - e - )) - })?; - - slots.insert(slot); - last_sig = Some(sig); - } - if sigs.is_empty() || sigs.len() < 1000 { - break; - } - } - Ok(Vec::from_iter(slots)) - } - - #[allow(dead_code)] - async fn get_max_seq(&self, tree: &[u8]) -> Result<Option<i64>, DbErr> { - let query = backfill_items::Entity::find() - .select_only() - .column(backfill_items::Column::Seq) - .filter(backfill_items::Column::Tree.eq(tree)) - .order_by_desc(backfill_items::Column::Seq) - .limit(1) - .build(DbBackend::Postgres); - - let start_seq_vec = MaxSeqItem::find_by_statement(query).all(&self.db).await?; - - Ok(start_seq_vec.last().map(|row| row.seq)) - } - - async fn clear_force_chk_flag(&self, tree: &[u8]) -> Result<UpdateResult, DbErr> { - backfill_items::Entity::update_many() - .col_expr(backfill_items::Column::ForceChk, Expr::value(false)) - .filter(backfill_items::Column::Tree.eq(tree)) - .exec(&self.db) - .await - } - - async fn fetch_trees_by_gpa(&self) -> Result<HashMap<Pubkey, SlotSeq>, IngesterError> { - let config = RpcProgramAccountsConfig { - filters: Some(vec![RpcFilterType::Memcmp(Memcmp::new_raw_bytes( - 0, - vec![1u8], - ))]), - account_config: RpcAccountInfoConfig { - encoding: Some(UiAccountEncoding::Base64), - ..RpcAccountInfoConfig::default() - }, - ..RpcProgramAccountsConfig::default() -
}; - let results: Vec<(Pubkey, Account)> = self - .rpc_client - .get_program_accounts_with_config(&spl_account_compression::id(), config) - .await - .map_err(|e| IngesterError::RpcGetDataError(e.to_string()))?; - let mut list = HashMap::with_capacity(results.len()); - for r in results.into_iter() { - let (pubkey, mut account) = r; - let (header_bytes, rest) = account - .data - .split_at_mut(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); - let header: ConcurrentMerkleTreeHeader = - ConcurrentMerkleTreeHeader::try_from_slice(header_bytes) - .map_err(|e| IngesterError::RpcGetDataError(e.to_string()))?; - - let auth = Pubkey::find_program_address(&[pubkey.as_ref()], &mpl_bubblegum::ID).0; - - let merkle_tree_size = merkle_tree_get_size(&header) - .map_err(|e| IngesterError::RpcGetDataError(e.to_string()))?; - let (tree_bytes, _canopy_bytes) = rest.split_at_mut(merkle_tree_size); - let seq_bytes = tree_bytes[0..8].try_into().map_err(|_e| { - IngesterError::RpcGetDataError("Failed to convert seq bytes to array".to_string()) - })?; - let seq = u64::from_le_bytes(seq_bytes); - list.insert(pubkey, SlotSeq(header.get_creation_slot(), seq)); - - if header.assert_valid_authority(&auth).is_err() { - continue; - } - } - Ok(list) - } - - // Similar to `fetchAndPlugGaps()` in `backfiller.ts`. - async fn fetch_and_plug_gaps(&mut self, tree: &[u8]) -> Result<Option<i64>, IngesterError> { - let (opt_max_seq, gaps) = self.get_missing_data(tree).await?; - - // Similar to `plugGapsBatched()` in `backfiller.ts` (although not batched). - for gap in gaps.iter() { - // Similar to `plugGaps()` in `backfiller.ts`. - self.plug_gap(gap, tree).await?; - } - - Ok(opt_max_seq) - } - - // Similar to `getMissingData()` in `db.ts`. - async fn get_missing_data(&self, tree: &[u8]) -> Result<(Option<i64>, Vec<GapInfo>), DbErr> { - // Get the maximum sequence number that has been backfilled, and use - // that for the starting sequence number for backfilling. - let query = backfill_items::Entity::find() - .select_only() - .column(backfill_items::Column::Seq) - .filter( - Condition::all() - .add(backfill_items::Column::Tree.eq(tree)) - .add(backfill_items::Column::Backfilled.eq(true)), - ) - .order_by_desc(backfill_items::Column::Seq) - .limit(1) - .build(DbBackend::Postgres); - - let start_seq_vec = MaxSeqItem::find_by_statement(query).all(&self.db).await?; - let start_seq = if let Some(seq) = start_seq_vec.last().map(|row| row.seq) { - seq - } else { - 0 - }; - - // Get all rows for the tree that have not yet been backfilled. - let mut query = backfill_items::Entity::find() - .select_only() - .column(backfill_items::Column::Seq) - .column(backfill_items::Column::Slot) - .filter( - Condition::all() - .add(backfill_items::Column::Seq.gte(start_seq)) - .add(backfill_items::Column::Tree.eq(tree)), - ) - .order_by_asc(backfill_items::Column::Seq) - .build(DbBackend::Postgres); - - query.sql = query.sql.replace("SELECT", "SELECT DISTINCT"); - let rows = SimpleBackfillItem::find_by_statement(query) - .all(&self.db) - .await?; - let mut gaps = vec![]; - - // Look at each pair of subsequent rows, looking for a gap in sequence number.
- for (prev, curr) in rows.iter().zip(rows.iter().skip(1)) { - if curr.seq == prev.seq { - let message = format!( - "Error in DB, identical sequence numbers with different slots: {}, {}", - prev.slot, curr.slot - ); - error!("{}", message); - return Err(DbErr::Custom(message)); - } else if curr.seq - prev.seq > 1 { - gaps.push(GapInfo::new(prev.clone(), curr.clone())); - } - } - - // Get the max sequence number if any rows were returned from the query. - let opt_max_seq = rows.last().map(|row| row.seq); - - Ok((opt_max_seq, gaps)) - } - - async fn plug_gap(&mut self, gap: &GapInfo, tree: &[u8]) -> Result<(), IngesterError> { - // TODO: This needs to make sure all slots are available otherwise it will partially - // fail and redo the whole backfill process. So for now checking the max block before - // looping as a quick workaround. - let diff = gap.curr.slot - gap.prev.slot; - let mut num_iter = (diff + 250_000) / 500_000; - let mut start_slot = gap.prev.slot; - let mut end_slot = gap.prev.slot + cmp::min(500_000, diff); - let get_confirmed_slot_tasks = FuturesUnordered::new(); - if num_iter == 0 { - num_iter = 1; - } - for _ in 0..num_iter { - get_confirmed_slot_tasks.push(self.rpc_client.get_blocks_with_commitment( - start_slot as u64, - Some(end_slot as u64), - CommitmentConfig { - commitment: CommitmentLevel::Confirmed, - }, - )); - start_slot = end_slot; - end_slot = cmp::min(end_slot + 500_000, gap.curr.slot); - } - let result_slots = get_confirmed_slot_tasks - .collect::<Vec<_>>() - .await - .into_iter() - .filter_map(|x| x.ok()) - .flatten(); - for slot in result_slots { - let key = format!("block{}", slot); - let mut cached_block = self.cache.get(&key).await; - if cached_block.is_none() { - debug!("Fetching block {} from RPC", slot); - let block = EncodedConfirmedBlock::from( - self.rpc_client - .get_block_with_config(slot, self.rpc_block_config) - .await - .map_err(|e| IngesterError::RpcGetDataError(e.to_string()))?, - ); - let cost = cmp::min(32, block.transactions.len() as i64); - let write = self - .cache - .try_insert_with_ttl( - key.clone(), - block, - cost, - Duration::from_secs(BLOCK_CACHE_DURATION), - ) - .await?; - - if !write { - return Err(IngesterError::CacheStorageWriteError(format!( - "Cache Write Failed on {} is missing.", - &key - ))); - } - self.cache.wait().await?; - cached_block = self.cache.get(&key).await; - } - if cached_block.is_none() { - return Err(IngesterError::CacheStorageWriteError(format!( - "Cache Procedure Failed {} is missing.", - &key - ))); - } - let block_ref = cached_block.unwrap(); - let block_data = block_ref.value(); - - for tx in block_data.transactions.iter() { - // See if transaction has an error.
- let meta = if let Some(meta) = &tx.meta { - if let Some(_err) = &meta.err { - continue; - } - meta - } else { - error!("Unexpected, EncodedTransactionWithStatusMeta struct has no metadata"); - continue; - }; - let decoded_tx = if let Some(decoded_tx) = tx.transaction.decode() { - decoded_tx - } else { - error!("Unable to decode transaction"); - continue; - }; - let sig = decoded_tx.signatures[0].to_string(); - let msg = decoded_tx.message; - let atl_keys = msg.address_table_lookups(); - let tree = Pubkey::try_from(tree) - .map_err(|e| IngesterError::DeserializationError(e.to_string()))?; - let account_keys = msg.static_account_keys(); - let account_keys = { - let mut account_keys_vec = vec![]; - for key in account_keys.iter() { - account_keys_vec.push(key.to_bytes()); - } - if atl_keys.is_some() { - if let OptionSerializer::Some(ad) = &meta.loaded_addresses { - for i in &ad.writable { - let mut output: [u8; 32] = [0; 32]; - bs58::decode(i).into(&mut output).map_err(|e| { - IngesterError::DeserializationError(e.to_string()) - })?; - account_keys_vec.push(output); - } - - for i in &ad.readonly { - let mut output: [u8; 32] = [0; 32]; - bs58::decode(i).into(&mut output).map_err(|e| { - IngesterError::DeserializationError(e.to_string()) - })?; - account_keys_vec.push(output); - } - } - } - account_keys_vec - }; - - // Filter out transactions that don't have to do with the tree we are interested in or - // the Bubblegum program. - let tb = tree.to_bytes(); - let bubblegum = blockbuster::programs::bubblegum::ID.to_bytes(); - if account_keys.iter().all(|pk| *pk != tb && *pk != bubblegum) { - continue; - } - - // Serialize data. - let builder = FlatBufferBuilder::new(); - debug!("Serializing transaction in backfiller {}", sig); - let tx_wrap = EncodedConfirmedTransactionWithStatusMeta { - transaction: tx.to_owned(), - slot, - block_time: block_data.block_time, - }; - let builder = seralize_encoded_transaction_with_status(builder, tx_wrap)?; - self.messenger - .send(TRANSACTION_BACKFILL_STREAM, builder.finished_data()) - .await?; - } - drop(block_ref); - } - - Ok(()) - } - - async fn delete_extra_rows_and_mark_as_backfilled( - &self, - tree: &[u8], - max_seq: i64, - ) -> Result<(), DbErr> { - // Debug. - let test_items = backfill_items::Entity::find() - .filter(backfill_items::Column::Tree.eq(tree)) - .all(&self.db) - .await?; - debug!("Count of items before delete: {}", test_items.len()); - // Delete all rows in the `backfill_items` table for a specified tree, except for the row with - // the caller-specified max seq number. One row for each tree must remain so that gaps can be - // detected after subsequent inserts. - backfill_items::Entity::delete_many() - .filter( - Condition::all() - .add(backfill_items::Column::Tree.eq(tree)) - .add(backfill_items::Column::Seq.ne(max_seq)), - ) - .exec(&self.db) - .await?; - - // Remove any duplicates that have the caller-specified max seq number. This happens when - // a transaction that was already handled is replayed during backfilling. - let items = backfill_items::Entity::find() - .filter( - Condition::all() - .add(backfill_items::Column::Tree.eq(tree)) - .add(backfill_items::Column::Seq.ne(max_seq)), - ) - .all(&self.db) - .await?; - - if items.len() > 1 { - for item in items.iter().skip(1) { - backfill_items::Entity::delete_by_id(item.id) - .exec(&self.db) - .await?; - } - } - - // Mark remaining row as backfilled so future backfilling can start above this sequence number. 
- self.mark_tree_as_backfilled(tree).await?; - - // Clear the `force_chk` flag if it was set. - self.clear_force_chk_flag(tree).await?; - - // Unlock tree. - self.unlock_tree(tree).await?; - - // Debug. - let test_items = backfill_items::Entity::find() - .filter(backfill_items::Column::Tree.eq(tree)) - .all(&self.db) - .await?; - debug!("Count of items after delete: {}", test_items.len()); - Ok(()) - } - - async fn mark_tree_as_backfilled(&self, tree: &[u8]) -> Result<(), DbErr> { - backfill_items::Entity::update_many() - .col_expr(backfill_items::Column::Backfilled, Expr::value(true)) - .filter(backfill_items::Column::Tree.eq(tree)) - .exec(&self.db) - .await?; - - Ok(()) - } - - async fn mark_tree_as_failed(&self, tree: &[u8]) -> Result<(), DbErr> { - backfill_items::Entity::update_many() - .col_expr(backfill_items::Column::Failed, Expr::value(true)) - .filter(backfill_items::Column::Tree.eq(tree)) - .exec(&self.db) - .await?; - - Ok(()) - } - - async fn unlock_tree(&self, tree: &[u8]) -> Result<(), DbErr> { - backfill_items::Entity::update_many() - .col_expr(backfill_items::Column::Locked, Expr::value(false)) - .filter(backfill_items::Column::Tree.eq(tree)) - .exec(&self.db) - .await?; - - Ok(()) - } -} diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index ab1f3cd38..b3c7e1942 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -1,3 +1,4 @@ +use das_metadata_json::sender::SenderArgs; use figment::{ providers::{Env, Format, Yaml}, value::Value, @@ -31,6 +32,7 @@ pub struct IngesterConfig { pub code_version: Option<&'static str>, pub background_task_runner_config: Option<BackgroundTaskRunnerConfig>, pub cl_audits: Option<bool>, // save transaction logs for compressed nfts + pub metadata_json_sender: Option<SenderArgs>, } #[derive(Deserialize, PartialEq, Debug, Clone)] @@ -69,7 +71,7 @@ impl IngesterConfig { .unwrap() } - pub fn get_messneger_client_config(&self) -> MessengerConfig { + pub fn get_messenger_client_config(&self) -> MessengerConfig { let mut mc = self.messenger_config.clone(); mc.connection_config .insert("consumer_id".to_string(), Value::from(rand_string())); @@ -120,7 +122,6 @@ pub const CODE_VERSION: &str = env!("CARGO_PKG_VERSION"); pub enum IngesterRole { #[default] All, - Backfiller, BackgroundTaskRunner, Ingester, } @@ -129,7 +130,6 @@ impl Display for IngesterRole { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { IngesterRole::All => write!(f, "All"), - IngesterRole::Backfiller => write!(f, "Backfiller"), IngesterRole::BackgroundTaskRunner => write!(f, "BackgroundTaskRunner"), IngesterRole::Ingester => write!(f, "Ingester"), } diff --git a/nft_ingester/src/error/mod.rs b/nft_ingester/src/error/mod.rs index 37ed5f24b..bbc55a12c 100644 --- a/nft_ingester/src/error/mod.rs +++ b/nft_ingester/src/error/mod.rs @@ -60,12 +60,6 @@ impl From for IngesterError { } } -impl From<stretto::CacheError> for IngesterError { - fn from(err: stretto::CacheError) -> Self { - IngesterError::CacheStorageWriteError(err.to_string()) - } -} - impl From<serde_json::Error> for IngesterError { fn from(_err: serde_json::Error) -> Self { IngesterError::SerializatonError("JSON ERROR".to_string()) diff --git a/nft_ingester/src/lib.rs b/nft_ingester/src/lib.rs index 9569da43a..254497591 100644 --- a/nft_ingester/src/lib.rs +++ b/nft_ingester/src/lib.rs @@ -1,5 +1,4 @@ pub mod ack; -pub mod backfiller; pub mod config; pub mod database; pub mod error; diff --git a/nft_ingester/src/main.rs b/nft_ingester/src/main.rs index a65f1d166..85b996cc1 100644 --- a/nft_ingester/src/main.rs +++ b/nft_ingester/src/main.rs @@ -1,6
+1,5 @@ mod account_updates; mod ack; -mod backfiller; pub mod config; mod database; pub mod error; @@ -13,7 +12,6 @@ mod transaction_notifications; use crate::{ account_updates::account_worker, ack::ack_worker, - backfiller::setup_backfiller, config::{init_logger, rand_string, setup_config, IngesterRole, WorkerType}, database::setup_database, error::IngesterError, @@ -85,8 +83,15 @@ pub async fn main() -> Result<(), IngesterError> { )), })]; - let mut background_task_manager = - TaskManager::new(rand_string(), database_pool.clone(), bg_task_definitions); + let mut background_task_manager = TaskManager::try_new_async( + rand_string(), + database_pool.clone(), + config.metadata_json_sender.clone(), + bg_task_definitions, + ) + .await + .unwrap(); + // This is how we send new bg tasks let bg_task_listener = background_task_manager .start_listener(role == IngesterRole::BackgroundTaskRunner || role == IngesterRole::All); @@ -101,7 +106,7 @@ pub async fn main() -> Result<(), IngesterError> { let workers = config.get_worker_config().clone(); let (_ack_task, ack_sender) = - ack_worker::<RedisMessenger>(config.get_messneger_client_config()); + ack_worker::<RedisMessenger>(config.get_messenger_client_config()); // iterate all the workers for worker in workers { @@ -121,7 +126,7 @@ pub async fn main() -> Result<(), IngesterError> { if worker.worker_type == WorkerType::Account { let _account = account_worker::<RedisMessenger>( database_pool.clone(), - config.get_messneger_client_config(), + config.get_messenger_client_config(), bg_task_sender.clone(), ack_sender.clone(), if i == 0 { @@ -134,7 +139,7 @@ pub async fn main() -> Result<(), IngesterError> { } else if worker.worker_type == WorkerType::Transaction { let _txn = transaction_worker::<RedisMessenger>( database_pool.clone(), - config.get_messneger_client_config(), + config.get_messenger_client_config(), bg_task_sender.clone(), ack_sender.clone(), if i == 0 { @@ -155,11 +160,6 @@ pub async fn main() -> Result<(), IngesterError> { let background_runner_config = config.clone().background_task_runner_config; tasks.spawn(background_task_manager.start_runner(background_runner_config)); } - // Backfiller Setup ------------------------------------------ - if role == IngesterRole::Backfiller || role == IngesterRole::All { - let backfiller = setup_backfiller::<RedisMessenger>(database_pool.clone(), config.clone()); - tasks.spawn(backfiller); - } let roles_str = role.to_string(); metric!
{ diff --git a/nft_ingester/src/program_transformers/mod.rs b/nft_ingester/src/program_transformers/mod.rs new file mode 100644 index 000000000..584f1f4de --- /dev/null +++ b/nft_ingester/src/program_transformers/mod.rs @@ -0,0 +1,200 @@ +use crate::{error::IngesterError, tasks::TaskData}; +use blockbuster::{ + instruction::{order_instructions, InstructionBundle, IxPair}, + program_handler::ProgramParser, + programs::{ + bubblegum::BubblegumParser, token_account::TokenAccountParser, + token_metadata::TokenMetadataParser, ProgramParseResult, + }, +}; +use log::{debug, error, info}; +use plerkle_serialization::{AccountInfo, Pubkey as FBPubkey, TransactionInfo}; +use sea_orm::{DatabaseConnection, SqlxPostgresConnector}; +use solana_sdk::pubkey::Pubkey; +use sqlx::PgPool; +use std::collections::{HashMap, HashSet, VecDeque}; +use tokio::sync::mpsc::UnboundedSender; + +use crate::program_transformers::{ + bubblegum::handle_bubblegum_instruction, token::handle_token_program_account, + token_metadata::handle_token_metadata_account, +}; + +mod asset_upserts; +mod bubblegum; +mod token; +mod token_metadata; + +pub struct ProgramTransformer { + storage: DatabaseConnection, + task_sender: UnboundedSender<TaskData>, + matchers: HashMap<Pubkey, Box<dyn ProgramParser>>, + key_set: HashSet<Pubkey>, + cl_audits: bool, +} + +impl ProgramTransformer { + pub fn new(pool: PgPool, task_sender: UnboundedSender<TaskData>, cl_audits: bool) -> Self { + let mut matchers: HashMap<Pubkey, Box<dyn ProgramParser>> = HashMap::with_capacity(1); + let bgum = BubblegumParser {}; + let token_metadata = TokenMetadataParser {}; + let token = TokenAccountParser {}; + matchers.insert(bgum.key(), Box::new(bgum)); + matchers.insert(token_metadata.key(), Box::new(token_metadata)); + matchers.insert(token.key(), Box::new(token)); + let hs = matchers.iter().fold(HashSet::new(), |mut acc, (k, _)| { + acc.insert(*k); + acc + }); + let pool: PgPool = pool; + ProgramTransformer { + storage: SqlxPostgresConnector::from_sqlx_postgres_pool(pool), + task_sender, + matchers, + key_set: hs, + cl_audits, + } + } + + pub fn break_transaction<'i>( + &self, + tx: &'i TransactionInfo<'i>, + ) -> VecDeque<(IxPair<'i>, Option<Vec<IxPair<'i>>>)> { + let ref_set: HashSet<&[u8]> = self.key_set.iter().map(|k| k.as_ref()).collect(); + order_instructions(ref_set, tx) + } + + #[allow(clippy::borrowed_box)] + pub fn match_program(&self, key: &FBPubkey) -> Option<&Box<dyn ProgramParser>> { + match Pubkey::try_from(key.0.as_slice()) { + Ok(pubkey) => self.matchers.get(&pubkey), + Err(_error) => { + log::warn!("failed to parse key: {key:?}"); + None + } + } + } + + pub async fn handle_transaction<'a>( + &self, + tx: &'a TransactionInfo<'a>, + ) -> Result<(), IngesterError> { + let sig: Option<&str> = tx.signature(); + info!("Handling Transaction: {:?}", sig); + let instructions = self.break_transaction(tx); + let accounts = tx.account_keys().unwrap_or_default(); + let slot = tx.slot(); + let txn_id = tx.signature().unwrap_or(""); + let mut keys: Vec<FBPubkey> = Vec::with_capacity(accounts.len()); + for k in accounts.into_iter() { + keys.push(*k); + } + let mut not_impl = 0; + let ixlen = instructions.len(); + debug!("Instructions: {}", ixlen); + let contains = instructions + .iter() + .filter(|(ib, _inner)| ib.0 .0.as_ref() == mpl_bubblegum::ID.as_ref()); + debug!("Instructions bgum: {}", contains.count()); + for (outer_ix, inner_ix) in instructions { + let (program, instruction) = outer_ix; + let ix_accounts = instruction.accounts().unwrap().iter().collect::<Vec<_>>(); + let ix_account_len = ix_accounts.len(); + let max = ix_accounts.iter().max().copied().unwrap_or(0) as usize; + if keys.len() <
max { + return Err(IngesterError::DeserializationError( + "Missing Accounts in Serialized Ixn/Txn".to_string(), + )); + } + let ix_accounts = + ix_accounts + .iter() + .fold(Vec::with_capacity(ix_account_len), |mut acc, a| { + if let Some(key) = keys.get(*a as usize) { + acc.push(*key); + } + acc + }); + let ix = InstructionBundle { + txn_id, + program, + instruction: Some(instruction), + inner_ix, + keys: ix_accounts.as_slice(), + slot, + }; + + if let Some(program) = self.match_program(&ix.program) { + debug!("Found a ix for program: {:?}", program.key()); + let result = program.handle_instruction(&ix)?; + let concrete = result.result_type(); + match concrete { + ProgramParseResult::Bubblegum(parsing_result) => { + handle_bubblegum_instruction( + parsing_result, + &ix, + &self.storage, + &self.task_sender, + self.cl_audits, + ) + .await + .map_err(|err| { + error!( + "Failed to handle bubblegum instruction for txn {:?}: {:?}", + sig, err + ); + err + })?; + } + _ => { + not_impl += 1; + } + }; + } + } + + if not_impl == ixlen { + debug!("Not imple"); + return Err(IngesterError::NotImplemented); + } + Ok(()) + } + + pub async fn handle_account_update<'b>( + &self, + acct: AccountInfo<'b>, + ) -> Result<(), IngesterError> { + let owner = acct.owner().unwrap(); + if let Some(program) = self.match_program(owner) { + let result = program.handle_account(&acct)?; + let concrete = result.result_type(); + + if let Some(pubkey) = acct.pubkey() { + info!("Handling Account: {}", bs58::encode(pubkey.0).into_string()); + } + + match concrete { + ProgramParseResult::TokenMetadata(parsing_result) => { + handle_token_metadata_account( + &acct, + parsing_result, + &self.storage, + &self.task_sender, + ) + .await + } + ProgramParseResult::TokenProgramAccount(parsing_result) => { + handle_token_program_account( + &acct, + parsing_result, + &self.storage, + &self.task_sender, + ) + .await + } + _ => Err(IngesterError::NotImplemented), + }?; + } + Ok(()) + } +} diff --git a/nft_ingester/src/tasks/common/mod.rs b/nft_ingester/src/tasks/common/mod.rs index 17ec935a0..7d52439a9 100644 --- a/nft_ingester/src/tasks/common/mod.rs +++ b/nft_ingester/src/tasks/common/mod.rs @@ -2,10 +2,10 @@ use { super::{BgTask, FromTaskData, IngesterError, IntoTaskData, TaskData}, async_trait::async_trait, chrono::{NaiveDateTime, Utc}, + das_core::{DownloadMetadataInfo, DownloadMetadataNotifier}, digital_asset_types::dao::asset_data, futures::future::BoxFuture, log::debug, - program_transformers::{DownloadMetadataInfo, DownloadMetadataNotifier}, reqwest::{Client, ClientBuilder}, sea_orm::*, serde::{Deserialize, Serialize}, @@ -25,7 +25,7 @@ pub fn create_download_metadata_notifier( 'static, Result<(), Box>, > { - let (asset_data_id, uri) = info.into_inner(); + let (asset_data_id, uri, _slot) = info.into_inner(); let task = DownloadMetadata { asset_data_id, uri, diff --git a/nft_ingester/src/tasks/mod.rs b/nft_ingester/src/tasks/mod.rs index ec7a813fe..039ffca9b 100644 --- a/nft_ingester/src/tasks/mod.rs +++ b/nft_ingester/src/tasks/mod.rs @@ -3,8 +3,9 @@ use async_trait::async_trait; use cadence_macros::{is_global_default_set, statsd_count, statsd_histogram}; use chrono::{Duration, NaiveDateTime, Utc}; use crypto::{digest::Digest, sha2::Sha256}; +use das_metadata_json::sender::{SenderArgs, SenderPool}; use digital_asset_types::dao::{sea_orm_active_enums::TaskStatus, tasks}; -use log::{debug, error, info, warn}; +use log::{debug, error, warn}; use sea_orm::{ entity::*, query::*, sea_query::Expr, ActiveValue::Set, ColumnTrait, 
DatabaseConnection, DeleteResult, SqlxPostgresConnector, @@ -98,6 +99,7 @@ pub struct TaskManager { instance_name: String, pool: Pool<Postgres>, producer: Option<UnboundedSender<TaskData>>, + metadata_json_sender: Option<SenderPool>, registered_task_types: Arc<HashMap<String, Box<dyn BgTask>>>, } @@ -232,27 +234,39 @@ impl TaskManager { task.locked_by = Set(Some(instance_name)); } - pub fn new( + pub async fn try_new_async( instance_name: String, pool: Pool<Postgres>, + metadata_json_sender_config: Option<SenderArgs>, task_defs: Vec<Box<dyn BgTask>>, - ) -> Self { + ) -> Result<TaskManager, IngesterError> { let mut tasks = HashMap::new(); for task in task_defs { tasks.insert(task.name().to_string(), task); } - TaskManager { + + let metadata_json_sender = if let Some(config) = metadata_json_sender_config { + Some(SenderPool::try_from_config(config).await.map_err(|_| { + IngesterError::TaskManagerError( + "Failed to connect to metadata json sender".to_string(), + ) + })?) + } else { + None + }; + + Ok(TaskManager { instance_name, pool, producer: None, + metadata_json_sender, registered_task_types: Arc::new(tasks), - } + }) } pub fn new_task_handler( pool: Pool<Postgres>, instance_name: String, - _name: String, task: TaskData, tasks_def: Arc<HashMap<String, Box<dyn BgTask>>>, process_now: bool, @@ -314,23 +328,54 @@ impl TaskManager { let act: tasks::ActiveModel = task; act.save(txn).await.map_err(|e| e.into()) } + pub fn start_listener(&mut self, process_on_receive: bool) -> JoinHandle<()> { let (producer, mut receiver) = mpsc::unbounded_channel::<TaskData>(); self.producer = Some(producer); let task_map = Arc::clone(&self.registered_task_types); let pool = self.pool.clone(); let instance_name = self.instance_name.clone(); + let sender_pool = self.metadata_json_sender.clone(); tokio::task::spawn(async move { while let Some(task) = receiver.recv().await { + let task_name = task.name; + if let Some(task_created_time) = task.created_at { - let bus_time = - Utc::now().timestamp_millis() - task_created_time.timestamp_millis(); + let bus_time = Utc::now().timestamp_millis() - - task_created_time.and_utc().timestamp_millis(); metric! { statsd_histogram!("ingester.bgtask.bus_time", bus_time as u64, "type" => task.name); } } - let name = instance_name.clone(); + if task_name == "DownloadMetadata" { + if let Some(sender_pool) = sender_pool.clone() { + let download_metadata_task = DownloadMetadata::from_task_data(task); + + if let Ok(download_metadata_task) = download_metadata_task { + if sender_pool + .push(&download_metadata_task.asset_data_id) + .await + .is_err() + { + metric! { + statsd_count!("ingester.metadata_json.send.failed", 1); + } + } else { + metric! { + statsd_count!("ingester.bgtask.new", 1, "type" => task_name); + } + } + } else { + metric! { + statsd_count!("ingester.metadata_json.send.failed", 1); + } + } + + continue; + } + } + if let Ok(hash) = task.hash() { let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool.clone()); let task_entry = tasks::Entity::find_by_id(hash.clone()) @@ -349,7 +394,6 @@ impl TaskManager { TaskManager::new_task_handler( pool.clone(), instance_name.clone(), - name, task, Arc::clone(&task_map), process_on_receive, @@ -402,7 +446,7 @@ impl TaskManager { let delete_res = TaskManager::purge_old_tasks(&conn, purge_time).await; match delete_res { Ok(res) => { - info!("deleted {} tasks entries", res.rows_affected); + debug!("deleted {} tasks entries", res.rows_affected); metric!
{ statsd_count!("ingester.bgtask.purged_tasks", i64::try_from(res.rows_affected).unwrap_or(1)); } diff --git a/ops/Cargo.toml b/ops/Cargo.toml index e6cb7a870..14f741870 100644 --- a/ops/Cargo.toml +++ b/ops/Cargo.toml @@ -11,27 +11,31 @@ name = "das-ops" [dependencies] anchor-client = { workspace = true } anyhow = { workspace = true } -backon = { workspace = true } borsh = { workspace = true } +bs58 = { workspace = true } cadence = { workspace = true } cadence-macros = { workspace = true } clap = { workspace = true, features = ["derive", "cargo", "env"] } +das-bubblegum = { workspace = true } das-core = { workspace = true } digital_asset_types = { workspace = true } env_logger = { workspace = true } figment = { workspace = true } -flatbuffers = { workspace = true } futures = { workspace = true } indicatif = { workspace = true } log = { workspace = true } mpl-bubblegum = { workspace = true } -plerkle_messenger = { workspace = true } -plerkle_serialization = { workspace = true } +program_transformers = { workspace = true } sea-orm = { workspace = true } solana-account-decoder = { workspace = true } solana-client = { workspace = true } +solana-program = { workspace = true } solana-sdk = { workspace = true } solana-transaction-status = { workspace = true } spl-account-compression = { workspace = true } +sqlx = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } +mpl-token-metadata = { workspace = true } +serde_json = { workspace = true } diff --git a/ops/README.md b/ops/README.md new file mode 100644 index 000000000..d81cc4a24 --- /dev/null +++ b/ops/README.md @@ -0,0 +1,41 @@ +### DAS Ops + +DAS Ops is a collection of operational tools and scripts for managing and maintaining the Digital Asset RPC infrastructure. + +> **Note:** Run these commands from the root of the project + +### Setup + +```bash +sudo docker compose up db +``` + +### Running the cli + +```bash +cargo run --bin das-ops -- --help +``` + +#### Required Args + +- `--solana-rpc-url` - RPC URL of the Solana cluster +- `--database-url` - URL of the Postgres database (if using Docker: `postgres://solana:solana@localhost:5432/solana`) + +### Commands + +- `account` : Account related operations + + #### Subcommands + + - `program ` command is used to backfill the index against on-chain accounts owned by a program + + - `single ` command is used to backfill the index against a single account + + - `nft ` command is used to backfill the index against an NFT mint, token metadata, and token account + +- `bubblegum` : Bubblegum program related operations + + #### Subcommands + + - `backfill` command is used to cross-reference the index against on-chain accounts. It crawls through trees and backfills any missed tree transactions. 
diff --git a/ops/src/account/account_details.rs b/ops/src/account/account_details.rs deleted file mode 100644 index 0490f1ca1..000000000 --- a/ops/src/account/account_details.rs +++ /dev/null @@ -1,30 +0,0 @@ -use anyhow::Result; -use das_core::Rpc; -use solana_sdk::{account::Account, pubkey::Pubkey}; - -pub struct AccountDetails<'a> { - pub account: Account, - pub slot: u64, - pub pubkey: &'a Pubkey, -} - -impl<'a> AccountDetails<'a> { - pub fn new(account: Account, slot: u64, pubkey: &'a Pubkey) -> Self { - Self { - account, - slot, - pubkey, - } - } - - pub async fn fetch(rpc: &Rpc, pubkey: &'a Pubkey) -> Result<AccountDetails<'a>> { - let account_response = rpc.get_account(pubkey).await?; - let slot = account_response.context.slot; - - let account = account_response - .value - .ok_or_else(|| anyhow::anyhow!("Account not found for pubkey: {}", pubkey))?; - - Ok(Self::new(account, slot, pubkey)) - } -} diff --git a/ops/src/account/account_info.rs b/ops/src/account/account_info.rs new file mode 100644 index 000000000..6ce48a544 --- /dev/null +++ b/ops/src/account/account_info.rs @@ -0,0 +1,28 @@ +use anyhow::Result; +use das_core::Rpc; +use program_transformers::AccountInfo; +use solana_sdk::pubkey::Pubkey; + +#[derive(thiserror::Error, Debug)] +pub enum AccountInfoError { + #[error("account not found for pubkey: {pubkey}")] + NotFound { pubkey: Pubkey }, + #[error("failed to fetch account info")] + SolanaRequestError(#[from] solana_client::client_error::ClientError), +} + +pub async fn fetch(rpc: &Rpc, pubkey: Pubkey) -> Result<AccountInfo, AccountInfoError> { + let account_response = rpc.get_account(&pubkey).await?; + let slot = account_response.context.slot; + + let account = account_response + .value + .ok_or(AccountInfoError::NotFound { pubkey })?; + + Ok(AccountInfo { + slot, + pubkey, + owner: account.owner, + data: account.data, + }) +} diff --git a/ops/src/account/cmd.rs b/ops/src/account/cmd.rs index 659758d1c..c36fdd22e 100644 --- a/ops/src/account/cmd.rs +++ b/ops/src/account/cmd.rs @@ -1,4 +1,4 @@ -use super::{program, single}; +use super::{nft, program, single}; use anyhow::Result; use clap::{Args, Subcommand}; @@ -10,6 +10,9 @@ pub enum Commands { /// The 'single' command is used to backfill the index against a single account. #[clap(name = "single")] Single(single::Args), + /// The 'nft' command is used to backfill the index against an NFT mint, token metadata, and token account.
+ #[clap(name = "nft")] + Nft(nft::Args), } #[derive(Debug, Clone, Args)] @@ -26,6 +29,9 @@ pub async fn subcommand(subcommand: AccountCommand) -> Result<()> { Commands::Single(args) => { single::run(args).await?; } + Commands::Nft(args) => { + nft::run(args).await?; + } } Ok(()) diff --git a/ops/src/account/mod.rs b/ops/src/account/mod.rs index 6eeff799f..e770cd362 100644 --- a/ops/src/account/mod.rs +++ b/ops/src/account/mod.rs @@ -1,5 +1,6 @@ -mod account_details; +mod account_info; mod cmd; +mod nft; mod program; mod single; diff --git a/ops/src/account/nft.rs b/ops/src/account/nft.rs new file mode 100644 index 000000000..f92e005d0 --- /dev/null +++ b/ops/src/account/nft.rs @@ -0,0 +1,95 @@ +use std::sync::Arc; + +use anyhow::Result; +use tokio::task::JoinHandle; + +use super::account_info; +use log::error; + +use clap::Parser; +use das_core::{ + connect_db, create_download_metadata_notifier, MetadataJsonDownloadWorkerArgs, PoolArgs, Rpc, + SolanaRpcArgs, +}; +use mpl_token_metadata::accounts::Metadata; +use program_transformers::ProgramTransformer; +use solana_sdk::pubkey::Pubkey; + +#[derive(Debug, Parser, Clone)] +pub struct Args { + /// Database configuration + #[clap(flatten)] + pub database: PoolArgs, + + #[clap(flatten)] + pub metadata_json_download_worker: MetadataJsonDownloadWorkerArgs, + + /// Solana configuration + #[clap(flatten)] + pub solana: SolanaRpcArgs, + + /// NFT Mint address + #[clap(value_parser = parse_pubkey)] + pub mint: Pubkey, +} + +fn parse_pubkey(s: &str) -> Result<Pubkey, &'static str> { + Pubkey::try_from(s).map_err(|_| "Failed to parse public key") +} + +pub async fn run(config: Args) -> Result<()> { + let rpc = Rpc::from_config(&config.solana); + let pool = connect_db(&config.database).await?; + let metadata_json_download_db_pool = pool.clone(); + + let (metadata_json_download_worker, metadata_json_download_sender) = config + .metadata_json_download_worker + .start(metadata_json_download_db_pool)?; + + let download_metadata_notifier = + create_download_metadata_notifier(metadata_json_download_sender.clone()).await; + + let mint = config.mint; + + let metadata = Metadata::find_pda(&mint).0; + + let mut accounts_to_fetch = vec![mint, metadata]; + + let token_account = rpc.get_token_largest_account(mint).await; + + if let Ok(token_account) = token_account { + accounts_to_fetch.push(token_account); + } + + let program_transformer = Arc::new(ProgramTransformer::new(pool, download_metadata_notifier)); + let mut tasks = Vec::new(); + + for account in accounts_to_fetch { + let program_transformer = Arc::clone(&program_transformer); + let rpc = rpc.clone(); + + let task: JoinHandle<Result<()>> = tokio::spawn(async move { + let account_info = account_info::fetch(&rpc, account).await?; + if let Err(e) = program_transformer + .handle_account_update(&account_info) + .await + { + error!("Failed to handle account update: {:?}", e); + } + + Ok(()) + }); + + tasks.push(task); + } + + futures::future::try_join_all(tasks).await?; + + drop(metadata_json_download_sender); + + drop(program_transformer); + + metadata_json_download_worker.await?; + + Ok(()) +} diff --git a/ops/src/account/program.rs b/ops/src/account/program.rs index 7dd990582..e25431f52 100644 --- a/ops/src/account/program.rs +++ b/ops/src/account/program.rs @@ -1,24 +1,20 @@ +use super::account_info; use anyhow::Result; - -use super::account_details::AccountDetails; use clap::Parser; -use das_core::{MetricsArgs, QueueArgs, QueuePool, Rpc, SolanaRpcArgs}; -use flatbuffers::FlatBufferBuilder; -use plerkle_serialization::{ -
serializer::serialize_account, solana_geyser_plugin_interface_shims::ReplicaAccountInfoV2, +use das_core::{ + connect_db, create_download_metadata_notifier, MetadataJsonDownloadWorkerArgs, PoolArgs, Rpc, + SolanaRpcArgs, }; +use futures::{stream::FuturesUnordered, StreamExt}; +use log::error; +use program_transformers::{AccountInfo, ProgramTransformer}; use solana_sdk::pubkey::Pubkey; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::task; #[derive(Debug, Parser, Clone)] pub struct Args { - /// Redis configuration - #[clap(flatten)] - pub queue: QueueArgs, - - /// Metrics configuration - #[clap(flatten)] - pub metrics: MetricsArgs, - /// Solana configuration #[clap(flatten)] pub solana: SolanaRpcArgs, @@ -30,6 +26,22 @@ pub struct Args { /// The public key of the program to backfill #[clap(value_parser = parse_pubkey)] pub program: Pubkey, + + /// The maximum buffer size for accounts + #[arg(long, env, default_value = "10000")] + pub max_buffer_size: usize, + + /// The number of worker threads + #[arg(long, env, default_value = "1000")] + pub account_worker_count: usize, + + /// Metadata JSON download worker configuration + #[clap(flatten)] + pub metadata_json_download_worker: MetadataJsonDownloadWorkerArgs, + + /// Database configuration + #[clap(flatten)] + pub database: PoolArgs, } fn parse_pubkey(s: &str) -> Result<Pubkey, &'static str> { @@ -37,45 +49,68 @@ fn parse_pubkey(s: &str) -> Result<Pubkey, &'static str> { } pub async fn run(config: Args) -> Result<()> { - let rpc = Rpc::from_config(config.solana); - let queue = QueuePool::try_from_config(config.queue).await?; + let rpc = Rpc::from_config(&config.solana); + let pool = connect_db(&config.database).await?; + let num_workers = config.account_worker_count; - let accounts = rpc.get_program_accounts(&config.program, None).await?; + let metadata_json_download_db_pool = pool.clone(); + + let (metadata_json_download_worker, metadata_json_download_sender) = config + .metadata_json_download_worker + .start(metadata_json_download_db_pool)?; + let (tx, mut rx) = mpsc::channel::<Vec<AccountInfo>>(config.max_buffer_size); + let download_metadata_notifier = + create_download_metadata_notifier(metadata_json_download_sender.clone()).await; + + let mut workers = FuturesUnordered::new(); + let program_transformer = Arc::new(ProgramTransformer::new(pool, download_metadata_notifier)); + + let account_info_worker_manager = tokio::spawn(async move { + while let Some(account_infos) = rx.recv().await { + if workers.len() >= num_workers { + workers.next().await; + } + + for account_info in account_infos { + let program_transformer = Arc::clone(&program_transformer); + + let worker = task::spawn(async move { + if let Err(e) = program_transformer + .handle_account_update(&account_info) + .await + { + error!("Failed to handle account update: {:?}", e); + } + }); + + workers.push(worker); + } + } + + while (workers.next().await).is_some() {} + }); + + let accounts = rpc.get_program_accounts(&config.program, None).await?; let accounts_chunks = accounts.chunks(config.batch_size); for batch in accounts_chunks { let results = futures::future::try_join_all( batch .iter() - .map(|(pubkey, _account)| AccountDetails::fetch(&rpc, pubkey)), + .cloned() + .map(|(pubkey, _account)| account_info::fetch(&rpc, pubkey)), ) .await?; - for account_detail in results { - let AccountDetails { - account, - slot, - pubkey, - } = account_detail; - let builder = FlatBufferBuilder::new(); - let account_info = ReplicaAccountInfoV2 { - pubkey: &pubkey.to_bytes(), - lamports: account.lamports, - owner: &account.owner.to_bytes(), -
executable: account.executable, - rent_epoch: account.rent_epoch, - data: &account.data, - write_version: 0, - txn_signature: None, - }; - - let fbb = serialize_account(builder, &account_info, slot, false); - let bytes = fbb.finished_data(); - - queue.push_account_backfill(bytes).await?; - } + tx.send(results).await?; } + account_info_worker_manager.await?; + + drop(metadata_json_download_sender); + + metadata_json_download_worker.await?; + Ok(()) } diff --git a/ops/src/account/single.rs b/ops/src/account/single.rs index 41269bb05..81d6e0ad4 100644 --- a/ops/src/account/single.rs +++ b/ops/src/account/single.rs @@ -1,23 +1,22 @@ use anyhow::Result; -use super::account_details::AccountDetails; +use super::account_info; use clap::Parser; -use das_core::{MetricsArgs, QueueArgs, QueuePool, Rpc, SolanaRpcArgs}; -use flatbuffers::FlatBufferBuilder; -use plerkle_serialization::{ - serializer::serialize_account, solana_geyser_plugin_interface_shims::ReplicaAccountInfoV2, +use das_core::{ + connect_db, create_download_metadata_notifier, MetadataJsonDownloadWorkerArgs, PoolArgs, Rpc, + SolanaRpcArgs, }; +use program_transformers::ProgramTransformer; use solana_sdk::pubkey::Pubkey; #[derive(Debug, Parser, Clone)] pub struct Args { - /// Redis configuration + /// Database configuration #[clap(flatten)] - pub queue: QueueArgs, + pub database: PoolArgs, - /// Metrics configuration #[clap(flatten)] - pub metrics: MetricsArgs, + pub metadata_json_download_worker: MetadataJsonDownloadWorkerArgs, /// Solana configuration #[clap(flatten)] @@ -32,30 +31,28 @@ fn parse_pubkey(s: &str) -> Result<Pubkey, &'static str> { } pub async fn run(config: Args) -> Result<()> { - let rpc = Rpc::from_config(config.solana); - let queue = QueuePool::try_from_config(config.queue).await?; - - let AccountDetails { - account, - slot, - pubkey, - } = AccountDetails::fetch(&rpc, &config.account).await?; - let builder = FlatBufferBuilder::new(); - let account_info = ReplicaAccountInfoV2 { - pubkey: &pubkey.to_bytes(), - lamports: account.lamports, - owner: &account.owner.to_bytes(), - executable: account.executable, - rent_epoch: account.rent_epoch, - data: &account.data, - write_version: 0, - txn_signature: None, - }; - - let fbb = serialize_account(builder, &account_info, slot, false); - let bytes = fbb.finished_data(); - - queue.push_account_backfill(bytes).await?; + let rpc = Rpc::from_config(&config.solana); + let pool = connect_db(&config.database).await?; + let metadata_json_download_db_pool = pool.clone(); + + let (metadata_json_download_worker, metadata_json_download_sender) = config + .metadata_json_download_worker + .start(metadata_json_download_db_pool)?; + + { + let download_metadata_notifier = + create_download_metadata_notifier(metadata_json_download_sender).await; + + let program_transformer = ProgramTransformer::new(pool, download_metadata_notifier); + + let account_info = account_info::fetch(&rpc, config.account).await?; + + program_transformer + .handle_account_update(&account_info) + .await?; + } + + metadata_json_download_worker.await?; Ok(()) } diff --git a/ops/src/bubblegum/README.md b/ops/src/bubblegum/README.md index d1dc5772b..20f4bf235 100644 --- a/ops/src/bubblegum/README.md +++ b/ops/src/bubblegum/README.md @@ -8,79 +8,6 @@ Command line arguments can also be set through environment variables. ### Backfill -The `backfill` command initiates the crawling and backfilling process. It requires the Solana RPC URL, the database URL, and the messenger Redis URL. +The `backfill` command initiates the crawling and backfilling process.
It requires the Solana RPC URL and the database URL. -**warning**: The command expects full archive access to transactions. Before proceeding ensure your RPC is able to serve complete transaction history for Solana. - -```mermaid -flowchart - start((Start)) -->init[Initialize RPC, DB] - init --> fetchTreesDB[Fetch Trees from DB] - fetchTreesDB --> findGapsDB[Find Gaps in DB] - findGapsDB --> enqueueGapFills[Enqueue Gap Fills] - enqueueGapFills --> gapWorkerManager[Gap Worker Manager] - gapWorkerManager --> crawlSignatures[Crawl Solana RPC] - crawlSignatures --> enqueueSignatures[Enqueue Signatures] - enqueueSignatures --> transactionWorkerManager[Transaction Worker Manager] - transactionWorkerManager --> fetchTransactionsRPC[Fetch Transactions RPC] - fetchTransactionsRPC --> processTransactions[Push Transaction to Messenger] - processTransactions ---> Finished -``` - -``` -Usage: das-ops bubblegum backfill [OPTIONS] --database-url <DATABASE_URL> --messenger-redis-url <MESSENGER_REDIS_URL> --solana-rpc-url <SOLANA_RPC_URL> - -Options: - --tree-crawler-count <TREE_CRAWLER_COUNT> - Number of tree crawler workers [env: TREE_CRAWLER_COUNT=] [default: 20] - --signature-channel-size <SIGNATURE_CHANNEL_SIZE> - The size of the signature channel [env: SIGNATURE_CHANNEL_SIZE=] [default: 10000] - --gap-channel-size <GAP_CHANNEL_SIZE> - The size of the signature channel [env: GAP_CHANNEL_SIZE=] [default: 1000] - --transaction-worker-count <TRANSACTION_WORKER_COUNT> - The number of transaction workers [env: TRANSACTION_WORKER_COUNT=] [default: 100] - --gap-worker-count <GAP_WORKER_COUNT> - The number of gap workers [env: GAP_WORKER_COUNT=] [default: 25] - --only-trees <ONLY_TREES> - The list of trees to crawl. If not specified, all trees will be crawled [env: ONLY_TREES=] - --database-url <DATABASE_URL> - The database URL [env: DATABASE_URL=] - --database-max-connections <DATABASE_MAX_CONNECTIONS> - The maximum number of connections to the database [env: DATABASE_MAX_CONNECTIONS=] [default: 125] - --database-min-connections <DATABASE_MIN_CONNECTIONS> - The minimum number of connections to the database [env: DATABASE_MIN_CONNECTIONS=] [default: 5] - --messenger-redis-url <MESSENGER_REDIS_URL> - [env: MESSENGER_REDIS_URL=] - --messenger-redis-batch-size <MESSENGER_REDIS_BATCH_SIZE> - [env: MESSENGER_REDIS_BATCH_SIZE=] [default: 100] - --messenger-queue-connections <MESSENGER_QUEUE_CONNECTIONS> - [env: MESSENGER_QUEUE_CONNECTIONS=] [default: 25] - --messenger-queue-stream <MESSENGER_QUEUE_STREAM> - [env: MESSENGER_QUEUE_STREAM=] [default: TXNFILL] - --metrics-host <METRICS_HOST> - [env: METRICS_HOST=] [default: 127.0.0.1] - --metrics-port <METRICS_PORT> - [env: METRICS_PORT=] [default: 8125] - --metrics-prefix <METRICS_PREFIX> - [env: METRICS_PREFIX=] [default: das.backfiller] - --solana-rpc-url <SOLANA_RPC_URL> - [env: SOLANA_RPC_URL=] - -h, --help - Print help -``` - -### Metrics - -The bubblegum command provides several metrics for monitoring performance and status: - -Metric | Description ---- | --- -transaction.failed | Count of failed transaction -transaction.succeeded | Count of successfully queued transaction -transaction.queued | Time for a transaction to be queued -gap.failed | Count of failed gap crawling -gap.succeeded | Count of successfully crawled gaps -gap.queued | Time for a gap to be queued -tree.succeeded | Count of completed tree crawl -tree.crawled | Time to crawl a tree -job.completed | Time to complete the job +**warning**: The command expects full archive access to transactions. Before proceeding ensure your RPC is able to serve complete transaction history for Solana. \ No newline at end of file
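+
+A hypothetical invocation with the two required arguments (any remaining flags come from `BackfillArgs` and are omitted here):
+
+```bash
+cargo run --bin das-ops -- bubblegum backfill \
+  --solana-rpc-url "$SOLANA_RPC_URL" \
+  --database-url "postgres://solana:solana@localhost:5432/solana"
+```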
diff --git a/ops/src/bubblegum/audit.rs b/ops/src/bubblegum/audit.rs index 4da0a2652..29b2406c7 100644 --- a/ops/src/bubblegum/audit.rs +++ b/ops/src/bubblegum/audit.rs @@ -1,10 +1,15 @@ use anyhow::Result; + +use borsh::BorshSerialize; use clap::Parser; use das_core::{connect_db, MetricsArgs, PoolArgs, Rpc, SolanaRpcArgs}; -use digital_asset_types::dao::cl_audits_v2; use futures::future; -use sea_orm::{CursorTrait, EntityTrait, SqlxPostgresConnector}; -use solana_sdk::signature::Signature; +use log::debug; +use std::{path::PathBuf, str::FromStr}; + +use digital_asset_types::dao::{cl_audits_v2, sea_orm_active_enums::Instruction}; +use sea_orm::{ColumnTrait, CursorTrait, EntityTrait, QueryFilter, SqlxPostgresConnector}; +use solana_sdk::{pubkey::Pubkey, signature::Signature}; use solana_transaction_status::EncodedConfirmedTransactionWithStatusMeta; use tokio::io::{stdout, AsyncWriteExt}; @@ -25,6 +30,15 @@ pub struct Args { #[arg(long, env, default_value = "10000")] pub batch_size: u64, + + #[arg(long, env)] + pub only_trees: Option<Vec<String>>, + + #[arg(long, env, default_value = "false")] + pub fix: bool, + + #[arg(long, env)] + pub log_path: Option<PathBuf>, } pub async fn run(config: Args) -> Result<()> { @@ -34,10 +48,41 @@ pub async fn run(config: Args) -> Result<()> { let mut output = stdout(); let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool); - let mut after = None; + let mut after: Option<i64> = None; + + if let Some(log_path) = config.log_path { + after = match std::fs::read_to_string(log_path) { + Ok(content) => content + .lines() + .last() + .and_then(|last_entry| last_entry.parse().ok()), + Err(_) => None, + }; + } loop { - let mut query = cl_audits_v2::Entity::find().cursor_by(cl_audits_v2::Column::Id); + let mut query = cl_audits_v2::Entity::find(); + + if let Some(only_trees) = &config.only_trees { + let pubkeys = only_trees + .iter() + .map(|address| { + Pubkey::from_str(address) + .map_err(|e| anyhow::anyhow!(e.to_string()))?
+                })
+                .collect::<Result<Vec<Pubkey>, anyhow::Error>>()?;
+
+            let pubkeys = pubkeys
+                .into_iter()
+                .map(|pubkey| pubkey.try_to_vec())
+                .collect::<Result<Vec<Vec<u8>>, std::io::Error>>()?;
+
+            query = query.filter(cl_audits_v2::Column::Tree.is_in(pubkeys));
+        }
+        let mut query = query.cursor_by(cl_audits_v2::Column::Id);
+
         let mut query = query.first(config.batch_size);
 
         if let Some(after) = after {
@@ -54,11 +99,24 @@ pub async fn run(config: Args) -> Result<()> {
 
         let transactions = future::join_all(transactions).await;
 
-        for (signature, transaction) in transactions.into_iter().flatten() {
-            if let Some(meta) = transaction.transaction.meta {
+        for response in transactions.into_iter().flatten() {
+            if let Some(meta) = response.transaction.transaction.meta {
                 if meta.err.is_some() {
+                    if config.fix {
+                        match response.entry.instruction {
+                            Instruction::Transfer => {
+                                let model: cl_audits_v2::ActiveModel =
+                                    response.entry.clone().into();
+
+                                cl_audits_v2::Entity::delete(model).exec(&conn).await?;
+                            }
+                            _ => {
+                                debug!("Unhandled instruction: {:?}", response.entry.instruction);
+                            }
+                        }
+                    }
                     output
-                        .write_all(format!("{}\n", signature).as_bytes())
+                        .write_all(format!("{}\n", response.entry.id).as_bytes())
                         .await?;
 
                     output.flush().await?;
@@ -76,13 +134,27 @@ pub async fn run(config: Args) -> Result<()> {
 
     Ok(())
 }
 
+struct FetchTransactionResponse {
+    pub entry: cl_audits_v2::Model,
+    pub transaction: EncodedConfirmedTransactionWithStatusMeta,
+}
+
+impl FetchTransactionResponse {
+    fn new(
+        entry: cl_audits_v2::Model,
+        transaction: EncodedConfirmedTransactionWithStatusMeta,
+    ) -> Self {
+        Self { entry, transaction }
+    }
+}
+
 async fn fetch_transaction(
     entry: cl_audits_v2::Model,
     solana_rpc: Rpc,
-) -> Result<(Signature, EncodedConfirmedTransactionWithStatusMeta)> {
+) -> Result<FetchTransactionResponse> {
     let signature = Signature::try_from(entry.tx.as_ref())?;
 
     let transaction = solana_rpc.get_transaction(&signature).await?;
 
-    Ok((signature, transaction))
+    Ok(FetchTransactionResponse::new(entry, transaction))
 }
diff --git a/ops/src/bubblegum/backfiller.rs b/ops/src/bubblegum/backfiller.rs
index 8b0e5ed4f..c13315aaa 100644
--- a/ops/src/bubblegum/backfiller.rs
+++ b/ops/src/bubblegum/backfiller.rs
@@ -1,325 +1,53 @@
-use super::tree::{TreeErrorKind, TreeGapFill, TreeGapModel, TreeResponse};
 use anyhow::Result;
-use cadence_macros::{statsd_count, statsd_time};
 use clap::Parser;
-use das_core::{
-    connect_db, setup_metrics, MetricsArgs, PoolArgs, QueueArgs, QueuePool, Rpc, SolanaRpcArgs,
-};
-use digital_asset_types::dao::cl_audits_v2;
-use flatbuffers::FlatBufferBuilder;
-use futures::{stream::FuturesUnordered, StreamExt};
-use indicatif::HumanDuration;
-use log::{error, info};
-use plerkle_serialization::serializer::seralize_encoded_transaction_with_status;
-use sea_orm::{
-    ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QueryOrder, SqlxPostgresConnector,
-};
-use solana_sdk::signature::Signature;
-use std::time::Instant;
-use tokio::{sync::mpsc, task::JoinHandle};
+use das_bubblegum::{start_backfill, BackfillArgs, BubblegumContext};
+use das_core::{connect_db, PoolArgs, Rpc, SolanaRpcArgs};
 
 #[derive(Debug, Parser, Clone)]
 pub struct Args {
-    /// Number of tree crawler workers
-    #[arg(long, env, default_value = "20")]
-    pub tree_crawler_count: usize,
-
-    /// The size of the signature channel.
-    #[arg(long, env, default_value = "10000")]
-    pub signature_channel_size: usize,
-
-    /// The size of the gap channel.
-    #[arg(long, env, default_value = "1000")]
-    pub gap_channel_size: usize,
-
-    /// The number of transaction workers.
-    #[arg(long, env, default_value = "100")]
-    pub transaction_worker_count: usize,
-
-    /// The number of gap workers.
-    #[arg(long, env, default_value = "25")]
-    pub gap_worker_count: usize,
-
-    /// The list of trees to crawl. If not specified, all trees will be crawled.
-    #[arg(long, env, use_value_delimiter = true)]
-    pub only_trees: Option<Vec<String>>,
+    /// Backfill Bubblegum Args
+    #[clap(flatten)]
+    pub backfill_bubblegum: BackfillArgs,
 
     /// Database configuration
     #[clap(flatten)]
     pub database: PoolArgs,
 
-    /// Redis configuration
-    #[clap(flatten)]
-    pub queue: QueueArgs,
-
-    /// Metrics configuration
-    #[clap(flatten)]
-    pub metrics: MetricsArgs,
-
     /// Solana configuration
     #[clap(flatten)]
     pub solana: SolanaRpcArgs,
 }
 
-/// Runs the backfilling process for the tree crawler.
+/// Executes the backfilling operation for the tree crawler.
 ///
-/// This function initializes the necessary components for the backfilling process,
-/// including database connections, RPC clients, and worker managers for handling
-/// transactions and gaps. It then proceeds to fetch the trees that need to be crawled
-/// and manages the crawling process across multiple workers.
+/// This function initializes the necessary components for the backfilling operation,
+/// such as database connections and RPC clients, and then delegates the actual
+/// backfilling logic to the `das_bubblegum` crate.
 ///
-/// The function handles the following major tasks:
-/// - Establishing connections to the database and initializing RPC clients.
-/// - Setting up channels for communication between different parts of the system.
-/// - Spawning worker managers for processing transactions and gaps.
-/// - Fetching trees from the database and managing their crawling process.
-/// - Reporting metrics and logging information throughout the process.
+/// The function undertakes the following key tasks:
+/// - Establishes database connections and initializes RPC clients.
+/// - Creates a context for the backfilling operation.
+/// - Invokes the `start_backfill` function from the `das_bubblegum` crate.
 ///
 /// # Arguments
 ///
-/// * `config` - A configuration object containing settings for the backfilling process,
-///   including database, RPC, and worker configurations.
+/// * `config` - A configuration object that includes settings for the backfilling operation,
+///   such as database, RPC, and worker configurations.
 ///
 /// # Returns
 ///
-/// This function returns a `Result` which is `Ok` if the backfilling process completes
-/// successfully, or an `Err` with an appropriate error message if any part of the process
-/// fails.
+/// This function returns a `Result` which is `Ok` if the backfilling operation is completed
+/// successfully, or an `Err` with a relevant error message if any part of the operation
+/// encounters issues.
 ///
 /// # Errors
 ///
-/// This function can return errors related to database connectivity, RPC failures,
-/// or issues with spawning and managing worker tasks.
+/// Potential errors can arise from database connectivity issues or RPC failures.
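In outline, the rewritten command is a thin wrapper: connect, build a context, delegate. A runnable sketch of that shape, with stub types standing in for the `das-core` and `das-bubblegum` APIs (the stubs are assumptions for illustration, not the crates' real signatures):

```rust
use anyhow::Result;

// Stand-ins for the das-core / das-bubblegum types referenced above.
struct Pool;
struct Rpc;
struct BackfillArgs {
    worker_count: usize,
}
struct BubblegumContext {
    _pool: Pool,
    _rpc: Rpc,
}

impl BubblegumContext {
    fn new(pool: Pool, rpc: Rpc) -> Self {
        Self { _pool: pool, _rpc: rpc }
    }
}

async fn connect_db() -> Result<Pool> {
    Ok(Pool) // the real helper takes PoolArgs and builds a Postgres pool
}

async fn start_backfill(_ctx: BubblegumContext, args: BackfillArgs) -> Result<()> {
    println!("backfilling with {} workers", args.worker_count);
    Ok(())
}

// The whole command wrapper reduces to: connect, build context, delegate.
#[tokio::main]
async fn main() -> Result<()> {
    let context = BubblegumContext::new(connect_db().await?, Rpc);
    start_backfill(context, BackfillArgs { worker_count: 4 }).await
}
```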
pub async fn run(config: Args) -> Result<()> { - let pool = connect_db(config.database).await?; - - let solana_rpc = Rpc::from_config(config.solana); - let transaction_solana_rpc = solana_rpc.clone(); - let gap_solana_rpc = solana_rpc.clone(); - - setup_metrics(config.metrics)?; - - let (sig_sender, mut sig_receiver) = mpsc::channel::(config.signature_channel_size); - let gap_sig_sender = sig_sender.clone(); - let (gap_sender, mut gap_receiver) = mpsc::channel::(config.gap_channel_size); - - let queue = QueuePool::try_from_config(config.queue).await?; - - let transaction_worker_count = config.transaction_worker_count; - - let transaction_worker_manager = tokio::spawn(async move { - let mut handlers = FuturesUnordered::new(); - - while let Some(signature) = sig_receiver.recv().await { - if handlers.len() >= transaction_worker_count { - handlers.next().await; - } - - let solana_rpc = transaction_solana_rpc.clone(); - let queue = queue.clone(); - - let handle = spawn_transaction_worker(solana_rpc, queue, signature); - - handlers.push(handle); - } - - futures::future::join_all(handlers).await; - }); - - let gap_worker_count = config.gap_worker_count; - - let gap_worker_manager = tokio::spawn(async move { - let mut handlers = FuturesUnordered::new(); - - while let Some(gap) = gap_receiver.recv().await { - if handlers.len() >= gap_worker_count { - handlers.next().await; - } - - let client = gap_solana_rpc.clone(); - let sender = gap_sig_sender.clone(); - - let handle = spawn_crawl_worker(client, sender, gap); - - handlers.push(handle); - } - - futures::future::join_all(handlers).await; - }); - - let started = Instant::now(); - - let trees = if let Some(only_trees) = config.only_trees { - TreeResponse::find(&solana_rpc, only_trees).await? - } else { - TreeResponse::all(&solana_rpc).await? - }; - - let tree_count = trees.len(); - - info!( - "fetched {} trees in {}", - tree_count, - HumanDuration(started.elapsed()) - ); - - let tree_crawler_count = config.tree_crawler_count; - let mut crawl_handles = FuturesUnordered::new(); - - for tree in trees { - if crawl_handles.len() >= tree_crawler_count { - crawl_handles.next().await; - } - - let sender = gap_sender.clone(); - let pool = pool.clone(); - let conn = SqlxPostgresConnector::from_sqlx_postgres_pool(pool); - - let handle = spawn_gap_worker(conn, sender, tree); - - crawl_handles.push(handle); - } - - futures::future::try_join_all(crawl_handles).await?; - drop(gap_sender); - info!("crawled all trees"); - - gap_worker_manager.await?; - drop(sig_sender); - info!("all gaps processed"); - - transaction_worker_manager.await?; - info!("all transactions queued"); - - statsd_time!("job.completed", started.elapsed()); - - info!( - "crawled {} trees in {}", - tree_count, - HumanDuration(started.elapsed()) - ); - - Ok(()) -} - -fn spawn_gap_worker( - conn: DatabaseConnection, - sender: mpsc::Sender, - tree: TreeResponse, -) -> JoinHandle> { - tokio::spawn(async move { - let timing = Instant::now(); - - let mut gaps = TreeGapModel::find(&conn, tree.pubkey) - .await? 
- .into_iter() - .map(TryInto::try_into) - .collect::, _>>()?; - - let upper_known_seq = cl_audits_v2::Entity::find() - .filter(cl_audits_v2::Column::Tree.eq(tree.pubkey.as_ref().to_vec())) - .order_by_desc(cl_audits_v2::Column::Seq) - .one(&conn) - .await?; - - let lower_known_seq = cl_audits_v2::Entity::find() - .filter(cl_audits_v2::Column::Tree.eq(tree.pubkey.as_ref().to_vec())) - .order_by_asc(cl_audits_v2::Column::Seq) - .one(&conn) - .await?; - - if let Some(upper_seq) = upper_known_seq { - let signature = Signature::try_from(upper_seq.tx.as_ref())?; - info!( - "tree {} has known highest seq {} filling tree from {}", - tree.pubkey, upper_seq.seq, signature - ); - gaps.push(TreeGapFill::new(tree.pubkey, None, Some(signature))); - } else if tree.seq > 0 { - info!( - "tree {} has no known highest seq but the actual seq is {} filling whole tree", - tree.pubkey, tree.seq - ); - gaps.push(TreeGapFill::new(tree.pubkey, None, None)); - } - - if let Some(lower_seq) = lower_known_seq.filter(|seq| seq.seq > 1) { - let signature = Signature::try_from(lower_seq.tx.as_ref())?; - - info!( - "tree {} has known lowest seq {} filling tree starting at {}", - tree.pubkey, lower_seq.seq, signature - ); - - gaps.push(TreeGapFill::new(tree.pubkey, Some(signature), None)); - } - - let gap_count = gaps.len(); - - for gap in gaps { - if let Err(e) = sender.send(gap).await { - statsd_count!("gap.failed", 1); - error!("send gap: {:?}", e); - } - } - - info!("crawling tree {} with {} gaps", tree.pubkey, gap_count); - - statsd_count!("tree.succeeded", 1); - statsd_time!("tree.crawled", timing.elapsed()); - - Ok::<(), anyhow::Error>(()) - }) -} - -fn spawn_crawl_worker( - client: Rpc, - sender: mpsc::Sender, - gap: TreeGapFill, -) -> JoinHandle<()> { - tokio::spawn(async move { - let timing = Instant::now(); - - if let Err(e) = gap.crawl(client, sender).await { - error!("tree transaction: {:?}", e); - - statsd_count!("gap.failed", 1); - } else { - statsd_count!("gap.succeeded", 1); - } - - statsd_time!("gap.queued", timing.elapsed()); - }) -} - -async fn queue_transaction<'a>( - client: Rpc, - queue: QueuePool, - signature: Signature, -) -> Result<(), TreeErrorKind> { - let transaction = client.get_transaction(&signature).await?; - - let message = seralize_encoded_transaction_with_status(FlatBufferBuilder::new(), transaction)?; - - queue - .push_transaction_backfill(message.finished_data()) - .await?; - - Ok(()) -} - -fn spawn_transaction_worker(client: Rpc, queue: QueuePool, signature: Signature) -> JoinHandle<()> { - tokio::spawn(async move { - let timing = Instant::now(); - - if let Err(e) = queue_transaction(client, queue, signature).await { - error!("queue transaction: {:?}", e); + let database_pool = connect_db(&config.database).await?; - statsd_count!("transaction.failed", 1); - } else { - statsd_count!("transaction.succeeded", 1); - } + let solana_rpc = Rpc::from_config(&config.solana); + let context = BubblegumContext::new(database_pool, solana_rpc); - statsd_time!("transaction.queued", timing.elapsed()); - }) + start_backfill(context, config.backfill_bubblegum).await } diff --git a/ops/src/bubblegum/cmd.rs b/ops/src/bubblegum/cmd.rs index bb2244167..dd19c3246 100644 --- a/ops/src/bubblegum/cmd.rs +++ b/ops/src/bubblegum/cmd.rs @@ -1,4 +1,4 @@ -use super::{audit, backfiller}; +use super::{backfiller, replay, verify}; use anyhow::Result; use clap::{Args, Subcommand}; @@ -8,9 +8,10 @@ pub enum Commands { /// It crawls through trees and backfills any missed tree transactions. 
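The dispatch that follows is plain clap subcommand routing. A self-contained sketch of the same pattern, with simplified argument structs (the fields here are illustrative, not the real options):

```rust
use clap::{Args, Parser, Subcommand};

#[derive(Debug, Parser)]
struct Cli {
    #[clap(subcommand)]
    command: Commands,
}

#[derive(Debug, Subcommand)]
enum Commands {
    /// Crawl trees and backfill any missed tree transactions.
    #[clap(name = "backfill")]
    Backfill(BackfillArgs),
    /// Replay indexed transactions for the selected trees.
    #[clap(name = "replay")]
    Replay(ReplayArgs),
    /// Verify the integrity of the bubblegum index.
    Verify(VerifyArgs),
}

#[derive(Debug, Args)]
struct BackfillArgs {
    #[arg(long, default_value = "20")]
    tree_crawler_count: usize,
}

#[derive(Debug, Args)]
struct ReplayArgs {
    #[arg(long)]
    tree: String,
}

#[derive(Debug, Args)]
struct VerifyArgs {
    #[arg(long)]
    tree: String,
}

fn main() {
    // Each arm hands its args to the matching async runner in the real code.
    match Cli::parse().command {
        Commands::Backfill(args) => println!("backfill {args:?}"),
        Commands::Replay(args) => println!("replay {args:?}"),
        Commands::Verify(args) => println!("verify {args:?}"),
    }
}
```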
#[clap(name = "backfill")] Backfill(backfiller::Args), - /// The `audit` commands checks `cl_audits_v2` for any failed transactions and logs them to stdout. - #[clap(name = "audit")] - Audit(audit::Args), + #[clap(name = "replay")] + Replay(replay::Args), + /// The 'verify' command is used to verify the integrity of the bubblegum index. + Verify(verify::Args), } #[derive(Debug, Clone, Args)] @@ -24,8 +25,11 @@ pub async fn subcommand(subcommand: BubblegumCommand) -> Result<()> { Commands::Backfill(args) => { backfiller::run(args).await?; } - Commands::Audit(args) => { - audit::run(args).await?; + Commands::Replay(args) => { + replay::run(args).await?; + } + Commands::Verify(args) => { + verify::run(args).await?; } } diff --git a/ops/src/bubblegum/mod.rs b/ops/src/bubblegum/mod.rs index eb4c867ad..0f683a332 100644 --- a/ops/src/bubblegum/mod.rs +++ b/ops/src/bubblegum/mod.rs @@ -1,6 +1,6 @@ -mod audit; mod backfiller; mod cmd; -mod tree; +mod replay; +mod verify; pub use cmd::*; diff --git a/ops/src/bubblegum/replay.rs b/ops/src/bubblegum/replay.rs new file mode 100644 index 000000000..c777b913c --- /dev/null +++ b/ops/src/bubblegum/replay.rs @@ -0,0 +1,27 @@ +use anyhow::Result; +use clap::Parser; +use das_bubblegum::{start_bubblegum_replay, BubblegumContext, BubblegumReplayArgs}; +use das_core::{connect_db, PoolArgs, Rpc, SolanaRpcArgs}; + +#[derive(Debug, Parser, Clone)] +pub struct Args { + /// Database configuration + #[clap(flatten)] + pub database: PoolArgs, + + /// Solana configuration + #[clap(flatten)] + pub solana: SolanaRpcArgs, + + #[clap(flatten)] + pub replay_bubblegum: BubblegumReplayArgs, +} + +pub async fn run(config: Args) -> Result<()> { + let database_pool = connect_db(&config.database).await?; + + let solana_rpc = Rpc::from_config(&config.solana); + let context = BubblegumContext::new(database_pool, solana_rpc); + + start_bubblegum_replay(context, config.replay_bubblegum).await +} diff --git a/ops/src/bubblegum/tree.rs b/ops/src/bubblegum/tree.rs deleted file mode 100644 index 09a3c92cb..000000000 --- a/ops/src/bubblegum/tree.rs +++ /dev/null @@ -1,278 +0,0 @@ -use anyhow::Result; -use borsh::BorshDeserialize; -use clap::Args; -use das_core::{QueuePoolError, Rpc}; -use log::error; -use sea_orm::{DatabaseConnection, DbBackend, FromQueryResult, Statement, Value}; -use solana_client::rpc_filter::{Memcmp, RpcFilterType}; -use solana_client::rpc_response::RpcConfirmedTransactionStatusWithSignature; -use solana_sdk::{account::Account, pubkey::Pubkey, signature::Signature}; -use spl_account_compression::id; -use spl_account_compression::state::{ - merkle_tree_get_size, ConcurrentMerkleTreeHeader, CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1, -}; -use std::str::FromStr; -use thiserror::Error as ThisError; -use tokio::sync::mpsc::Sender; - -const GET_SIGNATURES_FOR_ADDRESS_LIMIT: usize = 1000; - -#[derive(Debug, Clone, Args)] -pub struct ConfigBackfiller { - /// Solana RPC URL - #[arg(long, env)] - pub solana_rpc_url: String, -} - -#[derive(ThisError, Debug)] -pub enum TreeErrorKind { - #[error("solana rpc")] - Rpc(#[from] solana_client::client_error::ClientError), - #[error("anchor")] - Achor(#[from] anchor_client::anchor_lang::error::Error), - #[error("perkle serialize")] - PerkleSerialize(#[from] plerkle_serialization::error::PlerkleSerializationError), - #[error("perkle messenger")] - PlerkleMessenger(#[from] plerkle_messenger::MessengerError), - #[error("queue pool")] - QueuePool(#[from] QueuePoolError), - #[error("parse pubkey")] - ParsePubkey(#[from] 
solana_sdk::pubkey::ParsePubkeyError), - #[error("serialize tree response")] - SerializeTreeResponse, - #[error("sea orm")] - Database(#[from] sea_orm::DbErr), - #[error("try from pubkey")] - TryFromPubkey, - #[error("try from signature")] - TryFromSignature, -} - -const TREE_GAP_SQL: &str = r#" -WITH sequenced_data AS ( - SELECT - tree, - seq, - LEAD(seq) OVER (ORDER BY seq ASC) AS next_seq, - tx AS current_tx, - LEAD(tx) OVER (ORDER BY seq ASC) AS next_tx - FROM - cl_audits_v2 - WHERE - tree = $1 -), -gaps AS ( - SELECT - tree, - seq AS gap_start_seq, - next_seq AS gap_end_seq, - current_tx AS lower_bound_tx, - next_tx AS upper_bound_tx - FROM - sequenced_data - WHERE - next_seq IS NOT NULL AND - next_seq - seq > 1 -) -SELECT - tree, - gap_start_seq, - gap_end_seq, - lower_bound_tx, - upper_bound_tx -FROM - gaps -ORDER BY - gap_start_seq; -"#; - -#[derive(Debug, FromQueryResult, PartialEq, Clone)] -pub struct TreeGapModel { - pub tree: Vec, - pub gap_start_seq: i64, - pub gap_end_seq: i64, - pub lower_bound_tx: Vec, - pub upper_bound_tx: Vec, -} - -impl TreeGapModel { - pub async fn find(conn: &DatabaseConnection, tree: Pubkey) -> Result, TreeErrorKind> { - let statement = Statement::from_sql_and_values( - DbBackend::Postgres, - TREE_GAP_SQL, - vec![Value::Bytes(Some(Box::new(tree.as_ref().to_vec())))], - ); - - TreeGapModel::find_by_statement(statement) - .all(conn) - .await - .map_err(Into::into) - } -} - -impl TryFrom for TreeGapFill { - type Error = TreeErrorKind; - - fn try_from(model: TreeGapModel) -> Result { - let tree = Pubkey::try_from(model.tree).map_err(|_| TreeErrorKind::TryFromPubkey)?; - let upper = Signature::try_from(model.upper_bound_tx) - .map_err(|_| TreeErrorKind::TryFromSignature)?; - let lower = Signature::try_from(model.lower_bound_tx) - .map_err(|_| TreeErrorKind::TryFromSignature)?; - - Ok(Self::new(tree, Some(upper), Some(lower))) - } -} - -pub struct TreeGapFill { - tree: Pubkey, - before: Option, - until: Option, -} - -impl TreeGapFill { - pub fn new(tree: Pubkey, before: Option, until: Option) -> Self { - Self { - tree, - before, - until, - } - } - - pub async fn crawl(&self, client: Rpc, sender: Sender) -> Result<()> { - let mut before = self.before; - - loop { - let sigs = client - .get_signatures_for_address(&self.tree, before, self.until) - .await?; - let sig_count = sigs.len(); - - let successful_transactions = sigs - .into_iter() - .filter(|transaction| transaction.err.is_none()) - .collect::>(); - - for sig in successful_transactions.iter() { - let sig = Signature::from_str(&sig.signature)?; - - sender.send(sig).await?; - - before = Some(sig); - } - - if sig_count < GET_SIGNATURES_FOR_ADDRESS_LIMIT { - break; - } - } - - Ok(()) - } -} - -#[derive(Debug, Clone)] -pub struct TreeHeaderResponse { - pub max_depth: u32, - pub max_buffer_size: u32, - pub creation_slot: u64, - pub size: usize, -} - -impl TryFrom for TreeHeaderResponse { - type Error = TreeErrorKind; - - fn try_from(payload: ConcurrentMerkleTreeHeader) -> Result { - let size = merkle_tree_get_size(&payload)?; - - Ok(Self { - max_depth: payload.get_max_depth(), - max_buffer_size: payload.get_max_buffer_size(), - creation_slot: payload.get_creation_slot(), - size, - }) - } -} - -#[derive(Debug, Clone)] -pub struct TreeResponse { - pub pubkey: Pubkey, - pub tree_header: TreeHeaderResponse, - pub seq: u64, -} - -impl TreeResponse { - pub fn try_from_rpc(pubkey: Pubkey, account: Account) -> Result { - let bytes = account.data.as_slice(); - - let (header_bytes, rest) = 
bytes.split_at(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); - let header: ConcurrentMerkleTreeHeader = - ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?; - - let merkle_tree_size = merkle_tree_get_size(&header)?; - let (tree_bytes, _canopy_bytes) = rest.split_at(merkle_tree_size); - - let seq_bytes = tree_bytes[0..8].try_into()?; - let seq = u64::from_le_bytes(seq_bytes); - - let (auth, _) = Pubkey::find_program_address(&[pubkey.as_ref()], &mpl_bubblegum::ID); - - header.assert_valid_authority(&auth)?; - - let tree_header = header.try_into()?; - - Ok(Self { - pubkey, - tree_header, - seq, - }) - } - - pub async fn all(client: &Rpc) -> Result, TreeErrorKind> { - Ok(client - .get_program_accounts( - &id(), - Some(vec![RpcFilterType::Memcmp(Memcmp::new_raw_bytes( - 0, - vec![1u8], - ))]), - ) - .await? - .into_iter() - .filter_map(|(pubkey, account)| Self::try_from_rpc(pubkey, account).ok()) - .collect()) - } - - pub async fn find(client: &Rpc, pubkeys: Vec) -> Result, TreeErrorKind> { - let pubkeys: Vec = pubkeys - .into_iter() - .map(|p| Pubkey::from_str(&p)) - .collect::, _>>()?; - let pubkey_batches = pubkeys.chunks(100); - let pubkey_batches_count = pubkey_batches.len(); - - let mut gma_handles = Vec::with_capacity(pubkey_batches_count); - - for batch in pubkey_batches { - gma_handles.push(async move { - let accounts = client.get_multiple_accounts(batch).await?; - - let results: Vec<(&Pubkey, Option)> = batch.iter().zip(accounts).collect(); - - Ok::<_, TreeErrorKind>(results) - }) - } - - let result = futures::future::try_join_all(gma_handles).await?; - - let trees = result - .into_iter() - .flatten() - .filter_map(|(pubkey, account)| { - account.map(|account| Self::try_from_rpc(*pubkey, account)) - }) - .collect::, _>>() - .map_err(|_| TreeErrorKind::SerializeTreeResponse)?; - - Ok(trees) - } -} diff --git a/ops/src/bubblegum/verify.rs b/ops/src/bubblegum/verify.rs new file mode 100644 index 000000000..46cb97cfc --- /dev/null +++ b/ops/src/bubblegum/verify.rs @@ -0,0 +1,42 @@ +use anyhow::Result; +use clap::Parser; +use das_bubblegum::{verify_bubblegum, BubblegumContext, VerifyArgs}; +use das_core::{connect_db, PoolArgs, Rpc, SolanaRpcArgs}; +use tracing::info; + +#[derive(Debug, Parser, Clone)] +pub struct Args { + /// Verify Bubblegum Args + #[clap(flatten)] + pub verify_bubblegum: VerifyArgs, + + /// Database configuration + #[clap(flatten)] + pub database: PoolArgs, + + /// Solana configuration + #[clap(flatten)] + pub solana: SolanaRpcArgs, +} + +pub async fn run(config: Args) -> Result<()> { + let database_pool = connect_db(&config.database).await?; + + let solana_rpc = Rpc::from_config(&config.solana); + let context = BubblegumContext::new(database_pool, solana_rpc); + + let mut reports = verify_bubblegum(context, config.verify_bubblegum).await?; + + while let Some(report) = reports.recv().await { + info!( + "Tree: {}, Total Leaves: {}, Incorrect Proofs: {}, Not Found Proofs: {}, Correct Proofs: {}", + report.tree_pubkey, + report.total_leaves, + report.incorrect_proofs, + report.not_found_proofs, + report.correct_proofs + ); + } + + Ok(()) +} diff --git a/program_transformers/Cargo.toml b/program_transformers/Cargo.toml index 35bab7a19..4262824a5 100644 --- a/program_transformers/Cargo.toml +++ b/program_transformers/Cargo.toml @@ -10,12 +10,17 @@ blockbuster = { workspace = true } bs58 = { workspace = true } cadence = { workspace = true } cadence-macros = { workspace = true } -digital_asset_types = { workspace = true, features = ["json_types", "sql_types"] } +das-core = { 
workspace = true } +digital_asset_types = { workspace = true, features = [ + "json_types", + "sql_types", +] } futures = { workspace = true } heck = { workspace = true } mpl-bubblegum = { workspace = true } num-traits = { workspace = true } sea-orm = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } solana-sdk = { workspace = true } solana-transaction-status = { workspace = true } @@ -25,6 +30,7 @@ sqlx = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["time"] } tracing = { workspace = true } +spl-token-2022 = {workspace = true, features = ["no-entrypoint"]} [lints] workspace = true diff --git a/program_transformers/src/asset_upserts.rs b/program_transformers/src/asset_upserts.rs index 5561ff5fd..9ba7a80b8 100644 --- a/program_transformers/src/asset_upserts.rs +++ b/program_transformers/src/asset_upserts.rs @@ -6,8 +6,8 @@ use { }, }, sea_orm::{ - sea_query::OnConflict, ConnectionTrait, DbBackend, DbErr, EntityTrait, QueryTrait, Set, - TransactionTrait, + sea_query::{Alias, Condition, Expr, OnConflict}, + ConnectionTrait, DbErr, EntityTrait, Set, TransactionTrait, }, serde_json::value::Value, sqlx::types::Decimal, @@ -33,7 +33,8 @@ pub async fn upsert_assets_token_account_columns= asset.slot_updated_token_account OR asset.slot_updated_token_account IS NULL", - query.sql); - txn_or_conn.execute(query).await?; Ok(()) } pub struct AssetMintAccountColumns { pub mint: Vec, pub supply: Decimal, - pub supply_mint: Option>, - pub slot_updated_mint_account: u64, + pub slot_updated_mint_account: i64, + pub extensions: Option, } pub async fn upsert_assets_mint_account_columns( @@ -65,35 +98,82 @@ pub async fn upsert_assets_mint_account_columns Result<(), DbErr> { let active_model = asset::ActiveModel { - id: Set(columns.mint), + id: Set(columns.mint.clone()), supply: Set(columns.supply), - supply_mint: Set(columns.supply_mint), - slot_updated_mint_account: Set(Some(columns.slot_updated_mint_account as i64)), + supply_mint: Set(Some(columns.mint.clone())), + slot_updated_mint_account: Set(Some(columns.slot_updated_mint_account)), + slot_updated: Set(Some(columns.slot_updated_mint_account)), + mint_extensions: Set(columns.extensions), + asset_data: Set(Some(columns.mint.clone())), + // assume every token is a fungible token when mint account is created + specification_asset_class: Set(Some(SpecificationAssetClass::FungibleToken)), + // // assume multiple ownership as we set asset class to fungible token + owner_type: Set(OwnerType::Token), ..Default::default() }; - let mut query = asset::Entity::insert(active_model) + asset::Entity::insert(active_model) .on_conflict( OnConflict::columns([asset::Column::Id]) .update_columns([ asset::Column::Supply, asset::Column::SupplyMint, asset::Column::SlotUpdatedMintAccount, + asset::Column::MintExtensions, + asset::Column::SlotUpdated, + asset::Column::AssetData, ]) + .action_cond_where( + Condition::any() + .add( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset::Column::Supply, + ) + .ne(Expr::tbl(asset::Entity, asset::Column::Supply)), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset::Column::SupplyMint, + ) + .ne( + Expr::tbl(asset::Entity, asset::Column::SupplyMint), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset::Column::OwnerType, + ) + .ne(Expr::tbl(asset::Entity, asset::Column::OwnerType)), + ), + ) + .add( + Expr::tbl(asset::Entity, asset::Column::SlotUpdatedMintAccount) + 
.lte(columns.slot_updated_mint_account), + ), + ) + .add( + Expr::tbl(asset::Entity, asset::Column::SlotUpdatedMintAccount) + .is_null(), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); + .exec_without_returning(txn_or_conn) + .await?; - query.sql = format!( - "{} WHERE excluded.slot_updated_mint_account >= asset.slot_updated_mint_account OR asset.slot_updated_mint_account IS NULL", - query.sql); - txn_or_conn.execute(query).await?; Ok(()) } pub struct AssetMetadataAccountColumns { pub mint: Vec, - pub owner_type: OwnerType, pub specification_asset_class: Option, + pub owner_type: OwnerType, pub royalty_amount: i32, pub asset_data: Option>, pub slot_updated_metadata_account: u64, @@ -112,9 +192,9 @@ pub async fn upsert_assets_metadata_account_columns Result<(), DbErr> { let active_model = asset::ActiveModel { id: Set(columns.mint), - owner_type: Set(columns.owner_type), specification_version: Set(Some(SpecificationVersions::V1)), specification_asset_class: Set(columns.specification_asset_class), + owner_type: Set(columns.owner_type), tree_id: Set(None), nonce: Set(Some(0)), seq: Set(Some(0)), @@ -138,13 +218,13 @@ pub async fn upsert_assets_metadata_account_columns= asset.slot_updated_metadata_account OR asset.slot_updated_metadata_account IS NULL", - query.sql); - txn_or_conn.execute(query).await?; + .exec_without_returning(txn_or_conn) + .await?; Ok(()) } diff --git a/program_transformers/src/bubblegum/burn.rs b/program_transformers/src/bubblegum/burn.rs index 6a18ae5f6..56112aac9 100644 --- a/program_transformers/src/bubblegum/burn.rs +++ b/program_transformers/src/bubblegum/burn.rs @@ -28,7 +28,13 @@ where T: ConnectionTrait + TransactionTrait, { if let Some(cl) = &parsing_result.tree_update { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; let leaf_index = cl.index; let (asset_id, _) = Pubkey::find_program_address( &[ @@ -47,11 +53,6 @@ where ..Default::default() }; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Upsert asset table `burnt` column. Note we don't check for decompression (asset.seq = 0) // because we know if the item was burnt it could not have been decompressed later. let query = asset::Entity::insert(asset_model) diff --git a/program_transformers/src/bubblegum/cancel_redeem.rs b/program_transformers/src/bubblegum/cancel_redeem.rs index 28ea1695c..23aaddee5 100644 --- a/program_transformers/src/bubblegum/cancel_redeem.rs +++ b/program_transformers/src/bubblegum/cancel_redeem.rs @@ -23,7 +23,13 @@ where T: ConnectionTrait + TransactionTrait, { if let (Some(le), Some(cl)) = (&parsing_result.leaf_update, &parsing_result.tree_update) { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. 
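The recurring edit across these handlers is the same move: the changelog write now happens inside the multi-statement transaction instead of on the bare connection before it. A minimal sea-orm sketch of the resulting shape (the commented-out calls mark where the real helpers run):

```rust
use sea_orm::{DatabaseConnection, DbErr, TransactionTrait};

// One transaction now covers both the changelog write and the asset
// upserts, so either all rows land or none do.
async fn handle_event(db: &DatabaseConnection) -> Result<(), DbErr> {
    let multi_txn = db.begin().await?;

    // save_changelog_event(cl, slot, txn_id, &multi_txn, instruction).await?;
    // upsert_asset_with_leaf_info(&multi_txn, ...).await?;

    // An early `?` return drops `multi_txn` uncommitted, which rolls it back.
    multi_txn.commit().await
}
```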
+ let multi_txn = txn.begin().await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; match le.schema { LeafSchema::V1 { id, @@ -41,11 +47,6 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( &multi_txn, diff --git a/program_transformers/src/bubblegum/collection_verification.rs b/program_transformers/src/bubblegum/collection_verification.rs index 27a3ed4d4..e353d8f71 100644 --- a/program_transformers/src/bubblegum/collection_verification.rs +++ b/program_transformers/src/bubblegum/collection_verification.rs @@ -43,7 +43,13 @@ where "Handling collection verification event for {} (verify: {}): {}", collection, verify, bundle.txn_id ); - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; let id_bytes = match le.schema { LeafSchema::V1 { id, .. } => id.to_bytes().to_vec(), }; @@ -51,11 +57,6 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( &multi_txn, diff --git a/program_transformers/src/bubblegum/creator_verification.rs b/program_transformers/src/bubblegum/creator_verification.rs index 8281e7179..70ecdc8d6 100644 --- a/program_transformers/src/bubblegum/creator_verification.rs +++ b/program_transformers/src/bubblegum/creator_verification.rs @@ -59,7 +59,12 @@ where "Handling creator verification event for creator {} (verify: {}): {}", creator, verify, bundle.txn_id ); - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; match le.schema { LeafSchema::V1 { @@ -79,11 +84,6 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf info. 
upsert_asset_with_leaf_info( &multi_txn, diff --git a/program_transformers/src/bubblegum/db.rs b/program_transformers/src/bubblegum/db.rs index b086c6de8..f03e2af28 100644 --- a/program_transformers/src/bubblegum/db.rs +++ b/program_transformers/src/bubblegum/db.rs @@ -1,8 +1,8 @@ use { crate::error::{ProgramTransformerError, ProgramTransformerResult}, + das_core::DownloadMetadataInfo, digital_asset_types::dao::{ - asset, asset_authority, asset_creators, asset_data, asset_grouping, backfill_items, - cl_audits_v2, cl_items, + asset, asset_authority, asset_creators, asset_data, asset_grouping, cl_audits_v2, cl_items, sea_orm_active_enums::{ ChainMutability, Instruction, Mutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions, @@ -10,14 +10,14 @@ use { }, mpl_bubblegum::types::{Collection, Creator}, sea_orm::{ - entity::{ActiveValue, ColumnTrait, EntityTrait}, + entity::{ActiveValue, EntityTrait}, prelude::*, - query::{JsonValue, QueryFilter, QuerySelect, QueryTrait}, - sea_query::query::OnConflict, + query::{JsonValue, QueryTrait}, + sea_query::{query::OnConflict, Expr}, ConnectionTrait, DbBackend, TransactionTrait, }, spl_account_compression::events::ChangeLogEventV1, - tracing::{debug, error, info}, + tracing::{debug, error}, }; pub async fn save_changelog_event<'c, T>( @@ -40,7 +40,7 @@ const fn node_idx_to_leaf_idx(index: i64, tree_height: u32) -> i64 { pub async fn insert_change_log<'c, T>( change_log_event: &ChangeLogEventV1, - slot: u64, + _slot: u64, txn_id: &str, txn: &T, instruction: &str, @@ -48,54 +48,60 @@ pub async fn insert_change_log<'c, T>( where T: ConnectionTrait + TransactionTrait, { - let mut i: i64 = 0; let depth = change_log_event.path.len() - 1; let tree_id = change_log_event.id.as_ref(); - for p in change_log_event.path.iter() { - let node_idx = p.index as i64; - debug!( - "seq {}, index {} level {}, node {:?}, txn: {:?}, instruction {}", - change_log_event.seq, - p.index, - i, - bs58::encode(p.node).into_string(), - txn_id, - instruction - ); - let leaf_idx = if i == 0 { - Some(node_idx_to_leaf_idx(node_idx, depth as u32)) - } else { - None - }; - - let item = cl_items::ActiveModel { - tree: ActiveValue::Set(tree_id.to_vec()), - level: ActiveValue::Set(i), - node_idx: ActiveValue::Set(node_idx), - hash: ActiveValue::Set(p.node.as_ref().to_vec()), - seq: ActiveValue::Set(change_log_event.seq as i64), - leaf_idx: ActiveValue::Set(leaf_idx), - ..Default::default() - }; - - i += 1; - let mut query = cl_items::Entity::insert(item) - .on_conflict( - OnConflict::columns([cl_items::Column::Tree, cl_items::Column::NodeIdx]) - .update_columns([ - cl_items::Column::Hash, - cl_items::Column::Seq, - cl_items::Column::LeafIdx, - cl_items::Column::Level, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!("{} WHERE excluded.seq > cl_items.seq", query.sql); - txn.execute(query) - .await - .map_err(|db_err| ProgramTransformerError::StorageWriteError(db_err.to_string()))?; - } + + let items: Vec = change_log_event + .path + .iter() + .enumerate() + .map(|(i, p)| { + let node_idx = p.index as i64; + debug!( + "ChangeLogEvent: seq={}, index={}, level={}, depth={}, node={}, txn={}, instruction={}", + change_log_event.seq, + p.index, + i, + depth, + bs58::encode(p.node).into_string(), + txn_id, + instruction + ); + let leaf_idx = if i == 0 { + Some(node_idx_to_leaf_idx(node_idx, depth as u32)) + } else { + None + }; + + cl_items::ActiveModel { + tree: ActiveValue::Set(tree_id.to_vec()), + level: ActiveValue::Set(i as i64), + 
node_idx: ActiveValue::Set(node_idx), + hash: ActiveValue::Set(p.node.as_ref().to_vec()), + seq: ActiveValue::Set(change_log_event.seq as i64), + leaf_idx: ActiveValue::Set(leaf_idx), + ..Default::default() + } + }) + .collect(); + + cl_items::Entity::insert_many(items) + .on_conflict( + OnConflict::columns([cl_items::Column::Tree, cl_items::Column::NodeIdx]) + .update_columns([ + cl_items::Column::Hash, + cl_items::Column::Seq, + cl_items::Column::LeafIdx, + cl_items::Column::Level, + ]) + .action_and_where( + Expr::tbl(cl_items::Entity, cl_items::Column::Seq).lte(change_log_event.seq), + ) + .to_owned(), + ) + .exec_without_returning(txn) + .await + .map_err(|db_err| ProgramTransformerError::StorageWriteError(db_err.to_string()))?; let tx_id_bytes = bs58::decode(txn_id) .into_vec() @@ -131,37 +137,6 @@ where } } - // If and only if the entire path of nodes was inserted into the `cl_items` table, then insert - // a single row into the `backfill_items` table. This way if an incomplete path was inserted - // into `cl_items` due to an error, a gap will be created for the tree and the backfiller will - // fix it. - if i - 1 == depth as i64 { - // See if the tree already exists in the `backfill_items` table. - let rows = backfill_items::Entity::find() - .filter(backfill_items::Column::Tree.eq(tree_id)) - .limit(1) - .all(txn) - .await?; - - // If the tree does not exist in `backfill_items` and the sequence number is greater than 1, - // then we know we will need to backfill the tree from sequence number 1 up to the current - // sequence number. So in this case we set at flag to force checking the tree. - let force_chk = rows.is_empty() && change_log_event.seq > 1; - - info!("Adding to backfill_items table at level {}", i - 1); - let item = backfill_items::ActiveModel { - tree: ActiveValue::Set(tree_id.to_vec()), - seq: ActiveValue::Set(change_log_event.seq as i64), - slot: ActiveValue::Set(slot as i64), - force_chk: ActiveValue::Set(force_chk), - backfilled: ActiveValue::Set(false), - failed: ActiveValue::Set(false), - ..Default::default() - }; - - backfill_items::Entity::insert(item).exec(txn).await?; - } - Ok(()) } @@ -403,13 +378,11 @@ pub async fn upsert_asset_data( chain_data: JsonValue, metadata_url: String, metadata_mutability: Mutability, - metadata: JsonValue, slot_updated: i64, - reindex: Option, raw_name: Vec, raw_symbol: Vec, seq: i64, -) -> ProgramTransformerResult<()> +) -> ProgramTransformerResult> where T: ConnectionTrait + TransactionTrait, { @@ -417,11 +390,11 @@ where id: ActiveValue::Set(id.clone()), chain_data_mutability: ActiveValue::Set(chain_data_mutability), chain_data: ActiveValue::Set(chain_data), - metadata_url: ActiveValue::Set(metadata_url), + metadata_url: ActiveValue::Set(metadata_url.clone()), metadata_mutability: ActiveValue::Set(metadata_mutability), - metadata: ActiveValue::Set(metadata), + metadata: ActiveValue::Set(JsonValue::String("processing".to_string())), slot_updated: ActiveValue::Set(slot_updated), - reindex: ActiveValue::Set(reindex), + reindex: ActiveValue::Set(Some(true)), raw_name: ActiveValue::Set(Some(raw_name)), raw_symbol: ActiveValue::Set(Some(raw_symbol)), base_info_seq: ActiveValue::Set(Some(seq)), @@ -435,9 +408,7 @@ where asset_data::Column::ChainData, asset_data::Column::MetadataUrl, asset_data::Column::MetadataMutability, - // Don't update asset_data::Column::Metadata if it already exists. Even if we - // are indexing `update_metadata`` and there's a new URI, the new background - // task will overwrite it. 
+ asset_data::Column::Metadata, asset_data::Column::SlotUpdated, asset_data::Column::Reindex, asset_data::Column::RawName, @@ -450,15 +421,27 @@ where // Do not overwrite changes that happened after decompression (asset_data.base_info_seq = 0). // Do not overwrite changes from a later Bubblegum instruction. + // Do not update the record if the incoming slot is larger than the current or if it's null. + // Update if the current slot on the record is null. query.sql = format!( - "{} WHERE (asset_data.base_info_seq != 0 AND excluded.base_info_seq >= asset_data.base_info_seq) OR asset_data.base_info_seq IS NULL", + "{} WHERE ((asset_data.base_info_seq != 0 AND excluded.base_info_seq >= asset_data.base_info_seq) OR asset_data.base_info_seq IS NULL) AND (excluded.slot_updated <= asset_data.slot_updated OR asset_data.slot_updated IS NULL)", query.sql ); - txn.execute(query) + + let result = txn + .execute(query) .await .map_err(|db_err| ProgramTransformerError::StorageWriteError(db_err.to_string()))?; - Ok(()) + if result.rows_affected() > 0 { + Ok(Some(DownloadMetadataInfo::new( + id, + metadata_url, + slot_updated, + ))) + } else { + Ok(None) + } } #[allow(clippy::too_many_arguments)] diff --git a/program_transformers/src/bubblegum/delegate.rs b/program_transformers/src/bubblegum/delegate.rs index 15b50a69a..0e38b2c0c 100644 --- a/program_transformers/src/bubblegum/delegate.rs +++ b/program_transformers/src/bubblegum/delegate.rs @@ -23,7 +23,14 @@ where T: ConnectionTrait + TransactionTrait, { if let (Some(le), Some(cl)) = (&parsing_result.leaf_update, &parsing_result.tree_update) { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; + match le.schema { LeafSchema::V1 { id, @@ -40,11 +47,6 @@ where }; let tree_id = cl.id.to_bytes(); - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( &multi_txn, diff --git a/program_transformers/src/bubblegum/mint_v1.rs b/program_transformers/src/bubblegum/mint_v1.rs index fb179727a..db39778ee 100644 --- a/program_transformers/src/bubblegum/mint_v1.rs +++ b/program_transformers/src/bubblegum/mint_v1.rs @@ -24,7 +24,7 @@ use { }, json::ChainDataV1, }, - sea_orm::{query::JsonValue, ConnectionTrait, TransactionTrait}, + sea_orm::{ConnectionTrait, Statement, TransactionTrait}, tracing::warn, }; @@ -50,7 +50,27 @@ where &parsing_result.tree_update, &parsing_result.payload, ) { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. 
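The `action_cond_where`/`action_and_where` guards used throughout this diff replace the old hand-spliced `WHERE excluded...` SQL strings. A small sea-query sketch of a conditional upsert in that style, assuming the `Expr::tbl` API the diff itself uses; the table name and values are illustrative:

```rust
use sea_query::{Alias, Expr, OnConflict, PostgresQueryBuilder, Query};

fn main() {
    let cl_items = Alias::new("cl_items");
    let (tree, node_idx, seq) = (Alias::new("tree"), Alias::new("node_idx"), Alias::new("seq"));

    let sql = Query::insert()
        .into_table(cl_items.clone())
        .columns([tree.clone(), node_idx.clone(), seq.clone()])
        .values_panic([vec![1u8, 2, 3].into(), 42i64.into(), 7i64.into()])
        .on_conflict(
            OnConflict::columns([tree, node_idx])
                .update_columns([seq.clone()])
                // Take the update only when the stored seq is not newer.
                .action_and_where(Expr::tbl(cl_items, seq).lte(7i64))
                .to_owned(),
        )
        .to_owned()
        .to_string(PostgresQueryBuilder);

    // Prints an INSERT ... ON CONFLICT ... DO UPDATE ... WHERE "cl_items"."seq" <= 7
    println!("{sql}");
}
```

Keeping the guard in the builder, rather than appending raw SQL to `query.sql`, lets the condition compose with the rest of the statement and removes the string-splicing footgun.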
+ let multi_txn = txn.begin().await?; + + let set_lock_timeout = "SET LOCAL lock_timeout = '2s';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::bubblegum::mint_v1';"; + let set_lock_timeout_stmt = Statement::from_string( + multi_txn.get_database_backend(), + set_lock_timeout.to_string(), + ); + let set_local_app_name_stmt = Statement::from_string( + multi_txn.get_database_backend(), + set_local_app_name.to_string(), + ); + multi_txn.execute(set_lock_timeout_stmt).await?; + multi_txn.execute(set_local_app_name_stmt).await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; let metadata = args; #[allow(unreachable_patterns)] return match le.schema { @@ -86,21 +106,14 @@ where false => ChainMutability::Immutable, }; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - upsert_asset_data( + let download_metadata_info = upsert_asset_data( &multi_txn, id_bytes.to_vec(), chain_mutability, chain_data_json, uri.clone(), Mutability::Mutable, - JsonValue::String("processing".to_string()), slot_i, - Some(true), name.to_vec(), symbol.to_vec(), seq as i64, @@ -207,7 +220,7 @@ where return Ok(None); } - Ok(Some(DownloadMetadataInfo::new(id_bytes.to_vec(), uri))) + Ok(download_metadata_info) } _ => Err(ProgramTransformerError::NotImplemented), }; diff --git a/program_transformers/src/bubblegum/mod.rs b/program_transformers/src/bubblegum/mod.rs index 5cdff2b8a..03a800858 100644 --- a/program_transformers/src/bubblegum/mod.rs +++ b/program_transformers/src/bubblegum/mod.rs @@ -11,7 +11,7 @@ use { token_metadata::types::UseMethod as TokenMetadataUseMethod, }, sea_orm::{ConnectionTrait, TransactionTrait}, - tracing::{debug, info}, + tracing::debug, }; mod burn; @@ -58,7 +58,7 @@ where InstructionName::SetDecompressibleState => "SetDecompressibleState", InstructionName::UpdateMetadata => "UpdateMetadata", }; - info!("BGUM instruction txn={:?}: {:?}", ix_str, bundle.txn_id); + debug!("BGUM instruction txn={:?}: {:?}", ix_str, bundle.txn_id); match ix_type { InstructionName::Transfer => { diff --git a/program_transformers/src/bubblegum/redeem.rs b/program_transformers/src/bubblegum/redeem.rs index 22caaf3dd..25271d7fe 100644 --- a/program_transformers/src/bubblegum/redeem.rs +++ b/program_transformers/src/bubblegum/redeem.rs @@ -22,7 +22,13 @@ where T: ConnectionTrait + TransactionTrait, { if let Some(cl) = &parsing_result.tree_update { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; let leaf_index = cl.index; let (asset_id, _) = Pubkey::find_program_address( &[ @@ -37,11 +43,6 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. 
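The `SET LOCAL` pair added to `mint_v1` and `transfer` scopes both settings to the transaction itself. A compact sketch of the same pattern against sea-orm (the name after `das::example` is illustrative):

```rust
use sea_orm::{ConnectionTrait, DatabaseConnection, DbErr, Statement, TransactionTrait};

// SET LOCAL only lasts until the enclosing transaction ends, so the short
// lock_timeout (and the diagnostic application_name) cannot leak back into
// the connection pool.
async fn with_write_guards(db: &DatabaseConnection) -> Result<(), DbErr> {
    let txn = db.begin().await?;

    for sql in [
        "SET LOCAL lock_timeout = '2s';",
        "SET LOCAL application_name = 'das::example::guarded_write';",
    ] {
        txn.execute(Statement::from_string(
            txn.get_database_backend(),
            sql.to_string(),
        ))
        .await?;
    }

    // ... changelog + asset writes happen here under the 2s lock budget ...

    txn.commit().await
}
```

Setting `application_name` per transaction also makes these writers easy to spot in `pg_stat_activity` when a lock timeout does fire.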
- let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( &multi_txn, diff --git a/program_transformers/src/bubblegum/transfer.rs b/program_transformers/src/bubblegum/transfer.rs index 9c551beea..a802e9fa0 100644 --- a/program_transformers/src/bubblegum/transfer.rs +++ b/program_transformers/src/bubblegum/transfer.rs @@ -10,7 +10,7 @@ use { instruction::InstructionBundle, programs::bubblegum::{BubblegumInstruction, LeafSchema}, }, - sea_orm::{ConnectionTrait, TransactionTrait}, + sea_orm::{ConnectionTrait, Statement, TransactionTrait}, }; pub async fn transfer<'c, T>( @@ -23,7 +23,27 @@ where T: ConnectionTrait + TransactionTrait, { if let (Some(le), Some(cl)) = (&parsing_result.leaf_update, &parsing_result.tree_update) { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + let set_lock_timeout = "SET LOCAL lock_timeout = '1s';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::bubblegum::transfer';"; + let set_lock_timeout_stmt = Statement::from_string( + multi_txn.get_database_backend(), + set_lock_timeout.to_string(), + ); + let set_local_app_name_stmt = Statement::from_string( + multi_txn.get_database_backend(), + set_local_app_name.to_string(), + ); + multi_txn.execute(set_lock_timeout_stmt).await?; + multi_txn.execute(set_local_app_name_stmt).await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; match le.schema { LeafSchema::V1 { id, @@ -41,11 +61,6 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( &multi_txn, @@ -77,6 +92,7 @@ where } } } + Err(ProgramTransformerError::ParsingError( "Ix not parsed correctly".to_string(), )) diff --git a/program_transformers/src/bubblegum/update_metadata.rs b/program_transformers/src/bubblegum/update_metadata.rs index 702aa552c..35703fdca 100644 --- a/program_transformers/src/bubblegum/update_metadata.rs +++ b/program_transformers/src/bubblegum/update_metadata.rs @@ -22,7 +22,7 @@ use { }, json::ChainDataV1, }, - sea_orm::{query::*, ConnectionTrait, JsonValue}, + sea_orm::{query::*, ConnectionTrait}, tracing::warn, }; @@ -48,7 +48,13 @@ where &parsing_result.tree_update, &parsing_result.payload, ) { - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, instruction).await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + let seq = + save_changelog_event(cl, bundle.slot, bundle.txn_id, &multi_txn, instruction).await?; #[allow(unreachable_patterns)] return match le.schema { @@ -109,21 +115,14 @@ where ChainMutability::Immutable }; - // Begin a transaction. If the transaction goes out of scope (i.e. 
one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - upsert_asset_data( + let download_metadata_info = upsert_asset_data( &multi_txn, id_bytes.to_vec(), chain_mutability, chain_data_json, uri.clone(), Mutability::Mutable, - JsonValue::String("processing".to_string()), slot_i, - Some(true), name.into_bytes().to_vec(), symbol.into_bytes().to_vec(), seq as i64, @@ -188,7 +187,7 @@ where return Ok(None); } - Ok(Some(DownloadMetadataInfo::new(id_bytes.to_vec(), uri))) + Ok(download_metadata_info) } _ => Err(ProgramTransformerError::NotImplemented), }; diff --git a/program_transformers/src/lib.rs b/program_transformers/src/lib.rs index b317c47f6..dab2fa0d8 100644 --- a/program_transformers/src/lib.rs +++ b/program_transformers/src/lib.rs @@ -4,6 +4,7 @@ use { error::{ProgramTransformerError, ProgramTransformerResult}, mpl_core_program::handle_mpl_core_account, token::handle_token_program_account, + token_inscription::handle_token_inscription_program_update, token_metadata::handle_token_metadata_account, }, blockbuster::{ @@ -11,31 +12,37 @@ use { program_handler::ProgramParser, programs::{ bubblegum::BubblegumParser, mpl_core_program::MplCoreParser, - token_account::TokenAccountParser, token_metadata::TokenMetadataParser, + token_account::TokenAccountParser, token_extensions::Token2022AccountParser, + token_inscriptions::TokenInscriptionParser, token_metadata::TokenMetadataParser, ProgramParseResult, }, }, - futures::future::BoxFuture, + das_core::{DownloadMetadataInfo, DownloadMetadataNotifier}, sea_orm::{ - entity::EntityTrait, query::Select, ConnectionTrait, DatabaseConnection, DbErr, - SqlxPostgresConnector, TransactionTrait, + entity::EntityTrait, query::Select, ConnectionTrait, DbErr, SqlxPostgresConnector, + TransactionTrait, }, + serde::Deserialize, + serde_json::{Map, Value}, solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey, signature::Signature}, solana_transaction_status::InnerInstructions, sqlx::PgPool, std::collections::{HashMap, HashSet, VecDeque}, + token_extensions::handle_token_extensions_program_account, tokio::time::{sleep, Duration}, - tracing::{debug, error, info}, + tracing::{debug, error}, }; mod asset_upserts; -mod bubblegum; +pub mod bubblegum; pub mod error; mod mpl_core_program; mod token; +mod token_extensions; +mod token_inscription; mod token_metadata; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] pub struct AccountInfo { pub slot: u64, pub pubkey: Pubkey, @@ -43,7 +50,7 @@ pub struct AccountInfo { pub data: Vec, } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] pub struct TransactionInfo { pub slot: u64, pub signature: Signature, @@ -52,35 +59,8 @@ pub struct TransactionInfo { pub meta_inner_instructions: Vec, } -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct DownloadMetadataInfo { - asset_data_id: Vec, - uri: String, -} - -impl DownloadMetadataInfo { - pub fn new(asset_data_id: Vec, uri: String) -> Self { - Self { - asset_data_id, - uri: uri.trim().replace('\0', ""), - } - } - - pub fn into_inner(self) -> (Vec, String) { - (self.asset_data_id, self.uri) - } -} - -pub type DownloadMetadataNotifier = Box< - dyn Fn( - DownloadMetadataInfo, - ) -> BoxFuture<'static, Result<(), Box>> - + Sync - + Send, ->; - pub struct ProgramTransformer { - storage: DatabaseConnection, + storage: PgPool, download_metadata_notifier: 
DownloadMetadataNotifier, parsers: HashMap>, key_set: HashSet, @@ -88,22 +68,25 @@ pub struct ProgramTransformer { impl ProgramTransformer { pub fn new(pool: PgPool, download_metadata_notifier: DownloadMetadataNotifier) -> Self { - let mut parsers: HashMap> = HashMap::with_capacity(3); + let mut parsers: HashMap> = HashMap::with_capacity(5); let bgum = BubblegumParser {}; let token_metadata = TokenMetadataParser {}; let token = TokenAccountParser {}; let mpl_core = MplCoreParser {}; + let token_inscription = TokenInscriptionParser {}; + let token_extensions = Token2022AccountParser {}; parsers.insert(bgum.key(), Box::new(bgum)); parsers.insert(token_metadata.key(), Box::new(token_metadata)); parsers.insert(token.key(), Box::new(token)); parsers.insert(mpl_core.key(), Box::new(mpl_core)); + parsers.insert(token_inscription.key(), Box::new(token_inscription)); + parsers.insert(token_extensions.key(), Box::new(token_extensions)); let hs = parsers.iter().fold(HashSet::new(), |mut acc, (k, _)| { acc.insert(*k); acc }); - let pool: PgPool = pool; ProgramTransformer { - storage: SqlxPostgresConnector::from_sqlx_postgres_pool(pool), + storage: pool, download_metadata_notifier, parsers, key_set: hs, @@ -131,7 +114,6 @@ impl ProgramTransformer { &self, tx_info: &TransactionInfo, ) -> ProgramTransformerResult<()> { - info!("Handling Transaction: {:?}", tx_info.signature); let instructions = self.break_transaction(tx_info); let mut not_impl = 0; let ixlen = instructions.len(); @@ -140,6 +122,7 @@ impl ProgramTransformer { .iter() .filter(|(ib, _inner)| ib.0 == mpl_bubblegum::ID); debug!("Instructions bgum: {}", contains.count()); + for (outer_ix, inner_ix) in instructions { let (program, instruction) = outer_ix; let ix_accounts = &instruction.accounts; @@ -169,16 +152,20 @@ impl ProgramTransformer { }; let program_key = ix.program; + if let Some(program) = self.match_program(&program_key) { debug!("Found a ix for program: {:?}", program.key()); let result = program.handle_instruction(&ix)?; let concrete = result.result_type(); + + let db = SqlxPostgresConnector::from_sqlx_postgres_pool(self.storage.clone()); + match concrete { ProgramParseResult::Bubblegum(parsing_result) => { handle_bubblegum_instruction( parsing_result, &ix, - &self.storage, + &db, &self.download_metadata_notifier, ) .await @@ -213,12 +200,14 @@ impl ProgramTransformer { ) -> ProgramTransformerResult<()> { if let Some(program) = self.match_program(&account_info.owner) { let result = program.handle_account(&account_info.data)?; + let db = SqlxPostgresConnector::from_sqlx_postgres_pool(self.storage.clone()); + match result.result_type() { ProgramParseResult::TokenMetadata(parsing_result) => { handle_token_metadata_account( account_info, parsing_result, - &self.storage, + &db, &self.download_metadata_notifier, ) .await @@ -227,20 +216,26 @@ impl ProgramTransformer { handle_token_program_account( account_info, parsing_result, - &self.storage, + &db, &self.download_metadata_notifier, ) .await } + ProgramParseResult::TokenExtensionsProgramAccount(parsing_result) => { + handle_token_extensions_program_account(account_info, parsing_result, &db).await + } ProgramParseResult::MplCore(parsing_result) => { handle_mpl_core_account( account_info, parsing_result, - &self.storage, + &db, &self.download_metadata_notifier, ) .await } + ProgramParseResult::TokenInscriptionAccount(parsing_result) => { + handle_token_inscription_program_update(account_info, parsing_result, &db).await + } _ => Err(ProgramTransformerError::NotImplemented), }?; } @@ -280,3 
+275,27 @@ fn record_metric(metric_name: &str, success: bool, retries: u32) { cadence_macros::statsd_count!(metric_name, 1, "success" => success, "retry_count" => retry_count); } } + +pub fn filter_non_null_fields(value: Value) -> Option<Value> { + match value { + Value::Null => None, + Value::Object(map) => { + if map.values().all(|v| matches!(v, Value::Null)) { + None + } else { + let filtered_map: Map<String, Value> = map + .into_iter() + .filter(|(_k, v)| !matches!(v, Value::Null)) + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); + + if filtered_map.is_empty() { + None + } else { + Some(Value::Object(filtered_map)) + } + } + } + _ => Some(value), + } +} diff --git a/program_transformers/src/mpl_core_program/v1_asset.rs b/program_transformers/src/mpl_core_program/v1_asset.rs index 42e449286..28798bce0 100644 --- a/program_transformers/src/mpl_core_program/v1_asset.rs +++ b/program_transformers/src/mpl_core_program/v1_asset.rs @@ -25,10 +25,9 @@ use { sea_orm::{ entity::{ActiveValue, ColumnTrait, EntityTrait}, prelude::*, - query::{JsonValue, QueryFilter, QueryTrait}, - sea_query::query::OnConflict, - sea_query::Expr, - ConnectionTrait, CursorTrait, DbBackend, TransactionTrait, + query::{JsonValue, QueryFilter}, + sea_query::{query::OnConflict, Alias, Condition, Expr}, + ConnectionTrait, CursorTrait, Statement, TransactionTrait, }, serde_json::{value::Value, Map}, solana_sdk::pubkey::Pubkey, @@ -47,18 +46,22 @@ pub async fn burn_v1_asset( burnt: ActiveValue::Set(true), ..Default::default() }; - let mut query = asset::Entity::insert(model) + asset::Entity::insert(model) .on_conflict( - OnConflict::columns([asset::Column::Id]) + OnConflict::column(asset::Column::Id) .update_columns([asset::Column::SlotUpdated, asset::Column::Burnt]) + .action_cond_where( + Condition::all() + .add( + Expr::tbl(Alias::new("excluded"), asset::Column::Burnt) + .ne(Expr::tbl(asset::Entity, asset::Column::Burnt)), + ) + .add(Expr::tbl(asset::Entity, asset::Column::SlotUpdated).lte(slot_i)), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset.slot_updated", - query.sql - ); - conn.execute(query).await?; + .exec_without_returning(conn) + .await?; Ok(()) } @@ -108,6 +111,16 @@ pub async fn save_v1_asset( let txn = conn.begin().await?; + let set_lock_timeout = "SET LOCAL lock_timeout = '1s';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::mpl_core_program::v1_asset';"; + let set_lock_timeout_stmt = + Statement::from_string(txn.get_database_backend(), set_lock_timeout.to_string()); + let set_local_app_name_stmt = + Statement::from_string(txn.get_database_backend(), set_local_app_name.to_string()); + txn.execute(set_lock_timeout_stmt).await?; + txn.execute(set_local_app_name_stmt).await?; + let model = asset_authority::ActiveModel { asset_id: ActiveValue::Set(id_vec.clone()), authority: ActiveValue::Set(update_authority.clone()), @@ -116,22 +129,33 @@ pub async fn save_v1_asset( ..Default::default() }; - let mut query = asset_authority::Entity::insert(model) + asset_authority::Entity::insert(model) .on_conflict( - OnConflict::columns([asset_authority::Column::AssetId]) + OnConflict::column(asset_authority::Column::AssetId) .update_columns([ asset_authority::Column::Authority, - asset_authority::Column::Seq, asset_authority::Column::SlotUpdated, ]) + .action_cond_where( + Condition::all() + .add( + Expr::tbl(Alias::new("excluded"), asset_authority::Column::Authority) + .ne(Expr::tbl( + asset_authority::Entity,
asset_authority::Column::Authority, + )), + ) + .add( + Expr::tbl( + asset_authority::Entity, + asset_authority::Column::SlotUpdated, + ) + .lte(slot_i), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_authority.slot_updated", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; @@ -186,8 +210,7 @@ pub async fn save_v1_asset( raw_symbol: ActiveValue::Set(None), base_info_seq: ActiveValue::Set(Some(0)), }; - - let mut query = asset_data::Entity::insert(asset_data_model) + asset_data::Entity::insert(asset_data_model) .on_conflict( OnConflict::columns([asset_data::Column::Id]) .update_columns([ @@ -201,14 +224,93 @@ pub async fn save_v1_asset( asset_data::Column::RawSymbol, asset_data::Column::BaseInfoSeq, ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::ChainDataMutability, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::ChainDataMutability, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::ChainData, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::ChainData, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::MetadataUrl, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::MetadataUrl, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::MetadataMutability, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::MetadataMutability, + )), + ) + .add( + Expr::tbl(Alias::new("excluded"), asset_data::Column::Reindex) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::Reindex, + )), + ) + .add( + Expr::tbl(Alias::new("excluded"), asset_data::Column::RawName) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::RawName, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::RawSymbol, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::RawSymbol, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::BaseInfoSeq, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::BaseInfoSeq, + )), + ), + ) + .add( + Expr::tbl(asset_data::Entity, asset_data::Column::SlotUpdated) + .lte(slot_i), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_data.slot_updated", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; @@ -293,18 +395,18 @@ pub async fn save_v1_asset( upsert_assets_metadata_account_columns( AssetMetadataAccountColumns { mint: id_vec.clone(), - owner_type: ownership_type, - specification_asset_class: Some(class), + owner_type: ownership_type.clone(), + specification_asset_class: Some(class.clone()), royalty_amount: royalty_amount as i32, asset_data: Some(id_vec.clone()), slot_updated_metadata_account: slot, - mpl_core_plugins: Some(plugins_json), - mpl_core_unknown_plugins: unknown_plugins_json, + mpl_core_plugins: Some(plugins_json.clone()), + mpl_core_unknown_plugins: unknown_plugins_json.clone(), mpl_core_collection_num_minted: asset.num_minted.map(|val| val as i32), mpl_core_collection_current_size: asset.current_size.map(|val| val as i32), mpl_core_plugins_json_version: Some(1), - mpl_core_external_plugins: 
Some(external_plugins_json), - mpl_core_unknown_external_plugins: unknown_external_plugins_json, + mpl_core_external_plugins: Some(external_plugins_json.clone()), + mpl_core_unknown_external_plugins: unknown_external_plugins_json.clone(), }, &txn, ) @@ -316,9 +418,9 @@ pub async fn save_v1_asset( upsert_assets_mint_account_columns( AssetMintAccountColumns { mint: id_vec.clone(), - supply_mint: None, supply, - slot_updated_mint_account: slot, + slot_updated_mint_account: slot as i64, + extensions: None, }, &txn, ) @@ -336,7 +438,6 @@ pub async fn save_v1_asset( PluginAuthority::None => None, }); - // Get frozen status from `FreezeDelegate` plugin if available. let frozen = asset .plugins .get(&PluginType::FreezeDelegate) @@ -349,7 +450,6 @@ pub async fn save_v1_asset( }) .unwrap_or(false); - // TODO: these upserts needed to be separate for Token Metadata but here could be one upsert. upsert_assets_token_account_columns( AssetTokenAccountColumns { mint: id_vec.clone(), @@ -362,7 +462,6 @@ pub async fn save_v1_asset( &txn, ) .await?; - //----------------------- // asset_grouping table //----------------------- @@ -378,7 +477,8 @@ pub async fn save_v1_asset( slot_updated: ActiveValue::Set(Some(slot_i)), ..Default::default() }; - let mut query = asset_grouping::Entity::insert(model) + + asset_grouping::Entity::insert(model) .on_conflict( OnConflict::columns([ asset_grouping::Column::AssetId, @@ -386,18 +486,25 @@ ]) .update_columns([ asset_grouping::Column::GroupValue, - asset_grouping::Column::Verified, asset_grouping::Column::SlotUpdated, - asset_grouping::Column::GroupInfoSeq, ]) + .action_cond_where( + Condition::all() + .add( + Expr::tbl(Alias::new("excluded"), asset_grouping::Column::GroupValue) + .ne(Expr::tbl( + asset_grouping::Entity, + asset_grouping::Column::GroupValue, + )), + ) + .add( + Expr::tbl(asset_grouping::Entity, asset_grouping::Column::SlotUpdated) + .lte(slot_i), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated >= asset_grouping.slot_updated", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; } @@ -423,7 +530,7 @@ pub async fn save_v1_asset( .collect::<Vec<_>>(); if !creators.is_empty() { - let mut query = asset_creators::Entity::insert_many(creators) + asset_creators::Entity::insert_many(creators) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -432,18 +539,55 @@ ]) .update_columns([ asset_creators::Column::Creator, asset_creators::Column::Share, - asset_creators::Column::Verified, - asset_creators::Column::Seq, asset_creators::Column::SlotUpdated, ]) + .action_cond_where( + Condition::any() + .add( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_creators::Column::Creator, + ) + .ne( + Expr::tbl( + asset_creators::Entity, + asset_creators::Column::Creator, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_creators::Column::Share, + ) + .ne( + Expr::tbl( + asset_creators::Entity, + asset_creators::Column::Share, + ), + ), + ), + ) + .add( + Expr::tbl( + asset_creators::Entity, + asset_creators::Column::SlotUpdated, + ) + .lte(slot_i), + ), + ) + .add( + Expr::tbl(asset_creators::Entity, asset_creators::Column::SlotUpdated) + .is_null(), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated >=
asset_creators.slot_updated OR asset_creators.slot_updated is NULL", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; } @@ -461,7 +605,7 @@ pub async fn save_v1_asset( } // Otherwise return with info for background downloading. - Ok(Some(DownloadMetadataInfo::new(id_vec.clone(), uri))) + Ok(Some(DownloadMetadataInfo::new(id_vec.clone(), uri, slot_i))) } // Modify the JSON structure to remove the `Plugin` name and just display its data. diff --git a/program_transformers/src/token/mod.rs b/program_transformers/src/token/mod.rs index d46360317..67c76187a 100644 --- a/program_transformers/src/token/mod.rs +++ b/program_transformers/src/token/mod.rs @@ -8,12 +8,15 @@ use { AccountInfo, DownloadMetadataNotifier, }, blockbuster::programs::token_account::TokenProgramAccount, - digital_asset_types::dao::{asset, sea_orm_active_enums::OwnerType, token_accounts, tokens}, + digital_asset_types::dao::{ + token_accounts, + tokens::{self, IsNonFungible}, + }, sea_orm::{ - entity::{ActiveValue, ColumnTrait}, - query::{QueryFilter, QueryTrait}, + entity::ActiveValue, sea_query::query::OnConflict, - ConnectionTrait, DatabaseConnection, DbBackend, EntityTrait, TransactionTrait, + sea_query::{Alias, Condition, Expr}, + ConnectionTrait, DatabaseConnection, EntityTrait, Statement, TransactionTrait, }, solana_sdk::program_option::COption, spl_token::state::AccountState, @@ -27,6 +30,7 @@ pub async fn handle_token_program_account<'a, 'b>( ) -> ProgramTransformerResult<()> { let account_key = account_info.pubkey.to_bytes().to_vec(); let account_owner = account_info.owner.to_bytes().to_vec(); + let slot = account_info.slot as i64; match &parsing_result { TokenProgramAccount::TokenAccount(ta) => { let mint = ta.mint.to_bytes().to_vec(); @@ -44,14 +48,27 @@ pub async fn handle_token_program_account<'a, 'b>( frozen: ActiveValue::Set(frozen), delegated_amount: ActiveValue::Set(ta.delegated_amount as i64), token_program: ActiveValue::Set(account_owner.clone()), - slot_updated: ActiveValue::Set(account_info.slot as i64), + slot_updated: ActiveValue::Set(slot), amount: ActiveValue::Set(ta.amount as i64), close_authority: ActiveValue::Set(None), + extensions: ActiveValue::Set(None), }; - let mut query = token_accounts::Entity::insert(model) + let txn = db.begin().await?; + + let set_lock_timeout = "SET LOCAL lock_timeout = '100ms';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::token::token_account';"; + let set_lock_timeout_stmt = + Statement::from_string(txn.get_database_backend(), set_lock_timeout.to_string()); + let set_local_app_name_stmt = + Statement::from_string(txn.get_database_backend(), set_local_app_name.to_string()); + txn.execute(set_lock_timeout_stmt).await?; + txn.execute(set_local_app_name_stmt).await?; + + token_accounts::Entity::insert(model) .on_conflict( - OnConflict::columns([token_accounts::Column::Pubkey]) + OnConflict::column(token_accounts::Column::Pubkey) .update_columns([ token_accounts::Column::Mint, token_accounts::Column::DelegatedAmount, @@ -60,39 +77,126 @@ pub async fn handle_token_program_account<'a, 'b>( token_accounts::Column::Frozen, token_accounts::Column::TokenProgram, token_accounts::Column::Owner, - token_accounts::Column::CloseAuthority, token_accounts::Column::SlotUpdated, ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Mint, + ) + .ne( 
+ Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Mint, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::DelegatedAmount, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::DelegatedAmount, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Delegate, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Delegate, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Amount, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Amount, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Frozen, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Frozen, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::TokenProgram, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::TokenProgram, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Owner, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Owner, + ), + ), + ), + ) + .add( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::SlotUpdated, + ) + .lte(account_info.slot as i64), + ), + ) + .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > token_accounts.slot_updated", - query.sql - ); - db.execute(query).await?; - let txn = db.begin().await?; - let asset_update: Option<asset::Model> = asset::Entity::find_by_id(mint.clone()) - .filter(asset::Column::OwnerType.eq("single")) - .one(&txn) + .exec_without_returning(&txn) + .await?; + + let token = tokens::Entity::find_by_id(mint.clone()).one(db).await?; + + let is_non_fungible = token.map(|t| t.is_non_fungible()).unwrap_or(false); + + if is_non_fungible { + upsert_assets_token_account_columns( + AssetTokenAccountColumns { + mint: mint.clone(), + owner: Some(owner.clone()), + frozen, + delegate, + slot_updated_token_account: Some(slot), + }, + &txn, + ) .await?; - if let Some(_asset) = asset_update { - // will only update owner if token account balance is non-zero - // since the asset is marked as single then the token account balance can only be 1. Greater implies a fungible token in which case no si - // TODO: this does not guarantee in case when wallet receives an amount of 1 for a token but its supply is more.
is unlikely since mints often have a decimal - if ta.amount == 1 { - upsert_assets_token_account_columns( - AssetTokenAccountColumns { - mint: mint.clone(), - owner: Some(owner.clone()), - frozen, - delegate, - slot_updated_token_account: Some(account_info.slot as i64), - }, - &txn, - ) - .await?; - } } txn.commit().await?; Ok(()) } @@ -116,53 +220,111 @@ pub async fn handle_token_program_account<'a, 'b>( extension_data: ActiveValue::Set(None), mint_authority: ActiveValue::Set(mint_auth), freeze_authority: ActiveValue::Set(freeze_auth), + extensions: ActiveValue::Set(None), }; - let mut query = tokens::Entity::insert(model) + let txn = db.begin().await?; + + let set_lock_timeout = "SET LOCAL lock_timeout = '100ms';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::token::mint';"; + let set_lock_timeout_stmt = + Statement::from_string(txn.get_database_backend(), set_lock_timeout.to_string()); + let set_local_app_name_stmt = + Statement::from_string(txn.get_database_backend(), set_local_app_name.to_string()); + txn.execute(set_lock_timeout_stmt).await?; + txn.execute(set_local_app_name_stmt).await?; + + tokens::Entity::insert(model) .on_conflict( OnConflict::columns([tokens::Column::Mint]) .update_columns([ tokens::Column::Supply, tokens::Column::TokenProgram, tokens::Column::MintAuthority, - tokens::Column::CloseAuthority, - tokens::Column::ExtensionData, tokens::Column::SlotUpdated, tokens::Column::Decimals, tokens::Column::FreezeAuthority, ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::Supply, + ) + .ne(Expr::tbl(tokens::Entity, tokens::Column::Supply)), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::TokenProgram, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::TokenProgram, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::MintAuthority, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::MintAuthority, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::Decimals, + ) + .ne( + Expr::tbl(tokens::Entity, tokens::Column::Decimals), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::FreezeAuthority, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::FreezeAuthority, + ), + ), + ), + ) + .add( + Expr::tbl(tokens::Entity, tokens::Column::SlotUpdated) + .lte(account_info.slot as i64), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated >= tokens.slot_updated", - query.sql - ); - db.execute(query).await?; - - let asset_update: Option<asset::Model> = asset::Entity::find_by_id(account_key.clone()) - .filter( - asset::Column::OwnerType - .eq(OwnerType::Single) - .or(asset::Column::OwnerType - .eq(OwnerType::Unknown) - .and(asset::Column::Supply.eq(1))), - ) - .one(db) + .exec_without_returning(&txn) .await?; - if let Some(_asset) = asset_update { - upsert_assets_mint_account_columns( - AssetMintAccountColumns { - mint: account_key.clone(), - supply_mint: Some(account_key), - supply: m.supply.into(), - slot_updated_mint_account: account_info.slot, - }, - db, - ) - .await?; - } + upsert_assets_mint_account_columns( + AssetMintAccountColumns { + mint: account_key.clone(), + supply: m.supply.into(), + slot_updated_mint_account: slot, + extensions: None, + }, + &txn, + ) + .await?; + + txn.commit().await?; Ok(()) } }
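The token handler above now gates its asset-owner upsert on `tokens::IsNonFungible`, a helper whose body lives outside this patch. A plausible sketch of that check, assuming the generated `tokens::Model` exposes `decimals: i32` and `supply: Decimal` and that a non-fungible mint is one with zero decimals and a supply of exactly one (illustrative only, not the crate's actual code):

    use sqlx::types::Decimal;

    // Hypothetical mirror of digital_asset_types::dao::tokens::IsNonFungible.
    pub trait IsNonFungible {
        fn is_non_fungible(&self) -> bool;
    }

    impl IsNonFungible for tokens::Model {
        // An NFT mint is indivisible (0 decimals) and unique (supply == 1).
        fn is_non_fungible(&self) -> bool {
            self.decimals == 0 && self.supply == Decimal::from(1)
        }
    }

Under that reading, the deleted `ta.amount == 1` heuristic becomes a property of the mint itself, so owner updates no longer depend on the order in which token accounts arrive.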
diff --git a/program_transformers/src/token_extensions/mod.rs b/program_transformers/src/token_extensions/mod.rs new file mode 100644 index 000000000..64e79e509 --- /dev/null +++ b/program_transformers/src/token_extensions/mod.rs @@ -0,0 +1,487 @@ +use { + crate::{ + asset_upserts::{ + upsert_assets_mint_account_columns, upsert_assets_token_account_columns, + AssetMintAccountColumns, AssetTokenAccountColumns, + }, + error::{ProgramTransformerError, ProgramTransformerResult}, + filter_non_null_fields, AccountInfo, + }, + blockbuster::programs::token_extensions::{ + extension::ShadowMetadata, MintAccount, TokenAccount, TokenExtensionsProgramAccount, + }, + digital_asset_types::dao::{ + asset, asset_data, + sea_orm_active_enums::ChainMutability, + token_accounts, + tokens::{self, IsNonFungible as IsNonFungibleModel}, + }, + sea_orm::{ + entity::ActiveValue, + query::QueryTrait, + sea_query::{query::OnConflict, Alias, Expr}, + Condition, ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbBackend, DbErr, + EntityTrait, Set, Statement, TransactionTrait, + }, + serde_json::Value, + solana_sdk::program_option::COption, + spl_token_2022::state::AccountState, +}; + +pub async fn handle_token_extensions_program_account<'a, 'b, 'c>( + account_info: &'a AccountInfo, + parsing_result: &'b TokenExtensionsProgramAccount, + db: &'c DatabaseConnection, +) -> ProgramTransformerResult<()> { + let account_key = account_info.pubkey.to_bytes().to_vec(); + let account_owner = account_info.owner.to_bytes().to_vec(); + let slot = account_info.slot as i64; + match parsing_result { + TokenExtensionsProgramAccount::TokenAccount(ta) => { + let TokenAccount { + account, + extensions, + } = ta; + let ta = account; + + let extensions: Option<Value> = if extensions.is_some() { + filter_non_null_fields( + serde_json::to_value(extensions.clone()) + .map_err(|e| ProgramTransformerError::SerializatonError(e.to_string()))?, + ) + } else { + None + }; + + let mint = ta.mint.to_bytes().to_vec(); + let delegate: Option<Vec<u8>> = match ta.delegate { + COption::Some(d) => Some(d.to_bytes().to_vec()), + COption::None => None, + }; + let frozen = matches!(ta.state, AccountState::Frozen); + let owner = ta.owner.to_bytes().to_vec(); + let model = token_accounts::ActiveModel { + pubkey: ActiveValue::Set(account_key.clone()), + mint: ActiveValue::Set(mint.clone()), + delegate: ActiveValue::Set(delegate.clone()), + owner: ActiveValue::Set(owner.clone()), + frozen: ActiveValue::Set(frozen), + delegated_amount: ActiveValue::Set(ta.delegated_amount as i64), + token_program: ActiveValue::Set(account_owner.clone()), + slot_updated: ActiveValue::Set(slot), + amount: ActiveValue::Set(ta.amount as i64), + close_authority: ActiveValue::Set(None), + extensions: ActiveValue::Set(extensions.clone()), + }; + + let txn = db.begin().await?; + + let set_lock_timeout = "SET LOCAL lock_timeout = '100ms';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::token_extensions::token_account';"; + let set_lock_timeout_stmt = + Statement::from_string(txn.get_database_backend(), set_lock_timeout.to_string()); + let set_local_app_name_stmt = + Statement::from_string(txn.get_database_backend(), set_local_app_name.to_string()); + txn.execute(set_lock_timeout_stmt).await?; + txn.execute(set_local_app_name_stmt).await?; + + token_accounts::Entity::insert(model) + .on_conflict( + OnConflict::columns([token_accounts::Column::Pubkey]) + .update_columns([ + token_accounts::Column::Mint, + token_accounts::Column::DelegatedAmount, + token_accounts::Column::Delegate, + token_accounts::Column::Amount, +
token_accounts::Column::Frozen, + token_accounts::Column::TokenProgram, + token_accounts::Column::Owner, + token_accounts::Column::SlotUpdated, + token_accounts::Column::Extensions, + ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Mint, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Mint, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::DelegatedAmount, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::DelegatedAmount, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Delegate, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Delegate, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Amount, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Amount, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Frozen, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Frozen, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::TokenProgram, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::TokenProgram, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Owner, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Owner, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + token_accounts::Column::Extensions, + ) + .ne( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::Extensions, + ), + ), + ), + ) + .add( + Expr::tbl( + token_accounts::Entity, + token_accounts::Column::SlotUpdated, + ) + .lte(account_info.slot as i64), + ), + ) + .to_owned(), + ) + .exec_without_returning(&txn) + .await?; + + let token = tokens::Entity::find_by_id(mint.clone()).one(db).await?; + + let is_non_fungible = token.map(|t| t.is_non_fungible()).unwrap_or(false); + + if is_non_fungible { + upsert_assets_token_account_columns( + AssetTokenAccountColumns { + mint: mint.clone(), + owner: Some(owner.clone()), + frozen, + delegate, + slot_updated_token_account: Some(slot), + }, + &txn, + ) + .await?; + } + txn.commit().await?; + Ok(()) + } + TokenExtensionsProgramAccount::MintAccount(m) => { + let MintAccount { + account, + extensions, + } = m; + + let mint_extensions: Option<Value> = if extensions.is_some() { + filter_non_null_fields( + serde_json::to_value(extensions.clone()) + .map_err(|e| ProgramTransformerError::SerializatonError(e.to_string()))?, + ) + } else { + None + }; + + let m = account; + let freeze_auth: Option<Vec<u8>> = match m.freeze_authority { + COption::Some(d) => Some(d.to_bytes().to_vec()), + COption::None => None, + }; + let mint_auth: Option<Vec<u8>> = match m.mint_authority { + COption::Some(d) => Some(d.to_bytes().to_vec()), + COption::None => None, + }; + let model = tokens::ActiveModel { + mint: ActiveValue::Set(account_key.clone()), + token_program: ActiveValue::Set(account_owner), + slot_updated: ActiveValue::Set(slot), + supply: ActiveValue::Set(m.supply.into()), + decimals: ActiveValue::Set(m.decimals as i32), + close_authority: ActiveValue::Set(None), + extension_data: ActiveValue::Set(None), + mint_authority: ActiveValue::Set(mint_auth), + freeze_authority: ActiveValue::Set(freeze_auth), + extensions: ActiveValue::Set(mint_extensions.clone()), + }; + + let txn = db.begin().await?; + + let set_lock_timeout =
"SET LOCAL lock_timeout = '100ms';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::token_extensions::mint';"; + let set_lock_timeout_stmt = + Statement::from_string(txn.get_database_backend(), set_lock_timeout.to_string()); + let set_local_app_name_stmt = + Statement::from_string(txn.get_database_backend(), set_local_app_name.to_string()); + txn.execute(set_lock_timeout_stmt).await?; + txn.execute(set_local_app_name_stmt).await?; + + tokens::Entity::insert(model) + .on_conflict( + OnConflict::columns([tokens::Column::Mint]) + .update_columns([ + tokens::Column::Supply, + tokens::Column::TokenProgram, + tokens::Column::MintAuthority, + tokens::Column::SlotUpdated, + tokens::Column::Decimals, + tokens::Column::FreezeAuthority, + tokens::Column::Extensions, + ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::Supply, + ) + .ne(Expr::tbl(tokens::Entity, tokens::Column::Supply)), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::TokenProgram, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::TokenProgram, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::MintAuthority, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::MintAuthority, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::Decimals, + ) + .ne( + Expr::tbl(tokens::Entity, tokens::Column::Decimals), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::FreezeAuthority, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::FreezeAuthority, + ), + ), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + tokens::Column::Extensions, + ) + .ne( + Expr::tbl( + tokens::Entity, + tokens::Column::Extensions, + ), + ), + ), + ) + .add( + Expr::tbl(tokens::Entity, tokens::Column::SlotUpdated) + .lte(account_info.slot as i64), + ), + ) + .to_owned(), + ) + .exec_without_returning(&txn) + .await?; + + upsert_assets_mint_account_columns( + AssetMintAccountColumns { + mint: account_key.clone(), + supply: m.supply.into(), + slot_updated_mint_account: slot, + extensions: mint_extensions.clone(), + }, + &txn, + ) + .await?; + + txn.commit().await?; + + if let Some(metadata) = &extensions.metadata { + upsert_asset_data(metadata, account_key.clone(), slot, db).await?; + } + + Ok(()) + } + _ => Err(ProgramTransformerError::NotImplemented), + } +} + +async fn upsert_asset_data( + metadata: &ShadowMetadata, + key_bytes: Vec, + slot: i64, + db: &DatabaseConnection, +) -> ProgramTransformerResult<()> { + let metadata_json = serde_json::to_value(metadata.clone()) + .map_err(|e| ProgramTransformerError::SerializatonError(e.to_string()))?; + let asset_data_model = asset_data::ActiveModel { + metadata_url: ActiveValue::Set(metadata.uri.clone()), + metadata: ActiveValue::Set(Value::String("processing".to_string())), + id: ActiveValue::Set(key_bytes.clone()), + chain_data_mutability: ActiveValue::Set(ChainMutability::Mutable), + chain_data: ActiveValue::Set(metadata_json), + slot_updated: ActiveValue::Set(slot), + base_info_seq: ActiveValue::Set(Some(0)), + raw_name: ActiveValue::Set(Some(metadata.name.clone().into_bytes().to_vec())), + raw_symbol: ActiveValue::Set(Some(metadata.symbol.clone().into_bytes().to_vec())), + ..Default::default() + }; + let mut asset_data_query = asset_data::Entity::insert(asset_data_model) + .on_conflict( + OnConflict::columns([asset_data::Column::Id]) + .update_columns([ + 
asset_data::Column::ChainDataMutability, + asset_data::Column::ChainData, + asset_data::Column::MetadataUrl, + asset_data::Column::SlotUpdated, + asset_data::Column::BaseInfoSeq, + asset_data::Column::RawName, + asset_data::Column::RawSymbol, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + asset_data_query.sql = format!( + "{} WHERE excluded.slot_updated >= asset_data.slot_updated", + asset_data_query.sql + ); + db.execute(asset_data_query).await?; + + let txn = db.begin().await?; + upsert_assets_metadata_cols( + AssetMetadataAccountCols { + mint: key_bytes.clone(), + slot_updated_metadata_account: slot, + }, + &txn, + ) + .await?; + + txn.commit().await?; + + Ok(()) +} + +struct AssetMetadataAccountCols { + mint: Vec<u8>, + slot_updated_metadata_account: i64, +} + +async fn upsert_assets_metadata_cols( + metadata: AssetMetadataAccountCols, + db: &DatabaseTransaction, +) -> Result<(), DbErr> { + let asset = asset::ActiveModel { + id: ActiveValue::Set(metadata.mint.clone()), + slot_updated_metadata_account: Set(Some(metadata.slot_updated_metadata_account)), + ..Default::default() + }; + + let mut asset_query = asset::Entity::insert(asset) + .on_conflict( + OnConflict::columns([asset::Column::Id]) + .update_columns([asset::Column::SlotUpdatedMetadataAccount]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + asset_query.sql = format!( + "{} WHERE excluded.slot_updated_metadata_account >= asset.slot_updated_metadata_account OR asset.slot_updated_metadata_account IS NULL", + asset_query.sql + ); + + db.execute(asset_query).await?; + + Ok(()) +} diff --git a/program_transformers/src/token_inscription/mod.rs b/program_transformers/src/token_inscription/mod.rs new file mode 100644 index 000000000..957275fc8 --- /dev/null +++ b/program_transformers/src/token_inscription/mod.rs @@ -0,0 +1,59 @@ +use std::str::FromStr; + +use crate::AccountInfo; +use blockbuster::programs::token_inscriptions::TokenInscriptionAccount; +use digital_asset_types::dao::asset_v1_account_attachments; +use digital_asset_types::dao::sea_orm_active_enums::V1AccountAttachments; +use sea_orm::sea_query::OnConflict; +use sea_orm::{ + ActiveValue, ConnectionTrait, DatabaseConnection, DbBackend, EntityTrait, QueryTrait, +}; +use solana_sdk::pubkey::Pubkey; + +use crate::error::{ProgramTransformerError, ProgramTransformerResult}; + +pub async fn handle_token_inscription_program_update<'a, 'b>( + account_info: &AccountInfo, + parsing_result: &'a TokenInscriptionAccount, + db: &'b DatabaseConnection, +) -> ProgramTransformerResult<()> { + let account_key = account_info.pubkey.to_bytes().to_vec(); + + let TokenInscriptionAccount { data } = parsing_result; + + let ser = serde_json::to_value(data) + .map_err(|e| ProgramTransformerError::SerializatonError(e.to_string()))?; + + let asset_id = Pubkey::from_str(&data.root) + .map_err(|e| ProgramTransformerError::ParsingError(e.to_string()))?
+ .to_bytes() + .to_vec(); + + let model = asset_v1_account_attachments::ActiveModel { + id: ActiveValue::Set(account_key), + asset_id: ActiveValue::Set(Some(asset_id)), + data: ActiveValue::Set(Some(ser)), + slot_updated: ActiveValue::Set(account_info.slot as i64), + initialized: ActiveValue::Set(true), + attachment_type: ActiveValue::Set(V1AccountAttachments::TokenInscription), + }; + + let mut query = asset_v1_account_attachments::Entity::insert(model) + .on_conflict( + OnConflict::columns([asset_v1_account_attachments::Column::Id]) + .update_columns([ + asset_v1_account_attachments::Column::Data, + asset_v1_account_attachments::Column::SlotUpdated, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + query.sql = format!( + "{} WHERE excluded.slot_updated > asset_v1_account_attachments.slot_updated", + query.sql + ); + db.execute(query).await?; + + Ok(()) +}
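The new token_inscription module above still splices a raw `WHERE excluded.slot_updated > ...` clause into the generated SQL, while the master_edition changes below switch to the `action_cond_where` recipe this patch applies everywhere else. Condensed, for a hypothetical `my_table` entity with `Id`, `Value`, and `SlotUpdated` columns (the sea-orm / sea-query calls are the same ones the patch itself uses):

    // Upsert that rewrites a row only when the incoming data differs and is
    // at least as new; `excluded` is Postgres' alias for the proposed row.
    my_table::Entity::insert(model)
        .on_conflict(
            OnConflict::column(my_table::Column::Id)
                .update_columns([my_table::Column::Value, my_table::Column::SlotUpdated])
                .action_cond_where(
                    Condition::all()
                        .add(
                            Expr::tbl(Alias::new("excluded"), my_table::Column::Value)
                                .ne(Expr::tbl(my_table::Entity, my_table::Column::Value)),
                        )
                        .add(Expr::tbl(my_table::Entity, my_table::Column::SlotUpdated).lte(slot)),
                )
                .to_owned(),
        )
        .exec_without_returning(&txn)
        .await?;

Pushing the guard into the ON CONFLICT action means no-op conflicts skip the write entirely, which is part of what makes the short lock timeouts added elsewhere in this patch practical.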
diff --git a/program_transformers/src/token_metadata/master_edition.rs b/program_transformers/src/token_metadata/master_edition.rs index 791368af6..cdb823e6c 100644 --- a/program_transformers/src/token_metadata/master_edition.rs +++ b/program_transformers/src/token_metadata/master_edition.rs @@ -1,19 +1,16 @@ use { crate::error::{ProgramTransformerError, ProgramTransformerResult}, blockbuster::token_metadata::{ - accounts::{DeprecatedMasterEditionV1, MasterEdition}, + accounts::{DeprecatedMasterEditionV1, Edition, MasterEdition}, types::Key, }, digital_asset_types::dao::{ - asset, asset_v1_account_attachments, extensions, - sea_orm_active_enums::{SpecificationAssetClass, V1AccountAttachments}, + asset_v1_account_attachments, sea_orm_active_enums::V1AccountAttachments, }, sea_orm::{ - entity::{ActiveModelTrait, ActiveValue, EntityTrait, RelationTrait}, - prelude::*, - query::{JoinType, QuerySelect, QueryTrait}, - sea_query::query::OnConflict, - ConnectionTrait, DatabaseTransaction, DbBackend, + entity::{ActiveValue, EntityTrait}, + sea_query::{query::OnConflict, Alias, Condition, Expr}, + DatabaseTransaction, }, solana_sdk::pubkey::Pubkey, }; @@ -65,15 +62,6 @@ pub async fn save_master_edition( txn: &DatabaseTransaction, ) -> ProgramTransformerResult<()> { let id_bytes = id.to_bytes().to_vec(); - let master_edition: Option<(asset_v1_account_attachments::Model, Option<asset::Model>)> = - asset_v1_account_attachments::Entity::find_by_id(id.to_bytes().to_vec()) - .find_also_related(asset::Entity) - .join( - JoinType::InnerJoin, - extensions::asset::Relation::AssetData.def(), - ) - .one(txn) - .await?; let ser = serde_json::to_value(me_data) .map_err(|e| ProgramTransformerError::SerializatonError(e.to_string()))?; @@ -85,14 +73,74 @@ pub async fn save_master_edition( ..Default::default() }; - if let Some((_me, Some(asset))) = master_edition { - let mut updatable: asset::ActiveModel = asset.into(); - updatable.supply = ActiveValue::Set(Decimal::from(1)); - updatable.specification_asset_class = ActiveValue::Set(Some(SpecificationAssetClass::Nft)); - updatable.update(txn).await?; - } + asset_v1_account_attachments::Entity::insert(model) + .on_conflict( + OnConflict::columns([asset_v1_account_attachments::Column::Id]) + .update_columns([ + asset_v1_account_attachments::Column::AttachmentType, + asset_v1_account_attachments::Column::Data, + asset_v1_account_attachments::Column::SlotUpdated, + ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_v1_account_attachments::Column::AttachmentType, + ) + .ne(Expr::tbl( + asset_v1_account_attachments::Entity, + asset_v1_account_attachments::Column::AttachmentType, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_v1_account_attachments::Column::Data, + ) + .ne(Expr::tbl( + asset_v1_account_attachments::Entity, + asset_v1_account_attachments::Column::Data, + )), + ), + ) + .add( + Expr::tbl( + asset_v1_account_attachments::Entity, + asset_v1_account_attachments::Column::SlotUpdated, + ) + .lte(slot as i64), + ), + ) + .to_owned(), + ) + .exec_without_returning(txn) + .await?; + + Ok(()) +} + +pub async fn save_edition( + id: Pubkey, + slot: u64, + e_data: &Edition, + txn: &DatabaseTransaction, +) -> ProgramTransformerResult<()> { + let id_bytes = id.to_bytes().to_vec(); + + let ser = serde_json::to_value(e_data) + .map_err(|e| ProgramTransformerError::SerializatonError(e.to_string()))?; - let query = asset_v1_account_attachments::Entity::insert(model) + let model = asset_v1_account_attachments::ActiveModel { + id: ActiveValue::Set(id_bytes), + attachment_type: ActiveValue::Set(V1AccountAttachments::Edition), + data: ActiveValue::Set(Some(ser)), + slot_updated: ActiveValue::Set(slot as i64), + ..Default::default() + }; + asset_v1_account_attachments::Entity::insert(model) .on_conflict( OnConflict::columns([asset_v1_account_attachments::Column::Id]) .update_columns([ @@ -100,9 +148,42 @@ pub async fn save_master_edition( asset_v1_account_attachments::Column::Data, asset_v1_account_attachments::Column::SlotUpdated, ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_v1_account_attachments::Column::AttachmentType, + ) + .ne(Expr::tbl( + asset_v1_account_attachments::Entity, + asset_v1_account_attachments::Column::AttachmentType, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_v1_account_attachments::Column::Data, + ) + .ne(Expr::tbl( + asset_v1_account_attachments::Entity, + asset_v1_account_attachments::Column::Data, + )), + ), + ) + .add( + Expr::tbl( + asset_v1_account_attachments::Entity, + asset_v1_account_attachments::Column::SlotUpdated, + ) + .lte(slot as i64), + ) + ) .to_owned(), ) - .build(DbBackend::Postgres); - txn.execute(query) + .exec_without_returning(txn) + .await?; Ok(()) } diff --git a/program_transformers/src/token_metadata/mod.rs b/program_transformers/src/token_metadata/mod.rs index cbeb94171..bf0710560 100644 --- a/program_transformers/src/token_metadata/mod.rs +++ b/program_transformers/src/token_metadata/mod.rs @@ -7,7 +7,11 @@ use { }, AccountInfo, DownloadMetadataNotifier, }, - blockbuster::programs::token_metadata::{TokenMetadataAccountData, TokenMetadataAccountState}, + blockbuster::{ + programs::token_metadata::{TokenMetadataAccountData, TokenMetadataAccountState}, + token_metadata::types::TokenStandard, + }, + master_edition::save_edition, sea_orm::{DatabaseConnection, TransactionTrait}, }; @@ -45,9 +49,32 @@ pub async fn handle_token_metadata_account<'a, 'b>( txn.commit().await?; Ok(()) } + TokenMetadataAccountData::EditionV1(e) => { + let txn = db.begin().await?; + save_edition(account_info.pubkey, account_info.slot, e, &txn).await?; + txn.commit().await?; + Ok(()) + } + // TokenMetadataAccountData::EditionMarker(_) => {} // TokenMetadataAccountData::UseAuthorityRecord(_) => {} // TokenMetadataAccountData::CollectionAuthorityRecord(_) => {} _ => Err(ProgramTransformerError::NotImplemented), } } + +pub trait IsNonFungibleFromTokenStandard { + fn is_non_fungible(&self) -> bool; +} + +impl IsNonFungibleFromTokenStandard for TokenStandard { + fn
is_non_fungible(&self) -> bool { + matches!( + self, + TokenStandard::NonFungible + | TokenStandard::NonFungibleEdition + | TokenStandard::ProgrammableNonFungible + | TokenStandard::ProgrammableNonFungibleEdition + ) + } +}
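The same two-statement transaction preamble recurs in the file below and in the token and token_extensions handlers above, with only the timeout ('1s' for asset-level writes, '100ms' for account-level ones) and the `application_name` tag varying. Factored out it would look roughly like this (`begin_guarded` and the tag value are illustrative names, not part of the patch):

    use sea_orm::{
        ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbErr, Statement,
        TransactionTrait,
    };

    // Open a transaction that fails fast on contended row locks and tags the
    // session for Postgres-side observability; SET LOCAL scopes both settings
    // to this transaction only.
    async fn begin_guarded(db: &DatabaseConnection) -> Result<DatabaseTransaction, DbErr> {
        let txn = db.begin().await?;
        for sql in [
            "SET LOCAL lock_timeout = '1s';",
            "SET LOCAL application_name = 'das::example';", // hypothetical tag
        ] {
            txn.execute(Statement::from_string(txn.get_database_backend(), sql.to_string()))
                .await?;
        }
        Ok(txn)
    }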
diff --git a/program_transformers/src/token_metadata/v1_asset.rs b/program_transformers/src/token_metadata/v1_asset.rs index 7c45b68b7..b2c82bc88 100644 --- a/program_transformers/src/token_metadata/v1_asset.rs +++ b/program_transformers/src/token_metadata/v1_asset.rs @@ -1,4 +1,5 @@ use { + super::IsNonFungibleFromTokenStandard, crate::{ asset_upserts::{ upsert_assets_metadata_account_columns, upsert_assets_mint_account_columns, @@ -20,24 +21,27 @@ use { ChainMutability, Mutability, OwnerType, SpecificationAssetClass, SpecificationVersions, V1AccountAttachments, }, - token_accounts, tokens, + token_accounts, + tokens::{self}, }, json::ChainDataV1, }, sea_orm::{ - entity::{ActiveValue, ColumnTrait, EntityTrait}, - query::{JsonValue, Order, QueryFilter, QueryOrder, QueryTrait}, - sea_query::query::OnConflict, - ConnectionTrait, DbBackend, DbErr, TransactionTrait, + entity::{ActiveValue, EntityTrait}, + query::{JsonValue, QueryTrait}, + sea_query::{query::OnConflict, Alias, Expr}, + ColumnTrait, Condition, ConnectionTrait, DbBackend, DbErr, Order, QueryFilter, QueryOrder, + Statement, TransactionTrait, }, solana_sdk::pubkey, + solana_sdk::pubkey::Pubkey, sqlx::types::Decimal, tracing::warn, }; pub async fn burn_v1_asset<T: ConnectionTrait + TransactionTrait>( conn: &T, - id: pubkey::Pubkey, + id: Pubkey, slot: u64, ) -> ProgramTransformerResult<()> { let slot_i = slot as i64; @@ -47,23 +51,29 @@ pub async fn burn_v1_asset( burnt: ActiveValue::Set(true), ..Default::default() }; - let mut query = asset::Entity::insert(model) + + asset::Entity::insert(model) .on_conflict( OnConflict::columns([asset::Column::Id]) .update_columns([asset::Column::SlotUpdated, asset::Column::Burnt]) + .action_cond_where( + Condition::all() + .add( + Expr::tbl(Alias::new("excluded"), asset::Column::Burnt) + .ne(Expr::tbl(asset::Entity, asset::Column::Burnt)), + ) + .add(Expr::tbl(asset::Entity, asset::Column::SlotUpdated).lte(slot_i)), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset.slot_updated", - query.sql - ); - conn.execute(query).await?; + .exec_without_returning(conn) + .await?; + Ok(()) } -const RETRY_INTERVALS: &[u64] = &[0, 5, 10]; static WSOL_PUBKEY: pubkey::Pubkey = pubkey!("So11111111111111111111111111111111111111112"); +const RETRY_INTERVALS: &[u64] = &[0, 5, 10]; pub async fn index_and_fetch_mint_data<T: ConnectionTrait + TransactionTrait>( conn: &T, @@ -84,9 +94,9 @@ pub async fn index_and_fetch_mint_data( upsert_assets_mint_account_columns( AssetMintAccountColumns { mint: mint_pubkey_vec.clone(), - supply_mint: Some(token.mint.clone()), supply: token.supply, - slot_updated_mint_account: token.slot_updated as u64, + slot_updated_mint_account: token.slot_updated, + extensions: token.extensions.clone(), }, conn, ) @@ -109,7 +119,7 @@ async fn index_token_account_data( ) -> ProgramTransformerResult<()> { let token_account: Option<token_accounts::Model> = find_model_with_retry( conn, - "owners", + "token_accounts", &token_accounts::Entity::find() .filter(token_accounts::Column::Mint.eq(mint_pubkey_vec.clone())) .filter(token_accounts::Column::Amount.gt(0)) @@ -135,7 +145,7 @@ async fn index_token_account_data( } else { warn!( target: "Account not found", - "Token acc not found in 'owners' table for mint {}", + "Token acc not found in 'token_accounts' table for mint {}", bs58::encode(&mint_pubkey_vec).into_string() ); } @@ -173,16 +183,13 @@ pub async fn save_v1_asset( let mut ownership_type = match class { SpecificationAssetClass::FungibleAsset => OwnerType::Token, SpecificationAssetClass::FungibleToken => OwnerType::Token, - SpecificationAssetClass::Nft | SpecificationAssetClass::ProgrammableNft => { - OwnerType::Single - } - _ => OwnerType::Unknown, + SpecificationAssetClass::Unknown => OwnerType::Unknown, + _ => OwnerType::Single, }; // Wrapped Solana is a special token that has supply 0 (infinite). // It's a fungible token with a metadata account, but without any token standard, meaning the code above will mislabel it as an NFT. if mint_pubkey == WSOL_PUBKEY { - ownership_type = OwnerType::Token; class = SpecificationAssetClass::FungibleToken; } @@ -200,6 +207,14 @@ pub async fn save_v1_asset( }; }; + // Map specification asset class based on the supply. + if class == SpecificationAssetClass::Unknown { + class = match supply { + s if s > Decimal::from(1) => SpecificationAssetClass::FungibleToken, + _ => SpecificationAssetClass::Unknown, + }; + }; + if (ownership_type == OwnerType::Single) | (ownership_type == OwnerType::Unknown) { index_token_account_data(conn, mint_pubkey_vec.clone()).await?; } @@ -235,7 +250,18 @@ pub async fn save_v1_asset( base_info_seq: ActiveValue::Set(Some(0)), }; let txn = conn.begin().await?; - let mut query = asset_data::Entity::insert(asset_data_model) + + let set_lock_timeout = "SET LOCAL lock_timeout = '1s';"; + let set_local_app_name = + "SET LOCAL application_name = 'das::program_transformers::token_metadata::v1_asset';"; + let set_lock_timeout_stmt = + Statement::from_string(txn.get_database_backend(), set_lock_timeout.to_string()); + let set_local_app_name_stmt = + Statement::from_string(txn.get_database_backend(), set_local_app_name.to_string()); + txn.execute(set_lock_timeout_stmt).await?; + txn.execute(set_local_app_name_stmt).await?; + + asset_data::Entity::insert(asset_data_model) .on_conflict( OnConflict::columns([asset_data::Column::Id]) .update_columns([ @@ -249,22 +275,101 @@ pub async fn save_v1_asset( asset_data::Column::RawSymbol, asset_data::Column::BaseInfoSeq, ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::ChainDataMutability, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::ChainDataMutability, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::ChainData, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::ChainData, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::MetadataUrl, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::MetadataUrl, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::MetadataMutability, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::MetadataMutability, + )), + ) + .add( + Expr::tbl(Alias::new("excluded"), asset_data::Column::Reindex) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::Reindex, + )), + ) + .add( + Expr::tbl(Alias::new("excluded"), asset_data::Column::RawName) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::RawName, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::RawSymbol, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::RawSymbol, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_data::Column::BaseInfoSeq, + ) + .ne(Expr::tbl( + asset_data::Entity, + asset_data::Column::BaseInfoSeq, + )),
) + .add( + Expr::tbl(asset_data::Entity, asset_data::Column::SlotUpdated) + .lte(slot_i), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_data.slot_updated", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; upsert_assets_metadata_account_columns( AssetMetadataAccountColumns { mint: mint_pubkey_vec.clone(), - owner_type: ownership_type, specification_asset_class: Some(class), + owner_type: ownership_type, royalty_amount: metadata.seller_fee_basis_points as i32, asset_data: Some(mint_pubkey_vec.clone()), slot_updated_metadata_account: slot_i as u64, @@ -286,14 +391,14 @@ pub async fn save_v1_asset( attachment_type: ActiveValue::Set(V1AccountAttachments::MasterEditionV2), ..Default::default() }; - let query = asset_v1_account_attachments::Entity::insert(attachment) + + asset_v1_account_attachments::Entity::insert(attachment) .on_conflict( OnConflict::columns([asset_v1_account_attachments::Column::Id]) .do_nothing() .to_owned(), ) - .build(DbBackend::Postgres); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; @@ -304,22 +409,34 @@ pub async fn save_v1_asset( slot_updated: ActiveValue::Set(slot_i), ..Default::default() }; - let mut query = asset_authority::Entity::insert(model) + + asset_authority::Entity::insert(model) .on_conflict( - OnConflict::columns([asset_authority::Column::AssetId]) + OnConflict::column(asset_authority::Column::AssetId) .update_columns([ asset_authority::Column::Authority, - asset_authority::Column::Seq, asset_authority::Column::SlotUpdated, ]) + .action_cond_where( + Condition::all() + .add( + Expr::tbl(Alias::new("excluded"), asset_authority::Column::Authority) + .ne(Expr::tbl( + asset_authority::Entity, + asset_authority::Column::Authority, + )), + ) + .add( + Expr::tbl( + asset_authority::Entity, + asset_authority::Column::SlotUpdated, + ) + .lte(slot_i), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_authority.slot_updated", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; @@ -333,7 +450,8 @@ pub async fn save_v1_asset( slot_updated: ActiveValue::Set(Some(slot_i)), ..Default::default() }; - let mut query = asset_grouping::Entity::insert(model) + + asset_grouping::Entity::insert(model) .on_conflict( OnConflict::columns([ asset_grouping::Column::AssetId, @@ -343,16 +461,40 @@ pub async fn save_v1_asset( asset_grouping::Column::GroupValue, asset_grouping::Column::Verified, asset_grouping::Column::SlotUpdated, - asset_grouping::Column::GroupInfoSeq, ]) + .action_cond_where( + Condition::all() + .add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_grouping::Column::GroupValue, + ) + .ne(Expr::tbl( + asset_grouping::Entity, + asset_grouping::Column::GroupValue, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_grouping::Column::Verified, + ) + .ne(Expr::tbl( + asset_grouping::Entity, + asset_grouping::Column::Verified, + )), + ), + ) + .add( + Expr::tbl(asset_grouping::Entity, asset_grouping::Column::SlotUpdated) + .lte(slot_i), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_grouping.slot_updated", - 
query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; } @@ -375,7 +517,7 @@ pub async fn save_v1_asset( .collect::<Vec<_>>(); if !creators.is_empty() { - let mut query = asset_creators::Entity::insert_many(creators) + asset_creators::Entity::insert_many(creators) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -384,23 +526,88 @@ ]) .update_columns([ asset_creators::Column::Creator, asset_creators::Column::Share, - asset_creators::Column::Verified, asset_creators::Column::Seq, + asset_creators::Column::Verified, asset_creators::Column::SlotUpdated, ]) + .action_cond_where( + Condition::any() + .add( + Condition::all().add( + Condition::any() + .add( + Expr::tbl( + Alias::new("excluded"), + asset_creators::Column::Creator, + ) + .ne(Expr::tbl( + asset_creators::Entity, + asset_creators::Column::Creator, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_creators::Column::Share, + ) + .ne(Expr::tbl( + asset_creators::Entity, + asset_creators::Column::Share, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_creators::Column::Verified, + ) + .ne(Expr::tbl( + asset_creators::Entity, + asset_creators::Column::Verified, + )), + ) + .add( + Expr::tbl( + Alias::new("excluded"), + asset_creators::Column::Seq, + ) + .ne(Expr::tbl( + asset_creators::Entity, + asset_creators::Column::Seq, + )), + ), + ), + ) + .add( + Condition::any() + .add( + Expr::tbl( + asset_creators::Entity, + asset_creators::Column::SlotUpdated, + ) + .is_null(), + ) + .add( + Expr::tbl( + asset_creators::Entity, + asset_creators::Column::SlotUpdated, + ) + .lte(slot_i), + ), + ), + ) .to_owned(), ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated >= asset_creators.slot_updated OR asset_creators.slot_updated is NULL", - query.sql - ); - txn.execute(query) + .exec_without_returning(&txn) .await .map_err(|db_err| ProgramTransformerError::AssetIndexError(db_err.to_string()))?; } txn.commit().await?; + // If the asset is a non-fungible token, then we need to insert into the asset_v1_account_attachments table + if let Some(true) = metadata.token_standard.map(|t| t.is_non_fungible()) { + upsert_asset_v1_account_attachments(conn, &mint_pubkey, slot).await?; + } + if uri.is_empty() { warn!( "URI is empty for mint {}.
Skipping background task.", @@ -409,5 +616,37 @@ pub async fn save_v1_asset( return Ok(None); } - Ok(Some(DownloadMetadataInfo::new(mint_pubkey_vec, uri))) + Ok(Some(DownloadMetadataInfo::new( + mint_pubkey_vec, + uri, + slot_i, + ))) +} + +async fn upsert_asset_v1_account_attachments<T: ConnectionTrait + TransactionTrait>( + conn: &T, + mint_pubkey: &Pubkey, + slot: u64, +) -> ProgramTransformerResult<()> { + let edition_pubkey = MasterEdition::find_pda(mint_pubkey).0; + let mint_pubkey_vec = mint_pubkey.to_bytes().to_vec(); + let attachment = asset_v1_account_attachments::ActiveModel { + id: ActiveValue::Set(edition_pubkey.to_bytes().to_vec()), + asset_id: ActiveValue::Set(Some(mint_pubkey_vec.clone())), + slot_updated: ActiveValue::Set(slot as i64), + // by default, the attachment type is MasterEditionV1 + attachment_type: ActiveValue::Set(V1AccountAttachments::MasterEditionV1), + ..Default::default() + }; + let query = asset_v1_account_attachments::Entity::insert(attachment) + .on_conflict( + OnConflict::columns([asset_v1_account_attachments::Column::Id]) + .update_columns([asset_v1_account_attachments::Column::AssetId]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + conn.execute(query).await?; + + Ok(()) } diff --git a/prometheus-config.yaml b/prometheus-config.yaml new file mode 100644 index 000000000..3f975ab85 --- /dev/null +++ b/prometheus-config.yaml @@ -0,0 +1,14 @@ +global: + scrape_interval: 1s + evaluation_interval: 5s + +scrape_configs: + - job_name: "prometheus" + honor_labels: true + static_configs: + - targets: + [ + "host.docker.internal:8873", + "host.docker.internal:8875", + "host.docker.internal:8876", + ] diff --git a/tools/acc_forwarder/src/main.rs b/tools/acc_forwarder/src/main.rs index 6e83e6e21..20a8b97b1 100644 --- a/tools/acc_forwarder/src/main.rs +++ b/tools/acc_forwarder/src/main.rs @@ -1,3 +1,4 @@ +#![allow(deprecated)] use { anyhow::Context, clap::Parser, diff --git a/tools/bgtask_creator/src/main.rs b/tools/bgtask_creator/src/main.rs index 8a3240fe0..0abf32a7c 100644 --- a/tools/bgtask_creator/src/main.rs +++ b/tools/bgtask_creator/src/main.rs @@ -237,7 +237,6 @@ WHERE let mut asset_data_missing = asset_data_processing .0 - .order_by(asset_data::Column::Id, Order::Asc) .paginate(&conn, *batch_size) .into_stream(); @@ -310,11 +309,7 @@ WHERE let condition = asset_data::Column::Reindex.eq(true); let asset_data = find_by_type(authority, collection, creator, mint, condition); - let mut asset_data_missing = asset_data - .0 - .order_by(asset_data::Column::Id, Order::Asc) - .paginate(&conn, *batch_size) - .into_stream(); + let mut asset_data_missing = asset_data.0.paginate(&conn, *batch_size).into_stream(); // Find all the assets with missing metadata let mut tasks = Vec::new(); @@ -360,7 +355,6 @@ WHERE let res = TaskManager::new_task_handler( database_pool.clone(), name.clone(), - name, task_data, Arc::clone(&task_map), false,
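Finally, the contract of `filter_non_null_fields` (added in the first hunk of this section) is easiest to pin down with assertions; a minimal sketch assuming the function is in scope:

    use serde_json::{json, Value};

    fn main() {
        // Nulls and all-null objects collapse to None, so empty extension
        // payloads never reach the JSONB `extensions` columns.
        assert_eq!(filter_non_null_fields(Value::Null), None);
        assert_eq!(filter_non_null_fields(json!({ "a": null, "b": null })), None);
        // Null members are stripped; everything else survives.
        assert_eq!(
            filter_non_null_fields(json!({ "a": null, "b": 1 })),
            Some(json!({ "b": 1 }))
        );
        // Non-object, non-null values pass through unchanged.
        assert_eq!(filter_non_null_fields(json!(5)), Some(json!(5)));
    }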