From 09a599ddd77324763cad31b22e5147f9d85c5e3d Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 15 Oct 2023 14:27:01 -0700 Subject: [PATCH 01/46] Add code to index Bubblegum Update Metadata --- .../bubblegum/collection_verification.rs | 6 +- .../src/program_transformers/bubblegum/db.rs | 108 ++++++- .../program_transformers/bubblegum/mint_v1.rs | 80 +++-- .../src/program_transformers/bubblegum/mod.rs | 11 +- .../bubblegum/update_metadata.rs | 273 ++++++++++++++++++ 5 files changed, 427 insertions(+), 51 deletions(-) create mode 100644 nft_ingester/src/program_transformers/bubblegum/update_metadata.rs diff --git a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs index 7517f1544..e6825774a 100644 --- a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs @@ -26,7 +26,7 @@ where let (collection, verify) = match payload { Payload::CollectionVerification { collection, verify, .. - } => (collection.clone(), verify.clone()), + } => (collection, verify), _ => { return Err(IngesterError::ParsingError( "Ix not parsed correctly".to_string(), @@ -64,8 +64,8 @@ where txn, id_bytes.to_vec(), Some(Collection { - key: collection.clone(), - verified: verify, + key: *collection, + verified: *verify, }), bundle.slot as i64, seq as i64, diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 7e930abdc..a708db5f5 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -1,6 +1,7 @@ use crate::error::IngesterError; use digital_asset_types::dao::{ - asset, asset_creators, asset_grouping, backfill_items, cl_audits, cl_items, + asset, asset_creators, asset_data, asset_grouping, backfill_items, cl_audits, cl_items, + sea_orm_active_enums::{ChainMutability, Mutability}, }; use log::{debug, info}; use mpl_bubblegum::types::Collection; @@ -9,8 +10,6 @@ use sea_orm::{ }; use spl_account_compression::events::ChangeLogEventV1; -use std::convert::From; - pub async fn save_changelog_event<'c, T>( change_log_event: &ChangeLogEventV1, slot: u64, @@ -135,6 +134,7 @@ where //TODO -> set maximum size of path and break into multiple statements } +#[allow(clippy::too_many_arguments)] pub async fn upsert_asset_with_leaf_info( txn: &T, id: Vec, @@ -441,3 +441,105 @@ where Ok(()) } + +pub async fn upsert_asset_data( + txn: &T, + id: Vec, + chain_data_mutability: ChainMutability, + chain_data: JsonValue, + metadata_url: String, + metadata_mutability: Mutability, + metadata: JsonValue, + slot_updated: i64, + reindex: Option, + raw_name: Vec, + raw_symbol: Vec, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + let model = asset_data::ActiveModel { + id: Set(id), + chain_data_mutability: Set(chain_data_mutability), + chain_data: Set(chain_data), + metadata_url: Set(metadata_url), + metadata_mutability: Set(metadata_mutability), + metadata: Set(metadata), + slot_updated: Set(slot_updated), + reindex: Set(reindex), + raw_name: Set(raw_name), + raw_symbol: Set(raw_symbol), + //seq: Set(seq), + }; + + let mut query = asset_data::Entity::insert(model) + .on_conflict( + OnConflict::columns([asset_data::Column::Id]) + .update_columns([ + asset_data::Column::ChainDataMutability, + 
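                    // Note (descriptive only): the columns in this list are only
                    // overwritten when the WHERE guard appended to `query.sql` below
                    // passes, i.e. when the incoming row carries a newer `slot_updated`
                    // and a `seq` at least as large as the stored one (or no `seq` has
                    // been stored for the row yet).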
asset_data::Column::ChainData, + asset_data::Column::MetadataUrl, + asset_data::Column::MetadataMutability, + //TODO DEAL WITH THIS + //asset_data::Column::Metadata, + asset_data::Column::SlotUpdated, + asset_data::Column::Reindex, + asset_data::Column::RawName, + asset_data::Column::RawSymbol, + //asset_data::Column::Seq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + // TODO DEAL WITH THIS + "{} WHERE (excluded.slot_updated > asset_data.slot_updated) AND (excluded.seq >= asset_data.seq OR asset_data.seq IS NULL)", + query.sql +); + txn.execute(query) + .await + .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; + + Ok(()) +} + +pub async fn upsert_asset_with_royalty_amount( + txn: &T, + id: Vec, + royalty_amount: i32, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + let model = asset::ActiveModel { + id: Set(id), + royalty_amount: Set(royalty_amount), + //royalty_amount_seq: Set(Some(seq)), + ..Default::default() + }; + + let mut query = asset::Entity::insert(model) + .on_conflict( + OnConflict::column(asset::Column::Id) + .update_columns([ + asset::Column::RoyaltyAmount, + //asset::Column::RoyaltyAmountSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + query.sql = format!( + // TODO DEAL WITH THIS + "{} WHERE (NOT asset.was_decompressed) AND (excluded.royalty_amount_seq >= asset.royalty_amount_seq OR royalty_amount_seq.seq IS NULL)", + query.sql + ); + + txn.execute(query) + .await + .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; + + Ok(()) +} diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 5920a19d5..b0fdaecec 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,8 +1,9 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_with_compression_info, upsert_asset_with_leaf_info, - upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_collection_info, + save_changelog_event, upsert_asset_data, upsert_asset_with_compression_info, + upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, + upsert_asset_with_royalty_amount, upsert_asset_with_seq, upsert_collection_info, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -17,7 +18,7 @@ use blockbuster::{ use chrono::Utc; use digital_asset_types::{ dao::{ - asset, asset_authority, asset_creators, asset_data, asset_v1_account_attachments, + asset, asset_authority, asset_creators, asset_v1_account_attachments, sea_orm_active_enums::{ChainMutability, Mutability, OwnerType, RoyaltyTargetType}, }, json::ChainDataV1, @@ -62,7 +63,14 @@ where let (edition_attachment_address, _) = find_master_edition_account(&id); let id_bytes = id.to_bytes(); let slot_i = bundle.slot as i64; + let uri = metadata.uri.replace('\0', ""); + if uri.is_empty() { + return Err(IngesterError::DeserializationError( + "URI is empty".to_string(), + )); + } + let name = metadata.name.clone().into_bytes(); let symbol = metadata.symbol.clone().into_bytes(); let mut chain_data = ChainDataV1 { @@ -84,46 +92,23 @@ where true => ChainMutability::Mutable, false => ChainMutability::Immutable, }; - if uri.is_empty() { - return Err(IngesterError::DeserializationError( - "URI is empty".to_string(), - )); - } - let data = asset_data::ActiveModel { - id: Set(id_bytes.to_vec()), - 
chain_data_mutability: Set(chain_mutability), - chain_data: Set(chain_data_json), - metadata_url: Set(uri), - metadata: Set(JsonValue::String("processing".to_string())), - metadata_mutability: Set(Mutability::Mutable), - slot_updated: Set(slot_i), - reindex: Set(Some(true)), - raw_name: Set(name.to_vec()), - raw_symbol: Set(symbol.to_vec()), - ..Default::default() - }; - let mut query = asset_data::Entity::insert(data) - .on_conflict( - OnConflict::columns([asset_data::Column::Id]) - .update_columns([ - asset_data::Column::ChainDataMutability, - asset_data::Column::ChainData, - asset_data::Column::MetadataUrl, - asset_data::Column::MetadataMutability, - asset_data::Column::SlotUpdated, - asset_data::Column::Reindex, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_data.slot_updated", - query.sql - ); - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + upsert_asset_data( + txn, + id_bytes.to_vec(), + chain_mutability, + chain_data_json, + uri, + Mutability::Mutable, + JsonValue::String("processing".to_string()), + slot_i, + Some(true), + name.to_vec(), + symbol.to_vec(), + seq as i64, + ) + .await?; + // Insert into `asset` table. let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None @@ -143,7 +128,6 @@ where nonce: Set(Some(nonce as i64)), royalty_target_type: Set(RoyaltyTargetType::Creators), royalty_target: Set(None), - royalty_amount: Set(metadata.seller_fee_basis_points as i32), //basis points asset_data: Set(Some(id_bytes.to_vec())), slot_updated: Set(Some(slot_i)), ..Default::default() @@ -160,7 +144,6 @@ where asset::Column::SpecificationAssetClass, asset::Column::RoyaltyTargetType, asset::Column::RoyaltyTarget, - asset::Column::RoyaltyAmount, asset::Column::AssetData, ]) .to_owned(), @@ -176,6 +159,14 @@ where .await .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + upsert_asset_with_royalty_amount( + txn, + id_bytes.to_vec(), + metadata.seller_fee_basis_points as i32, + seq as i64, + ) + .await?; + // Partial update of asset table with just compression info elements. upsert_asset_with_compression_info( txn, @@ -248,6 +239,7 @@ where if creators_set.contains(&c.address) { continue; } + db_creator_infos.push(asset_creators::ActiveModel { asset_id: Set(id_bytes.to_vec()), creator: Set(c.address.to_bytes().to_vec()), diff --git a/nft_ingester/src/program_transformers/bubblegum/mod.rs b/nft_ingester/src/program_transformers/bubblegum/mod.rs index af186808d..3ac69c9d8 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mod.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mod.rs @@ -17,6 +17,7 @@ mod delegate; mod mint_v1; mod redeem; mod transfer; +mod update_metadata; pub use db::*; @@ -53,7 +54,8 @@ where InstructionName::VerifyCollection => "VerifyCollection", InstructionName::UnverifyCollection => "UnverifyCollection", InstructionName::SetAndVerifyCollection => "SetAndVerifyCollection", - InstructionName::SetDecompressibleState | InstructionName::UpdateMetadata => todo!(), + InstructionName::SetDecompressibleState => "SetDecompressibleState", + InstructionName::UpdateMetadata => "UpdateMetadata", }; info!("BGUM instruction txn={:?}: {:?}", ix_str, bundle.txn_id); @@ -92,6 +94,13 @@ where | InstructionName::SetAndVerifyCollection => { collection_verification::process(parsing_result, bundle, txn, cl_audits).await?; } + InstructionName::SetDecompressibleState => (), // Nothing to index. 
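        // Descriptive note: in the name match above, these two variants previously fell
        // into a shared `todo!()` arm, which would have panicked the ingester the first
        // time a SetDecompressibleState or UpdateMetadata instruction was seen; here they
        // are either deliberately ignored or routed to a real handler.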
+ InstructionName::UpdateMetadata => { + let task = + update_metadata::update_metadata(parsing_result, bundle, txn, cl_audits).await?; + + task_manager.send(task)?; + } _ => debug!("Bubblegum: Not Implemented Instruction"), } Ok(()) diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs new file mode 100644 index 000000000..2a0d79ed8 --- /dev/null +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -0,0 +1,273 @@ +use crate::{ + error::IngesterError, + program_transformers::bubblegum::{ + save_changelog_event, upsert_asset_data, upsert_asset_with_leaf_info, + upsert_asset_with_royalty_amount, upsert_asset_with_seq, + }, + tasks::{DownloadMetadata, IntoTaskData, TaskData}, +}; +use blockbuster::{ + instruction::InstructionBundle, + programs::bubblegum::{BubblegumInstruction, LeafSchema, Payload}, + token_metadata::state::{TokenStandard, UseMethod, Uses}, +}; +use chrono::Utc; +use digital_asset_types::{ + dao::{ + asset_creators, + sea_orm_active_enums::{ChainMutability, Mutability}, + }, + json::ChainDataV1, +}; +use num_traits::FromPrimitive; +use sea_orm::{ + entity::*, query::*, sea_query::OnConflict, ConnectionTrait, DbBackend, EntityTrait, JsonValue, +}; +use std::collections::HashSet; + +pub async fn update_metadata<'c, T>( + parsing_result: &BubblegumInstruction, + bundle: &InstructionBundle<'c>, + txn: &'c T, + cl_audits: bool, +) -> Result +where + T: ConnectionTrait + TransactionTrait, +{ + if let ( + Some(le), + Some(cl), + Some(Payload::UpdateMetadata { + current_metadata, + update_args, + }), + ) = ( + &parsing_result.leaf_update, + &parsing_result.tree_update, + &parsing_result.payload, + ) { + let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; + #[allow(unreachable_patterns)] + return match le.schema { + LeafSchema::V1 { id, nonce, .. 
} => { + let id_bytes = id.to_bytes(); + let slot_i = bundle.slot as i64; + + let uri = if let Some(uri) = &update_args.uri { + uri.replace('\0', "") + } else { + current_metadata.uri.replace('\0', "") + }; + if uri.is_empty() { + return Err(IngesterError::DeserializationError( + "URI is empty".to_string(), + )); + } + + let name = if let Some(name) = update_args.name.clone() { + name + } else { + current_metadata.name.clone() + }; + + let symbol = if let Some(symbol) = update_args.symbol.clone() { + symbol + } else { + current_metadata.symbol.clone() + }; + + let primary_sale_happened = + if let Some(primary_sale_happened) = update_args.primary_sale_happened { + primary_sale_happened + } else { + current_metadata.primary_sale_happened + }; + + let mut chain_data = ChainDataV1 { + name: name.clone(), + symbol: symbol.clone(), + edition_nonce: current_metadata.edition_nonce, + primary_sale_happened, + token_standard: Some(TokenStandard::NonFungible), + uses: current_metadata.uses.clone().map(|u| Uses { + use_method: UseMethod::from_u8(u.use_method as u8).unwrap(), + remaining: u.remaining, + total: u.total, + }), + }; + chain_data.sanitize(); + let chain_data_json = serde_json::to_value(chain_data) + .map_err(|e| IngesterError::DeserializationError(e.to_string()))?; + + let is_mutable = if let Some(is_mutable) = update_args.is_mutable { + is_mutable + } else { + current_metadata.is_mutable + }; + + let chain_mutability = if is_mutable { + ChainMutability::Mutable + } else { + ChainMutability::Immutable + }; + + upsert_asset_data( + txn, + id_bytes.to_vec(), + chain_mutability, + chain_data_json, + uri.clone(), + Mutability::Mutable, + JsonValue::String("processing".to_string()), + slot_i, + Some(true), + name.into_bytes().to_vec(), + symbol.into_bytes().to_vec(), + seq as i64, + ) + .await?; + + // Partial update of asset table with just seller fee basis points. + let seller_fee_basis_points = + if let Some(seller_fee_basis_points) = update_args.seller_fee_basis_points { + seller_fee_basis_points + } else { + current_metadata.seller_fee_basis_points + }; + + upsert_asset_with_royalty_amount( + txn, + id_bytes.to_vec(), + seller_fee_basis_points as i32, + seq as i64, + ) + .await?; + + // Partial update of asset table with just leaf. + let tree_id = bundle.keys.get(5).unwrap().0.to_vec(); + upsert_asset_with_leaf_info( + txn, + id_bytes.to_vec(), + nonce as i64, + tree_id, + le.leaf_hash.to_vec(), + le.schema.data_hash(), + le.schema.creator_hash(), + seq as i64, + false, + ) + .await?; + + upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + + // Update `asset_creators` table. + if let Some(creators) = &update_args.creators { + // Vec to hold base creator information. + let mut db_creator_infos = Vec::with_capacity(creators.len()); + + // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. + let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); + + // Set to prevent duplicates. 
+ let mut creators_set = HashSet::new(); + + for (i, c) in creators.iter().enumerate() { + if creators_set.contains(&c.address) { + continue; + } + + db_creator_infos.push(asset_creators::ActiveModel { + asset_id: Set(id_bytes.to_vec()), + creator: Set(c.address.to_bytes().to_vec()), + position: Set(i as i16), + share: Set(c.share as i32), + slot_updated: Set(Some(slot_i)), + ..Default::default() + }); + + db_creator_verified_infos.push(asset_creators::ActiveModel { + asset_id: Set(id_bytes.to_vec()), + creator: Set(c.address.to_bytes().to_vec()), + verified: Set(c.verified), + seq: Set(Some(seq as i64)), + ..Default::default() + }); + + creators_set.insert(c.address); + } + + // Remove creators no longer present in creator array. + let db_creators_to_remove: Vec> = current_metadata + .creators + .iter() + .filter(|c| !creators_set.contains(&c.address)) + .map(|c| c.address.to_bytes().to_vec()) + .collect(); + + asset_creators::Entity::delete_many() + .filter( + Condition::all() + .add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())) + .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)) + // TODO WHAT IF SEQ IS NULL + .add(asset_creators::Column::Seq.lt(seq as i64)), + ) + .exec(txn) + .await?; + + // This statement will update base information for each creator. + let query = asset_creators::Entity::insert_many(db_creator_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Position, + asset_creators::Column::Share, + asset_creators::Column::SlotUpdated, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + txn.execute(query).await?; + + // This statement will update whether the creator is verified and the `seq` + // number. `seq` is used to protect the `verified` field, allowing for `mint` + // and `verifyCreator` to be processed out of order. 
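                    // Illustrative example of the out-of-order protection (no additional
                    // logic): if a `verifyCreator` at seq 12 is indexed before a replay of
                    // the `mintV1` at seq 10, the mint's conflicting row fails the
                    // `excluded.seq > asset_creators.seq` guard and the verified flag
                    // written at seq 12 is kept. The statement built below renders to SQL
                    // roughly of the form:
                    //
                    //   INSERT INTO asset_creators (asset_id, creator, verified, seq) VALUES ...
                    //   ON CONFLICT (asset_id, creator) DO UPDATE
                    //     SET verified = excluded.verified, seq = excluded.seq
                    //   WHERE excluded.seq > asset_creators.seq OR asset_creators.seq IS NULL;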
+ let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Verified, + asset_creators::Column::Seq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.seq > asset_creators.seq OR asset_creators.seq IS NULL", + query.sql + ); + txn.execute(query).await?; + } + + // TODO DEAL WITH TASKS + let mut task = DownloadMetadata { + asset_data_id: id_bytes.to_vec(), + uri, + created_at: Some(Utc::now().naive_utc()), + }; + task.sanitize(); + return task.into_task_data(); + } + _ => Err(IngesterError::NotImplemented), + }?; + } + Err(IngesterError::ParsingError( + "Ix not parsed correctly".to_string(), + )) +} From 5ef2eb9bc8d97aa8a33e8447d8871af407c844dc Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 15 Oct 2023 14:35:55 -0700 Subject: [PATCH 02/46] Update rust toolchain file --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 469626eac..8142c3012 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.70.0" \ No newline at end of file +channel = "1.73.0" From 2084aa7492855dfd256d54543a9c834b844711b6 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Tue, 17 Oct 2023 15:42:20 -0700 Subject: [PATCH 03/46] Fix moved variable after merge --- nft_ingester/src/program_transformers/bubblegum/mint_v1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 687e23c44..ddb038515 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -92,7 +92,7 @@ where id_bytes.to_vec(), chain_mutability, chain_data_json, - uri, + uri.clone(), Mutability::Mutable, JsonValue::String("processing".to_string()), slot_i, From 269bf0d25d0f7ae74969bf2774360f62d130f03e Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Tue, 17 Oct 2023 16:20:12 -0700 Subject: [PATCH 04/46] Add code from mintV1 that allows for empty URI --- .../src/program_transformers/bubblegum/mod.rs | 4 +++- .../bubblegum/update_metadata.rs | 16 +++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/mod.rs b/nft_ingester/src/program_transformers/bubblegum/mod.rs index adb5efa51..5ed21dbf7 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mod.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mod.rs @@ -101,7 +101,9 @@ where let task = update_metadata::update_metadata(parsing_result, bundle, txn, cl_audits).await?; - task_manager.send(task)?; + if let Some(t) = task { + task_manager.send(t)?; + } } _ => debug!("Bubblegum: Not Implemented Instruction"), } diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 2a0d79ed8..adec5f54d 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -19,6 +19,7 @@ use digital_asset_types::{ }, json::ChainDataV1, }; +use 
log::warn; use num_traits::FromPrimitive; use sea_orm::{ entity::*, query::*, sea_query::OnConflict, ConnectionTrait, DbBackend, EntityTrait, JsonValue, @@ -30,7 +31,7 @@ pub async fn update_metadata<'c, T>( bundle: &InstructionBundle<'c>, txn: &'c T, cl_audits: bool, -) -> Result +) -> Result, IngesterError> where T: ConnectionTrait + TransactionTrait, { @@ -255,6 +256,14 @@ where txn.execute(query).await?; } + if uri.is_empty() { + warn!( + "URI is empty for mint {}. Skipping background task.", + bs58::encode(id).into_string() + ); + return Ok(None); + } + // TODO DEAL WITH TASKS let mut task = DownloadMetadata { asset_data_id: id_bytes.to_vec(), @@ -262,10 +271,11 @@ where created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); - return task.into_task_data(); + let t = task.into_task_data()?; + Ok(Some(t)) } _ => Err(IngesterError::NotImplemented), - }?; + }; } Err(IngesterError::ParsingError( "Ix not parsed correctly".to_string(), From bdf8e3cf63c58891ed4d335b9c00e225feb50bd5 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 18 Oct 2023 15:12:03 -0700 Subject: [PATCH 05/46] Ordering using asset.seq initially applied to update_metadata --- .../src/program_transformers/bubblegum/db.rs | 24 +++++++++++++------ .../bubblegum/decompress.rs | 1 + .../bubblegum/update_metadata.rs | 2 +- .../token_metadata/v1_asset.rs | 1 + 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index a708db5f5..889d90a0c 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -442,6 +442,7 @@ where Ok(()) } +#[allow(clippy::too_many_arguments)] pub async fn upsert_asset_data( txn: &T, id: Vec, @@ -454,13 +455,13 @@ pub async fn upsert_asset_data( reindex: Option, raw_name: Vec, raw_symbol: Vec, - seq: i64, + _seq: i64, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, { let model = asset_data::ActiveModel { - id: Set(id), + id: Set(id.clone()), chain_data_mutability: Set(chain_data_mutability), chain_data: Set(chain_data), metadata_url: Set(metadata_url), @@ -473,6 +474,13 @@ where //seq: Set(seq), }; + // First check to see if this asset has been decompressed. + if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { + if let Some(0) = asset.seq { + return Ok(()); + } + }; + let mut query = asset_data::Entity::insert(model) .on_conflict( OnConflict::columns([asset_data::Column::Id]) @@ -481,7 +489,9 @@ where asset_data::Column::ChainData, asset_data::Column::MetadataUrl, asset_data::Column::MetadataMutability, - //TODO DEAL WITH THIS + // Don't update Metadata if it already exists. Even if we are doing + // and update_metadata and there's a new URI, the new background task + // will overwrite it. //asset_data::Column::Metadata, asset_data::Column::SlotUpdated, asset_data::Column::Reindex, @@ -493,10 +503,10 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - // TODO DEAL WITH THIS - "{} WHERE (excluded.slot_updated > asset_data.slot_updated) AND (excluded.seq >= asset_data.seq OR asset_data.seq IS NULL)", - query.sql -); + // New asset_data.seq. 
+ "{} WHERE excluded.seq >= asset_data.seq OR asset_data.seq IS NULL)", + query.sql + ); txn.execute(query) .await .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; diff --git a/nft_ingester/src/program_transformers/bubblegum/decompress.rs b/nft_ingester/src/program_transformers/bubblegum/decompress.rs index a024d5ebe..79e124ce6 100644 --- a/nft_ingester/src/program_transformers/bubblegum/decompress.rs +++ b/nft_ingester/src/program_transformers/bubblegum/decompress.rs @@ -19,6 +19,7 @@ where // Partial update of asset table with just leaf. upsert_asset_with_leaf_info_for_decompression(txn, id_bytes.to_vec()).await?; + upsert_asset_with_compression_info( txn, id_bytes.to_vec(), diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index adec5f54d..774acdd7e 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -128,7 +128,7 @@ where ) .await?; - // Partial update of asset table with just seller fee basis points. + // Partial update of asset table with just royalty amount (seller fee basis points). let seller_fee_basis_points = if let Some(seller_fee_basis_points) = update_args.seller_fee_basis_points { seller_fee_basis_points diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 879581341..215399322 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -161,6 +161,7 @@ pub async fn save_v1_asset( asset_data::Column::MetadataMutability, asset_data::Column::SlotUpdated, asset_data::Column::Reindex, + //TODO RAW NAME ]) .to_owned(), ) From 0240ce5ffc30855f3b8ab6e7fe8a2795fb3574f1 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:45:36 -0700 Subject: [PATCH 06/46] Add simple check for whether asset was decompressed to Bubblegum transformers --- .../program_transformers/bubblegum/burn.rs | 3 + .../bubblegum/cancel_redeem.rs | 9 ++- .../bubblegum/collection_verification.rs | 11 +++- .../bubblegum/creator_verification.rs | 9 ++- .../src/program_transformers/bubblegum/db.rs | 60 +++++++++---------- .../bubblegum/decompress.rs | 12 +++- .../bubblegum/delegate.rs | 9 ++- .../program_transformers/bubblegum/mint_v1.rs | 21 +++---- .../program_transformers/bubblegum/redeem.rs | 10 +++- .../bubblegum/transfer.rs | 11 +++- .../bubblegum/update_metadata.rs | 34 +++++++---- .../token_metadata/v1_asset.rs | 9 ++- 12 files changed, 129 insertions(+), 69 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/burn.rs b/nft_ingester/src/program_transformers/bubblegum/burn.rs index 70ddcfcea..653d1bd45 100644 --- a/nft_ingester/src/program_transformers/bubblegum/burn.rs +++ b/nft_ingester/src/program_transformers/bubblegum/burn.rs @@ -23,6 +23,9 @@ where T: ConnectionTrait + TransactionTrait, { if let Some(cl) = &parsing_result.tree_update { + // Note: We do not check whether the asset has been decompressed here because we know if it + // was burned then it could not have been decompressed later. 
+ let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; let leaf_index = cl.index; let (asset_id, _) = Pubkey::find_program_address( diff --git a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs index 1b8f5842a..ba6dd3073 100644 --- a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_with_leaf_info, + asset_was_decompressed, save_changelog_event, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, }, }; @@ -31,6 +31,12 @@ where .. } => { let id_bytes = id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? { + return Ok(()); + } + let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None @@ -50,7 +56,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs index e6825774a..fd88d6c35 100644 --- a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs @@ -1,4 +1,6 @@ -use crate::program_transformers::bubblegum::{upsert_asset_with_seq, upsert_collection_info}; +use crate::program_transformers::bubblegum::{ + asset_was_decompressed, upsert_asset_with_seq, upsert_collection_info, +}; use blockbuster::{ instruction::InstructionBundle, programs::bubblegum::{BubblegumInstruction, LeafSchema, Payload}, @@ -41,6 +43,12 @@ where let id_bytes = match le.schema { LeafSchema::V1 { id, .. } => id.to_bytes().to_vec(), }; + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? { + return Ok(()); + } + let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; @@ -54,7 +62,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs index 134fe89ca..85c6e9857 100644 --- a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_with_leaf_info, + asset_was_decompressed, save_changelog_event, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_creator_verified, }, }; @@ -51,6 +51,12 @@ where .. } => { let id_bytes = id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? 
{ + return Ok(()); + } + let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None @@ -70,7 +76,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 889d90a0c..6c15b52bd 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -67,7 +67,7 @@ where ..Default::default() }; - let mut audit_item: Option = if (cl_audits) { + let audit_item: Option = if cl_audits { let mut ai: cl_audits::ActiveModel = item.clone().into(); ai.tx = Set(txn_id.to_string()); Some(ai) @@ -144,7 +144,6 @@ pub async fn upsert_asset_with_leaf_info( data_hash: [u8; 32], creator_hash: [u8; 32], seq: i64, - was_decompressed: bool, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, @@ -169,22 +168,19 @@ where asset::Column::Nonce, asset::Column::TreeId, asset::Column::Leaf, - asset::Column::LeafSeq, asset::Column::DataHash, asset::Column::CreatorHash, + asset::Column::LeafSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); - // If we are indexing decompression we will update the leaf regardless of if we have previously - // indexed decompression and regardless of seq. - if !was_decompressed { - query.sql = format!( - "{} WHERE (NOT asset.was_decompressed) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", - query.sql - ); - } + // If the asset was decompressed, don't update the leaf info since we cleared it during decompression. + query.sql = format!( + "{} WHERE (NOT asset.was_decompressed) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", + query.sql + ); txn.execute(query) .await @@ -202,26 +198,25 @@ where { let model = asset::ActiveModel { id: Set(id), - leaf: Set(None), nonce: Set(Some(0)), - leaf_seq: Set(None), + tree_id: Set(None), + leaf: Set(None), data_hash: Set(None), creator_hash: Set(None), - tree_id: Set(None), - seq: Set(Some(0)), + leaf_seq: Set(None), ..Default::default() }; + let query = asset::Entity::insert(model) .on_conflict( OnConflict::column(asset::Column::Id) .update_columns([ - asset::Column::Leaf, - asset::Column::LeafSeq, asset::Column::Nonce, + asset::Column::TreeId, + asset::Column::Leaf, asset::Column::DataHash, asset::Column::CreatorHash, - asset::Column::TreeId, - asset::Column::Seq, + asset::Column::LeafSeq, ]) .to_owned(), ) @@ -474,13 +469,6 @@ where //seq: Set(seq), }; - // First check to see if this asset has been decompressed. - if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { - if let Some(0) = asset.seq { - return Ok(()); - } - }; - let mut query = asset_data::Entity::insert(model) .on_conflict( OnConflict::columns([asset_data::Column::Id]) @@ -503,7 +491,6 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - // New asset_data.seq. 
"{} WHERE excluded.seq >= asset_data.seq OR asset_data.seq IS NULL)", query.sql ); @@ -518,13 +505,13 @@ pub async fn upsert_asset_with_royalty_amount( txn: &T, id: Vec, royalty_amount: i32, - seq: i64, + _seq: i64, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, { let model = asset::ActiveModel { - id: Set(id), + id: Set(id.clone()), royalty_amount: Set(royalty_amount), //royalty_amount_seq: Set(Some(seq)), ..Default::default() @@ -542,8 +529,7 @@ where .build(DbBackend::Postgres); query.sql = format!( - // TODO DEAL WITH THIS - "{} WHERE (NOT asset.was_decompressed) AND (excluded.royalty_amount_seq >= asset.royalty_amount_seq OR royalty_amount_seq.seq IS NULL)", + "{} WHERE excluded.royalty_amount_seq >= asset.royalty_amount_seq OR royalty_amount_seq.seq IS NULL)", query.sql ); @@ -553,3 +539,15 @@ where Ok(()) } + +pub async fn asset_was_decompressed(txn: &T, id: Vec) -> Result +where + T: ConnectionTrait + TransactionTrait, +{ + if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { + if let Some(0) = asset.seq { + return Ok(true); + } + }; + Ok(false) +} diff --git a/nft_ingester/src/program_transformers/bubblegum/decompress.rs b/nft_ingester/src/program_transformers/bubblegum/decompress.rs index 79e124ce6..6e9e0341a 100644 --- a/nft_ingester/src/program_transformers/bubblegum/decompress.rs +++ b/nft_ingester/src/program_transformers/bubblegum/decompress.rs @@ -1,12 +1,13 @@ use crate::{ error::IngesterError, - program_transformers::bubblegum::upsert_asset_with_leaf_info_for_decompression, + program_transformers::bubblegum::{ + asset_was_decompressed, upsert_asset_with_compression_info, + upsert_asset_with_leaf_info_for_decompression, + }, }; use blockbuster::{instruction::InstructionBundle, programs::bubblegum::BubblegumInstruction}; use sea_orm::{query::*, ConnectionTrait}; -use super::upsert_asset_with_compression_info; - pub async fn decompress<'c, T>( _parsing_result: &BubblegumInstruction, bundle: &InstructionBundle<'c>, @@ -17,6 +18,11 @@ where { let id_bytes = bundle.keys.get(3).unwrap().0.as_slice(); + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? { + return Ok(()); + } + // Partial update of asset table with just leaf. upsert_asset_with_leaf_info_for_decompression(txn, id_bytes.to_vec()).await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/delegate.rs b/nft_ingester/src/program_transformers/bubblegum/delegate.rs index 88896de64..5f646b269 100644 --- a/nft_ingester/src/program_transformers/bubblegum/delegate.rs +++ b/nft_ingester/src/program_transformers/bubblegum/delegate.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_with_leaf_info, + asset_was_decompressed, save_changelog_event, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, }, }; @@ -30,6 +30,12 @@ where .. } => { let id_bytes = id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? 
{ + return Ok(()); + } + let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None @@ -48,7 +54,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index ddb038515..dbcd13447 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,9 +1,10 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_data, upsert_asset_with_compression_info, - upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, - upsert_asset_with_royalty_amount, upsert_asset_with_seq, upsert_collection_info, + asset_was_decompressed, save_changelog_event, upsert_asset_data, + upsert_asset_with_compression_info, upsert_asset_with_leaf_info, + upsert_asset_with_owner_and_delegate_info, upsert_asset_with_royalty_amount, + upsert_asset_with_seq, upsert_collection_info, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -63,6 +64,12 @@ where } => { let (edition_attachment_address, _) = find_master_edition_account(&id); let id_bytes = id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? { + return Ok(None); + } + let slot_i = bundle.slot as i64; let uri = metadata.uri.replace('\0', ""); let name = metadata.name.clone().into_bytes(); @@ -128,7 +135,7 @@ where }; // Upsert asset table base info. - let mut query = asset::Entity::insert(asset_model) + let query = asset::Entity::insert(asset_model) .on_conflict( OnConflict::columns([asset::Column::Id]) .update_columns([ @@ -144,11 +151,6 @@ where ) .build(DbBackend::Postgres); - // Do not overwrite changes that happened after the asset was decompressed. - query.sql = format!( - "{} WHERE excluded.slot_updated > asset.slot_updated OR asset.slot_updated IS NULL", - query.sql - ); txn.execute(query) .await .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; @@ -183,7 +185,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/redeem.rs b/nft_ingester/src/program_transformers/bubblegum/redeem.rs index b9b7f2c27..3dc0bc999 100644 --- a/nft_ingester/src/program_transformers/bubblegum/redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/redeem.rs @@ -4,7 +4,8 @@ use log::debug; use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, u32_to_u8_array, upsert_asset_with_leaf_info, upsert_asset_with_seq, + asset_was_decompressed, save_changelog_event, u32_to_u8_array, upsert_asset_with_leaf_info, + upsert_asset_with_seq, }, }; use blockbuster::{instruction::InstructionBundle, programs::bubblegum::BubblegumInstruction}; @@ -32,6 +33,12 @@ where ); debug!("Indexing redeem for asset id: {:?}", asset_id); let id_bytes = asset_id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? 
{ + return Ok(()); + } + let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; @@ -45,7 +52,6 @@ where [0; 32], [0; 32], seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/transfer.rs b/nft_ingester/src/program_transformers/bubblegum/transfer.rs index 573f33a8f..07abbe523 100644 --- a/nft_ingester/src/program_transformers/bubblegum/transfer.rs +++ b/nft_ingester/src/program_transformers/bubblegum/transfer.rs @@ -2,8 +2,8 @@ use super::save_changelog_event; use crate::{ error::IngesterError, program_transformers::bubblegum::{ - upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, - upsert_asset_with_seq, + asset_was_decompressed, upsert_asset_with_leaf_info, + upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, }, }; use blockbuster::{ @@ -32,6 +32,12 @@ where .. } => { let id_bytes = id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? { + return Ok(()); + } + let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None @@ -51,7 +57,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 774acdd7e..940c0932c 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -1,8 +1,8 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_data, upsert_asset_with_leaf_info, - upsert_asset_with_royalty_amount, upsert_asset_with_seq, + asset_was_decompressed, save_changelog_event, upsert_asset_data, + upsert_asset_with_leaf_info, upsert_asset_with_royalty_amount, upsert_asset_with_seq, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -48,10 +48,17 @@ where &parsing_result.payload, ) { let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; + #[allow(unreachable_patterns)] return match le.schema { LeafSchema::V1 { id, nonce, .. } => { let id_bytes = id.to_bytes(); + + // First check to see if this asset has been decompressed and if so do not update. + if asset_was_decompressed(txn, id_bytes.to_vec()).await? { + return Ok(None); + } + let slot_i = bundle.slot as i64; let uri = if let Some(uri) = &update_args.uri { @@ -155,7 +162,6 @@ where le.schema.data_hash(), le.schema.creator_hash(), seq as i64, - false, ) .await?; @@ -216,8 +222,10 @@ where .exec(txn) .await?; - // This statement will update base information for each creator. - let query = asset_creators::Entity::insert_many(db_creator_infos) + // This statement will update base information for each creator and the + // `base_info_seq` number, allows for `mintV1` and `update_metadata` to be + // processed out of order. 
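                    // With the follow-up migration in this series, `asset_creators` ends up
                    // with two independent sequence columns: `base_info_seq` guards
                    // position/share/slot_updated in the statement below, while
                    // `verified_seq` guards the `verified` flag in the statement after it,
                    // so the two upserts can each win or lose a conflict on their own.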
+ let mut query = asset_creators::Entity::insert_many(db_creator_infos) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -227,15 +235,21 @@ where asset_creators::Column::Position, asset_creators::Column::Share, asset_creators::Column::SlotUpdated, + //asset_creators::Column::BaseInfoSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.base_info_seq > asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", + query.sql + ); txn.execute(query).await?; - // This statement will update whether the creator is verified and the `seq` - // number. `seq` is used to protect the `verified` field, allowing for `mint` - // and `verifyCreator` to be processed out of order. + // This statement will update whether the creator is verified and the + // `verified_seq` number, which is used to protect the `verified` field, + // allowing for `mintV1`, `update_metadata`, and `verifyCreator` to be + // processed out of order. let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) .on_conflict( OnConflict::columns([ @@ -244,13 +258,13 @@ where ]) .update_columns([ asset_creators::Column::Verified, - asset_creators::Column::Seq, + //asset_creators::Column::VerifiedSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.seq > asset_creators.seq OR asset_creators.seq IS NULL", + "{} WHERE excluded.verified_seq > asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", query.sql ); txn.execute(query).await?; diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 215399322..c4975d575 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -23,7 +23,7 @@ use num_traits::FromPrimitive; use plerkle_serialization::Pubkey as FBPubkey; use sea_orm::{ entity::*, query::*, sea_query::OnConflict, ActiveValue::Set, ConnectionTrait, DbBackend, - DbErr, EntityTrait, FromQueryResult, JoinType, JsonValue, + DbErr, EntityTrait, JsonValue, }; use std::collections::HashSet; @@ -195,6 +195,9 @@ pub async fn save_v1_asset( asset_data: Set(Some(id.to_vec())), slot_updated: Set(Some(slot_i)), burnt: Set(false), + //data_hash, + //creator_hash, + //leaf_seq, ..Default::default() }; let mut query = asset::Entity::insert(model) @@ -273,6 +276,8 @@ pub async fn save_v1_asset( txn.execute(query) .await .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + + // TODO remove old items that are no longer in collection. 
if let Some(c) = &metadata.collection { let model = asset_grouping::ActiveModel { asset_id: Set(id.to_vec()), @@ -345,7 +350,7 @@ pub async fn save_v1_asset( ) .exec(&txn) .await?; - if db_creators.len() > 0 { + if !db_creators.is_empty() { let mut query = asset_creators::Entity::insert_many(db_creators) .on_conflict( OnConflict::columns([ From c26c6935f903b87fd8a8b559860ebe18e96b8718 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:35:28 -0700 Subject: [PATCH 07/46] Don't prevent sequence number update when already decompressed --- nft_ingester/src/program_transformers/bubblegum/burn.rs | 5 +---- nft_ingester/src/program_transformers/bubblegum/db.rs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/burn.rs b/nft_ingester/src/program_transformers/bubblegum/burn.rs index 653d1bd45..adca1324f 100644 --- a/nft_ingester/src/program_transformers/bubblegum/burn.rs +++ b/nft_ingester/src/program_transformers/bubblegum/burn.rs @@ -49,10 +49,7 @@ where let query = asset::Entity::insert(asset_model) .on_conflict( OnConflict::columns([asset::Column::Id]) - .update_columns([ - asset::Column::Burnt, - //TODO maybe handle slot updated. - ]) + .update_columns([asset::Column::Burnt]) .to_owned(), ) .build(DbBackend::Postgres); diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 6c15b52bd..8c7729c4e 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -329,7 +329,7 @@ where .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE (NOT asset.was_decompressed) AND (excluded.seq >= asset.seq OR asset.seq IS NULL)", + "{} WHERE excluded.seq >= asset.seq OR asset.seq IS NULL", query.sql ); From dd891e89aa49732e0b226ca7b72a42963c816eb5 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 19 Oct 2023 11:17:06 -0700 Subject: [PATCH 08/46] Add sequence number to downloading metadata background task --- .../program_transformers/bubblegum/mint_v1.rs | 1 + .../bubblegum/update_metadata.rs | 2 +- .../token_metadata/v1_asset.rs | 1 + nft_ingester/src/tasks/common/mod.rs | 30 +++++++++++-------- tools/bgtask_creator/src/main.rs | 1 + 5 files changed, 22 insertions(+), 13 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index dbcd13447..4c9db5fc7 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -338,6 +338,7 @@ where let mut task = DownloadMetadata { asset_data_id: id_bytes.to_vec(), uri: metadata.uri.clone(), + seq: seq as i64, created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 940c0932c..d615900db 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -278,10 +278,10 @@ where return Ok(None); } - // TODO DEAL WITH TASKS let mut task = DownloadMetadata { asset_data_id: id_bytes.to_vec(), uri, + seq: seq as i64, created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); diff --git 
a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index c4975d575..b07edbae1 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -389,6 +389,7 @@ pub async fn save_v1_asset( let mut task = DownloadMetadata { asset_data_id: id.to_vec(), uri, + seq: 0, created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); diff --git a/nft_ingester/src/tasks/common/mod.rs b/nft_ingester/src/tasks/common/mod.rs index bf39e455d..2cf1cca18 100644 --- a/nft_ingester/src/tasks/common/mod.rs +++ b/nft_ingester/src/tasks/common/mod.rs @@ -18,6 +18,7 @@ const TASK_NAME: &str = "DownloadMetadata"; pub struct DownloadMetadata { pub asset_data_id: Vec, pub uri: String, + pub seq: i64, #[serde(skip_serializing)] pub created_at: Option, } @@ -110,24 +111,29 @@ impl BgTask for DownloadMetadataTask { id: Unchanged(download_metadata.asset_data_id.clone()), metadata: Set(body), reindex: Set(Some(false)), + //download_metadata_seq: Set(Some(download_metadata.seq)), ..Default::default() }; debug!( "download metadata for {:?}", bs58::encode(download_metadata.asset_data_id.clone()).into_string() ); - asset_data::Entity::update(model) - .filter(asset_data::Column::Id.eq(download_metadata.asset_data_id.clone())) - .exec(db) - .await - .map(|_| ()) - .map_err(|db| { - IngesterError::TaskManagerError(format!( - "Database error with {}, error: {}", - self.name(), - db - )) - })?; + let query = asset_data::Entity::update(model) + .filter(asset_data::Column::Id.eq(download_metadata.asset_data_id.clone())); + // if download_metadata.seq != 0 { + // query.filter( + // Condition::any() + // .add(asset_data::Column::DownloadMetadataSeq.lt(download_metadata.seq)) + // .add(asset_data::Column::DownloadMetadataSeq.is_null()), + // ); + // } + query.exec(db).await.map(|_| ()).map_err(|db| { + IngesterError::TaskManagerError(format!( + "Database error with {}, error: {}", + self.name(), + db + )) + })?; if meta_url.is_err() { return Err(IngesterError::UnrecoverableTaskError(format!( diff --git a/tools/bgtask_creator/src/main.rs b/tools/bgtask_creator/src/main.rs index a08dd87d1..17d1bba0c 100644 --- a/tools/bgtask_creator/src/main.rs +++ b/tools/bgtask_creator/src/main.rs @@ -322,6 +322,7 @@ WHERE let mut task = DownloadMetadata { asset_data_id: asset.id, uri: asset.metadata_url, + seq: 0, created_at: Some(Utc::now().naive_utc()), }; From 97fe275702a6bf0957195ba869d839baa35810fe Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 19 Oct 2023 17:13:12 -0700 Subject: [PATCH 09/46] Add sequence number migration (Sea ORM not regenerated yet) --- migration/src/lib.rs | 2 + ...01_add_seq_numbers_bgum_update_metadata.rs | 99 +++++++++++++++++++ .../src/program_transformers/bubblegum/db.rs | 8 +- .../program_transformers/bubblegum/mint_v1.rs | 14 ++- .../bubblegum/update_metadata.rs | 8 +- .../token_metadata/v1_asset.rs | 1 - 6 files changed, 120 insertions(+), 12 deletions(-) create mode 100644 migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs diff --git a/migration/src/lib.rs b/migration/src/lib.rs index 7e38ac93d..7b4be4523 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -30,6 +30,7 @@ mod m20230724_120101_add_group_info_seq; mod m20230726_013107_remove_not_null_constraint_from_group_value; mod m20230918_182123_add_raw_name_symbol; mod m20230919_072154_cl_audits; +mod 
m20231019_120101_add_seq_numbers_bgum_update_metadata; pub struct Migrator; @@ -67,6 +68,7 @@ impl MigratorTrait for Migrator { Box::new(m20230726_013107_remove_not_null_constraint_from_group_value::Migration), Box::new(m20230918_182123_add_raw_name_symbol::Migration), Box::new(m20230919_072154_cl_audits::Migration), + Box::new(m20231019_120101_add_seq_numbers_bgum_update_metadata::Migration), ] } } diff --git a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs new file mode 100644 index 000000000..15179639a --- /dev/null +++ b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs @@ -0,0 +1,99 @@ +use digital_asset_types::dao::{asset, asset_creators, asset_data}; +use sea_orm_migration::{ + prelude::*, + sea_orm::{ConnectionTrait, DatabaseBackend, Statement}, +}; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .get_connection() + .execute(Statement::from_string( + DatabaseBackend::Postgres, + " + ALTER TABLE asset_creators + RENAME COLUMN seq to verified_seq; + " + .to_string(), + )) + .await?; + + manager + .alter_table( + Table::alter() + .table(asset_creators::Entity) + .add_column(ColumnDef::new(Alias::new("base_info_seq")).big_integer()) + .to_owned(), + ) + .await?; + + manager + .alter_table( + Table::alter() + .table(asset_data::Entity) + .add_column(ColumnDef::new(Alias::new("base_info_seq")).big_integer()) + .add_column(ColumnDef::new(Alias::new("download_metadata_seq")).big_integer()) + .to_owned(), + ) + .await?; + + manager + .alter_table( + Table::alter() + .table(asset::Entity) + .add_column(ColumnDef::new(Alias::new("royalty_amount_seq")).big_integer()) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .get_connection() + .execute(Statement::from_string( + DatabaseBackend::Postgres, + " + ALTER TABLE asset_creators + RENAME COLUMN verified_seq to seq; + " + .to_string(), + )) + .await?; + + manager + .alter_table( + Table::alter() + .table(asset_creators::Entity) + .drop_column(Alias::new("base_info_seq")) + .to_owned(), + ) + .await?; + + manager + .alter_table( + Table::alter() + .table(asset_data::Entity) + .drop_column(Alias::new("base_info_seq")) + .drop_column(Alias::new("download_metadata_seq")) + .to_owned(), + ) + .await?; + + manager + .alter_table( + Table::alter() + .table(asset::Entity) + .drop_column(Alias::new("royalty_amount_seq")) + .to_owned(), + ) + .await?; + + Ok(()) + } +} diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 8c7729c4e..4e1391cf7 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -373,7 +373,7 @@ where .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.seq >= asset_creators.seq OR asset_creators.seq is NULL", + "{} WHERE excluded.seq >= asset_creators.verified_seq OR asset_creators.verified_seq is NULL", query.sql ); @@ -466,7 +466,7 @@ where reindex: Set(reindex), raw_name: Set(raw_name), raw_symbol: Set(raw_symbol), - //seq: Set(seq), + //base_info_seq: Set(seq), }; let mut query = asset_data::Entity::insert(model) @@ -485,13 +485,13 @@ where asset_data::Column::Reindex, asset_data::Column::RawName, 
asset_data::Column::RawSymbol, - //asset_data::Column::Seq, + //asset_data::Column::BaseInfoSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.seq >= asset_data.seq OR asset_data.seq IS NULL)", + "{} WHERE excluded.base_info_seq >= asset_data.base_info_seq OR asset_data.base_info_seq IS NULL)", query.sql ); txn.execute(query) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 4c9db5fc7..3b3603891 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -241,6 +241,7 @@ where position: Set(i as i16), share: Set(c.share as i32), slot_updated: Set(Some(slot_i)), + //base_info_seq: Set(Some(seq as i64)) ..Default::default() }); @@ -248,7 +249,7 @@ where asset_id: Set(id_bytes.to_vec()), creator: Set(c.address.to_bytes().to_vec()), verified: Set(c.verified), - seq: Set(Some(seq as i64)), + //verified_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -256,7 +257,7 @@ where } // This statement will update base information for each creator. - let query = asset_creators::Entity::insert_many(db_creator_infos) + let mut query = asset_creators::Entity::insert_many(db_creator_infos) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -266,10 +267,15 @@ where asset_creators::Column::Position, asset_creators::Column::Share, asset_creators::Column::SlotUpdated, + //asset_creators::Column::BaseInfoSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.base_info_seq > asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", + query.sql + ); txn.execute(query).await?; // This statement will update whether the creator is verified and the `seq` @@ -283,13 +289,13 @@ where ]) .update_columns([ asset_creators::Column::Verified, - asset_creators::Column::Seq, + //asset_creators::Column::VerifiedSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.seq > asset_creators.seq OR asset_creators.seq IS NULL", + "{} WHERE excluded.seq > asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", query.sql ); txn.execute(query).await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index d615900db..204a4154c 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -215,9 +215,11 @@ where .filter( Condition::all() .add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())) - .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)) - // TODO WHAT IF SEQ IS NULL - .add(asset_creators::Column::Seq.lt(seq as i64)), + .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)), // .add( + // Condition::any() + // .add(asset_creators::Column::VerifiedSeq.lt(seq as i64)) + // .add(asset_creators::Column::VerifiedSeq.is_null()), + // ), ) .exec(txn) .await?; diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index b07edbae1..1bfedac84 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -277,7 +277,6 @@ pub async fn save_v1_asset( .await .map_err(|db_err| 
IngesterError::AssetIndexError(db_err.to_string()))?; - // TODO remove old items that are no longer in collection. if let Some(c) = &metadata.collection { let model = asset_grouping::ActiveModel { asset_id: Set(id.to_vec()), From 660a6ba4ff848a7c6cfd0a5351c5a7e5b33b69fb Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Fri, 20 Oct 2023 14:39:01 -0700 Subject: [PATCH 10/46] Regenerate Sea-ORM types --- .../src/dao/generated/asset.rs | 3 ++ .../src/dao/generated/asset_creators.rs | 9 +++-- .../src/dao/generated/asset_data.rs | 16 +++++--- .../src/dao/generated/asset_grouping.rs | 6 +-- .../src/dao/generated/cl_audits.rs | 2 +- .../src/dao/generated/sea_orm_active_enums.rs | 40 +++++++++---------- 6 files changed, 44 insertions(+), 32 deletions(-) diff --git a/digital_asset_types/src/dao/generated/asset.rs b/digital_asset_types/src/dao/generated/asset.rs index 0ced69299..dffccca17 100644 --- a/digital_asset_types/src/dao/generated/asset.rs +++ b/digital_asset_types/src/dao/generated/asset.rs @@ -46,6 +46,7 @@ pub struct Model { pub owner_delegate_seq: Option, pub was_decompressed: bool, pub leaf_seq: Option, + pub royalty_amount_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -78,6 +79,7 @@ pub enum Column { OwnerDelegateSeq, WasDecompressed, LeafSeq, + RoyaltyAmountSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -133,6 +135,7 @@ impl ColumnTrait for Column { Self::OwnerDelegateSeq => ColumnType::BigInteger.def().null(), Self::WasDecompressed => ColumnType::Boolean.def(), Self::LeafSeq => ColumnType::BigInteger.def().null(), + Self::RoyaltyAmountSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/asset_creators.rs b/digital_asset_types/src/dao/generated/asset_creators.rs index 21f34dcf7..510b896a3 100644 --- a/digital_asset_types/src/dao/generated/asset_creators.rs +++ b/digital_asset_types/src/dao/generated/asset_creators.rs @@ -19,9 +19,10 @@ pub struct Model { pub creator: Vec, pub share: i32, pub verified: bool, - pub seq: Option, + pub verified_seq: Option, pub slot_updated: Option, pub position: i16, + pub base_info_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -31,9 +32,10 @@ pub enum Column { Creator, Share, Verified, - Seq, + VerifiedSeq, SlotUpdated, Position, + BaseInfoSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -62,9 +64,10 @@ impl ColumnTrait for Column { Self::Creator => ColumnType::Binary.def(), Self::Share => ColumnType::Integer.def(), Self::Verified => ColumnType::Boolean.def(), - Self::Seq => ColumnType::BigInteger.def().null(), + Self::VerifiedSeq => ColumnType::BigInteger.def().null(), Self::SlotUpdated => ColumnType::BigInteger.def().null(), Self::Position => ColumnType::SmallInteger.def(), + Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/asset_data.rs b/digital_asset_types/src/dao/generated/asset_data.rs index 374ed854a..17bbc2e43 100644 --- a/digital_asset_types/src/dao/generated/asset_data.rs +++ b/digital_asset_types/src/dao/generated/asset_data.rs @@ -24,8 +24,10 @@ pub struct Model { pub metadata: Json, pub slot_updated: i64, pub reindex: Option, - pub raw_name: Vec, - pub raw_symbol: Vec, + pub raw_name: Option>, + pub raw_symbol: Option>, + pub base_info_seq: Option, + pub download_metadata_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -40,6 +42,8 @@ pub enum Column { Reindex, 
RawName, RawSymbol, + BaseInfoSeq, + DownloadMetadataSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -70,9 +74,11 @@ impl ColumnTrait for Column { Self::MetadataMutability => Mutability::db_type(), Self::Metadata => ColumnType::JsonBinary.def(), Self::SlotUpdated => ColumnType::BigInteger.def(), - Self::Reindex => ColumnType::Boolean.def(), - Self::RawName => ColumnType::Binary.def(), - Self::RawSymbol => ColumnType::Binary.def(), + Self::Reindex => ColumnType::Boolean.def().null(), + Self::RawName => ColumnType::Binary.def().null(), + Self::RawSymbol => ColumnType::Binary.def().null(), + Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), + Self::DownloadMetadataSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/asset_grouping.rs b/digital_asset_types/src/dao/generated/asset_grouping.rs index aae51d6d8..5d5c0e749 100644 --- a/digital_asset_types/src/dao/generated/asset_grouping.rs +++ b/digital_asset_types/src/dao/generated/asset_grouping.rs @@ -1,4 +1,4 @@ -//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.3 +//! SeaORM Entity. Generated by sea-orm-codegen 0.9.3 use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; @@ -12,7 +12,7 @@ impl EntityName for Entity { } } -#[derive(Clone, Debug, PartialEq, DeriveModel, DeriveActiveModel, Eq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, DeriveModel, DeriveActiveModel, Serialize, Deserialize)] pub struct Model { pub id: i64, pub asset_id: Vec, @@ -20,7 +20,7 @@ pub struct Model { pub group_value: Option, pub seq: Option, pub slot_updated: Option, - pub verified: Option, + pub verified: bool, pub group_info_seq: Option, } diff --git a/digital_asset_types/src/dao/generated/cl_audits.rs b/digital_asset_types/src/dao/generated/cl_audits.rs index a07714202..0d02b7769 100644 --- a/digital_asset_types/src/dao/generated/cl_audits.rs +++ b/digital_asset_types/src/dao/generated/cl_audits.rs @@ -22,7 +22,7 @@ pub struct Model { pub seq: i64, pub level: i64, pub hash: Vec, - pub created_at: Option, + pub created_at: DateTime, pub tx: String, } diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index 2be0283e7..628071b54 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -3,16 +3,6 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] -pub enum Mutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, - #[sea_orm(string_value = "unknown")] - Unknown, -} #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", @@ -60,6 +50,26 @@ pub enum RoyaltyTargetType { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] +pub enum Mutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] +pub 
enum OwnerType { + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "token")] + Token, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", @@ -113,13 +123,3 @@ pub enum SpecificationVersions { #[sea_orm(string_value = "v2")] V2, } -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] -pub enum OwnerType { - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "token")] - Token, - #[sea_orm(string_value = "unknown")] - Unknown, -} From 213dac51bfbd8c005799cbf1bbfe204bac7b5aa2 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Fri, 20 Oct 2023 15:21:11 -0700 Subject: [PATCH 11/46] Use new sequence numbers for Bubblegum Update Metadata --- digital_asset_types/tests/common.rs | 12 +++++--- digital_asset_types/tests/json_parsing.rs | 6 ++-- .../src/program_transformers/bubblegum/db.rs | 30 +++++++++---------- .../program_transformers/bubblegum/mint_v1.rs | 8 ++--- .../bubblegum/update_metadata.rs | 18 ++++++----- .../token_metadata/v1_asset.rs | 17 +++++------ nft_ingester/src/tasks/common/mod.rs | 18 +++++------ 7 files changed, 58 insertions(+), 51 deletions(-) diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index bbe3cf509..ef551f251 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -83,8 +83,10 @@ pub fn create_asset_data( metadata: JsonValue::String("processing".to_string()), slot_updated: 0, reindex: None, - raw_name: metadata.name.into_bytes().to_vec().clone(), - raw_symbol: metadata.symbol.into_bytes().to_vec().clone(), + raw_name: Some(metadata.name.into_bytes().to_vec().clone()), + raw_symbol: Some(metadata.symbol.into_bytes().to_vec().clone()), + base_info_seq: Some(0), + download_metadata_seq: Some(0), }, ) } @@ -157,6 +159,7 @@ pub fn create_asset( owner_delegate_seq: Some(0), was_decompressed: false, leaf_seq: Some(0), + royalty_amount_seq: Some(0), }, ) } @@ -182,9 +185,10 @@ pub fn create_asset_creator( creator, share, verified, - seq: Some(0), + verified_seq: Some(0), slot_updated: Some(0), position: 0, + base_info_seq: Some(0), }, ) } @@ -231,7 +235,7 @@ pub fn create_asset_grouping( id: row_num, group_key: "collection".to_string(), slot_updated: Some(0), - verified: Some(false), + verified: false, group_info_seq: Some(0), }, ) diff --git a/digital_asset_types/tests/json_parsing.rs b/digital_asset_types/tests/json_parsing.rs index 765f14bc6..c10ca12e3 100644 --- a/digital_asset_types/tests/json_parsing.rs +++ b/digital_asset_types/tests/json_parsing.rs @@ -34,8 +34,10 @@ pub async fn parse_onchain_json(json: serde_json::Value) -> Content { metadata: json, slot_updated: 0, reindex: None, - raw_name: String::from("Handalf").into_bytes().to_vec(), - raw_symbol: String::from("").into_bytes().to_vec(), + raw_name: Some(String::from("Handalf").into_bytes().to_vec()), + raw_symbol: Some(String::from("").into_bytes().to_vec()), + base_info_seq: Some(0), + download_metadata_seq: Some(0), }; v1_content_from_json(&asset_data).unwrap() diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 4e1391cf7..44e41f240 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ 
b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -354,7 +354,7 @@ where asset_id: Set(asset_id), creator: Set(creator), verified: Set(verified), - seq: Set(Some(seq)), + verified_seq: Set(Some(seq)), ..Default::default() }; @@ -366,7 +366,7 @@ where ]) .update_columns([ asset_creators::Column::Verified, - asset_creators::Column::Seq, + asset_creators::Column::VerifiedSeq, ]) .to_owned(), ) @@ -403,7 +403,7 @@ where asset_id: Set(asset_id), group_key: Set("collection".to_string()), group_value: Set(group_value), - verified: Set(Some(verified)), + verified: Set(verified), slot_updated: Set(Some(slot_updated)), group_info_seq: Set(Some(seq)), ..Default::default() @@ -450,7 +450,7 @@ pub async fn upsert_asset_data( reindex: Option, raw_name: Vec, raw_symbol: Vec, - _seq: i64, + seq: i64, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, @@ -464,9 +464,10 @@ where metadata: Set(metadata), slot_updated: Set(slot_updated), reindex: Set(reindex), - raw_name: Set(raw_name), - raw_symbol: Set(raw_symbol), - //base_info_seq: Set(seq), + raw_name: Set(Some(raw_name)), + raw_symbol: Set(Some(raw_symbol)), + base_info_seq: Set(Some(seq)), + ..Default::default() }; let mut query = asset_data::Entity::insert(model) @@ -477,15 +478,14 @@ where asset_data::Column::ChainData, asset_data::Column::MetadataUrl, asset_data::Column::MetadataMutability, - // Don't update Metadata if it already exists. Even if we are doing - // and update_metadata and there's a new URI, the new background task - // will overwrite it. - //asset_data::Column::Metadata, + // Don't update asset_data::Column::Metadata if it already exists. Even if we + // are indexing `update_metadata`` and there's a new URI, the new background + // task will overwrite it. asset_data::Column::SlotUpdated, asset_data::Column::Reindex, asset_data::Column::RawName, asset_data::Column::RawSymbol, - //asset_data::Column::BaseInfoSeq, + asset_data::Column::BaseInfoSeq, ]) .to_owned(), ) @@ -505,7 +505,7 @@ pub async fn upsert_asset_with_royalty_amount( txn: &T, id: Vec, royalty_amount: i32, - _seq: i64, + seq: i64, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, @@ -513,7 +513,7 @@ where let model = asset::ActiveModel { id: Set(id.clone()), royalty_amount: Set(royalty_amount), - //royalty_amount_seq: Set(Some(seq)), + royalty_amount_seq: Set(Some(seq)), ..Default::default() }; @@ -522,7 +522,7 @@ where OnConflict::column(asset::Column::Id) .update_columns([ asset::Column::RoyaltyAmount, - //asset::Column::RoyaltyAmountSeq, + asset::Column::RoyaltyAmountSeq, ]) .to_owned(), ) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 3b3603891..7a5be0033 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -241,7 +241,7 @@ where position: Set(i as i16), share: Set(c.share as i32), slot_updated: Set(Some(slot_i)), - //base_info_seq: Set(Some(seq as i64)) + base_info_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -249,7 +249,7 @@ where asset_id: Set(id_bytes.to_vec()), creator: Set(c.address.to_bytes().to_vec()), verified: Set(c.verified), - //verified_seq: Set(Some(seq as i64)), + verified_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -267,7 +267,7 @@ where asset_creators::Column::Position, asset_creators::Column::Share, asset_creators::Column::SlotUpdated, - //asset_creators::Column::BaseInfoSeq, + 
asset_creators::Column::BaseInfoSeq, ]) .to_owned(), ) @@ -289,7 +289,7 @@ where ]) .update_columns([ asset_creators::Column::Verified, - //asset_creators::Column::VerifiedSeq, + asset_creators::Column::VerifiedSeq, ]) .to_owned(), ) diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 204a4154c..6c13eb097 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -189,6 +189,7 @@ where position: Set(i as i16), share: Set(c.share as i32), slot_updated: Set(Some(slot_i)), + base_info_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -196,7 +197,7 @@ where asset_id: Set(id_bytes.to_vec()), creator: Set(c.address.to_bytes().to_vec()), verified: Set(c.verified), - seq: Set(Some(seq as i64)), + verified_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -215,11 +216,12 @@ where .filter( Condition::all() .add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())) - .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)), // .add( - // Condition::any() - // .add(asset_creators::Column::VerifiedSeq.lt(seq as i64)) - // .add(asset_creators::Column::VerifiedSeq.is_null()), - // ), + .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)) + .add( + Condition::any() + .add(asset_creators::Column::VerifiedSeq.lt(seq as i64)) + .add(asset_creators::Column::VerifiedSeq.is_null()), + ), ) .exec(txn) .await?; @@ -237,7 +239,7 @@ where asset_creators::Column::Position, asset_creators::Column::Share, asset_creators::Column::SlotUpdated, - //asset_creators::Column::BaseInfoSeq, + asset_creators::Column::BaseInfoSeq, ]) .to_owned(), ) @@ -260,7 +262,7 @@ where ]) .update_columns([ asset_creators::Column::Verified, - //asset_creators::Column::VerifiedSeq, + asset_creators::Column::VerifiedSeq, ]) .to_owned(), ) diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 1bfedac84..7950d8e45 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -147,8 +147,10 @@ pub async fn save_v1_asset( slot_updated: Set(slot_i), reindex: Set(Some(true)), id: Set(id.to_vec()), - raw_name: Set(name.to_vec()), - raw_symbol: Set(symbol.to_vec()), + raw_name: Set(Some(name.to_vec())), + raw_symbol: Set(Some(symbol.to_vec())), + download_metadata_seq: Set(Some(0)), + ..Default::default() }; let txn = conn.begin().await?; let mut query = asset_data::Entity::insert(asset_data_model) @@ -161,7 +163,8 @@ pub async fn save_v1_asset( asset_data::Column::MetadataMutability, asset_data::Column::SlotUpdated, asset_data::Column::Reindex, - //TODO RAW NAME + asset_data::Column::RawName, + asset_data::Column::RawSymbol, ]) .to_owned(), ) @@ -195,9 +198,6 @@ pub async fn save_v1_asset( asset_data: Set(Some(id.to_vec())), slot_updated: Set(Some(slot_i)), burnt: Set(false), - //data_hash, - //creator_hash, - //leaf_seq, ..Default::default() }; let mut query = asset::Entity::insert(model) @@ -282,7 +282,7 @@ pub async fn save_v1_asset( asset_id: Set(id.to_vec()), group_key: Set("collection".to_string()), group_value: Set(Some(c.key.to_string())), - verified: Set(Some(c.verified)), + verified: Set(c.verified), seq: Set(None), slot_updated: Set(Some(slot_i)), ..Default::default() @@ -333,7 +333,6 @@ pub async fn save_v1_asset( creator: 
Set(c.address.to_bytes().to_vec()), share: Set(c.share as i32), verified: Set(c.verified), - seq: Set(Some(0)), slot_updated: Set(Some(slot_i)), position: Set(i as i16), ..Default::default() @@ -360,7 +359,7 @@ pub async fn save_v1_asset( asset_creators::Column::Creator, asset_creators::Column::Share, asset_creators::Column::Verified, - asset_creators::Column::Seq, + asset_creators::Column::VerifiedSeq, asset_creators::Column::SlotUpdated, ]) .to_owned(), diff --git a/nft_ingester/src/tasks/common/mod.rs b/nft_ingester/src/tasks/common/mod.rs index 2cf1cca18..19f1a12e9 100644 --- a/nft_ingester/src/tasks/common/mod.rs +++ b/nft_ingester/src/tasks/common/mod.rs @@ -111,22 +111,22 @@ impl BgTask for DownloadMetadataTask { id: Unchanged(download_metadata.asset_data_id.clone()), metadata: Set(body), reindex: Set(Some(false)), - //download_metadata_seq: Set(Some(download_metadata.seq)), + download_metadata_seq: Set(Some(download_metadata.seq)), ..Default::default() }; debug!( "download metadata for {:?}", bs58::encode(download_metadata.asset_data_id.clone()).into_string() ); - let query = asset_data::Entity::update(model) + let mut query = asset_data::Entity::update(model) .filter(asset_data::Column::Id.eq(download_metadata.asset_data_id.clone())); - // if download_metadata.seq != 0 { - // query.filter( - // Condition::any() - // .add(asset_data::Column::DownloadMetadataSeq.lt(download_metadata.seq)) - // .add(asset_data::Column::DownloadMetadataSeq.is_null()), - // ); - // } + if download_metadata.seq != 0 { + query = query.filter( + Condition::any() + .add(asset_data::Column::DownloadMetadataSeq.lt(download_metadata.seq)) + .add(asset_data::Column::DownloadMetadataSeq.is_null()), + ); + } query.exec(db).await.map(|_| ()).map_err(|db| { IngesterError::TaskManagerError(format!( "Database error with {}, error: {}", From 026c16a9440dc8dc3c034e395dc1ea09dfe0875e Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Fri, 20 Oct 2023 17:58:51 -0700 Subject: [PATCH 12/46] Extra condition to protect out of order creator verification --- nft_ingester/src/program_transformers/bubblegum/db.rs | 6 ++++-- .../src/program_transformers/bubblegum/mint_v1.rs | 4 ++-- .../src/program_transformers/bubblegum/update_metadata.rs | 8 ++++---- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 44e41f240..4f11d8167 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -373,8 +373,10 @@ where .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.seq >= asset_creators.verified_seq OR asset_creators.verified_seq is NULL", - query.sql + "{} WHERE (excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq is NULL)\n\ + AND ({} >= asset_creators.base_info_seq OR asset_creators.base_info_seq is NULL)", + query.sql, + seq ); txn.execute(query) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 7a5be0033..06c8482de 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -273,7 +273,7 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.base_info_seq > asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", + "{} WHERE 
excluded.base_info_seq >= asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", query.sql ); txn.execute(query).await?; @@ -295,7 +295,7 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.seq > asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", + "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", query.sql ); txn.execute(query).await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 6c13eb097..d6f72ad96 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -219,8 +219,8 @@ where .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)) .add( Condition::any() - .add(asset_creators::Column::VerifiedSeq.lt(seq as i64)) - .add(asset_creators::Column::VerifiedSeq.is_null()), + .add(asset_creators::Column::BaseInfoSeq.lt(seq as i64)) + .add(asset_creators::Column::BaseInfoSeq.is_null()), ), ) .exec(txn) @@ -245,7 +245,7 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.base_info_seq > asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", + "{} WHERE excluded.base_info_seq >= asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", query.sql ); txn.execute(query).await?; @@ -268,7 +268,7 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.verified_seq > asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", + "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", query.sql ); txn.execute(query).await?; From e797adcd28eba8c6be2277e9f787c2117acbc1f4 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 23 Oct 2023 00:34:58 -0700 Subject: [PATCH 13/46] Remove base_info_seq for each creator and add creators_added_seq to asset table --- ...01_add_seq_numbers_bgum_update_metadata.rs | 20 +- .../src/program_transformers/bubblegum/db.rs | 87 ++++++-- .../program_transformers/bubblegum/mint_v1.rs | 21 +- .../bubblegum/update_metadata.rs | 202 +++++++++--------- .../token_metadata/v1_asset.rs | 3 + 5 files changed, 194 insertions(+), 139 deletions(-) diff --git a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs index 15179639a..6881c3c8b 100644 --- a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs +++ b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs @@ -22,15 +22,6 @@ impl MigrationTrait for Migration { )) .await?; - manager - .alter_table( - Table::alter() - .table(asset_creators::Entity) - .add_column(ColumnDef::new(Alias::new("base_info_seq")).big_integer()) - .to_owned(), - ) - .await?; - manager .alter_table( Table::alter() @@ -46,6 +37,7 @@ impl MigrationTrait for Migration { Table::alter() .table(asset::Entity) .add_column(ColumnDef::new(Alias::new("royalty_amount_seq")).big_integer()) + .add_column(ColumnDef::new(Alias::new("creators_added_seq")).big_integer()) .to_owned(), ) .await?; @@ -66,15 +58,6 @@ impl MigrationTrait for Migration { )) .await?; - manager - .alter_table( - Table::alter() - .table(asset_creators::Entity) - .drop_column(Alias::new("base_info_seq")) - .to_owned(), - ) - .await?; - manager .alter_table( 
Table::alter() @@ -90,6 +73,7 @@ impl MigrationTrait for Migration { Table::alter() .table(asset::Entity) .drop_column(Alias::new("royalty_amount_seq")) + .drop_column(Alias::new("creators_added_seq")) .to_owned(), ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 4f11d8167..d207a44a9 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -351,32 +351,71 @@ where T: ConnectionTrait + TransactionTrait, { let model = asset_creators::ActiveModel { - asset_id: Set(asset_id), + asset_id: Set(asset_id.clone()), creator: Set(creator), verified: Set(verified), verified_seq: Set(Some(seq)), ..Default::default() }; - let mut query = asset_creators::Entity::insert(model) + // Only upsert a creator if the asset table's creator array seq is at a lower value. That seq + // gets updated when we set up the creator array in `mintV1` or `update_metadata`. We don't + // want to insert a creator that was removed from a later `update_metadata`. And we don't need + // to worry about creator verification in that case because the `update_metadata` updates + // creator verification state as well. + if creators_should_be_updated(txn, asset_id, seq).await? { + let mut query = asset_creators::Entity::insert(model) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Verified, + asset_creators::Column::VerifiedSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + query.sql = format!( + "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq is NULL", + query.sql, +); + + txn.execute(query) + .await + .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; + } + + Ok(()) +} + +pub async fn upsert_asset_with_creators_added_seq( + txn: &T, + id: Vec, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + let model = asset::ActiveModel { + id: Set(id), + seq: Set(Some(seq)), + ..Default::default() + }; + + let mut query = asset::Entity::insert(model) .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, - ]) - .to_owned(), + OnConflict::column(asset::Column::Id) + .update_columns([asset::Column::Seq]) + .to_owned(), ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE (excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq is NULL)\n\ - AND ({} >= asset_creators.base_info_seq OR asset_creators.base_info_seq is NULL)", - query.sql, - seq + "{} WHERE excluded.creators_added_seq >= asset.creators_added_seq OR asset.creators_added_seq IS NULL", + query.sql ); txn.execute(query) @@ -553,3 +592,21 @@ where }; Ok(false) } + +pub async fn creators_should_be_updated( + txn: &T, + id: Vec, + seq: i64, +) -> Result +where + T: ConnectionTrait + TransactionTrait, +{ + if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? 
{ + if let Some(creator_array_seq) = asset.seq { + if seq < creator_array_seq { + return Ok(false); + } + } + } + Ok(true) +} diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 06c8482de..1c7cba0fe 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,8 +1,9 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_data, - upsert_asset_with_compression_info, upsert_asset_with_leaf_info, + asset_was_decompressed, creators_should_be_updated, save_changelog_event, + upsert_asset_data, upsert_asset_with_compression_info, + upsert_asset_with_creators_added_seq, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_royalty_amount, upsert_asset_with_seq, upsert_collection_info, }, @@ -218,9 +219,11 @@ where .await .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - // Insert into `asset_creators` table. + // Insert into `asset_creators` table as long as there wasn't a subsequent `update_metadata`.` let creators = &metadata.creators; - if !creators.is_empty() { + if !creators.is_empty() + && creators_should_be_updated(txn, id_bytes.to_vec(), seq as i64).await? + { // Vec to hold base creator information. let mut db_creator_infos = Vec::with_capacity(creators.len()); @@ -257,7 +260,7 @@ where } // This statement will update base information for each creator. - let mut query = asset_creators::Entity::insert_many(db_creator_infos) + let query = asset_creators::Entity::insert_many(db_creator_infos) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -267,15 +270,10 @@ where asset_creators::Column::Position, asset_creators::Column::Share, asset_creators::Column::SlotUpdated, - asset_creators::Column::BaseInfoSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.base_info_seq >= asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", - query.sql - ); txn.execute(query).await?; // This statement will update whether the creator is verified and the `seq` @@ -299,6 +297,9 @@ where query.sql ); txn.execute(query).await?; + + upsert_asset_with_creators_added_seq(txn, id_bytes.to_vec(), seq as i64) + .await?; } // Insert into `asset_authority` table. diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index d6f72ad96..2d2f6d37a 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -1,8 +1,9 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_data, - upsert_asset_with_leaf_info, upsert_asset_with_royalty_amount, upsert_asset_with_seq, + asset_was_decompressed, creators_should_be_updated, save_changelog_event, + upsert_asset_data, upsert_asset_with_creators_added_seq, upsert_asset_with_leaf_info, + upsert_asset_with_royalty_amount, upsert_asset_with_seq, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -168,110 +169,119 @@ where upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; // Update `asset_creators` table. - if let Some(creators) = &update_args.creators { - // Vec to hold base creator information. 
- let mut db_creator_infos = Vec::with_capacity(creators.len()); + if creators_should_be_updated(txn, id_bytes.to_vec(), seq as i64).await? { + if let Some(creators) = &update_args.creators { + // Vec to hold base creator information. + let mut db_creator_infos = Vec::with_capacity(creators.len()); - // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. - let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); + // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. + let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); - // Set to prevent duplicates. - let mut creators_set = HashSet::new(); + // Set to prevent duplicates. + let mut creators_set = HashSet::new(); - for (i, c) in creators.iter().enumerate() { - if creators_set.contains(&c.address) { - continue; - } + for (i, c) in creators.iter().enumerate() { + if creators_set.contains(&c.address) { + continue; + } - db_creator_infos.push(asset_creators::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - position: Set(i as i16), - share: Set(c.share as i32), - slot_updated: Set(Some(slot_i)), - base_info_seq: Set(Some(seq as i64)), - ..Default::default() - }); + db_creator_infos.push(asset_creators::ActiveModel { + asset_id: Set(id_bytes.to_vec()), + creator: Set(c.address.to_bytes().to_vec()), + position: Set(i as i16), + share: Set(c.share as i32), + slot_updated: Set(Some(slot_i)), + base_info_seq: Set(Some(seq as i64)), + ..Default::default() + }); - db_creator_verified_infos.push(asset_creators::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - verified: Set(c.verified), - verified_seq: Set(Some(seq as i64)), - ..Default::default() - }); + db_creator_verified_infos.push(asset_creators::ActiveModel { + asset_id: Set(id_bytes.to_vec()), + creator: Set(c.address.to_bytes().to_vec()), + verified: Set(c.verified), + verified_seq: Set(Some(seq as i64)), + ..Default::default() + }); - creators_set.insert(c.address); - } + creators_set.insert(c.address); + } - // Remove creators no longer present in creator array. - let db_creators_to_remove: Vec> = current_metadata - .creators - .iter() - .filter(|c| !creators_set.contains(&c.address)) - .map(|c| c.address.to_bytes().to_vec()) - .collect(); + // Remove creators no longer present in creator array. 
+ let db_creators_to_remove: Vec> = current_metadata + .creators + .iter() + .filter(|c| !creators_set.contains(&c.address)) + .map(|c| c.address.to_bytes().to_vec()) + .collect(); - asset_creators::Entity::delete_many() - .filter( - Condition::all() - .add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())) - .add(asset_creators::Column::Creator.is_in(db_creators_to_remove)) - .add( - Condition::any() - .add(asset_creators::Column::BaseInfoSeq.lt(seq as i64)) - .add(asset_creators::Column::BaseInfoSeq.is_null()), - ), - ) - .exec(txn) - .await?; + asset_creators::Entity::delete_many() + .filter( + Condition::all() + .add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())) + .add( + asset_creators::Column::Creator + .is_in(db_creators_to_remove), + ) + .add( + Condition::any() + .add(asset_creators::Column::BaseInfoSeq.lt(seq as i64)) + .add(asset_creators::Column::BaseInfoSeq.is_null()), + ), + ) + .exec(txn) + .await?; - // This statement will update base information for each creator and the - // `base_info_seq` number, allows for `mintV1` and `update_metadata` to be - // processed out of order. - let mut query = asset_creators::Entity::insert_many(db_creator_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Position, - asset_creators::Column::Share, - asset_creators::Column::SlotUpdated, - asset_creators::Column::BaseInfoSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.base_info_seq >= asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", - query.sql - ); - txn.execute(query).await?; + // This statement will update base information for each creator and the + // `base_info_seq` number, allows for `mintV1` and `update_metadata` to be + // processed out of order. + let mut query = asset_creators::Entity::insert_many(db_creator_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Position, + asset_creators::Column::Share, + asset_creators::Column::SlotUpdated, + asset_creators::Column::BaseInfoSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.base_info_seq >= asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", + query.sql + ); + txn.execute(query).await?; - // This statement will update whether the creator is verified and the - // `verified_seq` number, which is used to protect the `verified` field, - // allowing for `mintV1`, `update_metadata`, and `verifyCreator` to be - // processed out of order. - let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", - query.sql - ); - txn.execute(query).await?; + // This statement will update whether the creator is verified and the + // `verified_seq` number, which is used to protect the `verified` field, + // allowing for `mintV1`, `update_metadata`, and `verifyCreator` to be + // processed out of order. 
+ let mut query = + asset_creators::Entity::insert_many(db_creator_verified_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Verified, + asset_creators::Column::VerifiedSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", + query.sql + ); + txn.execute(query).await?; + + upsert_asset_with_creators_added_seq(txn, id_bytes.to_vec(), seq as i64) + .await?; + } } if uri.is_empty() { diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 7950d8e45..7cdfe8ba5 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -312,6 +312,7 @@ pub async fn save_v1_asset( } txn.commit().await?; let creators = data.creators.unwrap_or_default(); + if !creators.is_empty() { let mut creators_set = HashSet::new(); let existing_creators: Vec = asset_creators::Entity::find() @@ -322,6 +323,7 @@ pub async fn save_v1_asset( ) .all(conn) .await?; + if !existing_creators.is_empty() { let mut db_creators = Vec::with_capacity(creators.len()); for (i, c) in creators.into_iter().enumerate() { @@ -348,6 +350,7 @@ pub async fn save_v1_asset( ) .exec(&txn) .await?; + if !db_creators.is_empty() { let mut query = asset_creators::Entity::insert_many(db_creators) .on_conflict( From f2b12186bc60299a7bdde8ce69aa64f0c530bc23 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 23 Oct 2023 00:52:09 -0700 Subject: [PATCH 14/46] Regenerate Sea-ORM types --- README.md | 2 +- .../src/dao/generated/asset.rs | 3 + .../src/dao/generated/asset_creators.rs | 3 - .../src/dao/generated/sea_orm_active_enums.rs | 124 +++++++++--------- 4 files changed, 66 insertions(+), 66 deletions(-) diff --git a/README.md b/README.md index 2784bcad9..e0b4d6d2b 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Because this is a multi component system the easiest way to develop or locally t #### Regenerating DB Types Edit the init.sql, then run `docker compose up db` Then with a local `DATABASE_URL` var exported like this `export DATABASE_URL=postgres://solana:solana@localhost/solana` you can run -` sea-orm-cli generate entity -o ./digital_asset_types/src/dao/generated/ --database-url $DATABASE_URL --with-serde both --expanded-format` +`sea-orm-cli generate entity -o ./digital_asset_types/src/dao/generated/ --database-url $DATABASE_URL --with-serde both --expanded-format` If you need to install `sea-orm-cli` run `cargo install sea-orm-cli`. 
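For reference, the `query.sql = format!(...)` guards added throughout these patches all compose into the same kind of statement, so the `verified_seq` upsert shown above ends up sending Postgres roughly the query sketched below. This is an illustrative sketch only: the exact identifier quoting and `$n` parameter numbering are produced by SeaQuery, and `$1..$4` stand in for the bound model fields. `excluded` is Postgres's pseudo-table holding the row proposed by the INSERT, so the trailing WHERE lets a row carrying a newer sequence number replace an older one but never the reverse, which is what makes out-of-order processing of Bubblegum instructions safe.

    -- Approximate shape of the guarded creator-verification upsert.
    INSERT INTO asset_creators (asset_id, creator, verified, verified_seq)
    VALUES ($1, $2, $3, $4)
    ON CONFLICT (asset_id, creator) DO UPDATE
    SET verified = excluded.verified,
        verified_seq = excluded.verified_seq
    WHERE excluded.verified_seq >= asset_creators.verified_seq
       OR asset_creators.verified_seq IS NULL;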
diff --git a/digital_asset_types/src/dao/generated/asset.rs b/digital_asset_types/src/dao/generated/asset.rs index dffccca17..b8e116e93 100644 --- a/digital_asset_types/src/dao/generated/asset.rs +++ b/digital_asset_types/src/dao/generated/asset.rs @@ -47,6 +47,7 @@ pub struct Model { pub was_decompressed: bool, pub leaf_seq: Option, pub royalty_amount_seq: Option, + pub creators_added_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -80,6 +81,7 @@ pub enum Column { WasDecompressed, LeafSeq, RoyaltyAmountSeq, + CreatorsAddedSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -136,6 +138,7 @@ impl ColumnTrait for Column { Self::WasDecompressed => ColumnType::Boolean.def(), Self::LeafSeq => ColumnType::BigInteger.def().null(), Self::RoyaltyAmountSeq => ColumnType::BigInteger.def().null(), + Self::CreatorsAddedSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/asset_creators.rs b/digital_asset_types/src/dao/generated/asset_creators.rs index 510b896a3..346ed3b2e 100644 --- a/digital_asset_types/src/dao/generated/asset_creators.rs +++ b/digital_asset_types/src/dao/generated/asset_creators.rs @@ -22,7 +22,6 @@ pub struct Model { pub verified_seq: Option, pub slot_updated: Option, pub position: i16, - pub base_info_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -35,7 +34,6 @@ pub enum Column { VerifiedSeq, SlotUpdated, Position, - BaseInfoSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -67,7 +65,6 @@ impl ColumnTrait for Column { Self::VerifiedSeq => ColumnType::BigInteger.def().null(), Self::SlotUpdated => ColumnType::BigInteger.def().null(), Self::Position => ColumnType::SmallInteger.def(), - Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index 628071b54..bf72d7957 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -3,62 +3,6 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm( - rs_type = "String", - db_type = "Enum", - enum_name = "v1_account_attachments" -)] -pub enum V1AccountAttachments { - #[sea_orm(string_value = "edition")] - Edition, - #[sea_orm(string_value = "edition_marker")] - EditionMarker, - #[sea_orm(string_value = "master_edition_v1")] - MasterEditionV1, - #[sea_orm(string_value = "master_edition_v2")] - MasterEditionV2, - #[sea_orm(string_value = "unknown")] - Unknown, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] -pub enum TaskStatus { - #[sea_orm(string_value = "failed")] - Failed, - #[sea_orm(string_value = "pending")] - Pending, - #[sea_orm(string_value = "running")] - Running, - #[sea_orm(string_value = "success")] - Success, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm( - rs_type = "String", - db_type = "Enum", - enum_name = "royalty_target_type" -)] -pub enum RoyaltyTargetType { - #[sea_orm(string_value = "creators")] - Creators, - #[sea_orm(string_value = "fanout")] - Fanout, - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "unknown")] - Unknown, -} -#[derive(Debug, Clone, 
PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] -pub enum Mutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, - #[sea_orm(string_value = "unknown")] - Unknown, -} #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] pub enum OwnerType { @@ -98,12 +42,20 @@ pub enum SpecificationAssetClass { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] -pub enum ChainMutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, +#[sea_orm( + rs_type = "String", + db_type = "Enum", + enum_name = "v1_account_attachments" +)] +pub enum V1AccountAttachments { + #[sea_orm(string_value = "edition")] + Edition, + #[sea_orm(string_value = "edition_marker")] + EditionMarker, + #[sea_orm(string_value = "master_edition_v1")] + MasterEditionV1, + #[sea_orm(string_value = "master_edition_v2")] + MasterEditionV2, #[sea_orm(string_value = "unknown")] Unknown, } @@ -123,3 +75,51 @@ pub enum SpecificationVersions { #[sea_orm(string_value = "v2")] V2, } +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm( + rs_type = "String", + db_type = "Enum", + enum_name = "royalty_target_type" +)] +pub enum RoyaltyTargetType { + #[sea_orm(string_value = "creators")] + Creators, + #[sea_orm(string_value = "fanout")] + Fanout, + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] +pub enum TaskStatus { + #[sea_orm(string_value = "failed")] + Failed, + #[sea_orm(string_value = "pending")] + Pending, + #[sea_orm(string_value = "running")] + Running, + #[sea_orm(string_value = "success")] + Success, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] +pub enum ChainMutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] +pub enum Mutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, + #[sea_orm(string_value = "unknown")] + Unknown, +} From 77020fa87275cbed01d0d5fc1430a87d8e7b73db Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 23 Oct 2023 01:00:42 -0700 Subject: [PATCH 15/46] Change creator metadata updates to use new creators_added_seq --- digital_asset_types/tests/common.rs | 2 +- .../src/program_transformers/bubblegum/db.rs | 72 +++++++++---------- .../program_transformers/bubblegum/mint_v1.rs | 6 +- .../bubblegum/update_metadata.rs | 21 +----- 4 files changed, 42 insertions(+), 59 deletions(-) diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index ef551f251..50bfc194a 100644 --- 
a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -160,6 +160,7 @@ pub fn create_asset( was_decompressed: false, leaf_seq: Some(0), royalty_amount_seq: Some(0), + creators_added_seq: Some(0), }, ) } @@ -188,7 +189,6 @@ pub fn create_asset_creator( verified_seq: Some(0), slot_updated: Some(0), position: 0, - base_info_seq: Some(0), }, ) } diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index d207a44a9..2738bcb7f 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -391,40 +391,6 @@ where Ok(()) } -pub async fn upsert_asset_with_creators_added_seq( - txn: &T, - id: Vec, - seq: i64, -) -> Result<(), IngesterError> -where - T: ConnectionTrait + TransactionTrait, -{ - let model = asset::ActiveModel { - id: Set(id), - seq: Set(Some(seq)), - ..Default::default() - }; - - let mut query = asset::Entity::insert(model) - .on_conflict( - OnConflict::column(asset::Column::Id) - .update_columns([asset::Column::Seq]) - .to_owned(), - ) - .build(DbBackend::Postgres); - - query.sql = format!( - "{} WHERE excluded.creators_added_seq >= asset.creators_added_seq OR asset.creators_added_seq IS NULL", - query.sql - ); - - txn.execute(query) - .await - .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; - - Ok(()) -} - pub async fn upsert_collection_info( txn: &T, asset_id: Vec, @@ -602,11 +568,45 @@ where T: ConnectionTrait + TransactionTrait, { if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { - if let Some(creator_array_seq) = asset.seq { - if seq < creator_array_seq { + if let Some(creators_added_seq) = asset.creators_added_seq { + if seq < creators_added_seq { return Ok(false); } } } Ok(true) } + +pub async fn upsert_asset_with_creators_added_seq( + txn: &T, + id: Vec, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + let model = asset::ActiveModel { + id: Set(id), + creators_added_seq: Set(Some(seq)), + ..Default::default() + }; + + let mut query = asset::Entity::insert(model) + .on_conflict( + OnConflict::column(asset::Column::Id) + .update_columns([asset::Column::CreatorsAddedSeq]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + query.sql = format!( + "{} WHERE excluded.creators_added_seq >= asset.creators_added_seq OR asset.creators_added_seq IS NULL", + query.sql + ); + + txn.execute(query) + .await + .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; + + Ok(()) +} diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 1c7cba0fe..8ba918e52 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -244,7 +244,6 @@ where position: Set(i as i16), share: Set(c.share as i32), slot_updated: Set(Some(slot_i)), - base_info_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -276,9 +275,8 @@ where .build(DbBackend::Postgres); txn.execute(query).await?; - // This statement will update whether the creator is verified and the `seq` - // number. `seq` is used to protect the `verified` field, allowing for `mint` - // and `verifyCreator` to be processed out of order. + // This statement will update whether the creator is verified and the + // `verified_seq` number. 
let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) .on_conflict( OnConflict::columns([ diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 2d2f6d37a..1b08f20e4 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -191,7 +191,6 @@ where position: Set(i as i16), share: Set(c.share as i32), slot_updated: Set(Some(slot_i)), - base_info_seq: Set(Some(seq as i64)), ..Default::default() }); @@ -221,20 +220,13 @@ where .add( asset_creators::Column::Creator .is_in(db_creators_to_remove), - ) - .add( - Condition::any() - .add(asset_creators::Column::BaseInfoSeq.lt(seq as i64)) - .add(asset_creators::Column::BaseInfoSeq.is_null()), ), ) .exec(txn) .await?; - // This statement will update base information for each creator and the - // `base_info_seq` number, allows for `mintV1` and `update_metadata` to be - // processed out of order. - let mut query = asset_creators::Entity::insert_many(db_creator_infos) + // This statement will update base information for each creator. + let query = asset_creators::Entity::insert_many(db_creator_infos) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -244,21 +236,14 @@ where asset_creators::Column::Position, asset_creators::Column::Share, asset_creators::Column::SlotUpdated, - asset_creators::Column::BaseInfoSeq, ]) .to_owned(), ) .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.base_info_seq >= asset_creators.base_info_seq OR asset_creators.base_info_seq IS NULL", - query.sql - ); txn.execute(query).await?; // This statement will update whether the creator is verified and the - // `verified_seq` number, which is used to protect the `verified` field, - // allowing for `mintV1`, `update_metadata`, and `verifyCreator` to be - // processed out of order. + // `verified_seq` number. 
let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) .on_conflict( From ab1f1b49dab7fc9cc0660fc572d29cd6fc7de434 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:33:29 -0700 Subject: [PATCH 16/46] Factor out common creator update code to helper function --- .../src/program_transformers/bubblegum/db.rs | 94 ++++++++++++- .../program_transformers/bubblegum/mint_v1.rs | 108 +++------------ .../bubblegum/update_metadata.rs | 124 +++--------------- 3 files changed, 129 insertions(+), 197 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 2738bcb7f..e788526db 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -4,11 +4,12 @@ use digital_asset_types::dao::{ sea_orm_active_enums::{ChainMutability, Mutability}, }; use log::{debug, info}; -use mpl_bubblegum::types::Collection; +use mpl_bubblegum::types::{Collection, Creator}; use sea_orm::{ query::*, sea_query::OnConflict, ActiveValue::Set, ColumnTrait, DbBackend, EntityTrait, }; use spl_account_compression::events::ChangeLogEventV1; +use std::collections::HashSet; pub async fn save_changelog_event<'c, T>( change_log_event: &ChangeLogEventV1, @@ -610,3 +611,94 @@ where Ok(()) } + +pub async fn upsert_creators( + txn: &T, + id: Vec, + creators: &Vec, + slot_updated: i64, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + if creators_should_be_updated(txn, id.clone(), seq).await? { + if !creators.is_empty() { + // Vec to hold base creator information. + let mut db_creator_infos = Vec::with_capacity(creators.len()); + + // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. + let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); + + // Set to prevent duplicates. + let mut creators_set = HashSet::new(); + + for (i, c) in creators.iter().enumerate() { + if creators_set.contains(&c.address) { + continue; + } + + db_creator_infos.push(asset_creators::ActiveModel { + asset_id: Set(id.clone()), + creator: Set(c.address.to_bytes().to_vec()), + position: Set(i as i16), + share: Set(c.share as i32), + slot_updated: Set(Some(slot_updated)), + ..Default::default() + }); + + db_creator_verified_infos.push(asset_creators::ActiveModel { + asset_id: Set(id.clone()), + creator: Set(c.address.to_bytes().to_vec()), + verified: Set(c.verified), + verified_seq: Set(Some(seq)), + ..Default::default() + }); + + creators_set.insert(c.address); + } + + // This statement will update base information for each creator. + let query = asset_creators::Entity::insert_many(db_creator_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Position, + asset_creators::Column::Share, + asset_creators::Column::SlotUpdated, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + txn.execute(query).await?; + + // This statement will update whether the creator is verified and the + // `verified_seq` number. 
+ let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Creator, + ]) + .update_columns([ + asset_creators::Column::Verified, + asset_creators::Column::VerifiedSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", + query.sql + ); + txn.execute(query).await?; + } + + upsert_asset_with_creators_added_seq(txn, id, seq).await?; + } + + Ok(()) +} diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 8ba918e52..6f0a22275 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,11 +1,10 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, creators_should_be_updated, save_changelog_event, - upsert_asset_data, upsert_asset_with_compression_info, - upsert_asset_with_creators_added_seq, upsert_asset_with_leaf_info, + asset_was_decompressed, save_changelog_event, upsert_asset_data, + upsert_asset_with_compression_info, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_royalty_amount, - upsert_asset_with_seq, upsert_collection_info, + upsert_asset_with_seq, upsert_collection_info, upsert_creators, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -20,8 +19,11 @@ use blockbuster::{ use chrono::Utc; use digital_asset_types::{ dao::{ - asset, asset_authority, asset_creators, asset_v1_account_attachments, - sea_orm_active_enums::{ChainMutability, Mutability, OwnerType, RoyaltyTargetType}, + asset, asset_authority, asset_v1_account_attachments, + sea_orm_active_enums::{ + ChainMutability, Mutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass, + SpecificationVersions, V1AccountAttachments, + }, }, json::ChainDataV1, }; @@ -30,11 +32,6 @@ use num_traits::FromPrimitive; use sea_orm::{ entity::*, query::*, sea_query::OnConflict, ConnectionTrait, DbBackend, EntityTrait, JsonValue, }; -use std::collections::HashSet; - -use digital_asset_types::dao::sea_orm_active_enums::{ - SpecificationAssetClass, SpecificationVersions, V1AccountAttachments, -}; // TODO -> consider moving structs into these functions to avoid clone @@ -219,86 +216,15 @@ where .await .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - // Insert into `asset_creators` table as long as there wasn't a subsequent `update_metadata`.` - let creators = &metadata.creators; - if !creators.is_empty() - && creators_should_be_updated(txn, id_bytes.to_vec(), seq as i64).await? - { - // Vec to hold base creator information. - let mut db_creator_infos = Vec::with_capacity(creators.len()); - - // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. - let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); - - // Set to prevent duplicates. 
- let mut creators_set = HashSet::new(); - - for (i, c) in creators.iter().enumerate() { - if creators_set.contains(&c.address) { - continue; - } - - db_creator_infos.push(asset_creators::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - position: Set(i as i16), - share: Set(c.share as i32), - slot_updated: Set(Some(slot_i)), - ..Default::default() - }); - - db_creator_verified_infos.push(asset_creators::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - verified: Set(c.verified), - verified_seq: Set(Some(seq as i64)), - ..Default::default() - }); - - creators_set.insert(c.address); - } - - // This statement will update base information for each creator. - let query = asset_creators::Entity::insert_many(db_creator_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Position, - asset_creators::Column::Share, - asset_creators::Column::SlotUpdated, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - txn.execute(query).await?; - - // This statement will update whether the creator is verified and the - // `verified_seq` number. - let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", - query.sql - ); - txn.execute(query).await?; - - upsert_asset_with_creators_added_seq(txn, id_bytes.to_vec(), seq as i64) - .await?; - } + // Upsert into `asset_creators` table. + upsert_creators( + txn, + id_bytes.to_vec(), + &metadata.creators, + slot_i, + seq as i64, + ) + .await?; // Insert into `asset_authority` table. let model = asset_authority::ActiveModel { diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 1b08f20e4..835f9cd0c 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -1,9 +1,9 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, creators_should_be_updated, save_changelog_event, - upsert_asset_data, upsert_asset_with_creators_added_seq, upsert_asset_with_leaf_info, - upsert_asset_with_royalty_amount, upsert_asset_with_seq, + asset_was_decompressed, save_changelog_event, upsert_asset_data, + upsert_asset_with_leaf_info, upsert_asset_with_royalty_amount, upsert_asset_with_seq, + upsert_creators, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -22,10 +22,7 @@ use digital_asset_types::{ }; use log::warn; use num_traits::FromPrimitive; -use sea_orm::{ - entity::*, query::*, sea_query::OnConflict, ConnectionTrait, DbBackend, EntityTrait, JsonValue, -}; -use std::collections::HashSet; +use sea_orm::{entity::*, query::*, ConnectionTrait, EntityTrait, JsonValue}; pub async fn update_metadata<'c, T>( parsing_result: &BubblegumInstruction, @@ -169,105 +166,22 @@ where upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; // Update `asset_creators` table. 
- if creators_should_be_updated(txn, id_bytes.to_vec(), seq as i64).await? { - if let Some(creators) = &update_args.creators { - // Vec to hold base creator information. - let mut db_creator_infos = Vec::with_capacity(creators.len()); - - // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. - let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); - - // Set to prevent duplicates. - let mut creators_set = HashSet::new(); - - for (i, c) in creators.iter().enumerate() { - if creators_set.contains(&c.address) { - continue; - } - - db_creator_infos.push(asset_creators::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - position: Set(i as i16), - share: Set(c.share as i32), - slot_updated: Set(Some(slot_i)), - ..Default::default() - }); - - db_creator_verified_infos.push(asset_creators::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - verified: Set(c.verified), - verified_seq: Set(Some(seq as i64)), - ..Default::default() - }); - - creators_set.insert(c.address); - } - // Remove creators no longer present in creator array. - let db_creators_to_remove: Vec> = current_metadata - .creators - .iter() - .filter(|c| !creators_set.contains(&c.address)) - .map(|c| c.address.to_bytes().to_vec()) - .collect(); - - asset_creators::Entity::delete_many() - .filter( - Condition::all() - .add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())) - .add( - asset_creators::Column::Creator - .is_in(db_creators_to_remove), - ), - ) - .exec(txn) - .await?; - - // This statement will update base information for each creator. - let query = asset_creators::Entity::insert_many(db_creator_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Position, - asset_creators::Column::Share, - asset_creators::Column::SlotUpdated, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - txn.execute(query).await?; - - // This statement will update whether the creator is verified and the - // `verified_seq` number. - let mut query = - asset_creators::Entity::insert_many(db_creator_verified_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", - query.sql - ); - txn.execute(query).await?; - - upsert_asset_with_creators_added_seq(txn, id_bytes.to_vec(), seq as i64) - .await?; - } - } + // Delete any existing creators. + asset_creators::Entity::delete_many() + .filter( + Condition::all().add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())), + ) + .exec(txn) + .await?; + + // Upsert into `asset_creators` table. 
+ let creators = if let Some(creators) = &update_args.creators { + creators + } else { + ¤t_metadata.creators + }; + upsert_creators(txn, id_bytes.to_vec(), creators, slot_i, seq as i64).await?; if uri.is_empty() { warn!( From 3581269659a42173563569a31fbe43ad734a3ff4 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 3 Dec 2023 16:18:31 -0800 Subject: [PATCH 17/46] Update to latest blockbuster beta --- Cargo.lock | 4 ++-- das_api/Cargo.toml | 2 +- digital_asset_types/Cargo.toml | 2 +- nft_ingester/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e33acdb9..2b2c42ddc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,9 +881,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blockbuster" -version = "0.9.0-beta.1" +version = "0.9.0-beta.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e0240c1218958c0d51284d783fa055f551d769bb8b7a4abf635b17fa9620dc" +checksum = "40ab97783defb671f7214f158a517844cb8fa5da781e4d8d46a17e15bc79f213" dependencies = [ "anchor-lang", "async-trait", diff --git a/das_api/Cargo.toml b/das_api/Cargo.toml index c99d8b21a..bd2bbff51 100644 --- a/das_api/Cargo.toml +++ b/das_api/Cargo.toml @@ -33,7 +33,7 @@ schemars = "0.8.6" schemars_derive = "0.8.6" open-rpc-derive = { version = "0.0.4"} open-rpc-schema = { version = "0.0.4"} -blockbuster = "0.9.0-beta.1" +blockbuster = "=0.9.0-beta.3" anchor-lang = "0.28.0" mpl-token-metadata = { version = "=2.0.0-beta.1", features = ["serde-feature"] } mpl-candy-machine-core = { version = "2.0.1", features = ["no-entrypoint"] } diff --git a/digital_asset_types/Cargo.toml b/digital_asset_types/Cargo.toml index 8e4b18bb9..8d79be1f3 100644 --- a/digital_asset_types/Cargo.toml +++ b/digital_asset_types/Cargo.toml @@ -18,7 +18,7 @@ solana-sdk = "~1.16.16" num-traits = "0.2.15" num-derive = "0.3.3" thiserror = "1.0.31" -blockbuster = "0.9.0-beta.1" +blockbuster = "=0.9.0-beta.3" jsonpath_lib = "0.3.0" mime_guess = "2.0.4" url = "2.3.1" diff --git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index 6fb9f998c..8a44d32b4 100644 --- a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -35,7 +35,7 @@ spl-concurrent-merkle-tree = "0.2.0" uuid = "1.0.0" async-trait = "0.1.53" num-traits = "0.2.15" -blockbuster = "0.9.0-beta.1" +blockbuster = "=0.9.0-beta.3" figment = { version = "0.10.6", features = ["env", "toml", "yaml"] } cadence = "0.29.0" cadence-macros = "0.29.0" From e62eb61eeb4b8913c6723a64e7c802ec9bed820d Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 3 Dec 2023 16:37:52 -0800 Subject: [PATCH 18/46] Use less than or equal for download metadata seq check --- nft_ingester/src/tasks/common/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nft_ingester/src/tasks/common/mod.rs b/nft_ingester/src/tasks/common/mod.rs index 19f1a12e9..3b533c2f7 100644 --- a/nft_ingester/src/tasks/common/mod.rs +++ b/nft_ingester/src/tasks/common/mod.rs @@ -123,7 +123,7 @@ impl BgTask for DownloadMetadataTask { if download_metadata.seq != 0 { query = query.filter( Condition::any() - .add(asset_data::Column::DownloadMetadataSeq.lt(download_metadata.seq)) + .add(asset_data::Column::DownloadMetadataSeq.lte(download_metadata.seq)) .add(asset_data::Column::DownloadMetadataSeq.is_null()), ); } From 5808834d4fa1a10036e8cba170f26d1929337172 Mon Sep 17 00:00:00 2001 From: Michael Danenberg 
<56533526+danenbm@users.noreply.github.com> Date: Sun, 3 Dec 2023 16:42:26 -0800 Subject: [PATCH 19/46] Index verified for token metadata collection --- .../src/program_transformers/token_metadata/v1_asset.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 7cdfe8ba5..70fdaee63 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -283,7 +283,7 @@ pub async fn save_v1_asset( group_key: Set("collection".to_string()), group_value: Set(Some(c.key.to_string())), verified: Set(c.verified), - seq: Set(None), + group_info_seq: Set(None), slot_updated: Set(Some(slot_i)), ..Default::default() }; @@ -294,10 +294,10 @@ pub async fn save_v1_asset( asset_grouping::Column::GroupKey, ]) .update_columns([ - asset_grouping::Column::GroupKey, asset_grouping::Column::GroupValue, - asset_grouping::Column::Seq, + asset_grouping::Column::Verified, asset_grouping::Column::SlotUpdated, + asset_grouping::Column::GroupInfoSeq, ]) .to_owned(), ) From e38f64a3b28fbe4c2a3e0a02eff029597a95893e Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 3 Dec 2023 16:48:30 -0800 Subject: [PATCH 20/46] Add slot_updated to initial asset upsert, and removed duplicate items --- nft_ingester/src/program_transformers/bubblegum/mint_v1.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 6f0a22275..93ffbaca5 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -121,10 +121,8 @@ where id: Set(id_bytes.to_vec()), owner_type: Set(OwnerType::Single), frozen: Set(false), - tree_id: Set(Some(tree_id.clone())), specification_version: Set(Some(SpecificationVersions::V1)), specification_asset_class: Set(Some(SpecificationAssetClass::Nft)), - nonce: Set(Some(nonce as i64)), royalty_target_type: Set(RoyaltyTargetType::Creators), royalty_target: Set(None), asset_data: Set(Some(id_bytes.to_vec())), @@ -144,6 +142,7 @@ where asset::Column::RoyaltyTargetType, asset::Column::RoyaltyTarget, asset::Column::AssetData, + asset::Column::SlotUpdated, ]) .to_owned(), ) From 933e299041ef688e5e9f52435a4bbc908d709459 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 3 Dec 2023 23:09:08 -0800 Subject: [PATCH 21/46] Remove asset_was_decompressed Replaced with WHERE clauses on each upsert. Move remaining upserts from mint_v1 to db.rs. Remove upsert to asset_v1_account_attachments from mint_V1. Combine upserts for asset base info and royalty amount. 
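Illustrative sketch only (not part of the diff below), assuming the crate's existing
`asset` entity and an open sea_orm transaction `txn`: the guard that replaces the old
asset_was_decompressed lookup is appended straight onto the generated upsert SQL.
Decompression writes `asset.seq = 0`, so any row with a non-zero `seq` is still
compressed and safe to update.

    // Sketch of the WHERE-clause guard pattern used by the upserts in db.rs.
    // `model` is an asset::ActiveModel prepared by the caller.
    let mut query = asset::Entity::insert(model)
        .on_conflict(
            OnConflict::column(asset::Column::Id)
                .update_columns([asset::Column::Seq])
                .to_owned(),
        )
        .build(DbBackend::Postgres);
    // Skip the update entirely for decompressed assets (asset.seq = 0).
    query.sql = format!("{} WHERE asset.seq != 0", query.sql);
    txn.execute(query).await?;

Each upsert below extends this guard with its own sequence-number comparison so that
out-of-order Bubblegum instructions cannot overwrite newer state.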
--- .../program_transformers/bubblegum/burn.rs | 3 - .../bubblegum/cancel_redeem.rs | 8 +- .../bubblegum/collection_verification.rs | 9 +- .../bubblegum/creator_verification.rs | 7 +- .../src/program_transformers/bubblegum/db.rs | 226 +++++++++++++----- .../bubblegum/decompress.rs | 8 +- .../bubblegum/delegate.rs | 8 +- .../program_transformers/bubblegum/mint_v1.rs | 119 ++------- .../program_transformers/bubblegum/redeem.rs | 9 +- .../bubblegum/transfer.rs | 10 +- .../bubblegum/update_metadata.rs | 38 ++- .../token_metadata/v1_asset.rs | 6 +- 12 files changed, 207 insertions(+), 244 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/burn.rs b/nft_ingester/src/program_transformers/bubblegum/burn.rs index adca1324f..f59a64c65 100644 --- a/nft_ingester/src/program_transformers/bubblegum/burn.rs +++ b/nft_ingester/src/program_transformers/bubblegum/burn.rs @@ -23,9 +23,6 @@ where T: ConnectionTrait + TransactionTrait, { if let Some(cl) = &parsing_result.tree_update { - // Note: We do not check whether the asset has been decompressed here because we know if it - // was burned then it could not have been decompressed later. - let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; let leaf_index = cl.index; let (asset_id, _) = Pubkey::find_program_address( diff --git a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs index ba6dd3073..5edc5b0e7 100644 --- a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_with_leaf_info, + save_changelog_event, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, }, }; @@ -31,12 +31,6 @@ where .. } => { let id_bytes = id.to_bytes(); - - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? { - return Ok(()); - } - let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None diff --git a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs index fd88d6c35..8f2429cef 100644 --- a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs @@ -1,6 +1,4 @@ -use crate::program_transformers::bubblegum::{ - asset_was_decompressed, upsert_asset_with_seq, upsert_collection_info, -}; +use crate::program_transformers::bubblegum::{upsert_asset_with_seq, upsert_collection_info}; use blockbuster::{ instruction::InstructionBundle, programs::bubblegum::{BubblegumInstruction, LeafSchema, Payload}, @@ -44,11 +42,6 @@ where LeafSchema::V1 { id, .. } => id.to_bytes().to_vec(), }; - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? 
{ - return Ok(()); - } - let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; diff --git a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs index 85c6e9857..7538317fa 100644 --- a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_with_leaf_info, + save_changelog_event, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_creator_verified, }, }; @@ -52,11 +52,6 @@ where } => { let id_bytes = id.to_bytes(); - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? { - return Ok(()); - } - let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index e788526db..eb04f79b1 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -1,7 +1,11 @@ use crate::error::IngesterError; use digital_asset_types::dao::{ - asset, asset_creators, asset_data, asset_grouping, backfill_items, cl_audits, cl_items, - sea_orm_active_enums::{ChainMutability, Mutability}, + asset, asset_authority, asset_creators, asset_data, asset_grouping, backfill_items, cl_audits, + cl_items, + sea_orm_active_enums::{ + ChainMutability, Mutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass, + SpecificationVersions, + }, }; use log::{debug, info}; use mpl_bubblegum::types::{Collection, Creator}; @@ -177,9 +181,10 @@ where ) .build(DbBackend::Postgres); + // Do not overwrite changes that happened after decompression (asset.seq = 0). // If the asset was decompressed, don't update the leaf info since we cleared it during decompression. query.sql = format!( - "{} WHERE (NOT asset.was_decompressed) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", + "{} WHERE asset.seq != 0 AND (NOT asset.was_decompressed) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", query.sql ); @@ -208,7 +213,7 @@ where ..Default::default() }; - let query = asset::Entity::insert(model) + let mut query = asset::Entity::insert(model) .on_conflict( OnConflict::column(asset::Column::Id) .update_columns([ @@ -222,6 +227,10 @@ where .to_owned(), ) .build(DbBackend::Postgres); + + // Do not overwrite changes that happened after decompression (asset.seq = 0). + query.sql = format!("{} WHERE asset.seq != 0", query.sql); + txn.execute(query) .await .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; @@ -258,8 +267,11 @@ where .to_owned(), ) .build(DbBackend::Postgres); + + // Do not overwrite changes that happened after decompression (asset.seq = 0). + // Do not overwrite changes from a later Bubblegum instruction. 
query.sql = format!( - "{} WHERE excluded.owner_delegate_seq >= asset.owner_delegate_seq OR asset.owner_delegate_seq IS NULL", + "{} WHERE asset.seq != 0 AND (excluded.owner_delegate_seq >= asset.owner_delegate_seq OR asset.owner_delegate_seq IS NULL)", query.sql ); @@ -305,7 +317,13 @@ where .to_owned(), ) .build(DbBackend::Postgres); - query.sql = format!("{} WHERE NOT asset.was_decompressed", query.sql); + + // Do not overwrite changes that happened after decompression (asset.seq = 0). + // Do not overwrite changes from Bubblegum decompress instruction itself. + query.sql = format!( + "{} WHERE asset.seq != 0 AND (NOT asset.was_decompressed)", + query.sql + ); txn.execute(query).await?; Ok(()) @@ -329,8 +347,10 @@ where ) .build(DbBackend::Postgres); + // Do not overwrite changes that happened after decompression (asset.seq = 0). + // Do not overwrite changes from a later Bubblegum instruction. query.sql = format!( - "{} WHERE excluded.seq >= asset.seq OR asset.seq IS NULL", + "{} WHERE (asset.seq != 0 AND excluded.seq >= asset.seq) OR asset.seq IS NULL", query.sql ); @@ -364,7 +384,8 @@ where // want to insert a creator that was removed from a later `update_metadata`. And we don't need // to worry about creator verification in that case because the `update_metadata` updates // creator verification state as well. - if creators_should_be_updated(txn, asset_id, seq).await? { + let multi_txn = txn.begin().await?; + if creators_should_be_updated(&multi_txn, asset_id, seq).await? { let mut query = asset_creators::Entity::insert(model) .on_conflict( OnConflict::columns([ @@ -384,11 +405,15 @@ where query.sql, ); - txn.execute(query) + multi_txn + .execute(query) .await .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; } + // Close out transaction and relinqish the lock. + multi_txn.commit().await?; + Ok(()) } @@ -433,8 +458,9 @@ where ) .build(DbBackend::Postgres); + // Do not overwrite changes that happened after decompression (asset_grouping.group_info_seq = 0). query.sql = format!( - "{} WHERE excluded.group_info_seq >= asset_grouping.group_info_seq OR asset_grouping.group_info_seq IS NULL", + "{} WHERE (asset_grouping.group_info_seq != 0 AND excluded.group_info_seq >= asset_grouping.group_info_seq) OR asset_grouping.group_info_seq IS NULL", query.sql ); @@ -498,49 +524,13 @@ where .to_owned(), ) .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.base_info_seq >= asset_data.base_info_seq OR asset_data.base_info_seq IS NULL)", - query.sql - ); - txn.execute(query) - .await - .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; - - Ok(()) -} - -pub async fn upsert_asset_with_royalty_amount( - txn: &T, - id: Vec, - royalty_amount: i32, - seq: i64, -) -> Result<(), IngesterError> -where - T: ConnectionTrait + TransactionTrait, -{ - let model = asset::ActiveModel { - id: Set(id.clone()), - royalty_amount: Set(royalty_amount), - royalty_amount_seq: Set(Some(seq)), - ..Default::default() - }; - - let mut query = asset::Entity::insert(model) - .on_conflict( - OnConflict::column(asset::Column::Id) - .update_columns([ - asset::Column::RoyaltyAmount, - asset::Column::RoyaltyAmountSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); + // Do not overwrite changes that happened after decompression (asset_data.base_info_seq = 0). + // Do not overwrite changes from a later Bubblegum instruction. 
query.sql = format!( - "{} WHERE excluded.royalty_amount_seq >= asset.royalty_amount_seq OR royalty_amount_seq.seq IS NULL)", + "{} WHERE (asset_data.base_info_seq != 0 AND excluded.base_info_seq >= asset_data.base_info_seq) OR asset_data.base_info_seq IS NULL)", query.sql ); - txn.execute(query) .await .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; @@ -548,18 +538,6 @@ where Ok(()) } -pub async fn asset_was_decompressed(txn: &T, id: Vec) -> Result -where - T: ConnectionTrait + TransactionTrait, -{ - if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { - if let Some(0) = asset.seq { - return Ok(true); - } - }; - Ok(false) -} - pub async fn creators_should_be_updated( txn: &T, id: Vec, @@ -569,6 +547,9 @@ where T: ConnectionTrait + TransactionTrait, { if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { + if let Some(0) = asset.seq { + return Ok(false); + } if let Some(creators_added_seq) = asset.creators_added_seq { if seq < creators_added_seq { return Ok(false); @@ -622,7 +603,14 @@ pub async fn upsert_creators( where T: ConnectionTrait + TransactionTrait, { - if creators_should_be_updated(txn, id.clone(), seq).await? { + let multi_txn = txn.begin().await?; + if creators_should_be_updated(&multi_txn, id.clone(), seq).await? { + // Delete any existing creators. + asset_creators::Entity::delete_many() + .filter(Condition::all().add(asset_creators::Column::AssetId.eq(id.clone()))) + .exec(&multi_txn) + .await?; + if !creators.is_empty() { // Vec to hold base creator information. let mut db_creator_infos = Vec::with_capacity(creators.len()); @@ -673,7 +661,7 @@ where .to_owned(), ) .build(DbBackend::Postgres); - txn.execute(query).await?; + multi_txn.execute(query).await?; // This statement will update whether the creator is verified and the // `verified_seq` number. @@ -694,11 +682,119 @@ where "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", query.sql ); - txn.execute(query).await?; + multi_txn.execute(query).await?; } - upsert_asset_with_creators_added_seq(txn, id, seq).await?; + upsert_asset_with_creators_added_seq(&multi_txn, id, seq).await?; } + // Close out transaction and relinqish the lock. + multi_txn.commit().await?; + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn upsert_asset_base_info( + txn: &T, + id: Vec, + owner_type: OwnerType, + frozen: bool, + specification_version: SpecificationVersions, + specification_asset_class: SpecificationAssetClass, + royalty_target_type: RoyaltyTargetType, + royalty_target: Option>, + royalty_amount: i32, + slot_updated: i64, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + // Set initial mint info. + let asset_model = asset::ActiveModel { + id: Set(id.clone()), + owner_type: Set(owner_type), + frozen: Set(frozen), + specification_version: Set(Some(specification_version)), + specification_asset_class: Set(Some(specification_asset_class)), + royalty_target_type: Set(royalty_target_type), + royalty_target: Set(royalty_target), + royalty_amount: Set(royalty_amount), + asset_data: Set(Some(id)), + slot_updated: Set(Some(slot_updated)), + //royalty_amount_seq: Set(Some(seq)), + ..Default::default() + }; + + // Upsert asset table base info. 
+ let mut query = asset::Entity::insert(asset_model) + .on_conflict( + OnConflict::columns([asset::Column::Id]) + .update_columns([ + asset::Column::OwnerType, + asset::Column::Frozen, + asset::Column::SpecificationVersion, + asset::Column::SpecificationAssetClass, + asset::Column::RoyaltyTargetType, + asset::Column::RoyaltyTarget, + asset::Column::RoyaltyAmount, + asset::Column::AssetData, + asset::Column::SlotUpdated, + asset::Column::RoyaltyAmountSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + // Do not overwrite changes that happened after decompression (asset.seq = 0). + // Do not overwrite changes from a later Bubblegum instruction. + query.sql = format!( + "{} WHERE asset.seq != 0 AND (excluded.royalty_amount_seq >= asset.royalty_amount_seq OR royalty_amount_seq.seq IS NULL)", + query.sql + ); + + txn.execute(query) + .await + .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + + Ok(()) +} + +pub async fn upsert_asset_authority( + txn: &T, + asset_id: Vec, + authority: Vec, + slot_updated: i64, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + let model = asset_authority::ActiveModel { + asset_id: Set(asset_id), + authority: Set(authority), + seq: Set(seq), + slot_updated: Set(slot_updated), + ..Default::default() + }; + + // Do not attempt to modify any existing values: + // `ON CONFLICT ('asset_id') DO NOTHING`. + let mut query = asset_authority::Entity::insert(model) + .on_conflict( + OnConflict::columns([asset_authority::Column::AssetId]) + .do_nothing() + .to_owned(), + ) + .build(DbBackend::Postgres); + + // Do not overwrite changes that happened after decompression (asset_authority.seq = 0). + query.sql = format!("{} WHERE asset_authority.seq != 0", query.sql); + + txn.execute(query) + .await + .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + Ok(()) } diff --git a/nft_ingester/src/program_transformers/bubblegum/decompress.rs b/nft_ingester/src/program_transformers/bubblegum/decompress.rs index 6e9e0341a..c2e686039 100644 --- a/nft_ingester/src/program_transformers/bubblegum/decompress.rs +++ b/nft_ingester/src/program_transformers/bubblegum/decompress.rs @@ -1,8 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, upsert_asset_with_compression_info, - upsert_asset_with_leaf_info_for_decompression, + upsert_asset_with_compression_info, upsert_asset_with_leaf_info_for_decompression, }, }; use blockbuster::{instruction::InstructionBundle, programs::bubblegum::BubblegumInstruction}; @@ -18,11 +17,6 @@ where { let id_bytes = bundle.keys.get(3).unwrap().0.as_slice(); - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? { - return Ok(()); - } - // Partial update of asset table with just leaf. 
upsert_asset_with_leaf_info_for_decompression(txn, id_bytes.to_vec()).await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/delegate.rs b/nft_ingester/src/program_transformers/bubblegum/delegate.rs index 5f646b269..8d65a4ec0 100644 --- a/nft_ingester/src/program_transformers/bubblegum/delegate.rs +++ b/nft_ingester/src/program_transformers/bubblegum/delegate.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_with_leaf_info, + save_changelog_event, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, }, }; @@ -30,12 +30,6 @@ where .. } => { let id_bytes = id.to_bytes(); - - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? { - return Ok(()); - } - let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 93ffbaca5..6a25e34c6 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,39 +1,29 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_data, + save_changelog_event, upsert_asset_authority, upsert_asset_base_info, upsert_asset_data, upsert_asset_with_compression_info, upsert_asset_with_leaf_info, - upsert_asset_with_owner_and_delegate_info, upsert_asset_with_royalty_amount, - upsert_asset_with_seq, upsert_collection_info, upsert_creators, + upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_collection_info, + upsert_creators, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; use blockbuster::{ instruction::InstructionBundle, programs::bubblegum::{BubblegumInstruction, LeafSchema, Payload}, - token_metadata::{ - pda::find_master_edition_account, - state::{TokenStandard, UseMethod, Uses}, - }, + token_metadata::state::{TokenStandard, UseMethod, Uses}, }; use chrono::Utc; use digital_asset_types::{ - dao::{ - asset, asset_authority, asset_v1_account_attachments, - sea_orm_active_enums::{ - ChainMutability, Mutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass, - SpecificationVersions, V1AccountAttachments, - }, + dao::sea_orm_active_enums::{ + ChainMutability, Mutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass, + SpecificationVersions, }, json::ChainDataV1, }; use log::warn; use num_traits::FromPrimitive; -use sea_orm::{ - entity::*, query::*, sea_query::OnConflict, ConnectionTrait, DbBackend, EntityTrait, JsonValue, -}; - -// TODO -> consider moving structs into these functions to avoid clone +use sea_orm::{query::*, ConnectionTrait, JsonValue}; pub async fn mint_v1<'c, T>( parsing_result: &BubblegumInstruction, @@ -60,14 +50,7 @@ where nonce, .. } => { - let (edition_attachment_address, _) = find_master_edition_account(&id); let id_bytes = id.to_bytes(); - - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? 
{ - return Ok(None); - } - let slot_i = bundle.slot as i64; let uri = metadata.uri.replace('\0', ""); let name = metadata.name.clone().into_bytes(); @@ -116,46 +99,18 @@ where }; let tree_id = bundle.keys.get(3).unwrap().0.to_vec(); - // Set initial mint info. - let asset_model = asset::ActiveModel { - id: Set(id_bytes.to_vec()), - owner_type: Set(OwnerType::Single), - frozen: Set(false), - specification_version: Set(Some(SpecificationVersions::V1)), - specification_asset_class: Set(Some(SpecificationAssetClass::Nft)), - royalty_target_type: Set(RoyaltyTargetType::Creators), - royalty_target: Set(None), - asset_data: Set(Some(id_bytes.to_vec())), - slot_updated: Set(Some(slot_i)), - ..Default::default() - }; - // Upsert asset table base info. - let query = asset::Entity::insert(asset_model) - .on_conflict( - OnConflict::columns([asset::Column::Id]) - .update_columns([ - asset::Column::OwnerType, - asset::Column::Frozen, - asset::Column::SpecificationVersion, - asset::Column::SpecificationAssetClass, - asset::Column::RoyaltyTargetType, - asset::Column::RoyaltyTarget, - asset::Column::AssetData, - asset::Column::SlotUpdated, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - - upsert_asset_with_royalty_amount( + upsert_asset_base_info( txn, id_bytes.to_vec(), + OwnerType::Single, + false, + SpecificationVersions::V1, + SpecificationAssetClass::Nft, + RoyaltyTargetType::Creators, + None, metadata.seller_fee_basis_points as i32, + slot_i, seq as i64, ) .await?; @@ -197,24 +152,6 @@ where upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; - let attachment = asset_v1_account_attachments::ActiveModel { - id: Set(edition_attachment_address.to_bytes().to_vec()), - slot_updated: Set(slot_i), - attachment_type: Set(V1AccountAttachments::MasterEditionV2), - ..Default::default() - }; - - let query = asset_v1_account_attachments::Entity::insert(attachment) - .on_conflict( - OnConflict::columns([asset_v1_account_attachments::Column::Id]) - .do_nothing() - .to_owned(), - ) - .build(DbBackend::Postgres); - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - // Upsert into `asset_creators` table. upsert_creators( txn, @@ -226,26 +163,10 @@ where .await?; // Insert into `asset_authority` table. - let model = asset_authority::ActiveModel { - asset_id: Set(id_bytes.to_vec()), - authority: Set(bundle.keys.get(0).unwrap().0.to_vec()), //TODO - we need to rem,ove the optional bubblegum signer logic - seq: Set(seq as i64), - slot_updated: Set(slot_i), - ..Default::default() - }; - - // Do not attempt to modify any existing values: - // `ON CONFLICT ('asset_id') DO NOTHING`. - let query = asset_authority::Entity::insert(model) - .on_conflict( - OnConflict::columns([asset_authority::Column::AssetId]) - .do_nothing() - .to_owned(), - ) - .build(DbBackend::Postgres); - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + //TODO - we need to remove the optional bubblegum signer logic + let authority = bundle.keys.get(0).unwrap().0.to_vec(); + upsert_asset_authority(txn, id_bytes.to_vec(), authority, seq as i64, slot_i) + .await?; // Upsert into `asset_grouping` table with base collection info. 
upsert_collection_info( diff --git a/nft_ingester/src/program_transformers/bubblegum/redeem.rs b/nft_ingester/src/program_transformers/bubblegum/redeem.rs index 3dc0bc999..380a8e8c3 100644 --- a/nft_ingester/src/program_transformers/bubblegum/redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/redeem.rs @@ -4,8 +4,7 @@ use log::debug; use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, u32_to_u8_array, upsert_asset_with_leaf_info, - upsert_asset_with_seq, + save_changelog_event, u32_to_u8_array, upsert_asset_with_leaf_info, upsert_asset_with_seq, }, }; use blockbuster::{instruction::InstructionBundle, programs::bubblegum::BubblegumInstruction}; @@ -33,12 +32,6 @@ where ); debug!("Indexing redeem for asset id: {:?}", asset_id); let id_bytes = asset_id.to_bytes(); - - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? { - return Ok(()); - } - let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; diff --git a/nft_ingester/src/program_transformers/bubblegum/transfer.rs b/nft_ingester/src/program_transformers/bubblegum/transfer.rs index 07abbe523..aea88e9ed 100644 --- a/nft_ingester/src/program_transformers/bubblegum/transfer.rs +++ b/nft_ingester/src/program_transformers/bubblegum/transfer.rs @@ -2,8 +2,8 @@ use super::save_changelog_event; use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, upsert_asset_with_leaf_info, - upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, + upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, + upsert_asset_with_seq, }, }; use blockbuster::{ @@ -32,12 +32,6 @@ where .. } => { let id_bytes = id.to_bytes(); - - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? 
{ - return Ok(()); - } - let owner_bytes = owner.to_bytes().to_vec(); let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 835f9cd0c..e185fc07d 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -1,9 +1,8 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - asset_was_decompressed, save_changelog_event, upsert_asset_data, - upsert_asset_with_leaf_info, upsert_asset_with_royalty_amount, upsert_asset_with_seq, - upsert_creators, + save_changelog_event, upsert_asset_base_info, upsert_asset_data, + upsert_asset_with_leaf_info, upsert_asset_with_seq, upsert_creators, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -14,15 +13,15 @@ use blockbuster::{ }; use chrono::Utc; use digital_asset_types::{ - dao::{ - asset_creators, - sea_orm_active_enums::{ChainMutability, Mutability}, + dao::sea_orm_active_enums::{ + ChainMutability, Mutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass, + SpecificationVersions, }, json::ChainDataV1, }; use log::warn; use num_traits::FromPrimitive; -use sea_orm::{entity::*, query::*, ConnectionTrait, EntityTrait, JsonValue}; +use sea_orm::{query::*, ConnectionTrait, JsonValue}; pub async fn update_metadata<'c, T>( parsing_result: &BubblegumInstruction, @@ -51,12 +50,6 @@ where return match le.schema { LeafSchema::V1 { id, nonce, .. } => { let id_bytes = id.to_bytes(); - - // First check to see if this asset has been decompressed and if so do not update. - if asset_was_decompressed(txn, id_bytes.to_vec()).await? { - return Ok(None); - } - let slot_i = bundle.slot as i64; let uri = if let Some(uri) = &update_args.uri { @@ -141,10 +134,17 @@ where current_metadata.seller_fee_basis_points }; - upsert_asset_with_royalty_amount( + upsert_asset_base_info( txn, id_bytes.to_vec(), + OwnerType::Single, + false, + SpecificationVersions::V1, + SpecificationAssetClass::Nft, + RoyaltyTargetType::Creators, + None, seller_fee_basis_points as i32, + slot_i, seq as i64, ) .await?; @@ -165,16 +165,6 @@ where upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; - // Update `asset_creators` table. - - // Delete any existing creators. - asset_creators::Entity::delete_many() - .filter( - Condition::all().add(asset_creators::Column::AssetId.eq(id_bytes.to_vec())), - ) - .exec(txn) - .await?; - // Upsert into `asset_creators` table. 
let creators = if let Some(creators) = &update_args.creators { creators diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 70fdaee63..a6c7826a9 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -149,8 +149,8 @@ pub async fn save_v1_asset( id: Set(id.to_vec()), raw_name: Set(Some(name.to_vec())), raw_symbol: Set(Some(symbol.to_vec())), + base_info_seq: Set(Some(0)), download_metadata_seq: Set(Some(0)), - ..Default::default() }; let txn = conn.begin().await?; let mut query = asset_data::Entity::insert(asset_data_model) @@ -165,6 +165,8 @@ pub async fn save_v1_asset( asset_data::Column::Reindex, asset_data::Column::RawName, asset_data::Column::RawSymbol, + asset_data::Column::BaseInfoSeq, + asset_data::Column::DownloadMetadataSeq, ]) .to_owned(), ) @@ -283,7 +285,7 @@ pub async fn save_v1_asset( group_key: Set("collection".to_string()), group_value: Set(Some(c.key.to_string())), verified: Set(c.verified), - group_info_seq: Set(None), + group_info_seq: Set(Some(0)), slot_updated: Set(Some(slot_i)), ..Default::default() }; From 8732ba97eec7638f62e356555781dd3e568df1f4 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Sun, 3 Dec 2023 23:50:57 -0800 Subject: [PATCH 22/46] Rename royalty_amount_seq to base_info_seq --- .../src/dao/generated/asset.rs | 6 +- .../src/dao/generated/sea_orm_active_enums.rs | 112 +++++++++--------- digital_asset_types/tests/common.rs | 2 +- ...01_add_seq_numbers_bgum_update_metadata.rs | 4 +- .../src/program_transformers/bubblegum/db.rs | 6 +- 5 files changed, 65 insertions(+), 65 deletions(-) diff --git a/digital_asset_types/src/dao/generated/asset.rs b/digital_asset_types/src/dao/generated/asset.rs index b8e116e93..16c7a9d72 100644 --- a/digital_asset_types/src/dao/generated/asset.rs +++ b/digital_asset_types/src/dao/generated/asset.rs @@ -46,7 +46,7 @@ pub struct Model { pub owner_delegate_seq: Option, pub was_decompressed: bool, pub leaf_seq: Option, - pub royalty_amount_seq: Option, + pub base_info_seq: Option, pub creators_added_seq: Option, } @@ -80,7 +80,7 @@ pub enum Column { OwnerDelegateSeq, WasDecompressed, LeafSeq, - RoyaltyAmountSeq, + BaseInfoSeq, CreatorsAddedSeq, } @@ -137,7 +137,7 @@ impl ColumnTrait for Column { Self::OwnerDelegateSeq => ColumnType::BigInteger.def().null(), Self::WasDecompressed => ColumnType::Boolean.def(), Self::LeafSeq => ColumnType::BigInteger.def().null(), - Self::RoyaltyAmountSeq => ColumnType::BigInteger.def().null(), + Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), Self::CreatorsAddedSeq => ColumnType::BigInteger.def().null(), } } diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index bf72d7957..bdc071103 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -3,41 +3,19 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] -pub enum OwnerType { - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "token")] - Token, - #[sea_orm(string_value = "unknown")] - Unknown, -} #[derive(Debug, Clone, 
PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "specification_asset_class" + enum_name = "royalty_target_type" )] -pub enum SpecificationAssetClass { - #[sea_orm(string_value = "FUNGIBLE_ASSET")] - FungibleAsset, - #[sea_orm(string_value = "FUNGIBLE_TOKEN")] - FungibleToken, - #[sea_orm(string_value = "IDENTITY_NFT")] - IdentityNft, - #[sea_orm(string_value = "NFT")] - Nft, - #[sea_orm(string_value = "NON_TRANSFERABLE_NFT")] - NonTransferableNft, - #[sea_orm(string_value = "PRINT")] - Print, - #[sea_orm(string_value = "PRINTABLE_NFT")] - PrintableNft, - #[sea_orm(string_value = "PROGRAMMABLE_NFT")] - ProgrammableNft, - #[sea_orm(string_value = "TRANSFER_RESTRICTED_NFT")] - TransferRestrictedNft, +pub enum RoyaltyTargetType { + #[sea_orm(string_value = "creators")] + Creators, + #[sea_orm(string_value = "fanout")] + Fanout, + #[sea_orm(string_value = "single")] + Single, #[sea_orm(string_value = "unknown")] Unknown, } @@ -60,6 +38,18 @@ pub enum V1AccountAttachments { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] +pub enum TaskStatus { + #[sea_orm(string_value = "failed")] + Failed, + #[sea_orm(string_value = "pending")] + Pending, + #[sea_orm(string_value = "running")] + Running, + #[sea_orm(string_value = "success")] + Success, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", @@ -76,34 +66,44 @@ pub enum SpecificationVersions { V2, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] +pub enum Mutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "royalty_target_type" + enum_name = "specification_asset_class" )] -pub enum RoyaltyTargetType { - #[sea_orm(string_value = "creators")] - Creators, - #[sea_orm(string_value = "fanout")] - Fanout, - #[sea_orm(string_value = "single")] - Single, +pub enum SpecificationAssetClass { + #[sea_orm(string_value = "FUNGIBLE_ASSET")] + FungibleAsset, + #[sea_orm(string_value = "FUNGIBLE_TOKEN")] + FungibleToken, + #[sea_orm(string_value = "IDENTITY_NFT")] + IdentityNft, + #[sea_orm(string_value = "NFT")] + Nft, + #[sea_orm(string_value = "NON_TRANSFERABLE_NFT")] + NonTransferableNft, + #[sea_orm(string_value = "PRINT")] + Print, + #[sea_orm(string_value = "PRINTABLE_NFT")] + PrintableNft, + #[sea_orm(string_value = "PROGRAMMABLE_NFT")] + ProgrammableNft, + #[sea_orm(string_value = "TRANSFER_RESTRICTED_NFT")] + TransferRestrictedNft, #[sea_orm(string_value = "unknown")] Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] -pub enum TaskStatus { - #[sea_orm(string_value = "failed")] - Failed, - #[sea_orm(string_value = "pending")] - Pending, - #[sea_orm(string_value = "running")] - Running, - #[sea_orm(string_value = "success")] - Success, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm(rs_type = "String", db_type = 
"Enum", enum_name = "chain_mutability")] pub enum ChainMutability { #[sea_orm(string_value = "immutable")] @@ -114,12 +114,12 @@ pub enum ChainMutability { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] -pub enum Mutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] +pub enum OwnerType { + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "token")] + Token, #[sea_orm(string_value = "unknown")] Unknown, } diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index 50bfc194a..8f4efca10 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -159,7 +159,7 @@ pub fn create_asset( owner_delegate_seq: Some(0), was_decompressed: false, leaf_seq: Some(0), - royalty_amount_seq: Some(0), + base_info_seq: Some(0), creators_added_seq: Some(0), }, ) diff --git a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs index 6881c3c8b..d25f3ef1b 100644 --- a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs +++ b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs @@ -36,7 +36,7 @@ impl MigrationTrait for Migration { .alter_table( Table::alter() .table(asset::Entity) - .add_column(ColumnDef::new(Alias::new("royalty_amount_seq")).big_integer()) + .add_column(ColumnDef::new(Alias::new("base_info_seq")).big_integer()) .add_column(ColumnDef::new(Alias::new("creators_added_seq")).big_integer()) .to_owned(), ) @@ -72,7 +72,7 @@ impl MigrationTrait for Migration { .alter_table( Table::alter() .table(asset::Entity) - .drop_column(Alias::new("royalty_amount_seq")) + .drop_column(Alias::new("base_info_seq")) .drop_column(Alias::new("creators_added_seq")) .to_owned(), ) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index eb04f79b1..5f9f30a5c 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -723,7 +723,7 @@ where royalty_amount: Set(royalty_amount), asset_data: Set(Some(id)), slot_updated: Set(Some(slot_updated)), - //royalty_amount_seq: Set(Some(seq)), + base_info_seq: Set(Some(seq)), ..Default::default() }; @@ -741,7 +741,7 @@ where asset::Column::RoyaltyAmount, asset::Column::AssetData, asset::Column::SlotUpdated, - asset::Column::RoyaltyAmountSeq, + asset::Column::BaseInfoSeq, ]) .to_owned(), ) @@ -750,7 +750,7 @@ where // Do not overwrite changes that happened after decompression (asset.seq = 0). // Do not overwrite changes from a later Bubblegum instruction. 
query.sql = format!( - "{} WHERE asset.seq != 0 AND (excluded.royalty_amount_seq >= asset.royalty_amount_seq OR royalty_amount_seq.seq IS NULL)", + "{} WHERE asset.seq != 0 AND (excluded.base_info_seq >= asset.base_info_seq OR asset.base_info_seq IS NULL)", query.sql ); From 25006872a5e761b9621d1e077b1c4f86f4a85e27 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 4 Dec 2023 00:42:32 -0800 Subject: [PATCH 23/46] Fix typo in WHERE clause --- nft_ingester/src/program_transformers/bubblegum/db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 5f9f30a5c..3b444740b 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -528,7 +528,7 @@ where // Do not overwrite changes that happened after decompression (asset_data.base_info_seq = 0). // Do not overwrite changes from a later Bubblegum instruction. query.sql = format!( - "{} WHERE (asset_data.base_info_seq != 0 AND excluded.base_info_seq >= asset_data.base_info_seq) OR asset_data.base_info_seq IS NULL)", + "{} WHERE (asset_data.base_info_seq != 0 AND excluded.base_info_seq >= asset_data.base_info_seq) OR asset_data.base_info_seq IS NULL", query.sql ); txn.execute(query) From fe1d522d9c6ed4524420f80ff94c329ba7c15b30 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 4 Dec 2023 08:20:12 -0800 Subject: [PATCH 24/46] Do not delete existing creators in mint_v1 --- .../src/program_transformers/bubblegum/db.rs | 13 ++++++++----- .../src/program_transformers/bubblegum/mint_v1.rs | 1 + .../bubblegum/update_metadata.rs | 2 +- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 3b444740b..e9aca1356 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -599,17 +599,20 @@ pub async fn upsert_creators( creators: &Vec, slot_updated: i64, seq: i64, + delete_existing: bool, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, { let multi_txn = txn.begin().await?; if creators_should_be_updated(&multi_txn, id.clone(), seq).await? { - // Delete any existing creators. - asset_creators::Entity::delete_many() - .filter(Condition::all().add(asset_creators::Column::AssetId.eq(id.clone()))) - .exec(&multi_txn) - .await?; + if delete_existing { + // Delete any existing creators. + asset_creators::Entity::delete_many() + .filter(Condition::all().add(asset_creators::Column::AssetId.eq(id.clone()))) + .exec(&multi_txn) + .await?; + } if !creators.is_empty() { // Vec to hold base creator information. 
diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 6a25e34c6..576171545 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -159,6 +159,7 @@ where &metadata.creators, slot_i, seq as i64, + false, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index e185fc07d..a39b1bbf5 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -171,7 +171,7 @@ where } else { ¤t_metadata.creators }; - upsert_creators(txn, id_bytes.to_vec(), creators, slot_i, seq as i64).await?; + upsert_creators(txn, id_bytes.to_vec(), creators, slot_i, seq as i64, true).await?; if uri.is_empty() { warn!( From d26fe0027fd67455f57bc782ea0f1a845aafda70 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 4 Dec 2023 11:41:05 -0800 Subject: [PATCH 25/46] Update comments around database txns --- .../src/program_transformers/bubblegum/db.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index e9aca1356..f44b92985 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -384,7 +384,12 @@ where // want to insert a creator that was removed from a later `update_metadata`. And we don't need // to worry about creator verification in that case because the `update_metadata` updates // creator verification state as well. + + // Note that if the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. let multi_txn = txn.begin().await?; + if creators_should_be_updated(&multi_txn, asset_id, seq).await? { let mut query = asset_creators::Entity::insert(model) .on_conflict( @@ -411,7 +416,7 @@ where .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; } - // Close out transaction and relinqish the lock. + // Commit transaction and relinqish the lock. multi_txn.commit().await?; Ok(()) @@ -604,7 +609,11 @@ pub async fn upsert_creators( where T: ConnectionTrait + TransactionTrait, { + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. let multi_txn = txn.begin().await?; + if creators_should_be_updated(&multi_txn, id.clone(), seq).await? { if delete_existing { // Delete any existing creators. @@ -691,7 +700,7 @@ where upsert_asset_with_creators_added_seq(&multi_txn, id, seq).await?; } - // Close out transaction and relinqish the lock. + // Commit transaction and relinqish the lock. 
multi_txn.commit().await?; Ok(()) From c4b1ab486cd90e5ea51a11003e78eb6b90ec7d8c Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 4 Dec 2023 12:18:36 -0800 Subject: [PATCH 26/46] Use transaction in mint_V1 and update_metadata --- .../program_transformers/bubblegum/mint_v1.rs | 18 +++++++++++++----- .../bubblegum/update_metadata.rs | 16 ++++++++++++---- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 576171545..4885a2e79 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -99,9 +99,14 @@ where }; let tree_id = bundle.keys.get(3).unwrap().0.to_vec(); + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Upsert asset table base info. upsert_asset_base_info( - txn, + &multi_txn, id_bytes.to_vec(), OwnerType::Single, false, @@ -117,7 +122,7 @@ where // Partial update of asset table with just compression info elements. upsert_asset_with_compression_info( - txn, + &multi_txn, id_bytes.to_vec(), true, false, @@ -129,7 +134,7 @@ where // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce as i64, tree_id, @@ -142,7 +147,7 @@ where // Partial update of asset table with just leaf owner and delegate. upsert_asset_with_owner_and_delegate_info( - txn, + &multi_txn, id_bytes.to_vec(), owner.to_bytes().to_vec(), delegate, @@ -150,7 +155,10 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; // Upsert into `asset_creators` table. upsert_creators( diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index a39b1bbf5..f787adf96 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -126,7 +126,12 @@ where ) .await?; - // Partial update of asset table with just royalty amount (seller fee basis points). + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + // Upsert asset table base info. let seller_fee_basis_points = if let Some(seller_fee_basis_points) = update_args.seller_fee_basis_points { seller_fee_basis_points @@ -135,7 +140,7 @@ where }; upsert_asset_base_info( - txn, + &multi_txn, id_bytes.to_vec(), OwnerType::Single, false, @@ -152,7 +157,7 @@ where // Partial update of asset table with just leaf. let tree_id = bundle.keys.get(5).unwrap().0.to_vec(); upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce as i64, tree_id, @@ -163,7 +168,10 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. 
+ multi_txn.commit().await?; // Upsert into `asset_creators` table. let creators = if let Some(creators) = &update_args.creators { From 9e305c9c05cf31a131bf6f6428a68e55a75a4745 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 4 Dec 2023 12:42:00 -0800 Subject: [PATCH 27/46] Use transaction for other Bubblegum instructions asset table updates --- .../program_transformers/bubblegum/burn.rs | 12 ++++++++-- .../bubblegum/cancel_redeem.rs | 21 +++++++++++++----- .../bubblegum/collection_verification.rs | 14 +++++++++--- .../bubblegum/creator_verification.rs | 14 +++++++++--- .../bubblegum/decompress.rs | 16 +++++++++++--- .../bubblegum/delegate.rs | 20 ++++++++++++----- .../program_transformers/bubblegum/redeem.rs | 12 ++++++++-- .../bubblegum/transfer.rs | 22 ++++++++++++++----- 8 files changed, 101 insertions(+), 30 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/burn.rs b/nft_ingester/src/program_transformers/bubblegum/burn.rs index f59a64c65..72f13a852 100644 --- a/nft_ingester/src/program_transformers/bubblegum/burn.rs +++ b/nft_ingester/src/program_transformers/bubblegum/burn.rs @@ -42,6 +42,11 @@ where ..Default::default() }; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Upsert asset table `burnt` column. let query = asset::Entity::insert(asset_model) .on_conflict( @@ -50,9 +55,12 @@ where .to_owned(), ) .build(DbBackend::Postgres); - txn.execute(query).await?; + multi_txn.execute(query).await?; + + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; return Ok(()); } diff --git a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs index 5edc5b0e7..184e7df5f 100644 --- a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs @@ -22,8 +22,7 @@ where { if let (Some(le), Some(cl)) = (&parsing_result.leaf_update, &parsing_result.tree_update) { let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; - #[allow(unreachable_patterns)] - return match le.schema { + match le.schema { LeafSchema::V1 { id, owner, @@ -40,9 +39,14 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce, tree_id.to_vec(), @@ -55,7 +59,7 @@ where // Partial update of asset table with just leaf owner and delegate. upsert_asset_with_owner_and_delegate_info( - txn, + &multi_txn, id_bytes.to_vec(), owner_bytes, delegate, @@ -63,9 +67,14 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. 
+ multi_txn.commit().await?; + + return Ok(()); } - }; + } } Err(IngesterError::ParsingError( "Ix not parsed correctly".to_string(), diff --git a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs index 8f2429cef..0bbf5fc00 100644 --- a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs @@ -45,9 +45,14 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce, tree_id.to_vec(), @@ -58,10 +63,10 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; upsert_collection_info( - txn, + &multi_txn, id_bytes.to_vec(), Some(Collection { key: *collection, @@ -72,6 +77,9 @@ where ) .await?; + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; + return Ok(()); }; Err(IngesterError::ParsingError( diff --git a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs index 7538317fa..b84b1fd8b 100644 --- a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs @@ -61,9 +61,14 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf info. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce, tree_id.to_vec(), @@ -76,7 +81,7 @@ where // Partial update of asset table with just leaf owner and delegate. upsert_asset_with_owner_and_delegate_info( - txn, + &multi_txn, id_bytes.to_vec(), owner_bytes, delegate, @@ -84,7 +89,10 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; id_bytes.to_vec() } diff --git a/nft_ingester/src/program_transformers/bubblegum/decompress.rs b/nft_ingester/src/program_transformers/bubblegum/decompress.rs index c2e686039..90cad0576 100644 --- a/nft_ingester/src/program_transformers/bubblegum/decompress.rs +++ b/nft_ingester/src/program_transformers/bubblegum/decompress.rs @@ -17,11 +17,16 @@ where { let id_bytes = bundle.keys.get(3).unwrap().0.as_slice(); + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. 
- upsert_asset_with_leaf_info_for_decompression(txn, id_bytes.to_vec()).await?; + upsert_asset_with_leaf_info_for_decompression(&multi_txn, id_bytes.to_vec()).await?; upsert_asset_with_compression_info( - txn, + &multi_txn, id_bytes.to_vec(), false, false, @@ -29,5 +34,10 @@ where Some(id_bytes.to_vec()), true, ) - .await + .await?; + + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; + + Ok(()) } diff --git a/nft_ingester/src/program_transformers/bubblegum/delegate.rs b/nft_ingester/src/program_transformers/bubblegum/delegate.rs index 8d65a4ec0..17d2cba22 100644 --- a/nft_ingester/src/program_transformers/bubblegum/delegate.rs +++ b/nft_ingester/src/program_transformers/bubblegum/delegate.rs @@ -22,7 +22,7 @@ where { if let (Some(le), Some(cl)) = (&parsing_result.leaf_update, &parsing_result.tree_update) { let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; - return match le.schema { + match le.schema { LeafSchema::V1 { id, owner, @@ -38,9 +38,14 @@ where }; let tree_id = cl.id.to_bytes(); + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), cl.index as i64, tree_id.to_vec(), @@ -53,7 +58,7 @@ where // Partial update of asset table with just leaf owner and delegate. upsert_asset_with_owner_and_delegate_info( - txn, + &multi_txn, id_bytes.to_vec(), owner_bytes, delegate, @@ -61,9 +66,14 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; + + return Ok(()); } - }; + } } Err(IngesterError::ParsingError( "Ix not parsed correctly".to_string(), diff --git a/nft_ingester/src/program_transformers/bubblegum/redeem.rs b/nft_ingester/src/program_transformers/bubblegum/redeem.rs index 380a8e8c3..69549ad09 100644 --- a/nft_ingester/src/program_transformers/bubblegum/redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/redeem.rs @@ -35,9 +35,14 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce, tree_id.to_vec(), @@ -48,7 +53,10 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await?; + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. 
+ multi_txn.commit().await?; return Ok(()); } diff --git a/nft_ingester/src/program_transformers/bubblegum/transfer.rs b/nft_ingester/src/program_transformers/bubblegum/transfer.rs index aea88e9ed..29c6c3461 100644 --- a/nft_ingester/src/program_transformers/bubblegum/transfer.rs +++ b/nft_ingester/src/program_transformers/bubblegum/transfer.rs @@ -23,8 +23,8 @@ where { if let (Some(le), Some(cl)) = (&parsing_result.leaf_update, &parsing_result.tree_update) { let seq = save_changelog_event(cl, bundle.slot, bundle.txn_id, txn, cl_audits).await?; - #[allow(unreachable_patterns)] - return match le.schema { + + match le.schema { LeafSchema::V1 { id, owner, @@ -41,9 +41,14 @@ where let tree_id = cl.id.to_bytes(); let nonce = cl.index as i64; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. upsert_asset_with_leaf_info( - txn, + &multi_txn, id_bytes.to_vec(), nonce, tree_id.to_vec(), @@ -56,7 +61,7 @@ where // Partial update of asset table with just leaf owner and delegate. upsert_asset_with_owner_and_delegate_info( - txn, + &multi_txn, id_bytes.to_vec(), owner_bytes, delegate, @@ -64,9 +69,14 @@ where ) .await?; - upsert_asset_with_seq(txn, id_bytes.to_vec(), seq as i64).await + upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; + + // Commit transaction and relinqish the lock. + multi_txn.commit().await?; + + return Ok(()); } - }; + } } Err(IngesterError::ParsingError( "Ix not parsed correctly".to_string(), From c3798c0eb771656bebd8317efb6773a84e0b5b5d Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 4 Dec 2023 13:18:39 -0800 Subject: [PATCH 28/46] Fix tree_id key index in update_metadata --- .../src/program_transformers/bubblegum/update_metadata.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index f787adf96..a2432eb53 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -155,7 +155,7 @@ where .await?; // Partial update of asset table with just leaf. - let tree_id = bundle.keys.get(5).unwrap().0.to_vec(); + let tree_id = bundle.keys.get(8).unwrap().0.to_vec(); upsert_asset_with_leaf_info( &multi_txn, id_bytes.to_vec(), From 7c0f9be88456fafe9f7deb7553c5e117766b8a29 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 6 Dec 2023 11:20:21 -0800 Subject: [PATCH 29/46] Remove use of was_decompressed flag on asset table --- .../program_transformers/bubblegum/burn.rs | 3 +- .../src/program_transformers/bubblegum/db.rs | 38 +++++++++++-------- .../bubblegum/decompress.rs | 27 ++----------- .../program_transformers/bubblegum/mint_v1.rs | 1 - .../token_metadata/v1_asset.rs | 2 + 5 files changed, 29 insertions(+), 42 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/burn.rs b/nft_ingester/src/program_transformers/bubblegum/burn.rs index 72f13a852..3f8914596 100644 --- a/nft_ingester/src/program_transformers/bubblegum/burn.rs +++ b/nft_ingester/src/program_transformers/bubblegum/burn.rs @@ -47,7 +47,8 @@ where // automatically rolled back. 
let multi_txn = txn.begin().await?; - // Upsert asset table `burnt` column. + // Upsert asset table `burnt` column. Note we don't check for decompression (asset.seq = 0) + // because we know if the item was burnt it could not have been decompressed later. let query = asset::Entity::insert(asset_model) .on_conflict( OnConflict::columns([asset::Column::Id]) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index f44b92985..8234be054 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -182,9 +182,9 @@ where .build(DbBackend::Postgres); // Do not overwrite changes that happened after decompression (asset.seq = 0). - // If the asset was decompressed, don't update the leaf info since we cleared it during decompression. + // Do not overwrite changes from a later Bubblegum instruction. query.sql = format!( - "{} WHERE asset.seq != 0 AND (NOT asset.was_decompressed) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", + "{} WHERE asset.seq != 0 AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", query.sql ); @@ -195,7 +195,7 @@ where Ok(()) } -pub async fn upsert_asset_with_leaf_info_for_decompression( +pub async fn upsert_asset_with_leaf_and_compression_info_for_decompression( txn: &T, id: Vec, ) -> Result<(), IngesterError> @@ -203,13 +203,17 @@ where T: ConnectionTrait + TransactionTrait, { let model = asset::ActiveModel { - id: Set(id), + id: Set(id.clone()), nonce: Set(Some(0)), tree_id: Set(None), leaf: Set(None), data_hash: Set(None), creator_hash: Set(None), - leaf_seq: Set(None), + compressed: Set(false), + compressible: Set(false), + supply: Set(1), + supply_mint: Set(Some(id)), + seq: Set(Some(0)), ..Default::default() }; @@ -223,6 +227,11 @@ where asset::Column::DataHash, asset::Column::CreatorHash, asset::Column::LeafSeq, + asset::Column::Compressed, + asset::Column::Compressible, + asset::Column::Supply, + asset::Column::SupplyMint, + asset::Column::Seq, ]) .to_owned(), ) @@ -252,7 +261,7 @@ where id: Set(id), owner: Set(Some(owner)), delegate: Set(delegate), - owner_delegate_seq: Set(Some(seq)), // gummyroll seq + owner_delegate_seq: Set(Some(seq)), ..Default::default() }; @@ -289,7 +298,6 @@ pub async fn upsert_asset_with_compression_info( compressible: bool, supply: i64, supply_mint: Option>, - was_decompressed: bool, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, @@ -300,7 +308,6 @@ where compressible: Set(compressible), supply: Set(supply), supply_mint: Set(supply_mint), - was_decompressed: Set(was_decompressed), ..Default::default() }; @@ -312,18 +319,13 @@ where asset::Column::Compressible, asset::Column::Supply, asset::Column::SupplyMint, - asset::Column::WasDecompressed, ]) .to_owned(), ) .build(DbBackend::Postgres); // Do not overwrite changes that happened after decompression (asset.seq = 0). - // Do not overwrite changes from Bubblegum decompress instruction itself. - query.sql = format!( - "{} WHERE asset.seq != 0 AND (NOT asset.was_decompressed)", - query.sql - ); + query.sql = format!("{} WHERE asset.seq != 0", query.sql); txn.execute(query).await?; Ok(()) @@ -616,9 +618,13 @@ where if creators_should_be_updated(&multi_txn, id.clone(), seq).await? { if delete_existing { - // Delete any existing creators. + // Delete all existing creators that haven't been verified at a higher sequence number. 
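[The sequence guards edited in the db.rs hunks above are appended as raw SQL on top of the upsert that SeaORM builds. A condensed sketch of that pattern against the existing `asset` entity; the function name and the choice of updated columns are illustrative:]

use digital_asset_types::dao::asset;
use sea_orm::{query::*, sea_query::OnConflict, ConnectionTrait, DbBackend, DbErr, EntityTrait};

async fn guarded_upsert<T: ConnectionTrait>(
    db: &T,
    model: asset::ActiveModel,
) -> Result<(), DbErr> {
    // SeaORM builds `INSERT ... ON CONFLICT (id) DO UPDATE SET leaf = ..., leaf_seq = ...`.
    let mut query = asset::Entity::insert(model)
        .on_conflict(
            OnConflict::column(asset::Column::Id)
                .update_columns([asset::Column::Leaf, asset::Column::LeafSeq])
                .to_owned(),
        )
        .build(DbBackend::Postgres);

    // The hand-written guard is appended to the generated SQL: skip the update when
    // the row was decompressed (seq = 0) or already holds a newer leaf_seq.
    // `excluded` is PostgreSQL's alias for the row proposed for insertion.
    query.sql = format!(
        "{} WHERE (asset.seq != 0 OR asset.seq IS NULL) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)",
        query.sql
    );

    db.execute(query).await?;
    Ok(())
}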
asset_creators::Entity::delete_many() - .filter(Condition::all().add(asset_creators::Column::AssetId.eq(id.clone()))) + .filter( + Condition::all() + .add(asset_creators::Column::AssetId.eq(id.clone())) + .add(asset_creators::Column::VerifiedSeq.lt(seq)), + ) .exec(&multi_txn) .await?; } diff --git a/nft_ingester/src/program_transformers/bubblegum/decompress.rs b/nft_ingester/src/program_transformers/bubblegum/decompress.rs index 90cad0576..951fa7e4b 100644 --- a/nft_ingester/src/program_transformers/bubblegum/decompress.rs +++ b/nft_ingester/src/program_transformers/bubblegum/decompress.rs @@ -1,8 +1,6 @@ use crate::{ error::IngesterError, - program_transformers::bubblegum::{ - upsert_asset_with_compression_info, upsert_asset_with_leaf_info_for_decompression, - }, + program_transformers::bubblegum::upsert_asset_with_leaf_and_compression_info_for_decompression, }; use blockbuster::{instruction::InstructionBundle, programs::bubblegum::BubblegumInstruction}; use sea_orm::{query::*, ConnectionTrait}; @@ -17,27 +15,8 @@ where { let id_bytes = bundle.keys.get(3).unwrap().0.as_slice(); - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - // Partial update of asset table with just leaf. - upsert_asset_with_leaf_info_for_decompression(&multi_txn, id_bytes.to_vec()).await?; - - upsert_asset_with_compression_info( - &multi_txn, - id_bytes.to_vec(), - false, - false, - 1, - Some(id_bytes.to_vec()), - true, - ) - .await?; - - // Commit transaction and relinqish the lock. - multi_txn.commit().await?; + // Partial update of asset table with leaf and compression info. 
+ upsert_asset_with_leaf_and_compression_info_for_decompression(txn, id_bytes.to_vec()).await?; Ok(()) } diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 4885a2e79..7932b58a7 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -128,7 +128,6 @@ where false, 1, None, - false, ) .await?; diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index a6c7826a9..b0a370974 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -192,6 +192,8 @@ pub async fn save_v1_asset( nonce: Set(Some(0)), seq: Set(Some(0)), leaf: Set(None), + data_hash: Set(None), + creator_hash: Set(None), compressed: Set(false), compressible: Set(false), royalty_target_type: Set(RoyaltyTargetType::Creators), From 974966ee990ea237abca6ef5395bc4d640aaccba Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 6 Dec 2023 11:41:38 -0800 Subject: [PATCH 30/46] Add migration to remove was_decompressed and regenerate SeaORM types --- .../src/dao/generated/asset.rs | 3 - .../src/dao/generated/sea_orm_active_enums.rs | 88 +++++++++---------- digital_asset_types/tests/common.rs | 1 - migration/src/lib.rs | 2 + ...20231206_120101_remove_was_decompressed.rs | 39 ++++++++ 5 files changed, 85 insertions(+), 48 deletions(-) create mode 100644 migration/src/m20231206_120101_remove_was_decompressed.rs diff --git a/digital_asset_types/src/dao/generated/asset.rs b/digital_asset_types/src/dao/generated/asset.rs index 16c7a9d72..1a9d18481 100644 --- a/digital_asset_types/src/dao/generated/asset.rs +++ b/digital_asset_types/src/dao/generated/asset.rs @@ -44,7 +44,6 @@ pub struct Model { pub data_hash: Option, pub creator_hash: Option, pub owner_delegate_seq: Option, - pub was_decompressed: bool, pub leaf_seq: Option, pub base_info_seq: Option, pub creators_added_seq: Option, @@ -78,7 +77,6 @@ pub enum Column { DataHash, CreatorHash, OwnerDelegateSeq, - WasDecompressed, LeafSeq, BaseInfoSeq, CreatorsAddedSeq, @@ -135,7 +133,6 @@ impl ColumnTrait for Column { Self::DataHash => ColumnType::Char(Some(50u32)).def().null(), Self::CreatorHash => ColumnType::Char(Some(50u32)).def().null(), Self::OwnerDelegateSeq => ColumnType::BigInteger.def().null(), - Self::WasDecompressed => ColumnType::Boolean.def(), Self::LeafSeq => ColumnType::BigInteger.def().null(), Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), Self::CreatorsAddedSeq => ColumnType::BigInteger.def().null(), diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index bdc071103..2018fd436 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -3,6 +3,22 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm( + rs_type = "String", + db_type = "Enum", + enum_name = "specification_versions" +)] +pub enum SpecificationVersions { + #[sea_orm(string_value = "unknown")] + Unknown, + #[sea_orm(string_value = "v0")] + V0, + #[sea_orm(string_value = "v1")] + V1, + #[sea_orm(string_value = "v2")] + V2, +} 
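[The regenerated types in this file are plain SeaORM active enums; a small usage sketch, with an illustrative function name, showing how they end up in an `ActiveModel` (the `asset_data` fields referenced here already exist in the DAO):]

use digital_asset_types::dao::{
    asset_data,
    sea_orm_active_enums::{ChainMutability, Mutability},
};
use sea_orm::Set;

fn mark_chain_data_mutable(mut data: asset_data::ActiveModel) -> asset_data::ActiveModel {
    // The enum variants serialize to the string values declared in this file
    // (e.g. "mutable"), matching the corresponding Postgres enum types.
    data.chain_data_mutability = Set(ChainMutability::Mutable);
    data.metadata_mutability = Set(Mutability::Mutable);
    data
}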
#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", @@ -20,20 +36,12 @@ pub enum RoyaltyTargetType { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm( - rs_type = "String", - db_type = "Enum", - enum_name = "v1_account_attachments" -)] -pub enum V1AccountAttachments { - #[sea_orm(string_value = "edition")] - Edition, - #[sea_orm(string_value = "edition_marker")] - EditionMarker, - #[sea_orm(string_value = "master_edition_v1")] - MasterEditionV1, - #[sea_orm(string_value = "master_edition_v2")] - MasterEditionV2, +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] +pub enum ChainMutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, #[sea_orm(string_value = "unknown")] Unknown, } @@ -50,28 +58,12 @@ pub enum TaskStatus { Success, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm( - rs_type = "String", - db_type = "Enum", - enum_name = "specification_versions" -)] -pub enum SpecificationVersions { - #[sea_orm(string_value = "unknown")] - Unknown, - #[sea_orm(string_value = "v0")] - V0, - #[sea_orm(string_value = "v1")] - V1, - #[sea_orm(string_value = "v2")] - V2, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] -pub enum Mutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] +pub enum OwnerType { + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "token")] + Token, #[sea_orm(string_value = "unknown")] Unknown, } @@ -104,8 +96,8 @@ pub enum SpecificationAssetClass { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] -pub enum ChainMutability { +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] +pub enum Mutability { #[sea_orm(string_value = "immutable")] Immutable, #[sea_orm(string_value = "mutable")] @@ -114,12 +106,20 @@ pub enum ChainMutability { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] -pub enum OwnerType { - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "token")] - Token, +#[sea_orm( + rs_type = "String", + db_type = "Enum", + enum_name = "v1_account_attachments" +)] +pub enum V1AccountAttachments { + #[sea_orm(string_value = "edition")] + Edition, + #[sea_orm(string_value = "edition_marker")] + EditionMarker, + #[sea_orm(string_value = "master_edition_v1")] + MasterEditionV1, + #[sea_orm(string_value = "master_edition_v2")] + MasterEditionV2, #[sea_orm(string_value = "unknown")] Unknown, } diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index 8f4efca10..40ad4509f 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -157,7 +157,6 @@ pub fn create_asset( alt_id: None, creator_hash: None, owner_delegate_seq: Some(0), - was_decompressed: false, leaf_seq: Some(0), base_info_seq: Some(0), creators_added_seq: Some(0), diff --git a/migration/src/lib.rs 
b/migration/src/lib.rs index 7b4be4523..c54bdc81a 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -31,6 +31,7 @@ mod m20230726_013107_remove_not_null_constraint_from_group_value; mod m20230918_182123_add_raw_name_symbol; mod m20230919_072154_cl_audits; mod m20231019_120101_add_seq_numbers_bgum_update_metadata; +mod m20231206_120101_remove_was_decompressed; pub struct Migrator; @@ -69,6 +70,7 @@ impl MigratorTrait for Migrator { Box::new(m20230918_182123_add_raw_name_symbol::Migration), Box::new(m20230919_072154_cl_audits::Migration), Box::new(m20231019_120101_add_seq_numbers_bgum_update_metadata::Migration), + Box::new(m20231206_120101_remove_was_decompressed::Migration), ] } } diff --git a/migration/src/m20231206_120101_remove_was_decompressed.rs b/migration/src/m20231206_120101_remove_was_decompressed.rs new file mode 100644 index 000000000..27dac3fa9 --- /dev/null +++ b/migration/src/m20231206_120101_remove_was_decompressed.rs @@ -0,0 +1,39 @@ +use digital_asset_types::dao::asset; +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(asset::Entity) + .drop_column(Alias::new("was_decompressed")) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(asset::Entity) + .add_column( + ColumnDef::new(Alias::new("was_decompressed")) + .boolean() + .not_null() + .default(false), + ) + .to_owned(), + ) + .await?; + + Ok(()) + } +} From 6ccfb91aaea187a6bbb83ef915172eda75bb2ec6 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Fri, 8 Dec 2023 18:23:27 -0800 Subject: [PATCH 31/46] Combine upsert_asset_base_info and upsert_creators and add lock --- .../program_transformers/bubblegum/burn.rs | 1 - .../bubblegum/cancel_redeem.rs | 1 - .../bubblegum/collection_verification.rs | 1 - .../bubblegum/creator_verification.rs | 1 - .../src/program_transformers/bubblegum/db.rs | 186 +++++++----------- .../bubblegum/delegate.rs | 1 - .../program_transformers/bubblegum/mint_v1.rs | 35 ++-- .../program_transformers/bubblegum/redeem.rs | 1 - .../bubblegum/transfer.rs | 1 - .../bubblegum/update_metadata.rs | 37 ++-- .../token_metadata/v1_asset.rs | 1 - 11 files changed, 102 insertions(+), 164 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/burn.rs b/nft_ingester/src/program_transformers/bubblegum/burn.rs index 3f8914596..ef6c1dc7c 100644 --- a/nft_ingester/src/program_transformers/bubblegum/burn.rs +++ b/nft_ingester/src/program_transformers/bubblegum/burn.rs @@ -60,7 +60,6 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; return Ok(()); diff --git a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs index 184e7df5f..d1a91ae45 100644 --- a/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/cancel_redeem.rs @@ -69,7 +69,6 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. 
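[The new remove_was_decompressed migration registered above is applied through the repository's `migration` crate. Assuming the crate is named `migration` as the directory suggests and follows the standard sea-orm-migration setup, it can be run programmatically like this sketch; the runner function is illustrative and the database URL is passed in by the caller:]

use migration::Migrator;
use sea_orm::Database;
use sea_orm_migration::MigratorTrait;

async fn run_pending_migrations(database_url: &str) -> Result<(), sea_orm::DbErr> {
    let db = Database::connect(database_url).await?;
    // Applies every migration not yet recorded, including
    // m20231206_120101_remove_was_decompressed.
    Migrator::up(&db, None).await?;
    Ok(())
}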
multi_txn.commit().await?; return Ok(()); diff --git a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs index 0bbf5fc00..c0397687a 100644 --- a/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/collection_verification.rs @@ -77,7 +77,6 @@ where ) .await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; return Ok(()); diff --git a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs index b84b1fd8b..6a765a930 100644 --- a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs @@ -91,7 +91,6 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; id_bytes.to_vec() diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 8234be054..a8721c0f3 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -392,7 +392,7 @@ where // automatically rolled back. let multi_txn = txn.begin().await?; - if creators_should_be_updated(&multi_txn, asset_id, seq).await? { + if lock_asset_creators_and_check_asset_base_info_seq(&multi_txn, asset_id, seq).await? { let mut query = asset_creators::Entity::insert(model) .on_conflict( OnConflict::columns([ @@ -418,7 +418,6 @@ where .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; } - // Commit transaction and relinqish the lock. multi_txn.commit().await?; Ok(()) @@ -545,7 +544,7 @@ where Ok(()) } -pub async fn creators_should_be_updated( +pub async fn lock_asset_creators_and_check_asset_base_info_seq( txn: &T, id: Vec, seq: i64, @@ -553,12 +552,23 @@ pub async fn creators_should_be_updated( where T: ConnectionTrait + TransactionTrait, { + // Lock asset_creators table from any updates. + let lock_query = Statement::from_string( + DbBackend::Postgres, + "LOCK TABLE asset_creators IN ACCESS EXCLUSIVE MODE".to_string(), + ); + txn.execute(lock_query).await?; + + // Select asset and lock that particular row. if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { + // Don't overwrite changes from after decompression. if let Some(0) = asset.seq { return Ok(false); } - if let Some(creators_added_seq) = asset.creators_added_seq { - if seq < creators_added_seq { + + // Don't overwrite changes from a subsequent Bubblegum instruction (i.e. update_metadata). 
+ if let Some(base_info_seq) = asset.base_info_seq { + if seq < base_info_seq { return Ok(false); } } @@ -566,47 +576,21 @@ where Ok(true) } -pub async fn upsert_asset_with_creators_added_seq( - txn: &T, - id: Vec, - seq: i64, -) -> Result<(), IngesterError> -where - T: ConnectionTrait + TransactionTrait, -{ - let model = asset::ActiveModel { - id: Set(id), - creators_added_seq: Set(Some(seq)), - ..Default::default() - }; - - let mut query = asset::Entity::insert(model) - .on_conflict( - OnConflict::column(asset::Column::Id) - .update_columns([asset::Column::CreatorsAddedSeq]) - .to_owned(), - ) - .build(DbBackend::Postgres); - - query.sql = format!( - "{} WHERE excluded.creators_added_seq >= asset.creators_added_seq OR asset.creators_added_seq IS NULL", - query.sql - ); - - txn.execute(query) - .await - .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; - - Ok(()) -} - -pub async fn upsert_creators( +#[allow(clippy::too_many_arguments)] +pub async fn upsert_asset_base_info_and_creators( txn: &T, id: Vec, - creators: &Vec, + owner_type: OwnerType, + frozen: bool, + specification_version: SpecificationVersions, + specification_asset_class: SpecificationAssetClass, + royalty_target_type: RoyaltyTargetType, + royalty_target: Option>, + royalty_amount: i32, slot_updated: i64, seq: i64, - delete_existing: bool, + creators: &Vec, + delete_existing_creators: bool, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, @@ -616,8 +600,52 @@ where // automatically rolled back. let multi_txn = txn.begin().await?; - if creators_should_be_updated(&multi_txn, id.clone(), seq).await? { - if delete_existing { + if lock_asset_creators_and_check_asset_base_info_seq(&multi_txn, id.clone(), seq).await? { + // Set base info for asset. + let asset_model = asset::ActiveModel { + id: Set(id.clone()), + owner_type: Set(owner_type), + frozen: Set(frozen), + specification_version: Set(Some(specification_version)), + specification_asset_class: Set(Some(specification_asset_class)), + royalty_target_type: Set(royalty_target_type), + royalty_target: Set(royalty_target), + royalty_amount: Set(royalty_amount), + asset_data: Set(Some(id.clone())), + slot_updated: Set(Some(slot_updated)), + base_info_seq: Set(Some(seq)), + ..Default::default() + }; + + // Upsert asset table base info. + let mut query = asset::Entity::insert(asset_model) + .on_conflict( + OnConflict::columns([asset::Column::Id]) + .update_columns([ + asset::Column::OwnerType, + asset::Column::Frozen, + asset::Column::SpecificationVersion, + asset::Column::SpecificationAssetClass, + asset::Column::RoyaltyTargetType, + asset::Column::RoyaltyTarget, + asset::Column::RoyaltyAmount, + asset::Column::AssetData, + asset::Column::SlotUpdated, + asset::Column::BaseInfoSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE asset.seq != 0 AND (excluded.base_info_seq >= asset.base_info_seq OR asset.base_info_seq IS NULL)", + query.sql + ); + + txn.execute(query) + .await + .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + + if delete_existing_creators { // Delete all existing creators that haven't been verified at a higher sequence number. asset_creators::Entity::delete_many() .filter( @@ -702,83 +730,13 @@ where ); multi_txn.execute(query).await?; } - - upsert_asset_with_creators_added_seq(&multi_txn, id, seq).await?; } - // Commit transaction and relinqish the lock. 
multi_txn.commit().await?; Ok(()) } -#[allow(clippy::too_many_arguments)] -pub async fn upsert_asset_base_info( - txn: &T, - id: Vec, - owner_type: OwnerType, - frozen: bool, - specification_version: SpecificationVersions, - specification_asset_class: SpecificationAssetClass, - royalty_target_type: RoyaltyTargetType, - royalty_target: Option>, - royalty_amount: i32, - slot_updated: i64, - seq: i64, -) -> Result<(), IngesterError> -where - T: ConnectionTrait + TransactionTrait, -{ - // Set initial mint info. - let asset_model = asset::ActiveModel { - id: Set(id.clone()), - owner_type: Set(owner_type), - frozen: Set(frozen), - specification_version: Set(Some(specification_version)), - specification_asset_class: Set(Some(specification_asset_class)), - royalty_target_type: Set(royalty_target_type), - royalty_target: Set(royalty_target), - royalty_amount: Set(royalty_amount), - asset_data: Set(Some(id)), - slot_updated: Set(Some(slot_updated)), - base_info_seq: Set(Some(seq)), - ..Default::default() - }; - - // Upsert asset table base info. - let mut query = asset::Entity::insert(asset_model) - .on_conflict( - OnConflict::columns([asset::Column::Id]) - .update_columns([ - asset::Column::OwnerType, - asset::Column::Frozen, - asset::Column::SpecificationVersion, - asset::Column::SpecificationAssetClass, - asset::Column::RoyaltyTargetType, - asset::Column::RoyaltyTarget, - asset::Column::RoyaltyAmount, - asset::Column::AssetData, - asset::Column::SlotUpdated, - asset::Column::BaseInfoSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - - // Do not overwrite changes that happened after decompression (asset.seq = 0). - // Do not overwrite changes from a later Bubblegum instruction. - query.sql = format!( - "{} WHERE asset.seq != 0 AND (excluded.base_info_seq >= asset.base_info_seq OR asset.base_info_seq IS NULL)", - query.sql - ); - - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - - Ok(()) -} - pub async fn upsert_asset_authority( txn: &T, asset_id: Vec, diff --git a/nft_ingester/src/program_transformers/bubblegum/delegate.rs b/nft_ingester/src/program_transformers/bubblegum/delegate.rs index 17d2cba22..4cb4aadc2 100644 --- a/nft_ingester/src/program_transformers/bubblegum/delegate.rs +++ b/nft_ingester/src/program_transformers/bubblegum/delegate.rs @@ -68,7 +68,6 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; return Ok(()); diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 7932b58a7..06b8fcfba 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,10 +1,9 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_authority, upsert_asset_base_info, upsert_asset_data, - upsert_asset_with_compression_info, upsert_asset_with_leaf_info, + save_changelog_event, upsert_asset_authority, upsert_asset_base_info_and_creators, + upsert_asset_data, upsert_asset_with_compression_info, upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_collection_info, - upsert_creators, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -99,14 +98,9 @@ where }; let tree_id = bundle.keys.get(3).unwrap().0.to_vec(); - // Begin a transaction. 
If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - // Upsert asset table base info. - upsert_asset_base_info( - &multi_txn, + // Upsert `asset` table base info and `asset_creators` table. + upsert_asset_base_info_and_creators( + txn, id_bytes.to_vec(), OwnerType::Single, false, @@ -117,9 +111,16 @@ where metadata.seller_fee_basis_points as i32, slot_i, seq as i64, + &metadata.creators, + false, ) .await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just compression info elements. upsert_asset_with_compression_info( &multi_txn, @@ -156,20 +157,8 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; - // Upsert into `asset_creators` table. - upsert_creators( - txn, - id_bytes.to_vec(), - &metadata.creators, - slot_i, - seq as i64, - false, - ) - .await?; - // Insert into `asset_authority` table. //TODO - we need to remove the optional bubblegum signer logic let authority = bundle.keys.get(0).unwrap().0.to_vec(); diff --git a/nft_ingester/src/program_transformers/bubblegum/redeem.rs b/nft_ingester/src/program_transformers/bubblegum/redeem.rs index 69549ad09..484f5cdc9 100644 --- a/nft_ingester/src/program_transformers/bubblegum/redeem.rs +++ b/nft_ingester/src/program_transformers/bubblegum/redeem.rs @@ -55,7 +55,6 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; return Ok(()); diff --git a/nft_ingester/src/program_transformers/bubblegum/transfer.rs b/nft_ingester/src/program_transformers/bubblegum/transfer.rs index 29c6c3461..230167991 100644 --- a/nft_ingester/src/program_transformers/bubblegum/transfer.rs +++ b/nft_ingester/src/program_transformers/bubblegum/transfer.rs @@ -71,7 +71,6 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; return Ok(()); diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index a2432eb53..183eeeb93 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -1,8 +1,8 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_base_info, upsert_asset_data, - upsert_asset_with_leaf_info, upsert_asset_with_seq, upsert_creators, + save_changelog_event, upsert_asset_base_info_and_creators, upsert_asset_data, + upsert_asset_with_leaf_info, upsert_asset_with_seq, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -126,12 +126,7 @@ where ) .await?; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - // Upsert asset table base info. 
+ // Upsert `asset` table base info and `asset_creators` table. let seller_fee_basis_points = if let Some(seller_fee_basis_points) = update_args.seller_fee_basis_points { seller_fee_basis_points @@ -139,8 +134,14 @@ where current_metadata.seller_fee_basis_points }; - upsert_asset_base_info( - &multi_txn, + let creators = if let Some(creators) = &update_args.creators { + creators + } else { + ¤t_metadata.creators + }; + + upsert_asset_base_info_and_creators( + txn, id_bytes.to_vec(), OwnerType::Single, false, @@ -151,9 +152,16 @@ where seller_fee_basis_points as i32, slot_i, seq as i64, + creators, + true, ) .await?; + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + // Partial update of asset table with just leaf. let tree_id = bundle.keys.get(8).unwrap().0.to_vec(); upsert_asset_with_leaf_info( @@ -170,17 +178,8 @@ where upsert_asset_with_seq(&multi_txn, id_bytes.to_vec(), seq as i64).await?; - // Commit transaction and relinqish the lock. multi_txn.commit().await?; - // Upsert into `asset_creators` table. - let creators = if let Some(creators) = &update_args.creators { - creators - } else { - ¤t_metadata.creators - }; - upsert_creators(txn, id_bytes.to_vec(), creators, slot_i, seq as i64, true).await?; - if uri.is_empty() { warn!( "URI is empty for mint {}. Skipping background task.", diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index b0a370974..f0634de02 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -366,7 +366,6 @@ pub async fn save_v1_asset( asset_creators::Column::Creator, asset_creators::Column::Share, asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, asset_creators::Column::SlotUpdated, ]) .to_owned(), From b7c9ebee53adebad0085fc04c3d5cb483c101cf3 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Fri, 8 Dec 2023 18:54:55 -0800 Subject: [PATCH 32/46] Remove unneeded creators_added_seq --- .../src/dao/generated/asset.rs | 3 - .../src/dao/generated/sea_orm_active_enums.rs | 116 +++++++++--------- digital_asset_types/tests/common.rs | 1 - ...01_add_seq_numbers_bgum_update_metadata.rs | 2 - 4 files changed, 58 insertions(+), 64 deletions(-) diff --git a/digital_asset_types/src/dao/generated/asset.rs b/digital_asset_types/src/dao/generated/asset.rs index 1a9d18481..0c32be2fe 100644 --- a/digital_asset_types/src/dao/generated/asset.rs +++ b/digital_asset_types/src/dao/generated/asset.rs @@ -46,7 +46,6 @@ pub struct Model { pub owner_delegate_seq: Option, pub leaf_seq: Option, pub base_info_seq: Option, - pub creators_added_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -79,7 +78,6 @@ pub enum Column { OwnerDelegateSeq, LeafSeq, BaseInfoSeq, - CreatorsAddedSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -135,7 +133,6 @@ impl ColumnTrait for Column { Self::OwnerDelegateSeq => ColumnType::BigInteger.def().null(), Self::LeafSeq => ColumnType::BigInteger.def().null(), Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), - Self::CreatorsAddedSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs 
b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index 2018fd436..e4253e267 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -7,37 +7,39 @@ use serde::{Deserialize, Serialize}; #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "specification_versions" + enum_name = "v1_account_attachments" )] -pub enum SpecificationVersions { +pub enum V1AccountAttachments { + #[sea_orm(string_value = "edition")] + Edition, + #[sea_orm(string_value = "edition_marker")] + EditionMarker, + #[sea_orm(string_value = "master_edition_v1")] + MasterEditionV1, + #[sea_orm(string_value = "master_edition_v2")] + MasterEditionV2, #[sea_orm(string_value = "unknown")] Unknown, - #[sea_orm(string_value = "v0")] - V0, - #[sea_orm(string_value = "v1")] - V1, - #[sea_orm(string_value = "v2")] - V2, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "royalty_target_type" + enum_name = "specification_versions" )] -pub enum RoyaltyTargetType { - #[sea_orm(string_value = "creators")] - Creators, - #[sea_orm(string_value = "fanout")] - Fanout, - #[sea_orm(string_value = "single")] - Single, +pub enum SpecificationVersions { #[sea_orm(string_value = "unknown")] Unknown, + #[sea_orm(string_value = "v0")] + V0, + #[sea_orm(string_value = "v1")] + V1, + #[sea_orm(string_value = "v2")] + V2, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] -pub enum ChainMutability { +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] +pub enum Mutability { #[sea_orm(string_value = "immutable")] Immutable, #[sea_orm(string_value = "mutable")] @@ -46,28 +48,6 @@ pub enum ChainMutability { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] -pub enum TaskStatus { - #[sea_orm(string_value = "failed")] - Failed, - #[sea_orm(string_value = "pending")] - Pending, - #[sea_orm(string_value = "running")] - Running, - #[sea_orm(string_value = "success")] - Success, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] -pub enum OwnerType { - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "token")] - Token, - #[sea_orm(string_value = "unknown")] - Unknown, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", @@ -96,30 +76,50 @@ pub enum SpecificationAssetClass { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] -pub enum Mutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, - #[sea_orm(string_value = "unknown")] - Unknown, +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] +pub enum TaskStatus { + #[sea_orm(string_value = "failed")] + Failed, + #[sea_orm(string_value = "pending")] + Pending, + #[sea_orm(string_value = "running")] + Running, + #[sea_orm(string_value = "success")] + Success, } #[derive(Debug, Clone, PartialEq, EnumIter, 
DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "v1_account_attachments" + enum_name = "royalty_target_type" )] -pub enum V1AccountAttachments { - #[sea_orm(string_value = "edition")] - Edition, - #[sea_orm(string_value = "edition_marker")] - EditionMarker, - #[sea_orm(string_value = "master_edition_v1")] - MasterEditionV1, - #[sea_orm(string_value = "master_edition_v2")] - MasterEditionV2, +pub enum RoyaltyTargetType { + #[sea_orm(string_value = "creators")] + Creators, + #[sea_orm(string_value = "fanout")] + Fanout, + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] +pub enum OwnerType { + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "token")] + Token, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] +pub enum ChainMutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, #[sea_orm(string_value = "unknown")] Unknown, } diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index 40ad4509f..3a5948a39 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -159,7 +159,6 @@ pub fn create_asset( owner_delegate_seq: Some(0), leaf_seq: Some(0), base_info_seq: Some(0), - creators_added_seq: Some(0), }, ) } diff --git a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs index d25f3ef1b..b9cd7075d 100644 --- a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs +++ b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs @@ -37,7 +37,6 @@ impl MigrationTrait for Migration { Table::alter() .table(asset::Entity) .add_column(ColumnDef::new(Alias::new("base_info_seq")).big_integer()) - .add_column(ColumnDef::new(Alias::new("creators_added_seq")).big_integer()) .to_owned(), ) .await?; @@ -73,7 +72,6 @@ impl MigrationTrait for Migration { Table::alter() .table(asset::Entity) .drop_column(Alias::new("base_info_seq")) - .drop_column(Alias::new("creators_added_seq")) .to_owned(), ) .await?; From fa0839db2c03c379d70ffb195b188012dc3b7d8d Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Fri, 8 Dec 2023 19:04:17 -0800 Subject: [PATCH 33/46] Switch to EXCLUSIVE mode lock --- nft_ingester/src/program_transformers/bubblegum/db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index a8721c0f3..3683d6c68 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -552,10 +552,10 @@ pub async fn lock_asset_creators_and_check_asset_base_info_seq( where T: ConnectionTrait + TransactionTrait, { - // Lock asset_creators table from any updates. + // Lock asset_creators table from any updates, but allow reads. 
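[For context on the lock-mode switch in this commit, a sketch, not part of the patch, of the PostgreSQL behavior the surrounding transactions rely on; the function name is illustrative:]

use sea_orm::{ConnectionTrait, DbBackend, DbErr, Statement, TransactionTrait};

async fn with_creators_lock<T: TransactionTrait>(db: &T) -> Result<(), DbErr> {
    let multi_txn = db.begin().await?;

    // EXCLUSIVE conflicts with every lock mode except ACCESS SHARE: plain SELECTs
    // keep running, while concurrent writers (and other EXCLUSIVE takers) block.
    // ACCESS EXCLUSIVE, used previously, would have blocked readers as well.
    multi_txn
        .execute(Statement::from_string(
            DbBackend::Postgres,
            "LOCK TABLE asset_creators IN EXCLUSIVE MODE".to_string(),
        ))
        .await?;

    // ... reads and writes that must not race with other creator updates ...

    // The table lock is held until the transaction ends; committing (or rolling
    // back on an early return) is what actually relinquishes it.
    multi_txn.commit().await?;
    Ok(())
}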
let lock_query = Statement::from_string( DbBackend::Postgres, - "LOCK TABLE asset_creators IN ACCESS EXCLUSIVE MODE".to_string(), + "LOCK TABLE asset_creators IN EXCLUSIVE MODE".to_string(), ); txn.execute(lock_query).await?; From 4b96e9767301f9bb44806e2ac11eb9fc658a3b7e Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 13 Dec 2023 13:38:15 -0800 Subject: [PATCH 34/46] Add NULL condition check on asset.seq --- nft_ingester/src/program_transformers/bubblegum/db.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 3683d6c68..577d68939 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -184,7 +184,7 @@ where // Do not overwrite changes that happened after decompression (asset.seq = 0). // Do not overwrite changes from a later Bubblegum instruction. query.sql = format!( - "{} WHERE asset.seq != 0 AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", + "{} WHERE (asset.seq != 0 OR asset.seq IS NULL) AND (excluded.leaf_seq >= asset.leaf_seq OR asset.leaf_seq IS NULL)", query.sql ); @@ -238,7 +238,7 @@ where .build(DbBackend::Postgres); // Do not overwrite changes that happened after decompression (asset.seq = 0). - query.sql = format!("{} WHERE asset.seq != 0", query.sql); + query.sql = format!("{} WHERE asset.seq != 0 OR asset.seq IS NULL", query.sql); txn.execute(query) .await @@ -280,7 +280,7 @@ where // Do not overwrite changes that happened after decompression (asset.seq = 0). // Do not overwrite changes from a later Bubblegum instruction. query.sql = format!( - "{} WHERE asset.seq != 0 AND (excluded.owner_delegate_seq >= asset.owner_delegate_seq OR asset.owner_delegate_seq IS NULL)", + "{} WHERE (asset.seq != 0 OR asset.seq IS NULL) AND (excluded.owner_delegate_seq >= asset.owner_delegate_seq OR asset.owner_delegate_seq IS NULL)", query.sql ); @@ -325,7 +325,7 @@ where .build(DbBackend::Postgres); // Do not overwrite changes that happened after decompression (asset.seq = 0). - query.sql = format!("{} WHERE asset.seq != 0", query.sql); + query.sql = format!("{} WHERE asset.seq != 0 OR asset.seq IS NULL", query.sql); txn.execute(query).await?; Ok(()) @@ -637,7 +637,7 @@ where ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE asset.seq != 0 AND (excluded.base_info_seq >= asset.base_info_seq OR asset.base_info_seq IS NULL)", + "{} WHERE (asset.seq != 0 OR asset.seq IS NULL) AND (excluded.base_info_seq >= asset.base_info_seq OR asset.base_info_seq IS NULL)", query.sql ); From 88031523b43c4566900b36337661f4eac729df1a Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 18 Dec 2023 02:12:57 -0800 Subject: [PATCH 35/46] Refactored creator indexing * Use new Blockbuster that always updates all creators and verification status. * Remove deleting creators with lower sequence numbers as it would not work due to race conditions. * Add concept of "empty" creator value to support Bubblegum empty creator arrays. * Add filtering out of old creators or having no creators to DAS code. * Also get authority and tree_id accounts from Bubblegum during mint and update_metadata. 
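[The last two bullets above land in digital_asset_types (see the scopes/asset.rs hunk below). A self-contained sketch of the filtering rule, using stand-in types rather than the generated SeaORM models; the real code also keys on slot_updated:]

#[derive(Debug, Clone)]
struct CreatorRow {
    creator: Vec<u8>,
    verified_seq: Option<i64>,
}

fn filter_stale_creators(rows: &mut Vec<CreatorRow>) {
    if rows.first().map_or(false, |c| c.creator.is_empty()) {
        // A single row with an empty address marks an intentionally empty
        // creator array (allowed by Bubblegum).
        rows.clear();
    } else if let Some(max_seq) = rows.iter().map(|c| c.verified_seq).max() {
        // Rows below the maximum sequence are leftovers from an earlier
        // creator array and are dropped from the response.
        rows.retain(|c| c.verified_seq == max_seq);
    }
}

fn main() {
    let mut rows = vec![
        CreatorRow { creator: vec![1u8; 32], verified_seq: Some(10) }, // stale
        CreatorRow { creator: vec![2u8; 32], verified_seq: Some(12) },
    ];
    filter_stale_creators(&mut rows);
    assert_eq!(rows.len(), 1);
}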
--- Cargo.lock | 3 +- das_api/Cargo.toml | 2 +- digital_asset_types/Cargo.toml | 2 +- digital_asset_types/src/dao/scopes/asset.rs | 37 +- nft_ingester/Cargo.toml | 4 +- .../bubblegum/creator_verification.rs | 35 +- .../src/program_transformers/bubblegum/db.rs | 337 ++++++------------ .../program_transformers/bubblegum/mint_v1.rs | 57 ++- .../src/program_transformers/bubblegum/mod.rs | 7 +- .../bubblegum/update_metadata.rs | 25 +- tools/acc_forwarder/Cargo.toml | 2 +- tools/load_generation/Cargo.toml | 2 +- 12 files changed, 227 insertions(+), 286 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b2c42ddc..af8da0e7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -882,8 +882,7 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blockbuster" version = "0.9.0-beta.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ab97783defb671f7214f158a517844cb8fa5da781e4d8d46a17e15bc79f213" +source = "git+https://github.com/metaplex-foundation/blockbuster.git?rev=f9194397c75017808eb2d7b6f0229f43238cd317#f9194397c75017808eb2d7b6f0229f43238cd317" dependencies = [ "anchor-lang", "async-trait", diff --git a/das_api/Cargo.toml b/das_api/Cargo.toml index bd2bbff51..7c2e94cf0 100644 --- a/das_api/Cargo.toml +++ b/das_api/Cargo.toml @@ -33,7 +33,7 @@ schemars = "0.8.6" schemars_derive = "0.8.6" open-rpc-derive = { version = "0.0.4"} open-rpc-schema = { version = "0.0.4"} -blockbuster = "=0.9.0-beta.3" +blockbuster = {git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "f9194397c75017808eb2d7b6f0229f43238cd317"} anchor-lang = "0.28.0" mpl-token-metadata = { version = "=2.0.0-beta.1", features = ["serde-feature"] } mpl-candy-machine-core = { version = "2.0.1", features = ["no-entrypoint"] } diff --git a/digital_asset_types/Cargo.toml b/digital_asset_types/Cargo.toml index 8d79be1f3..223beacff 100644 --- a/digital_asset_types/Cargo.toml +++ b/digital_asset_types/Cargo.toml @@ -18,7 +18,7 @@ solana-sdk = "~1.16.16" num-traits = "0.2.15" num-derive = "0.3.3" thiserror = "1.0.31" -blockbuster = "=0.9.0-beta.3" +blockbuster = {git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "f9194397c75017808eb2d7b6f0229f43238cd317"} jsonpath_lib = "0.3.0" mime_guess = "2.0.4" url = "2.3.1" diff --git a/digital_asset_types/src/dao/scopes/asset.rs b/digital_asset_types/src/dao/scopes/asset.rs index cd9db02ba..7dccb9042 100644 --- a/digital_asset_types/src/dao/scopes/asset.rs +++ b/digital_asset_types/src/dao/scopes/asset.rs @@ -1,18 +1,14 @@ -use crate::{ - dao::{ - asset::{self, Entity}, - asset_authority, asset_creators, asset_data, asset_grouping, FullAsset, - GroupingSize, Pagination, - }, - dapi::common::safe_select, - rpc::{response::AssetList}, +use crate::dao::{ + asset::{self}, + asset_authority, asset_creators, asset_data, asset_grouping, FullAsset, GroupingSize, + Pagination, }; use indexmap::IndexMap; use sea_orm::{entity::*, query::*, ConnectionTrait, DbErr, Order}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; -pub fn paginate<'db, T>(pagination: &Pagination, limit: u64, stmt: T) -> T +pub fn paginate(pagination: &Pagination, limit: u64, stmt: T) -> T where T: QueryFilter + QuerySelect, { @@ -313,11 +309,30 @@ pub async fn get_by_id( .order_by_asc(asset_authority::Column::AssetId) .all(conn) .await?; - let creators: Vec = asset_creators::Entity::find() + let mut creators: Vec = asset_creators::Entity::find() .filter(asset_creators::Column::AssetId.eq(asset.id.clone())) 
.order_by_asc(asset_creators::Column::Position) .all(conn) .await?; + + // If the first creator is an empty Vec, it means the creator array is empty (which is allowed + // in Bubblegum). + if !creators.is_empty() && creators[0].creator.is_empty() { + creators.clear(); + } else { + // Any creators that are not the max slot_updated value are stale rows, so remove them. + let max_slot_updated = creators.iter().map(|creator| creator.slot_updated).max(); + if let Some(max_slot_updated) = max_slot_updated { + creators.retain(|creator| creator.slot_updated == max_slot_updated); + } + + // Any creators that are not the max seq are stale rows, so remove them. + let max_seq = creators.iter().map(|creator| creator.verified_seq).max(); + if let Some(max_seq) = max_seq { + creators.retain(|creator| creator.verified_seq == max_seq); + } + } + let grouping: Vec = asset_grouping::Entity::find() .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) .filter(asset_grouping::Column::GroupValue.is_not_null()) diff --git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index 8a44d32b4..594c440eb 100644 --- a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -35,13 +35,13 @@ spl-concurrent-merkle-tree = "0.2.0" uuid = "1.0.0" async-trait = "0.1.53" num-traits = "0.2.15" -blockbuster = "=0.9.0-beta.3" +blockbuster = {git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "f9194397c75017808eb2d7b6f0229f43238cd317"} figment = { version = "0.10.6", features = ["env", "toml", "yaml"] } cadence = "0.29.0" cadence-macros = "0.29.0" solana-sdk = "~1.16.16" solana-client = "~1.16.16" -spl-token = { version = ">= 3.5.0, < 5.0", features = ["no-entrypoint"] } +spl-token = { version = "4.0.0", features = ["no-entrypoint"] } solana-transaction-status = "~1.16.16" solana-account-decoder = "~1.16.16" solana-geyser-plugin-interface = "~1.16.16" diff --git a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs index 6a765a930..a1ea3a050 100644 --- a/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs +++ b/nft_ingester/src/program_transformers/bubblegum/creator_verification.rs @@ -1,8 +1,8 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_with_leaf_info, - upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_creator_verified, + save_changelog_event, upsert_asset_creators, upsert_asset_with_leaf_info, + upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, }, }; use blockbuster::{ @@ -10,13 +10,13 @@ use blockbuster::{ programs::bubblegum::{BubblegumInstruction, LeafSchema, Payload}, }; use log::debug; +use mpl_bubblegum::types::Creator; use sea_orm::{ConnectionTrait, TransactionTrait}; pub async fn process<'c, T>( parsing_result: &BubblegumInstruction, bundle: &InstructionBundle<'c>, txn: &'c T, - value: bool, cl_audits: bool, ) -> Result<(), IngesterError> where @@ -27,10 +27,26 @@ where &parsing_result.tree_update, &parsing_result.payload, ) { - let (creator, verify) = match payload { + let (updated_creators, creator, verify) = match payload { Payload::CreatorVerification { - creator, verify, .. 
- } => (creator, verify), + metadata, + creator, + verify, + } => { + let updated_creators: Vec = metadata + .creators + .iter() + .map(|c| { + let mut c = c.clone(); + if c.address == *creator { + c.verified = *verify + }; + c + }) + .collect(); + + (updated_creators, creator, verify) + } _ => { return Err(IngesterError::ParsingError( "Ix not parsed correctly".to_string(), @@ -97,11 +113,12 @@ where } }; - upsert_creator_verified( + // Upsert creators to `asset_creators` table. + upsert_asset_creators( txn, asset_id_bytes, - creator.to_bytes().to_vec(), - value, + &updated_creators, + bundle.slot as i64, seq as i64, ) .await?; diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 577d68939..d719ce9f8 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -363,66 +363,6 @@ where Ok(()) } -pub async fn upsert_creator_verified( - txn: &T, - asset_id: Vec, - creator: Vec, - verified: bool, - seq: i64, -) -> Result<(), IngesterError> -where - T: ConnectionTrait + TransactionTrait, -{ - let model = asset_creators::ActiveModel { - asset_id: Set(asset_id.clone()), - creator: Set(creator), - verified: Set(verified), - verified_seq: Set(Some(seq)), - ..Default::default() - }; - - // Only upsert a creator if the asset table's creator array seq is at a lower value. That seq - // gets updated when we set up the creator array in `mintV1` or `update_metadata`. We don't - // want to insert a creator that was removed from a later `update_metadata`. And we don't need - // to worry about creator verification in that case because the `update_metadata` updates - // creator verification state as well. - - // Note that if the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - if lock_asset_creators_and_check_asset_base_info_seq(&multi_txn, asset_id, seq).await? { - let mut query = asset_creators::Entity::insert(model) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - - query.sql = format!( - "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq is NULL", - query.sql, -); - - multi_txn - .execute(query) - .await - .map_err(|db_err| IngesterError::StorageWriteError(db_err.to_string()))?; - } - - multi_txn.commit().await?; - - Ok(()) -} - pub async fn upsert_collection_info( txn: &T, asset_id: Vec, @@ -544,40 +484,8 @@ where Ok(()) } -pub async fn lock_asset_creators_and_check_asset_base_info_seq( - txn: &T, - id: Vec, - seq: i64, -) -> Result -where - T: ConnectionTrait + TransactionTrait, -{ - // Lock asset_creators table from any updates, but allow reads. - let lock_query = Statement::from_string( - DbBackend::Postgres, - "LOCK TABLE asset_creators IN EXCLUSIVE MODE".to_string(), - ); - txn.execute(lock_query).await?; - - // Select asset and lock that particular row. - if let Some(asset) = asset::Entity::find_by_id(id).one(txn).await? { - // Don't overwrite changes from after decompression. - if let Some(0) = asset.seq { - return Ok(false); - } - - // Don't overwrite changes from a subsequent Bubblegum instruction (i.e. update_metadata). 
- if let Some(base_info_seq) = asset.base_info_seq { - if seq < base_info_seq { - return Ok(false); - } - } - } - Ok(true) -} - #[allow(clippy::too_many_arguments)] -pub async fn upsert_asset_base_info_and_creators( +pub async fn upsert_asset_base_info( txn: &T, id: Vec, owner_type: OwnerType, @@ -589,150 +497,130 @@ pub async fn upsert_asset_base_info_and_creators( royalty_amount: i32, slot_updated: i64, seq: i64, - creators: &Vec, - delete_existing_creators: bool, ) -> Result<(), IngesterError> where T: ConnectionTrait + TransactionTrait, { - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - - if lock_asset_creators_and_check_asset_base_info_seq(&multi_txn, id.clone(), seq).await? { - // Set base info for asset. - let asset_model = asset::ActiveModel { - id: Set(id.clone()), - owner_type: Set(owner_type), - frozen: Set(frozen), - specification_version: Set(Some(specification_version)), - specification_asset_class: Set(Some(specification_asset_class)), - royalty_target_type: Set(royalty_target_type), - royalty_target: Set(royalty_target), - royalty_amount: Set(royalty_amount), - asset_data: Set(Some(id.clone())), - slot_updated: Set(Some(slot_updated)), - base_info_seq: Set(Some(seq)), - ..Default::default() - }; + // Set base info for asset. + let asset_model = asset::ActiveModel { + id: Set(id.clone()), + owner_type: Set(owner_type), + frozen: Set(frozen), + specification_version: Set(Some(specification_version)), + specification_asset_class: Set(Some(specification_asset_class)), + royalty_target_type: Set(royalty_target_type), + royalty_target: Set(royalty_target), + royalty_amount: Set(royalty_amount), + asset_data: Set(Some(id.clone())), + slot_updated: Set(Some(slot_updated)), + base_info_seq: Set(Some(seq)), + ..Default::default() + }; - // Upsert asset table base info. - let mut query = asset::Entity::insert(asset_model) - .on_conflict( - OnConflict::columns([asset::Column::Id]) - .update_columns([ - asset::Column::OwnerType, - asset::Column::Frozen, - asset::Column::SpecificationVersion, - asset::Column::SpecificationAssetClass, - asset::Column::RoyaltyTargetType, - asset::Column::RoyaltyTarget, - asset::Column::RoyaltyAmount, - asset::Column::AssetData, - asset::Column::SlotUpdated, - asset::Column::BaseInfoSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( + // Upsert asset table base info. + let mut query = asset::Entity::insert(asset_model) + .on_conflict( + OnConflict::columns([asset::Column::Id]) + .update_columns([ + asset::Column::OwnerType, + asset::Column::Frozen, + asset::Column::SpecificationVersion, + asset::Column::SpecificationAssetClass, + asset::Column::RoyaltyTargetType, + asset::Column::RoyaltyTarget, + asset::Column::RoyaltyAmount, + asset::Column::AssetData, + asset::Column::SlotUpdated, + asset::Column::BaseInfoSeq, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( "{} WHERE (asset.seq != 0 OR asset.seq IS NULL) AND (excluded.base_info_seq >= asset.base_info_seq OR asset.base_info_seq IS NULL)", query.sql ); - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - - if delete_existing_creators { - // Delete all existing creators that haven't been verified at a higher sequence number. 
- asset_creators::Entity::delete_many() - .filter( - Condition::all() - .add(asset_creators::Column::AssetId.eq(id.clone())) - .add(asset_creators::Column::VerifiedSeq.lt(seq)), - ) - .exec(&multi_txn) - .await?; - } + txn.execute(query) + .await + .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; - if !creators.is_empty() { - // Vec to hold base creator information. - let mut db_creator_infos = Vec::with_capacity(creators.len()); - - // Vec to hold info on whether a creator is verified. This info is protected by `seq` number. - let mut db_creator_verified_infos = Vec::with_capacity(creators.len()); - - // Set to prevent duplicates. - let mut creators_set = HashSet::new(); - - for (i, c) in creators.iter().enumerate() { - if creators_set.contains(&c.address) { - continue; - } - - db_creator_infos.push(asset_creators::ActiveModel { - asset_id: Set(id.clone()), - creator: Set(c.address.to_bytes().to_vec()), - position: Set(i as i16), - share: Set(c.share as i32), - slot_updated: Set(Some(slot_updated)), - ..Default::default() - }); - - db_creator_verified_infos.push(asset_creators::ActiveModel { - asset_id: Set(id.clone()), - creator: Set(c.address.to_bytes().to_vec()), - verified: Set(c.verified), - verified_seq: Set(Some(seq)), - ..Default::default() - }); - - creators_set.insert(c.address); + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn upsert_asset_creators( + txn: &T, + id: Vec, + creators: &Vec, + slot_updated: i64, + seq: i64, +) -> Result<(), IngesterError> +where + T: ConnectionTrait + TransactionTrait, +{ + // Vec to hold base creator information. + let mut db_creator_infos = Vec::with_capacity(creators.len()); + + if creators.is_empty() { + db_creator_infos.push(asset_creators::ActiveModel { + asset_id: Set(id.clone()), + creator: Set(vec![]), + position: Set(0), + share: Set(100), + slot_updated: Set(Some(slot_updated)), + verified: Set(false), + verified_seq: Set(Some(seq)), + ..Default::default() + }); + } else { + // Set to prevent duplicates. + let mut creators_set = HashSet::new(); + + for (i, c) in creators.iter().enumerate() { + if creators_set.contains(&c.address) { + continue; } - // This statement will update base information for each creator. - let query = asset_creators::Entity::insert_many(db_creator_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Position, - asset_creators::Column::Share, - asset_creators::Column::SlotUpdated, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - multi_txn.execute(query).await?; - - // This statement will update whether the creator is verified and the - // `verified_seq` number. 
- let mut query = asset_creators::Entity::insert_many(db_creator_verified_infos) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Creator, - ]) - .update_columns([ - asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", - query.sql - ); - multi_txn.execute(query).await?; + db_creator_infos.push(asset_creators::ActiveModel { + asset_id: Set(id.clone()), + creator: Set(c.address.to_bytes().to_vec()), + position: Set(i as i16), + share: Set(c.share as i32), + slot_updated: Set(Some(slot_updated)), + verified: Set(c.verified), + verified_seq: Set(Some(seq)), + ..Default::default() + }); + + creators_set.insert(c.address); } } - multi_txn.commit().await?; + // This statement will update base information for each creator. + let mut query = asset_creators::Entity::insert_many(db_creator_infos) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Position, + ]) + .update_columns([ + asset_creators::Column::Creator, + asset_creators::Column::Share, + asset_creators::Column::Verified, + asset_creators::Column::VerifiedSeq, + asset_creators::Column::SlotUpdated, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + + query.sql = format!( + "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", + query.sql + ); + + txn.execute(query).await?; Ok(()) } @@ -766,7 +654,10 @@ where .build(DbBackend::Postgres); // Do not overwrite changes that happened after decompression (asset_authority.seq = 0). - query.sql = format!("{} WHERE asset_authority.seq != 0", query.sql); + query.sql = format!( + "{} WHERE asset_authority.seq != 0 OR asset_authority.seq IS NULL", + query.sql + ); txn.execute(query) .await diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index 06b8fcfba..cc78d09d7 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -1,9 +1,10 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_authority, upsert_asset_base_info_and_creators, - upsert_asset_data, upsert_asset_with_compression_info, upsert_asset_with_leaf_info, - upsert_asset_with_owner_and_delegate_info, upsert_asset_with_seq, upsert_collection_info, + save_changelog_event, upsert_asset_authority, upsert_asset_base_info, + upsert_asset_creators, upsert_asset_data, upsert_asset_with_compression_info, + upsert_asset_with_leaf_info, upsert_asset_with_owner_and_delegate_info, + upsert_asset_with_seq, upsert_collection_info, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, }; @@ -33,7 +34,15 @@ pub async fn mint_v1<'c, T>( where T: ConnectionTrait + TransactionTrait, { - if let (Some(le), Some(cl), Some(Payload::MintV1 { args })) = ( + if let ( + Some(le), + Some(cl), + Some(Payload::MintV1 { + args, + authority, + tree_id, + }), + ) = ( &parsing_result.leaf_update, &parsing_result.tree_update, &parsing_result.payload, @@ -90,16 +99,20 @@ where ) .await?; - // Insert into `asset` table. + // Upsert `asset` table base info. 
let delegate = if owner == delegate || delegate.to_bytes() == [0; 32] { None } else { Some(delegate.to_bytes().to_vec()) }; - let tree_id = bundle.keys.get(3).unwrap().0.to_vec(); + + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; // Upsert `asset` table base info and `asset_creators` table. - upsert_asset_base_info_and_creators( + upsert_asset_base_info( txn, id_bytes.to_vec(), OwnerType::Single, @@ -111,16 +124,9 @@ where metadata.seller_fee_basis_points as i32, slot_i, seq as i64, - &metadata.creators, - false, ) .await?; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just compression info elements. upsert_asset_with_compression_info( &multi_txn, @@ -137,7 +143,7 @@ where &multi_txn, id_bytes.to_vec(), nonce as i64, - tree_id, + tree_id.to_vec(), le.leaf_hash.to_vec(), le.schema.data_hash(), le.schema.creator_hash(), @@ -159,11 +165,26 @@ where multi_txn.commit().await?; + // Upsert creators to `asset_creators` table. + upsert_asset_creators( + txn, + id_bytes.to_vec(), + &metadata.creators, + slot_i, + seq as i64, + ) + .await?; + // Insert into `asset_authority` table. //TODO - we need to remove the optional bubblegum signer logic - let authority = bundle.keys.get(0).unwrap().0.to_vec(); - upsert_asset_authority(txn, id_bytes.to_vec(), authority, seq as i64, slot_i) - .await?; + upsert_asset_authority( + txn, + id_bytes.to_vec(), + authority.to_vec(), + seq as i64, + slot_i, + ) + .await?; // Upsert into `asset_grouping` table with base collection info. 
upsert_collection_info( diff --git a/nft_ingester/src/program_transformers/bubblegum/mod.rs b/nft_ingester/src/program_transformers/bubblegum/mod.rs index 5ed21dbf7..a034080ff 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mod.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mod.rs @@ -85,11 +85,8 @@ where InstructionName::DecompressV1 => { decompress::decompress(parsing_result, bundle, txn).await?; } - InstructionName::VerifyCreator => { - creator_verification::process(parsing_result, bundle, txn, true, cl_audits).await?; - } - InstructionName::UnverifyCreator => { - creator_verification::process(parsing_result, bundle, txn, false, cl_audits).await?; + InstructionName::VerifyCreator | InstructionName::UnverifyCreator => { + creator_verification::process(parsing_result, bundle, txn, cl_audits).await?; } InstructionName::VerifyCollection | InstructionName::UnverifyCollection diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 183eeeb93..988ca23f2 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -1,7 +1,7 @@ use crate::{ error::IngesterError, program_transformers::bubblegum::{ - save_changelog_event, upsert_asset_base_info_and_creators, upsert_asset_data, + save_changelog_event, upsert_asset_base_info, upsert_asset_creators, upsert_asset_data, upsert_asset_with_leaf_info, upsert_asset_with_seq, }, tasks::{DownloadMetadata, IntoTaskData, TaskData}, @@ -38,6 +38,7 @@ where Some(Payload::UpdateMetadata { current_metadata, update_args, + tree_id, }), ) = ( &parsing_result.leaf_update, @@ -126,7 +127,7 @@ where ) .await?; - // Upsert `asset` table base info and `asset_creators` table. + // Upsert `asset` table base info. let seller_fee_basis_points = if let Some(seller_fee_basis_points) = update_args.seller_fee_basis_points { seller_fee_basis_points @@ -140,7 +141,12 @@ where ¤t_metadata.creators }; - upsert_asset_base_info_and_creators( + // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has + // an error and this function returns it using the `?` operator), then the transaction is + // automatically rolled back. + let multi_txn = txn.begin().await?; + + upsert_asset_base_info( txn, id_bytes.to_vec(), OwnerType::Single, @@ -152,23 +158,15 @@ where seller_fee_basis_points as i32, slot_i, seq as i64, - creators, - true, ) .await?; - // Begin a transaction. If the transaction goes out of scope (i.e. one of the executions has - // an error and this function returns it using the `?` operator), then the transaction is - // automatically rolled back. - let multi_txn = txn.begin().await?; - // Partial update of asset table with just leaf. - let tree_id = bundle.keys.get(8).unwrap().0.to_vec(); upsert_asset_with_leaf_info( &multi_txn, id_bytes.to_vec(), nonce as i64, - tree_id, + tree_id.to_vec(), le.leaf_hash.to_vec(), le.schema.data_hash(), le.schema.creator_hash(), @@ -180,6 +178,9 @@ where multi_txn.commit().await?; + // Upsert creators to `asset_creators` table. + upsert_asset_creators(txn, id_bytes.to_vec(), creators, slot_i, seq as i64).await?; + if uri.is_empty() { warn!( "URI is empty for mint {}. 
Skipping background task.", diff --git a/tools/acc_forwarder/Cargo.toml b/tools/acc_forwarder/Cargo.toml index 04e0ee842..96d5d4eec 100644 --- a/tools/acc_forwarder/Cargo.toml +++ b/tools/acc_forwarder/Cargo.toml @@ -24,6 +24,6 @@ solana-account-decoder = "~1.16.16" solana-client = "~1.16.16" solana-sdk = "~1.16.16" solana-transaction-status = "~1.16.16" -spl-token = { version = ">= 3.5.0, < 5.0", features = ["no-entrypoint"] } +spl-token = { version = "4.0.0", features = ["no-entrypoint"] } tokio = { version = "1.23.0", features = ["macros", "rt-multi-thread", "time"] } txn_forwarder = { path = "../txn_forwarder" } diff --git a/tools/load_generation/Cargo.toml b/tools/load_generation/Cargo.toml index 2f9452300..e0fb82ace 100644 --- a/tools/load_generation/Cargo.toml +++ b/tools/load_generation/Cargo.toml @@ -12,5 +12,5 @@ solana-client = "~1.16.16" solana-program = "~1.16.16" solana-sdk = "~1.16.16" spl-associated-token-account = { version = ">= 1.1.3, < 3.0", features = ["no-entrypoint"] } -spl-token = { version = ">= 3.5.0, < 5.0", features = ["no-entrypoint"] } +spl-token = { version = "4.0.0", features = ["no-entrypoint"] } tokio = { version ="1.25.0", features = ["macros", "rt-multi-thread"] } From 1a80b98d4e9f2b77e35773119165f9a2017a9863 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 18 Dec 2023 14:00:36 -0800 Subject: [PATCH 36/46] Add conditions to creator upsert, add another check at DAS API level --- digital_asset_types/src/dao/scopes/asset.rs | 17 ++- .../src/program_transformers/bubblegum/db.rs | 16 ++- .../token_metadata/v1_asset.rs | 125 ++++++++++-------- 3 files changed, 89 insertions(+), 69 deletions(-) diff --git a/digital_asset_types/src/dao/scopes/asset.rs b/digital_asset_types/src/dao/scopes/asset.rs index 7dccb9042..5a8fdd176 100644 --- a/digital_asset_types/src/dao/scopes/asset.rs +++ b/digital_asset_types/src/dao/scopes/asset.rs @@ -326,10 +326,19 @@ pub async fn get_by_id( creators.retain(|creator| creator.slot_updated == max_slot_updated); } - // Any creators that are not the max seq are stale rows, so remove them. - let max_seq = creators.iter().map(|creator| creator.verified_seq).max(); - if let Some(max_seq) = max_seq { - creators.retain(|creator| creator.verified_seq == max_seq); + // Any creators that are not the max seq are stale rows or updated by Token Metadata (seq = 0), so remove them. + let seq = if creators + .iter() + .map(|creator| creator.verified_seq) + .any(|seq| seq == Some(0)) + { + Some(Some(0)) + } else { + creators.iter().map(|creator| creator.verified_seq).max() + }; + + if let Some(seq) = seq { + creators.retain(|creator| creator.verified_seq == seq); } } diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index d719ce9f8..1cef480e4 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -560,10 +560,12 @@ where T: ConnectionTrait + TransactionTrait, { // Vec to hold base creator information. - let mut db_creator_infos = Vec::with_capacity(creators.len()); + let mut db_creators = Vec::with_capacity(creators.len()); if creators.is_empty() { - db_creator_infos.push(asset_creators::ActiveModel { + // Bubblegum supports empty creator array. In this case insert an empty Vec + // for the creator. 
+ db_creators.push(asset_creators::ActiveModel { asset_id: Set(id.clone()), creator: Set(vec![]), position: Set(0), @@ -582,14 +584,14 @@ where continue; } - db_creator_infos.push(asset_creators::ActiveModel { + db_creators.push(asset_creators::ActiveModel { asset_id: Set(id.clone()), - creator: Set(c.address.to_bytes().to_vec()), position: Set(i as i16), + creator: Set(c.address.to_bytes().to_vec()), share: Set(c.share as i32), - slot_updated: Set(Some(slot_updated)), verified: Set(c.verified), verified_seq: Set(Some(seq)), + slot_updated: Set(Some(slot_updated)), ..Default::default() }); @@ -598,7 +600,7 @@ where } // This statement will update base information for each creator. - let mut query = asset_creators::Entity::insert_many(db_creator_infos) + let mut query = asset_creators::Entity::insert_many(db_creators) .on_conflict( OnConflict::columns([ asset_creators::Column::AssetId, @@ -616,7 +618,7 @@ where .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE excluded.verified_seq >= asset_creators.verified_seq OR asset_creators.verified_seq IS NULL", + "{} WHERE (asset_creators.verified_seq != 0 AND excluded.verified_seq >= asset_creators.verified_seq) OR asset_creators.verified_seq IS NULL", query.sql ); diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index f0634de02..48c74498e 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -319,68 +319,77 @@ pub async fn save_v1_asset( if !creators.is_empty() { let mut creators_set = HashSet::new(); - let existing_creators: Vec = asset_creators::Entity::find() - .filter( - Condition::all() - .add(asset_creators::Column::AssetId.eq(id.to_vec())) - .add(asset_creators::Column::SlotUpdated.lt(slot_i)), - ) - .all(conn) - .await?; - if !existing_creators.is_empty() { - let mut db_creators = Vec::with_capacity(creators.len()); - for (i, c) in creators.into_iter().enumerate() { - if creators_set.contains(&c.address) { - continue; - } - db_creators.push(asset_creators::ActiveModel { - asset_id: Set(id.to_vec()), - creator: Set(c.address.to_bytes().to_vec()), - share: Set(c.share as i32), - verified: Set(c.verified), - slot_updated: Set(Some(slot_i)), - position: Set(i as i16), - ..Default::default() - }); - creators_set.insert(c.address); - } - let txn = conn.begin().await?; - asset_creators::Entity::delete_many() - .filter( - Condition::all() - .add(asset_creators::Column::AssetId.eq(id.to_vec())) - .add(asset_creators::Column::SlotUpdated.lt(slot_i)), - ) - .exec(&txn) - .await?; + // TODO: We may not need to care about existing creators. 
+ // let existing_creators: Vec = asset_creators::Entity::find() + // .filter( + // Condition::all() + // .add(asset_creators::Column::AssetId.eq(id.to_vec())) + // .add(asset_creators::Column::SlotUpdated.lt(slot_i)), + // ) + // .all(conn) + // .await?; - if !db_creators.is_empty() { - let mut query = asset_creators::Entity::insert_many(db_creators) - .on_conflict( - OnConflict::columns([ - asset_creators::Column::AssetId, - asset_creators::Column::Position, - ]) - .update_columns([ - asset_creators::Column::Creator, - asset_creators::Column::Share, - asset_creators::Column::Verified, - asset_creators::Column::SlotUpdated, - ]) - .to_owned(), - ) - .build(DbBackend::Postgres); - query.sql = format!( - "{} WHERE excluded.slot_updated > asset_creators.slot_updated", - query.sql - ); - txn.execute(query) - .await - .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; + //if !existing_creators.is_empty() { + + let mut db_creators = Vec::with_capacity(creators.len()); + for (i, c) in creators.into_iter().enumerate() { + if creators_set.contains(&c.address) { + continue; } - txn.commit().await?; + db_creators.push(asset_creators::ActiveModel { + asset_id: Set(id.to_vec()), + position: Set(i as i16), + creator: Set(c.address.to_bytes().to_vec()), + share: Set(c.share as i32), + verified: Set(c.verified), + verified_seq: Set(Some(0)), + slot_updated: Set(Some(slot_i)), + ..Default::default() + }); + creators_set.insert(c.address); + } + + let txn = conn.begin().await?; + + // TODO: Delete we don't need to delete existing as it won't truly work with concurrent + // processes anyways so we should filter out stale rows at the API level. + // asset_creators::Entity::delete_many() + // .filter( + // Condition::all() + // .add(asset_creators::Column::AssetId.eq(id.to_vec())) + // .add(asset_creators::Column::SlotUpdated.lt(slot_i)), + // ) + // .exec(&txn) + // .await?; + + if !db_creators.is_empty() { + let mut query = asset_creators::Entity::insert_many(db_creators) + .on_conflict( + OnConflict::columns([ + asset_creators::Column::AssetId, + asset_creators::Column::Position, + ]) + .update_columns([ + asset_creators::Column::Creator, + asset_creators::Column::Share, + asset_creators::Column::Verified, + asset_creators::Column::VerifiedSeq, + asset_creators::Column::SlotUpdated, + ]) + .to_owned(), + ) + .build(DbBackend::Postgres); + query.sql = format!( + "{} WHERE excluded.slot_updated > asset_creators.slot_updated", + query.sql + ); + txn.execute(query) + .await + .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; } + txn.commit().await?; + //} } if uri.is_empty() { warn!( From 2c14eeb33f1355fa3ca5a4b166db037d96a20798 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Mon, 18 Dec 2023 14:23:24 -0800 Subject: [PATCH 37/46] Rename asset_creators.verified_seq back to just regular seq --- .../src/dao/generated/asset_creators.rs | 6 +- .../src/dao/generated/sea_orm_active_enums.rs | 100 +++++++++--------- digital_asset_types/src/dao/scopes/asset.rs | 6 +- digital_asset_types/tests/common.rs | 2 +- ...01_add_seq_numbers_bgum_update_metadata.rs | 24 ----- .../src/program_transformers/bubblegum/db.rs | 12 +-- .../token_metadata/v1_asset.rs | 4 +- 7 files changed, 65 insertions(+), 89 deletions(-) diff --git a/digital_asset_types/src/dao/generated/asset_creators.rs b/digital_asset_types/src/dao/generated/asset_creators.rs index 346ed3b2e..21f34dcf7 100644 --- a/digital_asset_types/src/dao/generated/asset_creators.rs 
+++ b/digital_asset_types/src/dao/generated/asset_creators.rs @@ -19,7 +19,7 @@ pub struct Model { pub creator: Vec, pub share: i32, pub verified: bool, - pub verified_seq: Option, + pub seq: Option, pub slot_updated: Option, pub position: i16, } @@ -31,7 +31,7 @@ pub enum Column { Creator, Share, Verified, - VerifiedSeq, + Seq, SlotUpdated, Position, } @@ -62,7 +62,7 @@ impl ColumnTrait for Column { Self::Creator => ColumnType::Binary.def(), Self::Share => ColumnType::Integer.def(), Self::Verified => ColumnType::Boolean.def(), - Self::VerifiedSeq => ColumnType::BigInteger.def().null(), + Self::Seq => ColumnType::BigInteger.def().null(), Self::SlotUpdated => ColumnType::BigInteger.def().null(), Self::Position => ColumnType::SmallInteger.def(), } diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index e4253e267..90cc813eb 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -7,35 +7,17 @@ use serde::{Deserialize, Serialize}; #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "v1_account_attachments" -)] -pub enum V1AccountAttachments { - #[sea_orm(string_value = "edition")] - Edition, - #[sea_orm(string_value = "edition_marker")] - EditionMarker, - #[sea_orm(string_value = "master_edition_v1")] - MasterEditionV1, - #[sea_orm(string_value = "master_edition_v2")] - MasterEditionV2, - #[sea_orm(string_value = "unknown")] - Unknown, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm( - rs_type = "String", - db_type = "Enum", - enum_name = "specification_versions" + enum_name = "royalty_target_type" )] -pub enum SpecificationVersions { +pub enum RoyaltyTargetType { + #[sea_orm(string_value = "creators")] + Creators, + #[sea_orm(string_value = "fanout")] + Fanout, + #[sea_orm(string_value = "single")] + Single, #[sea_orm(string_value = "unknown")] Unknown, - #[sea_orm(string_value = "v0")] - V0, - #[sea_orm(string_value = "v1")] - V1, - #[sea_orm(string_value = "v2")] - V2, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] @@ -76,42 +58,60 @@ pub enum SpecificationAssetClass { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] -pub enum TaskStatus { - #[sea_orm(string_value = "failed")] - Failed, - #[sea_orm(string_value = "pending")] - Pending, - #[sea_orm(string_value = "running")] - Running, - #[sea_orm(string_value = "success")] - Success, +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] +pub enum OwnerType { + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "token")] + Token, + #[sea_orm(string_value = "unknown")] + Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", - enum_name = "royalty_target_type" + enum_name = "v1_account_attachments" )] -pub enum RoyaltyTargetType { - #[sea_orm(string_value = "creators")] - Creators, - #[sea_orm(string_value = "fanout")] - Fanout, - #[sea_orm(string_value = "single")] - Single, +pub enum V1AccountAttachments { + #[sea_orm(string_value = "edition")] + Edition, + #[sea_orm(string_value = "edition_marker")] + EditionMarker, + 
#[sea_orm(string_value = "master_edition_v1")] + MasterEditionV1, + #[sea_orm(string_value = "master_edition_v2")] + MasterEditionV2, #[sea_orm(string_value = "unknown")] Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] -pub enum OwnerType { - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "token")] - Token, +#[sea_orm( + rs_type = "String", + db_type = "Enum", + enum_name = "specification_versions" +)] +pub enum SpecificationVersions { #[sea_orm(string_value = "unknown")] Unknown, + #[sea_orm(string_value = "v0")] + V0, + #[sea_orm(string_value = "v1")] + V1, + #[sea_orm(string_value = "v2")] + V2, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] +pub enum TaskStatus { + #[sea_orm(string_value = "failed")] + Failed, + #[sea_orm(string_value = "pending")] + Pending, + #[sea_orm(string_value = "running")] + Running, + #[sea_orm(string_value = "success")] + Success, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] diff --git a/digital_asset_types/src/dao/scopes/asset.rs b/digital_asset_types/src/dao/scopes/asset.rs index 5a8fdd176..d0574f5dd 100644 --- a/digital_asset_types/src/dao/scopes/asset.rs +++ b/digital_asset_types/src/dao/scopes/asset.rs @@ -329,16 +329,16 @@ pub async fn get_by_id( // Any creators that are not the max seq are stale rows or updated by Token Metadata (seq = 0), so remove them. let seq = if creators .iter() - .map(|creator| creator.verified_seq) + .map(|creator| creator.seq) .any(|seq| seq == Some(0)) { Some(Some(0)) } else { - creators.iter().map(|creator| creator.verified_seq).max() + creators.iter().map(|creator| creator.seq).max() }; if let Some(seq) = seq { - creators.retain(|creator| creator.verified_seq == seq); + creators.retain(|creator| creator.seq == seq); } } diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index 3a5948a39..bf9c88ac1 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -184,7 +184,7 @@ pub fn create_asset_creator( creator, share, verified, - verified_seq: Some(0), + seq: Some(0), slot_updated: Some(0), position: 0, }, diff --git a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs index b9cd7075d..f0e9d8f40 100644 --- a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs +++ b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs @@ -10,18 +10,6 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager - .get_connection() - .execute(Statement::from_string( - DatabaseBackend::Postgres, - " - ALTER TABLE asset_creators - RENAME COLUMN seq to verified_seq; - " - .to_string(), - )) - .await?; - manager .alter_table( Table::alter() @@ -45,18 +33,6 @@ impl MigrationTrait for Migration { } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager - .get_connection() - .execute(Statement::from_string( - DatabaseBackend::Postgres, - " - ALTER TABLE asset_creators - RENAME COLUMN verified_seq to seq; - " - .to_string(), - )) - .await?; 
- manager .alter_table( Table::alter() diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 1cef480e4..8f10c986f 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -567,12 +567,12 @@ where // for the creator. db_creators.push(asset_creators::ActiveModel { asset_id: Set(id.clone()), - creator: Set(vec![]), position: Set(0), + creator: Set(vec![]), share: Set(100), - slot_updated: Set(Some(slot_updated)), verified: Set(false), - verified_seq: Set(Some(seq)), + slot_updated: Set(Some(slot_updated)), + seq: Set(Some(seq)), ..Default::default() }); } else { @@ -590,8 +590,8 @@ where creator: Set(c.address.to_bytes().to_vec()), share: Set(c.share as i32), verified: Set(c.verified), - verified_seq: Set(Some(seq)), slot_updated: Set(Some(slot_updated)), + seq: Set(Some(seq)), ..Default::default() }); @@ -610,15 +610,15 @@ where asset_creators::Column::Creator, asset_creators::Column::Share, asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, asset_creators::Column::SlotUpdated, + asset_creators::Column::Seq, ]) .to_owned(), ) .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE (asset_creators.verified_seq != 0 AND excluded.verified_seq >= asset_creators.verified_seq) OR asset_creators.verified_seq IS NULL", + "{} WHERE (asset_creators.seq != 0 AND excluded.seq >= asset_creators.seq) OR asset_creators.seq IS NULL", query.sql ); diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 48c74498e..2db9ac74b 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -343,8 +343,8 @@ pub async fn save_v1_asset( creator: Set(c.address.to_bytes().to_vec()), share: Set(c.share as i32), verified: Set(c.verified), - verified_seq: Set(Some(0)), slot_updated: Set(Some(slot_i)), + seq: Set(Some(0)), ..Default::default() }); creators_set.insert(c.address); @@ -374,8 +374,8 @@ pub async fn save_v1_asset( asset_creators::Column::Creator, asset_creators::Column::Share, asset_creators::Column::Verified, - asset_creators::Column::VerifiedSeq, asset_creators::Column::SlotUpdated, + asset_creators::Column::Seq, ]) .to_owned(), ) From 4c39726f73142ecc5409440eaf04d7edf3012b62 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 20 Dec 2023 10:40:25 -0800 Subject: [PATCH 38/46] Remove unneeded condition on asset_authority upsert --- .../src/program_transformers/bubblegum/db.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/db.rs b/nft_ingester/src/program_transformers/bubblegum/db.rs index 8f10c986f..6b12eea32 100644 --- a/nft_ingester/src/program_transformers/bubblegum/db.rs +++ b/nft_ingester/src/program_transformers/bubblegum/db.rs @@ -618,9 +618,9 @@ where .build(DbBackend::Postgres); query.sql = format!( - "{} WHERE (asset_creators.seq != 0 AND excluded.seq >= asset_creators.seq) OR asset_creators.seq IS NULL", - query.sql - ); + "{} WHERE (asset_creators.seq != 0 AND excluded.seq >= asset_creators.seq) OR asset_creators.seq IS NULL", + query.sql + ); txn.execute(query).await?; @@ -645,9 +645,10 @@ where ..Default::default() }; - // Do not attempt to modify any existing values: + // This value is only written during 
`mint_V1`` or after an item is decompressed, so do not + // attempt to modify any existing values: // `ON CONFLICT ('asset_id') DO NOTHING`. - let mut query = asset_authority::Entity::insert(model) + let query = asset_authority::Entity::insert(model) .on_conflict( OnConflict::columns([asset_authority::Column::AssetId]) .do_nothing() @@ -655,12 +656,6 @@ where ) .build(DbBackend::Postgres); - // Do not overwrite changes that happened after decompression (asset_authority.seq = 0). - query.sql = format!( - "{} WHERE asset_authority.seq != 0 OR asset_authority.seq IS NULL", - query.sql - ); - txn.execute(query) .await .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; From f669e764edf3b41ff918bba930171dc8eaedd113 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 20 Dec 2023 14:58:20 -0800 Subject: [PATCH 39/46] Apply stale creator filtering to all DAS API queries --- digital_asset_types/src/dao/scopes/asset.rs | 61 +++++++++++-------- .../token_metadata/v1_asset.rs | 26 -------- 2 files changed, 36 insertions(+), 51 deletions(-) diff --git a/digital_asset_types/src/dao/scopes/asset.rs b/digital_asset_types/src/dao/scopes/asset.rs index d0574f5dd..65b4d264f 100644 --- a/digital_asset_types/src/dao/scopes/asset.rs +++ b/digital_asset_types/src/dao/scopes/asset.rs @@ -228,12 +228,15 @@ pub async fn get_related_for_assets( } } - let creators = asset_creators::Entity::find() + let mut creators = asset_creators::Entity::find() .filter(asset_creators::Column::AssetId.is_in(ids.clone())) .order_by_asc(asset_creators::Column::AssetId) .order_by_asc(asset_creators::Column::Position) .all(conn) .await?; + + filter_out_stale_creators(&mut creators); + for c in creators.into_iter() { if let Some(asset) = assets_map.get_mut(&c.asset_id) { asset.creators.push(c); @@ -315,18 +318,47 @@ pub async fn get_by_id( .all(conn) .await?; + filter_out_stale_creators(&mut creators); + + let grouping: Vec = asset_grouping::Entity::find() + .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) + .filter(asset_grouping::Column::GroupValue.is_not_null()) + .filter( + Condition::any() + .add(asset_grouping::Column::Verified.eq(true)) + // Older versions of the indexer did not have the verified flag. A group would be present if and only if it was verified. + // Therefore if verified is null, we can assume that the group is verified. + .add(asset_grouping::Column::Verified.is_null()), + ) + .order_by_asc(asset_grouping::Column::AssetId) + .all(conn) + .await?; + Ok(FullAsset { + asset, + data, + authorities, + creators, + groups: grouping, + }) +} + +fn filter_out_stale_creators(creators: &mut Vec) { // If the first creator is an empty Vec, it means the creator array is empty (which is allowed - // in Bubblegum). + // for compressed assets in Bubblegum). if !creators.is_empty() && creators[0].creator.is_empty() { creators.clear(); } else { - // Any creators that are not the max slot_updated value are stale rows, so remove them. + // For both compressed and non-compressed assets, any creators that do not have the max + // `slot_updated` value are stale and should be removed. let max_slot_updated = creators.iter().map(|creator| creator.slot_updated).max(); if let Some(max_slot_updated) = max_slot_updated { creators.retain(|creator| creator.slot_updated == max_slot_updated); } - // Any creators that are not the max seq are stale rows or updated by Token Metadata (seq = 0), so remove them. 
+ // For compressed assets, any creators that do not have the max `seq` value are stale and + // should be removed. A `seq` value of 0 indicates a decompressed or never-compressed + // asset. So if a `seq` value of 0 is present, then all creators with nonzero `seq` values + // are stale and should be removed. let seq = if creators .iter() .map(|creator| creator.seq) @@ -341,25 +373,4 @@ pub async fn get_by_id( creators.retain(|creator| creator.seq == seq); } } - - let grouping: Vec = asset_grouping::Entity::find() - .filter(asset_grouping::Column::AssetId.eq(asset.id.clone())) - .filter(asset_grouping::Column::GroupValue.is_not_null()) - .filter( - Condition::any() - .add(asset_grouping::Column::Verified.eq(true)) - // Older versions of the indexer did not have the verified flag. A group would be present if and only if it was verified. - // Therefore if verified is null, we can assume that the group is verified. - .add(asset_grouping::Column::Verified.is_null()), - ) - .order_by_asc(asset_grouping::Column::AssetId) - .all(conn) - .await?; - Ok(FullAsset { - asset, - data, - authorities, - creators, - groups: grouping, - }) } diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 2db9ac74b..968d2f626 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -319,19 +319,6 @@ pub async fn save_v1_asset( if !creators.is_empty() { let mut creators_set = HashSet::new(); - - // TODO: We may not need to care about existing creators. - // let existing_creators: Vec = asset_creators::Entity::find() - // .filter( - // Condition::all() - // .add(asset_creators::Column::AssetId.eq(id.to_vec())) - // .add(asset_creators::Column::SlotUpdated.lt(slot_i)), - // ) - // .all(conn) - // .await?; - - //if !existing_creators.is_empty() { - let mut db_creators = Vec::with_capacity(creators.len()); for (i, c) in creators.into_iter().enumerate() { if creators_set.contains(&c.address) { @@ -351,18 +338,6 @@ pub async fn save_v1_asset( } let txn = conn.begin().await?; - - // TODO: Delete we don't need to delete existing as it won't truly work with concurrent - // processes anyways so we should filter out stale rows at the API level. 
- // asset_creators::Entity::delete_many() - // .filter( - // Condition::all() - // .add(asset_creators::Column::AssetId.eq(id.to_vec())) - // .add(asset_creators::Column::SlotUpdated.lt(slot_i)), - // ) - // .exec(&txn) - // .await?; - if !db_creators.is_empty() { let mut query = asset_creators::Entity::insert_many(db_creators) .on_conflict( @@ -389,7 +364,6 @@ pub async fn save_v1_asset( .map_err(|db_err| IngesterError::AssetIndexError(db_err.to_string()))?; } txn.commit().await?; - //} } if uri.is_empty() { warn!( From 9155fa12d8d69bfbc65f52e422cbf85f08df59b2 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 20 Dec 2023 22:36:46 -0800 Subject: [PATCH 40/46] Use latest blockbuster beta release --- Cargo.lock | 5 +++-- das_api/Cargo.toml | 2 +- digital_asset_types/Cargo.toml | 2 +- nft_ingester/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index af8da0e7e..94b48fe01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,8 +881,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blockbuster" -version = "0.9.0-beta.3" -source = "git+https://github.com/metaplex-foundation/blockbuster.git?rev=f9194397c75017808eb2d7b6f0229f43238cd317#f9194397c75017808eb2d7b6f0229f43238cd317" +version = "0.9.0-beta.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b3563251837f2d8da4ea723d3dd8a31cf8919b5dc43344953d209ebcadd539" dependencies = [ "anchor-lang", "async-trait", diff --git a/das_api/Cargo.toml b/das_api/Cargo.toml index 7c2e94cf0..dcd6af7b3 100644 --- a/das_api/Cargo.toml +++ b/das_api/Cargo.toml @@ -33,7 +33,7 @@ schemars = "0.8.6" schemars_derive = "0.8.6" open-rpc-derive = { version = "0.0.4"} open-rpc-schema = { version = "0.0.4"} -blockbuster = {git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "f9194397c75017808eb2d7b6f0229f43238cd317"} +blockbuster = "=0.9.0-beta.4" anchor-lang = "0.28.0" mpl-token-metadata = { version = "=2.0.0-beta.1", features = ["serde-feature"] } mpl-candy-machine-core = { version = "2.0.1", features = ["no-entrypoint"] } diff --git a/digital_asset_types/Cargo.toml b/digital_asset_types/Cargo.toml index 223beacff..92f820278 100644 --- a/digital_asset_types/Cargo.toml +++ b/digital_asset_types/Cargo.toml @@ -18,7 +18,7 @@ solana-sdk = "~1.16.16" num-traits = "0.2.15" num-derive = "0.3.3" thiserror = "1.0.31" -blockbuster = {git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "f9194397c75017808eb2d7b6f0229f43238cd317"} +blockbuster = "=0.9.0-beta.4" jsonpath_lib = "0.3.0" mime_guess = "2.0.4" url = "2.3.1" diff --git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index 594c440eb..985aae4eb 100644 --- a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -35,7 +35,7 @@ spl-concurrent-merkle-tree = "0.2.0" uuid = "1.0.0" async-trait = "0.1.53" num-traits = "0.2.15" -blockbuster = {git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "f9194397c75017808eb2d7b6f0229f43238cd317"} +blockbuster = "=0.9.0-beta.4" figment = { version = "0.10.6", features = ["env", "toml", "yaml"] } cadence = "0.29.0" cadence-macros = "0.29.0" From 6ccd27a0121744efdba4d26de92a4008b79829e9 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Wed, 20 Dec 2023 23:55:42 -0800 Subject: [PATCH 41/46] Remove download_metadata_seq and add URI match check instead --- .../program_transformers/bubblegum/mint_v1.rs | 
3 +- .../bubblegum/update_metadata.rs | 1 - .../token_metadata/v1_asset.rs | 1 - nft_ingester/src/tasks/common/mod.rs | 58 +++++++++++++------ tools/bgtask_creator/src/main.rs | 1 - 5 files changed, 41 insertions(+), 23 deletions(-) diff --git a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs index cc78d09d7..643b25bf1 100644 --- a/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs +++ b/nft_ingester/src/program_transformers/bubblegum/mint_v1.rs @@ -206,8 +206,7 @@ where let mut task = DownloadMetadata { asset_data_id: id_bytes.to_vec(), - uri: metadata.uri.clone(), - seq: seq as i64, + uri, created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); diff --git a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs index 988ca23f2..393cdbbd2 100644 --- a/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs +++ b/nft_ingester/src/program_transformers/bubblegum/update_metadata.rs @@ -192,7 +192,6 @@ where let mut task = DownloadMetadata { asset_data_id: id_bytes.to_vec(), uri, - seq: seq as i64, created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 968d2f626..6ee9c64ae 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -376,7 +376,6 @@ pub async fn save_v1_asset( let mut task = DownloadMetadata { asset_data_id: id.to_vec(), uri, - seq: 0, created_at: Some(Utc::now().naive_utc()), }; task.sanitize(); diff --git a/nft_ingester/src/tasks/common/mod.rs b/nft_ingester/src/tasks/common/mod.rs index 3b533c2f7..42e2d5230 100644 --- a/nft_ingester/src/tasks/common/mod.rs +++ b/nft_ingester/src/tasks/common/mod.rs @@ -18,7 +18,6 @@ const TASK_NAME: &str = "DownloadMetadata"; pub struct DownloadMetadata { pub asset_data_id: Vec, pub uri: String, - pub seq: i64, #[serde(skip_serializing)] pub created_at: Option, } @@ -107,33 +106,56 @@ impl BgTask for DownloadMetadataTask { } _ => serde_json::Value::String("Invalid Uri".to_string()), //TODO -> enumize this. }; + + match asset_data::Entity::find_by_id(download_metadata.asset_data_id.clone()) + .select_only() + .column(asset_data::Column::MetadataUrl) + .one(db) + .await? 
+ { + Some(asset) => { + if asset.metadata_url != download_metadata.uri { + debug!( + "skipping download metadata of old URI for {:?}", + bs58::encode(download_metadata.asset_data_id.clone()).into_string() + ); + return Ok(()); + } + } + None => { + return Err(IngesterError::UnrecoverableTaskError(format!( + "failed to find URI in database for {:?}", + bs58::encode(download_metadata.asset_data_id.clone()).into_string() + ))); + } + } + let model = asset_data::ActiveModel { id: Unchanged(download_metadata.asset_data_id.clone()), metadata: Set(body), reindex: Set(Some(false)), - download_metadata_seq: Set(Some(download_metadata.seq)), ..Default::default() }; debug!( "download metadata for {:?}", bs58::encode(download_metadata.asset_data_id.clone()).into_string() ); - let mut query = asset_data::Entity::update(model) - .filter(asset_data::Column::Id.eq(download_metadata.asset_data_id.clone())); - if download_metadata.seq != 0 { - query = query.filter( - Condition::any() - .add(asset_data::Column::DownloadMetadataSeq.lte(download_metadata.seq)) - .add(asset_data::Column::DownloadMetadataSeq.is_null()), - ); - } - query.exec(db).await.map(|_| ()).map_err(|db| { - IngesterError::TaskManagerError(format!( - "Database error with {}, error: {}", - self.name(), - db - )) - })?; + asset_data::Entity::update(model) + .filter(asset_data::Column::Id.eq(download_metadata.asset_data_id.clone())) + .filter( + Condition::all() + .add(asset_data::Column::MetadataUrl.eq(download_metadata.uri.clone())), + ) + .exec(db) + .await + .map(|_| ()) + .map_err(|db| { + IngesterError::TaskManagerError(format!( + "Database error with {}, error: {}", + self.name(), + db + )) + })?; if meta_url.is_err() { return Err(IngesterError::UnrecoverableTaskError(format!( diff --git a/tools/bgtask_creator/src/main.rs b/tools/bgtask_creator/src/main.rs index 17d1bba0c..a08dd87d1 100644 --- a/tools/bgtask_creator/src/main.rs +++ b/tools/bgtask_creator/src/main.rs @@ -322,7 +322,6 @@ WHERE let mut task = DownloadMetadata { asset_data_id: asset.id, uri: asset.metadata_url, - seq: 0, created_at: Some(Utc::now().naive_utc()), }; From ef1f8112757b9308a874a646795b5ff2abba9110 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 21 Dec 2023 01:09:48 -0800 Subject: [PATCH 42/46] Fix task URI initial query --- nft_ingester/src/tasks/common/mod.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nft_ingester/src/tasks/common/mod.rs b/nft_ingester/src/tasks/common/mod.rs index 42e2d5230..c10fd9ee3 100644 --- a/nft_ingester/src/tasks/common/mod.rs +++ b/nft_ingester/src/tasks/common/mod.rs @@ -75,6 +75,11 @@ impl DownloadMetadataTask { } } +#[derive(FromQueryResult, Debug, Default, Clone, Eq, PartialEq)] +struct MetadataUrl { + pub metadata_url: String, +} + #[async_trait] impl BgTask for DownloadMetadataTask { fn name(&self) -> &'static str { @@ -107,12 +112,12 @@ impl BgTask for DownloadMetadataTask { _ => serde_json::Value::String("Invalid Uri".to_string()), //TODO -> enumize this. }; - match asset_data::Entity::find_by_id(download_metadata.asset_data_id.clone()) + let query = asset_data::Entity::find_by_id(download_metadata.asset_data_id.clone()) .select_only() .column(asset_data::Column::MetadataUrl) - .one(db) - .await? - { + .build(DbBackend::Postgres); + + match MetadataUrl::find_by_statement(query).one(db).await? 
{ Some(asset) => { if asset.metadata_url != download_metadata.uri { debug!( From 3dc3557556dd813d44faf93cf95be962ef8b2167 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 21 Dec 2023 01:42:11 -0800 Subject: [PATCH 43/46] Regenerate Sea ORM types without download_metadata_seq --- .../src/dao/generated/asset_data.rs | 3 - .../src/dao/generated/sea_orm_active_enums.rs | 96 +++++++++---------- digital_asset_types/tests/common.rs | 1 - digital_asset_types/tests/json_parsing.rs | 1 - ...01_add_seq_numbers_bgum_update_metadata.rs | 2 - .../token_metadata/v1_asset.rs | 2 - 6 files changed, 48 insertions(+), 57 deletions(-) diff --git a/digital_asset_types/src/dao/generated/asset_data.rs b/digital_asset_types/src/dao/generated/asset_data.rs index 17bbc2e43..f6bf697b2 100644 --- a/digital_asset_types/src/dao/generated/asset_data.rs +++ b/digital_asset_types/src/dao/generated/asset_data.rs @@ -27,7 +27,6 @@ pub struct Model { pub raw_name: Option>, pub raw_symbol: Option>, pub base_info_seq: Option, - pub download_metadata_seq: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -43,7 +42,6 @@ pub enum Column { RawName, RawSymbol, BaseInfoSeq, - DownloadMetadataSeq, } #[derive(Copy, Clone, Debug, EnumIter, DerivePrimaryKey)] @@ -78,7 +76,6 @@ impl ColumnTrait for Column { Self::RawName => ColumnType::Binary.def().null(), Self::RawSymbol => ColumnType::Binary.def().null(), Self::BaseInfoSeq => ColumnType::BigInteger.def().null(), - Self::DownloadMetadataSeq => ColumnType::BigInteger.def().null(), } } } diff --git a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs index 90cc813eb..11362ae1c 100644 --- a/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs +++ b/digital_asset_types/src/dao/generated/sea_orm_active_enums.rs @@ -3,6 +3,34 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] +pub enum TaskStatus { + #[sea_orm(string_value = "failed")] + Failed, + #[sea_orm(string_value = "pending")] + Pending, + #[sea_orm(string_value = "running")] + Running, + #[sea_orm(string_value = "success")] + Success, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm( + rs_type = "String", + db_type = "Enum", + enum_name = "specification_versions" +)] +pub enum SpecificationVersions { + #[sea_orm(string_value = "unknown")] + Unknown, + #[sea_orm(string_value = "v0")] + V0, + #[sea_orm(string_value = "v1")] + V1, + #[sea_orm(string_value = "v2")] + V2, +} #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", @@ -20,6 +48,16 @@ pub enum RoyaltyTargetType { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] +pub enum OwnerType { + #[sea_orm(string_value = "single")] + Single, + #[sea_orm(string_value = "token")] + Token, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "mutability")] pub enum Mutability { #[sea_orm(string_value = "immutable")] @@ -30,6 +68,16 @@ pub enum Mutability { Unknown, } 
#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] +#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] +pub enum ChainMutability { + #[sea_orm(string_value = "immutable")] + Immutable, + #[sea_orm(string_value = "mutable")] + Mutable, + #[sea_orm(string_value = "unknown")] + Unknown, +} +#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", @@ -58,16 +106,6 @@ pub enum SpecificationAssetClass { Unknown, } #[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "owner_type")] -pub enum OwnerType { - #[sea_orm(string_value = "single")] - Single, - #[sea_orm(string_value = "token")] - Token, - #[sea_orm(string_value = "unknown")] - Unknown, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] #[sea_orm( rs_type = "String", db_type = "Enum", @@ -85,41 +123,3 @@ pub enum V1AccountAttachments { #[sea_orm(string_value = "unknown")] Unknown, } -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm( - rs_type = "String", - db_type = "Enum", - enum_name = "specification_versions" -)] -pub enum SpecificationVersions { - #[sea_orm(string_value = "unknown")] - Unknown, - #[sea_orm(string_value = "v0")] - V0, - #[sea_orm(string_value = "v1")] - V1, - #[sea_orm(string_value = "v2")] - V2, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "task_status")] -pub enum TaskStatus { - #[sea_orm(string_value = "failed")] - Failed, - #[sea_orm(string_value = "pending")] - Pending, - #[sea_orm(string_value = "running")] - Running, - #[sea_orm(string_value = "success")] - Success, -} -#[derive(Debug, Clone, PartialEq, EnumIter, DeriveActiveEnum, Serialize, Deserialize)] -#[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "chain_mutability")] -pub enum ChainMutability { - #[sea_orm(string_value = "immutable")] - Immutable, - #[sea_orm(string_value = "mutable")] - Mutable, - #[sea_orm(string_value = "unknown")] - Unknown, -} diff --git a/digital_asset_types/tests/common.rs b/digital_asset_types/tests/common.rs index bf9c88ac1..e486ef289 100644 --- a/digital_asset_types/tests/common.rs +++ b/digital_asset_types/tests/common.rs @@ -86,7 +86,6 @@ pub fn create_asset_data( raw_name: Some(metadata.name.into_bytes().to_vec().clone()), raw_symbol: Some(metadata.symbol.into_bytes().to_vec().clone()), base_info_seq: Some(0), - download_metadata_seq: Some(0), }, ) } diff --git a/digital_asset_types/tests/json_parsing.rs b/digital_asset_types/tests/json_parsing.rs index c10ca12e3..b689010ec 100644 --- a/digital_asset_types/tests/json_parsing.rs +++ b/digital_asset_types/tests/json_parsing.rs @@ -37,7 +37,6 @@ pub async fn parse_onchain_json(json: serde_json::Value) -> Content { raw_name: Some(String::from("Handalf").into_bytes().to_vec()), raw_symbol: Some(String::from("").into_bytes().to_vec()), base_info_seq: Some(0), - download_metadata_seq: Some(0), }; v1_content_from_json(&asset_data).unwrap() diff --git a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs index f0e9d8f40..78e71b21e 100644 --- a/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs +++ 
b/migration/src/m20231019_120101_add_seq_numbers_bgum_update_metadata.rs @@ -15,7 +15,6 @@ impl MigrationTrait for Migration { Table::alter() .table(asset_data::Entity) .add_column(ColumnDef::new(Alias::new("base_info_seq")).big_integer()) - .add_column(ColumnDef::new(Alias::new("download_metadata_seq")).big_integer()) .to_owned(), ) .await?; @@ -38,7 +37,6 @@ impl MigrationTrait for Migration { Table::alter() .table(asset_data::Entity) .drop_column(Alias::new("base_info_seq")) - .drop_column(Alias::new("download_metadata_seq")) .to_owned(), ) .await?; diff --git a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs index 6ee9c64ae..74a52f29f 100644 --- a/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs +++ b/nft_ingester/src/program_transformers/token_metadata/v1_asset.rs @@ -150,7 +150,6 @@ pub async fn save_v1_asset( raw_name: Set(Some(name.to_vec())), raw_symbol: Set(Some(symbol.to_vec())), base_info_seq: Set(Some(0)), - download_metadata_seq: Set(Some(0)), }; let txn = conn.begin().await?; let mut query = asset_data::Entity::insert(asset_data_model) @@ -166,7 +165,6 @@ pub async fn save_v1_asset( asset_data::Column::RawName, asset_data::Column::RawSymbol, asset_data::Column::BaseInfoSeq, - asset_data::Column::DownloadMetadataSeq, ]) .to_owned(), ) From 896af3d9fc71135fd08a879727d848cd23030112 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 21 Dec 2023 01:57:08 -0800 Subject: [PATCH 44/46] asset_grouping.verified option remove --- digital_asset_types/src/dapi/common/asset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/digital_asset_types/src/dapi/common/asset.rs b/digital_asset_types/src/dapi/common/asset.rs index 2377ddf32..a67a2caaf 100644 --- a/digital_asset_types/src/dapi/common/asset.rs +++ b/digital_asset_types/src/dapi/common/asset.rs @@ -292,7 +292,7 @@ pub fn to_grouping( .filter_map(|model| { let verified = match options.show_unverified_collections { // Null verified indicates legacy data, meaning it is verified. - true => Some(model.verified.unwrap_or(true)), + true => Some(model.verified), false => None, }; // Filter out items where group_value is None. 
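Taken together, the preceding patches drop the `download_metadata_seq` column and instead guard the background metadata write with the asset's current `metadata_url`: the task reads only that column back through a `FromQueryResult` partial select, returns early when the task's URI is stale, and then repeats the same URI check as a filter on the `UPDATE`. Below is a condensed sketch of that guard for reference; it assumes the crate's `asset_data` entity and a live SeaORM `DatabaseConnection` are in scope, and the function name `write_metadata_if_current` is illustrative rather than taken from the codebase.

```rust
use digital_asset_types::dao::asset_data;
use sea_orm::{
    ActiveValue::{Set, Unchanged},
    ColumnTrait, Condition, DatabaseConnection, DbBackend, DbErr, EntityTrait, FromQueryResult,
    QueryFilter, QuerySelect, QueryTrait,
};

// Partial-select target: only metadata_url is read back for the asset row.
#[derive(FromQueryResult, Debug)]
struct MetadataUrl {
    metadata_url: String,
}

/// Write downloaded JSON only if the asset row still points at the URI this
/// task was created for; stale tasks become no-ops instead of clobbering
/// metadata written for a newer UpdateMetadata.
async fn write_metadata_if_current(
    db: &DatabaseConnection,
    asset_data_id: Vec<u8>,
    uri: String,
    body: serde_json::Value,
) -> Result<bool, DbErr> {
    // 1. Read just the current metadata_url for this asset id.
    let stmt = asset_data::Entity::find_by_id(asset_data_id.clone())
        .select_only()
        .column(asset_data::Column::MetadataUrl)
        .build(DbBackend::Postgres);
    match MetadataUrl::find_by_statement(stmt).one(db).await? {
        Some(row) if row.metadata_url == uri => {}
        // Row missing, or the URI was already superseded: skip the write.
        _ => return Ok(false),
    }

    // 2. Conditional update: the URI check is repeated in the WHERE clause so a
    //    row changed between steps 1 and 2 is not overwritten.
    let model = asset_data::ActiveModel {
        id: Unchanged(asset_data_id.clone()),
        metadata: Set(body),
        reindex: Set(Some(false)),
        ..Default::default()
    };
    asset_data::Entity::update(model)
        .filter(asset_data::Column::Id.eq(asset_data_id))
        .filter(Condition::all().add(asset_data::Column::MetadataUrl.eq(uri)))
        .exec(db)
        .await?;

    Ok(true)
}
```

The design point carried by these patches is that URL equality, not a sequence number, is what makes a pending download task valid, which is why the seq field could be removed from `DownloadMetadata` entirely.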
From 36df8e5b54d6ac1ee14da9e4823c7ed87eeeffb3 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Thu, 21 Dec 2023 12:35:41 -0800 Subject: [PATCH 45/46] Fix filtering for getAssetsByCreator --- digital_asset_types/src/dao/scopes/asset.rs | 53 ++++++++++++++------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/digital_asset_types/src/dao/scopes/asset.rs b/digital_asset_types/src/dao/scopes/asset.rs index 213bb5726..ebda56afe 100644 --- a/digital_asset_types/src/dao/scopes/asset.rs +++ b/digital_asset_types/src/dao/scopes/asset.rs @@ -58,7 +58,7 @@ pub async fn get_by_creator( show_unverified_collections: bool, ) -> Result, DbErr> { let mut condition = Condition::all() - .add(asset_creators::Column::Creator.eq(creator)) + .add(asset_creators::Column::Creator.eq(creator.clone())) .add(asset::Column::Supply.gt(0)); if only_verified { condition = condition.add(asset_creators::Column::Verified.eq(true)); @@ -72,6 +72,7 @@ pub async fn get_by_creator( pagination, limit, show_unverified_collections, + Some(creator), ) .await } @@ -130,6 +131,7 @@ pub async fn get_by_grouping( pagination, limit, show_unverified_collections, + None, ) .await } @@ -203,6 +205,7 @@ pub async fn get_by_authority( pagination, limit, show_unverified_collections, + None, ) .await } @@ -216,6 +219,7 @@ async fn get_by_related_condition( pagination: &Pagination, limit: u64, show_unverified_collections: bool, + required_creator: Option>, ) -> Result, DbErr> where E: RelationTrait, @@ -233,13 +237,14 @@ where let assets = paginate(pagination, limit, stmt, sort_direction, asset::Column::Id) .all(conn) .await?; - get_related_for_assets(conn, assets, show_unverified_collections).await + get_related_for_assets(conn, assets, show_unverified_collections, required_creator).await } pub async fn get_related_for_assets( conn: &impl ConnectionTrait, assets: Vec, show_unverified_collections: bool, + required_creator: Option>, ) -> Result, DbErr> { let asset_ids = assets.iter().map(|a| a.id.clone()).collect::>(); @@ -272,32 +277,47 @@ pub async fn get_related_for_assets( acc }); let ids = assets_map.keys().cloned().collect::>(); - let authorities = asset_authority::Entity::find() - .filter(asset_authority::Column::AssetId.is_in(ids.clone())) - .order_by_asc(asset_authority::Column::AssetId) - .all(conn) - .await?; - for a in authorities.into_iter() { - if let Some(asset) = assets_map.get_mut(&a.asset_id) { - asset.authorities.push(a); - } - } - let mut creators = asset_creators::Entity::find() + // Get all creators for all assets in `assets_map``. + let creators = asset_creators::Entity::find() .filter(asset_creators::Column::AssetId.is_in(ids.clone())) .order_by_asc(asset_creators::Column::AssetId) .order_by_asc(asset_creators::Column::Position) .all(conn) .await?; - filter_out_stale_creators(&mut creators); - + // Add the creators to the assets in `asset_map``. for c in creators.into_iter() { if let Some(asset) = assets_map.get_mut(&c.asset_id) { asset.creators.push(c); } } + // Filter out stale creators from each asset. + for (_id, asset) in assets_map.iter_mut() { + filter_out_stale_creators(&mut asset.creators); + } + + // If we passed in a required creator, we make sure that creator is still in the creator array + // of each asset after stale creators were filtered out above. Only retain those assets that + // have the required creator. This corrects `getAssetByCreators` from returning assets for + // which the required creator is no longer in the creator array. 
+ if let Some(required) = required_creator { + assets_map.retain(|_id, asset| asset.creators.iter().any(|c| c.creator == required)); + } + + let ids = assets_map.keys().cloned().collect::>(); + let authorities = asset_authority::Entity::find() + .filter(asset_authority::Column::AssetId.is_in(ids.clone())) + .order_by_asc(asset_authority::Column::AssetId) + .all(conn) + .await?; + for a in authorities.into_iter() { + if let Some(asset) = assets_map.get_mut(&a.asset_id) { + asset.authorities.push(a); + } + } + let cond = if show_unverified_collections { Condition::all() } else { @@ -348,7 +368,8 @@ pub async fn get_assets_by_condition( let assets = paginate(pagination, limit, stmt, sort_direction, asset::Column::Id) .all(conn) .await?; - let full_assets = get_related_for_assets(conn, assets, show_unverified_collections).await?; + let full_assets = + get_related_for_assets(conn, assets, show_unverified_collections, None).await?; Ok(full_assets) } From c2875df566f0fb88d16b1baa9b0b17718f48b467 Mon Sep 17 00:00:00 2001 From: Michael Danenberg <56533526+danenbm@users.noreply.github.com> Date: Tue, 2 Jan 2024 15:19:59 -0800 Subject: [PATCH 46/46] Update to blockbuster 0.9.0-beta.5 and mpl-bubblegum 1.0.1-beta.4 --- Cargo.lock | 8 ++++---- das_api/Cargo.toml | 4 ++-- digital_asset_types/Cargo.toml | 2 +- nft_ingester/Cargo.toml | 4 ++-- tools/fetch_trees/Cargo.toml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94b48fe01..ba8f0af05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,9 +881,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blockbuster" -version = "0.9.0-beta.4" +version = "0.9.0-beta.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b3563251837f2d8da4ea723d3dd8a31cf8919b5dc43344953d209ebcadd539" +checksum = "a32a0edd58b3aaaf55684bc9ad82e012b3345cb46a25fcae507b3b9034b83d44" dependencies = [ "anchor-lang", "async-trait", @@ -2882,9 +2882,9 @@ dependencies = [ [[package]] name = "mpl-bubblegum" -version = "1.0.1-beta.3" +version = "1.0.1-beta.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29346f26192bb7f73330196fde4c8cfb35675bf1a4b026cd088f7ca8fda69f3f" +checksum = "e59d102fe6f8b063a06a226874ea815b269316390ce3bf991b29ea9c54ccc467" dependencies = [ "borsh 0.10.3", "kaigan", diff --git a/das_api/Cargo.toml b/das_api/Cargo.toml index dcd6af7b3..533e6cc43 100644 --- a/das_api/Cargo.toml +++ b/das_api/Cargo.toml @@ -33,9 +33,9 @@ schemars = "0.8.6" schemars_derive = "0.8.6" open-rpc-derive = { version = "0.0.4"} open-rpc-schema = { version = "0.0.4"} -blockbuster = "=0.9.0-beta.4" +blockbuster = "=0.9.0-beta.5" anchor-lang = "0.28.0" mpl-token-metadata = { version = "=2.0.0-beta.1", features = ["serde-feature"] } mpl-candy-machine-core = { version = "2.0.1", features = ["no-entrypoint"] } -mpl-bubblegum = "1.0.1-beta.3" +mpl-bubblegum = "=1.0.1-beta.4" mpl-candy-guard = { version = "2.0.0", features = ["no-entrypoint"] } diff --git a/digital_asset_types/Cargo.toml b/digital_asset_types/Cargo.toml index 92f820278..3464035fa 100644 --- a/digital_asset_types/Cargo.toml +++ b/digital_asset_types/Cargo.toml @@ -18,7 +18,7 @@ solana-sdk = "~1.16.16" num-traits = "0.2.15" num-derive = "0.3.3" thiserror = "1.0.31" -blockbuster = "=0.9.0-beta.4" +blockbuster = "=0.9.0-beta.5" jsonpath_lib = "0.3.0" mime_guess = "2.0.4" url = "2.3.1" diff --git a/nft_ingester/Cargo.toml b/nft_ingester/Cargo.toml index 985aae4eb..9f81313bf 100644 --- 
a/nft_ingester/Cargo.toml +++ b/nft_ingester/Cargo.toml @@ -29,13 +29,13 @@ flatbuffers = "23.1.21" lazy_static = "1.4.0" regex = "1.5.5" digital_asset_types = { path = "../digital_asset_types", features = ["json_types", "sql_types"] } -mpl-bubblegum = "1.0.1-beta.3" +mpl-bubblegum = "=1.0.1-beta.4" spl-account-compression = { version = "0.2.0", features = ["no-entrypoint"] } spl-concurrent-merkle-tree = "0.2.0" uuid = "1.0.0" async-trait = "0.1.53" num-traits = "0.2.15" -blockbuster = "=0.9.0-beta.4" +blockbuster = "=0.9.0-beta.5" figment = { version = "0.10.6", features = ["env", "toml", "yaml"] } cadence = "0.29.0" cadence-macros = "0.29.0" diff --git a/tools/fetch_trees/Cargo.toml b/tools/fetch_trees/Cargo.toml index 4035014ca..9461e85b5 100644 --- a/tools/fetch_trees/Cargo.toml +++ b/tools/fetch_trees/Cargo.toml @@ -9,7 +9,7 @@ anyhow = "1.0.70" async-trait = "0.1.53" borsh = "~0.10.3" clap = { version = "4.2.2", features = ["derive", "cargo"] } -mpl-bubblegum = "1.0.1-beta.3" +mpl-bubblegum = "=1.0.1-beta.4" solana-account-decoder = "~1.16.16" solana-client = "~1.16.16" solana-sdk = "~1.16.16"
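Returning to the getAssetsByCreator fix (PATCH 45/46 above): the key ordering is that the required-creator check must run after stale creator rows have been filtered out of each asset, otherwise an asset whose creator array was replaced by an update would still be returned for its old creator. The toy, self-contained sketch below illustrates that retain step with simplified stand-in types rather than the generated SeaORM models; the names `CreatorRow`, `FullAsset`, and `retain_required_creator` are hypothetical.

```rust
use std::collections::HashMap;

// Toy stand-ins for the DAO rows; only the fields needed for the retain step.
#[derive(Clone, Debug)]
struct CreatorRow {
    creator: Vec<u8>,
}

#[derive(Debug)]
struct FullAsset {
    creators: Vec<CreatorRow>,
}

/// Keep only assets whose creator list (already stripped of stale creator rows)
/// still contains the creator the caller originally queried by.
fn retain_required_creator(
    assets_map: &mut HashMap<Vec<u8>, FullAsset>,
    required_creator: Option<Vec<u8>>,
) {
    if let Some(required) = required_creator {
        assets_map.retain(|_id, asset| asset.creators.iter().any(|c| c.creator == required));
    }
}

fn main() {
    let creator_a = vec![1u8];
    let creator_b = vec![2u8];

    let mut assets_map = HashMap::new();
    // Asset 1 still lists creator A after stale-creator filtering.
    assets_map.insert(
        vec![10u8],
        FullAsset { creators: vec![CreatorRow { creator: creator_a.clone() }] },
    );
    // Asset 2 only lists creator B, e.g. because its creator array was replaced
    // and the old rows were dropped as stale.
    assets_map.insert(
        vec![11u8],
        FullAsset { creators: vec![CreatorRow { creator: creator_b }] },
    );

    retain_required_creator(&mut assets_map, Some(creator_a));
    assert_eq!(assets_map.len(), 1); // only asset 1 survives a query by creator A
}
```

Threading `required_creator` through as an `Option` matches how the patch passes `Some(creator)` only from `get_by_creator`, while the grouping and authority paths pass `None` and keep their previous behavior.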