Skip to content
This repository has been archived by the owner on Jan 22, 2025. It is now read-only.

Commit

Permalink
records-txs
Browse files Browse the repository at this point in the history
Signed-off-by: Sean Young <[email protected]>
  • Loading branch information
seanyoung committed Mar 1, 2024
1 parent 9bb59aa commit a05ffdd
Show file tree
Hide file tree
Showing 8 changed files with 149 additions and 5 deletions.
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions ledger-tool/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ crossbeam-channel = { workspace = true }
csv = { workspace = true }
dashmap = { workspace = true }
futures = { workspace = true }
hex = { workspace = true }
histogram = { workspace = true }
itertools = { workspace = true }
log = { workspace = true }
Expand Down
5 changes: 4 additions & 1 deletion ledger-tool/src/ledger_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ pub fn load_and_process_ledger_or_exit(
process_options: ProcessOptions,
snapshot_archive_path: Option<PathBuf>,
incremental_snapshot_archive_path: Option<PathBuf>,
transaction_status_sender: Option<TransactionStatusSender>,
) -> (Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>) {
load_and_process_ledger(
arg_matches,
Expand All @@ -108,6 +109,7 @@ pub fn load_and_process_ledger_or_exit(
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
transaction_status_sender,
)
.unwrap_or_else(|err| {
eprintln!("Exiting. Failed to load and process ledger: {err}");
Expand All @@ -122,6 +124,7 @@ pub fn load_and_process_ledger(
process_options: ProcessOptions,
snapshot_archive_path: Option<PathBuf>,
incremental_snapshot_archive_path: Option<PathBuf>,
transaction_status_sender: Option<TransactionStatusSender>,
) -> Result<(Arc<RwLock<BankForks>>, Option<StartingSnapshotHashes>), LoadAndProcessLedgerError> {
let bank_snapshots_dir = if blockstore.is_primary_access() {
blockstore.ledger_path().join("snapshot")
Expand Down Expand Up @@ -387,7 +390,7 @@ pub fn load_and_process_ledger(
Some(transaction_status_service),
)
} else {
(None, None)
(transaction_status_sender, None)
};

let result = blockstore_processor::process_blockstore_from_root(
Expand Down
138 changes: 136 additions & 2 deletions ledger-tool/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use {
},
dashmap::DashMap,
log::*,
serde::Serialize,
serde::{Deserialize, Serialize},
solana_account_decoder::UiAccountEncoding,
solana_accounts_db::{
accounts_db::CalcAccountsHashDataSource, accounts_index::ScanConfig,
Expand All @@ -41,7 +41,9 @@ use {
solana_ledger::{
blockstore::{create_new_ledger, Blockstore},
blockstore_options::{AccessType, LedgerColumnOptions},
blockstore_processor::ProcessSlotCallback,
blockstore_processor::{
ProcessSlotCallback, TransactionStatusMessage, TransactionStatusSender,
},
use_snapshot_archives_at_startup,
},
solana_measure::{measure, measure::Measure},
Expand Down Expand Up @@ -73,6 +75,7 @@ use {
transaction::{MessageHash, SanitizedTransaction, SimpleAddressLoader},
},
solana_stake_program::stake_state::{self, PointValue},
solana_svm::transaction_results::TransactionExecutionDetails,
solana_unified_scheduler_pool::DefaultSchedulerPool,
solana_vote_program::{
self,
Expand Down Expand Up @@ -1069,6 +1072,13 @@ fn main() {
.value_name("FILENAME")
.help("Record slots to a file"),
)
.arg(
Arg::with_name("record_txs")
.long("record-txs")
.default_value("transactions.json")
.value_name("FILENAME")
.help("Record transactions to a file"),
)
.arg(
Arg::with_name("verify_slots")
.long("verify-slots")
Expand Down Expand Up @@ -1602,6 +1612,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);

println!(
Expand All @@ -1627,6 +1638,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);
println!("{}", &bank_forks.read().unwrap().working_bank().hash());
}
Expand Down Expand Up @@ -1659,6 +1671,14 @@ fn main() {
exit(1);
}

let (sender, receiver) = if arg_matches.occurrences_of("record_txs") > 0 {
let (sender, receiver) = crossbeam_channel::unbounded();

(Some(TransactionStatusSender { sender }), Some(receiver))
} else {
(None, None)
};

let (slot_callback, record_slots_file, recorded_slots) = if arg_matches
.occurrences_of("record_slots")
> 0
Expand Down Expand Up @@ -1769,6 +1789,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
sender,
);

if print_accounts_stats {
Expand All @@ -1784,6 +1805,115 @@ fn main() {
.ok();
}

/// JSON-serializable record of one replayed transaction, written out by
/// the `--record-txs` option.
#[derive(Serialize, Deserialize, Default, Clone)]
struct Transaction {
// Position of the transaction within its slot (from `batch.transaction_indexes`).
index: usize,
// All account keys of the transaction message, as base58 strings.
accounts: Vec<String>,
// Top-level instructions of the message (inner instructions are not recorded here).
instructions: Vec<Instruction>,
is_simple_vote_tx: bool,
// Execution outcome as reported by the status batch; None when the batch
// carried no result for this transaction.
execution_results: Option<TransactionExecutionDetails>,
}

/// JSON-serializable form of a single top-level instruction.
#[derive(Serialize, Deserialize, Clone)]
struct Instruction {
// Program account key (resolved from `program_id_index`), as a string.
program_id: String,
// Instruction account keys resolved from the message's account list.
accounts: Vec<String>,
// Raw instruction data, hex-encoded.
data: String,
}

/// All recorded transactions for one slot; one entry per replayed slot.
#[derive(Serialize, Deserialize)]
struct SlotDetails {
slot: Slot,
// Transactions of the slot, kept sorted by `Transaction::index`.
transactions: Vec<Transaction>,
}

// Drain the transaction-status channel (populated during ledger replay when
// `--record-txs` was given) and convert every batch into `SlotDetails`
// records, grouped by slot.  The channel closes once replay finishes, which
// ends the `for` loop.
if let Some(recv) = receiver {
let mut slots: Vec<SlotDetails> = Vec::new();
for tsm in recv {
// Only batch messages carry transactions; other message kinds (e.g.
// freeze notifications) are ignored.
if let TransactionStatusMessage::Batch(batch) = tsm {
let slot = batch.bank.slot();

// Invariant from the producer: one execution result per transaction.
// `batch.execution_results[no]` below relies on this.
assert_eq!(batch.transactions.len(), batch.execution_results.len());

let transactions: Vec<_> = batch
.transactions
.iter()
.enumerate()
.map(|(no, tx)| {
let message = tx.message();

// Full account key list of the message; instruction account
// indexes below index into this vector.
let accounts: Vec<String> = message
.account_keys()
.iter()
.map(|acc| acc.to_string())
.collect();

let instructions = message
.instructions()
.iter()
.map(|ix| {
let program_id =
accounts[ix.program_id_index as usize].clone();

// Shadowing: resolve per-instruction account indexes
// into the message-level key strings.
let accounts = ix
.accounts
.iter()
.map(|idx| accounts[*idx as usize].clone())
.collect();

let data = hex::encode(&ix.data);

Instruction {
program_id,
accounts,
data,
}
})
.collect();

let execution_results = batch.execution_results[no].clone();

let is_simple_vote_tx = tx.is_simple_vote_transaction();

Transaction {
accounts,
instructions,
is_simple_vote_tx,
execution_results,
// NOTE(review): presumably the transaction's position within
// the slot's entries — confirm against the producer.
index: batch.transaction_indexes[no],
}
})
.collect();

// A slot may arrive in several batches: merge into the existing
// entry and re-sort by index so output order is deterministic.
if let Some(recorded_slot) =
slots.iter_mut().find(|f| f.slot == slot)
{
recorded_slot.transactions.extend(transactions);

recorded_slot
.transactions
.sort_by(|a, b| a.index.cmp(&b.index));
} else {
slots.push(SlotDetails { slot, transactions });
}
}
}

let filename = Path::new(arg_matches.value_of_os("record_txs").unwrap());

// BUG FIX: this was `File::open`, which opens read-only (and errors when the
// file does not exist), so the subsequent JSON write could never succeed.
// `File::create` creates the file or truncates an existing one for writing.
let file = File::create(filename).unwrap_or_else(|err| {
eprintln!("Unable to write file: {}: {err:#}", filename.display());
exit(1);
});

// writing the json file ends up with a syscall for each number, comma, indentation etc.
// use BufWriter to speed things up
let writer = std::io::BufWriter::new(file);

serde_json::to_writer_pretty(writer, &slots).unwrap();
}

if let Some(recorded_slots_file) = record_slots_file {
if let Ok(recorded_slots) = recorded_slots.clone().unwrap().lock() {
let bank_hashes =
Expand Down Expand Up @@ -1826,6 +1956,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);

let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config);
Expand Down Expand Up @@ -1999,6 +2130,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);
let mut bank = bank_forks
.read()
Expand Down Expand Up @@ -2392,6 +2524,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);
let bank = bank_forks.read().unwrap().working_bank();

Expand Down Expand Up @@ -2444,6 +2577,7 @@ fn main() {
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);
let bank_forks = bank_forks.read().unwrap();
let slot = bank_forks.working_bank().slot();
Expand Down
1 change: 1 addition & 0 deletions ledger-tool/src/program.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ fn load_blockstore(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> Arc<Bank
process_options,
snapshot_archive_path,
incremental_snapshot_archive_path,
None,
);
let bank = bank_forks.read().unwrap().working_bank();
bank
Expand Down
1 change: 1 addition & 0 deletions programs/sbf/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions svm/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ edition = { workspace = true }
itertools = { workspace = true }
log = { workspace = true }
percentage = { workspace = true }
serde = { workspace = true }
solana-bpf-loader-program = { workspace = true }
solana-frozen-abi = { workspace = true }
solana-frozen-abi-macro = { workspace = true }
Expand Down
5 changes: 3 additions & 2 deletions svm/src/transaction_results.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
)]
pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList};
use {
serde::{Deserialize, Serialize},
solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch,
solana_sdk::{
nonce_info::{NonceFull, NonceInfo},
Expand Down Expand Up @@ -69,7 +70,7 @@ impl TransactionExecutionResult {
}
}

#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransactionExecutionDetails {
pub status: transaction::Result<()>,
pub log_messages: Option<Vec<String>>,
Expand All @@ -82,7 +83,7 @@ pub struct TransactionExecutionDetails {
pub accounts_data_len_delta: i64,
}

#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum DurableNonceFee {
Valid(u64),
Invalid,
Expand Down

0 comments on commit a05ffdd

Please sign in to comment.