From d33ed3ec6d874487e60707c4cba9aa6f0af62e1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 21:56:35 +0800 Subject: [PATCH 001/529] build(deps): bump serde_json from 1.0.121 to 1.0.122 (#2401) * build(deps): bump serde_json from 1.0.121 to 1.0.122 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.121 to 1.0.122. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.121...v1.0.122) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 775ef121ff268c..3ad30b81b6009d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5033,9 +5033,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index b4b9b3eedf8bbc..9c034531b7c46f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -319,7 +319,7 @@ seqlock = "0.2.0" serde = "1.0.204" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" serde_derive = "1.0.204" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.121" +serde_json = "1.0.122" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 417ceccbbbd5fb..4dd3992ed8f6e8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4199,9 +4199,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ "itoa", "memchr", From 32e78812fd95e9d9a4b09997bc75790405905013 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 21:56:48 +0800 Subject: [PATCH 002/529] build(deps): bump bytes from 1.6.1 to 1.7.1 (#2402) * build(deps): bump bytes from 1.6.1 to 1.7.1 Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.6.1 to 1.7.1. - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.6.1...v1.7.1) --- updated-dependencies: - dependency-name: bytes dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3ad30b81b6009d..7ffc70b7e370c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1163,9 +1163,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "bytesize" diff --git a/Cargo.toml b/Cargo.toml index 9c034531b7c46f..6a27879dc36e53 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -190,7 +190,7 @@ bytecount = "0.6.8" bytemuck = "1.16.3" bytemuck_derive = "1.7.0" byteorder = "1.5.0" -bytes = "1.6" +bytes = "1.7" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 4dd3992ed8f6e8..16a475c9d77ac0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -879,9 +879,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "bzip2" From 3f3f48fcddfd3b78df61cbb7c6001b022ca1f9a3 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 2 Aug 2024 10:08:46 -0400 Subject: [PATCH 003/529] TransactionView: parsing helper functions (#2257) --- Cargo.lock | 10 + Cargo.toml | 2 + transaction-view/Cargo.toml | 26 +++ transaction-view/benches/bytes.rs | 90 +++++++++ transaction-view/src/bytes.rs | 310 ++++++++++++++++++++++++++++++ transaction-view/src/lib.rs | 9 + transaction-view/src/result.rs | 3 + 7 files changed, 450 insertions(+) create mode 100644 transaction-view/Cargo.toml create mode 100644 transaction-view/benches/bytes.rs create mode 100644 transaction-view/src/bytes.rs create mode 100644 transaction-view/src/lib.rs create mode 100644 transaction-view/src/result.rs diff --git a/Cargo.lock b/Cargo.lock index 7ffc70b7e370c6..d9175d0d9c0682 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -217,6 +217,16 @@ dependencies = [ "solana-version", ] +[[package]] +name = "agave-transaction-view" +version = "2.1.0" +dependencies = [ + "agave-transaction-view", + "bincode", + "criterion", + "solana-sdk", +] + [[package]] name = "agave-validator" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 6a27879dc36e53..de49b9b8ac1016 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -129,6 +129,7 @@ members = [ "transaction-dos", "transaction-metrics-tracker", "transaction-status", + "transaction-view", "turbine", "type-overrides", "udp-client", @@ -159,6 +160,7 @@ edition = "2021" [workspace.dependencies] Inflector = "0.11.4" +agave-transaction-view = { path = "transaction-view", version = "=2.1.0" } aquamarine = "0.3.3" aes-gcm-siv = "0.11.1" ahash = "0.8.10" diff --git a/transaction-view/Cargo.toml b/transaction-view/Cargo.toml new file mode 100644 index 00000000000000..0b3f4e828c969d --- /dev/null +++ b/transaction-view/Cargo.toml @@ -0,0 
+1,26 @@ +[package] +name = "agave-transaction-view" +description = "Agave TransactionView" +documentation = "https://docs.rs/agave-transaction-view" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-sdk = { workspace = true } + +[dev-dependencies] +# See order-crates-for-publishing.py for using this unusual `path = "."` +agave-transaction-view = { path = ".", features = ["dev-context-only-utils"] } +bincode = { workspace = true } +criterion = { workspace = true } + +[features] +dev-context-only-utils = [] + +[[bench]] +name = "bytes" +harness = false diff --git a/transaction-view/benches/bytes.rs b/transaction-view/benches/bytes.rs new file mode 100644 index 00000000000000..e5803cf1dec822 --- /dev/null +++ b/transaction-view/benches/bytes.rs @@ -0,0 +1,90 @@ +use { + agave_transaction_view::bytes::{optimized_read_compressed_u16, read_compressed_u16}, + bincode::{serialize_into, DefaultOptions, Options}, + criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}, + solana_sdk::{ + packet::PACKET_DATA_SIZE, + short_vec::{decode_shortu16_len, ShortU16}, + }, +}; + +fn setup() -> Vec<(u16, usize, Vec<u8>)> { + let options = DefaultOptions::new().with_fixint_encoding(); // Ensure fixed-int encoding + + // Create a vector of all valid u16 values serialized into 16-byte buffers. + let mut values = Vec::with_capacity(PACKET_DATA_SIZE); + for value in 0..PACKET_DATA_SIZE as u16 { + let short_u16 = ShortU16(value); + let mut buffer = vec![0u8; 16]; + let serialized_len = options + .serialized_size(&short_u16) + .expect("Failed to get serialized size"); + serialize_into(&mut buffer[..], &short_u16).expect("Serialization failed"); + values.push((value, serialized_len as usize, buffer)); + } + + values +} + +fn bench_u16_parsing(c: &mut Criterion) { + let values_serialized_lengths_and_buffers = setup(); + let mut group = c.benchmark_group("compressed_u16_parsing"); + group.throughput(Throughput::Elements( + values_serialized_lengths_and_buffers.len() as u64, + )); + + // Benchmark the decode_shortu16_len function from `solana-sdk` + group.bench_function("short_u16_decode", |c| { + c.iter(|| { + decode_shortu16_len_iter(&values_serialized_lengths_and_buffers); + }) + }); + + // Benchmark `read_compressed_u16` + group.bench_function("read_compressed_u16", |c| { + c.iter(|| { + read_compressed_u16_iter(&values_serialized_lengths_and_buffers); + }) + }); + + group.bench_function("optimized_read_compressed_u16", |c| { + c.iter(|| { + optimized_read_compressed_u16_iter(&values_serialized_lengths_and_buffers); + }) + }); +} + +fn decode_shortu16_len_iter(values_serialized_lengths_and_buffers: &[(u16, usize, Vec<u8>)]) { + for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() { + let (read_value, bytes_read) = decode_shortu16_len(black_box(buffer)).unwrap(); + assert_eq!(read_value, *value as usize, "Value mismatch for: {}", value); + assert_eq!( + bytes_read, *serialized_len, + "Offset mismatch for: {}", + value + ); + } +} + +fn read_compressed_u16_iter(values_serialized_lengths_and_buffers: &[(u16, usize, Vec<u8>)]) { + for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() { + let mut offset = 0; + let read_value = read_compressed_u16(black_box(buffer), &mut offset).unwrap(); + assert_eq!(read_value, *value, "Value mismatch for: {}", value); + assert_eq!(offset,
*serialized_len, "Offset mismatch for: {}", value); + } +} + +fn optimized_read_compressed_u16_iter( + values_serialized_lengths_and_buffers: &[(u16, usize, Vec<u8>)], +) { + for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() { + let mut offset = 0; + let read_value = optimized_read_compressed_u16(black_box(buffer), &mut offset).unwrap(); + assert_eq!(read_value, *value, "Value mismatch for: {}", value); + assert_eq!(offset, *serialized_len, "Offset mismatch for: {}", value); + } +} + +criterion_group!(benches, bench_u16_parsing); +criterion_main!(benches); diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs new file mode 100644 index 00000000000000..a67d8a2ddd8b35 --- /dev/null +++ b/transaction-view/src/bytes.rs @@ -0,0 +1,310 @@ +use crate::result::{Result, TransactionParsingError}; + +/// Check that the buffer has at least `len` bytes remaining starting at +/// `offset`. Returns Err if the buffer is too short. +/// +/// Assumptions: +/// - The current offset is not greater than `bytes.len()`. +#[inline(always)] +pub fn check_remaining(bytes: &[u8], offset: usize, len: usize) -> Result<()> { + if len > bytes.len().wrapping_sub(offset) { + Err(TransactionParsingError) + } else { + Ok(()) + } +} + +/// Check that the buffer has at least 1 byte remaining starting at `offset`. +/// Returns Err if the buffer is too short. +#[inline(always)] +pub fn read_byte(bytes: &[u8], offset: &mut usize) -> Result<u8> { + // Implicitly checks that the offset is within bounds, no need + // to call `check_remaining` explicitly here. + let value = bytes.get(*offset).copied().ok_or(TransactionParsingError); + *offset = offset.wrapping_add(1); + value +} + +/// Read a compressed u16 from `bytes` starting at `offset`. +/// If the buffer is too short or the encoding is invalid, return Err. +/// `offset` is updated to point to the byte after the compressed u16. +/// +/// Assumptions: +/// - The current offset is not greater than `bytes.len()`. +#[inline(always)] +pub fn read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result<u16> { + let mut result = 0u16; + let mut shift = 0u16; + + for i in 0..3 { + // Implicitly checks that the offset is within bounds, no need + // to call check_remaining explicitly here. + let byte = *bytes + .get(offset.wrapping_add(i)) + .ok_or(TransactionParsingError)?; + // non-minimal encoding or overflow + if (i > 0 && byte == 0) || (i == 2 && byte > 3) { + return Err(TransactionParsingError); + } + result |= ((byte & 0x7F) as u16) << shift; + shift = shift.wrapping_add(7); + if byte & 0x80 == 0 { + *offset = offset.wrapping_add(i).wrapping_add(1); + return Ok(result); + } + } + + // if we reach here, it means that all 3 bytes were used + *offset = offset.wrapping_add(3); + Ok(result) +} + +/// Domain-specific optimization for reading a compressed u16. +/// Compressed u16s are only used for array-lengths in our transaction +/// format. The transaction packet has a maximum size of 1232 bytes. +/// This means that the maximum array length within a **valid** transaction is +/// 1232. This has a minimally encoded length of 2 bytes. +/// Although the encoding scheme allows for more, any arrays with this length +/// would be too large to fit in a packet. This function optimizes for this +/// case, and reads a maximum of 2 bytes. +/// If the buffer is too short or the encoding is invalid, return Err. +/// `offset` is updated to point to the byte after the compressed u16.
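+/// As a worked illustration of the encoding (an added note): the maximum packet size 1232 encodes as [0xD0, 0x09], where 0xD0 holds the low seven bits with the continuation bit set and 0x09 holds the remaining bits.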
+#[inline(always)] +pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result<u16> { + let mut result = 0u16; + + // First byte + let byte1 = *bytes.get(*offset).ok_or(TransactionParsingError)?; + result |= (byte1 & 0x7F) as u16; + if byte1 & 0x80 == 0 { + *offset = offset.wrapping_add(1); + return Ok(result); + } + + // Second byte + let byte2 = *bytes + .get(offset.wrapping_add(1)) + .ok_or(TransactionParsingError)?; + if byte2 == 0 || byte2 & 0x80 != 0 { + return Err(TransactionParsingError); // non-minimal encoding or overflow + } + result |= ((byte2 & 0x7F) as u16) << 7; + *offset = offset.wrapping_add(2); + + Ok(result) +} + +/// Update the `offset` to point to the byte after an array of length `len` and +/// of type `T`. If the buffer is too short, return Err. +/// +/// Assumptions: +/// 1. The current offset is not greater than `bytes.len()`. +/// 2. The size of `T` is small enough such that a usize will not overflow if +/// given the maximum array size (u16::MAX). +#[inline(always)] +pub fn offset_array_len<T>(bytes: &[u8], offset: &mut usize, len: u16) -> Result<()> { + let array_len_bytes = usize::from(len).wrapping_mul(core::mem::size_of::<T>()); + check_remaining(bytes, *offset, array_len_bytes)?; + *offset = offset.wrapping_add(array_len_bytes); + Ok(()) +} + +/// Update the `offset` to point to the byte after the `T`. +/// If the buffer is too short, return Err. +/// +/// Assumptions: +/// 1. The current offset is not greater than `bytes.len()`. +/// 2. The size of `T` is small enough such that a usize will not overflow. +#[inline(always)] +pub fn offset_type<T>(bytes: &[u8], offset: &mut usize) -> Result<()> { + let type_size = core::mem::size_of::<T>(); + check_remaining(bytes, *offset, type_size)?; + *offset = offset.wrapping_add(type_size); + Ok(()) +} + +#[cfg(test)] +mod tests { + use { + super::*, + bincode::{serialize_into, DefaultOptions, Options}, + solana_sdk::{packet::PACKET_DATA_SIZE, short_vec::ShortU16}, + }; + + #[test] + fn test_check_remaining() { + // Empty buffer checks + assert!(check_remaining(&[], 0, 0).is_ok()); + assert!(check_remaining(&[], 0, 1).is_err()); + + // Buffer with data checks + assert!(check_remaining(&[1, 2, 3], 0, 0).is_ok()); + assert!(check_remaining(&[1, 2, 3], 0, 1).is_ok()); + assert!(check_remaining(&[1, 2, 3], 0, 3).is_ok()); + assert!(check_remaining(&[1, 2, 3], 0, 4).is_err()); + + // Non-zero offset.
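+ // len is measured from `offset`, so only two bytes remain at offset 1; the usize::MAX request must fail through the wrapping_sub comparison rather than overflow.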
+ assert!(check_remaining(&[1, 2, 3], 1, 0).is_ok()); + assert!(check_remaining(&[1, 2, 3], 1, 1).is_ok()); + assert!(check_remaining(&[1, 2, 3], 1, 2).is_ok()); + assert!(check_remaining(&[1, 2, 3], 1, usize::MAX).is_err()); + } + + #[test] + fn test_read_byte() { + let bytes = [5, 6, 7]; + let mut offset = 0; + assert_eq!(read_byte(&bytes, &mut offset), Ok(5)); + assert_eq!(offset, 1); + assert_eq!(read_byte(&bytes, &mut offset), Ok(6)); + assert_eq!(offset, 2); + assert_eq!(read_byte(&bytes, &mut offset), Ok(7)); + assert_eq!(offset, 3); + assert!(read_byte(&bytes, &mut offset).is_err()); + } + + #[test] + fn test_read_compressed_u16() { + let mut buffer = [0u8; 1024]; + let options = DefaultOptions::new().with_fixint_encoding(); // Ensure fixed-int encoding + + // Test all possible u16 values + for value in 0..=u16::MAX { + let mut offset; + let short_u16 = ShortU16(value); + + // Serialize the value into the buffer + serialize_into(&mut buffer[..], &short_u16).expect("Serialization failed"); + + // Use bincode's size calculation to determine the length of the serialized data + let serialized_len = options + .serialized_size(&short_u16) + .expect("Failed to get serialized size"); + + // Reset offset + offset = 0; + + // Read the value back using read_compressed_u16 + let read_value = read_compressed_u16(&buffer, &mut offset); + + // Assert that the read value matches the original value + assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value); + + // Assert that the offset matches the serialized length + assert_eq!( + offset, serialized_len as usize, + "Offset mismatch for: {}", + value + ); + } + + // Test bounds. + // All 0s => 0 + assert_eq!(Ok(0), read_compressed_u16(&[0; 3], &mut 0)); + // Overflow + assert!(read_compressed_u16(&[0xFF, 0xFF, 0x04], &mut 0).is_err()); + assert_eq!( + read_compressed_u16(&[0xFF, 0xFF, 0x03], &mut 0), + Ok(u16::MAX) + ); + + // overflow errors + assert!(read_compressed_u16(&[u8::MAX; 1], &mut 0).is_err()); + assert!(read_compressed_u16(&[u8::MAX; 2], &mut 0).is_err()); + + // Minimal encoding checks + assert!(read_compressed_u16(&[0x81, 0x80, 0x00], &mut 0).is_err()); + } + + #[test] + fn test_optimized_read_compressed_u16() { + let mut buffer = [0u8; 1024]; + let options = DefaultOptions::new().with_fixint_encoding(); // Ensure fixed-int encoding + + // Test all possible u16 values under the packet length + for value in 0..=PACKET_DATA_SIZE as u16 { + let mut offset; + let short_u16 = ShortU16(value); + + // Serialize the value into the buffer + serialize_into(&mut buffer[..], &short_u16).expect("Serialization failed"); + + // Use bincode's size calculation to determine the length of the serialized data + let serialized_len = options + .serialized_size(&short_u16) + .expect("Failed to get serialized size"); + + // Reset offset + offset = 0; + + // Read the value back using optimized_read_compressed_u16 + let read_value = optimized_read_compressed_u16(&buffer, &mut offset); + + // Assert that the read value matches the original value + assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value); + + // Assert that the offset matches the serialized length + assert_eq!( + offset, serialized_len as usize, + "Offset mismatch for: {}", + value + ); + } + + // Test bounds.
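+ // A third byte contributes bits 14..16, so 0x03 << 14 plus the low fourteen bits is exactly u16::MAX; any third byte above 3 would overflow.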
+ // All 0s => 0 + assert_eq!(Ok(0), optimized_read_compressed_u16(&[0; 3], &mut 0)); + // Overflow + assert!(optimized_read_compressed_u16(&[0xFF, 0xFF, 0x04], &mut 0).is_err()); + assert!(optimized_read_compressed_u16(&[0xFF, 0x80], &mut 0).is_err()); + + // overflow errors + assert!(optimized_read_compressed_u16(&[u8::MAX; 1], &mut 0).is_err()); + assert!(optimized_read_compressed_u16(&[u8::MAX; 2], &mut 0).is_err()); + + // Minimal encoding checks + assert!(optimized_read_compressed_u16(&[0x81, 0x00], &mut 0).is_err()); + } + + #[test] + fn test_offset_array_len() { + #[repr(C)] + struct MyStruct { + _a: u8, + _b: u8, + } + const _: () = assert!(core::mem::size_of::<MyStruct>() == 2); + + // Test with a buffer that is too short + let bytes = [0u8; 1]; + let mut offset = 0; + assert!(offset_array_len::<MyStruct>(&bytes, &mut offset, 1).is_err()); + + // Test with a buffer that is long enough + let bytes = [0u8; 4]; + let mut offset = 0; + assert!(offset_array_len::<MyStruct>(&bytes, &mut offset, 2).is_ok()); + assert_eq!(offset, 4); + } + + #[test] + fn test_offset_type() { + #[repr(C)] + struct MyStruct { + _a: u8, + _b: u8, + } + const _: () = assert!(core::mem::size_of::<MyStruct>() == 2); + + // Test with a buffer that is too short + let bytes = [0u8; 1]; + let mut offset = 0; + assert!(offset_type::<MyStruct>(&bytes, &mut offset).is_err()); + + // Test with a buffer that is long enough + let bytes = [0u8; 4]; + let mut offset = 0; + assert!(offset_type::<MyStruct>(&bytes, &mut offset).is_ok()); + assert_eq!(offset, 2); + } +} diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs new file mode 100644 index 00000000000000..a16187f62ccd82 --- /dev/null +++ b/transaction-view/src/lib.rs @@ -0,0 +1,9 @@ +// Parsing helpers only need to be public for benchmarks. +#[cfg(feature = "dev-context-only-utils")] +#[allow(dead_code)] +pub mod bytes; +#[cfg(not(feature = "dev-context-only-utils"))] +#[allow(dead_code)] +mod bytes; + +pub mod result; diff --git a/transaction-view/src/result.rs b/transaction-view/src/result.rs new file mode 100644 index 00000000000000..1997a784b73650 --- /dev/null +++ b/transaction-view/src/result.rs @@ -0,0 +1,3 @@ +#[derive(Debug, PartialEq, Eq)] +pub struct TransactionParsingError; +pub type Result<T> = core::result::Result<T, TransactionParsingError>; // no distinction between errors for now From a60fbc2288d626a4f1846052c8fcb98d3f9ea58d Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 2 Aug 2024 14:35:17 +0000 Subject: [PATCH 004/529] rolls out chained Merkle shreds to ~5% of testnet (#2389) --- turbine/src/broadcast_stage/standard_broadcast_run.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index f108fc08226a6b..0ddbe1020f5f98 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -506,8 +506,14 @@ impl BroadcastRun for StandardBroadcastRun { } } -fn should_chain_merkle_shreds(_slot: Slot, cluster_type: ClusterType) -> bool { - cluster_type == ClusterType::Development +fn should_chain_merkle_shreds(slot: Slot, cluster_type: ClusterType) -> bool { + match cluster_type { + ClusterType::Development => true, + ClusterType::Devnet => false, + ClusterType::MainnetBeta => false, + // Roll out chained Merkle shreds to ~5% of testnet.
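+ // 1 slot in 19 is roughly 5.3%, and keying off the slot number keeps the choice deterministic across the cluster.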
+ ClusterType::Testnet => slot % 19 == 1, + } } #[cfg(test)] From fb80e48fb000b61e44bb87a7351d9263e57f6d31 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Fri, 2 Aug 2024 16:46:10 -0300 Subject: [PATCH 005/529] Bump platform tools version to v1.42 (#2355) --- sdk/cargo-build-sbf/src/main.rs | 6 +++--- sdk/sbf/scripts/install.sh | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 0da59ff230b385..6d02499c8fbc6d 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -156,7 +156,7 @@ fn find_installed_platform_tools() -> Vec<String> { } fn get_latest_platform_tools_version() -> Result<String, String> { - let url = "https://github.com/solana-labs/platform-tools/releases/latest"; + let url = "https://github.com/anza-xyz/platform-tools/releases/latest"; let resp = reqwest::blocking::get(url).map_err(|err| format!("Failed to GET {url}: {err}"))?; let path = std::path::Path::new(resp.url().path()); let version = path.file_name().unwrap().to_string_lossy().to_string(); @@ -618,7 +618,7 @@ fn build_solana_package( install_if_missing( config, package, - "https://github.com/solana-labs/platform-tools/releases/download", + "https://github.com/anza-xyz/platform-tools/releases/download", platform_tools_download_file_name.as_str(), &target_path, ) @@ -913,7 +913,7 @@ fn main() { // The following line is scanned by CI configuration script to // separate cargo caches according to the version of platform-tools. - let platform_tools_version = String::from("v1.41"); + let platform_tools_version = String::from("v1.42"); let rust_base_version = get_base_rust_version(platform_tools_version.as_str()); let version = format!( "{}\nplatform-tools {}\n{}", diff --git a/sdk/sbf/scripts/install.sh b/sdk/sbf/scripts/install.sh index 12343a413ed7b7..e51f46d58c418a 100755 --- a/sdk/sbf/scripts/install.sh +++ b/sdk/sbf/scripts/install.sh @@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install platform tools -version=v1.41 +version=v1.42 if [[ ! -e platform-tools-$version.md || !
-e platform-tools ]]; then ( set -e From ee290ef980ce1cad20722e4338125728ac84af66 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 2 Aug 2024 15:58:19 -0500 Subject: [PATCH 006/529] fix trace to be more helpful (#2417) --- accounts-db/src/accounts_db.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 39f7f84918fa78..53ded010f792ec 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -7778,10 +7778,11 @@ impl AccountsDb { if Self::should_not_shrink(alive_bytes, total_bytes) { trace!( - "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {} ({}b / {}b) save: {}", + "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {}/{} ({}b / {}b) save: {}", slot, alive_count, stored_count, + alive_bytes, total_bytes, total_bytes.saturating_sub(alive_bytes), ); From eae498738618fa017f01b5fb16d93e320410d75a Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 2 Aug 2024 15:58:33 -0500 Subject: [PATCH 007/529] only mark status as shrinking when we have something to shrink (#2360) --- accounts-db/src/accounts_db.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 53ded010f792ec..6520d4de189d64 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4818,7 +4818,8 @@ impl AccountsDb { return 0; } - let _guard = self.active_stats.activate(ActiveStatItem::Shrink); + let _guard = (!shrink_slots.is_empty()) + .then(|| self.active_stats.activate(ActiveStatItem::Shrink)); let mut measure_shrink_all_candidates = Measure::start("shrink_all_candidate_slots-ms"); let num_candidates = shrink_slots.len(); From 752a061deac370e7ccaa6ec9af01ebeec785afbf Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 2 Aug 2024 14:16:21 -0700 Subject: [PATCH 008/529] Wen restart send heaviest fork earlier when first hearing from supermajority. (#1675) * Send HeaviestFork the first time we hear from supermajority. * Fix lint errors. * - Send RestartHeaviestFork every 5 minutes - Send the first one when greater or equal to the threshold - Add corresponding unit tests * Replace magic numbers with constants. --- wen-restart/src/wen_restart.rs | 249 +++++++++++++++++++++++++++++---- 1 file changed, 218 insertions(+), 31 deletions(-) diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 44af862ec38cde..5e794ce2f43560 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -68,8 +68,8 @@ const REPAIR_THRESHOLD: f64 = 0.42; const HEAVIEST_FORK_THRESHOLD_DELTA: f64 = 0.38; // We allow at most 5% of the stake to disagree with us. const HEAVIEST_FORK_DISAGREE_THRESHOLD_PERCENT: f64 = 5.0; -// We update HeaviestFork every 30 minutes or when we can exit. -const HEAVIEST_REFRESH_INTERVAL_IN_SECONDS: u64 = 1800; +// We update HeaviestFork every 5 minutes at least.
+const HEAVIEST_REFRESH_INTERVAL_IN_SECONDS: u64 = 300; #[derive(Debug, PartialEq)] pub enum WenRestartError { @@ -639,17 +639,14 @@ pub(crate) fn aggregate_restart_heaviest_fork( .as_mut() .unwrap() .total_active_stake = total_active_stake; - cluster_info.push_restart_heaviest_fork( - heaviest_fork_slot, - heaviest_fork_hash, - total_active_stake, - ); let mut progress_last_sent = Instant::now(); let mut cursor = solana_gossip::crds::Cursor::default(); let mut progress_changed = false; let majority_stake_required = (total_stake as f64 / 100.0 * adjusted_threshold_percent as f64).round() as u64; + let mut total_active_stake_higher_than_supermajority = false; + let mut first_time_entering_loop = true; loop { if exit.load(Ordering::Relaxed) { return Err(WenRestartError::Exiting.into()); } @@ -690,10 +687,21 @@ pub(crate) fn aggregate_restart_heaviest_fork( total_stake ); let can_exit = total_active_stake_seen_supermajority >= majority_stake_required; - // Only send out updates every 30 minutes or when we can exit. + let saw_supermajority_first_time = current_total_active_stake + >= majority_stake_required + && !total_active_stake_higher_than_supermajority + && { + total_active_stake_higher_than_supermajority = true; + true + }; + // Only send out updates every 5 minutes or when we can exit or active stake passes supermajority + // the first time. if progress_last_sent.elapsed().as_secs() >= HEAVIEST_REFRESH_INTERVAL_IN_SECONDS || can_exit + || first_time_entering_loop + || saw_supermajority_first_time { + first_time_entering_loop = false; cluster_info.push_restart_heaviest_fork( heaviest_fork_slot, heaviest_fork_hash, @@ -1183,8 +1191,10 @@ mod tests { const EXPECTED_SLOTS: Slot = 90; const TICKS_PER_SLOT: u64 = 2; const TOTAL_VALIDATOR_COUNT: u16 = 20; - const MY_INDEX: usize = 0; + const MY_INDEX: usize = TOTAL_VALIDATOR_COUNT as usize - 1; const WAIT_FOR_THREAD_TIMEOUT: u64 = 10_000; + const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80; + const NON_CONFORMING_VALIDATOR_PERCENT: u64 = 5; fn push_restart_last_voted_fork_slots( cluster_info: Arc<ClusterInfo>, @@ -1492,11 +1502,18 @@ mod tests { .unwrap(); let mut rng = rand::thread_rng(); let mut expected_received_last_voted_fork_slots = HashMap::new(); - // Skip the first 5 validators, because 0 is myself, we only need 15 more to reach 80%. + let validators_to_take: usize = + (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64 / 100 - 1) + .try_into() + .unwrap(); let mut last_voted_fork_slots_from_others = test_state.last_voted_fork_slots.clone(); last_voted_fork_slots_from_others.reverse(); last_voted_fork_slots_from_others.append(&mut expected_slots_to_repair.clone()); - for keypairs in test_state.validator_voting_keypairs.iter().skip(5) { + for keypairs in test_state + .validator_voting_keypairs + .iter() + .take(validators_to_take) + { let node_pubkey = keypairs.node_keypair.pubkey(); let node = ContactInfo::new_rand(&mut rng, Some(node_pubkey)); let last_vote_hash = Hash::new_unique(); @@ -1545,8 +1562,20 @@ mod tests { } // Now simulate receiving HeaviestFork messages. let mut expected_received_heaviest_fork = HashMap::new(); + let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT + - NON_CONFORMING_VALIDATOR_PERCENT) + * TOTAL_VALIDATOR_COUNT as u64 + / 100 + - 1) + .try_into() + .unwrap(); // HeaviestFork only requires 75% vs 80% required for LastVotedForkSlots. We have 5% stake, so we need 70%.
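+ // (80 - 5)% of 20 validators is 15; we simulate 14 peers, and with ourselves that is 1500 active stake.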
- for keypairs in test_state.validator_voting_keypairs.iter().skip(6) { + let total_active_stake_during_heaviest_fork = (validators_to_take + 1) as u64 * 100; + for keypairs in test_state + .validator_voting_keypairs + .iter() + .take(validators_to_take) + { let node_pubkey = keypairs.node_keypair.pubkey(); let node = ContactInfo::new_rand(&mut rng, Some(node_pubkey)); let now = timestamp(); @@ -1555,7 +1584,7 @@ mod tests { &node, expected_heaviest_fork_slot, &expected_heaviest_fork_bankhash, - 1500, + total_active_stake_during_heaviest_fork, &keypairs.node_keypair, now, ); @@ -1564,7 +1593,7 @@ mod tests { HeaviestForkRecord { slot: expected_heaviest_fork_slot, bankhash: expected_heaviest_fork_bankhash.to_string(), - total_active_stake: 1500, + total_active_stake: total_active_stake_during_heaviest_fork, shred_version: SHRED_VERSION as u32, wallclock: now, }, @@ -1582,9 +1611,18 @@ mod tests { let mut expected_slots_stake_map: HashMap<Slot, u64> = test_state .last_voted_fork_slots .iter() .map(|slot| { ( *slot, WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64, ) }) .collect(); expected_slots_stake_map.extend( expected_slots_to_repair .iter() .map(|slot| (*slot, total_active_stake_during_heaviest_fork)), ); assert_eq!( progress, WenRestartProgress { @@ -1599,7 +1637,8 @@ mod tests { received: expected_received_last_voted_fork_slots, final_result: Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: expected_slots_stake_map, // We are simulating 5% joined LastVotedForkSlots but not HeaviestFork. total_active_stake: total_active_stake_during_heaviest_fork + 100, }), }), my_heaviest_fork: Some(HeaviestForkRecord { @@ -1610,16 +1649,17 @@ mod tests { .unwrap() .bankhash .to_string(), total_active_stake: total_active_stake_during_heaviest_fork, shred_version: SHRED_VERSION as u32, wallclock: 0, }), heaviest_fork_aggregate: Some(HeaviestForkAggregateRecord { received: expected_received_heaviest_fork, final_result: Some(HeaviestForkAggregateFinal { total_active_stake: total_active_stake_during_heaviest_fork, total_active_stake_seen_supermajority: total_active_stake_during_heaviest_fork, total_active_stake_agreed_with_me: total_active_stake_during_heaviest_fork, }), }), my_snapshot: Some(GenerateSnapshotRecord { @@ -2026,7 +2066,6 @@ mod tests { let mut last_voted_fork_slots_from_others = test_state.last_voted_fork_slots.clone(); last_voted_fork_slots_from_others.reverse(); last_voted_fork_slots_from_others.append(&mut expected_slots_to_repair.clone()); - // Skip the first 5 validators, because 0 is myself, we need 15 so it hits 80%.
let progress = WenRestartProgress { state: RestartState::LastVotedForkSlots.into(), my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { last_voted_fork_slots, last_vote_bankhash: last_vote_bankhash.to_string(), shred_version: SHRED_VERSION as u32, wallclock: 0, }), ..Default::default() }; + let validators_to_take: usize = + (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64 / 100 - 1) + .try_into() + .unwrap(); for keypairs in test_state + .validator_voting_keypairs + .iter() + .take(validators_to_take) + { let wen_restart_proto_path_clone = test_state.wen_restart_proto_path.clone(); let cluster_info_clone = test_state.cluster_info.clone(); let bank_forks_clone = test_state.bank_forks.clone(); @@ -2051,7 +2098,7 @@ mod tests { .spawn(move || { assert!(aggregate_restart_last_voted_fork_slots( &wen_restart_proto_path_clone, - 80, + WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT, cluster_info_clone, &last_voted_fork_slots, bank_forks_clone, @@ -2485,25 +2532,157 @@ mod tests { ); } + fn start_aggregate_heaviest_fork_thread( + test_state: &WenRestartTestInitResult, + heaviest_fork_slot: Slot, + heaviest_fork_bankhash: Hash, + exit: Arc<AtomicBool>, + expected_error: Option<WenRestartError>, + ) -> std::thread::JoinHandle<()> { + let progress = wen_restart_proto::WenRestartProgress { + state: RestartState::HeaviestFork.into(), + my_heaviest_fork: Some(HeaviestForkRecord { + slot: heaviest_fork_slot, + bankhash: heaviest_fork_bankhash.to_string(), + total_active_stake: WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT + .saturating_mul(TOTAL_VALIDATOR_COUNT as u64), + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + ..Default::default() + }; + let wen_restart_path = test_state.wen_restart_proto_path.clone(); + let cluster_info = test_state.cluster_info.clone(); + let bank_forks = test_state.bank_forks.clone(); + Builder::new() + .name("solana-wen-restart-aggregate-heaviest-fork".to_string()) + .spawn(move || { + let result = aggregate_restart_heaviest_fork( + &wen_restart_path, + WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT, + cluster_info, + bank_forks, + exit, + &mut progress.clone(), + ); + if let Some(expected_error) = expected_error { + assert_eq!( + result.unwrap_err().downcast::<WenRestartError>().unwrap(), + expected_error + ); + } else { + assert!(result.is_ok()); + } + }) + .unwrap() + } + + #[test] + fn test_aggregate_heaviest_fork_send_gossip_early() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let heaviest_fork_slot = test_state.last_voted_fork_slots[0] + 3; + let heaviest_fork_bankhash = Hash::new_unique(); + + let mut cursor = solana_gossip::crds::Cursor::default(); + // clear the heaviest fork queue so we make sure a new HeaviestFork is sent out later. + let _ = test_state + .cluster_info + .get_restart_heaviest_fork(&mut cursor); + + let exit = Arc::new(AtomicBool::new(false)); + let thread = start_aggregate_heaviest_fork_thread( + &test_state, + heaviest_fork_slot, + heaviest_fork_bankhash, + exit.clone(), + Some(WenRestartError::Exiting), + ); + // Simulating everyone sending out the first RestartHeaviestFork message, Gossip propagation takes + // time, so the observed_stake is probably smaller than actual active stake. We should send out + // heaviest fork indicating we have active stake exceeding supermajority.
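+ // The 5% disagree allowance lowers the effective threshold to 75% of 20 validators, i.e. 15 nodes' stake: 14 simulated peers plus ourselves.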
+ let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT + - NON_CONFORMING_VALIDATOR_PERCENT) + * TOTAL_VALIDATOR_COUNT as u64 + / 100 + - 1) + .try_into() + .unwrap(); + for keypair in test_state + .validator_voting_keypairs + .iter() + .take(validators_to_take) + { + let node_pubkey = keypair.node_keypair.pubkey(); + let node = ContactInfo::new_rand(&mut rand::thread_rng(), Some(node_pubkey)); + let now = timestamp(); + push_restart_heaviest_fork( + test_state.cluster_info.clone(), + &node, + heaviest_fork_slot, + &heaviest_fork_bankhash, + 100, + &keypair.node_keypair, + now, + ); + } + let my_pubkey = test_state.cluster_info.id(); + let mut found_myself = false; + let expected_active_stake = (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT + - NON_CONFORMING_VALIDATOR_PERCENT) + * TOTAL_VALIDATOR_COUNT as u64; + while !found_myself { + sleep(Duration::from_millis(100)); + test_state.cluster_info.flush_push_queue(); + for gossip_record in test_state + .cluster_info + .get_restart_heaviest_fork(&mut cursor) + { + if gossip_record.from == my_pubkey + && gossip_record.observed_stake == expected_active_stake + { + found_myself = true; + break; + } + } + } + exit.store(true, Ordering::Relaxed); + assert!(thread.join().is_ok()); + } + #[test] fn test_aggregate_heaviest_fork() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let test_state = wen_restart_test_init(&ledger_path); let heaviest_fork_slot = test_state.last_voted_fork_slots[0] + 3; let heaviest_fork_bankhash = Hash::new_unique(); + let expected_active_stake = (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT + - NON_CONFORMING_VALIDATOR_PERCENT) + * TOTAL_VALIDATOR_COUNT as u64; let progress = wen_restart_proto::WenRestartProgress { state: RestartState::HeaviestFork.into(), my_heaviest_fork: Some(HeaviestForkRecord { slot: heaviest_fork_slot, bankhash: heaviest_fork_bankhash.to_string(), - total_active_stake: 1500, + total_active_stake: expected_active_stake, shred_version: SHRED_VERSION as u32, wallclock: 0, }), ..Default::default() }; let different_bankhash = Hash::new_unique(); - for keypair in test_state.validator_voting_keypairs.iter().skip(6) { + let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT + - NON_CONFORMING_VALIDATOR_PERCENT) + * TOTAL_VALIDATOR_COUNT as u64 + / 100 + - 1) + .try_into() + .unwrap(); + for keypair in test_state + .validator_voting_keypairs + .iter() + .take(validators_to_take) + { let node_pubkey = keypair.node_keypair.pubkey(); let node = ContactInfo::new_rand(&mut rand::thread_rng(), Some(node_pubkey)); let now = timestamp(); @@ -2512,7 +2691,7 @@ mod tests { &node, heaviest_fork_slot, &different_bankhash, - 1500, + expected_active_stake, &keypair.node_keypair, now, ); @@ -2523,7 +2702,7 @@ mod tests { assert_eq!( aggregate_restart_heaviest_fork( &test_state.wen_restart_proto_path, - 80, + WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT, test_state.cluster_info.clone(), test_state.bank_forks.clone(), Arc::new(AtomicBool::new(false)), @@ -2539,7 +2718,15 @@ mod tests { ), ); // If we have enough stake agreeing with us, we should be able to aggregate the heaviest fork. 
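+ // This second round uses the full 80% threshold: 16 of 20 validators, i.e. 15 peers plus ourselves, agreeing on the same slot and bankhash.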
- for keypair in test_state.validator_voting_keypairs.iter().skip(6) { + let validators_to_take: usize = + (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64 / 100 - 1) + .try_into() + .unwrap(); + for keypair in test_state + .validator_voting_keypairs + .iter() + .take(validators_to_take) + { let node_pubkey = keypair.node_keypair.pubkey(); let node = ContactInfo::new_rand(&mut rand::thread_rng(), Some(node_pubkey)); let now = timestamp(); @@ -2548,14 +2735,14 @@ mod tests { &node, heaviest_fork_slot, &heaviest_fork_bankhash, - 1500, + expected_active_stake, &keypair.node_keypair, now, ); } assert!(aggregate_restart_heaviest_fork( &test_state.wen_restart_proto_path, - 80, + WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT, test_state.cluster_info.clone(), test_state.bank_forks.clone(), Arc::new(AtomicBool::new(false)), From cb782d0a4d5b7e89687a289fe08948b6e10b4a86 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 2 Aug 2024 14:19:05 -0700 Subject: [PATCH 009/529] Disable replay while in wen_restart. (#2007) --- core/src/tvu.rs | 103 +++++++++++++++++++++++++++++++----------------- 1 file changed, 66 insertions(+), 37 deletions(-) diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 4dcd7bbfa3e589..083ff02bbb4abc 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -66,7 +66,7 @@ pub struct Tvu { retransmit_stage: RetransmitStage, window_service: WindowService, cluster_slots_service: ClusterSlotsService, - replay_stage: ReplayStage, + replay_stage: Option, blockstore_cleanup_service: Option, cost_update_service: CostUpdateService, voting_service: VotingService, @@ -160,6 +160,8 @@ impl Tvu { cluster_slots: Arc, wen_restart_repair_slots: Option>>>, ) -> Result { + let in_wen_restart = wen_restart_repair_slots.is_some(); + let TvuSockets { repair: repair_socket, fetch: fetch_sockets, @@ -312,33 +314,37 @@ impl Tvu { let drop_bank_service = DropBankService::new(drop_bank_receiver); - let replay_stage = ReplayStage::new( - replay_stage_config, - blockstore.clone(), - bank_forks.clone(), - cluster_info.clone(), - ledger_signal_receiver, - duplicate_slots_receiver, - poh_recorder.clone(), - tower, - vote_tracker, - cluster_slots, - retransmit_slots_sender, - ancestor_duplicate_slots_receiver, - replay_vote_sender, - duplicate_confirmed_slots_receiver, - gossip_verified_vote_hash_receiver, - cluster_slots_update_sender, - cost_update_sender, - voting_sender, - drop_bank_sender, - block_metadata_notifier, - log_messages_bytes_limit, - prioritization_fee_cache.clone(), - dumped_slots_sender, - banking_tracer, - popular_pruned_forks_receiver, - )?; + let replay_stage = if in_wen_restart { + None + } else { + Some(ReplayStage::new( + replay_stage_config, + blockstore.clone(), + bank_forks.clone(), + cluster_info.clone(), + ledger_signal_receiver, + duplicate_slots_receiver, + poh_recorder.clone(), + tower, + vote_tracker, + cluster_slots, + retransmit_slots_sender, + ancestor_duplicate_slots_receiver, + replay_vote_sender, + duplicate_confirmed_slots_receiver, + gossip_verified_vote_hash_receiver, + cluster_slots_update_sender, + cost_update_sender, + voting_sender, + drop_bank_sender, + block_metadata_notifier, + log_messages_bytes_limit, + prioritization_fee_cache.clone(), + dumped_slots_sender, + banking_tracer, + popular_pruned_forks_receiver, + )?) 
+ }; let blockstore_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { BlockstoreCleanupService::new(blockstore.clone(), max_ledger_shreds, exit.clone()) }); @@ -381,7 +387,9 @@ impl Tvu { if self.blockstore_cleanup_service.is_some() { self.blockstore_cleanup_service.unwrap().join()?; } - self.replay_stage.join()?; + if self.replay_stage.is_some() { + self.replay_stage.unwrap().join()?; + } self.cost_update_service.join()?; self.voting_service.join()?; if let Some(warmup_service) = self.warm_quic_cache_service { @@ -414,10 +422,7 @@ pub mod tests { std::sync::atomic::{AtomicU64, Ordering}, }; - #[ignore] - #[test] - #[serial] - fn test_tvu_exit() { + fn test_tvu_exit(enable_wen_restart: bool) { solana_logger::setup(); let leader = Node::new_localhost(); let target1_keypair = Keypair::new(); @@ -428,15 +433,17 @@ pub mod tests { let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); - let keypair = Arc::new(Keypair::new()); let (turbine_quic_endpoint_sender, _turbine_quic_endpoint_receiver) = tokio::sync::mpsc::channel(/*capacity:*/ 128); let (_turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded(); let (repair_quic_endpoint_sender, _repair_quic_endpoint_receiver) = tokio::sync::mpsc::channel(/*buffer:*/ 128); //start cluster_info1 - let cluster_info1 = - ClusterInfo::new(target1.info.clone(), keypair, SocketAddrSpace::Unspecified); + let cluster_info1 = ClusterInfo::new( + target1.info.clone(), + target1_keypair.into(), + SocketAddrSpace::Unspecified, + ); cluster_info1.insert_info(leader.info); let cref1 = Arc::new(cluster_info1); @@ -464,6 +471,11 @@ pub mod tests { let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let outstanding_repair_requests = Arc::<RwLock<OutstandingShredRepairs>>::default(); let cluster_slots = Arc::new(ClusterSlots::default()); + let wen_restart_repair_slots = if enable_wen_restart { + Some(Arc::new(RwLock::new(vec![]))) + } else { + None + }; let tvu = Tvu::new( &vote_keypair.pubkey(), Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])), @@ -520,11 +532,28 @@ pub mod tests { repair_quic_endpoint_sender, outstanding_repair_requests, cluster_slots, - None, + wen_restart_repair_slots, ) .expect("assume success"); + if enable_wen_restart { + assert!(tvu.replay_stage.is_none()) + } else { + assert!(tvu.replay_stage.is_some()) + } exit.store(true, Ordering::Relaxed); tvu.join().unwrap(); poh_service.join().unwrap(); } + + #[test] + #[serial] + fn test_tvu_exit_no_wen_restart() { + test_tvu_exit(false); + } + + #[test] + #[serial] + fn test_tvu_exit_with_wen_restart() { + test_tvu_exit(true); + } } From 0ea4543e711a68586ab7eedc5b2b4f0fd0671c59 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Fri, 2 Aug 2024 19:09:25 -0300 Subject: [PATCH 010/529] Remove workaround compiler crash (#2338) Remove workaround 16-byte aligned crash --- programs/sbf/rust/sysvar/src/lib.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/programs/sbf/rust/sysvar/src/lib.rs b/programs/sbf/rust/sysvar/src/lib.rs index 88b7a4aa404b4e..50f6891d85e3ed 100644 --- a/programs/sbf/rust/sysvar/src/lib.rs +++ b/programs/sbf/rust/sysvar/src/lib.rs @@ -29,7 +29,7 @@ pub fn process_instruction( sysvar::clock::id().log(); let clock = Clock::from_account_info(&accounts[2]).unwrap(); assert_ne!(clock, Clock::default()); - let got_clock = Clock::get().unwrap(); + let got_clock = Clock::get()?; assert_eq!(clock, got_clock); } @@ -39,7 +39,7 @@
sysvar::epoch_schedule::id().log(); let epoch_schedule = EpochSchedule::from_account_info(&accounts[3]).unwrap(); assert_eq!(epoch_schedule, EpochSchedule::default()); - let got_epoch_schedule = EpochSchedule::get().unwrap(); + let got_epoch_schedule = EpochSchedule::get()?; assert_eq!(epoch_schedule, got_epoch_schedule); } @@ -47,9 +47,8 @@ pub fn process_instruction( msg!("Instructions identifier:"); sysvar::instructions::id().log(); assert_eq!(*accounts[4].owner, sysvar::id()); - let index = instructions::load_current_index_checked(&accounts[4]).unwrap(); - let instruction = - instructions::load_instruction_at_checked(index as usize, &accounts[4]).unwrap(); + let index = instructions::load_current_index_checked(&accounts[4])?; + let instruction = instructions::load_instruction_at_checked(index as usize, &accounts[4])?; assert_eq!(0, index); assert_eq!( instruction, @@ -86,7 +85,7 @@ pub fn process_instruction( msg!("Rent identifier:"); sysvar::rent::id().log(); let rent = Rent::from_account_info(&accounts[6]).unwrap(); - let got_rent = Rent::get().unwrap(); + let got_rent = Rent::get()?; assert_eq!(rent, got_rent); } @@ -116,7 +115,7 @@ pub fn process_instruction( msg!("EpochRewards identifier:"); sysvar::epoch_rewards::id().log(); let epoch_rewards = EpochRewards::from_account_info(&accounts[10]).unwrap(); - let got_epoch_rewards = EpochRewards::get().unwrap(); + let got_epoch_rewards = EpochRewards::get()?; assert_eq!(epoch_rewards, got_epoch_rewards); } From 3739f5a8ced03c0183f2f91501df27579698838d Mon Sep 17 00:00:00 2001 From: Brooks Date: Sat, 3 Aug 2024 00:15:28 -0400 Subject: [PATCH 011/529] hash-cache-tool: Use ahash::HashMap (#2420) --- accounts-db/accounts-hash-cache-tool/src/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index ed0159c3d81b92..c84c68672e677b 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -1,4 +1,5 @@ use { + ahash::{HashMap, RandomState}, bytemuck::Zeroable as _, clap::{ crate_description, crate_name, value_t_or_exit, App, AppSettings, Arg, ArgMatches, @@ -11,7 +12,6 @@ use { }, std::{ cmp::Ordering, - collections::HashMap, fs::{self, File, Metadata}, io::{self, BufReader, Read}, mem::size_of, @@ -377,9 +377,9 @@ fn do_diff_dirs( } // if the binary data of the files are different, they are not equal - let ahash_random_state = ahash::RandomState::new(); - let hash1 = ahash_random_state.hash_one(mmap1.as_ref()); - let hash2 = ahash_random_state.hash_one(mmap2.as_ref()); + let hasher = RandomState::new(); + let hash1 = hasher.hash_one(mmap1.as_ref()); + let hash2 = hasher.hash_one(mmap2.as_ref()); if hash1 != hash2 { return false; } From 96deb8b5e4561fb480759eea389b61323dced1d3 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Sat, 3 Aug 2024 09:25:16 -0700 Subject: [PATCH 012/529] Add node_id_to_stake() to count total stake for each node pubkey. (#2424) * Add node_id_to_stake() to count total stake for each node pubkey. * Fix bad indent. * Fix test name. 
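Sketch of the intended lookup (hypothetical caller; `epoch_stakes` and `node_pubkey` are placeholder names, not from this patch): let stake = epoch_stakes.node_id_to_stake(&node_pubkey).unwrap_or(0); // total stake across the node's vote accounts, zero when the node is unknown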
--- runtime/src/epoch_stakes.rs | 92 +++++++++++++++++----- runtime/src/stakes.rs | 15 ++++ wen-restart/src/heaviest_fork_aggregate.rs | 17 +--- 3 files changed, 93 insertions(+), 31 deletions(-) diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 015daabe7f86c3..4841b2713c34e7 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -57,6 +57,12 @@ impl EpochStakes { &self.node_id_to_vote_accounts } + pub fn node_id_to_stake(&self, node_id: &Pubkey) -> Option<u64> { + self.node_id_to_vote_accounts + .get(node_id) + .map(|x| x.total_stake) + } + pub fn epoch_authorized_voters(&self) -> &Arc<EpochAuthorizedVoters> { &self.epoch_authorized_voters } @@ -218,9 +224,10 @@ pub(crate) mod tests { use { super::*, crate::{stake_account::StakeAccount, stakes::StakesCache}, + im::HashMap as ImHashMap, solana_sdk::{account::AccountSharedData, rent::Rent}, solana_stake_program::stake_state::{self, Delegation}, - solana_vote::vote_account::VoteAccount, + solana_vote::vote_account::{VoteAccount, VoteAccounts}, solana_vote_program::vote_state::{self, create_account_with_authorized}, std::iter, }; @@ -231,12 +238,12 @@ pub(crate) mod tests { authorized_voter: Pubkey, } - #[test] - fn test_parse_epoch_vote_accounts() { - let stake_per_account = 100; - let num_vote_accounts_per_node = 2; + fn new_vote_accounts( + num_nodes: usize, + num_vote_accounts_per_node: usize, + ) -> HashMap<Pubkey, Vec<VoteAccountInfo>> { // Create some vote accounts for each pubkey - let vote_accounts_map: HashMap<Pubkey, Vec<VoteAccountInfo>> = (0..10) + (0..num_nodes) .map(|_| { let node_id = solana_sdk::pubkey::new_rand(); ( node_id, iter::repeat_with(|| { ... }) .collect(), ) }) .collect() + } + + fn new_epoch_vote_accounts( + vote_accounts_map: &HashMap<Pubkey, Vec<VoteAccountInfo>>, + node_id_to_stake_fn: impl Fn(&Pubkey) -> u64, + ) -> VoteAccountsHashMap { + // Create and process the vote accounts + vote_accounts_map + .iter() + .flat_map(|(node_id, vote_accounts)| { + vote_accounts.iter().map(|v| { + let vote_account = VoteAccount::try_from(v.account.clone()).unwrap(); + (v.vote_account, (node_id_to_stake_fn(node_id), vote_account)) + }) + }) + .collect() + } + + #[test] + fn test_parse_epoch_vote_accounts() { + let stake_per_account = 100; + let num_vote_accounts_per_node = 2; + let num_nodes = 10; + + let vote_accounts_map = new_vote_accounts(num_nodes, num_vote_accounts_per_node); let expected_authorized_voters: HashMap<_, _> = vote_accounts_map .iter() .flat_map(|(_, vote_accounts)| { vote_accounts .iter() .map(|v| (v.vote_account, v.authorized_voter)) }) .collect(); - // Create and process the vote accounts - let epoch_vote_accounts: HashMap<_, _> = vote_accounts_map - .iter() - .flat_map(|(_, vote_accounts)| { - vote_accounts.iter().map(|v| { - let vote_account = VoteAccount::try_from(v.account.clone()).unwrap(); - (v.vote_account, (stake_per_account, vote_account)) - }) - }) - .collect(); + let epoch_vote_accounts = + new_epoch_vote_accounts(&vote_accounts_map, |_| stake_per_account); let (total_stake, mut node_id_to_vote_accounts, epoch_authorized_voters) = EpochStakes::parse_epoch_vote_accounts(&epoch_vote_accounts, 0); ... assert_eq!( total_stake, - vote_accounts_map.len() as u64 * num_vote_accounts_per_node as u64 * 100 + num_nodes as u64 * num_vote_accounts_per_node as u64 * 100 ); } ... assert!(versioned.contains_key(&epoch2)); assert!(versioned.contains_key(&epoch3)); } + + #[test] + fn test_node_id_to_stake() { + let num_nodes = 10; + let num_vote_accounts_per_node = 2; + + let vote_accounts_map = new_vote_accounts(num_nodes,
num_vote_accounts_per_node); + let node_id_to_stake_map = vote_accounts_map + .keys() + .enumerate() + .map(|(index, node_id)| (*node_id, ((index + 1) * 100) as u64)) + .collect::<HashMap<_, _>>(); + let epoch_vote_accounts = new_epoch_vote_accounts(&vote_accounts_map, |node_id| { + *node_id_to_stake_map.get(node_id).unwrap() + }); + let epoch_stakes = EpochStakes::new( + Arc::new(StakesEnum::Accounts(Stakes::new_for_tests( + 0, + VoteAccounts::from(Arc::new(epoch_vote_accounts)), + ImHashMap::default(), + ))), + 0, + ); + + assert_eq!(epoch_stakes.total_stake(), 11000); + for (node_id, stake) in node_id_to_stake_map.iter() { + assert_eq!( + epoch_stakes.node_id_to_stake(node_id), + Some(*stake * num_vote_accounts_per_node as u64) + ); + } + } } diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 4f2dedf1facb07..0e4d7b6109ef41 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -316,6 +316,21 @@ impl Stakes<StakeAccount> { }) } + #[cfg(test)] + pub fn new_for_tests( + epoch: Epoch, + vote_accounts: VoteAccounts, + stake_delegations: ImHashMap<Pubkey, StakeAccount>, + ) -> Self { + Self { + vote_accounts, + stake_delegations, + unused: 0, + epoch, + stake_history: StakeHistory::default(), + } + } + pub(crate) fn history(&self) -> &StakeHistory { &self.stake_history } diff --git a/wen-restart/src/heaviest_fork_aggregate.rs b/wen-restart/src/heaviest_fork_aggregate.rs index 0b43b800d18573..dac13bd8274568 100644 --- a/wen-restart/src/heaviest_fork_aggregate.rs +++ b/wen-restart/src/heaviest_fork_aggregate.rs @@ -44,7 +44,7 @@ impl HeaviestForkAggregate { let mut block_stake_map = HashMap::new(); block_stake_map.insert( (my_heaviest_fork_slot, my_heaviest_fork_hash), - Self::validator_stake(epoch_stakes, my_pubkey), + epoch_stakes.node_id_to_stake(my_pubkey).unwrap_or(0), ); Self { supermajority_threshold: wait_for_supermajority_threshold_percent as f64 / 100.0, @@ -58,15 +58,6 @@ impl HeaviestForkAggregate { } } - // TODO(wen): this will a function in separate EpochStakesMap class later. - fn validator_stake(epoch_stakes: &EpochStakes, pubkey: &Pubkey) -> u64 { - epoch_stakes - .node_id_to_vote_accounts() - .get(pubkey) - .map(|x| x.total_stake) - .unwrap_or_default() - } - pub(crate) fn aggregate_from_record( &mut self, key_string: &str, ... ) -> Option<HeaviestForkRecord> { let total_stake = self.epoch_stakes.total_stake(); let from = &received_heaviest_fork.from; - let sender_stake = Self::validator_stake(&self.epoch_stakes, from); + let sender_stake = self.epoch_stakes.node_id_to_stake(from).unwrap_or(0); if sender_stake == 0 { warn!( "Gossip should not accept zero-stake RestartLastVotedFork from {:?}", ... // TODO(wen): use better epoch stake and add a test later.
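+ // Folds over every active peer recorded so far; a pubkey missing from the epoch stakes contributes zero via unwrap_or(0).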
pub(crate) fn total_active_stake(&self) -> u64 { self.active_peers.iter().fold(0, |sum: u64, pubkey| { - sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey)) + sum.saturating_add(self.epoch_stakes.node_id_to_stake(pubkey).unwrap_or(0)) }) } @@ -191,7 +182,7 @@ impl HeaviestForkAggregate { self.active_peers_seen_supermajority .iter() .fold(0, |sum: u64, pubkey| { - sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey)) + sum.saturating_add(self.epoch_stakes.node_id_to_stake(pubkey).unwrap_or(0)) }) } From ccabfcf84921977202fd06d3197cbcea83742133 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sun, 4 Aug 2024 23:34:51 +0800 Subject: [PATCH 013/529] refactor: bank transaction counts (#2382) --- core/src/banking_stage/committer.rs | 14 +- core/src/banking_stage/consume_worker.rs | 32 +- core/src/banking_stage/consumer.rs | 294 +++++++++--------- core/src/banking_stage/leader_slot_metrics.rs | 104 ++++--- core/src/banking_stage/qos_service.rs | 4 +- runtime/src/bank.rs | 55 ++-- 6 files changed, 254 insertions(+), 249 deletions(-) diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 6e5f411dc0f4ae..d91900299107c8 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -74,10 +74,7 @@ impl Committer { bank: &Arc, pre_balance_info: &mut PreBalanceInfo, execute_and_commit_timings: &mut LeaderExecuteAndCommitTimings, - signature_count: u64, - executed_transactions_count: usize, - executed_non_vote_transactions_count: usize, - executed_with_successful_result_count: usize, + execution_counts: &ExecutedTransactionCounts, ) -> (u64, Vec) { let executed_transactions = execution_results .iter() @@ -90,14 +87,7 @@ impl Committer { execution_results, last_blockhash, lamports_per_signature, - ExecutedTransactionCounts { - executed_transactions_count: executed_transactions_count as u64, - executed_non_vote_transactions_count: executed_non_vote_transactions_count as u64, - executed_with_failure_result_count: executed_transactions_count - .saturating_sub(executed_with_successful_result_count) - as u64, - signature_count, - }, + execution_counts, &mut execute_and_commit_timings.execute_timings, )); execute_and_commit_timings.commit_us = commit_time_us; diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 57a4778d3204b3..449ea9ab963a39 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -224,9 +224,7 @@ impl ConsumeWorkerMetrics { fn update_on_execute_and_commit_transactions_output( &self, ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, retryable_transaction_indexes, execute_and_commit_timings, error_counters, @@ -237,13 +235,19 @@ impl ConsumeWorkerMetrics { ) { self.count_metrics .transactions_attempted_execution_count - .fetch_add(*transactions_attempted_execution_count, Ordering::Relaxed); + .fetch_add( + transaction_counts.attempted_execution_count, + Ordering::Relaxed, + ); self.count_metrics .executed_transactions_count - .fetch_add(*executed_transactions_count, Ordering::Relaxed); + .fetch_add(transaction_counts.executed_count, Ordering::Relaxed); self.count_metrics .executed_with_successful_result_count - .fetch_add(*executed_with_successful_result_count, Ordering::Relaxed); + .fetch_add( + transaction_counts.executed_with_successful_result_count, + Ordering::Relaxed, + 
); self.count_metrics .retryable_transaction_count .fetch_add(retryable_transaction_indexes.len(), Ordering::Relaxed); @@ -406,12 +410,12 @@ impl ConsumeWorkerMetrics { } struct ConsumeWorkerCountMetrics { - transactions_attempted_execution_count: AtomicUsize, - executed_transactions_count: AtomicUsize, - executed_with_successful_result_count: AtomicUsize, + transactions_attempted_execution_count: AtomicU64, + executed_transactions_count: AtomicU64, + executed_with_successful_result_count: AtomicU64, retryable_transaction_count: AtomicUsize, retryable_expired_bank_count: AtomicUsize, - cost_model_throttled_transactions_count: AtomicUsize, + cost_model_throttled_transactions_count: AtomicU64, min_prioritization_fees: AtomicU64, max_prioritization_fees: AtomicU64, } @@ -419,12 +423,12 @@ struct ConsumeWorkerCountMetrics { impl Default for ConsumeWorkerCountMetrics { fn default() -> Self { Self { - transactions_attempted_execution_count: AtomicUsize::default(), - executed_transactions_count: AtomicUsize::default(), - executed_with_successful_result_count: AtomicUsize::default(), + transactions_attempted_execution_count: AtomicU64::default(), + executed_transactions_count: AtomicU64::default(), + executed_with_successful_result_count: AtomicU64::default(), retryable_transaction_count: AtomicUsize::default(), retryable_expired_bank_count: AtomicUsize::default(), - cost_model_throttled_transactions_count: AtomicUsize::default(), + cost_model_throttled_transactions_count: AtomicU64::default(), min_prioritization_fees: AtomicU64::new(u64::MAX), max_prioritization_fees: AtomicU64::default(), } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index f4e15edd0888f9..9965b1c3214c3d 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -2,7 +2,9 @@ use { super::{ committer::{CommitTransactionDetails, Committer, PreBalanceInfo}, immutable_deserialized_packet::ImmutableDeserializedPacket, - leader_slot_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary}, + leader_slot_metrics::{ + LeaderSlotMetricsTracker, ProcessTransactionsCounts, ProcessTransactionsSummary, + }, leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, qos_service::QosService, unprocessed_transaction_storage::{ConsumeScannerPayload, UnprocessedTransactionStorage}, @@ -46,21 +48,16 @@ pub const TARGET_NUM_TRANSACTIONS_PER_BATCH: usize = 64; pub struct ProcessTransactionBatchOutput { // The number of transactions filtered out by the cost model - pub(crate) cost_model_throttled_transactions_count: usize, + pub(crate) cost_model_throttled_transactions_count: u64, // Amount of time spent running the cost model pub(crate) cost_model_us: u64, pub execute_and_commit_transactions_output: ExecuteAndCommitTransactionsOutput, } pub struct ExecuteAndCommitTransactionsOutput { - // Total number of transactions that were passed as candidates for execution - pub(crate) transactions_attempted_execution_count: usize, - // The number of transactions of that were executed. See description of in `ProcessTransactionsSummary` - // for possible outcomes of execution. - pub(crate) executed_transactions_count: usize, - // Total number of the executed transactions that returned success/not - // an error. 
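// Illustration (not part of this patch): the `ConsumeWorkerCountMetrics`
// fields above are plain atomic counters bumped with `Relaxed` ordering, as
// in the `fetch_add` calls in this diff; a reporter can then drain a counter
// with `swap`. A minimal self-contained sketch (names are hypothetical):
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
struct CountMetric(AtomicU64);

impl CountMetric {
    fn record(&self, n: u64) {
        // No ordering is needed with respect to other data.
        self.0.fetch_add(n, Ordering::Relaxed);
    }

    // Reads the accumulated value and resets it for the next interval.
    fn report_and_reset(&self) -> u64 {
        self.0.swap(0, Ordering::Relaxed)
    }
}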
- pub(crate) executed_with_successful_result_count: usize, + // Transaction counts reported to `ConsumeWorkerMetrics` and then + // accumulated later for `LeaderSlotMetrics` + pub(crate) transaction_counts: ExecuteAndCommitTransactionsCounts, // Transactions that either were not executed, or were executed and failed to be committed due // to the block ending. pub(crate) retryable_transaction_indexes: Vec<usize>, @@ -73,6 +70,18 @@ pub struct ExecuteAndCommitTransactionsOutput { pub(crate) max_prioritization_fees: u64, } +#[derive(Debug, Default, PartialEq)] +pub struct ExecuteAndCommitTransactionsCounts { + // Total number of transactions that were passed as candidates for execution + pub(crate) attempted_execution_count: u64, + // The number of transactions that were executed. See description in `ProcessTransactionsSummary` + // for possible outcomes of execution. + pub(crate) executed_count: u64, + // Total number of the executed transactions that returned success/not + // an error. + pub(crate) executed_with_successful_result_count: u64, +} + pub struct Consumer { committer: Committer, transaction_recorder: TransactionRecorder, @@ -275,17 +284,8 @@ impl Consumer { ) -> ProcessTransactionsSummary { let mut chunk_start = 0; let mut all_retryable_tx_indexes = vec![]; - // All the transactions that attempted execution. See description of - // struct ProcessTransactionsSummary above for possible outcomes. - let mut total_transactions_attempted_execution_count: usize = 0; - // All transactions that were executed and committed - let mut total_committed_transactions_count: usize = 0; - // All transactions that were executed and committed with a successful result - let mut total_committed_transactions_with_successful_result_count: usize = 0; - // All transactions that were executed but then failed record because the - // slot ended - let mut total_failed_commit_count: usize = 0; - let mut total_cost_model_throttled_transactions_count: usize = 0; + let mut total_transaction_counts = ProcessTransactionsCounts::default(); + let mut total_cost_model_throttled_transactions_count: u64 = 0; let mut total_cost_model_us: u64 = 0; let mut total_execute_and_commit_timings = LeaderExecuteAndCommitTimings::default(); let mut total_error_counters = TransactionErrorMetrics::default(); @@ -315,9 +315,7 @@ impl Consumer { saturating_add_assign!(total_cost_model_us, new_cost_model_us); let ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count: new_transactions_attempted_execution_count, - executed_transactions_count: new_executed_transactions_count, - executed_with_successful_result_count: new_executed_with_successful_result_count, + transaction_counts: new_transaction_counts, retryable_transaction_indexes: new_retryable_transaction_indexes, commit_transactions_result: new_commit_transactions_result, execute_and_commit_timings: new_execute_and_commit_timings, @@ -329,33 +327,16 @@ impl Consumer { total_execute_and_commit_timings.accumulate(&new_execute_and_commit_timings); total_error_counters.accumulate(&new_error_counters); - saturating_add_assign!( - total_transactions_attempted_execution_count, - new_transactions_attempted_execution_count + total_transaction_counts.accumulate( + &new_transaction_counts, + new_commit_transactions_result.is_ok(), ); + overall_min_prioritization_fees = std::cmp::min(overall_min_prioritization_fees, min_prioritization_fees); overall_max_prioritization_fees = std::cmp::min(overall_max_prioritization_fees, max_prioritization_fees); - trace!(
"process_transactions result: {:?}", - new_commit_transactions_result - ); - - if new_commit_transactions_result.is_ok() { - saturating_add_assign!( - total_committed_transactions_count, - new_executed_transactions_count - ); - saturating_add_assign!( - total_committed_transactions_with_successful_result_count, - new_executed_with_successful_result_count - ); - } else { - saturating_add_assign!(total_failed_commit_count, new_executed_transactions_count); - } - // Add the retryable txs (transactions that errored in a way that warrants a retry) // to the list of unprocessed txs. all_retryable_tx_indexes.extend_from_slice(&new_retryable_transaction_indexes); @@ -387,11 +368,7 @@ impl Consumer { ProcessTransactionsSummary { reached_max_poh_height, - transactions_attempted_execution_count: total_transactions_attempted_execution_count, - committed_transactions_count: total_committed_transactions_count, - committed_transactions_with_successful_result_count: - total_committed_transactions_with_successful_result_count, - failed_commit_count: total_failed_commit_count, + transaction_counts: total_transaction_counts, retryable_transaction_indexes: all_retryable_tx_indexes, cost_model_throttled_transactions_count: total_cost_model_throttled_transactions_count, cost_model_us: total_cost_model_us, @@ -646,13 +623,15 @@ impl Consumer { let LoadAndExecuteTransactionsOutput { execution_results, - executed_transactions_count, - executed_non_vote_transactions_count, - executed_with_successful_result_count, - signature_count, + execution_counts, } = load_and_execute_transactions_output; - let transactions_attempted_execution_count = execution_results.len(); + let transaction_counts = ExecuteAndCommitTransactionsCounts { + executed_count: execution_counts.executed_transactions_count, + executed_with_successful_result_count: execution_counts.executed_successfully_count, + attempted_execution_count: execution_results.len() as u64, + }; + let (executed_transactions, execution_results_to_transactions_us) = measure_us!(execution_results .iter() @@ -701,9 +680,7 @@ impl Consumer { )); return ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, retryable_transaction_indexes, commit_transactions_result: Err(recorder_err), execute_and_commit_timings, @@ -713,27 +690,25 @@ impl Consumer { }; } - let (commit_time_us, commit_transaction_statuses) = if executed_transactions_count != 0 { - self.committer.commit_transactions( - batch, - execution_results, - last_blockhash, - lamports_per_signature, - starting_transaction_index, - bank, - &mut pre_balance_info, - &mut execute_and_commit_timings, - signature_count, - executed_transactions_count, - executed_non_vote_transactions_count, - executed_with_successful_result_count, - ) - } else { - ( - 0, - vec![CommitTransactionDetails::NotCommitted; execution_results.len()], - ) - }; + let (commit_time_us, commit_transaction_statuses) = + if execution_counts.executed_transactions_count != 0 { + self.committer.commit_transactions( + batch, + execution_results, + last_blockhash, + lamports_per_signature, + starting_transaction_index, + bank, + &mut pre_balance_info, + &mut execute_and_commit_timings, + &execution_counts, + ) + } else { + ( + 0, + vec![CommitTransactionDetails::NotCommitted; execution_results.len()], + ) + }; drop(freeze_lock); @@ -752,14 +727,12 @@ impl Consumer { ); debug_assert_eq!( - commit_transaction_statuses.len(), - 
transactions_attempted_execution_count + transaction_counts.attempted_execution_count, + commit_transaction_statuses.len() as u64, ); ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, retryable_transaction_indexes, commit_transactions_result: Ok(commit_transaction_statuses), execute_and_commit_timings, @@ -1140,16 +1113,19 @@ mod tests { consumer.process_and_record_transactions(&bank, &transactions, 0); let ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, commit_transactions_result, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transactions_attempted_execution_count, 1); - assert_eq!(executed_transactions_count, 1); - assert_eq!(executed_with_successful_result_count, 1); + assert_eq!( + transaction_counts, + ExecuteAndCommitTransactionsCounts { + attempted_execution_count: 1, + executed_count: 1, + executed_with_successful_result_count: 1, + } + ); assert!(commit_transactions_result.is_ok()); // Tick up to max tick height @@ -1185,17 +1161,20 @@ mod tests { consumer.process_and_record_transactions(&bank, &transactions, 0); let ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, retryable_transaction_indexes, commit_transactions_result, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transactions_attempted_execution_count, 1); - // Transactions was still executed, just wasn't committed, so should be counted here. - assert_eq!(executed_transactions_count, 1); - assert_eq!(executed_with_successful_result_count, 1); + assert_eq!( + transaction_counts, + ExecuteAndCommitTransactionsCounts { + attempted_execution_count: 1, + // Transactions was still executed, just wasn't committed, so should be counted here. + executed_count: 1, + executed_with_successful_result_count: 1, + } + ); assert_eq!(retryable_transaction_indexes, vec![0]); assert_matches!( commit_transactions_result, @@ -1323,16 +1302,19 @@ mod tests { let process_transactions_batch_output = consumer.process_and_record_transactions(&bank, &transactions, 0); let ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, commit_transactions_result, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transactions_attempted_execution_count, 1); - assert_eq!(executed_transactions_count, 1); - assert_eq!(executed_with_successful_result_count, 0); + assert_eq!( + transaction_counts, + ExecuteAndCommitTransactionsCounts { + attempted_execution_count: 1, + executed_count: 1, + executed_with_successful_result_count: 0, + } + ); assert!(commit_transactions_result.is_ok()); // Ensure that poh did the last tick after recording transactions @@ -1425,17 +1407,20 @@ mod tests { consumer.process_and_record_transactions(&bank, &transactions, 0); let ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, - executed_with_successful_result_count, + transaction_counts, commit_transactions_result, retryable_transaction_indexes, .. 
} = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transactions_attempted_execution_count, 1); - assert_eq!(executed_transactions_count, 0); - assert_eq!(executed_with_successful_result_count, 0); + assert_eq!( + transaction_counts, + ExecuteAndCommitTransactionsCounts { + attempted_execution_count: 1, + executed_count: 0, + executed_with_successful_result_count: 0, + } + ); assert!(retryable_transaction_indexes.is_empty()); assert_eq!( commit_transactions_result.ok(), @@ -1517,11 +1502,11 @@ mod tests { consumer.process_and_record_transactions(&bank, &transactions, 0); let ExecuteAndCommitTransactionsOutput { - executed_with_successful_result_count, + transaction_counts, commit_transactions_result, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(executed_with_successful_result_count, 1); + assert_eq!(transaction_counts.executed_with_successful_result_count, 1); assert!(commit_transactions_result.is_ok()); let block_cost = get_block_cost(); @@ -1547,12 +1532,12 @@ mod tests { consumer.process_and_record_transactions(&bank, &transactions, 0); let ExecuteAndCommitTransactionsOutput { - executed_with_successful_result_count, + transaction_counts, commit_transactions_result, retryable_transaction_indexes, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(executed_with_successful_result_count, 1); + assert_eq!(transaction_counts.executed_with_successful_result_count, 1); assert!(commit_transactions_result.is_ok()); // first one should have been committed, second one not committed due to AccountInUse error during @@ -1671,16 +1656,21 @@ mod tests { let _ = poh_simulator.join(); let ExecuteAndCommitTransactionsOutput { - transactions_attempted_execution_count, - executed_transactions_count, + transaction_counts, retryable_transaction_indexes, commit_transactions_result, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transactions_attempted_execution_count, 2); - assert_eq!(executed_transactions_count, 1); - assert_eq!(retryable_transaction_indexes, vec![1],); + assert_eq!( + transaction_counts, + ExecuteAndCommitTransactionsCounts { + attempted_execution_count: 2, + executed_count: 1, + executed_with_successful_result_count: 1, + } + ); + assert_eq!(retryable_transaction_indexes, vec![1]); assert!(commit_transactions_result.is_ok()); } Blockstore::destroy(ledger_path.path()).unwrap(); @@ -1723,28 +1713,30 @@ mod tests { genesis_config.hash(), )); - let transactions_count = transactions.len(); + let transactions_len = transactions.len(); let ProcessTransactionsSummary { reached_max_poh_height, - transactions_attempted_execution_count, - committed_transactions_count, - committed_transactions_with_successful_result_count, - failed_commit_count, + transaction_counts, retryable_transaction_indexes, .. 
} = execute_transactions_with_dummy_poh_service(bank, transactions); // All the transactions should have been replayed, but only 1 committed assert!(!reached_max_poh_height); - assert_eq!(transactions_attempted_execution_count, transactions_count); - // Both transactions should have been committed, even though one was an error, - // because InstructionErrors are committed - assert_eq!(committed_transactions_count, 2); - assert_eq!(committed_transactions_with_successful_result_count, 1); - assert_eq!(failed_commit_count, 0); + assert_eq!( + transaction_counts, + ProcessTransactionsCounts { + attempted_execution_count: transactions_len as u64, + // Both transactions should have been committed, even though one was an error, + // because InstructionErrors are committed + committed_transactions_count: 2, + committed_transactions_with_successful_result_count: 1, + executed_but_failed_commit: 0, + } + ); assert_eq!( retryable_transaction_indexes, - (1..transactions_count - 1).collect::>() + (1..transactions_len - 1).collect::>() ); } @@ -1782,28 +1774,30 @@ mod tests { genesis_config.hash(), )); - let transactions_count = transactions.len(); + let transactions_len = transactions.len(); let ProcessTransactionsSummary { reached_max_poh_height, - transactions_attempted_execution_count, - committed_transactions_count, - committed_transactions_with_successful_result_count, - failed_commit_count, + transaction_counts, retryable_transaction_indexes, .. } = execute_transactions_with_dummy_poh_service(bank, transactions); // All the transactions should have been replayed, but only 2 committed (first and last) assert!(!reached_max_poh_height); - assert_eq!(transactions_attempted_execution_count, transactions_count); - assert_eq!(committed_transactions_count, 2); - assert_eq!(committed_transactions_with_successful_result_count, 2); - assert_eq!(failed_commit_count, 0,); + assert_eq!( + transaction_counts, + ProcessTransactionsCounts { + attempted_execution_count: transactions_len as u64, + committed_transactions_count: 2, + committed_transactions_with_successful_result_count: 2, + executed_but_failed_commit: 0, + } + ); // Everything except first and last index of the transactions failed and are last retryable assert_eq!( retryable_transaction_indexes, - (1..transactions_count - 1).collect::>() + (1..transactions_len - 1).collect::>() ); } @@ -1861,19 +1855,21 @@ mod tests { let ProcessTransactionsSummary { reached_max_poh_height, - transactions_attempted_execution_count, - committed_transactions_count, - committed_transactions_with_successful_result_count, - failed_commit_count, + transaction_counts, mut retryable_transaction_indexes, .. 
} = process_transactions_summary; assert!(reached_max_poh_height); - assert_eq!(transactions_attempted_execution_count, 1); - assert_eq!(failed_commit_count, 1); - // MaxHeightReached error does not commit, should be zero here - assert_eq!(committed_transactions_count, 0); - assert_eq!(committed_transactions_with_successful_result_count, 0); + assert_eq!( + transaction_counts, + ProcessTransactionsCounts { + attempted_execution_count: 1, + // MaxHeightReached error does not commit, should be zero here + committed_transactions_count: 0, + committed_transactions_with_successful_result_count: 0, + executed_but_failed_commit: 1, + } + ); retryable_transaction_indexes.sort_unstable(); let expected: Vec = (0..transactions.len()).collect(); diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index 390e128b6c8428..4e290600a4de3c 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -1,5 +1,6 @@ use { super::{ + consumer::ExecuteAndCommitTransactionsCounts, leader_slot_timing_metrics::{LeaderExecuteAndCommitTimings, LeaderSlotTimingMetrics}, packet_deserializer::PacketReceiverStats, unprocessed_transaction_storage::{ @@ -19,47 +20,80 @@ use { /// counted in `Self::retryable_transaction_indexes`. /// 2) Did not execute due to some fatal error like too old, or duplicate signature. These /// will be dropped from the transactions queue and not counted in `Self::retryable_transaction_indexes` -/// 3) Were executed and committed, captured by `committed_transactions_count` below. -/// 4) Were executed and failed commit, captured by `failed_commit_count` below. +/// 3) Were executed and committed, captured by `transaction_counts` below. +/// 4) Were executed and failed commit, captured by `transaction_counts` below. pub(crate) struct ProcessTransactionsSummary { - // Returns true if we hit the end of the block/max PoH height for the block before - // processing all the transactions in the batch. + /// Returns true if we hit the end of the block/max PoH height for the block + /// before processing all the transactions in the batch. pub reached_max_poh_height: bool, - // Total number of transactions that were passed as candidates for execution. See description - // of struct above for possible outcomes for these transactions - pub transactions_attempted_execution_count: usize, + /// Total transaction counts tracked for reporting `LeaderSlotMetrics`. See + /// description of struct above for possible outcomes for these transactions + pub transaction_counts: ProcessTransactionsCounts, - // Total number of transactions that made it into the block - pub committed_transactions_count: usize, - - // Total number of transactions that made it into the block where the transactions - // output from execution was success/no error. 
- pub committed_transactions_with_successful_result_count: usize, - - // All transactions that were executed but then failed record because the - // slot ended - pub failed_commit_count: usize, - - // Indexes of transactions in the transactions slice that were not committed but are retryable + /// Indexes of transactions in the transactions slice that were not + /// committed but are retryable pub retryable_transaction_indexes: Vec, - // The number of transactions filtered out by the cost model - pub cost_model_throttled_transactions_count: usize, + /// The number of transactions filtered out by the cost model + pub cost_model_throttled_transactions_count: u64, - // Total amount of time spent running the cost model + /// Total amount of time spent running the cost model pub cost_model_us: u64, - // Breakdown of time spent executing and committing transactions + /// Breakdown of time spent executing and committing transactions pub execute_and_commit_timings: LeaderExecuteAndCommitTimings, - // Breakdown of all the transaction errors from transactions passed for execution + /// Breakdown of all the transaction errors from transactions passed for + /// execution pub error_counters: TransactionErrorMetrics, pub min_prioritization_fees: u64, pub max_prioritization_fees: u64, } +#[derive(Debug, Default, PartialEq)] +pub struct ProcessTransactionsCounts { + /// Total number of transactions that were passed as candidates for execution + pub attempted_execution_count: u64, + /// Total number of transactions that made it into the block + pub committed_transactions_count: u64, + /// Total number of transactions that made it into the block where the + /// transactions output from execution was success/no error. + pub committed_transactions_with_successful_result_count: u64, + /// All transactions that were executed but then failed record because the + /// slot ended + pub executed_but_failed_commit: u64, +} + +impl ProcessTransactionsCounts { + pub fn accumulate( + &mut self, + transaction_counts: &ExecuteAndCommitTransactionsCounts, + committed: bool, + ) { + saturating_add_assign!( + self.attempted_execution_count, + transaction_counts.attempted_execution_count + ); + if committed { + saturating_add_assign!( + self.committed_transactions_count, + transaction_counts.executed_count + ); + saturating_add_assign!( + self.committed_transactions_with_successful_result_count, + transaction_counts.executed_with_successful_result_count + ); + } else { + saturating_add_assign!( + self.executed_but_failed_commit, + transaction_counts.executed_count + ); + } + } +} + // Metrics describing prioritization fee information for each transaction storage before processing transactions #[derive(Debug, Default)] struct LeaderPrioritizationFeesMetrics { @@ -559,10 +593,7 @@ impl LeaderSlotMetricsTracker { ) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { let ProcessTransactionsSummary { - transactions_attempted_execution_count, - committed_transactions_count, - committed_transactions_with_successful_result_count, - failed_commit_count, + transaction_counts, ref retryable_transaction_indexes, cost_model_throttled_transactions_count, cost_model_us, @@ -577,28 +608,28 @@ impl LeaderSlotMetricsTracker { leader_slot_metrics .packet_count_metrics .transactions_attempted_execution_count, - *transactions_attempted_execution_count as u64 + transaction_counts.attempted_execution_count ); saturating_add_assign!( leader_slot_metrics .packet_count_metrics .committed_transactions_count, - 
*committed_transactions_count as u64 + transaction_counts.committed_transactions_count ); saturating_add_assign!( leader_slot_metrics .packet_count_metrics .committed_transactions_with_successful_result_count, - *committed_transactions_with_successful_result_count as u64 + transaction_counts.committed_transactions_with_successful_result_count ); saturating_add_assign!( leader_slot_metrics .packet_count_metrics .executed_transactions_failed_commit_count, - *failed_commit_count as u64 + transaction_counts.executed_but_failed_commit ); saturating_add_assign!( @@ -612,9 +643,10 @@ impl LeaderSlotMetricsTracker { leader_slot_metrics .packet_count_metrics .nonretryable_errored_transactions_count, - transactions_attempted_execution_count - .saturating_sub(*committed_transactions_count) - .saturating_sub(retryable_transaction_indexes.len()) as u64 + transaction_counts + .attempted_execution_count + .saturating_sub(transaction_counts.committed_transactions_count) + .saturating_sub(retryable_transaction_indexes.len() as u64) ); saturating_add_assign!( @@ -635,7 +667,7 @@ impl LeaderSlotMetricsTracker { leader_slot_metrics .packet_count_metrics .cost_model_throttled_transactions_count, - *cost_model_throttled_transactions_count as u64 + *cost_model_throttled_transactions_count ); saturating_add_assign!( diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index eafc3052aa26dd..afa871277cce42 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -44,7 +44,7 @@ impl QosService { bank: &Bank, transactions: &[SanitizedTransaction], pre_results: impl Iterator>, - ) -> (Vec>, usize) { + ) -> (Vec>, u64) { let transaction_costs = self.compute_transaction_costs(&bank.feature_set, transactions.iter(), pre_results); let (transactions_qos_cost_results, num_included) = self.select_transactions_per_cost( @@ -56,7 +56,7 @@ impl QosService { transactions_qos_cost_results.iter(), )); let cost_model_throttled_transactions_count = - transactions.len().saturating_sub(num_included); + transactions.len().saturating_sub(num_included) as u64; ( transactions_qos_cost_results, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 2eeb6f924b13a3..cebf31f2d7f945 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -321,14 +321,9 @@ pub struct LoadAndExecuteTransactionsOutput { // Vector of results indicating whether a transaction was executed or could not // be executed. Note executed transactions can still have failed! pub execution_results: Vec, - // Total number of transactions that were executed - pub executed_transactions_count: usize, - // Number of non-vote transactions that were executed - pub executed_non_vote_transactions_count: usize, - // Total number of the executed transactions that returned success/not - // an error. - pub executed_with_successful_result_count: usize, - pub signature_count: u64, + // Executed transaction counts used to update bank transaction counts and + // for metrics reporting. 
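// Illustration (not part of this patch): with `executed_successfully_count`
// stored in `ExecutedTransactionCounts`, the failure count is no longer a
// separate field but is derived where needed, as `commit_transactions` does
// below with `saturating_sub`; sketched here as a hypothetical helper:
fn executed_with_failure_result_count(counts: &ExecutedTransactionCounts) -> u64 {
    counts
        .executed_transactions_count
        .saturating_sub(counts.executed_successfully_count)
}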
+ pub execution_counts: ExecutedTransactionCounts, } pub struct TransactionSimulationResult { @@ -890,10 +885,11 @@ struct PrevEpochInflationRewards { foundation_rate: f64, } +#[derive(Debug, Default, PartialEq)] pub struct ExecutedTransactionCounts { pub executed_transactions_count: u64, + pub executed_successfully_count: u64, pub executed_non_vote_transactions_count: u64, - pub executed_with_failure_result_count: u64, pub signature_count: u64, } @@ -3635,10 +3631,7 @@ impl Bank { measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.execution_results)); timings.saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_us); - let mut signature_count = 0; - let mut executed_transactions_count: usize = 0; - let mut executed_non_vote_transactions_count: usize = 0; - let mut executed_with_successful_result_count: usize = 0; + let mut execution_counts = ExecutedTransactionCounts::default(); let err_count = &mut error_counters.total; for (execution_result, tx) in sanitized_output.execution_results.iter().zip(sanitized_txs) { @@ -3656,17 +3649,18 @@ impl Bank { // Signature count must be accumulated only if the transaction // is executed, otherwise a mismatched count between banking and // replay could occur - signature_count += u64::from(tx.message().header().num_required_signatures); - executed_transactions_count += 1; + execution_counts.signature_count += + u64::from(tx.message().header().num_required_signatures); + execution_counts.executed_transactions_count += 1; if !tx.is_simple_vote_transaction() { - executed_non_vote_transactions_count += 1; + execution_counts.executed_non_vote_transactions_count += 1; } } match execution_result.flattened_result() { Ok(()) => { - executed_with_successful_result_count += 1; + execution_counts.executed_successfully_count += 1; } Err(err) => { if *err_count == 0 { @@ -3679,10 +3673,7 @@ impl Bank { LoadAndExecuteTransactionsOutput { execution_results: sanitized_output.execution_results, - executed_transactions_count, - executed_non_vote_transactions_count, - executed_with_successful_result_count, - signature_count, + execution_counts, } } @@ -3888,7 +3879,7 @@ impl Bank { mut execution_results: Vec, last_blockhash: Hash, lamports_per_signature: u64, - counts: ExecutedTransactionCounts, + execution_counts: &ExecutedTransactionCounts, timings: &mut ExecuteTimings, ) -> Vec { assert!( @@ -3899,9 +3890,9 @@ impl Bank { let ExecutedTransactionCounts { executed_transactions_count, executed_non_vote_transactions_count, - executed_with_failure_result_count, + executed_successfully_count, signature_count, - } = counts; + } = *execution_counts; self.increment_transaction_count(executed_transactions_count); self.increment_non_vote_transaction_count_since_restart( @@ -3909,6 +3900,8 @@ impl Bank { ); self.increment_signature_count(signature_count); + let executed_with_failure_result_count = + executed_transactions_count.saturating_sub(executed_successfully_count); if executed_with_failure_result_count > 0 { self.transaction_error_count .fetch_add(executed_with_failure_result_count, Relaxed); @@ -4702,10 +4695,7 @@ impl Bank { let LoadAndExecuteTransactionsOutput { execution_results, - executed_transactions_count, - executed_non_vote_transactions_count, - executed_with_successful_result_count, - signature_count, + execution_counts, } = self.load_and_execute_transactions( batch, max_age, @@ -4729,14 +4719,7 @@ impl Bank { execution_results, last_blockhash, lamports_per_signature, - ExecutedTransactionCounts { - executed_transactions_count: 
executed_transactions_count as u64, - executed_non_vote_transactions_count: executed_non_vote_transactions_count as u64, - executed_with_failure_result_count: executed_transactions_count - .saturating_sub(executed_with_successful_result_count) - as u64, - signature_count, - }, + &execution_counts, timings, ); let post_balances = if collect_balances { From ecc05c50b81006e2cd43146eb80148dec54f65ce Mon Sep 17 00:00:00 2001 From: steviez Date: Sun, 4 Aug 2024 16:46:40 -0500 Subject: [PATCH 014/529] Cleanup PoH speed check error (#2400) Cleanup PoH speed check logs and error The current logging and error message from the Poh speed check are confusing. If the node fails, the error message states that the node is too slow. But, the reported numbers are slot durations in nanoseconds where a slower node will have a larger number. Lastly, the reported numbers aren't labeled with a unit so it is hard to make sense of this without looking at the actual code. The check now computes and reports hashes per second. --- core/src/validator.rs | 44 +++++++++++++++++++++++-------------------- entry/src/poh.rs | 6 +++--- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 97acfb70533f8f..4052a48e144a6f 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -39,7 +39,7 @@ use { utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, - solana_entry::poh::compute_hash_time_ns, + solana_entry::poh::compute_hash_time, solana_geyser_plugin_manager::{ geyser_plugin_service::GeyserPluginService, GeyserPluginManagerRequest, }, @@ -1680,29 +1680,28 @@ fn active_vote_account_exists_in_bank(bank: &Bank, vote_account: &Pubkey) -> boo fn check_poh_speed( genesis_config: &GenesisConfig, maybe_hash_samples: Option, -) -> Result<(), String> { +) -> Result<(), ValidatorError> { if let Some(hashes_per_tick) = genesis_config.hashes_per_tick() { let ticks_per_slot = genesis_config.ticks_per_slot(); let hashes_per_slot = hashes_per_tick * ticks_per_slot; - let hash_samples = maybe_hash_samples.unwrap_or(hashes_per_slot); - let hash_time_ns = compute_hash_time_ns(hash_samples); - - let my_ns_per_slot = (hash_time_ns * hashes_per_slot) / hash_samples; - debug!("computed: ns_per_slot: {}", my_ns_per_slot); - let target_ns_per_slot = genesis_config.ns_per_slot() as u64; - debug!( - "cluster ns_per_hash: {}ns ns_per_slot: {}", - target_ns_per_slot / hashes_per_slot, - target_ns_per_slot + + let hash_time = compute_hash_time(hash_samples); + let my_hashes_per_second = (hash_samples as f64 / hash_time.as_secs_f64()) as u64; + let target_slot_duration = Duration::from_nanos(genesis_config.ns_per_slot() as u64); + let target_hashes_per_second = + (hashes_per_slot as f64 / target_slot_duration.as_secs_f64()) as u64; + + info!( + "PoH speed check: \ + computed hashes per second {my_hashes_per_second}, \ + target hashes per second {target_hashes_per_second}" ); - if my_ns_per_slot < target_ns_per_slot { - let extra_ns = target_ns_per_slot - my_ns_per_slot; - info!("PoH speed check: Will sleep {}ns per slot.", extra_ns); - } else { - return Err(format!( - "PoH is slower than cluster target tick rate! 
mine: {my_ns_per_slot} cluster: {target_ns_per_slot}.", - )); + return Err(ValidatorError::PohTooSlow { + mine: my_hashes_per_second, + target: target_hashes_per_second, + }); } } Ok(()) @@ -1861,7 +1860,7 @@ fn load_blockstore( } if !config.no_poh_speed_test { - check_poh_speed(&genesis_config, None)?; + check_poh_speed(&genesis_config, None).map_err(|err| format!("{err}"))?; } let mut blockstore = @@ -2345,6 +2344,11 @@ pub enum ValidatorError { #[error("{0}")] Other(String), + #[error( + "PoH hashes/second rate is slower than the cluster target: mine {mine}, cluster {target}" + )] + PohTooSlow { mine: u64, target: u64 }, + #[error(transparent)] TraceError(#[from] TraceError), diff --git a/entry/src/poh.rs b/entry/src/poh.rs index b54c8a745ae4fd..9bf6be66a0594f 100644 --- a/entry/src/poh.rs +++ b/entry/src/poh.rs @@ -109,18 +109,18 @@ impl Poh { } } -pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 { +pub fn compute_hash_time(hashes_sample_size: u64) -> Duration { info!("Running {} hashes...", hashes_sample_size); let mut v = Hash::default(); let start = Instant::now(); for _ in 0..hashes_sample_size { v = hash(v.as_ref()); } - start.elapsed().as_nanos() as u64 + start.elapsed() } pub fn compute_hashes_per_tick(duration: Duration, hashes_sample_size: u64) -> u64 { - let elapsed_ms = compute_hash_time_ns(hashes_sample_size) / (1000 * 1000); + let elapsed_ms = compute_hash_time(hashes_sample_size).as_millis() as u64; duration.as_millis() as u64 * hashes_sample_size / elapsed_ms } From 2316fea4c0852e59c071f72d72db020017ffd7d0 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 5 Aug 2024 15:54:00 +0800 Subject: [PATCH 015/529] ci: move benchmark to Github Action (#2251) * ci: remove deprecated benchmark scripts * ci: add upload benchmark script * ci: add benchmark github actions script * ci: silent curl when uploading benchmark datapoint * ci: add name as a tag * empty commit 1 * empty commit 2 * empty commit 3 * empty commit 4 * empty commit 5 * empty commit 6 * empty commit 7 * empty commit 8 * empty commit 9 * empty commit 10 * unify benchmark result format --- .buildkite/scripts/build-bench.sh | 26 ------- .buildkite/scripts/build-bench.test.sh | 35 --------- .github/workflows/benchmark.yml | 98 ++++++++++++++++++++++++++ ci/buildkite-pipeline.sh | 17 ----- ci/buildkite-solana-private.sh | 15 ---- ci/upload-benchmark.sh | 88 +++++++++++++++++++++++ 6 files changed, 186 insertions(+), 93 deletions(-) delete mode 100755 .buildkite/scripts/build-bench.sh delete mode 100755 .buildkite/scripts/build-bench.test.sh create mode 100644 .github/workflows/benchmark.yml create mode 100755 ci/upload-benchmark.sh diff --git a/.buildkite/scripts/build-bench.sh b/.buildkite/scripts/build-bench.sh deleted file mode 100755 index 27f156c141fe03..00000000000000 --- a/.buildkite/scripts/build-bench.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -set -e -here=$(dirname "$0") - -# shellcheck source=.buildkite/scripts/common.sh -source "$here"/common.sh - -agent="${1-solana}" - -build_steps() { - cat <` to ensure the datapoints are correct + # it should look similar to this: + # + # ``` + # datapoint: ,commit=xxxx,test_suite=xxxx,name=bench_accounts_delta_hash median=48035858,deviation=2118806i + # ``` + # + # you only need to check `name`, `median` and `deviation` + # + test: - { name: "solana-sdk", commands: ["cargo +$rust_nightly bench -p solana-sdk"], } - { name: "solana-runtime", commands: ["cargo +$rust_nightly
bench -p solana-runtime"], + } + - { + name: "solana-gossip", + commands: ["cargo +$rust_nightly bench -p solana-gossip"], + } + - { + name: "solana-poh", + commands: ["cargo +$rust_nightly bench -p solana-poh"], + } + - { + name: "solana-core", + commands: ["cargo +$rust_nightly bench -p solana-core"], + } + - { + name: "sbf", + before_command: "make -C programs/sbf all", + commands: + [ + "cargo +$rust_nightly bench --manifest-path programs/sbf/Cargo.toml --features=sbf_c", + ], + } + # spliting solana-accounts-db because it includes criterion bench + - { + name: "solana-accounts-db", + commands: + [ + "cargo +$rust_nightly bench -p solana-accounts-db --bench accounts_index", + "cargo +$rust_nightly bench -p solana-accounts-db --bench accounts", + "cargo +$rust_nightly bench -p solana-accounts-db --bench append_vec", + "cargo +$rust_nightly bench -p solana-accounts-db --bench bench_accounts_file -- --output-format bencher", + "cargo +$rust_nightly bench -p solana-accounts-db --bench bench_hashing -- --output-format bencher", + "cargo +$rust_nightly bench -p solana-accounts-db --bench bench_serde -- --output-format bencher", + ], + } + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Before Command + if: ${{ matrix.test.before_command != '' }} + run: | + ${{ matrix.test.before_command }} + + - name: Command + run: | + source ci/rust-version.sh nightly + echo '${{ toJson(matrix.test.commands) }}' | jq -r '.[]' | while read command; do + eval $command | tee -a benchmark + done + + - name: Upload Result + run: | + TEST_SUITE="${{ matrix.test.name }}" \ + COMMIT_HASH="$(git rev-parse HEAD)" \ + INFLUX_HOST="${{ secrets.BENCHMARK_INFLUX_HOST }}" \ + INFLUX_DB="${{ secrets.BENCHMARK_INFLUX_DB }}" \ + INFLUX_USER="${{ secrets.BENCHMARK_INFLUX_USER }}" \ + INFLUX_PASSWORD="${{ secrets.BENCHMARK_INFLUX_PASSWORD }}" \ + INFLUX_MEASUREMENT="${{ secrets.BENCHMARK_INFLUX_MEASUREMENT }}" \ + ./ci/upload-benchmark.sh benchmark diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 4fc27feeac69e1..0c8327ee5961b3 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -283,23 +283,6 @@ EOF "wasm skipped as no relevant files were modified" fi - # Benches... - if affects \ - .rs$ \ - Cargo.lock$ \ - Cargo.toml$ \ - ^ci/rust-version.sh \ - ^ci/test-coverage.sh \ - ^ci/test-bench.sh \ - ^ci/bench \ - .buildkite/scripts/build-bench.sh \ - ; then - .buildkite/scripts/build-bench.sh >> "$output_file" - else - annotate --style info --context test-bench \ - "Bench skipped as no .rs files were modified" - fi - # Coverage... if affects \ .rs$ \ diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index d514ac0ad25c65..c412c93257cf1e 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -214,21 +214,6 @@ EOF "wasm skipped as no relevant files were modified" fi - # Benches... - if affects \ - .rs$ \ - Cargo.lock$ \ - Cargo.toml$ \ - ^ci/rust-version.sh \ - ^ci/test-coverage.sh \ - ^ci/test-bench.sh \ - ; then - .buildkite/scripts/build-bench.sh sol-private >> "$output_file" - else - annotate --style info --context test-bench \ - "Bench skipped as no .rs files were modified" - fi - # Coverage... 
if affects \ .rs$ \ diff --git a/ci/upload-benchmark.sh b/ci/upload-benchmark.sh new file mode 100755 index 00000000000000..c775cc38b1189b --- /dev/null +++ b/ci/upload-benchmark.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +set -e + +usage() { + cat <&2 +USAGE: + $0 + +REQUIRED ENVIRONMENTS: + INFLUX_HOST Hostname or IP address of the InfluxDB server + INFLUX_DB Name of the InfluxDB database + INFLUX_USER Username for InfluxDB + INFLUX_PASSWORD Password for InfluxDB + INFLUX_MEASUREMENT Measurement for InfluxDB + +OPTIONAL ENVIRONMENTS: + COMMIT_HASH Commit hash of the benchmark file + TEST_SUITE The group name for all tests in the benchmark file + DRY_RUN Dry run + +ARGS: + The output file generated by running + \`cargo bench -- -Z unstable-options --format=json\` + contains the benchmark results in JSON format +EOF +} + +print_error_and_exit() { + local msg="$1" + echo "error: $msg" >&2 + echo "" + usage + exit 1 +} + +check_env() { + local var_name="$1" + if [ -z "${!var_name}" ]; then + print_error_and_exit "Environment variable $var_name is required" + fi +} + +filepath="$1" +if [ ! -f "$filepath" ]; then + print_error_and_exit "invalid " +fi + +if [ -z "$COMMIT_HASH" ]; then + COMMIT_HASH=$(uuidgen) +fi + +if [ -z "$TEST_SUITE" ]; then + TEST_SUITE="$(basename "${BENCHMARK_FILEPATH}")-$(date +%s)" +fi + +if [ -z "$DRY_RUN" ]; then + required_env_vars=( + "INFLUX_HOST" + "INFLUX_DB" + "INFLUX_USER" + "INFLUX_PASSWORD" + "INFLUX_MEASUREMENT" + ) + for var in "${required_env_vars[@]}"; do + check_env "$var" + done +fi + +while IFS= read -r line; do + + if [[ $line =~ ^test\ (.*)\ \.\.\.\ bench:\ *([0-9,]+)\ ns\/iter\ \(\+\/-\ *([0-9,]+)\) ]]; then + test_name="${BASH_REMATCH[1]}" + ns_iter="${BASH_REMATCH[2]}" + plus_minus="${BASH_REMATCH[3]}" + + ns_iter=$(echo "$ns_iter" | tr -d ',') + plus_minus=$(echo "$plus_minus" | tr -d ',') + + datapoint="${INFLUX_MEASUREMENT},commit=${COMMIT_HASH},test_suite=${TEST_SUITE},name=${test_name} median=${ns_iter}i,deviation=${plus_minus}i" + echo "datapoint: $datapoint" + + if [[ -z "$DRY_RUN" ]]; then + curl -s -X POST "${INFLUX_HOST}/write?db=${INFLUX_DB}" --data-binary "$datapoint" + fi + fi + +done <"$filepath" From 4ee8920bb8c31ea608ca24819daf5ef94185b3c1 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 5 Aug 2024 08:29:35 -0400 Subject: [PATCH 016/529] hash-cache-tool: Adds fn to extract latest entries (#2434) --- Cargo.lock | 1 + .../accounts-hash-cache-tool/Cargo.toml | 1 + .../accounts-hash-cache-tool/src/main.rs | 67 +++++++++---------- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9175d0d9c0682..e8330c34483243 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -72,6 +72,7 @@ dependencies = [ "clap 2.33.3", "memmap2", "solana-accounts-db", + "solana-program", "solana-version", ] diff --git a/accounts-db/accounts-hash-cache-tool/Cargo.toml b/accounts-db/accounts-hash-cache-tool/Cargo.toml index 38681cd463c612..dc41b8212c97b2 100644 --- a/accounts-db/accounts-hash-cache-tool/Cargo.toml +++ b/accounts-db/accounts-hash-cache-tool/Cargo.toml @@ -15,6 +15,7 @@ bytemuck = { workspace = true } clap = { workspace = true } memmap2 = { workspace = true } solana-accounts-db = { workspace = true } +solana-program = { workspace = true } solana-version = { workspace = true } [features] diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index c84c68672e677b..6601ac79e34008 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ 
b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -7,9 +7,10 @@ use { }, memmap2::Mmap, solana_accounts_db::{ - parse_cache_hash_data_filename, CacheHashDataFileEntry, CacheHashDataFileHeader, - ParsedCacheHashDataFilename, + accounts_hash::AccountHash, parse_cache_hash_data_filename, CacheHashDataFileEntry, + CacheHashDataFileHeader, ParsedCacheHashDataFilename, }, + solana_program::pubkey::Pubkey, std::{ cmp::Ordering, fs::{self, File, Metadata}, @@ -57,6 +58,7 @@ fn main() { ) .subcommand( SubCommand::with_name(CMD_DIFF) + .about("Compares cache files") .subcommand( SubCommand::with_name(CMD_DIFF_FILES) .about("Diff two accounts hash cache files") @@ -179,40 +181,9 @@ fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { } fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), String> { - let force = false; // skipping sanity checks is not supported when diffing - let (mut reader1, header1) = open_file(&file1, force).map_err(|err| { - format!( - "failed to open accounts hash cache file 1 '{}': {err}", - file1.as_ref().display(), - ) - })?; - let (mut reader2, header2) = open_file(&file2, force).map_err(|err| { - format!( - "failed to open accounts hash cache file 2 '{}': {err}", - file2.as_ref().display(), - ) - })?; - // Note: Purposely open both files before reading either one. This way, if there's an error - // opening file 2, we can bail early without having to wait for file 1 to be read completely. - - // extract the entries from both files - let do_extract = |reader: &mut BufReader<_>, header: &CacheHashDataFileHeader| { - let mut entries = Vec::new(); - scan_file(reader, header.count, |entry| { - entries.push(entry); - })?; - - // entries in the file are sorted by pubkey then slot, - // so we want to keep the *last* entry (if there are duplicates) - let entries: HashMap<_, _> = entries - .into_iter() - .map(|entry| (entry.pubkey, (entry.hash, entry.lamports))) - .collect(); - Ok::<_, String>(entries) - }; - let entries1 = do_extract(&mut reader1, &header1) + let entries1 = extract_latest_entries_in(&file1) .map_err(|err| format!("failed to extract entries from file 1: {err}"))?; - let entries2 = do_extract(&mut reader2, &header2) + let entries2 = extract_latest_entries_in(&file2) .map_err(|err| format!("failed to extract entries from file 2: {err}"))?; // compute the differences between the files @@ -494,7 +465,31 @@ fn get_cache_files_in(dir: impl AsRef) -> Result, io::E Ok(cache_files) } -/// Scan file with `reader` and apply `user_fn` to each entry +/// Returns the entries in `file` +/// +/// If there are multiple entries for a pubkey, only the latest is returned. +fn extract_latest_entries_in( + file: impl AsRef, +) -> Result, String> { + let force = false; // skipping sanity checks is not supported when extracting entries + let (reader, header) = open_file(&file, force).map_err(|err| { + format!( + "failed to open accounts hash cache file '{}': {err}", + file.as_ref().display(), + ) + })?; + + // entries in the file are sorted by pubkey then slot, + // so we want to keep the *last* entry (if there are duplicates) + let mut entries = HashMap::default(); + scan_file(reader, header.count, |entry| { + entries.insert(entry.pubkey, (entry.hash, entry.lamports)); + })?; + + Ok(entries) +} + +/// Scans file with `reader` and applies `user_fn` to each entry /// /// NOTE: `reader`'s cursor must already be at the first entry; i.e. *past* the header. 
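// Illustration (not part of this patch): `extract_latest_entries_in` above
// relies on cache-file entries being sorted by pubkey and then slot, so
// inserting them into a map in file order leaves only the last (latest)
// entry per pubkey. The generic helper below is a hypothetical restatement
// of that pattern:
use std::{collections::HashMap, hash::Hash};

fn keep_latest<K: Hash + Eq, V>(entries: impl IntoIterator<Item = (K, V)>) -> HashMap<K, V> {
    let mut latest = HashMap::new();
    for (key, value) in entries {
        // A later duplicate key overwrites the earlier entry.
        latest.insert(key, value);
    }
    latest
}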
fn scan_file( From c171eed7bb1fcd8d7611cc814cb173088d741ed8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:22:57 +0800 Subject: [PATCH 017/529] build(deps): bump regex from 1.10.5 to 1.10.6 (#2437) * build(deps): bump regex from 1.10.5 to 1.10.6 Bumps [regex](https://github.com/rust-lang/regex) from 1.10.5 to 1.10.6. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.10.5...1.10.6) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e8330c34483243..5a6faedbb5deed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4615,9 +4615,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index de49b9b8ac1016..695ae501a18688 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -308,7 +308,7 @@ rand = "0.8.5" rand_chacha = "0.3.1" rayon = "1.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.10.5" +regex = "1.10.6" reqwest = { version = "0.11.27", default-features = false } reqwest-middleware = "0.2.5" rolling-file = "0.2.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 16a475c9d77ac0..636594d23b6e96 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3834,9 +3834,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick 1.0.1", "memchr", From 3fa4696f39c8fb072e9128fe37914d5e190b565e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:23:09 +0800 Subject: [PATCH 018/529] build(deps): bump hidapi from 2.6.1 to 2.6.2 (#2438) Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 2.6.1 to 2.6.2. - [Release notes](https://github.com/ruabmbua/hidapi-rs/releases) - [Commits](https://github.com/ruabmbua/hidapi-rs/compare/v2.6.1...v2.6.2) --- updated-dependencies: - dependency-name: hidapi dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a6faedbb5deed..7adc005c653f9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2656,9 +2656,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hidapi" -version = "2.6.1" +version = "2.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e58251020fe88fe0dae5ebcc1be92b4995214af84725b375d08354d0311c23c" +checksum = "7ad5e383c2cf354bf4b54b7adf1dc781942fa9ec2ebc0714ad32524de968edce" dependencies = [ "cc", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 695ae501a18688..a74d306311a334 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -237,7 +237,7 @@ gethostname = "0.2.3" getrandom = "0.2.10" goauth = "0.13.1" hex = "0.4.3" -hidapi = { version = "2.6.1", default-features = false } +hidapi = { version = "2.6.2", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" From 51208f8eebf1d5c3eba6d2d63de1b6673cd3d173 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:23:18 +0800 Subject: [PATCH 019/529] build(deps): bump flate2 from 1.0.30 to 1.0.31 (#2439) * build(deps): bump flate2 from 1.0.30 to 1.0.31 Bumps [flate2](https://github.com/rust-lang/flate2-rs) from 1.0.30 to 1.0.31. - [Release notes](https://github.com/rust-lang/flate2-rs/releases) - [Commits](https://github.com/rust-lang/flate2-rs/commits) --- updated-dependencies: - dependency-name: flate2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7adc005c653f9e..4b8c897130d45b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2219,9 +2219,9 @@ checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" dependencies = [ "crc32fast", "miniz_oxide", diff --git a/Cargo.toml b/Cargo.toml index a74d306311a334..45f6625f01ec6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -226,7 +226,7 @@ env_logger = "0.9.3" etcd-client = "0.11.1" fast-math = "0.1" fd-lock = "3.0.13" -flate2 = "1.0.30" +flate2 = "1.0.31" fnv = "1.0.7" fs_extra = "1.3.0" futures = "0.3.30" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 636594d23b6e96..ca380ee3a93749 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1657,9 +1657,9 @@ checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" dependencies = [ "crc32fast", "miniz_oxide", From 
d0027ddb6472e97b237e8002ef9f4b933ec67d2b Mon Sep 17 00:00:00 2001
From: Andrew Fitzgerald
Date: Mon, 5 Aug 2024 10:14:55 -0500
Subject: [PATCH 020/529] Separate AccountLocks + Refactor (#2390)

---
 accounts-db/src/account_locks.rs | 155 ++++++++++++++++++++++
 accounts-db/src/accounts.rs      | 195 +++++--------------------------
 accounts-db/src/lib.rs           |   1 +
 3 files changed, 185 insertions(+), 166 deletions(-)
 create mode 100644 accounts-db/src/account_locks.rs

diff --git a/accounts-db/src/account_locks.rs b/accounts-db/src/account_locks.rs
new file mode 100644
index 00000000000000..dcd512e7d72984
--- /dev/null
+++ b/accounts-db/src/account_locks.rs
@@ -0,0 +1,155 @@
+#[cfg(feature = "dev-context-only-utils")]
+use qualifier_attr::qualifiers;
+use {
+    ahash::{AHashMap, AHashSet},
+    solana_sdk::{pubkey::Pubkey, transaction::TransactionError},
+    std::collections::hash_map,
+};
+
+#[derive(Debug, Default)]
+pub struct AccountLocks {
+    write_locks: AHashSet<Pubkey>,
+    readonly_locks: AHashMap<Pubkey, u64>,
+}
+
+impl AccountLocks {
+    /// Lock the account keys in `keys` for a transaction.
+    /// The bool in the tuple indicates if the account is writable.
+    /// Returns an error if any of the accounts are already locked in a way
+    /// that conflicts with the requested lock.
+    pub fn try_lock_accounts<'a>(
+        &mut self,
+        keys: impl Iterator<Item = (&'a Pubkey, bool)> + Clone,
+    ) -> Result<(), TransactionError> {
+        for (key, writable) in keys.clone() {
+            if writable {
+                if !self.can_write_lock(key) {
+                    return Err(TransactionError::AccountInUse);
+                }
+            } else if !self.can_read_lock(key) {
+                return Err(TransactionError::AccountInUse);
+            }
+        }
+
+        for (key, writable) in keys {
+            if writable {
+                self.lock_write(key);
+            } else {
+                self.lock_readonly(key);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Unlock the account keys in `keys` after a transaction.
+    /// The bool in the tuple indicates if the account is writable.
+    /// In debug-mode this function will panic if an attempt is made to unlock
+    /// an account that wasn't locked in the way requested.
+ pub fn unlock_accounts<'a>(&mut self, keys: impl Iterator) { + for (k, writable) in keys { + if writable { + self.unlock_write(k); + } else { + self.unlock_readonly(k); + } + } + } + + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn is_locked_readonly(&self, key: &Pubkey) -> bool { + self.readonly_locks + .get(key) + .map_or(false, |count| *count > 0) + } + + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn is_locked_write(&self, key: &Pubkey) -> bool { + self.write_locks.contains(key) + } + + fn can_read_lock(&self, key: &Pubkey) -> bool { + // If the key is not write-locked, it can be read-locked + !self.is_locked_write(key) + } + + fn can_write_lock(&self, key: &Pubkey) -> bool { + // If the key is not read-locked or write-locked, it can be write-locked + !self.is_locked_readonly(key) && !self.is_locked_write(key) + } + + fn lock_readonly(&mut self, key: &Pubkey) { + *self.readonly_locks.entry(*key).or_default() += 1; + } + + fn lock_write(&mut self, key: &Pubkey) { + self.write_locks.insert(*key); + } + + fn unlock_readonly(&mut self, key: &Pubkey) { + if let hash_map::Entry::Occupied(mut occupied_entry) = self.readonly_locks.entry(*key) { + let count = occupied_entry.get_mut(); + *count -= 1; + if *count == 0 { + occupied_entry.remove_entry(); + } + } else { + debug_assert!( + false, + "Attempted to remove a read-lock for a key that wasn't read-locked" + ); + } + } + + fn unlock_write(&mut self, key: &Pubkey) { + let removed = self.write_locks.remove(key); + debug_assert!( + removed, + "Attempted to remove a write-lock for a key that wasn't write-locked" + ); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_account_locks() { + let mut account_locks = AccountLocks::default(); + + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + + // Add write and read-lock. + let result = account_locks.try_lock_accounts([(&key1, true), (&key2, false)].into_iter()); + assert!(result.is_ok()); + + // Try to add duplicate write-lock. + let result = account_locks.try_lock_accounts([(&key1, true)].into_iter()); + assert_eq!(result, Err(TransactionError::AccountInUse)); + + // Try to add write lock on read-locked account. + let result = account_locks.try_lock_accounts([(&key2, true)].into_iter()); + assert_eq!(result, Err(TransactionError::AccountInUse)); + + // Try to add read lock on write-locked account. + let result = account_locks.try_lock_accounts([(&key1, false)].into_iter()); + assert_eq!(result, Err(TransactionError::AccountInUse)); + + // Add read lock on read-locked account. + let result = account_locks.try_lock_accounts([(&key2, false)].into_iter()); + assert!(result.is_ok()); + + // Unlock write and read locks. + account_locks.unlock_accounts([(&key1, true), (&key2, false)].into_iter()); + + // No more remaining write-locks. Read-lock remains. + assert!(!account_locks.is_locked_write(&key1)); + assert!(account_locks.is_locked_readonly(&key2)); + + // Unlock read lock. 
+ account_locks.unlock_accounts([(&key2, false)].into_iter()); + assert!(!account_locks.is_locked_readonly(&key2)); + } +} diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index bb8656648c23c9..d781bb45e11b8a 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -1,5 +1,6 @@ use { crate::{ + account_locks::AccountLocks, accounts_db::{ AccountStorageEntry, AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData, ScanStorageResult, VerifyAccountsHashAndLamportsConfig, @@ -8,7 +9,6 @@ use { ancestors::Ancestors, storable_accounts::StorableAccounts, }, - ahash::{AHashMap, AHashSet}, dashmap::DashMap, log::*, solana_sdk::{ @@ -18,13 +18,13 @@ use { message::v0::{LoadedAddresses, MessageAddressTableLookup}, pubkey::Pubkey, slot_hashes::SlotHashes, - transaction::{Result, SanitizedTransaction, TransactionError}, + transaction::{Result, SanitizedTransaction}, transaction_context::TransactionAccount, }, solana_svm_transaction::svm_message::SVMMessage, std::{ cmp::Reverse, - collections::{hash_map, BinaryHeap, HashSet}, + collections::{BinaryHeap, HashSet}, ops::RangeBounds, sync::{ atomic::{AtomicUsize, Ordering}, @@ -35,58 +35,6 @@ use { pub type PubkeyAccountSlot = (Pubkey, AccountSharedData, Slot); -#[derive(Debug, Default)] -pub struct AccountLocks { - write_locks: AHashSet, - readonly_locks: AHashMap, -} - -impl AccountLocks { - fn is_locked_readonly(&self, key: &Pubkey) -> bool { - self.readonly_locks - .get(key) - .map_or(false, |count| *count > 0) - } - - fn is_locked_write(&self, key: &Pubkey) -> bool { - self.write_locks.contains(key) - } - - fn insert_new_readonly(&mut self, key: &Pubkey) { - assert!(self.readonly_locks.insert(*key, 1).is_none()); - } - - fn lock_readonly(&mut self, key: &Pubkey) -> bool { - self.readonly_locks.get_mut(key).map_or(false, |count| { - *count += 1; - true - }) - } - - fn unlock_readonly(&mut self, key: &Pubkey) { - if let hash_map::Entry::Occupied(mut occupied_entry) = self.readonly_locks.entry(*key) { - let count = occupied_entry.get_mut(); - *count -= 1; - if *count == 0 { - occupied_entry.remove_entry(); - } - } else { - debug_assert!( - false, - "Attempted to remove a read-lock for a key that wasn't read-locked" - ); - } - } - - fn unlock_write(&mut self, key: &Pubkey) { - let removed = self.write_locks.remove(key); - debug_assert!( - removed, - "Attempted to remove a write-lock for a key that wasn't write-locked" - ); - } -} - struct TransactionAccountLocksIterator<'a, T: SVMMessage> { transaction: &'a T, } @@ -561,48 +509,6 @@ impl Accounts { self.accounts_db.store_uncached(slot, &[(pubkey, account)]); } - fn lock_account<'a>( - &self, - account_locks: &mut AccountLocks, - keys: impl Iterator + Clone, - ) -> Result<()> { - for (k, writable) in keys.clone() { - if writable { - if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) { - debug!("Writable account in use: {:?}", k); - return Err(TransactionError::AccountInUse); - } - } else if account_locks.is_locked_write(k) { - debug!("Read-only account in use: {:?}", k); - return Err(TransactionError::AccountInUse); - } - } - - for (k, writable) in keys { - if writable { - account_locks.write_locks.insert(*k); - } else if !account_locks.lock_readonly(k) { - account_locks.insert_new_readonly(k); - } - } - - Ok(()) - } - - fn unlock_account<'a>( - &self, - account_locks: &mut AccountLocks, - keys: impl Iterator, - ) { - for (k, writable) in keys { - if writable { - account_locks.unlock_write(k); - } else { - 
account_locks.unlock_readonly(k); - } - } - } - /// This function will prevent multiple threads from modifying the same account state at the /// same time #[must_use] @@ -653,7 +559,7 @@ impl Accounts { .into_iter() .map(|tx_account_locks_result| match tx_account_locks_result { Ok(tx_account_locks) => { - self.lock_account(account_locks, tx_account_locks.accounts_with_is_writable()) + account_locks.try_lock_accounts(tx_account_locks.accounts_with_is_writable()) } Err(err) => Err(err), }) @@ -674,10 +580,7 @@ impl Accounts { for (tx, res) in txs_and_results { if res.is_ok() { let tx_account_locks = TransactionAccountLocksIterator::new(tx.message()); - self.unlock_account( - &mut account_locks, - tx_account_locks.accounts_with_is_writable(), - ); + account_locks.unlock_accounts(tx_account_locks.accounts_with_is_writable()); } } } @@ -714,7 +617,7 @@ mod tests { message::{Message, MessageHeader}, native_loader, signature::{signers::Signers, Keypair, Signer}, - transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS}, + transaction::{Transaction, TransactionError, MAX_TX_ACCOUNT_LOCKS}, }, std::{ borrow::Cow, @@ -1046,16 +949,11 @@ mod tests { let results0 = accounts.lock_accounts([tx.clone()].iter(), MAX_TX_ACCOUNT_LOCKS); assert_eq!(results0, vec![Ok(())]); - assert_eq!( - *accounts - .account_locks - .lock() - .unwrap() - .readonly_locks - .get(&keypair1.pubkey()) - .unwrap(), - 1 - ); + assert!(accounts + .account_locks + .lock() + .unwrap() + .is_locked_readonly(&keypair1.pubkey())); let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( @@ -1086,16 +984,11 @@ mod tests { Err(TransactionError::AccountInUse), // Read-only account (keypair1) cannot also be locked as writable ], ); - assert_eq!( - *accounts - .account_locks - .lock() - .unwrap() - .readonly_locks - .get(&keypair1.pubkey()) - .unwrap(), - 2 - ); + assert!(accounts + .account_locks + .lock() + .unwrap() + .is_locked_readonly(&keypair1.pubkey())); accounts.unlock_accounts(iter::once(&tx).zip(&results0)); accounts.unlock_accounts(txs.iter().zip(&results1)); @@ -1120,8 +1013,7 @@ mod tests { .account_locks .lock() .unwrap() - .readonly_locks - .contains_key(&keypair1.pubkey())); + .is_locked_readonly(&keypair1.pubkey())); } #[test] @@ -1235,29 +1127,22 @@ mod tests { assert!(results0[0].is_ok()); // Instruction program-id account demoted to readonly - assert_eq!( - *accounts - .account_locks - .lock() - .unwrap() - .readonly_locks - .get(&native_loader::id()) - .unwrap(), - 1 - ); + assert!(accounts + .account_locks + .lock() + .unwrap() + .is_locked_readonly(&native_loader::id())); // Non-program accounts remain writable assert!(accounts .account_locks .lock() .unwrap() - .write_locks - .contains(&keypair0.pubkey())); + .is_locked_write(&keypair0.pubkey())); assert!(accounts .account_locks .lock() .unwrap() - .write_locks - .contains(&keypair1.pubkey())); + .is_locked_write(&keypair1.pubkey())); } impl Accounts { @@ -1346,40 +1231,18 @@ mod tests { ], ); - // verify that keypair0 read-only lock twice (for tx0 and tx2) - assert_eq!( - *accounts - .account_locks - .lock() - .unwrap() - .readonly_locks - .get(&keypair0.pubkey()) - .unwrap(), - 2 - ); - // verify that keypair2 (for tx1) is not write-locked - assert!(!accounts - .account_locks - .lock() - .unwrap() - .write_locks - .contains(&keypair2.pubkey())); - - accounts.unlock_accounts(txs.iter().zip(&results)); - - // check all locks to be removed + // verify that keypair0 read-only locked assert!(accounts 
.account_locks .lock() .unwrap() - .readonly_locks - .is_empty()); - assert!(accounts + .is_locked_readonly(&keypair0.pubkey())); + // verify that keypair2 (for tx1) is not write-locked + assert!(!accounts .account_locks .lock() .unwrap() - .write_locks - .is_empty()); + .is_locked_write(&keypair2.pubkey())); } #[test] diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index da4f3a9549db67..3cc8f686eff2e0 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -5,6 +5,7 @@ extern crate lazy_static; pub mod account_info; +mod account_locks; pub mod account_storage; pub mod accounts; mod accounts_cache; From a1c00e502a54682231d2a6c0343aa40d0adbb449 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 5 Aug 2024 10:21:31 -0500 Subject: [PATCH 021/529] Break genesis loading and checks out of load_blockstore() (#2436) Currently, load_blockstore() does much more than just load the blockstore. Aside from the name being confusing, everything being in that single function caused some inefficiency, such as loading genesis a second time to have the config available earlier. So, break opening genesis and genesis related checks into a separate function. --- core/src/validator.rs | 78 +++++++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 32 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 4052a48e144a6f..757c29f30299a9 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -35,7 +35,9 @@ use { accounts_db::{AccountShrinkThreshold, AccountsDbConfig}, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, - hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + hardened_unpack::{ + open_genesis_config, OpenGenesisConfigError, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + }, utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, @@ -591,9 +593,7 @@ impl Validator { "ledger directory does not exist or is not accessible: {ledger_path:?}" )); } - let genesis_config = - open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size) - .context("Failed to open genesis config")?; + let genesis_config = load_genesis(config, ledger_path)?; metrics_config_sanity_check(genesis_config.cluster_type)?; @@ -703,7 +703,6 @@ impl Validator { PohTimingReportService::new(poh_timing_point_receiver, exit.clone()); let ( - genesis_config, bank_forks, blockstore, original_blockstore_root, @@ -727,6 +726,7 @@ impl Validator { ) = load_blockstore( config, ledger_path, + &genesis_config, exit.clone(), &start_progress, accounts_update_notifier, @@ -1809,10 +1809,44 @@ fn blockstore_options_from_config(config: &ValidatorConfig) -> BlockstoreOptions } } +fn load_genesis( + config: &ValidatorConfig, + ledger_path: &Path, +) -> Result { + let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size) + .map_err(ValidatorError::OpenGenesisConfig)?; + + // This needs to be limited otherwise the state in the VoteAccount data + // grows too large + let leader_schedule_slot_offset = genesis_config.epoch_schedule.leader_schedule_slot_offset; + let slots_per_epoch = genesis_config.epoch_schedule.slots_per_epoch; + let leader_epoch_offset = (leader_schedule_slot_offset + slots_per_epoch - 1) / slots_per_epoch; + assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET); + + let genesis_hash = genesis_config.hash(); + info!("genesis hash: {}", genesis_hash); + + if let 
Some(expected_genesis_hash) = config.expected_genesis_hash { + if genesis_hash != expected_genesis_hash { + return Err(ValidatorError::GenesisHashMismatch( + genesis_hash, + expected_genesis_hash, + )); + } + } + + if !config.no_poh_speed_test { + check_poh_speed(&genesis_config, None)?; + } + + Ok(genesis_config) +} + #[allow(clippy::type_complexity)] fn load_blockstore( config: &ValidatorConfig, ledger_path: &Path, + genesis_config: &GenesisConfig, exit: Arc, start_progress: &Arc>, accounts_update_notifier: Option, @@ -1821,7 +1855,6 @@ fn load_blockstore( poh_timing_point_sender: Option, ) -> Result< ( - GenesisConfig, Arc>, Arc, Slot, @@ -1838,30 +1871,6 @@ fn load_blockstore( > { info!("loading ledger from {:?}...", ledger_path); *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger; - let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size) - .map_err(|err| format!("Failed to open genesis config: {err}"))?; - - // This needs to be limited otherwise the state in the VoteAccount data - // grows too large - let leader_schedule_slot_offset = genesis_config.epoch_schedule.leader_schedule_slot_offset; - let slots_per_epoch = genesis_config.epoch_schedule.slots_per_epoch; - let leader_epoch_offset = (leader_schedule_slot_offset + slots_per_epoch - 1) / slots_per_epoch; - assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET); - - let genesis_hash = genesis_config.hash(); - info!("genesis hash: {}", genesis_hash); - - if let Some(expected_genesis_hash) = config.expected_genesis_hash { - if genesis_hash != expected_genesis_hash { - return Err(format!( - "genesis hash mismatch: hash={genesis_hash} expected={expected_genesis_hash}. Delete the ledger directory to continue: {ledger_path:?}", - )); - } - } - - if !config.no_poh_speed_test { - check_poh_speed(&genesis_config, None).map_err(|err| format!("{err}"))?; - } let mut blockstore = Blockstore::open_with_options(ledger_path, blockstore_options_from_config(config)) @@ -1918,7 +1927,7 @@ fn load_blockstore( let (bank_forks, mut leader_schedule_cache, starting_snapshot_hashes) = bank_forks_utils::load_bank_forks( - &genesis_config, + genesis_config, &blockstore, config.account_paths.clone(), Some(&config.snapshot_config), @@ -1950,7 +1959,6 @@ fn load_blockstore( } Ok(( - genesis_config, bank_forks, blockstore, original_blockstore_root, @@ -2338,9 +2346,15 @@ pub enum ValidatorError { #[error("Bad expected bank hash")] BadExpectedBankHash, + #[error("genesis hash mismatch: actual={0}, expected={1}")] + GenesisHashMismatch(Hash, Hash), + #[error("Ledger does not have enough data to wait for supermajority")] NotEnoughLedgerData, + #[error("failed to open genesis: {0}")] + OpenGenesisConfig(#[source] OpenGenesisConfigError), + #[error("{0}")] Other(String), From 6ac4fe32e28d8ceb4085072b61fa0c6cb09baac1 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 5 Aug 2024 10:30:58 -0500 Subject: [PATCH 022/529] TransactionView: Signature Meta (#2408) --- transaction-view/src/bytes.rs | 20 +++-- transaction-view/src/lib.rs | 2 + transaction-view/src/signature_meta.rs | 108 +++++++++++++++++++++++++ 3 files changed, 122 insertions(+), 8 deletions(-) create mode 100644 transaction-view/src/signature_meta.rs diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs index a67d8a2ddd8b35..563acf0e9ae5e0 100644 --- a/transaction-view/src/bytes.rs +++ b/transaction-view/src/bytes.rs @@ -102,7 +102,11 @@ pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut 
usize) -> Result<u16>
 /// 2. The size of `T` is small enough such that a usize will not overflow if
 ///    given the maximum array size (u16::MAX).
 #[inline(always)]
-pub fn offset_array_len<T: Sized>(bytes: &[u8], offset: &mut usize, len: u16) -> Result<()> {
+pub fn advance_offset_for_array<T: Sized>(
+    bytes: &[u8],
+    offset: &mut usize,
+    len: u16,
+) -> Result<()> {
     let array_len_bytes = usize::from(len).wrapping_mul(core::mem::size_of::<T>());
     check_remaining(bytes, *offset, array_len_bytes)?;
     *offset = offset.wrapping_add(array_len_bytes);
@@ -116,7 +120,7 @@
 /// 1. The current offset is not greater than `bytes.len()`.
 /// 2. The size of `T` is small enough such that a usize will not overflow.
 #[inline(always)]
-pub fn offset_type<T: Sized>(bytes: &[u8], offset: &mut usize) -> Result<()> {
+pub fn advance_offset_for_type<T: Sized>(bytes: &[u8], offset: &mut usize) -> Result<()> {
     let type_size = core::mem::size_of::<T>();
     check_remaining(bytes, *offset, type_size)?;
     *offset = offset.wrapping_add(type_size);
@@ -267,7 +271,7 @@ mod tests {
     }
 
     #[test]
-    fn test_offset_array_len() {
+    fn test_advance_offset_for_array() {
         #[repr(C)]
         struct MyStruct {
             _a: u8,
@@ -278,17 +282,17 @@ fn test_offset_array_len() {
         // Test with a buffer that is too short
         let bytes = [0u8; 1];
         let mut offset = 0;
-        assert!(offset_array_len::<MyStruct>(&bytes, &mut offset, 1).is_err());
+        assert!(advance_offset_for_array::<MyStruct>(&bytes, &mut offset, 1).is_err());
 
         // Test with a buffer that is long enough
         let bytes = [0u8; 4];
         let mut offset = 0;
-        assert!(offset_array_len::<MyStruct>(&bytes, &mut offset, 2).is_ok());
+        assert!(advance_offset_for_array::<MyStruct>(&bytes, &mut offset, 2).is_ok());
         assert_eq!(offset, 4);
     }
 
     #[test]
-    fn test_offset_type() {
+    fn test_advance_offset_for_type() {
         #[repr(C)]
         struct MyStruct {
             _a: u8,
@@ -299,12 +303,12 @@ fn test_offset_type() {
         // Test with a buffer that is too short
         let bytes = [0u8; 1];
         let mut offset = 0;
-        assert!(offset_type::<MyStruct>(&bytes, &mut offset).is_err());
+        assert!(advance_offset_for_type::<MyStruct>(&bytes, &mut offset).is_err());
 
         // Test with a buffer that is long enough
         let bytes = [0u8; 4];
         let mut offset = 0;
-        assert!(offset_type::<MyStruct>(&bytes, &mut offset).is_ok());
+        assert!(advance_offset_for_type::<MyStruct>(&bytes, &mut offset).is_ok());
         assert_eq!(offset, 2);
     }
 }

diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs
index a16187f62ccd82..e73eaa0cba1249 100644
--- a/transaction-view/src/lib.rs
+++ b/transaction-view/src/lib.rs
@@ -7,3 +7,5 @@ pub mod bytes;
 mod bytes;
 pub mod result;
+#[allow(dead_code)]
+mod signature_meta;

diff --git a/transaction-view/src/signature_meta.rs b/transaction-view/src/signature_meta.rs
new file mode 100644
index 00000000000000..9e511068c02147
--- /dev/null
+++ b/transaction-view/src/signature_meta.rs
@@ -0,0 +1,108 @@
+use {
+    crate::{
+        bytes::{advance_offset_for_array, read_byte},
+        result::{Result, TransactionParsingError},
+    },
+    solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature},
+};
+
+/// Meta data for accessing transaction-level signatures in a transaction view.
+pub(crate) struct SignatureMeta {
+    /// The number of signatures in the transaction.
+    pub(crate) num_signatures: u16,
+    /// Offset to the first signature in the transaction packet.
+    pub(crate) offset: u16,
+}
+
+impl SignatureMeta {
+    /// Get the number of signatures and the offset to the first signature in
+    /// the transaction packet, starting at the given `offset`.
+    pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
+        // The packet has a maximum length of 1232 bytes.
+        // Each signature must be paired with a unique static pubkey, so each
+        // signature really requires 96 bytes. This means the maximum number of
+        // signatures in a **valid** transaction packet is 12.
+        // In our u16 encoding scheme, 12 would be encoded as a single byte.
+        // Rather than using the u16 decoding, we can simply read the byte and
+        // verify that the MSB is not set.
+        const MAX_SIGNATURES_PER_PACKET: u16 = (PACKET_DATA_SIZE
+            / (core::mem::size_of::<Signature>() + core::mem::size_of::<Pubkey>()))
+            as u16;
+        // Maximum number of signatures should be represented by a single byte,
+        // thus the MSB should not be set.
+        const _: () = assert!(MAX_SIGNATURES_PER_PACKET & 0b1000_0000 == 0);
+
+        let num_signatures = read_byte(bytes, offset)? as u16;
+        if num_signatures == 0 || num_signatures > MAX_SIGNATURES_PER_PACKET {
+            return Err(TransactionParsingError);
+        }
+
+        let signature_offset = *offset as u16;
+        advance_offset_for_array::<Signature>(bytes, offset, num_signatures)?;
+
+        Ok(Self {
+            num_signatures,
+            offset: signature_offset,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {super::*, solana_sdk::short_vec::ShortVec};
+
+    #[test]
+    fn test_zero_signatures() {
+        let bytes = bincode::serialize(&ShortVec(Vec::<Signature>::new())).unwrap();
+        let mut offset = 0;
+        assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_one_signature() {
+        let bytes = bincode::serialize(&ShortVec(vec![Signature::default()])).unwrap();
+        let mut offset = 0;
+        let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(meta.num_signatures, 1);
+        assert_eq!(meta.offset, 1);
+        assert_eq!(offset, 1 + core::mem::size_of::<Signature>());
+    }
+
+    #[test]
+    fn test_max_signatures() {
+        let signatures = vec![Signature::default(); 12];
+        let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
+        let mut offset = 0;
+        let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(meta.num_signatures, 12);
+        assert_eq!(meta.offset, 1);
+        assert_eq!(offset, 1 + 12 * core::mem::size_of::<Signature>());
+    }
+
+    #[test]
+    fn test_non_zero_offset() {
+        let mut bytes = bincode::serialize(&ShortVec(vec![Signature::default()])).unwrap();
+        bytes.insert(0, 0); // Insert a byte at the beginning of the packet.
+        let mut offset = 1; // Start at the second byte.
+ let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap(); + assert_eq!(meta.num_signatures, 1); + assert_eq!(meta.offset, 2); + assert_eq!(offset, 2 + core::mem::size_of::()); + } + + #[test] + fn test_too_many_signatures() { + let signatures = vec![Signature::default(); 13]; + let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); + let mut offset = 0; + assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err()); + } + + #[test] + fn test_u16_max_signatures() { + let signatures = vec![Signature::default(); u16::MAX as usize]; + let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); + let mut offset = 0; + assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err()); + } +} From 42422f29f4077efc43636c83ad35b26387649e21 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 5 Aug 2024 11:57:23 -0500 Subject: [PATCH 023/529] TransactionView: Message Header Meta (#2409) --- transaction-view/src/lib.rs | 2 + transaction-view/src/message_header_meta.rs | 118 ++++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 transaction-view/src/message_header_meta.rs diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index e73eaa0cba1249..41870f9d690d91 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -6,6 +6,8 @@ pub mod bytes; #[allow(dead_code)] mod bytes; +#[allow(dead_code)] +mod message_header_meta; pub mod result; #[allow(dead_code)] mod signature_meta; diff --git a/transaction-view/src/message_header_meta.rs b/transaction-view/src/message_header_meta.rs new file mode 100644 index 00000000000000..c144db2f4e8edc --- /dev/null +++ b/transaction-view/src/message_header_meta.rs @@ -0,0 +1,118 @@ +use { + crate::{ + bytes::read_byte, + result::{Result, TransactionParsingError}, + }, + solana_sdk::message::MESSAGE_VERSION_PREFIX, +}; + +/// A byte that represents the version of the transaction. +#[derive(Copy, Clone, Default)] +#[repr(u8)] +pub enum TransactionVersion { + #[default] + Legacy = u8::MAX, + V0 = 0, +} + +/// Meta data for accessing message header fields in a transaction view. +pub(crate) struct MessageHeaderMeta { + /// The offset to the first byte of the message in the transaction packet. + pub(crate) offset: u16, + /// The version of the transaction. + pub(crate) version: TransactionVersion, + /// The number of signatures required for this message to be considered + /// valid. + pub(crate) num_required_signatures: u8, + /// The last `num_readonly_signed_accounts` of the signed keys are + /// read-only. + pub(crate) num_readonly_signed_accounts: u8, + /// The last `num_readonly_unsigned_accounts` of the unsigned keys are + /// read-only accounts. + pub(crate) num_readonly_unsigned_accounts: u8, +} + +impl MessageHeaderMeta { + pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { + // Get the message offset. + // We know the offset does not exceed packet length, and our packet + // length is less than u16::MAX, so we can safely cast to u16. + let message_offset = *offset as u16; + + // Read the message prefix byte if present. This byte is present in V0 + // transactions but not in legacy transactions. + // The message header begins immediately after the message prefix byte + // if present. 
+        let message_prefix = read_byte(bytes, offset)?;
+        let (version, num_required_signatures) = if message_prefix & MESSAGE_VERSION_PREFIX != 0 {
+            let version = message_prefix & !MESSAGE_VERSION_PREFIX;
+            match version {
+                0 => (TransactionVersion::V0, read_byte(bytes, offset)?),
+                _ => return Err(TransactionParsingError),
+            }
+        } else {
+            // Legacy transaction. The `message_prefix` that was just read is
+            // actually the number of required signatures.
+            (TransactionVersion::Legacy, message_prefix)
+        };
+
+        let num_readonly_signed_accounts = read_byte(bytes, offset)?;
+        let num_readonly_unsigned_accounts = read_byte(bytes, offset)?;
+
+        Ok(Self {
+            offset: message_offset,
+            version,
+            num_required_signatures,
+            num_readonly_signed_accounts,
+            num_readonly_unsigned_accounts,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_invalid_version() {
+        let bytes = [0b1000_0001];
+        let mut offset = 0;
+        assert!(MessageHeaderMeta::try_new(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_legacy_transaction_missing_header_byte() {
+        let bytes = [5, 0];
+        let mut offset = 0;
+        assert!(MessageHeaderMeta::try_new(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_legacy_transaction_valid() {
+        let bytes = [5, 1, 2];
+        let mut offset = 0;
+        let header = MessageHeaderMeta::try_new(&bytes, &mut offset).unwrap();
+        assert!(matches!(header.version, TransactionVersion::Legacy));
+        assert_eq!(header.num_required_signatures, 5);
+        assert_eq!(header.num_readonly_signed_accounts, 1);
+        assert_eq!(header.num_readonly_unsigned_accounts, 2);
+    }
+
+    #[test]
+    fn test_v0_transaction_missing_header_byte() {
+        let bytes = [MESSAGE_VERSION_PREFIX, 5, 1];
+        let mut offset = 0;
+        assert!(MessageHeaderMeta::try_new(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_v0_transaction_valid() {
+        let bytes = [MESSAGE_VERSION_PREFIX, 5, 1, 2];
+        let mut offset = 0;
+        let header = MessageHeaderMeta::try_new(&bytes, &mut offset).unwrap();
+        assert!(matches!(header.version, TransactionVersion::V0));
+        assert_eq!(header.num_required_signatures, 5);
+        assert_eq!(header.num_readonly_signed_accounts, 1);
+        assert_eq!(header.num_readonly_unsigned_accounts, 2);
+    }
+}

From 7a9317fe25621c211fe4ab5491b88a4757d4b6d4 Mon Sep 17 00:00:00 2001
From: Illia Bobyr
Date: Mon, 5 Aug 2024 11:27:56 -0700
Subject: [PATCH 024/529] ci/do-audit.sh: Remove stale exceptions (#2426)

None of these show up in our audit reports anymore.

Also, clarify exceptions that are still present, to make it easier to
connect them to the root cause and assess their validity in the future.
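For quick reference, a minimal sketch of how the remaining ignore list is
consumed. The array mirrors the two advisories documented in the diff below;
the expanded plain `cargo audit` invocation is an assumption about how the
`scripts/cargo-for-all-lock-files.sh` wrapper forwards its arguments:

  # The two exceptions kept by this change, as fed to cargo-audit:
  cargo_audit_ignores=(
    --ignore RUSTSEC-2022-0093 # ed25519-dalek 1.0.1; fixed in >=2
    --ignore RUSTSEC-2024-0344 # curve25519-dalek 3.2.1; fixed in >=4.1.3
  )
  scripts/cargo-for-all-lock-files.sh audit "${cargo_audit_ignores[@]}"
  # roughly: cargo audit --ignore RUSTSEC-2022-0093 --ignore RUSTSEC-2024-0344,
  # run once per Cargo.lock in the tree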
--- ci/do-audit.sh | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/ci/do-audit.sh b/ci/do-audit.sh index df395e8a2bbc22..49c4510d7bb44f 100755 --- a/ci/do-audit.sh +++ b/ci/do-audit.sh @@ -18,20 +18,26 @@ while [[ -n $1 ]]; do done cargo_audit_ignores=( - # Potential segfault in the time crate + # === main repo === # - # Blocked on chrono updating `time` to >= 0.2.23 - --ignore RUSTSEC-2020-0071 - - # tokio: vulnerability affecting named pipes on Windows - # - # Exception is a stopgap to unblock CI - # https://github.com/solana-labs/solana/issues/29586 - --ignore RUSTSEC-2023-0001 - + # Crate: ed25519-dalek + # Version: 1.0.1 + # Title: Double Public Key Signing Function Oracle Attack on `ed25519-dalek` + # Date: 2022-06-11 + # ID: RUSTSEC-2022-0093 + # URL: https://rustsec.org/advisories/RUSTSEC-2022-0093 + # Solution: Upgrade to >=2 --ignore RUSTSEC-2022-0093 - # curve25519-dalek + # === programs/sbf === + # + # Crate: curve25519-dalek + # Version: 3.2.1 + # Title: Timing variability in `curve25519-dalek`'s `Scalar29::sub`/`Scalar52::sub` + # Date: 2024-06-18 + # ID: RUSTSEC-2024-0344 + # URL: https://rustsec.org/advisories/RUSTSEC-2024-0344 + # Solution: Upgrade to >=4.1.3 --ignore RUSTSEC-2024-0344 ) scripts/cargo-for-all-lock-files.sh audit "${cargo_audit_ignores[@]}" | $dep_tree_filter From 4e5af1e90b0efee2d06c6b6ea63adc063cda2b10 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 5 Aug 2024 14:34:42 -0500 Subject: [PATCH 025/529] calculate ancient slot boundary correctly for hash calc (#2444) --- .../src/accounts_db/scan_account_storage.rs | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/accounts-db/src/accounts_db/scan_account_storage.rs b/accounts-db/src/accounts_db/scan_account_storage.rs index 5a9f0739cb0ada..71c944e722e5ab 100644 --- a/accounts-db/src/accounts_db/scan_account_storage.rs +++ b/accounts-db/src/accounts_db/scan_account_storage.rs @@ -183,12 +183,18 @@ impl AccountsDb { where S: AppendVecScan, { - let oldest_non_ancient_slot = self.get_oldest_non_ancient_slot_for_hash_calc_scan( - snapshot_storages.max_slot_inclusive(), - config, - ); - let splitter = SplitAncientStorages::new(oldest_non_ancient_slot, snapshot_storages); - + let oldest_non_ancient_slot_for_split = self + .get_oldest_non_ancient_slot_for_hash_calc_scan( + snapshot_storages.max_slot_inclusive(), + config, + ); + let splitter = + SplitAncientStorages::new(oldest_non_ancient_slot_for_split, snapshot_storages); + let oldest_non_ancient_slot_for_identification = self + .get_oldest_non_ancient_slot_from_slot( + config.epoch_schedule, + snapshot_storages.max_slot_inclusive(), + ); let slots_per_epoch = config .rent_collector .epoch_schedule @@ -297,10 +303,7 @@ impl AccountsDb { let mut init_accum = true; // load from cache failed, so create the cache file for this chunk for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { - let ancient = - oldest_non_ancient_slot.is_some_and(|oldest_non_ancient_slot| { - slot < oldest_non_ancient_slot - }); + let ancient = slot < oldest_non_ancient_slot_for_identification; let (_, scan_us) = measure_us!(if let Some(storage) = storage { if init_accum { From 57144b0ceadc03ca15b47f656d44cb3e7909b75e Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 5 Aug 2024 22:56:43 +0000 Subject: [PATCH 026/529] extends Turbine fanout experiment to wider fanout values (#2373) Based on previous Turbine fanout experiment, wider fanouts are more effective in 
propagating shreds and reducing repairs: https://discord.com/channels/428295358100013066/478692221441409024/1265782094211321897 In order to identify optimal fanout value, this commit extends the experiment with wider fanout values. --- sdk/src/feature_set.rs | 5 +++++ turbine/src/cluster_nodes.rs | 39 ++++++++++++++++++++++-------------- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 887d2e547f19b2..4626240a949de4 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -841,6 +841,10 @@ pub mod vote_only_retransmitter_signed_fec_sets { solana_sdk::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); } +pub mod enable_turbine_extended_fanout_experiments { + solana_sdk::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -1046,6 +1050,7 @@ lazy_static! { (move_stake_and_move_lamports_ixs::id(), "Enable MoveStake and MoveLamports stake program instructions #1610"), (ed25519_precompile_verify_strict::id(), "Use strict verification in ed25519 precompile SIMD-0152"), (vote_only_retransmitter_signed_fec_sets::id(), "vote only on retransmitter signed fec sets"), + (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 8cc5f29033fd86..42236d908da90a 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -564,8 +564,31 @@ pub fn make_test_cluster( } pub(crate) fn get_data_plane_fanout(shred_slot: Slot, root_bank: &Bank) -> usize { - if enable_turbine_fanout_experiments(shred_slot, root_bank) { + if check_feature_activation( + &feature_set::disable_turbine_fanout_experiments::id(), + shred_slot, + root_bank, + ) { + DATA_PLANE_FANOUT + } else if check_feature_activation( + &feature_set::enable_turbine_extended_fanout_experiments::id(), + shred_slot, + root_bank, + ) { // Allocate ~2% of slots to turbine fanout experiments. + match shred_slot % 359 { + 11 => 1152, + 61 => 1280, + 111 => 1024, + 161 => 1408, + 211 => 896, + 261 => 1536, + 311 => 768, + _ => DATA_PLANE_FANOUT, + } + } else { + // feature_set::enable_turbine_fanout_experiments + // is already activated on all clusters. match shred_slot % 359 { 11 => 64, 61 => 768, @@ -576,23 +599,9 @@ pub(crate) fn get_data_plane_fanout(shred_slot: Slot, root_bank: &Bank) -> usize 311 => 384, _ => DATA_PLANE_FANOUT, } - } else { - DATA_PLANE_FANOUT } } -fn enable_turbine_fanout_experiments(shred_slot: Slot, root_bank: &Bank) -> bool { - check_feature_activation( - &feature_set::enable_turbine_fanout_experiments::id(), - shred_slot, - root_bank, - ) && !check_feature_activation( - &feature_set::disable_turbine_fanout_experiments::id(), - shred_slot, - root_bank, - ) -} - // Returns true if the feature is effective for the shred slot. 
#[must_use] pub fn check_feature_activation(feature: &Pubkey, shred_slot: Slot, root_bank: &Bank) -> bool { From c986303988b707866151b12a94e8383f4aa54751 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Mon, 5 Aug 2024 20:44:06 -0300 Subject: [PATCH 027/529] Create edge case program for call arguments (#2422) --- programs/sbf/Cargo.lock | 9 ++ programs/sbf/Cargo.toml | 3 + programs/sbf/rust/call_args/Cargo.toml | 16 +++ programs/sbf/rust/call_args/src/lib.rs | 177 +++++++++++++++++++++++++ programs/sbf/tests/programs.rs | 146 ++++++++++++++++++++ 5 files changed, 351 insertions(+) create mode 100644 programs/sbf/rust/call_args/Cargo.toml create mode 100644 programs/sbf/rust/call_args/src/lib.rs diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ca380ee3a93749..91b3667d25e861 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5708,6 +5708,7 @@ version = "2.1.0" dependencies = [ "agave-validator", "bincode", + "borsh 1.5.1", "byteorder 1.5.0", "elf", "itertools 0.10.5", @@ -5790,6 +5791,14 @@ dependencies = [ "solana-program", ] +[[package]] +name = "solana-sbf-rust-call-args" +version = "2.1.0" +dependencies = [ + "borsh 1.5.1", + "solana-program", +] + [[package]] name = "solana-sbf-rust-call-depth" version = "2.1.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 44ca0165600fd1..4c21253716b46a 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -11,6 +11,7 @@ edition = "2021" array-bytes = "=1.4.1" bincode = { version = "1.1.4", default-features = false } blake3 = "1.0.0" +borsh = "1.5.1" byteorder = "1.3.2" elf = "0.0.10" getrandom = "0.2.10" @@ -90,6 +91,7 @@ frozen-abi = [] [dev-dependencies] agave-validator = { workspace = true } bincode = { workspace = true } +borsh = { workspace = true } byteorder = { workspace = true } elf = { workspace = true } itertools = { workspace = true } @@ -131,6 +133,7 @@ members = [ "rust/alt_bn128", "rust/alt_bn128_compression", "rust/big_mod_exp", + "rust/call_args", "rust/call_depth", "rust/caller_access", "rust/curve25519", diff --git a/programs/sbf/rust/call_args/Cargo.toml b/programs/sbf/rust/call_args/Cargo.toml new file mode 100644 index 00000000000000..82f81f04a457e1 --- /dev/null +++ b/programs/sbf/rust/call_args/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "solana-sbf-rust-call-args" +version = { workspace = true } +description = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +borsh = { workspace = true } +solana-program = { workspace = true } + +[lib] +crate-type = ["cdylib"] diff --git a/programs/sbf/rust/call_args/src/lib.rs b/programs/sbf/rust/call_args/src/lib.rs new file mode 100644 index 00000000000000..1925eff36125bb --- /dev/null +++ b/programs/sbf/rust/call_args/src/lib.rs @@ -0,0 +1,177 @@ +use { + borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize}, + solana_program::{ + account_info::AccountInfo, entrypoint::ProgramResult, program::set_return_data, + pubkey::Pubkey, + }, +}; + +#[derive(BorshSerialize, BorshDeserialize, Clone, Copy)] +struct Test128 { + a: u128, + b: u128, +} + +#[derive(BorshDeserialize)] +struct InputData { + test_128: Test128, + arg1: i64, + arg2: i64, + arg3: i64, + arg4: i64, + arg5: i64, + arg6: i64, + arg7: i64, + arg8: i64, +} + +#[derive(BorshSerialize)] +struct OutputData { + res_128: u128, + res_256: Test128, + 
many_args_1: i64, + many_args_2: i64, +} + +solana_program::entrypoint!(entry); + +pub fn entry(_program_id: &Pubkey, _accounts: &[AccountInfo], data: &[u8]) -> ProgramResult { + // This code is supposed to occupy stack space. The purpose of this test is to make sure + // we operate on the limits of the stack frame safely. + let buffer: [u8; 3800] = [1; 3800]; + + let mut x: [u8; 16] = [0; 16]; + x.copy_from_slice(&buffer[3784..3800]); + x[10] = 0x39; + x[11] = 0x37; + + // Assert the function call hasn't overwritten these values + check_arr(x); + assert_eq!(x[10], 0x39); + assert_eq!(x[11], 0x37); + + // The function call must not overwrite the values and the return must be correct. + let y = check_arr_and_return(x); + assert_eq!(x[10], 0x39); + assert_eq!(x[11], 0x37); + assert_eq!(y[10], 0x39); + assert_eq!(y[11], 0x37); + assert_eq!(y[15], 17); + + let decoded: InputData = from_slice::(data).unwrap(); + + let output = OutputData { + res_128: test_128_arg(decoded.test_128.a, decoded.test_128.b), + res_256: test_256_arg(decoded.test_128), + many_args_1: many_args( + decoded.arg1, + decoded.arg2, + decoded.arg3, + decoded.arg4, + decoded.arg5, + decoded.arg6, + decoded.arg7, + decoded.arg8, + ), + many_args_2: many_args_stack_space( + decoded.arg1, + decoded.arg2, + decoded.arg3, + decoded.arg4, + decoded.arg5, + decoded.arg6, + decoded.arg7, + decoded.arg8, + ), + }; + + let encoded = to_vec(&output).unwrap(); + + set_return_data(encoded.as_slice()); + + Ok(()) +} + +// In this function the argument is promoted to a pointer, so it does not overwrite the stack. +#[allow(improper_ctypes_definitions)] +#[inline(never)] +extern "C" fn check_arr(x: [u8; 16]) { + for (idx, item) in x.iter().enumerate() { + if idx != 10 && idx != 11 { + assert!(*item == 1u8); + } + } + assert_eq!(x[11], 0x37); + assert_eq!(x[10], 0x39); +} + +// Both the argument and return value are promoted to pointers. 
+#[allow(improper_ctypes_definitions)]
+#[inline(never)]
+extern "C" fn check_arr_and_return(mut x: [u8; 16]) -> [u8; 16] {
+    for (idx, item) in x.iter().enumerate() {
+        if idx != 10 && idx != 11 {
+            assert!(*item == 1u8);
+        }
+    }
+    assert_eq!(x[11], 0x37);
+    assert_eq!(x[10], 0x39);
+    x[15] = 17;
+    x
+}
+
+// Test a 128-bit argument
+#[allow(clippy::arithmetic_side_effects)]
+#[inline(never)]
+fn test_128_arg(x: u128, y: u128) -> u128 {
+    x % y
+}
+
+// Test a 256-bit argument
+#[allow(clippy::arithmetic_side_effects)]
+#[inline(never)]
+fn test_256_arg(x: Test128) -> Test128 {
+    Test128 {
+        a: x.a + x.b,
+        b: x.a - x.b,
+    }
+}
+
+// Test a function that needs to save arguments in the stack
+#[allow(clippy::arithmetic_side_effects)]
+#[inline(never)]
+extern "C" fn many_args(a: i64, b: i64, c: i64, d: i64, e: i64, f: i64, g: i64, h: i64) -> i64 {
+    let i = a + b;
+    let j = i - c;
+    let k = j + d;
+    let l = k - e;
+    let m = l % f;
+    let n = m - g;
+    n + h
+}
+
+// Test a function that utilizes stack space and needs to retrieve arguments from the caller stack
+#[allow(clippy::arithmetic_side_effects)]
+#[inline(never)]
+extern "C" fn many_args_stack_space(
+    a: i64,
+    b: i64,
+    c: i64,
+    d: i64,
+    e: i64,
+    f: i64,
+    g: i64,
+    h: i64,
+) -> i64 {
+    let s: [i64; 3] = [1, 2, 3];
+    let i = a + b;
+    let j = i - c;
+    let k = j + d;
+    let l = k - e;
+    let m = l % f;
+    let n = m - g;
+    let o = n + h;
+    let p = o + s[0];
+    let q = p + s[1];
+    q - s[2]
+}

diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs
index c9c5f99b05cd2b..f9afcf03013566 100644
--- a/programs/sbf/tests/programs.rs
+++ b/programs/sbf/tests/programs.rs
@@ -9,6 +9,7 @@
 #[cfg(feature = "sbf_rust")]
 use {
+    borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize},
     itertools::izip,
     solana_account_decoder::parse_bpf_loader::{
         parse_bpf_upgradeable_loader, BpfUpgradeableLoaderAccountType,
@@ -70,6 +71,7 @@ use {
         map_inner_instructions, ConfirmedTransactionWithStatusMeta, TransactionStatusMeta,
         TransactionWithStatusMeta, VersionedTransactionWithStatusMeta,
     },
+    solana_type_overrides::rand,
     std::{
         assert_eq,
         cell::RefCell,
@@ -5126,3 +5128,147 @@ fn test_stack_heap_zeroed() {
         );
     }
 }
+
+#[test]
+fn test_function_call_args() {
+    // This function tests compiler edge cases when calling functions with more than five
+    // arguments and passing by-value arguments with more than 16 bytes.
+    solana_logger::setup();
+
+    let GenesisConfigInfo {
+        genesis_config,
+        mint_keypair,
+        ..
+ } = create_genesis_config(100_123_456_789); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank); + let authority_keypair = Keypair::new(); + + let (bank, program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, + "solana_sbf_rust_call_args", + ); + + #[derive(BorshSerialize, BorshDeserialize, PartialEq, Eq, Debug)] + struct Test128 { + a: u128, + b: u128, + } + + #[derive(BorshSerialize)] + struct InputData { + test_128: Test128, + arg1: i64, + arg2: i64, + arg3: i64, + arg4: i64, + arg5: i64, + arg6: i64, + arg7: i64, + arg8: i64, + } + + #[derive(BorshDeserialize)] + struct OutputData { + res_128: u128, + res_256: Test128, + many_args_1: i64, + many_args_2: i64, + } + + let input_data = InputData { + test_128: Test128 { + a: rand::random::(), + b: rand::random::(), + }, + arg1: rand::random::(), + arg2: rand::random::(), + arg3: rand::random::(), + arg4: rand::random::(), + arg5: rand::random::(), + arg6: rand::random::(), + arg7: rand::random::(), + arg8: rand::random::(), + }; + + let instruction_data = to_vec(&input_data).unwrap(); + let account_metas = vec![ + AccountMeta::new(mint_keypair.pubkey(), true), + AccountMeta::new(Keypair::new().pubkey(), false), + ]; + + let instruction = Instruction::new_with_bytes(program_id, &instruction_data, account_metas); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + + let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); + + let txs = vec![tx]; + let tx_batch = bank.prepare_batch_for_tests(txs); + let result = bank + .load_execute_and_commit_transactions( + &tx_batch, + MAX_PROCESSING_AGE, + false, + ExecutionRecordingConfig { + enable_cpi_recording: false, + enable_log_recording: false, + enable_return_data_recording: true, + }, + &mut ExecuteTimings::default(), + None, + ) + .0; + + fn verify_many_args(input: &InputData) -> i64 { + let a = input + .arg1 + .overflowing_add(input.arg2) + .0 + .overflowing_sub(input.arg3) + .0 + .overflowing_add(input.arg4) + .0 + .overflowing_sub(input.arg5) + .0; + (a % input.arg6) + .overflowing_sub(input.arg7) + .0 + .overflowing_add(input.arg8) + .0 + } + + let return_data = &result[0] + .as_ref() + .unwrap() + .execution_details + .return_data + .as_ref() + .unwrap() + .data; + let decoded: OutputData = from_slice::(return_data).unwrap(); + assert_eq!( + decoded.res_128, + input_data.test_128.a % input_data.test_128.b + ); + assert_eq!( + decoded.res_256, + Test128 { + a: input_data + .test_128 + .a + .overflowing_add(input_data.test_128.b) + .0, + b: input_data + .test_128 + .a + .overflowing_sub(input_data.test_128.b) + .0 + } + ); + assert_eq!(decoded.many_args_1, verify_many_args(&input_data)); + assert_eq!(decoded.many_args_2, verify_many_args(&input_data)); +} From 5e0956e404ca5a91e4cb8bf03e3cc58db7fc439d Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 6 Aug 2024 07:50:55 +0800 Subject: [PATCH 028/529] refactor: move bank check transaction logic to new module (#2328) --- runtime/src/bank.rs | 182 +------------ runtime/src/bank/check_transactions.rs | 348 +++++++++++++++++++++++++ runtime/src/bank/tests.rs | 162 +----------- 3 files changed, 364 insertions(+), 328 deletions(-) create mode 100644 runtime/src/bank/check_transactions.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index cebf31f2d7f945..02ef725a4ba20e 100644 --- a/runtime/src/bank.rs +++ 
b/runtime/src/bank.rs @@ -89,7 +89,6 @@ use { solana_cost_model::cost_tracker::CostTracker, solana_loader_v4_program::create_program_runtime_environment_v2, solana_measure::{measure::Measure, measure_time, measure_us}, - solana_perf::perf_libs, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, }, @@ -103,9 +102,9 @@ use { clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, - MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU, - SECONDS_PER_DAY, UPDATED_HASHES_PER_TICK2, UPDATED_HASHES_PER_TICK3, - UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5, UPDATED_HASHES_PER_TICK6, + MAX_TRANSACTION_FORWARDING_DELAY, SECONDS_PER_DAY, UPDATED_HASHES_PER_TICK2, + UPDATED_HASHES_PER_TICK3, UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5, + UPDATED_HASHES_PER_TICK6, }, epoch_info::EpochInfo, epoch_schedule::EpochSchedule, @@ -124,8 +123,7 @@ use { message::{AccountKeys, SanitizedMessage}, native_loader, native_token::LAMPORTS_PER_SOL, - nonce::{self, state::DurableNonce, NONCED_TX_MARKER_IX_INDEX}, - nonce_account, + nonce::state::DurableNonce, packet::PACKET_DATA_SIZE, precompiles::get_precompiles, pubkey::Pubkey, @@ -141,7 +139,7 @@ use { sysvar::{self, last_restart_slot::LastRestartSlot, Sysvar, SysvarId}, timing::years_as_slots, transaction::{ - self, MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, + MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, transaction_context::{TransactionAccount, TransactionReturnData}, @@ -151,12 +149,9 @@ use { stake_state::StakeStateV2, }, solana_svm::{ - account_loader::{ - collect_rent_from_account, CheckedTransactionDetails, TransactionCheckResult, - }, + account_loader::collect_rent_from_account, account_overrides::AccountOverrides, account_saver::collect_accounts_to_store, - nonce_info::NonceInfo, transaction_commit_result::{CommittedTransaction, TransactionCommitResult}, transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::{ @@ -199,6 +194,7 @@ use { ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, solana_program_runtime::{loaded_programs::ProgramCacheForTxBatch, sysvar_cache::SysvarCache}, + solana_sdk::nonce, solana_svm::program_loader::load_program_with_pubkey, solana_system_program::{get_system_account_kind, SystemAccountKind}, }; @@ -216,6 +212,7 @@ mod address_lookup_table; pub mod bank_hash_details; mod builtin_programs; pub mod builtins; +mod check_transactions; pub mod epoch_accounts_hash_utils; mod fee_distribution; mod metrics; @@ -3426,96 +3423,6 @@ impl Bank { self.rc.accounts.accounts_db.remove_unrooted_slots(slots) } - fn check_age( - &self, - sanitized_txs: &[impl core::borrow::Borrow], - lock_results: &[Result<()>], - max_age: usize, - error_counters: &mut TransactionErrorMetrics, - ) -> Vec { - let hash_queue = self.blockhash_queue.read().unwrap(); - let last_blockhash = hash_queue.last_hash(); - let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); - - sanitized_txs - .iter() - .zip(lock_results) - .map(|(tx, lock_res)| match lock_res { - Ok(()) => self.check_transaction_age( - tx.borrow(), - max_age, - &next_durable_nonce, - &hash_queue, - error_counters, - ), - Err(e) => Err(e.clone()), - }) - .collect() - } - - fn check_transaction_age( - &self, - tx: 
&SanitizedTransaction, - max_age: usize, - next_durable_nonce: &DurableNonce, - hash_queue: &BlockhashQueue, - error_counters: &mut TransactionErrorMetrics, - ) -> TransactionCheckResult { - let recent_blockhash = tx.message().recent_blockhash(); - if let Some(hash_info) = hash_queue.get_hash_info_if_valid(recent_blockhash, max_age) { - Ok(CheckedTransactionDetails { - nonce: None, - lamports_per_signature: hash_info.lamports_per_signature(), - }) - } else if let Some((nonce, nonce_data)) = - self.check_and_load_message_nonce_account(tx.message(), next_durable_nonce) - { - Ok(CheckedTransactionDetails { - nonce: Some(nonce), - lamports_per_signature: nonce_data.get_lamports_per_signature(), - }) - } else { - error_counters.blockhash_not_found += 1; - Err(TransactionError::BlockhashNotFound) - } - } - - fn is_transaction_already_processed( - &self, - sanitized_tx: &SanitizedTransaction, - status_cache: &BankStatusCache, - ) -> bool { - let key = sanitized_tx.message_hash(); - let transaction_blockhash = sanitized_tx.message().recent_blockhash(); - status_cache - .get_status(key, transaction_blockhash, &self.ancestors) - .is_some() - } - - fn check_status_cache( - &self, - sanitized_txs: &[impl core::borrow::Borrow], - lock_results: Vec, - error_counters: &mut TransactionErrorMetrics, - ) -> Vec { - let rcache = self.status_cache.read().unwrap(); - sanitized_txs - .iter() - .zip(lock_results) - .map(|(sanitized_tx, lock_result)| { - let sanitized_tx = sanitized_tx.borrow(); - if lock_result.is_ok() - && self.is_transaction_already_processed(sanitized_tx, &rcache) - { - error_counters.already_processed += 1; - return Err(TransactionError::AlreadyProcessed); - } - - lock_result - }) - .collect() - } - pub fn get_hash_age(&self, hash: &Hash) -> Option { self.blockhash_queue.read().unwrap().get_hash_age(hash) } @@ -3527,49 +3434,6 @@ impl Bank { .is_hash_valid_for_age(hash, max_age) } - fn load_message_nonce_account( - &self, - message: &SanitizedMessage, - ) -> Option<(NonceInfo, nonce::state::Data)> { - let nonce_address = message.get_durable_nonce()?; - let nonce_account = self.get_account_with_fixed_root(nonce_address)?; - let nonce_data = - nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?; - - let nonce_is_authorized = message - .get_ix_signers(NONCED_TX_MARKER_IX_INDEX as usize) - .any(|signer| signer == &nonce_data.authority); - if !nonce_is_authorized { - return None; - } - - Some((NonceInfo::new(*nonce_address, nonce_account), nonce_data)) - } - - fn check_and_load_message_nonce_account( - &self, - message: &SanitizedMessage, - next_durable_nonce: &DurableNonce, - ) -> Option<(NonceInfo, nonce::state::Data)> { - let nonce_is_advanceable = message.recent_blockhash() != next_durable_nonce.as_hash(); - if nonce_is_advanceable { - self.load_message_nonce_account(message) - } else { - None - } - } - - pub fn check_transactions( - &self, - sanitized_txs: &[impl core::borrow::Borrow], - lock_results: &[Result<()>], - max_age: usize, - error_counters: &mut TransactionErrorMetrics, - ) -> Vec { - let lock_results = self.check_age(sanitized_txs, lock_results, max_age, error_counters); - self.check_status_cache(sanitized_txs, lock_results, error_counters) - } - pub fn collect_balances(&self, batch: &TransactionBatch) -> TransactionBalances { let mut balances: TransactionBalances = vec![]; for transaction in batch.sanitized_transactions() { @@ -6545,36 +6409,6 @@ impl Bank { .try_get_epoch_accounts_hash() } - /// Checks a batch of sanitized transactions again bank 
for age and status
-    pub fn check_transactions_with_forwarding_delay(
-        &self,
-        transactions: &[SanitizedTransaction],
-        filter: &[transaction::Result<()>],
-        forward_transactions_to_leader_at_slot_offset: u64,
-    ) -> Vec<TransactionCheckResult> {
-        let mut error_counters = TransactionErrorMetrics::default();
-        // The following code also checks if the blockhash for a transaction is too old
-        // The check accounts for
-        // 1. Transaction forwarding delay
-        // 2. The slot at which the next leader will actually process the transaction
-        // Drop the transaction if it will expire by the time the next node receives and processes it
-        let api = perf_libs::api();
-        let max_tx_fwd_delay = if api.is_none() {
-            MAX_TRANSACTION_FORWARDING_DELAY
-        } else {
-            MAX_TRANSACTION_FORWARDING_DELAY_GPU
-        };
-
-        self.check_transactions(
-            transactions,
-            filter,
-            (MAX_PROCESSING_AGE)
-                .saturating_sub(max_tx_fwd_delay)
-                .saturating_sub(forward_transactions_to_leader_at_slot_offset as usize),
-            &mut error_counters,
-        )
-    }
-
     pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool {
         if slot < &self.slot {
             if let Ok(slot_hashes) = self.transaction_processor.sysvar_cache().get_slot_hashes() {

diff --git a/runtime/src/bank/check_transactions.rs b/runtime/src/bank/check_transactions.rs
new file mode 100644
index 00000000000000..d966d986fb8305
--- /dev/null
+++ b/runtime/src/bank/check_transactions.rs
@@ -0,0 +1,348 @@
+use {
+    super::{Bank, BankStatusCache},
+    solana_accounts_db::blockhash_queue::BlockhashQueue,
+    solana_perf::perf_libs,
+    solana_sdk::{
+        clock::{
+            MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
+            MAX_TRANSACTION_FORWARDING_DELAY_GPU,
+        },
+        message::SanitizedMessage,
+        nonce::{self, state::DurableNonce, NONCED_TX_MARKER_IX_INDEX},
+        nonce_account,
+        transaction::{Result as TransactionResult, SanitizedTransaction, TransactionError},
+    },
+    solana_svm::{
+        account_loader::{CheckedTransactionDetails, TransactionCheckResult},
+        nonce_info::NonceInfo,
+        transaction_error_metrics::TransactionErrorMetrics,
+    },
+};
+
+impl Bank {
+    /// Checks a batch of sanitized transactions against the bank for age and status
+    pub fn check_transactions_with_forwarding_delay(
+        &self,
+        transactions: &[SanitizedTransaction],
+        filter: &[TransactionResult<()>],
+        forward_transactions_to_leader_at_slot_offset: u64,
+    ) -> Vec<TransactionCheckResult> {
+        let mut error_counters = TransactionErrorMetrics::default();
+        // The following code also checks if the blockhash for a transaction is too old
+        // The check accounts for
+        // 1. Transaction forwarding delay
+        // 2.
The slot at which the next leader will actually process the transaction + // Drop the transaction if it will expire by the time the next node receives and processes it + let api = perf_libs::api(); + let max_tx_fwd_delay = if api.is_none() { + MAX_TRANSACTION_FORWARDING_DELAY + } else { + MAX_TRANSACTION_FORWARDING_DELAY_GPU + }; + + self.check_transactions( + transactions, + filter, + (MAX_PROCESSING_AGE) + .saturating_sub(max_tx_fwd_delay) + .saturating_sub(forward_transactions_to_leader_at_slot_offset as usize), + &mut error_counters, + ) + } + + pub fn check_transactions( + &self, + sanitized_txs: &[impl core::borrow::Borrow], + lock_results: &[TransactionResult<()>], + max_age: usize, + error_counters: &mut TransactionErrorMetrics, + ) -> Vec { + let lock_results = self.check_age(sanitized_txs, lock_results, max_age, error_counters); + self.check_status_cache(sanitized_txs, lock_results, error_counters) + } + + fn check_age( + &self, + sanitized_txs: &[impl core::borrow::Borrow], + lock_results: &[TransactionResult<()>], + max_age: usize, + error_counters: &mut TransactionErrorMetrics, + ) -> Vec { + let hash_queue = self.blockhash_queue.read().unwrap(); + let last_blockhash = hash_queue.last_hash(); + let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + + sanitized_txs + .iter() + .zip(lock_results) + .map(|(tx, lock_res)| match lock_res { + Ok(()) => self.check_transaction_age( + tx.borrow(), + max_age, + &next_durable_nonce, + &hash_queue, + error_counters, + ), + Err(e) => Err(e.clone()), + }) + .collect() + } + + fn check_transaction_age( + &self, + tx: &SanitizedTransaction, + max_age: usize, + next_durable_nonce: &DurableNonce, + hash_queue: &BlockhashQueue, + error_counters: &mut TransactionErrorMetrics, + ) -> TransactionCheckResult { + let recent_blockhash = tx.message().recent_blockhash(); + if let Some(hash_info) = hash_queue.get_hash_info_if_valid(recent_blockhash, max_age) { + Ok(CheckedTransactionDetails { + nonce: None, + lamports_per_signature: hash_info.lamports_per_signature(), + }) + } else if let Some((nonce, nonce_data)) = + self.check_and_load_message_nonce_account(tx.message(), next_durable_nonce) + { + Ok(CheckedTransactionDetails { + nonce: Some(nonce), + lamports_per_signature: nonce_data.get_lamports_per_signature(), + }) + } else { + error_counters.blockhash_not_found += 1; + Err(TransactionError::BlockhashNotFound) + } + } + + pub(super) fn check_and_load_message_nonce_account( + &self, + message: &SanitizedMessage, + next_durable_nonce: &DurableNonce, + ) -> Option<(NonceInfo, nonce::state::Data)> { + let nonce_is_advanceable = message.recent_blockhash() != next_durable_nonce.as_hash(); + if nonce_is_advanceable { + self.load_message_nonce_account(message) + } else { + None + } + } + + pub(super) fn load_message_nonce_account( + &self, + message: &SanitizedMessage, + ) -> Option<(NonceInfo, nonce::state::Data)> { + let nonce_address = message.get_durable_nonce()?; + let nonce_account = self.get_account_with_fixed_root(nonce_address)?; + let nonce_data = + nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?; + + let nonce_is_authorized = message + .get_ix_signers(NONCED_TX_MARKER_IX_INDEX as usize) + .any(|signer| signer == &nonce_data.authority); + if !nonce_is_authorized { + return None; + } + + Some((NonceInfo::new(*nonce_address, nonce_account), nonce_data)) + } + + fn check_status_cache( + &self, + sanitized_txs: &[impl core::borrow::Borrow], + lock_results: Vec, + error_counters: &mut 
TransactionErrorMetrics, + ) -> Vec { + let rcache = self.status_cache.read().unwrap(); + sanitized_txs + .iter() + .zip(lock_results) + .map(|(sanitized_tx, lock_result)| { + let sanitized_tx = sanitized_tx.borrow(); + if lock_result.is_ok() + && self.is_transaction_already_processed(sanitized_tx, &rcache) + { + error_counters.already_processed += 1; + return Err(TransactionError::AlreadyProcessed); + } + + lock_result + }) + .collect() + } + + fn is_transaction_already_processed( + &self, + sanitized_tx: &SanitizedTransaction, + status_cache: &BankStatusCache, + ) -> bool { + let key = sanitized_tx.message_hash(); + let transaction_blockhash = sanitized_tx.message().recent_blockhash(); + status_cache + .get_status(key, transaction_blockhash, &self.ancestors) + .is_some() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::bank::tests::{ + get_nonce_blockhash, get_nonce_data_from_account, new_sanitized_message, + setup_nonce_with_bank, + }, + solana_sdk::{ + feature_set::FeatureSet, hash::Hash, message::Message, signature::Keypair, + signer::Signer, system_instruction, + }, + }; + + #[test] + fn test_check_and_load_message_nonce_account_ok() { + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); + let custodian_pubkey = custodian_keypair.pubkey(); + let nonce_pubkey = nonce_keypair.pubkey(); + + let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); + let message = new_sanitized_message(Message::new_with_blockhash( + &[ + system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), + system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), + ], + Some(&custodian_pubkey), + &nonce_hash, + )); + let nonce_account = bank.get_account(&nonce_pubkey).unwrap(); + let nonce_data = get_nonce_data_from_account(&nonce_account).unwrap(); + assert_eq!( + bank.check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()), + Some((NonceInfo::new(nonce_pubkey, nonce_account), nonce_data)) + ); + } + + #[test] + fn test_check_and_load_message_nonce_account_not_nonce_fail() { + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); + let custodian_pubkey = custodian_keypair.pubkey(); + let nonce_pubkey = nonce_keypair.pubkey(); + + let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); + let message = new_sanitized_message(Message::new_with_blockhash( + &[ + system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), + system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), + ], + Some(&custodian_pubkey), + &nonce_hash, + )); + assert!(bank + .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) + .is_none()); + } + + #[test] + fn test_check_and_load_message_nonce_account_missing_ix_pubkey_fail() { + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); + let custodian_pubkey = custodian_keypair.pubkey(); + let nonce_pubkey = nonce_keypair.pubkey(); + + let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); + let mut message = Message::new_with_blockhash( + &[ + system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), + 
system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), + ], + Some(&custodian_pubkey), + &nonce_hash, + ); + message.instructions[0].accounts.clear(); + assert!(bank + .check_and_load_message_nonce_account( + &new_sanitized_message(message), + &bank.next_durable_nonce(), + ) + .is_none()); + } + + #[test] + fn test_check_and_load_message_nonce_account_nonce_acc_does_not_exist_fail() { + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); + let custodian_pubkey = custodian_keypair.pubkey(); + let nonce_pubkey = nonce_keypair.pubkey(); + let missing_keypair = Keypair::new(); + let missing_pubkey = missing_keypair.pubkey(); + + let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); + let message = new_sanitized_message(Message::new_with_blockhash( + &[ + system_instruction::advance_nonce_account(&missing_pubkey, &nonce_pubkey), + system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), + ], + Some(&custodian_pubkey), + &nonce_hash, + )); + assert!(bank + .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) + .is_none()); + } + + #[test] + fn test_check_and_load_message_nonce_account_bad_tx_hash_fail() { + let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( + 10_000_000, + |_| {}, + 5_000_000, + 250_000, + None, + FeatureSet::all_enabled(), + ) + .unwrap(); + let custodian_pubkey = custodian_keypair.pubkey(); + let nonce_pubkey = nonce_keypair.pubkey(); + + let message = new_sanitized_message(Message::new_with_blockhash( + &[ + system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), + system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), + ], + Some(&custodian_pubkey), + &Hash::default(), + )); + assert!(bank + .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) + .is_none()); + } +} diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index c1d03382ee45d0..21c70faa2534c0 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -101,7 +101,7 @@ use { }, solana_stake_program::stake_state::{self, StakeStateV2}, solana_svm::{ - account_loader::LoadedTransaction, nonce_info::NonceInfo, + account_loader::LoadedTransaction, transaction_commit_result::TransactionCommitResultExtensions, transaction_execution_result::ExecutedTransaction, }, @@ -192,7 +192,7 @@ pub(in crate::bank) fn create_genesis_config(lamports: u64) -> (GenesisConfig, K solana_sdk::genesis_config::create_genesis_config(lamports) } -fn new_sanitized_message(message: Message) -> SanitizedMessage { +pub(in crate::bank) fn new_sanitized_message(message: Message) -> SanitizedMessage { SanitizedMessage::try_from_legacy_message(message, &ReservedAccountKeys::empty_key_set()) .unwrap() } @@ -4815,13 +4815,15 @@ fn test_banks_leak() { } } -fn get_nonce_blockhash(bank: &Bank, nonce_pubkey: &Pubkey) -> Option { +pub(in crate::bank) fn get_nonce_blockhash(bank: &Bank, nonce_pubkey: &Pubkey) -> Option { let account = bank.get_account(nonce_pubkey)?; let nonce_data = get_nonce_data_from_account(&account)?; Some(nonce_data.blockhash()) } -fn get_nonce_data_from_account(account: &AccountSharedData) -> Option { +pub(in crate::bank) fn get_nonce_data_from_account( + account: &AccountSharedData, +) -> Option { let nonce_versions = StateMut::::state(account).ok()?; if let nonce::State::Initialized(nonce_data) = 
nonce_versions.state() { Some(nonce_data.clone()) @@ -4864,7 +4866,7 @@ fn nonce_setup( type NonceSetup = (Arc, Keypair, Keypair, Keypair, Arc>); -fn setup_nonce_with_bank( +pub(in crate::bank) fn setup_nonce_with_bank( supply_lamports: u64, mut genesis_cfg_fn: F, custodian_lamports: u64, @@ -4912,161 +4914,13 @@ where } impl Bank { - fn next_durable_nonce(&self) -> DurableNonce { + pub(in crate::bank) fn next_durable_nonce(&self) -> DurableNonce { let hash_queue = self.blockhash_queue.read().unwrap(); let last_blockhash = hash_queue.last_hash(); DurableNonce::from_blockhash(&last_blockhash) } } -#[test] -fn test_check_and_load_message_nonce_account_ok() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); - let custodian_pubkey = custodian_keypair.pubkey(); - let nonce_pubkey = nonce_keypair.pubkey(); - - let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); - let message = new_sanitized_message(Message::new_with_blockhash( - &[ - system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), - system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), - ], - Some(&custodian_pubkey), - &nonce_hash, - )); - let nonce_account = bank.get_account(&nonce_pubkey).unwrap(); - let nonce_data = get_nonce_data_from_account(&nonce_account).unwrap(); - assert_eq!( - bank.check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()), - Some((NonceInfo::new(nonce_pubkey, nonce_account), nonce_data)) - ); -} - -#[test] -fn test_check_and_load_message_nonce_account_not_nonce_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); - let custodian_pubkey = custodian_keypair.pubkey(); - let nonce_pubkey = nonce_keypair.pubkey(); - - let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); - let message = new_sanitized_message(Message::new_with_blockhash( - &[ - system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), - system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), - ], - Some(&custodian_pubkey), - &nonce_hash, - )); - assert!(bank - .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) - .is_none()); -} - -#[test] -fn test_check_and_load_message_nonce_account_missing_ix_pubkey_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); - let custodian_pubkey = custodian_keypair.pubkey(); - let nonce_pubkey = nonce_keypair.pubkey(); - - let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); - let mut message = Message::new_with_blockhash( - &[ - system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), - system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), - ], - Some(&custodian_pubkey), - &nonce_hash, - ); - message.instructions[0].accounts.clear(); - assert!(bank - .check_and_load_message_nonce_account( - &new_sanitized_message(message), - &bank.next_durable_nonce(), - ) - .is_none()); -} - -#[test] -fn test_check_and_load_message_nonce_account_nonce_acc_does_not_exist_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, 
- None, - FeatureSet::all_enabled(), - ) - .unwrap(); - let custodian_pubkey = custodian_keypair.pubkey(); - let nonce_pubkey = nonce_keypair.pubkey(); - let missing_keypair = Keypair::new(); - let missing_pubkey = missing_keypair.pubkey(); - - let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); - let message = new_sanitized_message(Message::new_with_blockhash( - &[ - system_instruction::advance_nonce_account(&missing_pubkey, &nonce_pubkey), - system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), - ], - Some(&custodian_pubkey), - &nonce_hash, - )); - assert!(bank - .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) - .is_none()); -} - -#[test] -fn test_check_and_load_message_nonce_account_bad_tx_hash_fail() { - let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( - 10_000_000, - |_| {}, - 5_000_000, - 250_000, - None, - FeatureSet::all_enabled(), - ) - .unwrap(); - let custodian_pubkey = custodian_keypair.pubkey(); - let nonce_pubkey = nonce_keypair.pubkey(); - - let message = new_sanitized_message(Message::new_with_blockhash( - &[ - system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey), - system_instruction::transfer(&custodian_pubkey, &nonce_pubkey, 100_000), - ], - Some(&custodian_pubkey), - &Hash::default(), - )); - assert!(bank - .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) - .is_none()); -} - #[test] fn test_assign_from_nonce_account_fail() { let (bank, _bank_forks) = create_simple_test_arc_bank(100_000_000); From a2a849fd8b0f062fa22a9a0338b913a263e6bdae Mon Sep 17 00:00:00 2001 From: CeciEstErmat <86524414+ceciEstErmat@users.noreply.github.com> Date: Tue, 6 Aug 2024 03:12:09 +0200 Subject: [PATCH 029/529] Fix setting an account override for fee payer (#2375) --- svm/src/transaction_processor.rs | 75 +++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index c2fb997b3f522f..f7862724e7f314 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -236,6 +236,7 @@ impl TransactionBatchProcessor { let (validation_results, validate_fees_us) = measure_us!(self.validate_fees( callbacks, + config.account_overrides, sanitized_txs, check_results, &environment.feature_set, @@ -360,6 +361,7 @@ impl TransactionBatchProcessor { fn validate_fees( &self, callbacks: &CB, + account_overrides: Option<&AccountOverrides>, sanitized_txs: &[impl core::borrow::Borrow], check_results: Vec, feature_set: &FeatureSet, @@ -375,6 +377,7 @@ impl TransactionBatchProcessor { let message = sanitized_tx.borrow().message(); self.validate_transaction_fee_payer( callbacks, + account_overrides, message, checked_details, feature_set, @@ -393,6 +396,7 @@ impl TransactionBatchProcessor { fn validate_transaction_fee_payer( &self, callbacks: &CB, + account_overrides: Option<&AccountOverrides>, message: &SanitizedMessage, checked_details: CheckedTransactionDetails, feature_set: &FeatureSet, @@ -409,8 +413,12 @@ impl TransactionBatchProcessor { })?; let fee_payer_address = message.fee_payer(); - let Some(mut fee_payer_account) = callbacks.get_account_shared_data(fee_payer_address) - else { + + let fee_payer_account = account_overrides + .and_then(|overrides| overrides.get(fee_payer_address).cloned()) + .or_else(|| callbacks.get_account_shared_data(fee_payer_address)); + + let Some(mut fee_payer_account) = fee_payer_account else { 
error_counters.account_not_found += 1;
            return Err(TransactionError::AccountNotFound);
        };
@@ -1836,6 +1844,7 @@ mod tests {
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -1908,6 +1917,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -1955,6 +1965,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -1987,6 +1998,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -2023,6 +2035,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -2057,6 +2070,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -2088,6 +2102,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -2149,6 +2164,7 @@
         ));
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: nonce.clone(),
@@ -2206,6 +2222,7 @@
         let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
         let result = batch_processor.validate_transaction_fee_payer(
             &mock_bank,
+            None,
             &message,
             CheckedTransactionDetails {
                 nonce: None,
@@ -2221,4 +2238,58 @@
             assert_eq!(result, Err(TransactionError::InsufficientFundsForFee));
         }
     }
+
+    #[test]
+    fn test_validate_account_override_usage_on_validate_fee() {
+        /*
+        The test sets up an account override with enough lamports to pass fee validation.
+        The account_db holds the account with only the minimum rent amount and thus would fail fee validation.
+        The test verifies that the override is used, so fee validation passes. 
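+        The override takes precedence over the account_db lookup, so fee validation
+        sees the funded override account rather than the underfunded stored one.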
+ */ + let lamports_per_signature = 5000; + + let message = + new_unchecked_sanitized_message(Message::new(&[], Some(&Pubkey::new_unique()))); + + let fee_payer_address = message.fee_payer(); + let transaction_fee = lamports_per_signature; + let rent_collector = RentCollector::default(); + let min_balance = rent_collector.rent.minimum_balance(0); + + let fee_payer_account = AccountSharedData::new(min_balance, 0, &Pubkey::default()); + let mut mock_accounts = HashMap::new(); + mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); + + let necessary_balance = min_balance + transaction_fee; + let mut account_overrides = AccountOverrides::default(); + let fee_payer_account_override = + AccountSharedData::new(necessary_balance, 0, &Pubkey::default()); + account_overrides.set_account(fee_payer_address, Some(fee_payer_account_override)); + + let mock_bank = MockBankCallback { + account_shared_data: Arc::new(RwLock::new(mock_accounts)), + }; + + let mut error_counters = TransactionErrorMetrics::default(); + let batch_processor = TransactionBatchProcessor::::default(); + + let result = batch_processor.validate_transaction_fee_payer( + &mock_bank, + Some(&account_overrides), + &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }, + &FeatureSet::default(), + &FeeStructure::default(), + &rent_collector, + &mut error_counters, + ); + assert!( + result.is_ok(), + "test_account_override_used: {:?}", + result.err() + ); + } } From beb3f582f784a96e59e06ef8f34e855258bcd98c Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 6 Aug 2024 09:30:54 -0400 Subject: [PATCH 030/529] Replaces SPS's channel with pending packages that has one slot per snapshot kind (#2446) --- core/src/accounts_hash_verifier.rs | 33 +- core/src/snapshot_packager_service.rs | 179 +---------- .../pending_snapshot_packages.rs | 290 ++++++++++++++++++ core/src/validator.rs | 42 ++- core/tests/epoch_accounts_hash.rs | 11 +- core/tests/snapshots.rs | 11 +- ledger-tool/src/ledger_utils.rs | 8 +- 7 files changed, 346 insertions(+), 228 deletions(-) create mode 100644 core/src/snapshot_packager_service/pending_snapshot_packages.rs diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 87a16610547290..78b6c2d0298c80 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -1,6 +1,7 @@ //! Service to calculate accounts hashes use { + crate::snapshot_packager_service::PendingSnapshotPackages, crossbeam_channel::{Receiver, Sender}, solana_accounts_db::{ accounts_db::CalcAccountsHashKind, @@ -24,7 +25,7 @@ use { io::Result as IoResult, sync::{ atomic::{AtomicBool, Ordering}, - Arc, + Arc, Mutex, }, thread::{self, Builder, JoinHandle}, time::Duration, @@ -39,7 +40,7 @@ impl AccountsHashVerifier { pub fn new( accounts_package_sender: Sender, accounts_package_receiver: Receiver, - snapshot_package_sender: Option>, + pending_snapshot_packages: Arc>, exit: Arc, snapshot_config: SnapshotConfig, ) -> Self { @@ -71,9 +72,8 @@ impl AccountsHashVerifier { let (result, handling_time_us) = measure_us!(Self::process_accounts_package( accounts_package, - snapshot_package_sender.as_ref(), + &pending_snapshot_packages, &snapshot_config, - &exit, )); if let Err(err) = result { error!("Stopping AccountsHashVerifier! 
Fatal error while processing accounts package: {err}"); @@ -208,9 +208,8 @@ impl AccountsHashVerifier { #[allow(clippy::too_many_arguments)] fn process_accounts_package( accounts_package: AccountsPackage, - snapshot_package_sender: Option<&Sender>, + pending_snapshot_packages: &Mutex, snapshot_config: &SnapshotConfig, - exit: &AtomicBool, ) -> IoResult<()> { let (accounts_hash_kind, bank_incremental_snapshot_persistence) = Self::calculate_and_verify_accounts_hash(&accounts_package, snapshot_config)?; @@ -221,11 +220,10 @@ impl AccountsHashVerifier { Self::submit_for_packaging( accounts_package, - snapshot_package_sender, + pending_snapshot_packages, snapshot_config, accounts_hash_kind, bank_incremental_snapshot_persistence, - exit, ); Ok(()) @@ -462,11 +460,10 @@ impl AccountsHashVerifier { fn submit_for_packaging( accounts_package: AccountsPackage, - snapshot_package_sender: Option<&Sender>, + pending_snapshot_packages: &Mutex, snapshot_config: &SnapshotConfig, accounts_hash_kind: AccountsHashKind, bank_incremental_snapshot_persistence: Option, - exit: &AtomicBool, ) { if !snapshot_config.should_generate_snapshots() || !matches!( @@ -476,24 +473,16 @@ impl AccountsHashVerifier { { return; } - let Some(snapshot_package_sender) = snapshot_package_sender else { - return; - }; let snapshot_package = SnapshotPackage::new( accounts_package, accounts_hash_kind, bank_incremental_snapshot_persistence, ); - let send_result = snapshot_package_sender.send(snapshot_package); - if let Err(err) = send_result { - // Sending the snapshot package should never fail *unless* we're shutting down. - let snapshot_package = &err.0; - assert!( - exit.load(Ordering::Relaxed), - "Failed to send snapshot package: {err}, {snapshot_package:?}" - ); - } + pending_snapshot_packages + .lock() + .unwrap() + .push(snapshot_package); } pub fn join(self) -> thread::Result<()> { diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index ebfa0a9bbe869a..f9c40e4f9b13fe 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -1,20 +1,19 @@ +mod pending_snapshot_packages; mod snapshot_gossip_manager; +pub use pending_snapshot_packages::PendingSnapshotPackages; use { - crossbeam_channel::{Receiver, Sender}, snapshot_gossip_manager::SnapshotGossipManager, solana_gossip::cluster_info::ClusterInfo, solana_measure::{measure::Measure, measure_us}, solana_perf::thread::renice_this_thread, solana_runtime::{ - snapshot_config::SnapshotConfig, - snapshot_hash::StartingSnapshotHashes, - snapshot_package::{self, SnapshotPackage}, - snapshot_utils, + snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, + snapshot_package::SnapshotPackage, snapshot_utils, }, std::{ sync::{ atomic::{AtomicBool, Ordering}, - Arc, + Arc, Mutex, }, thread::{self, Builder, JoinHandle}, time::Duration, @@ -30,8 +29,7 @@ impl SnapshotPackagerService { const LOOP_LIMITER: Duration = Duration::from_millis(100); pub fn new( - snapshot_package_sender: Sender, - snapshot_package_receiver: Receiver, + pending_snapshot_packages: Arc>, starting_snapshot_hashes: Option, exit: Arc, cluster_info: Arc, @@ -51,13 +49,8 @@ impl SnapshotPackagerService { break; } - let Some(( - snapshot_package, - num_outstanding_snapshot_packages, - num_re_enqueued_snapshot_packages, - )) = Self::get_next_snapshot_package( - &snapshot_package_sender, - &snapshot_package_receiver, + let Some(snapshot_package) = Self::get_next_snapshot_package( + &pending_snapshot_packages, ) else { 
std::thread::sleep(Self::LOOP_LIMITER); @@ -108,16 +101,6 @@ impl SnapshotPackagerService { let handling_time_us = measure_handling.end_as_us(); datapoint_info!( "snapshot_packager_service", - ( - "num_outstanding_snapshot_packages", - num_outstanding_snapshot_packages, - i64 - ), - ( - "num_re_enqueued_snapshot_packages", - num_re_enqueued_snapshot_packages, - i64 - ), ("enqueued_time_us", enqueued_time.as_micros(), i64), ("handling_time_us", handling_time_us, i64), ("archive_time_us", archive_time_us, i64), @@ -146,148 +129,10 @@ impl SnapshotPackagerService { self.t_snapshot_packager.join() } - /// Get the next snapshot package to handle - /// - /// Look through the snapshot package channel to find the highest priority one to handle next. - /// If there are no snapshot packages in the channel, return None. Otherwise return the - /// highest priority one. Unhandled snapshot packages with slots GREATER-THAN the handled one - /// will be re-enqueued. The remaining will be dropped. - /// - /// Also return the number of snapshot packages initially in the channel, and the number of - /// ones re-enqueued. + /// Returns the next snapshot package to handle fn get_next_snapshot_package( - snapshot_package_sender: &Sender, - snapshot_package_receiver: &Receiver, - ) -> Option<( - SnapshotPackage, - /*num outstanding snapshot packages*/ usize, - /*num re-enqueued snapshot packages*/ usize, - )> { - let mut snapshot_packages: Vec<_> = snapshot_package_receiver.try_iter().collect(); - // `select_nth()` panics if the slice is empty, so return if that's the case - if snapshot_packages.is_empty() { - return None; - } - let snapshot_packages_len = snapshot_packages.len(); - debug!("outstanding snapshot packages ({snapshot_packages_len}): {snapshot_packages:?}"); - - snapshot_packages.select_nth_unstable_by( - snapshot_packages_len - 1, - snapshot_package::cmp_snapshot_packages_by_priority, - ); - // SAFETY: We know `snapshot_packages` is not empty, so its len is >= 1, - // therefore there is always an element to pop. - let snapshot_package = snapshot_packages.pop().unwrap(); - let handled_snapshot_package_slot = snapshot_package.slot; - // re-enqueue any remaining snapshot packages for slots GREATER-THAN the snapshot package - // that will be handled - let num_re_enqueued_snapshot_packages = snapshot_packages - .into_iter() - .filter(|snapshot_package| snapshot_package.slot > handled_snapshot_package_slot) - .map(|snapshot_package| { - snapshot_package_sender - .try_send(snapshot_package) - .expect("re-enqueue snapshot package") - }) - .count(); - - Some(( - snapshot_package, - snapshot_packages_len, - num_re_enqueued_snapshot_packages, - )) - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - rand::seq::SliceRandom, - solana_runtime::snapshot_package::{SnapshotKind, SnapshotPackage}, - solana_sdk::clock::Slot, - }; - - /// Ensure that unhandled snapshot packages are properly re-enqueued or dropped - /// - /// The snapshot package handler should re-enqueue unhandled snapshot packages, if those - /// unhandled snapshot packages are for slots GREATER-THAN the last handled snapshot package. - /// Otherwise, they should be dropped. 
- #[test] - fn test_get_next_snapshot_package() { - fn new(snapshot_kind: SnapshotKind, slot: Slot) -> SnapshotPackage { - SnapshotPackage { - snapshot_kind, - slot, - block_height: slot, - ..SnapshotPackage::default_for_tests() - } - } - fn new_full(slot: Slot) -> SnapshotPackage { - new(SnapshotKind::FullSnapshot, slot) - } - fn new_incr(slot: Slot, base: Slot) -> SnapshotPackage { - new(SnapshotKind::IncrementalSnapshot(base), slot) - } - - let (snapshot_package_sender, snapshot_package_receiver) = crossbeam_channel::unbounded(); - - // Populate the channel so that re-enqueueing and dropping will be tested - let mut snapshot_packages = [ - new_full(100), - new_incr(110, 100), - new_incr(210, 100), - new_full(300), - new_incr(310, 300), - new_full(400), // <-- handle 1st - new_incr(410, 400), - new_incr(420, 400), // <-- handle 2nd - ]; - // Shuffle the snapshot packages to simulate receiving new snapshot packages from AHV - // simultaneously as SPS is handling them. - snapshot_packages.shuffle(&mut rand::thread_rng()); - snapshot_packages - .into_iter() - .for_each(|snapshot_package| snapshot_package_sender.send(snapshot_package).unwrap()); - - // The Full Snapshot from slot 400 is handled 1st - // (the older full snapshots are skipped and dropped) - let ( - snapshot_package, - _num_outstanding_snapshot_packages, - num_re_enqueued_snapshot_packages, - ) = SnapshotPackagerService::get_next_snapshot_package( - &snapshot_package_sender, - &snapshot_package_receiver, - ) - .unwrap(); - assert_eq!(snapshot_package.snapshot_kind, SnapshotKind::FullSnapshot,); - assert_eq!(snapshot_package.slot, 400); - assert_eq!(num_re_enqueued_snapshot_packages, 2); - - // The Incremental Snapshot from slot 420 is handled 2nd - // (the older incremental snapshot from slot 410 is skipped and dropped) - let ( - snapshot_package, - _num_outstanding_snapshot_packages, - num_re_enqueued_snapshot_packages, - ) = SnapshotPackagerService::get_next_snapshot_package( - &snapshot_package_sender, - &snapshot_package_receiver, - ) - .unwrap(); - assert_eq!( - snapshot_package.snapshot_kind, - SnapshotKind::IncrementalSnapshot(400), - ); - assert_eq!(snapshot_package.slot, 420); - assert_eq!(num_re_enqueued_snapshot_packages, 0); - - // And now the snapshot package channel is empty! - assert!(SnapshotPackagerService::get_next_snapshot_package( - &snapshot_package_sender, - &snapshot_package_receiver - ) - .is_none()); + pending_snapshot_packages: &Mutex, + ) -> Option { + pending_snapshot_packages.lock().unwrap().pop() } } diff --git a/core/src/snapshot_packager_service/pending_snapshot_packages.rs b/core/src/snapshot_packager_service/pending_snapshot_packages.rs new file mode 100644 index 00000000000000..726a3dd39477f7 --- /dev/null +++ b/core/src/snapshot_packager_service/pending_snapshot_packages.rs @@ -0,0 +1,290 @@ +use { + solana_runtime::snapshot_package::{ + cmp_snapshot_packages_by_priority, SnapshotKind, SnapshotPackage, + }, + std::cmp::Ordering::Greater, +}; + +/// Snapshot packages that are pending for archival +#[derive(Debug, Default)] +pub struct PendingSnapshotPackages { + full: Option, + incremental: Option, +} + +impl PendingSnapshotPackages { + /// Adds `snapshot_package` as a pending snapshot package + /// + /// This will overwrite currently-pending in-kind packages. + /// + /// Note: This function will panic if `snapshot_package` is *older* + /// than any currently-pending in-kind packages. 
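+    ///
+    /// At most one full and one incremental package are held at a time, so a
+    /// newer in-kind package replaces the older one rather than queueing behind it.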
+ pub fn push(&mut self, snapshot_package: SnapshotPackage) { + match snapshot_package.snapshot_kind { + SnapshotKind::FullSnapshot => { + if let Some(pending_full_snapshot_package) = self.full.as_ref() { + // snapshots are monotonically increasing; only overwrite *old* packages + assert!(pending_full_snapshot_package + .snapshot_kind + .is_full_snapshot()); + assert_eq!( + cmp_snapshot_packages_by_priority( + &snapshot_package, + pending_full_snapshot_package, + ), + Greater, + "full snapshot package must be newer than pending package, \ + old: {pending_full_snapshot_package:?}, new: {snapshot_package:?}", + ); + info!( + "overwrote pending full snapshot package, old slot: {}, new slot: {}", + pending_full_snapshot_package.slot, snapshot_package.slot, + ); + } + self.full = Some(snapshot_package) + } + SnapshotKind::IncrementalSnapshot(_) => { + if let Some(pending_incremental_snapshot_package) = self.incremental.as_ref() { + // snapshots are monotonically increasing; only overwrite *old* packages + assert!(pending_incremental_snapshot_package + .snapshot_kind + .is_incremental_snapshot()); + assert_eq!( + cmp_snapshot_packages_by_priority( + &snapshot_package, + pending_incremental_snapshot_package, + ), + Greater, + "incremental snapshot package must be newer than pending package, \ + old: {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}", + ); + info!( + "overwrote pending incremental snapshot package, old slot: {}, new slot: {}", + pending_incremental_snapshot_package.slot, snapshot_package.slot, + ); + } + self.incremental = Some(snapshot_package) + } + } + } + + /// Returns the next pending snapshot package to handle + pub fn pop(&mut self) -> Option { + let pending_full = self.full.take(); + let pending_incremental = self.incremental.take(); + match (pending_full, pending_incremental) { + (Some(pending_full), pending_incremental) => { + // If there is a pending incremental snapshot package, check its slot. + // If its slot is greater than the full snapshot package's, + // re-enqueue it, otherwise drop it. + // Note that it is *not supported* to handle incremental snapshots with + // slots *older* than the latest full snapshot. This is why we do not + // re-enqueue every incremental snapshot. 
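+                // Concretely: the incremental survives only if its slot is newer
+                // than this full package's slot and its base is this full snapshot
+                // or a newer one.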
+ if let Some(pending_incremental) = pending_incremental { + let SnapshotKind::IncrementalSnapshot(base_slot) = + &pending_incremental.snapshot_kind + else { + panic!( + "the pending incremental snapshot package must be of kind \ + IncrementalSnapshot, but instead was {pending_incremental:?}", + ); + }; + if pending_incremental.slot > pending_full.slot + && *base_slot >= pending_full.slot + { + self.incremental = Some(pending_incremental); + } + } + + assert!(pending_full.snapshot_kind.is_full_snapshot()); + Some(pending_full) + } + (None, Some(pending_incremental)) => { + assert!(pending_incremental.snapshot_kind.is_incremental_snapshot()); + Some(pending_incremental) + } + (None, None) => None, + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_runtime::snapshot_package::{SnapshotKind, SnapshotPackage}, + solana_sdk::clock::Slot, + }; + + fn new(snapshot_kind: SnapshotKind, slot: Slot) -> SnapshotPackage { + SnapshotPackage { + snapshot_kind, + slot, + block_height: slot, + ..SnapshotPackage::default_for_tests() + } + } + fn new_full(slot: Slot) -> SnapshotPackage { + new(SnapshotKind::FullSnapshot, slot) + } + fn new_incr(slot: Slot, base: Slot) -> SnapshotPackage { + new(SnapshotKind::IncrementalSnapshot(base), slot) + } + + #[test] + fn test_default() { + let pending_snapshot_packages = PendingSnapshotPackages::default(); + assert!(pending_snapshot_packages.full.is_none()); + assert!(pending_snapshot_packages.incremental.is_none()); + } + + #[test] + fn test_push() { + let mut pending_snapshot_packages = PendingSnapshotPackages::default(); + + // ensure we can push full snapshot packages + let slot = 100; + pending_snapshot_packages.push(new_full(slot)); + assert_eq!(pending_snapshot_packages.full.as_ref().unwrap().slot, slot); + assert!(pending_snapshot_packages.incremental.is_none()); + + // ensure we can overwrite full snapshot packages + let slot = slot + 100; + pending_snapshot_packages.push(new_full(slot)); + assert_eq!(pending_snapshot_packages.full.as_ref().unwrap().slot, slot); + assert!(pending_snapshot_packages.incremental.is_none()); + + // ensure we can push incremental packages + let full_slot = slot; + let slot = full_slot + 10; + pending_snapshot_packages.push(new_incr(slot, full_slot)); + assert_eq!( + pending_snapshot_packages.full.as_ref().unwrap().slot, + full_slot, + ); + assert_eq!( + pending_snapshot_packages.incremental.as_ref().unwrap().slot, + slot, + ); + + // ensure we can overwrite incremental packages + let slot = slot + 10; + pending_snapshot_packages.push(new_incr(slot, full_slot)); + assert_eq!( + pending_snapshot_packages.full.as_ref().unwrap().slot, + full_slot, + ); + assert_eq!( + pending_snapshot_packages.incremental.as_ref().unwrap().slot, + slot, + ); + + // ensure pushing a full package doesn't affect the incremental package + // (we already tested above that pushing an incremental doesn't affect the full) + let incremental_slot = slot; + let slot = full_slot + 100; + pending_snapshot_packages.push(new_full(slot)); + assert_eq!(pending_snapshot_packages.full.as_ref().unwrap().slot, slot); + assert_eq!( + pending_snapshot_packages.incremental.as_ref().unwrap().slot, + incremental_slot, + ); + } + + #[test] + #[should_panic(expected = "full snapshot package must be newer than pending package")] + fn test_push_older_full() { + let slot = 100; + let mut pending_snapshot_packages = PendingSnapshotPackages { + full: Some(new_full(slot)), + incremental: None, + }; + + // pushing an older full should panic + 
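+        // (new_full(slot - 1) compares as older by priority, so push's assert fires)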
pending_snapshot_packages.push(new_full(slot - 1)); + } + + #[test] + #[should_panic(expected = "incremental snapshot package must be newer than pending package")] + fn test_push_older_incremental() { + let base = 100; + let slot = base + 20; + let mut pending_snapshot_packages = PendingSnapshotPackages { + full: None, + incremental: Some(new_incr(slot, base)), + }; + + // pushing an older incremental should panic + pending_snapshot_packages.push(new_incr(slot - 1, base)); + } + + #[test] + fn test_pop() { + let mut pending_snapshot_packages = PendingSnapshotPackages::default(); + + // ensure we can call pop when there are no pending packages + assert!(pending_snapshot_packages.pop().is_none()); + + // ensure pop returns full when there's only a full + let slot = 100; + pending_snapshot_packages.full = Some(new_full(slot)); + pending_snapshot_packages.incremental = None; + let snapshot_package = pending_snapshot_packages.pop().unwrap(); + assert!(snapshot_package.snapshot_kind.is_full_snapshot()); + assert_eq!(snapshot_package.slot, slot); + + // ensure pop returns incremental when there's only an incremental + let base = 100; + let slot = base + 10; + pending_snapshot_packages.full = None; + pending_snapshot_packages.incremental = Some(new_incr(slot, base)); + let snapshot_package = pending_snapshot_packages.pop().unwrap(); + assert!(snapshot_package.snapshot_kind.is_incremental_snapshot()); + assert_eq!(snapshot_package.slot, slot); + + // ensure pop returns full when there's both a full and newer incremental + let full_slot = 100; + let incr_slot = full_slot + 10; + pending_snapshot_packages.full = Some(new_full(full_slot)); + pending_snapshot_packages.incremental = Some(new_incr(incr_slot, full_slot)); + let snapshot_package = pending_snapshot_packages.pop().unwrap(); + assert!(snapshot_package.snapshot_kind.is_full_snapshot()); + assert_eq!(snapshot_package.slot, full_slot); + + // ..and then the second pop returns the incremental + let snapshot_package = pending_snapshot_packages.pop().unwrap(); + assert!(snapshot_package.snapshot_kind.is_incremental_snapshot()); + assert_eq!(snapshot_package.slot, incr_slot); + + // but, if there's a full and *older* incremental, pop should return + // the full and *not* re-enqueue the incremental + let full_slot = 200; + let incr_slot = full_slot - 10; + pending_snapshot_packages.full = Some(new_full(full_slot)); + pending_snapshot_packages.incremental = Some(new_incr(incr_slot, full_slot)); + let snapshot_package = pending_snapshot_packages.pop().unwrap(); + assert!(snapshot_package.snapshot_kind.is_full_snapshot()); + assert_eq!(snapshot_package.slot, full_slot); + assert!(pending_snapshot_packages.incremental.is_none()); + } + + #[test] + #[should_panic] + fn test_pop_invalid_pending_full() { + let mut pending_snapshot_packages = PendingSnapshotPackages { + full: Some(new_incr(110, 100)), // <-- invalid! `full` is IncrementalSnapshot + incremental: None, + }; + pending_snapshot_packages.pop(); + } + + #[test] + #[should_panic] + fn test_pop_invalid_pending_incremental() { + let mut pending_snapshot_packages = PendingSnapshotPackages { + full: None, + incremental: Some(new_full(100)), // <-- invalid! 
`incremental` is FullSnapshot + }; + pending_snapshot_packages.pop(); + } +} diff --git a/core/src/validator.rs b/core/src/validator.rs index 757c29f30299a9..c6e67556e0030b 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -19,7 +19,7 @@ use { rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, sample_performance_service::SamplePerformanceService, sigverify, - snapshot_packager_service::SnapshotPackagerService, + snapshot_packager_service::{PendingSnapshotPackages, SnapshotPackagerService}, stats_reporter_service::StatsReporterService, system_monitor_service::{ verify_net_stats_access, SystemMonitorService, SystemMonitorStatsReportConfig, @@ -132,7 +132,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -774,33 +774,27 @@ impl Validator { config.accounts_hash_interval_slots, )); - let (snapshot_package_sender, snapshot_packager_service) = - if config.snapshot_config.should_generate_snapshots() { - let enable_gossip_push = true; - let (snapshot_package_sender, snapshot_package_receiver) = - crossbeam_channel::unbounded(); - let snapshot_packager_service = SnapshotPackagerService::new( - snapshot_package_sender.clone(), - snapshot_package_receiver, - starting_snapshot_hashes, - exit.clone(), - cluster_info.clone(), - config.snapshot_config.clone(), - enable_gossip_push, - ); - ( - Some(snapshot_package_sender), - Some(snapshot_packager_service), - ) - } else { - (None, None) - }; + let pending_snapshot_packages = Arc::new(Mutex::new(PendingSnapshotPackages::default())); + let snapshot_packager_service = if config.snapshot_config.should_generate_snapshots() { + let enable_gossip_push = true; + let snapshot_packager_service = SnapshotPackagerService::new( + pending_snapshot_packages.clone(), + starting_snapshot_hashes, + exit.clone(), + cluster_info.clone(), + config.snapshot_config.clone(), + enable_gossip_push, + ); + Some(snapshot_packager_service) + } else { + None + }; let (accounts_package_sender, accounts_package_receiver) = crossbeam_channel::unbounded(); let accounts_hash_verifier = AccountsHashVerifier::new( accounts_package_sender.clone(), accounts_package_receiver, - snapshot_package_sender, + pending_snapshot_packages, exit.clone(), config.snapshot_config.clone(), ); diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 938031fa05ffbe..f70815a640c595 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -12,7 +12,7 @@ use { }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, - snapshot_packager_service::SnapshotPackagerService, + snapshot_packager_service::{PendingSnapshotPackages, SnapshotPackagerService}, }, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_runtime::{ @@ -43,7 +43,7 @@ use { mem::ManuallyDrop, sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, time::Duration, }, @@ -180,10 +180,9 @@ impl BackgroundServices { ) -> Self { info!("Starting background services..."); - let (snapshot_package_sender, snapshot_package_receiver) = crossbeam_channel::unbounded(); + let pending_snapshot_packages = Arc::new(Mutex::new(PendingSnapshotPackages::default())); let snapshot_packager_service = SnapshotPackagerService::new( - snapshot_package_sender.clone(), - snapshot_package_receiver, + pending_snapshot_packages.clone(), None, exit.clone(), cluster_info.clone(), @@ 
-195,7 +194,7 @@ impl BackgroundServices { let accounts_hash_verifier = AccountsHashVerifier::new( accounts_package_sender.clone(), accounts_package_receiver, - Some(snapshot_package_sender), + pending_snapshot_packages, exit.clone(), snapshot_config.clone(), ); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index de56e29e0d3b47..2a6c77ddb0a0c0 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -12,7 +12,7 @@ use { }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, - snapshot_packager_service::SnapshotPackagerService, + snapshot_packager_service::{PendingSnapshotPackages, SnapshotPackagerService}, }, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_runtime::{ @@ -50,7 +50,7 @@ use { path::PathBuf, sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, time::{Duration, Instant}, }, @@ -667,7 +667,7 @@ fn test_snapshots_with_background_services( let (pruned_banks_sender, pruned_banks_receiver) = unbounded(); let (snapshot_request_sender, snapshot_request_receiver) = unbounded(); let (accounts_package_sender, accounts_package_receiver) = unbounded(); - let (snapshot_package_sender, snapshot_package_receiver) = unbounded(); + let pending_snapshot_packages = Arc::new(Mutex::new(PendingSnapshotPackages::default())); let bank_forks = snapshot_test_config.bank_forks.clone(); bank_forks @@ -700,8 +700,7 @@ fn test_snapshots_with_background_services( let exit = Arc::new(AtomicBool::new(false)); let snapshot_packager_service = SnapshotPackagerService::new( - snapshot_package_sender.clone(), - snapshot_package_receiver, + pending_snapshot_packages.clone(), None, exit.clone(), cluster_info.clone(), @@ -712,7 +711,7 @@ fn test_snapshots_with_background_services( let accounts_hash_verifier = AccountsHashVerifier::new( accounts_package_sender, accounts_package_receiver, - Some(snapshot_package_sender), + pending_snapshot_packages, exit.clone(), snapshot_test_config.snapshot_config.clone(), ); diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 152ec84ed66f70..94778ee4b22407 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -8,7 +8,8 @@ use { utils::{create_all_accounts_run_and_snapshot_dirs, move_and_async_delete_path_contents}, }, solana_core::{ - accounts_hash_verifier::AccountsHashVerifier, validator::BlockVerificationMethod, + accounts_hash_verifier::AccountsHashVerifier, + snapshot_packager_service::PendingSnapshotPackages, validator::BlockVerificationMethod, }, solana_geyser_plugin_manager::geyser_plugin_service::{ GeyserPluginService, GeyserPluginServiceError, @@ -48,7 +49,7 @@ use { process::exit, sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, }, thiserror::Error, @@ -335,11 +336,12 @@ pub fn load_and_process_ledger( } } + let pending_snapshot_packages = Arc::new(Mutex::new(PendingSnapshotPackages::default())); let (accounts_package_sender, accounts_package_receiver) = crossbeam_channel::unbounded(); let accounts_hash_verifier = AccountsHashVerifier::new( accounts_package_sender.clone(), accounts_package_receiver, - None, + pending_snapshot_packages, exit.clone(), SnapshotConfig::new_load_only(), ); From c84900d3acff58204ad0e9db2874868c69fe191e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 6 Aug 2024 10:47:19 -0500 Subject: [PATCH 031/529] shrink/ancient pack purge zero lamport accounts (#2312) * shrink/ancient pack purge zero lamport accounts * pr feedback * add = * comment 
* fix comment typo --- accounts-db/src/accounts_db.rs | 233 +++++++++++++++++++++++++ accounts-db/src/ancient_append_vecs.rs | 1 + 2 files changed, 234 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 6520d4de189d64..62de2fbd61660c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -475,6 +475,7 @@ pub(crate) struct ShrinkCollect<'a, T: ShrinkCollectRefs<'a>> { pub(crate) slot: Slot, pub(crate) capacity: u64, pub(crate) unrefed_pubkeys: Vec<&'a Pubkey>, + pub(crate) zero_lamport_single_ref_pubkeys: Vec<&'a Pubkey>, pub(crate) alive_accounts: T, /// total size in storage of all alive accounts pub(crate) alive_total_bytes: usize, @@ -524,6 +525,8 @@ struct LoadAccountsIndexForShrink<'a, T: ShrinkCollectRefs<'a>> { alive_accounts: T, /// pubkeys that were unref'd in the accounts index because they were dead unrefed_pubkeys: Vec<&'a Pubkey>, + /// pubkeys that are the last remaining zero lamport instance of an account + zero_lamport_single_ref_pubkeys: Vec<&'a Pubkey>, /// true if all alive accounts are zero lamport accounts all_are_zero_lamports: bool, /// index entries we need to hold onto to keep them from getting flushed @@ -2011,6 +2014,7 @@ pub struct ShrinkStats { dead_accounts: AtomicU64, alive_accounts: AtomicU64, accounts_loaded: AtomicU64, + purged_zero_lamports: AtomicU64, } impl ShrinkStats { @@ -2109,6 +2113,11 @@ impl ShrinkStats { self.accounts_loaded.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "purged_zero_lamports_count", + self.purged_zero_lamports.swap(0, Ordering::Relaxed), + i64 + ), ); } } @@ -2309,6 +2318,13 @@ impl ShrinkAncientStats { self.many_refs_old_alive.swap(0, Ordering::Relaxed), i64 ), + ( + "purged_zero_lamports_count", + self.shrink_stats + .purged_zero_lamports + .swap(0, Ordering::Relaxed), + i64 + ), ); } } @@ -3793,12 +3809,14 @@ impl AccountsDb { let count = accounts.len(); let mut alive_accounts = T::with_capacity(count, slot_to_shrink); let mut unrefed_pubkeys = Vec::with_capacity(count); + let mut zero_lamport_single_ref_pubkeys = Vec::with_capacity(count); let mut alive = 0; let mut dead = 0; let mut index = 0; let mut all_are_zero_lamports = true; let mut index_entries_being_shrunk = Vec::with_capacity(accounts.len()); + let latest_full_snapshot_slot = self.latest_full_snapshot_slot(); self.accounts_index.scan( accounts.iter().map(|account| account.pubkey()), |pubkey, slots_refs, entry| { @@ -3817,6 +3835,21 @@ impl AccountsDb { unrefed_pubkeys.push(pubkey); result = AccountsIndexScanResult::Unref; dead += 1; + } else if stored_account.is_zero_lamport() + && ref_count == 1 + && latest_full_snapshot_slot + .map(|latest_full_snapshot_slot| { + latest_full_snapshot_slot >= slot_to_shrink + }) + .unwrap_or(true) + { + // only do this if our slot is prior to the latest full snapshot + // we found a zero lamport account that is the only instance of this account. We can delete it completely. + zero_lamport_single_ref_pubkeys.push(pubkey); + self.add_uncleaned_pubkeys_after_shrink( + slot_to_shrink, + [*pubkey].into_iter(), + ); } else { // Hold onto the index entry arc so that it cannot be flushed. // Since we are shrinking these entries, we need to disambiguate storage ids during this period and those only exist in the in-memory accounts index. 
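The branch added above purges a zero-lamport account during shrink only when it is the account's last remaining reference and the slot is already covered by the latest full snapshot. A minimal standalone sketch of that decision, with the accounts-index state reduced to plain values (the function and parameter names here are illustrative, not the real accounts-db API):

    /// Illustrative only: mirrors the shrink-time purge condition above.
    fn can_purge_zero_lamport_single_ref(
        lamports: u64,
        ref_count: u64,
        slot_to_shrink: u64,
        latest_full_snapshot_slot: Option<u64>,
    ) -> bool {
        lamports == 0
            && ref_count == 1
            // purge only if the latest full snapshot already covers this slot
            // (or no full snapshot has been taken yet)
            && latest_full_snapshot_slot
                .map(|snapshot_slot| snapshot_slot >= slot_to_shrink)
                .unwrap_or(true)
    }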
@@ -3839,6 +3872,7 @@ impl AccountsDb { LoadAccountsIndexForShrink { alive_accounts, unrefed_pubkeys, + zero_lamport_single_ref_pubkeys, all_are_zero_lamports, index_entries_being_shrunk, } @@ -3939,6 +3973,7 @@ impl AccountsDb { let len = stored_accounts.len(); let alive_accounts_collect = Mutex::new(T::with_capacity(len, slot)); let unrefed_pubkeys_collect = Mutex::new(Vec::with_capacity(len)); + let zero_lamport_single_ref_pubkeys_collect = Mutex::new(Vec::with_capacity(len)); stats .accounts_loaded .fetch_add(len as u64, Ordering::Relaxed); @@ -3955,6 +3990,7 @@ impl AccountsDb { alive_accounts, mut unrefed_pubkeys, all_are_zero_lamports, + mut zero_lamport_single_ref_pubkeys, mut index_entries_being_shrunk, } = self.load_accounts_index_for_shrink(stored_accounts, stats, slot); @@ -3967,6 +4003,10 @@ impl AccountsDb { .lock() .unwrap() .append(&mut unrefed_pubkeys); + zero_lamport_single_ref_pubkeys_collect + .lock() + .unwrap() + .append(&mut zero_lamport_single_ref_pubkeys); index_entries_being_shrunk_outer .lock() .unwrap() @@ -3979,6 +4019,9 @@ impl AccountsDb { let alive_accounts = alive_accounts_collect.into_inner().unwrap(); let unrefed_pubkeys = unrefed_pubkeys_collect.into_inner().unwrap(); + let zero_lamport_single_ref_pubkeys = zero_lamport_single_ref_pubkeys_collect + .into_inner() + .unwrap(); index_read_elapsed.stop(); stats @@ -4002,6 +4045,7 @@ impl AccountsDb { slot, capacity: *capacity, unrefed_pubkeys, + zero_lamport_single_ref_pubkeys, alive_accounts, alive_total_bytes, total_starting_accounts: len, @@ -4010,6 +4054,41 @@ impl AccountsDb { } } + /// These accounts were found during shrink of `slot` to be slot_list=[slot] and ref_count == 1 and lamports = 0. + /// This means this slot contained the only account data for this pubkey and it is zero lamport. + /// Thus, we did NOT treat this as an alive account, so we did NOT copy the zero lamport account to the new + /// storage. So, the account will no longer be alive or exist at `slot`. + /// So, first, remove the ref count since this newly shrunk storage will no longer access it. + /// Second, remove `slot` from the index entry's slot list. If the slot list is now empty, then the + /// pubkey can be removed completely from the index. + /// In parallel with this code (which is running in the bg), the same pubkey could be revived and written to + /// as part of tx processing. In that case, the slot list will contain a slot in the write cache and the + /// index entry will NOT be deleted. + fn remove_zero_lamport_single_ref_accounts_after_shrink( + &self, + zero_lamport_single_ref_pubkeys: &[&Pubkey], + slot: Slot, + stats: &ShrinkStats, + ) { + stats.purged_zero_lamports.fetch_add( + zero_lamport_single_ref_pubkeys.len() as u64, + Ordering::Relaxed, + ); + + // we have to unref before we `purge_keys_exact`. Otherwise, we could race with the foreground with tx processing + // reviving this index entry and then we'd unref the revived version, which is a refcount bug. 
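+        // The scan below performs that unref; `purge_keys_exact` then removes the
+        // now-unreferenced (pubkey, slot) entries from the index.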
+ self.accounts_index.scan( + zero_lamport_single_ref_pubkeys.iter().cloned(), + |_pubkey, _slots_refs, _entry| AccountsIndexScanResult::Unref, + Some(AccountsIndexScanResult::Unref), + false, + ); + + zero_lamport_single_ref_pubkeys.iter().for_each(|k| { + _ = self.purge_keys_exact([&(**k, slot)].into_iter()); + }); + } + /// common code from shrink and combine_ancient_slots /// get rid of all original store_ids in the slot pub(crate) fn remove_old_stores_shrink<'a, T: ShrinkCollectRefs<'a>>( @@ -4020,7 +4099,19 @@ impl AccountsDb { shrink_can_be_active: bool, ) { let mut time = Measure::start("remove_old_stores_shrink"); + + // handle the zero lamport alive accounts before calling clean + // We have to update the index entries for these zero lamport pubkeys before we remove the storage in `mark_dirty_dead_stores` + // that contained the accounts. + self.remove_zero_lamport_single_ref_accounts_after_shrink( + &shrink_collect.zero_lamport_single_ref_pubkeys, + shrink_collect.slot, + stats, + ); + // Purge old, overwritten storage entries + // This has the side effect of dropping `shrink_in_progress`, which removes the old storage completely. The + // index has to be correct before we drop the old storage. let dead_storages = self.mark_dirty_dead_stores( shrink_collect.slot, // If all accounts are zero lamports, then we want to mark the entire OLD append vec as dirty. @@ -10884,6 +10975,146 @@ pub mod tests { assert_eq!(accounts.alive_account_count_in_slot(1), 0); } + #[test] + fn test_remove_zero_lamport_single_ref_accounts_after_shrink() { + for pass in 0..3 { + let accounts = AccountsDb::new_single_for_tests(); + let pubkey_zero = Pubkey::from([1; 32]); + let pubkey2 = Pubkey::from([2; 32]); + let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); + let zero_lamport_account = + AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + let slot = 1; + + accounts.store_for_tests( + slot, + &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)], + ); + + // Simulate rooting the zero-lamport account, writes it to storage + accounts.calculate_accounts_delta_hash(slot); + accounts.add_root_and_flush_write_cache(slot); + + if pass > 0 { + // store in write cache + accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]); + if pass == 2 { + // move to a storage (causing ref count to increase) + accounts.calculate_accounts_delta_hash(slot + 1); + accounts.add_root_and_flush_write_cache(slot + 1); + } + } + + accounts.accounts_index.get_and_then(&pubkey_zero, |entry| { + let expected_ref_count = if pass < 2 { 1 } else { 2 }; + assert_eq!(entry.unwrap().ref_count(), expected_ref_count, "{pass}"); + let expected_slot_list = if pass < 1 { 1 } else { 2 }; + assert_eq!( + entry.unwrap().slot_list.read().unwrap().len(), + expected_slot_list + ); + (false, ()) + }); + accounts.accounts_index.get_and_then(&pubkey2, |entry| { + assert!(entry.is_some()); + (false, ()) + }); + + let zero_lamport_single_ref_pubkeys = [&pubkey_zero]; + accounts.remove_zero_lamport_single_ref_accounts_after_shrink( + &zero_lamport_single_ref_pubkeys, + slot, + &ShrinkStats::default(), + ); + + accounts.accounts_index.get_and_then(&pubkey_zero, |entry| { + if pass == 0 { + // should not exist in index at all + assert!(entry.is_none(), "{pass}"); + } else { + // alive only in slot + 1 + assert_eq!(entry.unwrap().slot_list.read().unwrap().len(), 1); + assert_eq!( + entry + .unwrap() + .slot_list + .read() + .unwrap() + .first() + .map(|(s, _)| s) + .cloned() + 
.unwrap(), + slot + 1 + ); + // refcount = 1 if we flushed the write cache for slot + 1 + let expected_ref_count = if pass < 2 { 0 } else { 1 }; + assert_eq!( + entry.map(|e| e.ref_count()), + Some(expected_ref_count), + "{pass}" + ); + } + (false, ()) + }); + + accounts.accounts_index.get_and_then(&pubkey2, |entry| { + assert!(entry.is_some(), "{pass}"); + (false, ()) + }); + } + } + + #[test] + fn test_shrink_zero_lamport_single_ref_account() { + solana_logger::setup(); + + // store a zero and non-zero lamport account + // make sure clean marks the ref_count=1, zero lamport account dead and removes pubkey from index completely + let accounts = AccountsDb::new_single_for_tests(); + let pubkey_zero = Pubkey::from([1; 32]); + let pubkey2 = Pubkey::from([2; 32]); + let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); + let zero_lamport_account = + AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + + // Store a zero-lamport account and a non-zero lamport account + accounts.store_for_tests( + 1, + &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)], + ); + + // Simulate rooting the zero-lamport account, should be a + // candidate for cleaning + accounts.calculate_accounts_delta_hash(1); + accounts.add_root_and_flush_write_cache(1); + + // for testing, we need to cause shrink to think this will be productive. + // The zero lamport account isn't dead, but it can become dead inside shrink. + accounts + .storage + .get_slot_storage_entry(1) + .unwrap() + .alive_bytes + .fetch_sub(aligned_stored_size(0), Ordering::Relaxed); + + // Slot 1 should be cleaned, but + // zero-lamport account should not be cleaned since last full snapshot root is before slot 1 + accounts.shrink_slot_forced(1); + + assert!(accounts.storage.get_slot_storage_entry(1).is_some()); + + // the zero lamport account should be marked as dead + assert_eq!(accounts.alive_account_count_in_slot(1), 1); + + // zero lamport account should be dead in the index + assert!(!accounts + .accounts_index + .contains_with(&pubkey_zero, None, None)); + // other account should still be alive + assert!(accounts.accounts_index.contains_with(&pubkey2, None, None)); + assert!(accounts.storage.get_slot_storage_entry(1).is_some()); + } + #[test] fn test_clean_multiple_zero_lamport_decrements_index_ref_count() { solana_logger::setup(); @@ -15910,6 +16141,8 @@ pub mod tests { debug!("space: {space}, lamports: {lamports}, alive: {alive}, account_count: {account_count}, append_opposite_alive_account: {append_opposite_alive_account}, append_opposite_zero_lamport_account: {append_opposite_zero_lamport_account}, normal_account_count: {normal_account_count}"); let db = AccountsDb::new_single_for_tests(); let slot5 = 5; + // don't do special zero lamport account handling + db.set_latest_full_snapshot_slot(0); let mut account = AccountSharedData::new( lamports, space, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 998781d8c04850..ffe025e7474a39 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -3836,6 +3836,7 @@ pub mod tests { unrefed_pubkeys: unrefed_pubkeys.iter().collect(), // irrelevant fields + zero_lamport_single_ref_pubkeys: Vec::default(), slot: 0, capacity: 0, alive_accounts: ShrinkCollectAliveSeparatedByRefs { From 0b02f8c99ac9e5eb0507d1ff796b7e48b2a1fdc4 Mon Sep 17 00:00:00 2001 From: Brennan Date: Tue, 6 Aug 2024 10:03:46 -0700 Subject: [PATCH 032/529] use squash to add all roots (#1981) --- 
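The idea behind this change, sketched with hypothetical toy types rather than the real Bank/BankForks API: squash recurses to the oldest unrooted ancestor first, so roots are always added in slot order, while rooting only the tip bank could leave an unrooted parent to be rooted out of order later.

// Toy sketch: `squash` roots the whole unrooted parent chain oldest-first.
struct ToyBank {
    slot: u64,
    parent: Option<Box<ToyBank>>,
}

fn squash(bank: &mut ToyBank, roots: &mut Vec<u64>) {
    if let Some(parent) = bank.parent.as_mut() {
        // Root ancestors before descendants.
        squash(parent, roots);
    }
    roots.push(bank.slot);
}

fn main() {
    let mut tip = ToyBank {
        slot: 7,
        parent: Some(Box::new(ToyBank { slot: 5, parent: None })),
    };
    let mut roots = Vec::new();
    squash(&mut tip, &mut roots);
    // Rooting only slot 7 would have skipped the unrooted parent at slot 5.
    assert_eq!(roots, vec![5, 7]);
}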
ledger-tool/src/main.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 78c36acf910ca7..1005f30a71cd65 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -2224,9 +2224,13 @@ fn main() { }; let bank = if let Some(warp_slot) = warp_slot { - // need to flush the write cache in order to use Storages to calculate - // the accounts hash, and need to root `bank` before flushing the cache - bank.rc.accounts.accounts_db.add_root(bank.slot()); + // Need to flush the write cache in order to use + // Storages to calculate the accounts hash, and need to + // root `bank` before flushing the cache. Use squash to + // root all unrooted parents as well and avoid panicking + // during snapshot creation if we try to add roots out + // of order. + bank.squash(); bank.force_flush_accounts_cache(); Arc::new(Bank::warp_from_parent( bank.clone(), From 72d83413a65c4435320ee1fba84b798e1dbfb542 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 6 Aug 2024 12:57:16 -0500 Subject: [PATCH 033/529] Update PoH speed check to derive rate from Bank (#2447) The PoH speed check currently determines the target hash rate by reading values from genesis. However, the hashes per tick rate has been increased via feature gates. Thus, the speed check is using a slower hash rate for comparison than what is actually active. So, update the PoH speed check to derive PoH hash rate from a Bank instead --- CHANGELOG.md | 1 + core/src/validator.rs | 83 ++++++++++++++++++++++++++----------------- 2 files changed, 52 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1da9bd05f68b70..c5ab97ef8658dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ Release channels have their own copy of this changelog: * removed the unreleased `redelegate` instruction processor and CLI commands (#2213) * Changes * SDK: removed the `respan` macro. This was marked as "internal use only" and was no longer used internally. 
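Reduced to arithmetic, the comparison in this patch looks roughly like the sketch below; the constants are illustrative stand-ins, not values read from a real Bank.

// Sketch of the target-rate computation; 12_500 hashes/tick, 64 ticks/slot,
// and a 400ms slot are made-up illustrative inputs.
fn target_hashes_per_second(hashes_per_tick: u64, ticks_per_slot: u64, ns_per_slot: u64) -> u64 {
    let hashes_per_slot = hashes_per_tick * ticks_per_slot;
    let slot_secs = ns_per_slot as f64 / 1_000_000_000.0;
    (hashes_per_slot as f64 / slot_secs) as u64
}

fn main() {
    // 12_500 * 64 = 800_000 hashes over a 0.4s slot => 2_000_000 hashes/sec.
    let target = target_hashes_per_second(12_500, 64, 400_000_000);
    assert_eq!(target, 2_000_000);
    // Startup fails when the measured rate is below `target`; deriving the
    // inputs from a Bank picks up hashes_per_tick increases from feature gates.
}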
+ * `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447) ## [2.0.0] * Breaking diff --git a/core/src/validator.rs b/core/src/validator.rs index c6e67556e0030b..373eebf4ae1be2 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -735,6 +735,11 @@ impl Validator { Some(poh_timing_point_sender.clone()), ) .map_err(ValidatorError::Other)?; + + if !config.no_poh_speed_test { + check_poh_speed(&bank_forks.read().unwrap().root_bank(), None)?; + } + let hard_forks = bank_forks.read().unwrap().root_bank().hard_forks(); if !hard_forks.is_empty() { info!("Hard forks: {:?}", hard_forks); @@ -745,7 +750,6 @@ impl Validator { &genesis_config.hash(), Some(&hard_forks), )); - Self::print_node_info(&node); if let Some(expected_shred_version) = config.expected_shred_version { @@ -1671,33 +1675,35 @@ fn active_vote_account_exists_in_bank(bank: &Bank, vote_account: &Pubkey) -> boo false } -fn check_poh_speed( - genesis_config: &GenesisConfig, - maybe_hash_samples: Option, -) -> Result<(), ValidatorError> { - if let Some(hashes_per_tick) = genesis_config.hashes_per_tick() { - let ticks_per_slot = genesis_config.ticks_per_slot(); - let hashes_per_slot = hashes_per_tick * ticks_per_slot; - let hash_samples = maybe_hash_samples.unwrap_or(hashes_per_slot); - - let hash_time = compute_hash_time(hash_samples); - let my_hashes_per_second = (hash_samples as f64 / hash_time.as_secs_f64()) as u64; - let target_slot_duration = Duration::from_nanos(genesis_config.ns_per_slot() as u64); - let target_hashes_per_second = - (hashes_per_slot as f64 / target_slot_duration.as_secs_f64()) as u64; +fn check_poh_speed(bank: &Bank, maybe_hash_samples: Option) -> Result<(), ValidatorError> { + let Some(hashes_per_tick) = bank.hashes_per_tick() else { + warn!("Unable to read hashes per tick from Bank, skipping PoH speed check"); + return Ok(()); + }; - info!( - "PoH speed check: \ + let ticks_per_slot = bank.ticks_per_slot(); + let hashes_per_slot = hashes_per_tick * ticks_per_slot; + let hash_samples = maybe_hash_samples.unwrap_or(hashes_per_slot); + + let hash_time = compute_hash_time(hash_samples); + let my_hashes_per_second = (hash_samples as f64 / hash_time.as_secs_f64()) as u64; + + let target_slot_duration = Duration::from_nanos(bank.ns_per_slot as u64); + let target_hashes_per_second = + (hashes_per_slot as f64 / target_slot_duration.as_secs_f64()) as u64; + + info!( + "PoH speed check: \ computed hashes per second {my_hashes_per_second}, \ target hashes per second {target_hashes_per_second}" - ); - if my_hashes_per_second < target_hashes_per_second { - return Err(ValidatorError::PohTooSlow { - mine: my_hashes_per_second, - target: target_hashes_per_second, - }); - } + ); + if my_hashes_per_second < target_hashes_per_second { + return Err(ValidatorError::PohTooSlow { + mine: my_hashes_per_second, + target: target_hashes_per_second, + }); } + Ok(()) } @@ -1829,10 +1835,6 @@ fn load_genesis( } } - if !config.no_poh_speed_test { - check_poh_speed(&genesis_config, None)?; - } - Ok(genesis_config) } @@ -2931,11 +2933,25 @@ mod tests { )); } + fn target_tick_duration() -> Duration { + // DEFAULT_MS_PER_SLOT = 400 + // DEFAULT_TICKS_PER_SLOT = 64 + // MS_PER_TICK = 6 + // + // But, DEFAULT_MS_PER_SLOT / DEFAULT_TICKS_PER_SLOT = 6.25 + // + // So, convert to microseconds first to avoid the integer rounding error + let target_tick_duration_us = solana_sdk::clock::DEFAULT_MS_PER_SLOT * 1000 + / solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; + assert_eq!(target_tick_duration_us, 
6250); + Duration::from_micros(target_tick_duration_us) + } + #[test] fn test_poh_speed() { solana_logger::setup(); let poh_config = PohConfig { - target_tick_duration: Duration::from_millis(solana_sdk::clock::MS_PER_TICK), + target_tick_duration: target_tick_duration(), // make PoH rate really fast to cause the panic condition hashes_per_tick: Some(100 * solana_sdk::clock::DEFAULT_HASHES_PER_TICK), ..PohConfig::default() @@ -2944,13 +2960,15 @@ mod tests { poh_config, ..GenesisConfig::default() }; - assert!(check_poh_speed(&genesis_config, Some(10_000)).is_err()); + let bank = Bank::new_for_tests(&genesis_config); + assert!(check_poh_speed(&bank, Some(10_000)).is_err()); } #[test] fn test_poh_speed_no_hashes_per_tick() { + solana_logger::setup(); let poh_config = PohConfig { - target_tick_duration: Duration::from_millis(solana_sdk::clock::MS_PER_TICK), + target_tick_duration: target_tick_duration(), hashes_per_tick: None, ..PohConfig::default() }; @@ -2958,6 +2976,7 @@ mod tests { poh_config, ..GenesisConfig::default() }; - check_poh_speed(&genesis_config, Some(10_000)).unwrap(); + let bank = Bank::new_for_tests(&genesis_config); + check_poh_speed(&bank, Some(10_000)).unwrap(); } } From 427a18b22e65cc8f81a01350646731c98158188b Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 6 Aug 2024 14:37:08 -0400 Subject: [PATCH 034/529] Improves panic message if send() fails in streaming_unpack_snapshot() (#2459) --- accounts-db/src/hardened_unpack.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index 27ce7b2772ec81..09073fb111113e 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -339,7 +339,13 @@ pub fn streaming_unpack_snapshot( |_, _| {}, |entry_path_buf| { if entry_path_buf.is_file() { - sender.send(entry_path_buf).unwrap(); + let result = sender.send(entry_path_buf); + if let Err(err) = result { + panic!( + "failed to send path '{}' from unpacker to rebuilder: {err}", + err.0.display(), + ); + } } }, ) From 44f6e03ef0e8978efbd4eb29f1cd5370dddba778 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 6 Aug 2024 15:02:34 -0400 Subject: [PATCH 035/529] hash-cache-tool: Prints capitalization and num accounts when diffing files (#2462) --- .../accounts-hash-cache-tool/src/main.rs | 58 +++++++++++++++---- 1 file changed, 46 insertions(+), 12 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 6601ac79e34008..5d8d8b9e9fd049 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -12,7 +12,7 @@ use { }, solana_program::pubkey::Pubkey, std::{ - cmp::Ordering, + cmp::{self, Ordering}, fs::{self, File, Metadata}, io::{self, BufReader, Read}, mem::size_of, @@ -181,11 +181,32 @@ fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { } fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), String> { - let entries1 = extract_latest_entries_in(&file1) + let LatestEntriesInfo { + latest_entries: entries1, + capitalization: capitalization1, + } = extract_latest_entries_in(&file1) .map_err(|err| format!("failed to extract entries from file 1: {err}"))?; - let entries2 = extract_latest_entries_in(&file2) + let LatestEntriesInfo { + latest_entries: entries2, + capitalization: capitalization2, + } = extract_latest_entries_in(&file2) .map_err(|err| format!("failed to extract entries from file 2: {err}"))?; + let 
num_accounts1 = entries1.len(); + let num_accounts2 = entries2.len(); + let num_accounts_width = { + let width1 = (num_accounts1 as f64).log10().ceil() as usize; + let width2 = (num_accounts2 as f64).log10().ceil() as usize; + cmp::max(width1, width2) + }; + let lamports_width = { + let width1 = (capitalization1 as f64).log10().ceil() as usize; + let width2 = (capitalization2 as f64).log10().ceil() as usize; + cmp::max(width1, width2) + }; + println!("File 1: number of accounts: {num_accounts1:num_accounts_width$}, capitalization: {capitalization1:lamports_width$} lamports"); + println!("File 2: number of accounts: {num_accounts2:num_accounts_width$}, capitalization: {capitalization2:lamports_width$} lamports"); + // compute the differences between the files let do_compute = |lhs: &HashMap<_, (_, _)>, rhs: &HashMap<_, (_, _)>| { let mut unique_entries = Vec::new(); @@ -231,7 +252,7 @@ fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), for (i, entry) in entries.iter().enumerate() { total_lamports += entry.lamports; println!( - "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {}", + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", entry.pubkey.to_string(), entry.hash.0.to_string(), entry.lamports, @@ -252,13 +273,13 @@ fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), } else { for (i, (lhs, rhs)) in mismatch_entries.iter().enumerate() { println!( - "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {}", + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", lhs.pubkey.to_string(), lhs.hash.0.to_string(), lhs.lamports, ); println!( - "{i:count_width$}: file 2: {:44}, hash: {:44}, lamports: {}", + "{i:count_width$}: file 2: {:44}, hash: {:44}, lamports: {:lamports_width$}", "(same)".to_string(), rhs.hash.0.to_string(), rhs.lamports, @@ -465,12 +486,10 @@ fn get_cache_files_in(dir: impl AsRef) -> Result, io::E Ok(cache_files) } -/// Returns the entries in `file` +/// Returns the entries in `file`, and the capitalization /// /// If there are multiple entries for a pubkey, only the latest is returned. 
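The keep-latest bookkeeping this patch adds pairs a dedup map with a running lamport total; a small sketch with toy key/value data standing in for real cache-file entries:

use std::collections::HashMap;

fn main() {
    // Entries arrive sorted, so a later duplicate is the newer one.
    let scanned = [("alice", 10u64), ("alice", 7), ("bob", 5)];
    let mut capitalization: u64 = 0;
    let mut latest: HashMap<&str, u64> = HashMap::new();
    for (pubkey, lamports) in scanned {
        capitalization += lamports;
        if let Some(old_lamports) = latest.insert(pubkey, lamports) {
            // Back out the overwritten value so the total tracks `latest`.
            capitalization -= old_lamports;
        }
    }
    assert_eq!(latest["alice"], 7);
    assert_eq!(capitalization, 12); // 7 + 5
}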
-fn extract_latest_entries_in( - file: impl AsRef, -) -> Result, String> { +fn extract_latest_entries_in(file: impl AsRef) -> Result { let force = false; // skipping sanity checks is not supported when extracting entries let (reader, header) = open_file(&file, force).map_err(|err| { format!( @@ -481,12 +500,21 @@ fn extract_latest_entries_in( // entries in the file are sorted by pubkey then slot, // so we want to keep the *last* entry (if there are duplicates) + let mut capitalization = Saturating(0); let mut entries = HashMap::default(); scan_file(reader, header.count, |entry| { - entries.insert(entry.pubkey, (entry.hash, entry.lamports)); + capitalization += entry.lamports; + let old_value = entries.insert(entry.pubkey, (entry.hash, entry.lamports)); + if let Some((_, old_lamports)) = old_value { + // back out the old value's lamports, so we only keep the latest's for capitalization + capitalization -= old_lamports; + } })?; - Ok(entries) + Ok(LatestEntriesInfo { + latest_entries: entries, + capitalization: capitalization.0, + }) } /// Scans file with `reader` and applies `user_fn` to each entry @@ -578,6 +606,12 @@ struct CacheFileInfo { parsed: ParsedCacheHashDataFilename, } +#[derive(Debug)] +struct LatestEntriesInfo { + latest_entries: HashMap, + capitalization: u64, // lamports +} + #[derive(Debug)] struct ElapsedOnDrop { message: String, From f1de5c0f85b5dd510e72102eb558cb016ad94de6 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 6 Aug 2024 14:46:22 -0500 Subject: [PATCH 036/529] update test for shrink removing zero lamports (#2457) * update test for shrink removing zero lamports * clean up comment --- accounts-db/src/accounts_db.rs | 110 +++++++++++++++++++++------------ 1 file changed, 72 insertions(+), 38 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 62de2fbd61660c..28610eb79eb20a 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -11067,52 +11067,86 @@ pub mod tests { #[test] fn test_shrink_zero_lamport_single_ref_account() { solana_logger::setup(); + // note that 'None' checks the case based on the default value of `latest_full_snapshot_slot` in `AccountsDb` + for latest_full_snapshot_slot in [None, Some(0), Some(1), Some(2)] { + // store a zero and non-zero lamport account + // make sure clean marks the ref_count=1, zero lamport account dead and removes pubkey from index completely + let accounts = AccountsDb::new_single_for_tests(); + let pubkey_zero = Pubkey::from([1; 32]); + let pubkey2 = Pubkey::from([2; 32]); + let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); + let zero_lamport_account = + AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + let slot = 1; + // Store a zero-lamport account and a non-zero lamport account + accounts.store_for_tests( + slot, + &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)], + ); - // store a zero and non-zero lamport account - // make sure clean marks the ref_count=1, zero lamport account dead and removes pubkey from index completely - let accounts = AccountsDb::new_single_for_tests(); - let pubkey_zero = Pubkey::from([1; 32]); - let pubkey2 = Pubkey::from([2; 32]); - let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); - let zero_lamport_account = - AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + // Simulate rooting the zero-lamport account, should be a + // candidate for cleaning + 
accounts.calculate_accounts_delta_hash(slot); + accounts.add_root_and_flush_write_cache(slot); - // Store a zero-lamport account and a non-zero lamport account - accounts.store_for_tests( - 1, - &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)], - ); + // for testing, we need to cause shrink to think this will be productive. + // The zero lamport account isn't dead, but it can become dead inside shrink. + accounts + .storage + .get_slot_storage_entry(slot) + .unwrap() + .alive_bytes + .fetch_sub(aligned_stored_size(0), Ordering::Relaxed); - // Simulate rooting the zero-lamport account, should be a - // candidate for cleaning - accounts.calculate_accounts_delta_hash(1); - accounts.add_root_and_flush_write_cache(1); + if let Some(latest_full_snapshot_slot) = latest_full_snapshot_slot { + accounts.set_latest_full_snapshot_slot(latest_full_snapshot_slot); + } - // for testing, we need to cause shrink to think this will be productive. - // The zero lamport account isn't dead, but it can become dead inside shrink. - accounts - .storage - .get_slot_storage_entry(1) - .unwrap() - .alive_bytes - .fetch_sub(aligned_stored_size(0), Ordering::Relaxed); + // Shrink the slot. The behavior on the zero lamport account will depend on `latest_full_snapshot_slot`. + accounts.shrink_slot_forced(slot); - // Slot 1 should be cleaned, but - // zero-lamport account should not be cleaned since last full snapshot root is before slot 1 - accounts.shrink_slot_forced(1); + assert!( + accounts.storage.get_slot_storage_entry(1).is_some(), + "{latest_full_snapshot_slot:?}" + ); - assert!(accounts.storage.get_slot_storage_entry(1).is_some()); + let expected_alive_count = if latest_full_snapshot_slot.unwrap_or(Slot::MAX) < slot { + // zero lamport account should NOT be dead in the index + assert!( + accounts + .accounts_index + .contains_with(&pubkey_zero, None, None), + "{latest_full_snapshot_slot:?}" + ); + 2 + } else { + // zero lamport account should be dead in the index + assert!( + !accounts + .accounts_index + .contains_with(&pubkey_zero, None, None), + "{latest_full_snapshot_slot:?}" + ); + // the zero lamport account should be marked as dead + 1 + }; - // the zero lamport account should be marked as dead - assert_eq!(accounts.alive_account_count_in_slot(1), 1); + assert_eq!( + accounts.alive_account_count_in_slot(slot), + expected_alive_count, + "{latest_full_snapshot_slot:?}" + ); - // zero lamport account should be dead in the index - assert!(!accounts - .accounts_index - .contains_with(&pubkey_zero, None, None)); - // other account should still be alive - assert!(accounts.accounts_index.contains_with(&pubkey2, None, None)); - assert!(accounts.storage.get_slot_storage_entry(1).is_some()); + // other account should still be alive + assert!( + accounts.accounts_index.contains_with(&pubkey2, None, None), + "{latest_full_snapshot_slot:?}" + ); + assert!( + accounts.storage.get_slot_storage_entry(slot).is_some(), + "{latest_full_snapshot_slot:?}" + ); + } } #[test] From 15dbe7fb0fc07e11aaad89de1576016412c7eb9e Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 6 Aug 2024 17:37:38 -0400 Subject: [PATCH 037/529] replay: do not start leader for a block we already have shreds for (#2416) * replay: do not start leader for a block we already have shreds for * pr feedback: comment, move existing check to blockstore fn * move blockstore read after tick height check * pr feedback: resuse blockstore fn in next_leader_slot --- core/src/replay_stage.rs | 144 ++++++++++++++++++++++++++-- ledger/src/blockstore.rs 
| 7 ++ ledger/src/leader_schedule_cache.rs | 12 +-- poh/src/poh_recorder.rs | 9 ++ 4 files changed, 157 insertions(+), 15 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index fb0e7890cd1b38..d128289cb7bb93 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1992,6 +1992,14 @@ impl ReplayStage { } } + /// Checks if it is time for us to start producing a leader block. + /// Fails if: + /// - Current PoH has not satisfied criteria to start my leader block + /// - Startup verification is not complete, + /// - Bank forks already contains a bank for this leader slot + /// - We have not landed a vote yet and the `wait_for_vote_to_start_leader` flag is set + /// - We have failed the propagated check + /// Returns whether a new working bank was created and inserted into bank forks. #[allow(clippy::too_many_arguments)] fn maybe_start_leader( my_pubkey: &Pubkey, @@ -2005,7 +2013,7 @@ impl ReplayStage { banking_tracer: &Arc, has_new_vote_been_rooted: bool, track_transaction_indexes: bool, - ) { + ) -> bool { // all the individual calls to poh_recorder.read() are designed to // increase granularity, decrease contention @@ -2019,7 +2027,7 @@ impl ReplayStage { } => (poh_slot, parent_slot), PohLeaderStatus::NotReached => { trace!("{} poh_recorder hasn't reached_leader_slot", my_pubkey); - return; + return false; } }; @@ -2035,12 +2043,12 @@ impl ReplayStage { if !parent.is_startup_verification_complete() { info!("startup verification incomplete, so skipping my leader slot"); - return; + return false; } if bank_forks.read().unwrap().get(poh_slot).is_some() { warn!("{} already have bank in forks at {}?", my_pubkey, poh_slot); - return; + return false; } trace!( "{} poh_slot {} parent_slot {}", @@ -2052,7 +2060,7 @@ impl ReplayStage { if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) { if !has_new_vote_been_rooted { info!("Haven't landed a vote, so skipping my leader slot"); - return; + return false; } trace!( @@ -2064,7 +2072,7 @@ impl ReplayStage { // I guess I missed my slot if next_leader != *my_pubkey { - return; + return false; } datapoint_info!( @@ -2098,7 +2106,7 @@ impl ReplayStage { latest_unconfirmed_leader_slot, ); } - return; + return false; } let root_slot = bank_forks.read().unwrap().root(); @@ -2133,8 +2141,10 @@ impl ReplayStage { .write() .unwrap() .set_bank(tpu_bank, track_transaction_indexes); + true } else { error!("{} No next leader found", my_pubkey); + false } } @@ -9097,4 +9107,124 @@ pub(crate) mod tests { .is_candidate(&(5, bank_forks.bank_hash(5).unwrap())) .unwrap()); } + + #[test] + fn test_skip_leader_slot_for_existing_slot() { + solana_logger::setup(); + + let ReplayBlockstoreComponents { + blockstore, + my_pubkey, + leader_schedule_cache, + poh_recorder, + vote_simulator, + rpc_subscriptions, + .. + } = replay_blockstore_components(None, 1, None); + let VoteSimulator { + bank_forks, + mut progress, + .. + } = vote_simulator; + + let working_bank = bank_forks.read().unwrap().working_bank(); + assert!(working_bank.is_complete()); + assert!(working_bank.is_frozen()); + // Mark startup verification as complete to avoid skipping leader slots + working_bank.set_startup_verification_complete(); + + // Insert a block two slots greater than current bank. This slot does + // not have a corresponding Bank in BankForks; this emulates a scenario + // where the block had previously been created and added to BankForks, + // but then got removed. 
This could be the case if the Bank was not on + // the major fork. + let dummy_slot = working_bank.slot() + 2; + let initial_slot = working_bank.slot(); + let num_entries = 10; + let merkle_variant = true; + let (shreds, _) = make_slot_entries(dummy_slot, initial_slot, num_entries, merkle_variant); + blockstore.insert_shreds(shreds, None, false).unwrap(); + + // Reset PoH recorder to the completed bank to ensure consistent state + ReplayStage::reset_poh_recorder( + &my_pubkey, + &blockstore, + working_bank.clone(), + &poh_recorder, + &leader_schedule_cache, + ); + + // Register just over one slot worth of ticks directly with PoH recorder + let num_poh_ticks = + (working_bank.ticks_per_slot() * working_bank.hashes_per_tick().unwrap()) + 1; + poh_recorder + .write() + .map(|mut poh_recorder| { + for _ in 0..num_poh_ticks + 1 { + poh_recorder.tick(); + } + }) + .unwrap(); + + let poh_recorder = Arc::new(poh_recorder); + let (retransmit_slots_sender, _) = unbounded(); + let (banking_tracer, _) = BankingTracer::new(None).unwrap(); + // A vote has not technically been rooted, but it doesn't matter for + // this test to use true to avoid skipping the leader slot + let has_new_vote_been_rooted = true; + let track_transaction_indexes = false; + + // We should not attempt to start leader for the dummy_slot + assert_matches!( + poh_recorder.read().unwrap().reached_leader_slot(&my_pubkey), + PohLeaderStatus::NotReached + ); + assert!(!ReplayStage::maybe_start_leader( + &my_pubkey, + &bank_forks, + &poh_recorder, + &leader_schedule_cache, + &rpc_subscriptions, + &mut progress, + &retransmit_slots_sender, + &mut SkippedSlotsInfo::default(), + &banking_tracer, + has_new_vote_been_rooted, + track_transaction_indexes, + )); + + // Register another slots worth of ticks with PoH recorder + poh_recorder + .write() + .map(|mut poh_recorder| { + for _ in 0..num_poh_ticks + 1 { + poh_recorder.tick(); + } + }) + .unwrap(); + + // We should now start leader for dummy_slot + 1 + let good_slot = dummy_slot + 1; + assert!(ReplayStage::maybe_start_leader( + &my_pubkey, + &bank_forks, + &poh_recorder, + &leader_schedule_cache, + &rpc_subscriptions, + &mut progress, + &retransmit_slots_sender, + &mut SkippedSlotsInfo::default(), + &banking_tracer, + has_new_vote_been_rooted, + track_transaction_indexes, + )); + // Get the new working bank, which is also the new leader bank/slot + let working_bank = bank_forks.read().unwrap().working_bank(); + // The new bank's slot must NOT be dummy_slot as the blockstore already + // had a shred inserted for dummy_slot prior to maybe_start_leader(). + // maybe_start_leader() must not pick dummy_slot to avoid creating a + // duplicate block. 
+ assert_eq!(working_bank.slot(), good_slot); + assert_eq!(working_bank.parent_slot(), initial_slot); + } } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 21d5418b7c6038..8dc7d71df7574d 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -4049,6 +4049,13 @@ impl Blockstore { Ok(duplicate_slots_iterator.map(|(slot, _)| slot)) } + pub fn has_existing_shreds_for_slot(&self, slot: Slot) -> bool { + match self.meta(slot).unwrap() { + Some(meta) => meta.received > 0, + None => false, + } + } + /// Returns the max root or 0 if it does not exist pub fn max_root(&self) -> Slot { self.max_root.load(Ordering::Relaxed) diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs index b65f7593c5e6a3..9354b0c13c373f 100644 --- a/ledger/src/leader_schedule_cache.rs +++ b/ledger/src/leader_schedule_cache.rs @@ -139,14 +139,10 @@ impl LeaderScheduleCache { .map(move |i| i as Slot + first_slot) }) .skip_while(|slot| { - match blockstore { - None => false, - // Skip slots we have already sent a shred for. - Some(blockstore) => match blockstore.meta(*slot).unwrap() { - Some(meta) => meta.received > 0, - None => false, - }, - } + // Skip slots we already have shreds for + blockstore + .map(|bs| bs.has_existing_shreds_for_slot(*slot)) + .unwrap_or(false) }); let first_slot = schedule.next()?; let max_slot = first_slot.saturating_add(max_slot_range); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index f9a22a9c27afb1..fab2d9f62559e6 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -576,6 +576,15 @@ impl PohRecorder { return PohLeaderStatus::NotReached; } + if self.blockstore.has_existing_shreds_for_slot(next_poh_slot) { + // We already have existing shreds for this slot. This can happen when this block was previously + // created and added to BankForks, however a recent PoH reset caused this bank to be removed + // as it was not part of the rooted fork. If this slot is not the first slot for this leader, + // and the first slot was previously ticked over, the check in `leader_schedule_cache::next_leader_slot` + // will not suffice, as it only checks if there are shreds for the first slot. + return PohLeaderStatus::NotReached; + } + assert!(next_tick_height >= self.start_tick_height); let poh_slot = next_poh_slot; let parent_slot = self.start_slot(); From 66686853334ea0ff12a156a565f9e65a915f87cd Mon Sep 17 00:00:00 2001 From: Illia Bobyr Date: Tue, 6 Aug 2024 19:32:18 -0700 Subject: [PATCH 038/529] local-cluster: Custom genesis size (#2427) Some local-cluster tests may require larger than the default genesis, in order to speed up the initial test setup. --- ledger/src/blockstore.rs | 35 +++++++++++++++++++++++++----- ledger/src/lib.rs | 5 +++++ local-cluster/src/local_cluster.rs | 15 +++++++++---- 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 8dc7d71df7574d..569b2bdd8b7cfe 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -34,9 +34,7 @@ use { rand::Rng, rayon::iter::{IntoParallelIterator, ParallelIterator}, rocksdb::{DBRawIterator, LiveFile}, - solana_accounts_db::hardened_unpack::{ - unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, - }, + solana_accounts_db::hardened_unpack::unpack_genesis_archive, solana_entry::entry::{create_ticks, Entry}, solana_measure::measure::Measure, solana_metrics::{ @@ -4963,6 +4961,22 @@ macro_rules! 
create_new_tmp_ledger { $crate::blockstore::create_new_ledger_from_name( $crate::tmp_ledger_name!(), $genesis_config, + $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + $crate::blockstore_options::LedgerColumnOptions::default(), + ) + }; +} + +#[macro_export] +macro_rules! create_new_tmp_ledger_with_size { + ( + $genesis_config:expr, + $max_genesis_archive_unpacked_size:expr $(,)? + ) => { + $crate::blockstore::create_new_ledger_from_name( + $crate::tmp_ledger_name!(), + $genesis_config, + $max_genesis_archive_unpacked_size, $crate::blockstore_options::LedgerColumnOptions::default(), ) }; @@ -4974,6 +4988,7 @@ macro_rules! create_new_tmp_ledger_fifo { $crate::blockstore::create_new_ledger_from_name( $crate::tmp_ledger_name!(), $genesis_config, + $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, $crate::blockstore_options::LedgerColumnOptions { shred_storage_type: $crate::blockstore_options::ShredStorageType::RocksFifo( $crate::blockstore_options::BlockstoreRocksFifoOptions::new_for_tests(), @@ -4990,6 +5005,7 @@ macro_rules! create_new_tmp_ledger_auto_delete { $crate::blockstore::create_new_ledger_from_name_auto_delete( $crate::tmp_ledger_name!(), $genesis_config, + $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, $crate::blockstore_options::LedgerColumnOptions::default(), ) }; @@ -5001,6 +5017,7 @@ macro_rules! create_new_tmp_ledger_fifo_auto_delete { $crate::blockstore::create_new_ledger_from_name_auto_delete( $crate::tmp_ledger_name!(), $genesis_config, + $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, $crate::blockstore_options::LedgerColumnOptions { shred_storage_type: $crate::blockstore_options::ShredStorageType::RocksFifo( $crate::blockstore_options::BlockstoreRocksFifoOptions::new_for_tests(), @@ -5027,10 +5044,15 @@ pub(crate) fn verify_shred_slots(slot: Slot, parent: Slot, root: Slot) -> bool { pub fn create_new_ledger_from_name( name: &str, genesis_config: &GenesisConfig, + max_genesis_archive_unpacked_size: u64, column_options: LedgerColumnOptions, ) -> (PathBuf, Hash) { - let (ledger_path, blockhash) = - create_new_ledger_from_name_auto_delete(name, genesis_config, column_options); + let (ledger_path, blockhash) = create_new_ledger_from_name_auto_delete( + name, + genesis_config, + max_genesis_archive_unpacked_size, + column_options, + ); (ledger_path.into_path(), blockhash) } @@ -5041,13 +5063,14 @@ pub fn create_new_ledger_from_name( pub fn create_new_ledger_from_name_auto_delete( name: &str, genesis_config: &GenesisConfig, + max_genesis_archive_unpacked_size: u64, column_options: LedgerColumnOptions, ) -> (TempDir, Hash) { let ledger_path = get_ledger_path_from_name_auto_delete(name); let blockhash = create_new_ledger( ledger_path.path(), genesis_config, - MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + max_genesis_archive_unpacked_size, column_options, ) .unwrap(); diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 62c274a74e4550..a7007b49fa4223 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -46,3 +46,8 @@ extern crate log; #[cfg_attr(feature = "frozen-abi", macro_use)] #[cfg(feature = "frozen-abi")] extern crate solana_frozen_abi_macro; + +#[doc(hidden)] +pub mod macro_reexports { + pub use solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE; +} diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 9374e93770ba90..0666d9b2be3f61 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -18,7 +18,7 @@ use { 
contact_info::{ContactInfo, Protocol}, gossip_service::discover_cluster, }, - solana_ledger::{create_new_tmp_ledger, shred::Shred}, + solana_ledger::{create_new_tmp_ledger_with_size, shred::Shred}, solana_rpc_client::rpc_client::RpcClient, solana_runtime::{ genesis_utils::{ @@ -312,9 +312,13 @@ impl LocalCluster { .native_instruction_processors .extend_from_slice(&config.native_instruction_processors); - let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); - let leader_contact_info = leader_node.info.clone(); let mut leader_config = safe_clone_config(&config.validator_configs[0]); + let (leader_ledger_path, _blockhash) = create_new_tmp_ledger_with_size!( + &genesis_config, + leader_config.max_genesis_archive_unpacked_size, + ); + + let leader_contact_info = leader_node.info.clone(); leader_config.rpc_addrs = Some(( leader_node.info.rpc().unwrap(), leader_node.info.rpc_pubsub().unwrap(), @@ -494,7 +498,10 @@ impl LocalCluster { let validator_pubkey = validator_keypair.pubkey(); let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey()); let contact_info = validator_node.info.clone(); - let (ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_config); + let (ledger_path, _blockhash) = create_new_tmp_ledger_with_size!( + &self.genesis_config, + validator_config.max_genesis_archive_unpacked_size, + ); // Give the validator some lamports to setup vote accounts if is_listener { From 83bb29e5ba386ae755f11c27119aec166acf4427 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 7 Aug 2024 22:31:17 +0800 Subject: [PATCH 039/529] Add warning to builtin instruction costs (#2467) --- builtins-default-costs/src/lib.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/builtins-default-costs/src/lib.rs b/builtins-default-costs/src/lib.rs index 71659c23fd3b63..48a210b2197338 100644 --- a/builtins-default-costs/src/lib.rs +++ b/builtins-default-costs/src/lib.rs @@ -12,6 +12,14 @@ use { // Number of compute units for each built-in programs lazy_static! { /// Number of compute units for each built-in programs + /// + /// DEVELOPER WARNING: This map CANNOT be modified without causing a + /// consensus failure because this map is used to calculate the compute + /// limit for transactions that don't specify a compute limit themselves as + /// of https://github.com/anza-xyz/agave/issues/2212. It's also used to + /// calculate the cost of a transaction which is used in replay to enforce + /// block cost limits as of + /// https://github.com/solana-labs/solana/issues/29595. pub static ref BUILTIN_INSTRUCTION_COSTS: AHashMap = [ (solana_stake_program::id(), solana_stake_program::stake_instruction::DEFAULT_COMPUTE_UNITS), (solana_config_program::id(), solana_config_program::config_processor::DEFAULT_COMPUTE_UNITS), @@ -26,6 +34,7 @@ lazy_static! 
{ // Note: These are precompile, run directly in bank during sanitizing; (secp256k1_program::id(), 0), (ed25519_program::id(), 0), + // DO NOT ADD MORE ENTRIES TO THIS MAP ] .iter() .cloned() From 9db1d9c58eef55f090a4534f2e72eb1746a52f6e Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 8 Aug 2024 01:13:50 +0800 Subject: [PATCH 040/529] clean up uses of activated `vote_state_add_vote_latency` feature (#2469) clean up uses of activated vote_state_add_vote_latency feature --- cli/src/vote.rs | 10 +--- local-cluster/src/local_cluster.rs | 16 +----- programs/vote/src/vote_processor.rs | 76 ++--------------------------- programs/vote/src/vote_state/mod.rs | 76 +++++++++++------------------ 4 files changed, 36 insertions(+), 142 deletions(-) diff --git a/cli/src/vote.rs b/cli/src/vote.rs index f89740f7a6f8f0..c5070e35c37cb0 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -31,7 +31,7 @@ use { solana_rpc_client_api::config::RpcGetVoteAccountsConfig, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::Account, commitment_config::CommitmentConfig, feature, message::Message, + account::Account, commitment_config::CommitmentConfig, message::Message, native_token::lamports_to_sol, pubkey::Pubkey, system_instruction::SystemError, transaction::Transaction, }, @@ -819,13 +819,7 @@ pub fn process_create_vote_account( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; - - let is_feature_active = (!sign_only) - .then(solana_sdk::feature_set::vote_state_add_vote_latency::id) - .and_then(|feature_address| rpc_client.get_account(&feature_address).ok()) - .and_then(|account| feature::from_account(&account)) - .map_or(false, |feature| feature.activated_at.is_some()); - let space = VoteStateVersions::vote_state_size_of(is_feature_active) as u64; + let space = VoteStateVersions::vote_state_size_of(true) as u64; let build_message = |lamports| { let vote_init = VoteInit { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 0666d9b2be3f61..d47adcea941313 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -32,7 +32,6 @@ use { clock::{Slot, DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE}, commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, - feature_set, genesis_config::{ClusterType, GenesisConfig}, message::Message, poh_config::PohConfig, @@ -778,18 +777,6 @@ impl LocalCluster { == 0 { // 1) Create vote account - // Unlike the bootstrap validator we have to check if the new vote state is being used - // as the cluster is already running, and using the wrong account size will cause the - // InitializeAccount tx to fail - let use_current_vote_state = client - .rpc_client() - .poll_get_balance_with_commitment( - &feature_set::vote_state_add_vote_latency::id(), - CommitmentConfig::processed(), - ) - .unwrap_or(0) - > 0; - let instructions = vote_instruction::create_account_with_config( &from_account.pubkey(), &vote_account_pubkey, @@ -801,8 +788,7 @@ impl LocalCluster { }, amount, vote_instruction::CreateVoteAccountConfig { - space: vote_state::VoteStateVersions::vote_state_size_of(use_current_vote_state) - as u64, + space: vote_state::VoteStateVersions::vote_state_size_of(true) as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index b43b874de69271..748a8e9d6915d5 100644 --- 
a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -46,7 +46,6 @@ fn process_authorize_with_seed_instruction( authorization_type, &expected_authority_keys, &clock, - invoke_context.get_feature_set(), ) } @@ -75,25 +74,12 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - vote_state::initialize_account( - &mut me, - &vote_init, - &signers, - &clock, - invoke_context.get_feature_set(), - ) + vote_state::initialize_account(&mut me, &vote_init, &signers, &clock) } VoteInstruction::Authorize(voter_pubkey, vote_authorize) => { let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - vote_state::authorize( - &mut me, - &voter_pubkey, - vote_authorize, - &signers, - &clock, - invoke_context.get_feature_set(), - ) + vote_state::authorize(&mut me, &voter_pubkey, vote_authorize, &signers, &clock) } VoteInstruction::AuthorizeWithSeed(args) => { instruction_context.check_number_of_instruction_accounts(3)?; @@ -132,12 +118,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let node_pubkey = transaction_context.get_key_of_account_at_index( instruction_context.get_index_of_instruction_account_in_transaction(1)?, )?; - vote_state::update_validator_identity( - &mut me, - node_pubkey, - &signers, - invoke_context.get_feature_set(), - ) + vote_state::update_validator_identity(&mut me, node_pubkey, &signers) } VoteInstruction::UpdateCommission(commission) => { let sysvar_cache = invoke_context.get_sysvar_cache(); @@ -228,7 +209,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &signers, &rent_sysvar, &clock_sysvar, - invoke_context.get_feature_set(), ) } VoteInstruction::AuthorizeChecked(vote_authorize) => { @@ -241,14 +221,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - vote_state::authorize( - &mut me, - voter_pubkey, - vote_authorize, - &signers, - &clock, - invoke_context.get_feature_set(), - ) + vote_state::authorize(&mut me, voter_pubkey, vote_authorize, &signers, &clock) } } }); @@ -275,7 +248,6 @@ mod tests { solana_sdk::{ account::{self, Account, AccountSharedData, ReadableAccount}, account_utils::StateMut, - feature_set::FeatureSet, hash::Hash, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, @@ -322,27 +294,6 @@ mod tests { ) } - fn process_instruction_disabled_features( - instruction_data: &[u8], - transaction_accounts: Vec<(Pubkey, AccountSharedData)>, - instruction_accounts: Vec, - expected_result: Result<(), InstructionError>, - ) -> Vec { - mock_process_instruction( - &id(), - Vec::new(), - instruction_data, - transaction_accounts, - instruction_accounts, - expected_result, - Entrypoint::vm, - |invoke_context| { - invoke_context.mock_set_feature_set(std::sync::Arc::new(FeatureSet::default())); - }, - |_invoke_context| {}, - ) - } - fn process_instruction_as_one_arg( instruction: &Instruction, expected_result: Result<(), InstructionError>, @@ -1796,15 +1747,7 @@ mod tests { (sysvar::rent::id(), create_default_rent_account()), ]; - // should succeed when vote_state_add_vote_latency is disabled - process_instruction_disabled_features( - &instructions[1].data, - transaction_accounts.clone(), - instructions[1].accounts.clone(), - Ok(()), - ); - - // should fail, if vote_state_add_vote_latency is 
enabled + // should fail, since VoteState1_14_11 isn't supported anymore process_instruction( &instructions[1].data, transaction_accounts, @@ -1845,15 +1788,6 @@ mod tests { (sysvar::rent::id(), create_default_rent_account()), ]; - // should fail, if vote_state_add_vote_latency is disabled - process_instruction_disabled_features( - &instructions[1].data, - transaction_accounts.clone(), - instructions[1].accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - // succeeds, since vote_state_add_vote_latency is enabled process_instruction( &instructions[1].data, transaction_accounts, diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 817441da321a75..21a5c0426beffc 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -166,33 +166,24 @@ pub fn to(versioned: &VoteStateVersions, account: &mut T) -> fn set_vote_account_state( vote_account: &mut BorrowedAccount, vote_state: VoteState, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { - // Only if vote_state_add_vote_latency feature is enabled should the new version of vote state be stored - if feature_set.is_active(&feature_set::vote_state_add_vote_latency::id()) { - // If the account is not large enough to store the vote state, then attempt a realloc to make it large enough. - // The realloc can only proceed if the vote account has balance sufficient for rent exemption at the new size. - if (vote_account.get_data().len() < VoteStateVersions::vote_state_size_of(true)) - && (!vote_account - .is_rent_exempt_at_data_length(VoteStateVersions::vote_state_size_of(true)) - || vote_account - .set_data_length(VoteStateVersions::vote_state_size_of(true)) - .is_err()) - { - // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be - // resized for other reasons. So store the V1_14_11 version. - return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( - VoteState1_14_11::from(vote_state), - ))); - } - // Vote account is large enough to store the newest version of vote state - vote_account.set_state(&VoteStateVersions::new_current(vote_state)) - // Else when the vote_state_add_vote_latency feature is not enabled, then the V1_14_11 version is stored - } else { - vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( + // If the account is not large enough to store the vote state, then attempt a realloc to make it large enough. + // The realloc can only proceed if the vote account has balance sufficient for rent exemption at the new size. + if (vote_account.get_data().len() < VoteStateVersions::vote_state_size_of(true)) + && (!vote_account + .is_rent_exempt_at_data_length(VoteStateVersions::vote_state_size_of(true)) + || vote_account + .set_data_length(VoteStateVersions::vote_state_size_of(true)) + .is_err()) + { + // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be + // resized for other reasons. So store the V1_14_11 version. 
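The resize-or-fall-back behavior can be sketched with a toy account type; the rent schedule and sizes below are invented for illustration and are not the real BorrowedAccount API.

struct ToyAccount {
    lamports: u64,
    data: Vec<u8>,
}

impl ToyAccount {
    // Made-up rent schedule: 10 lamports per byte.
    fn rent_exempt_minimum(len: usize) -> u64 {
        10 * len as u64
    }

    // Try to grow to `new_len`; if the balance cannot cover rent exemption at
    // the new size, keep the old layout and report failure so the caller can
    // store the smaller, older serialization instead.
    fn try_grow(&mut self, new_len: usize) -> bool {
        if self.data.len() >= new_len {
            return true;
        }
        if self.lamports < Self::rent_exempt_minimum(new_len) {
            return false;
        }
        self.data.resize(new_len, 0);
        true
    }
}

fn main() {
    let mut poor = ToyAccount { lamports: 100, data: vec![0; 8] };
    assert!(!poor.try_grow(32)); // falls back to the smaller, older layout
    let mut funded = ToyAccount { lamports: 1_000, data: vec![0; 8] };
    assert!(funded.try_grow(32)); // large enough: store the current version
}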
+ return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( VoteState1_14_11::from(vote_state), - ))) + ))); } + // Vote account is large enough to store the newest version of vote state + vote_account.set_state(&VoteStateVersions::new_current(vote_state)) } /// Checks the proposed vote state with the current and @@ -852,7 +843,6 @@ pub fn authorize( vote_authorize: VoteAuthorize, signers: &HashSet, clock: &Clock, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut vote_state: VoteState = vote_account .get_state::()? @@ -887,7 +877,7 @@ pub fn authorize( } } - set_vote_account_state(vote_account, vote_state, feature_set) + set_vote_account_state(vote_account, vote_state) } /// Update the node_pubkey, requires signature of the authorized voter @@ -895,7 +885,6 @@ pub fn update_validator_identity( vote_account: &mut BorrowedAccount, node_pubkey: &Pubkey, signers: &HashSet, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut vote_state: VoteState = vote_account .get_state::()? @@ -909,7 +898,7 @@ pub fn update_validator_identity( vote_state.node_pubkey = *node_pubkey; - set_vote_account_state(vote_account, vote_state, feature_set) + set_vote_account_state(vote_account, vote_state) } /// Update the vote account's commission @@ -958,7 +947,7 @@ pub fn update_commission( vote_state.commission = commission; - set_vote_account_state(vote_account, vote_state, feature_set) + set_vote_account_state(vote_account, vote_state) } /// Given a proposed new commission, returns true if this would be a commission increase, false otherwise @@ -1003,7 +992,6 @@ pub fn withdraw( signers: &HashSet, rent_sysvar: &Rent, clock: &Clock, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut vote_account = instruction_context .try_borrow_instruction_account(transaction_context, vote_account_index)?; @@ -1037,7 +1025,7 @@ pub fn withdraw( } else { // Deinitialize upon zero-balance datapoint_debug!("vote-account-close", ("allow", 1, i64)); - set_vote_account_state(&mut vote_account, VoteState::default(), feature_set)?; + set_vote_account_state(&mut vote_account, VoteState::default())?; } } else { let min_rent_exempt_balance = rent_sysvar.minimum_balance(vote_account.get_data().len()); @@ -1062,13 +1050,8 @@ pub fn initialize_account( vote_init: &VoteInit, signers: &HashSet, clock: &Clock, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { - if vote_account.get_data().len() - != VoteStateVersions::vote_state_size_of( - feature_set.is_active(&feature_set::vote_state_add_vote_latency::id()), - ) - { + if vote_account.get_data().len() != VoteStateVersions::vote_state_size_of(true) { return Err(InstructionError::InvalidAccountData); } let versioned = vote_account.get_state::()?; @@ -1080,7 +1063,7 @@ pub fn initialize_account( // node must agree to accept this vote account verify_authorized_signer(&vote_init.node_pubkey, signers)?; - set_vote_account_state(vote_account, VoteState::new(vote_init, clock), feature_set) + set_vote_account_state(vote_account, VoteState::new(vote_init, clock)) } fn verify_and_get_vote_state( @@ -1130,7 +1113,7 @@ pub fn process_vote_with_account( .ok_or(VoteError::EmptySlots) .and_then(|slot| vote_state.process_timestamp(*slot, timestamp))?; } - set_vote_account_state(vote_account, vote_state, feature_set) + set_vote_account_state(vote_account, vote_state) } pub fn process_vote_state_update( @@ -1150,7 +1133,7 @@ pub fn process_vote_state_update( vote_state_update, Some(feature_set), )?; - 
set_vote_account_state(vote_account, vote_state, feature_set) + set_vote_account_state(vote_account, vote_state) } pub fn do_process_vote_state_update( @@ -1200,7 +1183,7 @@ pub fn process_tower_sync( tower_sync, Some(feature_set), )?; - set_vote_account_state(vote_account, vote_state, feature_set) + set_vote_account_state(vote_account, vote_state) } fn do_process_tower_sync( @@ -1320,8 +1303,6 @@ mod tests { #[test] fn test_vote_state_upgrade_from_1_14_11() { - let mut feature_set = FeatureSet::default(); - // Create an initial vote account that is sized for the 1_14_11 version of vote state, and has only the // required lamports for rent exempt minimum at that size let node_pubkey = solana_sdk::pubkey::new_rand(); @@ -1417,7 +1398,7 @@ mod tests { // Now re-set the vote account state; because the feature is not enabled, the old 1_14_11 format should be // written out assert_eq!( - set_vote_account_state(&mut borrowed_account, vote_state.clone(), &feature_set), + set_vote_account_state(&mut borrowed_account, vote_state.clone()), Ok(()) ); let vote_state_version = borrowed_account.get_state::().unwrap(); @@ -1431,11 +1412,10 @@ mod tests { let vote_state = converted_vote_state; - // Test that when the feature is enabled, if the vote account does not have sufficient lamports to realloc, + // Test that if the vote account does not have sufficient lamports to realloc, // the old vote state is written out - feature_set.activate(&feature_set::vote_state_add_vote_latency::id(), 1); assert_eq!( - set_vote_account_state(&mut borrowed_account, vote_state.clone(), &feature_set), + set_vote_account_state(&mut borrowed_account, vote_state.clone()), Ok(()) ); let vote_state_version = borrowed_account.get_state::().unwrap(); @@ -1456,7 +1436,7 @@ Ok(()) ); assert_eq!( - set_vote_account_state(&mut borrowed_account, vote_state.clone(), &feature_set), + set_vote_account_state(&mut borrowed_account, vote_state.clone()), Ok(()) ); let vote_state_version = borrowed_account.get_state::().unwrap(); From 784352c3765792105764c875c1a619e6f71ded55 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 8 Aug 2024 01:14:04 +0800 Subject: [PATCH 041/529] clean up activated `validate_fee_collector_account` feature (#2468) clean up activated validate_fee_collector_account feature --- runtime/src/bank.rs | 5 - runtime/src/bank/fee_distribution.rs | 180 +++++---------------------- 2 files changed, 34 insertions(+), 151 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 02ef725a4ba20e..c7cfdd89b23d94 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6017,11 +6017,6 @@ impl Bank { .shrink_ancient_slots(self.epoch_schedule()) } - pub fn validate_fee_collector_account(&self) -> bool { - self.feature_set - .is_active(&feature_set::validate_fee_collector_account::id()) - } - pub fn read_cost_tracker(&self) -> LockResult> { self.cost_tracker.read() } diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 8ac0f8e7338b31..4dc511a5eee95c 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -18,12 +18,6 @@ use { thiserror::Error, }; -#[derive(Debug)] -struct DepositFeeOptions { - check_account_owner: bool, - check_rent_paying: bool, -} - #[derive(Error, Debug, PartialEq)] enum DepositFeeError { #[error("fee account became rent paying")] @@ -116,15 +110,7 @@ impl Bank { } fn deposit_or_burn_fee(&self, deposit: u64, burn: &mut u64) { - let validate_fee_collector = self.validate_fee_collector_account(); -
match self.deposit_fees( - &self.collector_id, - deposit, - DepositFeeOptions { - check_account_owner: validate_fee_collector, - check_rent_paying: validate_fee_collector, - }, - ) { + match self.deposit_fees(&self.collector_id, deposit) { Ok(post_balance) => { self.rewards.write().unwrap().push(( self.collector_id, @@ -153,17 +139,12 @@ impl Bank { } // Deposits fees into a specified account and if successful, returns the new balance of that account - fn deposit_fees( - &self, - pubkey: &Pubkey, - fees: u64, - options: DepositFeeOptions, - ) -> Result { + fn deposit_fees(&self, pubkey: &Pubkey, fees: u64) -> Result { let mut account = self .get_account_with_fixed_root_no_cache(pubkey) .unwrap_or_default(); - if options.check_account_owner && !system_program::check_id(account.owner()) { + if !system_program::check_id(account.owner()) { return Err(DepositFeeError::InvalidAccountOwner); } @@ -173,13 +154,12 @@ impl Bank { if distribution.is_err() { return Err(DepositFeeError::LamportOverflow); } - if options.check_rent_paying { - let recipient_post_rent_state = RentState::from_account(&account, rent); - let rent_state_transition_allowed = - recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state); - if !rent_state_transition_allowed { - return Err(DepositFeeError::InvalidRentPayingAccount); - } + + let recipient_post_rent_state = RentState::from_account(&account, rent); + let rent_state_transition_allowed = + recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state); + if !rent_state_transition_allowed { + return Err(DepositFeeError::InvalidRentPayingAccount); } self.store_account(pubkey, &account); @@ -274,15 +254,7 @@ impl Bank { rent_share }; if rent_to_be_paid > 0 { - let check_account_owner = self.validate_fee_collector_account(); - match self.deposit_fees( - &pubkey, - rent_to_be_paid, - DepositFeeOptions { - check_account_owner, - check_rent_paying: true, - }, - ) { + match self.deposit_fees(&pubkey, rent_to_be_paid) { Ok(post_balance) => { rewards.push(( pubkey, @@ -359,8 +331,8 @@ pub mod tests { create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, solana_sdk::{ - account::AccountSharedData, feature_set, native_token::sol_to_lamports, pubkey, - rent::Rent, signature::Signer, + account::AccountSharedData, native_token::sol_to_lamports, pubkey, rent::Rent, + signature::Signer, }, std::sync::RwLock, }; @@ -376,34 +348,20 @@ pub mod tests { struct TestCase { scenario: Scenario, - disable_checks: bool, } impl TestCase { - fn new(scenario: Scenario, disable_checks: bool) -> Self { - Self { - scenario, - disable_checks, - } + fn new(scenario: Scenario) -> Self { + Self { scenario } } } for test_case in [ - TestCase::new(Scenario::Normal, false), - TestCase::new(Scenario::Normal, true), - TestCase::new(Scenario::InvalidOwner, false), - TestCase::new(Scenario::InvalidOwner, true), - TestCase::new(Scenario::RentPaying, false), - TestCase::new(Scenario::RentPaying, true), + TestCase::new(Scenario::Normal), + TestCase::new(Scenario::InvalidOwner), + TestCase::new(Scenario::RentPaying), ] { let mut genesis = create_genesis_config(0); - if test_case.disable_checks { - genesis - .genesis_config - .accounts - .remove(&feature_set::validate_fee_collector_account::id()) - .unwrap(); - } let rent = Rent::default(); let min_rent_exempt_balance = rent.minimum_balance(0); genesis.genesis_config.rent = rent; // Ensure rent is non-zero, as genesis_utils sets Rent::free by default @@ -434,7 +392,7 @@ pub mod tests { bank.deposit_or_burn_fee(deposit, &mut 
burn); let new_collector_id_balance = bank.get_balance(bank.collector_id()); - if test_case.scenario != Scenario::Normal && !test_case.disable_checks { + if test_case.scenario != Scenario::Normal { assert_eq!(initial_collector_id_balance, new_collector_id_balance); assert_eq!(initial_burn + deposit, burn); let locked_rewards = bank.rewards.read().unwrap(); @@ -590,15 +548,10 @@ pub mod tests { let genesis = create_genesis_config(initial_balance); let bank = Bank::new_for_tests(&genesis.genesis_config); let pubkey = genesis.mint_keypair.pubkey(); - let deposit_amount = 500; - let options = DepositFeeOptions { - check_account_owner: true, - check_rent_paying: true, - }; assert_eq!( - bank.deposit_fees(&pubkey, deposit_amount, options), + bank.deposit_fees(&pubkey, deposit_amount), Ok(initial_balance + deposit_amount), "New balance should be the sum of the initial balance and deposit amount" ); @@ -610,15 +563,10 @@ pub mod tests { let genesis = create_genesis_config(initial_balance); let bank = Bank::new_for_tests(&genesis.genesis_config); let pubkey = genesis.mint_keypair.pubkey(); - let deposit_amount = 500; - let options = DepositFeeOptions { - check_account_owner: false, - check_rent_paying: false, - }; assert_eq!( - bank.deposit_fees(&pubkey, deposit_amount, options), + bank.deposit_fees(&pubkey, deposit_amount), Err(DepositFeeError::LamportOverflow), "Expected an error due to lamport overflow" ); @@ -630,36 +578,13 @@ pub mod tests { let genesis = create_genesis_config_with_leader(0, &pubkey::new_rand(), initial_balance); let bank = Bank::new_for_tests(&genesis.genesis_config); let pubkey = genesis.voting_keypair.pubkey(); - let deposit_amount = 500; - // enable check_account_owner - { - let options = DepositFeeOptions { - check_account_owner: true, // Intentionally checking for account owner - check_rent_paying: false, - }; - - assert_eq!( - bank.deposit_fees(&pubkey, deposit_amount, options), - Err(DepositFeeError::InvalidAccountOwner), - "Expected an error due to invalid account owner" - ); - } - - // disable check_account_owner - { - let options = DepositFeeOptions { - check_account_owner: false, - check_rent_paying: false, - }; - - assert_eq!( - bank.deposit_fees(&pubkey, deposit_amount, options), - Ok(initial_balance + deposit_amount), - "New balance should be the sum of the initial balance and deposit amount" - ); - } + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount), + Err(DepositFeeError::InvalidAccountOwner), + "Expected an error due to invalid account owner" + ); } #[test] @@ -675,33 +600,11 @@ pub mod tests { let deposit_amount = 500; assert!(initial_balance + deposit_amount < min_rent_exempt_balance); - // enable check_rent_paying - { - let options = DepositFeeOptions { - check_account_owner: false, - check_rent_paying: true, - }; - - assert_eq!( - bank.deposit_fees(&pubkey, deposit_amount, options), - Err(DepositFeeError::InvalidRentPayingAccount), - "Expected an error due to invalid rent paying account" - ); - } - - // disable check_rent_paying - { - let options = DepositFeeOptions { - check_account_owner: false, - check_rent_paying: false, - }; - - assert_eq!( - bank.deposit_fees(&pubkey, deposit_amount, options), - Ok(initial_balance + deposit_amount), - "New balance should be the sum of the initial balance and deposit amount" - ); - } + assert_eq!( + bank.deposit_fees(&pubkey, deposit_amount), + Err(DepositFeeError::InvalidRentPayingAccount), + "Expected an error due to invalid rent paying account" + ); } #[test] @@ -857,36 +760,21 @@ pub mod tests { #[test] 
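    // Illustrative note (not from this patch): with the
    // validate_fee_collector_account feature gate gone, the owner check in
    // deposit_fees is unconditional, so the test below only needs to vary
    // use_invalid_owner.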
fn test_distribute_rent_to_validators_invalid_owner() { struct TestCase { - disable_owner_check: bool, use_invalid_owner: bool, } impl TestCase { - fn new(disable_owner_check: bool, use_invalid_owner: bool) -> Self { - Self { - disable_owner_check, - use_invalid_owner, - } + fn new(use_invalid_owner: bool) -> Self { + Self { use_invalid_owner } } } - for test_case in [ - TestCase::new(false, false), - TestCase::new(false, true), - TestCase::new(true, false), - TestCase::new(true, true), - ] { + for test_case in [TestCase::new(false), TestCase::new(true)] { let genesis_config_info = create_genesis_config_with_leader(0, &Pubkey::new_unique(), 100); let mut genesis_config = genesis_config_info.genesis_config; genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default - if test_case.disable_owner_check { - genesis_config - .accounts - .remove(&feature_set::validate_fee_collector_account::id()) - .unwrap(); - } let bank = Bank::new_for_tests(&genesis_config); let initial_balance = 1_000_000; @@ -904,7 +792,7 @@ pub mod tests { let new_capitalization = bank.capitalization(); let new_balance = bank.get_balance(bank.collector_id()); - if test_case.use_invalid_owner && !test_case.disable_owner_check { + if test_case.use_invalid_owner { assert_eq!(initial_balance, new_balance); assert_eq!(initial_capitalization - rent_fees, new_capitalization); assert_eq!(bank.rewards.read().unwrap().len(), 0); From 9d4867e24b8ac4e7ced5e3122ce844a5b56bcd65 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 7 Aug 2024 13:07:52 -0500 Subject: [PATCH 042/529] TransactionView: Static Account Keys Meta (#2410) --- transaction-view/src/lib.rs | 2 + .../src/static_account_keys_meta.rs | 95 +++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 transaction-view/src/static_account_keys_meta.rs diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index 41870f9d690d91..aa16270fb7313d 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -11,3 +11,5 @@ mod message_header_meta; pub mod result; #[allow(dead_code)] mod signature_meta; +#[allow(dead_code)] +mod static_account_keys_meta; diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_meta.rs new file mode 100644 index 00000000000000..46bf95c15b5e58 --- /dev/null +++ b/transaction-view/src/static_account_keys_meta.rs @@ -0,0 +1,95 @@ +use { + crate::{ + bytes::{advance_offset_for_array, read_byte}, + result::{Result, TransactionParsingError}, + }, + solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey}, +}; + +/// Contains meta-data about the static account keys in a transaction packet. +#[derive(Default)] +pub struct StaticAccountKeysMeta { + /// The number of static accounts in the transaction. + pub(crate) num_static_accounts: u16, + /// The offset to the first static account in the transaction. + pub(crate) offset: u16, +} + +impl StaticAccountKeysMeta { + pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { + // The packet has a maximum length of 1232 bytes. + // This means the maximum number of 32 byte keys is 38. + // 38 as an min-sized encoded u16 is 1 byte. + // We can simply read this byte, if it's >38 we can return None. + const MAX_STATIC_ACCOUNTS_PER_PACKET: u16 = + (PACKET_DATA_SIZE / core::mem::size_of::()) as u16; + // Max size must not have the MSB set so that it is size 1. 
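+        // Illustrative sketch (assumption, not from this patch): in the
+        // compact-u16 ("short_vec") encoding, a value below 0x80 occupies
+        // exactly one byte with the MSB clear, e.g. 38 keys encode as the
+        // single byte 0b0010_0110, so reading one byte suffices to reject
+        // any count above MAX_STATIC_ACCOUNTS_PER_PACKET.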
+ const _: () = assert!(MAX_STATIC_ACCOUNTS_PER_PACKET & 0b1000_0000 == 0); + + let num_static_accounts = read_byte(bytes, offset)? as u16; + if num_static_accounts == 0 || num_static_accounts > MAX_STATIC_ACCOUNTS_PER_PACKET { + return Err(TransactionParsingError); + } + + // We also know that the offset must be less than 3 here, since the + // compressed u16 can only use up to 3 bytes, so there is no need to + // check if the offset is greater than u16::MAX. + let static_accounts_offset = *offset as u16; + // Update offset for array of static accounts. + advance_offset_for_array::(bytes, offset, num_static_accounts)?; + + Ok(Self { + num_static_accounts, + offset: static_accounts_offset, + }) + } +} + +#[cfg(test)] +mod tests { + use {super::*, solana_sdk::short_vec::ShortVec}; + + #[test] + fn test_zero_accounts() { + let bytes = bincode::serialize(&ShortVec(Vec::::new())).unwrap(); + let mut offset = 0; + assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err()); + } + + #[test] + fn test_one_account() { + let bytes = bincode::serialize(&ShortVec(vec![Pubkey::default()])).unwrap(); + let mut offset = 0; + let meta = StaticAccountKeysMeta::try_new(&bytes, &mut offset).unwrap(); + assert_eq!(meta.num_static_accounts, 1); + assert_eq!(meta.offset, 1); + assert_eq!(offset, 1 + core::mem::size_of::()); + } + + #[test] + fn test_max_accounts() { + let signatures = vec![Pubkey::default(); 38]; + let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); + let mut offset = 0; + let meta = StaticAccountKeysMeta::try_new(&bytes, &mut offset).unwrap(); + assert_eq!(meta.num_static_accounts, 38); + assert_eq!(meta.offset, 1); + assert_eq!(offset, 1 + 38 * core::mem::size_of::()); + } + + #[test] + fn test_too_many_accounts() { + let signatures = vec![Pubkey::default(); 39]; + let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); + let mut offset = 0; + assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err()); + } + + #[test] + fn test_u16_max_accounts() { + let signatures = vec![Pubkey::default(); u16::MAX as usize]; + let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); + let mut offset = 0; + assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err()); + } +} From 4bc8d5d7a15b7e44803bbe7eb33bc6455aaa4ada Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 7 Aug 2024 15:02:00 -0400 Subject: [PATCH 043/529] banking_stage: do not drain votes that cannot land on our leader fork (#2465) --- .../banking_stage/latest_unprocessed_votes.rs | 54 +++++++++++++++---- sdk/program/src/vote/instruction.rs | 13 +++++ 2 files changed, 58 insertions(+), 9 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 069c6e4bbb3d07..e7aaf7d561e6d4 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -8,9 +8,13 @@ use { solana_perf::packet::Packet, solana_runtime::bank::Bank, solana_sdk::{ + account::from_account, clock::{Slot, UnixTimestamp}, + hash::Hash, program_utils::limited_deserialize, pubkey::Pubkey, + slot_hashes::SlotHashes, + sysvar, }, solana_vote_program::vote_instruction::VoteInstruction, std::{ @@ -36,6 +40,7 @@ pub struct LatestValidatorVotePacket { pubkey: Pubkey, vote: Option>, slot: Slot, + hash: Hash, forwarded: bool, timestamp: Option, } @@ -70,11 +75,13 @@ impl LatestValidatorVotePacket { .first() .ok_or(DeserializedPacketError::VoteTransactionError)?; let slot = 
vote_state_update_instruction.last_voted_slot().unwrap_or(0); + let hash = vote_state_update_instruction.hash(); let timestamp = vote_state_update_instruction.timestamp(); Ok(Self { vote: Some(vote), slot, + hash, pubkey, vote_source, forwarded: false, @@ -97,6 +104,10 @@ impl LatestValidatorVotePacket { self.slot } + pub(crate) fn hash(&self) -> Hash { + self.hash + } + pub fn timestamp(&self) -> Option { self.timestamp } @@ -115,9 +126,6 @@ impl LatestValidatorVotePacket { } } -// TODO: replace this with rand::seq::index::sample_weighted once we can update rand to 0.8+ -// This requires updating dependencies of ed25519-dalek as rand_core is not compatible cross -// version https://github.com/dalek-cryptography/ed25519-dalek/pull/214 pub(crate) fn weighted_random_order_by_stake<'a>( bank: &Bank, pubkeys: impl Iterator, @@ -322,17 +330,30 @@ impl LatestUnprocessedVotes { } /// Drains all votes yet to be processed sorted by a weighted random ordering by stake + /// Do not touch votes that are for a different fork from `bank` as we know they will fail, + /// however the next bank could be built on a different fork and consume these votes. pub fn drain_unprocessed(&self, bank: Arc) -> Vec> { - let pubkeys_by_stake = weighted_random_order_by_stake( - &bank, - self.latest_votes_per_pubkey.read().unwrap().keys(), - ) - .collect_vec(); + let slot_hashes = bank + .get_account(&sysvar::slot_hashes::id()) + .and_then(|account| from_account::(&account)); + if slot_hashes.is_none() { + error!( + "Slot hashes sysvar doesn't exist on bank {}. Including all votes without filtering", + bank.slot() + ); + } + + let pubkeys_by_stake = { + let binding = self.latest_votes_per_pubkey.read().unwrap(); + weighted_random_order_by_stake(&bank, binding.keys()) + }; pubkeys_by_stake - .into_iter() .filter_map(|pubkey| { self.get_entry(pubkey).and_then(|lock| { let mut latest_vote = lock.write().unwrap(); + if !Self::is_valid_for_our_fork(&latest_vote, &slot_hashes) { + return None; + } latest_vote.take_vote().map(|vote| { self.num_unprocessed_votes.fetch_sub(1, Ordering::Relaxed); vote @@ -342,6 +363,21 @@ impl LatestUnprocessedVotes { .collect_vec() } + /// Check if `vote` can land in our fork based on `slot_hashes` + fn is_valid_for_our_fork( + vote: &LatestValidatorVotePacket, + slot_hashes: &Option, + ) -> bool { + let Some(slot_hashes) = slot_hashes else { + // When slot hashes is not present we do not filter + return true; + }; + slot_hashes + .get(&vote.slot()) + .map(|found_hash| *found_hash == vote.hash()) + .unwrap_or(false) + } + /// Sometimes we forward and hold the packets, sometimes we forward and clear. 
/// This also clears all gossip votes since by definition they have been forwarded pub fn clear_forwarded_packets(&self) { diff --git a/sdk/program/src/vote/instruction.rs b/sdk/program/src/vote/instruction.rs index b5d43b5c24c602..c4369dd26d8080 100644 --- a/sdk/program/src/vote/instruction.rs +++ b/sdk/program/src/vote/instruction.rs @@ -209,6 +209,19 @@ impl VoteInstruction { } } + /// Only to be used on vote instructions (guard with is_simple_vote), panics otherwise + pub fn hash(&self) -> Hash { + assert!(self.is_simple_vote()); + match self { + Self::Vote(v) | Self::VoteSwitch(v, _) => v.hash, + Self::UpdateVoteState(vote_state_update) + | Self::UpdateVoteStateSwitch(vote_state_update, _) + | Self::CompactUpdateVoteState(vote_state_update) + | Self::CompactUpdateVoteStateSwitch(vote_state_update, _) => vote_state_update.hash, + Self::TowerSync(tower_sync) | Self::TowerSyncSwitch(tower_sync, _) => tower_sync.hash, + _ => panic!("Tried to get hash on non simple vote instruction"), + } + } /// Only to be used on vote instructions (guard with is_simple_vote), panics otherwise pub fn timestamp(&self) -> Option { assert!(self.is_simple_vote()); From 2892b263da010cda5a9dff6cf3525109daba672f Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Wed, 7 Aug 2024 16:36:38 -0700 Subject: [PATCH 044/529] Add epoch_node_id_to_stake() to get total stake belonging to given node_id in given epoch. (#2478) * Add epoch_node_id_stake to get total stake belonging to given node_id in the given epoch. * Fix typos to test epoch 1. * Small fix. --- runtime/src/bank.rs | 7 +++++++ runtime/src/bank/tests.rs | 43 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c7cfdd89b23d94..c3b6c97af348f0 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5913,6 +5913,13 @@ impl Bank { .get(node_id) } + /// Get the total stake belonging to vote accounts associated with the given node id for the + /// given epoch. + pub fn epoch_node_id_to_stake(&self, epoch: Epoch, node_id: &Pubkey) -> Option { + self.epoch_stakes(epoch) + .and_then(|epoch_stakes| epoch_stakes.node_id_to_stake(node_id)) + } + /// Get the fixed total stake of all vote accounts for current epoch pub fn total_epoch_stake(&self) -> u64 { self.epoch_stakes diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 21c70faa2534c0..288d0b8bb23a1b 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12984,3 +12984,46 @@ fn test_blockhash_last_valid_block_height() { ); assert!(!bank.is_blockhash_valid(&last_blockhash)); } + +#[test] +fn test_bank_epoch_stakes() { + solana_logger::setup(); + let num_of_nodes: u64 = 30; + let stakes = (1..num_of_nodes.checked_add(1).expect("Shouldn't be big")).collect::>(); + let voting_keypairs = stakes + .iter() + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect::>(); + let total_stake = stakes.iter().sum(); + let GenesisConfigInfo { genesis_config, .. 
} = + create_genesis_config_with_vote_accounts(1_000_000_000, &voting_keypairs, stakes.clone()); + + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank1 = Bank::new_from_parent( + bank0.clone(), + &Pubkey::default(), + bank0.get_slots_in_epoch(0) + 1, + ); + + let initial_epochs = bank0.epoch_stake_keys(); + assert_eq!(initial_epochs, vec![0, 1]); + + assert_eq!(bank0.epoch(), 0); + assert_eq!(bank0.epoch_total_stake(0), Some(total_stake)); + assert_eq!(bank0.epoch_node_id_to_stake(0, &Pubkey::new_unique()), None); + for (i, keypair) in voting_keypairs.iter().enumerate() { + assert_eq!( + bank0.epoch_node_id_to_stake(0, &keypair.node_keypair.pubkey()), + Some(stakes[i]) + ); + } + assert_eq!(bank1.epoch(), 1); + assert_eq!(bank1.epoch_total_stake(1), Some(total_stake)); + assert_eq!(bank1.epoch_node_id_to_stake(1, &Pubkey::new_unique()), None); + for (i, keypair) in voting_keypairs.iter().enumerate() { + assert_eq!( + bank1.epoch_node_id_to_stake(1, &keypair.node_keypair.pubkey()), + Some(stakes[i]) + ); + } +} From 1f9cbb04aff21475f30ec57cf63659e19b107ab1 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 8 Aug 2024 09:05:14 +0800 Subject: [PATCH 045/529] fix: remove expensive versioned epoch stakes clone (#2453) * fix: remove expensive versioned epoch stakes clone * Add custom partialeq impl for dcou --- runtime/src/bank/serde_snapshot.rs | 7 +- runtime/src/epoch_stakes.rs | 104 ++++++++++++++++++++++++++--- runtime/src/serde_snapshot.rs | 7 +- runtime/src/stake_account.rs | 4 +- runtime/src/stakes.rs | 22 +++++- sdk/program/src/stake/state.rs | 7 ++ 6 files changed, 131 insertions(+), 20 deletions(-) diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 2f633c0910b2d8..f575aa64813f37 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -6,7 +6,8 @@ mod tests { epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, EpochRewardStatus, }, epoch_stakes::{ - EpochAuthorizedVoters, EpochStakes, NodeIdToVoteAccounts, VersionedEpochStakes, + EpochAuthorizedVoters, EpochStakes, NodeIdToVoteAccounts, StakesSerdeWrapper, + VersionedEpochStakes, }, genesis_utils::activate_all_features, runtime_config::RuntimeConfig, @@ -306,7 +307,7 @@ mod tests { bank.epoch_stakes.insert( 42, EpochStakes::from(VersionedEpochStakes::Current { - stakes: Stakes::::default(), + stakes: StakesSerdeWrapper::Stake(Stakes::::default()), total_stake: 42, node_id_to_vote_accounts: Arc::::default(), epoch_authorized_voters: Arc::::default(), @@ -535,7 +536,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "CeNFPePrUfgJT2GNr7zYfMQVuJwGyU46bz1Skq7hAPht") + frozen_abi(digest = "7a6C1oFtgZiMtZig7FbX9289xn55QadQ962rX61Gheef") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 4841b2713c34e7..d38695f642471c 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -1,7 +1,11 @@ use { - crate::stakes::{Stakes, StakesEnum}, - serde::{Deserialize, Serialize}, + crate::{ + stake_account::StakeAccount, + stakes::{Stakes, StakesEnum}, + }, + serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_sdk::{clock::Epoch, pubkey::Pubkey, stake::state::Stake}, + solana_stake_program::stake_state::Delegation, solana_vote::vote_account::VoteAccountsHashMap, std::{collections::HashMap, sync::Arc}, }; @@ -131,16 +135,78 @@ impl EpochStakes { } #[cfg_attr(feature = "frozen-abi", 
derive(AbiExample, AbiEnumVisitor))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))] +#[derive(Debug, Clone, Serialize, Deserialize)] pub enum VersionedEpochStakes { Current { - stakes: Stakes, + stakes: StakesSerdeWrapper, total_stake: u64, node_id_to_vote_accounts: Arc, epoch_authorized_voters: Arc, }, } +/// Wrapper struct with custom serialization to support serializing +/// `Stakes` as `Stakes` without doing a full deep clone of +/// the stake data. Serialization works by building a `Stakes<&Stake>` map which +/// borrows `&Stake` from `StakeAccount` entries in `Stakes`. Note +/// that `Stakes<&Stake>` still copies `Pubkey` keys so the `Stakes<&Stake>` +/// data structure still allocates a fair amount of memory but the memory only +/// remains allocated during serialization. +#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] +#[derive(Debug, Clone)] +pub enum StakesSerdeWrapper { + Stake(Stakes), + Account(Stakes>), +} + +#[cfg(feature = "dev-context-only-utils")] +impl PartialEq for StakesSerdeWrapper { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Stake(stakes), Self::Stake(other)) => stakes == other, + (Self::Account(stakes), Self::Account(other)) => stakes == other, + (Self::Stake(stakes), Self::Account(other)) => { + stakes == &Stakes::::from(other.clone()) + } + (Self::Account(stakes), Self::Stake(other)) => { + other == &Stakes::::from(stakes.clone()) + } + } + } +} + +impl From for StakesEnum { + fn from(stakes: StakesSerdeWrapper) -> Self { + match stakes { + StakesSerdeWrapper::Stake(stakes) => Self::Stakes(stakes), + StakesSerdeWrapper::Account(stakes) => Self::Accounts(stakes), + } + } +} + +impl Serialize for StakesSerdeWrapper { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + Self::Stake(stakes) => stakes.serialize(serializer), + Self::Account(stakes) => Stakes::<&Stake>::from(stakes).serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for StakesSerdeWrapper { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let stakes = Stakes::::deserialize(deserializer)?; + Ok(Self::Stake(stakes)) + } +} + impl From for EpochStakes { fn from(versioned: VersionedEpochStakes) -> Self { let VersionedEpochStakes::Current { @@ -151,7 +217,7 @@ impl From for EpochStakes { } = versioned; Self { - stakes: Arc::new(StakesEnum::Stakes(stakes)), + stakes: Arc::new(stakes.into()), total_stake, node_id_to_vote_accounts, epoch_authorized_voters, @@ -196,7 +262,7 @@ pub(crate) fn split_epoch_stakes( versioned_epoch_stakes.insert( epoch, VersionedEpochStakes::Current { - stakes: Stakes::::from(stakes.clone()), + stakes: StakesSerdeWrapper::Account(stakes.clone()), total_stake, node_id_to_vote_accounts, epoch_authorized_voters, @@ -207,7 +273,7 @@ pub(crate) fn split_epoch_stakes( versioned_epoch_stakes.insert( epoch, VersionedEpochStakes::Current { - stakes: stakes.clone(), + stakes: StakesSerdeWrapper::Stake(stakes.clone()), total_stake, node_id_to_vote_accounts, epoch_authorized_voters, @@ -426,7 +492,7 @@ pub(crate) mod tests { assert_eq!( versioned.get(&epoch), Some(&VersionedEpochStakes::Current { - stakes: Stakes::::from(test_stakes), + stakes: StakesSerdeWrapper::Account(test_stakes), total_stake: epoch_stakes.total_stake, node_id_to_vote_accounts: epoch_stakes.node_id_to_vote_accounts, epoch_authorized_voters: epoch_stakes.epoch_authorized_voters, @@ -455,7 +521,7 @@ pub(crate) 
mod tests { assert_eq!( versioned.get(&epoch), Some(&VersionedEpochStakes::Current { - stakes: test_stakes, + stakes: StakesSerdeWrapper::Stake(test_stakes), total_stake: epoch_stakes.total_stake, node_id_to_vote_accounts: epoch_stakes.node_id_to_vote_accounts, epoch_authorized_voters: epoch_stakes.epoch_authorized_voters, @@ -506,8 +572,24 @@ pub(crate) mod tests { assert!(old.contains_key(&epoch1)); assert_eq!(versioned.len(), 2); - assert!(versioned.contains_key(&epoch2)); - assert!(versioned.contains_key(&epoch3)); + assert_eq!( + versioned.get(&epoch2), + Some(&VersionedEpochStakes::Current { + stakes: StakesSerdeWrapper::Account(Stakes::default()), + total_stake: 200, + node_id_to_vote_accounts: Arc::default(), + epoch_authorized_voters: Arc::default(), + }) + ); + assert_eq!( + versioned.get(&epoch3), + Some(&VersionedEpochStakes::Current { + stakes: StakesSerdeWrapper::Stake(Stakes::default()), + total_stake: 300, + node_id_to_vote_accounts: Arc::default(), + epoch_authorized_voters: Arc::default(), + }) + ); } #[test] diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index a83ed12325741c..3c923d72893a72 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -389,7 +389,8 @@ where /// added to this struct a minor release before they are added to the serialize /// struct. #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Debug, Deserialize, PartialEq)] +#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))] +#[derive(Clone, Debug, Deserialize)] struct ExtraFieldsToDeserialize { #[serde(deserialize_with = "default_on_eof")] lamports_per_signature: u64, @@ -408,8 +409,8 @@ struct ExtraFieldsToDeserialize { /// be added to the deserialize struct a minor release before they are added to /// this one. #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[cfg_attr(feature = "dev-context-only-utils", derive(Default))] -#[derive(Debug, Serialize, PartialEq)] +#[cfg_attr(feature = "dev-context-only-utils", derive(Default, PartialEq))] +#[derive(Debug, Serialize)] pub struct ExtraFieldsToSerialize<'a> { pub lamports_per_signature: u64, pub incremental_snapshot_persistence: Option<&'a BankIncrementalSnapshotPersistence>, diff --git a/runtime/src/stake_account.rs b/runtime/src/stake_account.rs index 7bf30c088d9ea3..e3d8b5a05de6d9 100644 --- a/runtime/src/stake_account.rs +++ b/runtime/src/stake_account.rs @@ -55,10 +55,10 @@ impl StakeAccount { } #[inline] - pub(crate) fn stake(&self) -> Stake { + pub(crate) fn stake(&self) -> &Stake { // Safe to unwrap here because StakeAccount will always // only wrap a stake-state. - self.stake_state.stake().unwrap() + self.stake_state.stake_ref().unwrap() } } diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 0e4d7b6109ef41..ff1b0d059edebe 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -544,12 +544,15 @@ impl From> for Stakes { } } +/// This conversion is very memory intensive so should only be used in +/// development contexts. 
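+// Illustrative note (not from this patch): the borrowing conversion
+// `From<&'a Stakes<StakeAccount>> for Stakes<&'a Stake>` below copies only
+// the Pubkey keys and borrows each `&Stake` in place, which is what lets
+// `StakesSerdeWrapper` serialize without the deep clone performed by this
+// dev-only conversion.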
+#[cfg(feature = "dev-context-only-utils")] impl From> for Stakes { fn from(stakes: Stakes) -> Self { let stake_delegations = stakes .stake_delegations .into_iter() - .map(|(pubkey, stake_account)| (pubkey, stake_account.stake())) + .map(|(pubkey, stake_account)| (pubkey, *stake_account.stake())) .collect(); Self { vote_accounts: stakes.vote_accounts, @@ -561,6 +564,23 @@ impl From> for Stakes { } } +impl<'a> From<&'a Stakes> for Stakes<&'a Stake> { + fn from(stakes: &'a Stakes) -> Self { + let stake_delegations = stakes + .stake_delegations + .iter() + .map(|(pubkey, stake_account)| (*pubkey, stake_account.stake())) + .collect(); + Self { + vote_accounts: stakes.vote_accounts.clone(), + stake_delegations, + unused: stakes.unused, + epoch: stakes.epoch, + stake_history: stakes.stake_history.clone(), + } + } +} + impl From> for Stakes { fn from(stakes: Stakes) -> Self { let stake_delegations = stakes diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index 22fc5ea44645c6..26673d0c7a8378 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -213,6 +213,13 @@ impl StakeStateV2 { } } + pub fn stake_ref(&self) -> Option<&Stake> { + match self { + StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(stake), + _ => None, + } + } + pub fn delegation(&self) -> Option { match self { StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(stake.delegation), From 169eeec4a3ffd420562d9e3e2594ef4bb8c29212 Mon Sep 17 00:00:00 2001 From: Jonathan Bakebwa Date: Thu, 8 Aug 2024 04:47:14 +0300 Subject: [PATCH 046/529] refactor: Optimize `instruction_accounts` creation in `invoke_context` (#2475) refactor: optimize instruction_accounts creation in invoke_context --- program-runtime/src/invoke_context.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 5c732463c9c353..96079b9a87072d 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -417,10 +417,10 @@ impl<'a> InvokeContext<'a> { let instruction_accounts = duplicate_indicies .into_iter() .map(|duplicate_index| { - Ok(deduplicated_instruction_accounts + deduplicated_instruction_accounts .get(duplicate_index) - .ok_or(InstructionError::NotEnoughAccountKeys)? - .clone()) + .cloned() + .ok_or(InstructionError::NotEnoughAccountKeys) }) .collect::, InstructionError>>()?; From fc31198ca87079ea3dde94cfbe5ea3ac08a8dd94 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 21:07:58 +0800 Subject: [PATCH 047/529] build(deps): bump hidapi from 2.6.2 to 2.6.3 (#2485) Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 2.6.2 to 2.6.3. - [Release notes](https://github.com/ruabmbua/hidapi-rs/releases) - [Commits](https://github.com/ruabmbua/hidapi-rs/compare/v2.6.2...v2.6.3) --- updated-dependencies: - dependency-name: hidapi dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b8c897130d45b..5c7f9d36bd68eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2656,9 +2656,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hidapi" -version = "2.6.2" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad5e383c2cf354bf4b54b7adf1dc781942fa9ec2ebc0714ad32524de968edce" +checksum = "03b876ecf37e86b359573c16c8366bc3eba52b689884a0fc42ba3f67203d2a8b" dependencies = [ "cc", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index 45f6625f01ec6d..ca61d6806ae86b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -237,7 +237,7 @@ gethostname = "0.2.3" getrandom = "0.2.10" goauth = "0.13.1" hex = "0.4.3" -hidapi = { version = "2.6.2", default-features = false } +hidapi = { version = "2.6.3", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" From 196845e7373618ad033999a6fa044334f9763987 Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 8 Aug 2024 10:28:20 -0400 Subject: [PATCH 048/529] SDK: Add `PodSlotHashes` API (#2481) * SDK: Add `PodSlotHashes` API * SDK: Add tests for `PodSlotHashes` API * SDK: Deprecate `SlotHashesSysvar` API --- Cargo.lock | 1 + sdk/program/Cargo.toml | 1 + sdk/program/src/sysvar/slot_hashes.rs | 177 +++++++++++++++++++++++++- 3 files changed, 176 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c7f9d36bd68eb..0455ad9a1f632b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6929,6 +6929,7 @@ dependencies = [ "solana-secp256k1-recover", "solana-short-vec", "static_assertions", + "test-case", "thiserror", "wasm-bindgen", ] diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index e6c76f6d2a42a5..954c2fa6b6511e 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -83,6 +83,7 @@ itertools = { workspace = true } serde_json = { workspace = true } serial_test = { workspace = true } static_assertions = { workspace = true } +test-case = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/sdk/program/src/sysvar/slot_hashes.rs b/sdk/program/src/sysvar/slot_hashes.rs index 97a465165314e3..69ab49dc4e124a 100644 --- a/sdk/program/src/sysvar/slot_hashes.rs +++ b/sdk/program/src/sysvar/slot_hashes.rs @@ -58,6 +58,8 @@ use { bytemuck_derive::{Pod, Zeroable}, }; +const U64_SIZE: usize = std::mem::size_of::(); + crate::declare_sysvar_id!("SysvarS1otHashes111111111111111111111111111", SlotHashes); impl Sysvar for SlotHashes { @@ -72,16 +74,104 @@ impl Sysvar for SlotHashes { } } +/// A bytemuck-compatible (plain old data) version of `SlotHash`. #[derive(Copy, Clone, Default, Pod, Zeroable)] #[repr(C)] -struct PodSlotHash { - slot: Slot, - hash: Hash, +pub struct PodSlotHash { + pub slot: Slot, + pub hash: Hash, +} + +/// API for querying of the `SlotHashes` sysvar by on-chain programs. +/// +/// Hangs onto the allocated raw buffer from the account data, which can be +/// queried or accessed directly as a slice of `PodSlotHash`. +#[derive(Default)] +pub struct PodSlotHashes { + data: Vec, + slot_hashes_start: usize, + slot_hashes_end: usize, +} + +impl PodSlotHashes { + /// Fetch all of the raw sysvar data using the `sol_get_sysvar` syscall. + pub fn fetch() -> Result { + // Allocate an uninitialized buffer for the raw sysvar data. 
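+        // Illustrative note (not from this patch): `vec![0; sysvar_len]`
+        // below zero-fills the buffer; it is "uninitialized" only in the
+        // sense that bytes past the populated slot-hash region are never
+        // read back as valid data.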
+ let sysvar_len = SlotHashes::size_of(); + let mut data = vec![0; sysvar_len]; + + // Ensure the created buffer is aligned to 8. + if data.as_ptr().align_offset(8) != 0 { + return Err(ProgramError::InvalidAccountData); + } + + // Populate the buffer by fetching all sysvar data using the + // `sol_get_sysvar` syscall. + get_sysvar( + &mut data, + &SlotHashes::id(), + /* offset */ 0, + /* length */ sysvar_len as u64, + )?; + + // Get the number of slot hashes present in the data by reading the + // `u64` length at the beginning of the data, then use that count to + // calculate the length of the slot hashes data. + // + // The rest of the buffer is uninitialized and should not be accessed. + let length = data + .get(..U64_SIZE) + .and_then(|bytes| bytes.try_into().ok()) + .map(u64::from_le_bytes) + .and_then(|length| length.checked_mul(std::mem::size_of::() as u64)) + .ok_or(ProgramError::InvalidAccountData)?; + + let slot_hashes_start = U64_SIZE; + let slot_hashes_end = slot_hashes_start.saturating_add(length as usize); + + Ok(Self { + data, + slot_hashes_start, + slot_hashes_end, + }) + } + + /// Return the `SlotHashes` sysvar data as a slice of `PodSlotHash`. + /// Returns a slice of only the initialized sysvar data. + pub fn as_slice(&self) -> Result<&[PodSlotHash], ProgramError> { + self.data + .get(self.slot_hashes_start..self.slot_hashes_end) + .and_then(|data| bytemuck::try_cast_slice(data).ok()) + .ok_or(ProgramError::InvalidAccountData) + } + + /// Given a slot, get its corresponding hash in the `SlotHashes` sysvar + /// data. Returns `None` if the slot is not found. + pub fn get(&self, slot: &Slot) -> Result, ProgramError> { + self.as_slice().map(|pod_hashes| { + pod_hashes + .binary_search_by(|PodSlotHash { slot: this, .. }| slot.cmp(this)) + .map(|idx| pod_hashes[idx].hash) + .ok() + }) + } + + /// Given a slot, get its position in the `SlotHashes` sysvar data. Returns + /// `None` if the slot is not found. + pub fn position(&self, slot: &Slot) -> Result, ProgramError> { + self.as_slice().map(|pod_hashes| { + pod_hashes + .binary_search_by(|PodSlotHash { slot: this, .. }| slot.cmp(this)) + .ok() + }) + } } /// API for querying the `SlotHashes` sysvar. +#[deprecated(since = "2.1.0", note = "Please use `PodSlotHashes` instead")] pub struct SlotHashesSysvar; +#[allow(deprecated)] impl SlotHashesSysvar { /// Get a value from the sysvar entries by its key. /// Returns `None` if the key is not found. @@ -134,6 +224,7 @@ mod tests { sysvar::tests::mock_get_sysvar_syscall, }, serial_test::serial, + test_case::test_case, }; #[test] @@ -149,6 +240,86 @@ mod tests { ); } + fn mock_slot_hashes(slot_hashes: &SlotHashes) { + // The data is always `SlotHashes::size_of()`. 
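+        // Illustrative note (not from this patch): the fixed-size buffer
+        // mirrors the on-chain layout, a little-endian u64 entry count
+        // followed by (u64 slot, 32-byte hash) pairs, with any remaining
+        // bytes left zeroed.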
+ let mut data = vec![0; SlotHashes::size_of()]; + bincode::serialize_into(&mut data[..], slot_hashes).unwrap(); + mock_get_sysvar_syscall(&data); + } + + #[test_case(0)] + #[test_case(1)] + #[test_case(2)] + #[test_case(5)] + #[test_case(10)] + #[test_case(64)] + #[test_case(128)] + #[test_case(192)] + #[test_case(256)] + #[test_case(384)] + #[test_case(MAX_ENTRIES)] + #[serial] + fn test_pod_slot_hashes(num_entries: usize) { + let mut slot_hashes = vec![]; + for i in 0..num_entries { + slot_hashes.push(( + i as u64, + hash(&[(i >> 24) as u8, (i >> 16) as u8, (i >> 8) as u8, i as u8]), + )); + } + + let check_slot_hashes = SlotHashes::new(&slot_hashes); + mock_slot_hashes(&check_slot_hashes); + + let pod_slot_hashes = PodSlotHashes::fetch().unwrap(); + + // Assert the slice of `PodSlotHash` has the same length as + // `SlotHashes`. + let pod_slot_hashes_slice = pod_slot_hashes.as_slice().unwrap(); + assert_eq!(pod_slot_hashes_slice.len(), slot_hashes.len()); + + // Assert `PodSlotHashes` and `SlotHashes` contain the same slot hashes + // in the same order. + for slot in slot_hashes.iter().map(|(slot, _hash)| slot) { + // `get`: + assert_eq!( + pod_slot_hashes.get(slot).unwrap().as_ref(), + check_slot_hashes.get(slot), + ); + // `position`: + assert_eq!( + pod_slot_hashes.position(slot).unwrap(), + check_slot_hashes.position(slot), + ); + } + + // Check a few `None` values. + let not_a_slot = num_entries.saturating_add(1) as u64; + assert_eq!( + pod_slot_hashes.get(¬_a_slot).unwrap().as_ref(), + check_slot_hashes.get(¬_a_slot), + ); + assert_eq!(pod_slot_hashes.get(¬_a_slot).unwrap(), None); + assert_eq!( + pod_slot_hashes.position(¬_a_slot).unwrap(), + check_slot_hashes.position(¬_a_slot), + ); + assert_eq!(pod_slot_hashes.position(¬_a_slot).unwrap(), None); + + let not_a_slot = num_entries.saturating_add(2) as u64; + assert_eq!( + pod_slot_hashes.get(¬_a_slot).unwrap().as_ref(), + check_slot_hashes.get(¬_a_slot), + ); + assert_eq!(pod_slot_hashes.get(¬_a_slot).unwrap(), None); + assert_eq!( + pod_slot_hashes.position(¬_a_slot).unwrap(), + check_slot_hashes.position(¬_a_slot), + ); + assert_eq!(pod_slot_hashes.position(¬_a_slot).unwrap(), None); + } + + #[allow(deprecated)] #[serial] #[test] fn test_slot_hashes_sysvar() { From e8dfc9a9aec5b791db4ab6d3cac03259c513aa9a Mon Sep 17 00:00:00 2001 From: dmakarov Date: Thu, 8 Aug 2024 10:44:42 -0400 Subject: [PATCH 049/529] Refactor data structure representing account candidates for cleaning (#2296) * Refactor data structure representing account candidates for cleaning AccountsDB::clean_accounts makes unnecessary copies of large number of pubkeys and accompanying information to find and operate on the accounts that can be deleted from the accounts index. With this change the candidates for deletion are organized in a single data structure with necessary information being updated in-place, thus reducing memory requirements of the cleaning procedure. 
* Remove redundant method of CleaningInfo and make its fields private * Rework pubkey_to_slot_set construction * Use slice instead of Vec in parameter types * Remove redundant duplicate expression * Fix tests build * Replace Vec by Box * Fix clippy error * Add missing expect() message * Update expect message * Remove unnecessary check for empty bins * Remove redundant type specification * Formatting fixes * Exclude accounts with empty slot list * Fix name in an info message * Call scan once per pubkey * Fix deadlock in test * Remove redundant variable * Find affected pubkey in all bins * Undo BINS_FOR_TESTING change * Avoid acquiring read lock twice in same thread * comments * comments * replace if-then-else by then_some * Don't retain entries with empty slot list * add filtering on store counts loop * fix fmting on scan_accounts * extract out count_pubkeys * extract out `insert_pubkey` * fix fmt on `retain` * Remove hashset_to_vec timer * comments --------- Co-authored-by: jeff washington --- accounts-db/src/accounts_db.rs | 643 +++++++++++++++++++-------------- 1 file changed, 374 insertions(+), 269 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 28610eb79eb20a..2a9b0c8b1c7331 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -105,7 +105,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering}, - Arc, Condvar, Mutex, + Arc, Condvar, Mutex, RwLock, }, thread::{sleep, Builder}, time::{Duration, Instant}, @@ -1054,7 +1054,6 @@ pub enum AccountsHashVerificationError { struct CleanKeyTimings { collect_delta_keys_us: u64, delta_insert_us: u64, - hashset_to_vec_us: u64, dirty_store_processing_us: u64, delta_key_count: u64, dirty_pubkeys_count: u64, @@ -1345,6 +1344,19 @@ impl StoreAccountsTiming { } } +#[derive(Default, Debug)] +struct CleaningInfo { + slot_list: SlotList, + ref_count: u64, +} + +/// This is the return type of AccountsDb::construct_candidate_clean_keys. +/// It's a collection of pubkeys with associated information to +/// facilitate the decision making about which accounts can be removed +/// from the accounts index. In addition, the minimal dirty slot is +/// included in the returned value. +type CleaningCandidates = (Box<[RwLock>]>, Option); + /// Removing unrooted slots in Accounts Background Service needs to be synchronized with flushing /// slots from the Accounts Cache. This keeps track of those slots and the Mutex + Condvar for /// synchronization. @@ -2762,7 +2774,8 @@ impl AccountsDb { /// 1. one of the pubkeys in the store has account info to a store whose store count is not going to zero /// 2. a pubkey we were planning to remove is not removing all stores that contain the account fn calc_delete_dependencies( - purges: &HashMap, RefCount)>, + &self, + candidates: &[RwLock>], store_counts: &mut HashMap)>, min_slot: Option, ) { @@ -2770,77 +2783,99 @@ impl AccountsDb { // do not match the criteria of deleting all appendvecs which contain them // then increment their storage count. 
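        // Illustrative example (not from this patch): a pubkey with
        // ref_count 3 but only 2 entries in its slot list still lives in a
        // store that is not being deleted, so every store in its slot list
        // must keep a nonzero count.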
let mut already_counted = IntSet::default(); - for (pubkey, (slot_list, ref_count)) in purges.iter() { - let mut failed_slot = None; - let all_stores_being_deleted = slot_list.len() as RefCount == *ref_count; - if all_stores_being_deleted { - let mut delete = true; - for (slot, _account_info) in slot_list { - if let Some(count) = store_counts.get(slot).map(|s| s.0) { - debug!( - "calc_delete_dependencies() + for (bin_index, bin) in candidates.iter().enumerate() { + let bin = bin.read().unwrap(); + for ( + pubkey, + CleaningInfo { + slot_list, + ref_count, + }, + ) in bin.iter().filter(|x| !x.1.slot_list.is_empty()) + { + let mut failed_slot = None; + let all_stores_being_deleted = slot_list.len() as RefCount == *ref_count; + if all_stores_being_deleted { + let mut delete = true; + for (slot, _account_info) in slot_list { + if let Some(count) = store_counts.get(slot).map(|s| s.0) { + debug!( + "calc_delete_dependencies() slot: {slot}, count len: {count}" - ); - if count == 0 { - // this store CAN be removed - continue; + ); + if count == 0 { + // this store CAN be removed + continue; + } } + // One of the pubkeys in the store has account info to a store whose store count is not going to zero. + // If the store cannot be found, that also means store isn't being deleted. + failed_slot = Some(*slot); + delete = false; + break; } - // One of the pubkeys in the store has account info to a store whose store count is not going to zero. - // If the store cannot be found, that also means store isn't being deleted. - failed_slot = Some(*slot); - delete = false; - break; - } - if delete { - // this pubkey can be deleted from all stores it is in - continue; - } - } else { - // a pubkey we were planning to remove is not removing all stores that contain the account - debug!( - "calc_delete_dependencies(), + if delete { + // this pubkey can be deleted from all stores it is in + continue; + } + } else { + // a pubkey we were planning to remove is not removing all stores that contain the account + debug!( + "calc_delete_dependencies(), pubkey: {}, slot_list: {:?}, slot_list_len: {}, ref_count: {}", - pubkey, - slot_list, - slot_list.len(), - ref_count, - ); - } - - // increment store_counts to non-zero for all stores that can not be deleted. - let mut pending_stores = IntSet::default(); - for (slot, _account_info) in slot_list { - if !already_counted.contains(slot) { - pending_stores.insert(*slot); + pubkey, + slot_list, + slot_list.len(), + ref_count, + ); } - } - while !pending_stores.is_empty() { - let slot = pending_stores.iter().next().cloned().unwrap(); - if Some(slot) == min_slot { - if let Some(failed_slot) = failed_slot.take() { - info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey} in slot {failed_slot}"); - } else { - info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey}, slot list len: {}, ref count: {ref_count}", slot_list.len()); + + // increment store_counts to non-zero for all stores that can not be deleted. 
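+                // Illustrative note (not from this patch): this loop is a
+                // worklist over slots. A store that cannot be emptied pins
+                // every pubkey it holds, and those pubkeys' slots are pushed
+                // back onto `pending_stores` until the transitive closure is
+                // removed from `store_counts`.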
+ let mut pending_stores = IntSet::default(); + for (slot, _account_info) in slot_list { + if !already_counted.contains(slot) { + pending_stores.insert(*slot); } } + while !pending_stores.is_empty() { + let slot = pending_stores.iter().next().cloned().unwrap(); + if Some(slot) == min_slot { + if let Some(failed_slot) = failed_slot.take() { + info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey} in slot {failed_slot}"); + } else { + info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey}, slot list len: {}, ref count: {ref_count}", slot_list.len()); + } + } - pending_stores.remove(&slot); - if !already_counted.insert(slot) { - continue; - } - // the point of all this code: remove the store count for all stores we cannot remove - if let Some(store_count) = store_counts.remove(&slot) { - // all pubkeys in this store also cannot be removed from all stores they are in - let affected_pubkeys = &store_count.1; - for key in affected_pubkeys { - for (slot, _account_info) in &purges.get(key).unwrap().0 { - if !already_counted.contains(slot) { - pending_stores.insert(*slot); + pending_stores.remove(&slot); + if !already_counted.insert(slot) { + continue; + } + // the point of all this code: remove the store count for all stores we cannot remove + if let Some(store_count) = store_counts.remove(&slot) { + // all pubkeys in this store also cannot be removed from all stores they are in + let affected_pubkeys = &store_count.1; + for key in affected_pubkeys { + let candidates_bin_index = + self.accounts_index.bin_calculator.bin_from_pubkey(key); + let mut update_pending_stores = + |bin: &HashMap| { + for (slot, _account_info) in &bin.get(key).unwrap().slot_list { + if !already_counted.contains(slot) { + pending_stores.insert(*slot); + } + } + }; + if candidates_bin_index == bin_index { + update_pending_stores(&bin); + } else { + update_pending_stores( + &candidates[candidates_bin_index].read().unwrap(), + ); } } } @@ -3003,6 +3038,13 @@ impl AccountsDb { self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots) } + fn count_pubkeys(candidates: &[RwLock>]) -> u64 { + candidates + .iter() + .map(|x| x.read().unwrap().len()) + .sum::() as u64 + } + /// Construct a vec of pubkeys for cleaning from: /// uncleaned_pubkeys - the delta set of updated pubkeys in rooted slots from the last clean /// dirty_stores - set of stores which had accounts removed or recently rooted @@ -3013,7 +3055,7 @@ impl AccountsDb { is_startup: bool, timings: &mut CleanKeyTimings, epoch_schedule: &EpochSchedule, - ) -> (Vec, Option) { + ) -> CleaningCandidates { let oldest_non_ancient_slot = self.get_oldest_non_ancient_slot(epoch_schedule); let mut dirty_store_processing_time = Measure::start("dirty_store_processing"); let max_slot_inclusive = @@ -3032,7 +3074,17 @@ impl AccountsDb { } }); let dirty_stores_len = dirty_stores.len(); - let pubkeys = DashSet::new(); + let num_bins = self.accounts_index.bins(); + let candidates: Box<_> = + std::iter::repeat_with(|| RwLock::new(HashMap::::new())) + .take(num_bins) + .collect(); + + let insert_pubkey = |pubkey: Pubkey| { + let index = self.accounts_index.bin_calculator.bin_from_pubkey(&pubkey); + let mut candidates_bin = candidates[index].write().unwrap(); + candidates_bin.insert(pubkey, CleaningInfo::default()); + }; let dirty_ancient_stores = AtomicUsize::default(); let mut dirty_store_routine = || { let chunk_size = 1.max(dirty_stores_len.saturating_div(rayon::current_num_threads())); @@ -3045,8 +3097,8 @@ impl 
AccountsDb { dirty_ancient_stores.fetch_add(1, Ordering::Relaxed); } oldest_dirty_slot = oldest_dirty_slot.min(*slot); - store.accounts.scan_pubkeys(|k| { - pubkeys.insert(*k); + store.accounts.scan_pubkeys(|key| { + insert_pubkey(*key); }); }); oldest_dirty_slot @@ -3066,12 +3118,12 @@ impl AccountsDb { dirty_store_routine(); }); } + timings.dirty_pubkeys_count = Self::count_pubkeys(&candidates); trace!( "dirty_stores.len: {} pubkeys.len: {}", dirty_stores_len, - pubkeys.len() + timings.dirty_pubkeys_count, ); - timings.dirty_pubkeys_count = pubkeys.len() as u64; dirty_store_processing_time.stop(); timings.dirty_store_processing_us += dirty_store_processing_time.as_us(); timings.dirty_ancient_stores = dirty_ancient_stores.load(Ordering::Relaxed); @@ -3086,19 +3138,14 @@ impl AccountsDb { self.thread_pool_clean.install(|| { delta_keys.par_iter().for_each(|keys| { for key in keys { - pubkeys.insert(*key); + insert_pubkey(*key); } }); }); delta_insert.stop(); timings.delta_insert_us += delta_insert.as_us(); - timings.delta_key_count = pubkeys.len() as u64; - - let mut hashset_to_vec = Measure::start("flat_map"); - let mut pubkeys: Vec = pubkeys.into_iter().collect(); - hashset_to_vec.stop(); - timings.hashset_to_vec_us += hashset_to_vec.as_us(); + timings.delta_key_count = Self::count_pubkeys(&candidates); // Check if we should purge any of the zero_lamport_accounts_to_purge_later, based on the // latest_full_snapshot_slot. @@ -3113,13 +3160,13 @@ impl AccountsDb { let is_candidate_for_clean = max_slot_inclusive >= *slot && latest_full_snapshot_slot >= *slot; if is_candidate_for_clean { - pubkeys.push(*pubkey); + insert_pubkey(*pubkey); } !is_candidate_for_clean }); } - (pubkeys, min_dirty_slot) + (candidates, min_dirty_slot) } /// Call clean_accounts() with the common parameters that tests/benches use. @@ -3222,23 +3269,14 @@ impl AccountsDb { self.report_store_stats(); let mut key_timings = CleanKeyTimings::default(); - let (mut candidates, min_dirty_slot) = self.construct_candidate_clean_keys( + let (candidates, min_dirty_slot) = self.construct_candidate_clean_keys( max_clean_root_inclusive, is_startup, &mut key_timings, epoch_schedule, ); - let mut sort = Measure::start("sort"); - if is_startup { - candidates.par_sort_unstable(); - } else { - self.thread_pool_clean - .install(|| candidates.par_sort_unstable()); - } - sort.stop(); - - let num_candidates = candidates.len(); + let num_candidates = Self::count_pubkeys(&candidates); let mut accounts_scan = Measure::start("accounts_scan"); let uncleaned_roots = self.accounts_index.clone_uncleaned_roots(); let found_not_zero_accum = AtomicU64::new(0); @@ -3247,109 +3285,115 @@ impl AccountsDb { let useful_accum = AtomicU64::new(0); // parallel scan the index. 
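        // Illustrative note (not from this patch): each rayon task below
        // owns one index bin of `candidates` and fills in its CleaningInfo
        // entries in place, instead of building a separate
        // pubkey -> (slot list, ref count) map per chunk.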
- let (mut purges_zero_lamports, purges_old_accounts) = { + let purges_old_accounts = { let do_clean_scan = || { candidates - .par_chunks(4096) - .map(|candidates: &[Pubkey]| { - let mut purges_zero_lamports = HashMap::new(); + .par_iter() + .map(|candidates_bin| { let mut purges_old_accounts = Vec::new(); let mut found_not_zero = 0; let mut not_found_on_fork = 0; let mut missing = 0; let mut useful = 0; - self.accounts_index.scan( - candidates.iter(), - |candidate, slot_list_and_ref_count, _entry| { - let mut useless = true; - if let Some((slot_list, ref_count)) = slot_list_and_ref_count { - // find the highest rooted slot in the slot list - let index_in_slot_list = self.accounts_index.latest_slot( - None, - slot_list, - max_clean_root_inclusive, - ); - - match index_in_slot_list { - Some(index_in_slot_list) => { - // found info relative to max_clean_root - let (slot, account_info) = - &slot_list[index_in_slot_list]; - if account_info.is_zero_lamport() { - useless = false; - // the latest one is zero lamports. we may be able to purge it. - // so, add to purges_zero_lamports - purges_zero_lamports.insert( - *candidate, - ( - // add all the rooted entries that contain this pubkey. we know the highest rooted entry is zero lamports - self.accounts_index.get_rooted_entries( - slot_list, - max_clean_root_inclusive, - ), - ref_count, - ), + let mut candidates_bin = candidates_bin.write().unwrap(); + // Iterate over each HashMap entry to + // avoid capturing the HashMap in the + // closure passed to scan thus making + // conflicting read and write borrows. + candidates_bin + .iter_mut() + .for_each(|(candidate_pubkey, candidate_info)| { + self.accounts_index.scan( + [*candidate_pubkey].iter(), + |candidate_pubkey, slot_list_and_ref_count, _entry| { + let mut useless = true; + if let Some((slot_list, ref_count)) = + slot_list_and_ref_count + { + // find the highest rooted slot in the slot list + let index_in_slot_list = + self.accounts_index.latest_slot( + None, + slot_list, + max_clean_root_inclusive, ); - } else { - found_not_zero += 1; - } - if uncleaned_roots.contains(slot) { - // Assertion enforced by `accounts_index.get()`, the latest slot - // will not be greater than the given `max_clean_root` - if let Some(max_clean_root_inclusive) = - max_clean_root_inclusive - { - assert!(slot <= &max_clean_root_inclusive); + + match index_in_slot_list { + Some(index_in_slot_list) => { + // found info relative to max_clean_root + let (slot, account_info) = + &slot_list[index_in_slot_list]; + if account_info.is_zero_lamport() { + useless = false; + // The latest one is zero lamports. We may be able to purge it. + // Add all the rooted entries that contain this pubkey. + // We know the highest rooted entry is zero lamports. 
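+                                                // Illustrative note (not
+                                                // from this patch): a
+                                                // non-empty slot_list stored
+                                                // here is what later marks
+                                                // this pubkey as a
+                                                // zero-lamport purge
+                                                // candidate.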
+ candidate_info.slot_list = + self.accounts_index.get_rooted_entries( + slot_list, + max_clean_root_inclusive, + ); + candidate_info.ref_count = ref_count; + } else { + found_not_zero += 1; + } + if uncleaned_roots.contains(slot) { + // Assertion enforced by `accounts_index.get()`, the latest slot + // will not be greater than the given `max_clean_root` + if let Some(max_clean_root_inclusive) = + max_clean_root_inclusive + { + assert!( + slot <= &max_clean_root_inclusive + ); + } + if slot_list.len() > 1 { + // no need to purge old accounts if there is only 1 slot in the slot list + purges_old_accounts + .push(*candidate_pubkey); + useless = false; + } else { + self.clean_accounts_stats + .uncleaned_roots_slot_list_1 + .fetch_add(1, Ordering::Relaxed); + } + } } - if slot_list.len() > 1 { - // no need to purge old accounts if there is only 1 slot in the slot list - purges_old_accounts.push(*candidate); + None => { + // This pubkey is in the index but not in a root slot, so clean + // it up by adding it to the to-be-purged list. + // + // Also, this pubkey must have been touched by some slot since + // it was in the dirty list, so we assume that the slot it was + // touched in must be unrooted. + not_found_on_fork += 1; useless = false; - } else { - self.clean_accounts_stats - .uncleaned_roots_slot_list_1 - .fetch_add(1, Ordering::Relaxed); + purges_old_accounts.push(*candidate_pubkey); } } + } else { + missing += 1; } - None => { - // This pubkey is in the index but not in a root slot, so clean - // it up by adding it to the to-be-purged list. - // - // Also, this pubkey must have been touched by some slot since - // it was in the dirty list, so we assume that the slot it was - // touched in must be unrooted. - not_found_on_fork += 1; - useless = false; - purges_old_accounts.push(*candidate); + if !useless { + useful += 1; } - } - } else { - missing += 1; - } - if !useless { - useful += 1; - } - AccountsIndexScanResult::OnlyKeepInMemoryIfDirty - }, - None, - false, - ); + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty + }, + None, + false, + ); + }); found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed); not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed); missing_accum.fetch_add(missing, Ordering::Relaxed); useful_accum.fetch_add(useful, Ordering::Relaxed); - (purges_zero_lamports, purges_old_accounts) + purges_old_accounts + }) + .reduce(Vec::new, |mut a, b| { + // Collapse down the vecs into one. + a.extend(b); + a }) - .reduce( - || (HashMap::new(), Vec::new()), - |mut a, b| { - // Collapse down the hashmaps/vecs into one. 
- a.0.extend(b.0); - a.1.extend(b.1); - a - }, - ) }; if is_startup { do_clean_scan() @@ -3376,84 +3420,105 @@ impl AccountsDb { // Calculate store counts as if everything was purged // Then purge if we can let mut store_counts: HashMap)> = HashMap::new(); - for (pubkey, (slot_list, ref_count)) in purges_zero_lamports.iter_mut() { - if purged_account_slots.contains_key(pubkey) { - *ref_count = self.accounts_index.ref_count_from_storage(pubkey); - } - slot_list.retain(|(slot, account_info)| { - let was_slot_purged = purged_account_slots - .get(pubkey) - .map(|slots_removed| slots_removed.contains(slot)) - .unwrap_or(false); - if was_slot_purged { - // No need to look up the slot storage below if the entire - // slot was purged - return false; + for candidates_bin in candidates.iter() { + for ( + pubkey, + CleaningInfo { + slot_list, + ref_count, + }, + ) in candidates_bin.write().unwrap().iter_mut() + { + if slot_list.is_empty() { + continue; // seems simpler than filtering. `candidates` contains all the pubkeys we original started with } - // Check if this update in `slot` to the account with `key` was reclaimed earlier by - // `clean_accounts_older_than_root()` - let was_reclaimed = removed_accounts - .get(slot) - .map(|store_removed| store_removed.contains(&account_info.offset())) - .unwrap_or(false); - if was_reclaimed { - return false; + if purged_account_slots.contains_key(pubkey) { + *ref_count = self.accounts_index.ref_count_from_storage(pubkey); } - if let Some(store_count) = store_counts.get_mut(slot) { - store_count.0 -= 1; - store_count.1.insert(*pubkey); - } else { - let mut key_set = HashSet::new(); - key_set.insert(*pubkey); - assert!( - !account_info.is_cached(), - "The Accounts Cache must be flushed first for this account info. pubkey: {}, slot: {}", - *pubkey, - *slot - ); - let count = self - .storage - .get_account_storage_entry(*slot, account_info.store_id()) - .map(|store| store.count()) - .unwrap() - - 1; - debug!( - "store_counts, inserting slot: {}, store id: {}, count: {}", - slot, account_info.store_id(), count - ); - store_counts.insert(*slot, (count, key_set)); - } - true - }); + slot_list.retain(|(slot, account_info)| { + let was_slot_purged = purged_account_slots + .get(pubkey) + .map(|slots_removed| slots_removed.contains(slot)) + .unwrap_or(false); + if was_slot_purged { + // No need to look up the slot storage below if the entire + // slot was purged + return false; + } + // Check if this update in `slot` to the account with `key` was reclaimed earlier by + // `clean_accounts_older_than_root()` + let was_reclaimed = removed_accounts + .get(slot) + .map(|store_removed| store_removed.contains(&account_info.offset())) + .unwrap_or(false); + if was_reclaimed { + return false; + } + if let Some(store_count) = store_counts.get_mut(slot) { + store_count.0 -= 1; + store_count.1.insert(*pubkey); + } else { + let mut key_set = HashSet::new(); + key_set.insert(*pubkey); + assert!( + !account_info.is_cached(), + "The Accounts Cache must be flushed first for this account info. 
pubkey: {}, slot: {}", + *pubkey, + *slot + ); + let count = self + .storage + .get_account_storage_entry(*slot, account_info.store_id()) + .map(|store| store.count()) + .unwrap() + - 1; + debug!( + "store_counts, inserting slot: {}, store id: {}, count: {}", + slot, account_info.store_id(), count + ); + store_counts.insert(*slot, (count, key_set)); + } + true + }); + } } store_counts_time.stop(); let mut calc_deps_time = Measure::start("calc_deps"); - Self::calc_delete_dependencies(&purges_zero_lamports, &mut store_counts, min_dirty_slot); + self.calc_delete_dependencies(&candidates, &mut store_counts, min_dirty_slot); calc_deps_time.stop(); let mut purge_filter = Measure::start("purge_filter"); self.filter_zero_lamport_clean_for_incremental_snapshots( max_clean_root_inclusive, &store_counts, - &mut purges_zero_lamports, + &candidates, ); purge_filter.stop(); let mut reclaims_time = Measure::start("reclaims"); // Recalculate reclaims with new purge set - let pubkey_to_slot_set: Vec<_> = purges_zero_lamports - .into_iter() - .map(|(key, (slots_list, _ref_count))| { - ( - key, - slots_list - .into_iter() - .map(|(slot, _)| slot) - .collect::>(), - ) - }) - .collect(); + let mut pubkey_to_slot_set = Vec::new(); + for candidates_bin in candidates.iter() { + let candidates_bin = candidates_bin.read().unwrap(); + let mut bin_set = candidates_bin + .iter() + .filter_map(|(pubkey, cleaning_info)| { + let CleaningInfo { + slot_list, + ref_count: _, + } = cleaning_info; + (!slot_list.is_empty()).then_some(( + *pubkey, + slot_list + .iter() + .map(|(slot, _)| *slot) + .collect::>(), + )) + }) + .collect::>(); + pubkey_to_slot_set.append(&mut bin_set); + } let (reclaims, pubkeys_removed_from_accounts_index2) = self.purge_keys_exact(pubkey_to_slot_set.iter()); @@ -3507,7 +3572,6 @@ impl AccountsDb { ("delta_insert_us", key_timings.delta_insert_us, i64), ("delta_key_count", key_timings.delta_key_count, i64), ("dirty_pubkeys_count", key_timings.dirty_pubkeys_count, i64), - ("sort_us", sort.as_us(), i64), ("useful_keys", useful_accum.load(Ordering::Relaxed), i64), ("total_keys_count", num_candidates, i64), ( @@ -3677,7 +3741,9 @@ impl AccountsDb { } /// During clean, some zero-lamport accounts that are marked for purge should *not* actually - /// get purged. Filter out those accounts here by removing them from 'purges_zero_lamports' + /// get purged. Filter out those accounts here by removing them from 'candidates'. + /// Candidates may contain entries with empty slots list in CleaningInfo. + /// The function removes such entries from 'candidates'. /// /// When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher /// than the latest full snapshot slot. This is to protect against the following scenario: @@ -3701,7 +3767,7 @@ impl AccountsDb { &self, max_clean_root_inclusive: Option, store_counts: &HashMap)>, - purges_zero_lamports: &mut HashMap, RefCount)>, + candidates: &[RwLock>], ) { let latest_full_snapshot_slot = self.latest_full_snapshot_slot(); let should_filter_for_incremental_snapshots = max_clean_root_inclusive.unwrap_or(Slot::MAX) @@ -3711,31 +3777,42 @@ impl AccountsDb { "if filtering for incremental snapshots, then snapshots should be enabled", ); - purges_zero_lamports.retain(|pubkey, (slot_account_infos, _ref_count)| { - // Only keep purges_zero_lamports where the entire history of the account in the root set - // can be purged. All AppendVecs for those updates are dead. 
- for (slot, _account_info) in slot_account_infos.iter() { - if let Some(store_count) = store_counts.get(slot) { - if store_count.0 != 0 { - // one store this pubkey is in is not being removed, so this pubkey cannot be removed at all + for bin in candidates { + let mut bin = bin.write().unwrap(); + bin.retain(|pubkey, cleaning_info| { + let CleaningInfo { + slot_list, + ref_count: _, + } = cleaning_info; + if slot_list.is_empty() { + return false; + } + // Only keep candidates where the entire history of the account in the root set + // can be purged. All AppendVecs for those updates are dead. + for (slot, _account_info) in slot_list.iter() { + if let Some(store_count) = store_counts.get(slot) { + if store_count.0 != 0 { + // one store this pubkey is in is not being removed, so this pubkey cannot be removed at all + return false; + } + } else { + // store is not being removed, so this pubkey cannot be removed at all return false; } - } else { - // store is not being removed, so this pubkey cannot be removed at all - return false; } - } - // Exit early if not filtering more for incremental snapshots - if !should_filter_for_incremental_snapshots { - return true; - } + // Exit early if not filtering more for incremental snapshots + if !should_filter_for_incremental_snapshots { + return true; + } - let slot_account_info_at_highest_slot = slot_account_infos - .iter() - .max_by_key(|(slot, _account_info)| slot); + // Safety: We exited early if the slot list was empty, + // so we're guaranteed here that `.max_by_key()` returns Some. + let (slot, account_info) = slot_list + .iter() + .max_by_key(|(slot, _account_info)| slot) + .unwrap(); - slot_account_info_at_highest_slot.map_or(true, |(slot, account_info)| { // Do *not* purge zero-lamport accounts if the slot is greater than the last full // snapshot slot. Since we're `retain`ing the accounts-to-purge, I felt creating // the `cannot_purge` variable made this easier to understand. 
Accounts that do @@ -3748,8 +3825,8 @@ impl AccountsDb { .insert((*slot, *pubkey)); } !cannot_purge - }) - }); + }); + } } // Must be kept private!, does sensitive cleanup that should only be called from @@ -12721,18 +12798,40 @@ pub mod tests { accounts_index.add_root(1); accounts_index.add_root(2); accounts_index.add_root(3); - let mut purges = HashMap::new(); + let num_bins = accounts_index.bins(); + let candidates: Box<_> = + std::iter::repeat_with(|| RwLock::new(HashMap::::new())) + .take(num_bins) + .collect(); for key in [&key0, &key1, &key2] { let index_entry = accounts_index.get_cloned(key).unwrap(); let rooted_entries = accounts_index .get_rooted_entries(index_entry.slot_list.read().unwrap().as_slice(), None); let ref_count = index_entry.ref_count(); - purges.insert(*key, (rooted_entries, ref_count)); + let index = accounts_index.bin_calculator.bin_from_pubkey(key); + let mut candidates_bin = candidates[index].write().unwrap(); + candidates_bin.insert( + *key, + CleaningInfo { + slot_list: rooted_entries, + ref_count, + }, + ); } - for (key, (list, ref_count)) in &purges { - info!(" purge {} ref_count {} =>", key, ref_count); - for x in list { - info!(" {:?}", x); + for candidates_bin in candidates.iter() { + let candidates_bin = candidates_bin.read().unwrap(); + for ( + key, + CleaningInfo { + slot_list: list, + ref_count, + }, + ) in candidates_bin.iter() + { + info!(" purge {} ref_count {} =>", key, ref_count); + for x in list { + info!(" {:?}", x); + } } } @@ -12741,7 +12840,8 @@ pub mod tests { store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1]))); store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2]))); store_counts.insert(3, (1, HashSet::from_iter(vec![key2]))); - AccountsDb::calc_delete_dependencies(&purges, &mut store_counts, None); + let accounts = AccountsDb::new_single_for_tests(); + accounts.calc_delete_dependencies(&candidates, &mut store_counts, None); let mut stores: Vec<_> = store_counts.keys().cloned().collect(); stores.sort_unstable(); for store in &stores { @@ -15023,9 +15123,14 @@ pub mod tests { let store_count = 0; let mut store_counts = HashMap::default(); store_counts.insert(slot, (store_count, key_set)); - let mut purges_zero_lamports = HashMap::default(); - purges_zero_lamports.insert(pubkey, (vec![(slot, account_info)], 1)); - + let candidates = [RwLock::new(HashMap::new())]; + candidates[0].write().unwrap().insert( + pubkey, + CleaningInfo { + slot_list: vec![(slot, account_info)], + ref_count: 1, + }, + ); let accounts_db = AccountsDb::new_single_for_tests(); if let Some(latest_full_snapshot_slot) = test_params.latest_full_snapshot_slot { accounts_db.set_latest_full_snapshot_slot(latest_full_snapshot_slot); @@ -15033,11 +15138,11 @@ pub mod tests { accounts_db.filter_zero_lamport_clean_for_incremental_snapshots( test_params.max_clean_root, &store_counts, - &mut purges_zero_lamports, + &candidates, ); assert_eq!( - purges_zero_lamports.contains_key(&pubkey), + candidates[0].read().unwrap().contains_key(&pubkey), test_params.should_contain ); }; From 08f72d2ff2819eb40cabd21f0a482cc17a725370 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 9 Aug 2024 00:17:20 +0900 Subject: [PATCH 050/529] Make dcou faster and check lib targets as well (#2127) --- ci/buildkite-pipeline.sh | 5 +++-- ci/test-dev-context-only-utils.sh | 3 ++- scripts/check-dev-context-only-utils.sh | 10 ++++++---- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 0c8327ee5961b3..9246ad27e4c6c4 
100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -181,8 +181,9 @@ wait_step() { all_test_steps() { command_step checks1 "ci/docker-run-default-image.sh ci/test-checks.sh" 20 check - command_step checks2 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-bins" 15 check - command_step checks3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-all-targets" 15 check + command_step dcou-1-of-3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh --partition 1/3" 15 check + command_step dcou-2-of-3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh --partition 2/3" 15 check + command_step dcou-3-of-3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh --partition 3/3" 15 check command_step miri "ci/docker-run-default-image.sh ci/test-miri.sh" 5 check command_step frozen-abi "ci/docker-run-default-image.sh ./test-abi.sh" 15 check wait_step diff --git a/ci/test-dev-context-only-utils.sh b/ci/test-dev-context-only-utils.sh index bec640cdf209f8..6694b457145d58 100755 --- a/ci/test-dev-context-only-utils.sh +++ b/ci/test-dev-context-only-utils.sh @@ -2,4 +2,5 @@ set -eo pipefail -scripts/check-dev-context-only-utils.sh "$@" +scripts/check-dev-context-only-utils.sh check-all-targets "$@" +scripts/check-dev-context-only-utils.sh check-bins-and-lib "$@" diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 65382d20663e46..78dcd6cad98773 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -42,8 +42,10 @@ printf -v allowed '"%s",' "${tainted_packages[@]}" allowed="${allowed%,}" mode=${1:-full} +# consume the mode, so that other arguments are forwarded to cargo-hack +shift case "$mode" in - tree | check-bins | check-all-targets | full) + tree | check-bins-and-lib | check-all-targets | full) ;; *) echo "$0: unrecognized mode: $mode"; @@ -156,9 +158,9 @@ fi # consistency with other CI steps and for the possibility of new similar lints. 
export RUSTFLAGS="-D warnings -Z threads=8 $RUSTFLAGS" -if [[ $mode = "check-bins" || $mode = "full" ]]; then - _ cargo "+${rust_nightly}" hack check --bins +if [[ $mode = "check-bins-and-lib" || $mode = "full" ]]; then + _ cargo "+${rust_nightly}" hack "$@" check fi if [[ $mode = "check-all-targets" || $mode = "full" ]]; then - _ cargo "+${rust_nightly}" hack check --all-targets + _ cargo "+${rust_nightly}" hack "$@" check --all-targets fi From c99095da982e23b4f2df0d8f4dfa19032b316659 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 8 Aug 2024 23:29:22 +0800 Subject: [PATCH 051/529] fix: borrow stakes delegation during snapshot serialization (#2455) --- runtime/src/bank.rs | 5 +- .../partitioned_epoch_rewards/calculation.rs | 6 +- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/stake_account.rs | 4 +- runtime/src/stakes.rs | 106 ++------- runtime/src/stakes/serde_stakes.rs | 211 ++++++++++++++++++ sdk/program/src/stake/state.rs | 7 + 7 files changed, 239 insertions(+), 102 deletions(-) create mode 100644 runtime/src/stakes/serde_stakes.rs diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c3b6c97af348f0..70fe81e9326063 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2234,8 +2234,7 @@ impl Bank { .fold( HashSet::default, |mut voter_pubkeys, (_stake_pubkey, stake_account)| { - let delegation = stake_account.delegation(); - voter_pubkeys.insert(delegation.voter_pubkey); + voter_pubkeys.insert(stake_account.delegation().voter_pubkey); voter_pubkeys }, ) @@ -2299,7 +2298,7 @@ impl Bank { }; if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() { let delegation = - InflationPointCalculationEvent::Delegation(delegation, solana_vote_program); + InflationPointCalculationEvent::Delegation(*delegation, solana_vote_program); let event = RewardCalculationEvent::Staking(stake_pubkey, &delegation); reward_calc_tracer(&event); } diff --git a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs index 67cd54944d4d57..9d929accb5cdb1 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs @@ -378,10 +378,9 @@ impl Bank { let stake_pubkey = **stake_pubkey; let stake_account = (*stake_account).to_owned(); - let delegation = stake_account.delegation(); + let vote_pubkey = stake_account.delegation().voter_pubkey; let (mut stake_account, stake_state) = <(AccountSharedData, StakeStateV2)>::from(stake_account); - let vote_pubkey = delegation.voter_pubkey; let vote_account = get_vote_account(&vote_pubkey)?; if vote_account.owner() != &solana_vote_program { return None; @@ -501,8 +500,7 @@ impl Bank { stake_delegations .par_iter() .map(|(_stake_pubkey, stake_account)| { - let delegation = stake_account.delegation(); - let vote_pubkey = delegation.voter_pubkey; + let vote_pubkey = stake_account.delegation().voter_pubkey; let Some(vote_account) = get_vote_account(&vote_pubkey) else { return 0; diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index f575aa64813f37..9159ea0ca45d36 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -536,7 +536,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "7a6C1oFtgZiMtZig7FbX9289xn55QadQ962rX61Gheef") + frozen_abi(digest = "DnUdXXELygo14vA8d6QoXo5bkJAQbTWqWW5Qf9RXXWgZ") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/stake_account.rs 
b/runtime/src/stake_account.rs index e3d8b5a05de6d9..d4fe0d65784f30 100644 --- a/runtime/src/stake_account.rs +++ b/runtime/src/stake_account.rs @@ -48,10 +48,10 @@ impl StakeAccount { impl StakeAccount { #[inline] - pub(crate) fn delegation(&self) -> Delegation { + pub(crate) fn delegation(&self) -> &Delegation { // Safe to unwrap here because StakeAccount will always // only wrap a stake-state which is a delegation. - self.stake_state.delegation().unwrap() + self.stake_state.delegation_ref().unwrap() } #[inline] diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index ff1b0d059edebe..085289ef95d932 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -26,6 +26,9 @@ use { thiserror::Error, }; +mod serde_stakes; +pub(crate) use serde_stakes::serde_stakes_enum_compat; + #[derive(Debug, Error)] pub enum Error { #[error("Invalid delegation: {0}")] @@ -283,7 +286,7 @@ impl Stakes { let stake_account = StakeAccount::try_from(stake_account)?; // Sanity check that the delegation is consistent with what is // stored in the account. - if stake_account.delegation() == *delegation { + if stake_account.delegation() == delegation { map.insert(*pubkey, stake_account); Ok(map) } else { @@ -527,12 +530,15 @@ impl StakesEnum { } } +/// This conversion is very memory intensive so should only be used in +/// development contexts. +#[cfg(feature = "dev-context-only-utils")] impl From> for Stakes { fn from(stakes: Stakes) -> Self { let stake_delegations = stakes .stake_delegations .into_iter() - .map(|(pubkey, stake_account)| (pubkey, stake_account.delegation())) + .map(|(pubkey, stake_account)| (pubkey, *stake_account.delegation())) .collect(); Self { vote_accounts: stakes.vote_accounts, @@ -581,6 +587,9 @@ impl<'a> From<&'a Stakes> for Stakes<&'a Stake> { } } +/// This conversion is memory intensive so should only be used in development +/// contexts. +#[cfg(feature = "dev-context-only-utils")] impl From> for Stakes { fn from(stakes: Stakes) -> Self { let stake_delegations = stakes @@ -598,6 +607,9 @@ impl From> for Stakes { } } +/// This conversion is memory intensive so should only be used in development +/// contexts. +#[cfg(feature = "dev-context-only-utils")] impl From for Stakes { fn from(stakes: StakesEnum) -> Self { match stakes { @@ -641,36 +653,6 @@ impl PartialEq for StakesEnum { } } -// In order to maintain backward compatibility, the StakesEnum in EpochStakes -// and SerializableVersionedBank should be serialized as Stakes. 
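The backward-compatibility requirement stated in the comment above is met with serde's `#[serde(with = ...)]` hook: every in-memory variant is funneled through one canonical wire shape. A stripped-down sketch of the pattern (toy `Wire`/`Fast` types standing in for `Stakes<Delegation>` and the richer variants; none of these names come from this patch):

    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    #[derive(Clone, Serialize, Deserialize)]
    struct Wire(u64); // canonical on-the-wire form

    #[derive(Clone)]
    enum Value {
        Wire(Wire),
        Fast(Vec<u64>), // richer in-memory form
    }

    mod serde_value_compat {
        use super::*;

        pub fn serialize<S: Serializer>(value: &Value, serializer: S) -> Result<S::Ok, S::Error> {
            // Convert richer variants to the canonical form before serializing,
            // so old and new readers see identical bytes.
            match value {
                Value::Wire(wire) => wire.serialize(serializer),
                Value::Fast(fast) => Wire(fast.iter().sum()).serialize(serializer),
            }
        }

        pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Value, D::Error> {
            // Always deserialize into the canonical variant.
            Wire::deserialize(deserializer).map(Value::Wire)
        }
    }

    #[derive(Serialize, Deserialize)]
    struct Container {
        #[serde(with = "serde_value_compat")]
        value: Value,
    }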
-pub(crate) mod serde_stakes_enum_compat {
-    use {
-        super::*,
-        serde::{Deserialize, Deserializer, Serialize, Serializer},
-    };
-
-    pub(crate) fn serialize<S>(stakes: &StakesEnum, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        match stakes {
-            StakesEnum::Delegations(stakes) => stakes.serialize(serializer),
-            stakes => {
-                let stakes = Stakes::<Delegation>::from(stakes.clone());
-                stakes.serialize(serializer)
-            }
-        }
-    }
-
-    pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result<Arc<StakesEnum>, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let stakes = Stakes::<Delegation>::deserialize(deserializer)?;
-        Ok(Arc::new(StakesEnum::Delegations(stakes)))
-    }
-}
-
 fn refresh_vote_accounts(
     thread_pool: &ThreadPool,
     epoch: Epoch,
@@ -716,7 +698,6 @@ pub(crate) mod tests {
     use {
         super::*,
-        rand::Rng,
         rayon::ThreadPoolBuilder,
         solana_sdk::{account::WritableAccount, pubkey::Pubkey, rent::Rent, stake},
         solana_stake_program::stake_state,
@@ -1143,63 +1124,4 @@ pub(crate) mod tests {
             );
         }
     }
-
-    #[test]
-    fn test_serde_stakes_enum_compat() {
-        #[derive(Debug, PartialEq, Deserialize, Serialize)]
-        struct Dummy {
-            head: String,
-            #[serde(with = "serde_stakes_enum_compat")]
-            stakes: Arc<StakesEnum>,
-            tail: String,
-        }
-        let mut rng = rand::thread_rng();
-        let stakes_cache = StakesCache::new(Stakes {
-            unused: rng.gen(),
-            epoch: rng.gen(),
-            ..Stakes::default()
-        });
-        for _ in 0..rng.gen_range(5usize..10) {
-            let vote_pubkey = solana_sdk::pubkey::new_rand();
-            let vote_account = vote_state::create_account(
-                &vote_pubkey,
-                &solana_sdk::pubkey::new_rand(), // node_pubkey
-                rng.gen_range(0..101),           // commission
-                rng.gen_range(0..1_000_000),     // lamports
-            );
-            stakes_cache.check_and_store(&vote_pubkey, &vote_account, None);
-            for _ in 0..rng.gen_range(10usize..20) {
-                let stake_pubkey = solana_sdk::pubkey::new_rand();
-                let rent = Rent::with_slots_per_epoch(rng.gen());
-                let stake_account = stake_state::create_account(
-                    &stake_pubkey, // authorized
-                    &vote_pubkey,
-                    &vote_account,
-                    &rent,
-                    rng.gen_range(0..1_000_000), // lamports
-                );
-                stakes_cache.check_and_store(&stake_pubkey, &stake_account, None);
-            }
-        }
-        let stakes: Stakes<StakeAccount> = stakes_cache.stakes().clone();
-        assert!(stakes.vote_accounts.as_ref().len() >= 5);
-        assert!(stakes.stake_delegations.len() >= 50);
-        let dummy = Dummy {
-            head: String::from("dummy-head"),
-            stakes: Arc::new(StakesEnum::from(stakes.clone())),
-            tail: String::from("dummy-tail"),
-        };
-        assert!(dummy.stakes.vote_accounts().as_ref().len() >= 5);
-        let data = bincode::serialize(&dummy).unwrap();
-        let other: Dummy = bincode::deserialize(&data).unwrap();
-        assert_eq!(other, dummy);
-        let stakes = Stakes::<Delegation>::from(stakes);
-        assert!(stakes.vote_accounts.as_ref().len() >= 5);
-        assert!(stakes.stake_delegations.len() >= 50);
-        let other = match &*other.stakes {
-            StakesEnum::Accounts(_) | StakesEnum::Stakes(_) => panic!("wrong type!"),
-            StakesEnum::Delegations(delegations) => delegations,
-        };
-        assert_eq!(other, &stakes)
-    }
 }
diff --git a/runtime/src/stakes/serde_stakes.rs b/runtime/src/stakes/serde_stakes.rs
new file mode 100644
index 00000000000000..c96cef1b4327ae
--- /dev/null
+++ b/runtime/src/stakes/serde_stakes.rs
@@ -0,0 +1,211 @@
+use {
+    super::{StakeAccount, Stakes, StakesEnum},
+    crate::stake_history::StakeHistory,
+    im::HashMap as ImHashMap,
+    serde::{ser::SerializeMap, Serialize, Serializer},
+    solana_sdk::{clock::Epoch, pubkey::Pubkey, stake::state::Delegation},
+    solana_stake_program::stake_state::Stake,
+    solana_vote::vote_account::VoteAccounts,
+    std::sync::Arc,
+};
+
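The wrapper types defined in this new file let serialization stream entries straight out of the backing `im::HashMap`, rather than first cloning everything into a `HashMap<Pubkey, Delegation>` (the memory-intensive conversions now gated behind `dev-context-only-utils`). A minimal sketch of that streaming idea with toy types (not the real wrappers):

    use serde::ser::{Serialize, SerializeMap, Serializer};
    use std::collections::HashMap;

    struct Entry {
        delegation: u64,  // stands in for the real `Delegation` payload
        _extra: [u8; 64], // fields we do not want on the wire
    }

    struct AsDelegations<'a>(&'a HashMap<u32, Entry>);

    impl Serialize for AsDelegations<'_> {
        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
            // Stream one entry at a time; nothing is cloned or collected first.
            let mut map = serializer.serialize_map(Some(self.0.len()))?;
            for (key, entry) in self.0.iter() {
                map.serialize_entry(key, &entry.delegation)?;
            }
            map.end()
        }
    }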
+// In order to maintain backward compatibility, the StakesEnum in EpochStakes +// and SerializableVersionedBank should be serialized as Stakes. +pub(crate) mod serde_stakes_enum_compat { + use { + super::*, + serde::{Deserialize, Deserializer, Serialize, Serializer}, + }; + + pub(crate) fn serialize(stakes: &StakesEnum, serializer: S) -> Result + where + S: Serializer, + { + match stakes { + StakesEnum::Delegations(stakes) => stakes.serialize(serializer), + StakesEnum::Stakes(stakes) => serialize_stakes_as_delegations(stakes, serializer), + StakesEnum::Accounts(stakes) => { + serialize_stake_accounts_as_delegations(stakes, serializer) + } + } + } + + pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let stakes = Stakes::::deserialize(deserializer)?; + Ok(Arc::new(StakesEnum::Delegations(stakes))) + } +} + +fn serialize_stakes_as_delegations( + stakes: &Stakes, + serializer: S, +) -> Result { + SerdeStakeVariantStakes::from(stakes.clone()).serialize(serializer) +} + +fn serialize_stake_accounts_as_delegations( + stakes: &Stakes, + serializer: S, +) -> Result { + SerdeStakeAccountVariantStakes::from(stakes.clone()).serialize(serializer) +} + +impl From> for SerdeStakeVariantStakes { + fn from(stakes: Stakes) -> Self { + let Stakes { + vote_accounts, + stake_delegations, + unused, + epoch, + stake_history, + } = stakes; + + Self { + vote_accounts, + stake_delegations: SerdeStakeMapWrapper(stake_delegations), + unused, + epoch, + stake_history, + } + } +} + +impl From> for SerdeStakeAccountVariantStakes { + fn from(stakes: Stakes) -> Self { + let Stakes { + vote_accounts, + stake_delegations, + unused, + epoch, + stake_history, + } = stakes; + + Self { + vote_accounts, + stake_delegations: SerdeStakeAccountMapWrapper(stake_delegations), + unused, + epoch, + stake_history, + } + } +} + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Serialize)] +struct SerdeStakeVariantStakes { + vote_accounts: VoteAccounts, + stake_delegations: SerdeStakeMapWrapper, + unused: u64, + epoch: Epoch, + stake_history: StakeHistory, +} + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Serialize)] +struct SerdeStakeAccountVariantStakes { + vote_accounts: VoteAccounts, + stake_delegations: SerdeStakeAccountMapWrapper, + unused: u64, + epoch: Epoch, + stake_history: StakeHistory, +} + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +struct SerdeStakeMapWrapper(ImHashMap); +impl Serialize for SerdeStakeMapWrapper { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut s = serializer.serialize_map(Some(self.0.len()))?; + for (pubkey, stake) in self.0.iter() { + s.serialize_entry(pubkey, &stake.delegation)?; + } + s.end() + } +} + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +struct SerdeStakeAccountMapWrapper(ImHashMap); +impl Serialize for SerdeStakeAccountMapWrapper { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut s = serializer.serialize_map(Some(self.0.len()))?; + for (pubkey, stake_account) in self.0.iter() { + s.serialize_entry(pubkey, stake_account.delegation())?; + } + s.end() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, crate::stakes::StakesCache, rand::Rng, solana_sdk::rent::Rent, + solana_stake_program::stake_state, solana_vote_program::vote_state, + }; + + #[test] + fn test_serde_stakes_enum_compat() { + #[derive(Debug, PartialEq, Deserialize, Serialize)] + struct Dummy { + head: String, + #[serde(with = 
"serde_stakes_enum_compat")] + stakes: Arc, + tail: String, + } + let mut rng = rand::thread_rng(); + let stakes_cache = StakesCache::new(Stakes { + unused: rng.gen(), + epoch: rng.gen(), + ..Stakes::default() + }); + for _ in 0..rng.gen_range(5usize..10) { + let vote_pubkey = solana_sdk::pubkey::new_rand(); + let vote_account = vote_state::create_account( + &vote_pubkey, + &solana_sdk::pubkey::new_rand(), // node_pubkey + rng.gen_range(0..101), // commission + rng.gen_range(0..1_000_000), // lamports + ); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, None); + for _ in 0..rng.gen_range(10usize..20) { + let stake_pubkey = solana_sdk::pubkey::new_rand(); + let rent = Rent::with_slots_per_epoch(rng.gen()); + let stake_account = stake_state::create_account( + &stake_pubkey, // authorized + &vote_pubkey, + &vote_account, + &rent, + rng.gen_range(0..1_000_000), // lamports + ); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, None); + } + } + let stakes: Stakes = stakes_cache.stakes().clone(); + assert!(stakes.vote_accounts.as_ref().len() >= 5); + assert!(stakes.stake_delegations.len() >= 50); + let dummy = Dummy { + head: String::from("dummy-head"), + stakes: Arc::new(StakesEnum::from(stakes.clone())), + tail: String::from("dummy-tail"), + }; + assert!(dummy.stakes.vote_accounts().as_ref().len() >= 5); + let data = bincode::serialize(&dummy).unwrap(); + let other: Dummy = bincode::deserialize(&data).unwrap(); + assert_eq!(other, dummy); + let stakes = Stakes::::from(stakes); + assert!(stakes.vote_accounts.as_ref().len() >= 5); + assert!(stakes.stake_delegations.len() >= 50); + let other = match &*other.stakes { + StakesEnum::Accounts(_) | StakesEnum::Stakes(_) => panic!("wrong type!"), + StakesEnum::Delegations(delegations) => delegations, + }; + assert_eq!(other, &stakes) + } +} diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index 26673d0c7a8378..685b134b15c262 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -227,6 +227,13 @@ impl StakeStateV2 { } } + pub fn delegation_ref(&self) -> Option<&Delegation> { + match self { + StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(&stake.delegation), + _ => None, + } + } + pub fn authorized(&self) -> Option { match self { StakeStateV2::Stake(meta, _stake, _stake_flags) => Some(meta.authorized), From 77ea09a74fcb1650f61ab66c25207036b843b7a0 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 8 Aug 2024 20:18:12 +0400 Subject: [PATCH 052/529] Only run frozen-abi build script when needed (#2443) * add cfg(feature = "frozen-abi") to build.rs * only depend on rustc_version when frozen-abi feature is activated * remove extraneous dirs that snuck in from another branch * update perf/build.rs as it's different from the standard build script * use symlink for svm/build.rs * remove unused build dep rustc_version from wen-restart * fmt Cargo.toml --- Cargo.lock | 1 - accounts-db/Cargo.toml | 3 ++- bloom/Cargo.toml | 3 ++- builtins-default-costs/Cargo.toml | 3 ++- cargo-registry/Cargo.toml | 2 +- compute-budget/Cargo.toml | 3 ++- core/Cargo.toml | 3 ++- cost-model/Cargo.toml | 3 ++- curves/secp256k1-recover/Cargo.toml | 4 ++-- frozen-abi/Cargo.toml | 5 +++++ frozen-abi/build.rs | 3 +++ frozen-abi/macro/Cargo.toml | 5 +++++ gossip/Cargo.toml | 3 ++- ledger/Cargo.toml | 3 ++- perf/Cargo.toml | 3 ++- perf/build.rs | 3 +++ program-runtime/Cargo.toml | 3 ++- programs/address-lookup-table/Cargo.toml | 2 +- programs/sbf/Cargo.lock | 23 ----------------------- 
programs/stake-tests/Cargo.toml | 2 +- programs/stake/Cargo.toml | 2 +- programs/vote/Cargo.toml | 3 ++- runtime-transaction/Cargo.toml | 2 +- runtime/Cargo.toml | 3 ++- sdk/Cargo.toml | 3 ++- sdk/program/Cargo.toml | 13 +++++++++---- short-vec/Cargo.toml | 4 ++-- svm/Cargo.toml | 3 ++- svm/build.rs | 23 +---------------------- version/Cargo.toml | 3 ++- vote/Cargo.toml | 3 ++- wen-restart/Cargo.toml | 1 - wen-restart/build.rs | 2 -- 33 files changed, 67 insertions(+), 78 deletions(-) mode change 100644 => 120000 svm/build.rs diff --git a/Cargo.lock b/Cargo.lock index 0455ad9a1f632b..1f2780297b29fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8047,7 +8047,6 @@ dependencies = [ "protobuf-src", "rand 0.8.5", "rayon", - "rustc_version 0.4.0", "serial_test", "solana-accounts-db", "solana-entry", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index eada1ee5e3375e..8a6d7e4d32160a 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -80,7 +80,7 @@ test-case = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = [ @@ -89,6 +89,7 @@ dev-context-only-utils = [ "dep:solana-vote-program", ] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index 640dfbb644a173..5234fb506d90aa 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -32,10 +32,11 @@ name = "solana_bloom" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/builtins-default-costs/Cargo.toml b/builtins-default-costs/Cargo.toml index 4b2c5bad2f33e4..ddc5f06a6d68ee 100644 --- a/builtins-default-costs/Cargo.toml +++ b/builtins-default-costs/Cargo.toml @@ -36,10 +36,11 @@ rand = "0.8.5" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "solana-vote-program/frozen-abi", ] diff --git a/cargo-registry/Cargo.toml b/cargo-registry/Cargo.toml index 4b9ca6c0c61881..3224845067a87a 100644 --- a/cargo-registry/Cargo.toml +++ b/cargo-registry/Cargo.toml @@ -40,7 +40,7 @@ toml = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = [] diff --git a/compute-budget/Cargo.toml b/compute-budget/Cargo.toml index 211aa9394806b4..55229cf808b4f0 100644 --- a/compute-budget/Cargo.toml +++ b/compute-budget/Cargo.toml @@ -14,10 +14,11 @@ solana-frozen-abi = { workspace = true, optional = true } solana-sdk = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "solana-sdk/frozen-abi", ] diff --git a/core/Cargo.toml b/core/Cargo.toml index e3dd1bdf1f629f..8d96a2a1b0d1b8 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -117,11 +117,12 @@ test-case = { workspace = true } sysctl = { workspace = true } [build-dependencies] -rustc_version = { workspace 
= true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = [] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-accounts-db/frozen-abi", diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 366a27cebe7575..c33ba7a5415963 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -39,10 +39,11 @@ test-case = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-compute-budget/frozen-abi", diff --git a/curves/secp256k1-recover/Cargo.toml b/curves/secp256k1-recover/Cargo.toml index 545fb073e9e6ae..90208106bb5475 100644 --- a/curves/secp256k1-recover/Cargo.toml +++ b/curves/secp256k1-recover/Cargo.toml @@ -29,11 +29,11 @@ borsh = { workspace = true } libsecp256k1 = { workspace = true, features = ["hmac"] } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] borsh = ["dep:borsh"] -frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +frozen-abi = ["dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index fa850652f84cec..5c71ec5c2b4596 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -31,3 +31,8 @@ solana-logger = { workspace = true } [build-dependencies] rustc_version = { workspace = true } + +[features] +default = ["frozen-abi"] +# no reason to deactivate this. It's needed because the build.rs is reused elsewhere +frozen-abi = [] diff --git a/frozen-abi/build.rs b/frozen-abi/build.rs index e17ca70cb4718b..a95ef31ad70f65 100644 --- a/frozen-abi/build.rs +++ b/frozen-abi/build.rs @@ -1,7 +1,10 @@ +#[cfg(feature = "frozen-abi")] extern crate rustc_version; +#[cfg(feature = "frozen-abi")] use rustc_version::{version_meta, Channel}; fn main() { + #[cfg(feature = "frozen-abi")] // Copied and adapted from // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example // Licensed under Apache-2.0 + MIT diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index eab2c15fe71bf1..fe3c79d4404cbe 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -19,3 +19,8 @@ syn = { workspace = true, features = ["full", "extra-traits"] } [build-dependencies] rustc_version = { workspace = true } + +[features] +default = ["frozen-abi"] +# no reason to deactivate this. 
It's needed because the build.rs is reused elsewhere +frozen-abi = [] diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 7720073236bad1..96f54442e7c98d 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -61,10 +61,11 @@ solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } test-case = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-bloom/frozen-abi", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 61ab02470b5f05..c21f8028bdf95e 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -89,11 +89,12 @@ spl-pod = { workspace = true } test-case = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = [] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-runtime/frozen-abi", diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 71a213e7693d5c..4cf6e49d2f4f72 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -44,10 +44,11 @@ solana-logger = { workspace = true } test-case = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/perf/build.rs b/perf/build.rs index eef20dd887bc42..a38f6c73307cd7 100644 --- a/perf/build.rs +++ b/perf/build.rs @@ -1,4 +1,6 @@ +#[cfg(feature = "frozen-abi")] extern crate rustc_version; +#[cfg(feature = "frozen-abi")] use rustc_version::{version_meta, Channel}; fn main() { @@ -15,6 +17,7 @@ fn main() { // Copied and adapted from // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example // Licensed under Apache-2.0 + MIT + #[cfg(feature = "frozen-abi")] match version_meta().unwrap().channel { Channel::Stable => { println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index 0f9a22f7cd8d4a..1b6b914835c07f 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -48,10 +48,11 @@ name = "solana_program_runtime" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-compute-budget/frozen-abi", diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index f721fcef11430d..1c61a6bdf466d2 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -24,7 +24,7 @@ solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [lib] crate-type = ["lib"] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 91b3667d25e861..c447d919b6211b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4505,7 +4505,6 @@ dependencies = [ "num_enum", "rand 0.8.5", "rayon", - "rustc_version", "seqlock", "serde", "serde_derive", @@ -4534,7 +4533,6 @@ dependencies = [ "log", 
"num-derive", "num-traits", - "rustc_version", "solana-log-collector", "solana-program", "solana-program-runtime", @@ -4600,7 +4598,6 @@ dependencies = [ "fnv", "log", "rand 0.8.5", - "rustc_version", "serde", "serde_derive", "solana-sanitize", @@ -4668,7 +4665,6 @@ dependencies = [ "ahash 0.8.10", "lazy_static", "log", - "rustc_version", "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-compute-budget-program", @@ -4769,7 +4765,6 @@ dependencies = [ name = "solana-compute-budget" version = "2.1.0" dependencies = [ - "rustc_version", "solana-sdk", ] @@ -4844,7 +4839,6 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustc_version", "rustls", "serde", "serde_bytes", @@ -4904,7 +4898,6 @@ dependencies = [ "ahash 0.8.10", "lazy_static", "log", - "rustc_version", "solana-builtins-default-costs", "solana-compute-budget", "solana-metrics", @@ -5051,7 +5044,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version", "serde", "serde_bytes", "serde_derive", @@ -5129,7 +5121,6 @@ dependencies = [ "rayon", "reed-solomon-erasure", "rocksdb", - "rustc_version", "scopeguard", "serde", "serde_bytes", @@ -5272,7 +5263,6 @@ dependencies = [ "nix", "rand 0.8.5", "rayon", - "rustc_version", "serde", "solana-metrics", "solana-rayon-threadlimit", @@ -5334,7 +5324,6 @@ dependencies = [ "num-traits", "parking_lot 0.12.2", "rand 0.8.5", - "rustc_version", "serde", "serde_bytes", "serde_derive", @@ -5375,7 +5364,6 @@ dependencies = [ "num-traits", "percentage", "rand 0.8.5", - "rustc_version", "serde", "solana-compute-budget", "solana-log-collector", @@ -5643,7 +5631,6 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", - "rustc_version", "serde", "serde_derive", "serde_json", @@ -5692,7 +5679,6 @@ name = "solana-runtime-transaction" version = "2.1.0" dependencies = [ "log", - "rustc_version", "solana-compute-budget", "solana-sdk", "thiserror", @@ -6206,7 +6192,6 @@ dependencies = [ "qualifier_attr", "rand 0.7.3", "rand 0.8.5", - "rustc_version", "serde", "serde_bytes", "serde_derive", @@ -6244,7 +6229,6 @@ version = "2.1.0" dependencies = [ "borsh 1.5.1", "libsecp256k1 0.6.0", - "rustc_version", "solana-define-syscall", "thiserror", ] @@ -6274,7 +6258,6 @@ dependencies = [ name = "solana-short-vec" version = "2.1.0" dependencies = [ - "rustc_version", "serde", ] @@ -6284,7 +6267,6 @@ version = "2.1.0" dependencies = [ "bincode", "log", - "rustc_version", "solana-config-program", "solana-log-collector", "solana-program-runtime", @@ -6382,7 +6364,6 @@ dependencies = [ "log", "percentage", "qualifier_attr", - "rustc_version", "serde", "serde_derive", "solana-bpf-loader-program", @@ -6626,7 +6607,6 @@ name = "solana-version" version = "2.1.0" dependencies = [ "log", - "rustc_version", "semver", "serde", "serde_derive", @@ -6640,7 +6620,6 @@ version = "2.1.0" dependencies = [ "itertools 0.12.1", "log", - "rustc_version", "serde", "serde_derive", "solana-sdk", @@ -6655,7 +6634,6 @@ dependencies = [ "log", "num-derive", "num-traits", - "rustc_version", "serde", "serde_derive", "solana-metrics", @@ -6676,7 +6654,6 @@ dependencies = [ "prost-types", "protobuf-src", "rayon", - "rustc_version", "solana-entry", "solana-gossip", "solana-ledger", diff --git a/programs/stake-tests/Cargo.toml b/programs/stake-tests/Cargo.toml index 4c89f4dd6d0fbf..7e3a461ba09e2b 100644 --- a/programs/stake-tests/Cargo.toml +++ b/programs/stake-tests/Cargo.toml @@ -20,7 +20,7 @@ solana-vote-program = { workspace = true } test-case = { workspace = true } [build-dependencies] 
-rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 41eb6e6b43b9fb..e14fc532d34bca 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -27,7 +27,7 @@ solana-logger = { workspace = true } test-case = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [lib] crate-type = ["lib"] diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index 220175951cac7b..58a21e6fd8443f 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -30,7 +30,7 @@ solana-logger = { workspace = true } test-case = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [lib] crate-type = ["lib"] @@ -41,6 +41,7 @@ targets = ["x86_64-unknown-linux-gnu"] [features] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-program/frozen-abi", diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index f6c89438c1ede3..22635d6c121159 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -28,4 +28,4 @@ solana-program ={ workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 37beded4870b0e..b5df2c8bcded91 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -110,11 +110,12 @@ test-case = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = ["solana-svm/dev-context-only-utils"] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-accounts-db/frozen-abi", diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 7db5d780c76b07..d19d16a5a3fc71 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -37,6 +37,7 @@ full = [ borsh = ["dep:borsh", "solana-program/borsh", "solana-secp256k1-recover/borsh"] dev-context-only-utils = ["qualifier_attr"] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-program/frozen-abi", @@ -111,7 +112,7 @@ static_assertions = { workspace = true } tiny-bip39 = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 954c2fa6b6511e..cc220b1711db65 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -33,8 +33,8 @@ sha2 = { workspace = true } sha3 = { workspace = true } solana-atomic-u64 = { workspace = true } solana-decode-error = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = ["frozen-abi"] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = ["frozen-abi"] } solana-msg = { workspace = true } solana-program-memory = { workspace = true } solana-sanitize = { workspace = true } @@ -86,7 +86,7 @@ 
static_assertions = { workspace = true } test-case = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -98,4 +98,9 @@ crate-type = ["cdylib", "rlib"] default = ["borsh"] borsh = ["dep:borsh", "dep:borsh0-10"] dev-context-only-utils = ["dep:qualifier_attr"] -frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-short-vec/frozen-abi"] +frozen-abi = [ + "dep:rustc_version", + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", + "solana-short-vec/frozen-abi", +] diff --git a/short-vec/Cargo.toml b/short-vec/Cargo.toml index 32af444ac95e9c..2b0c7baf5f12ce 100644 --- a/short-vec/Cargo.toml +++ b/short-vec/Cargo.toml @@ -10,7 +10,7 @@ license = { workspace = true } edition = { workspace = true } [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [dependencies] serde = { workspace = true } @@ -23,7 +23,7 @@ bincode = { workspace = true } serde_json = { workspace = true } [features] -frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +frozen-abi = ["dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 5f1cbd544eed10..e387a21eeedf6c 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -58,11 +58,12 @@ solana-svm-conformance = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-compute-budget/frozen-abi", diff --git a/svm/build.rs b/svm/build.rs deleted file mode 100644 index e17ca70cb4718b..00000000000000 --- a/svm/build.rs +++ /dev/null @@ -1,22 +0,0 @@ -extern crate rustc_version; -use rustc_version::{version_meta, Channel}; - -fn main() { - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - } -} diff --git a/svm/build.rs b/svm/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/svm/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/version/Cargo.toml b/version/Cargo.toml index 427c05ca7a0019..7c8ae8f6820155 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -22,6 +22,7 @@ solana-sdk = { workspace = true } [features] dummy-for-ci-check = [] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", @@ -34,4 +35,4 @@ name = "solana_version" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } diff --git a/vote/Cargo.toml b/vote/Cargo.toml index 0d85ccda55e02a..89c4808ede0ed7 100644 --- 
a/vote/Cargo.toml +++ b/vote/Cargo.toml @@ -31,11 +31,12 @@ rand = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -rustc_version = { workspace = true } +rustc_version = { workspace = true, optional = true } [features] dev-context-only-utils = [] frozen-abi = [ + "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml index 25823c9d777855..f5fbf99a1e20c8 100644 --- a/wen-restart/Cargo.toml +++ b/wen-restart/Cargo.toml @@ -38,7 +38,6 @@ tempfile = { workspace = true } [build-dependencies] prost-build = { workspace = true } -rustc_version = { workspace = true } # windows users should install the protobuf compiler manually and set the PROTOC # envar to point to the installed binary diff --git a/wen-restart/build.rs b/wen-restart/build.rs index 30fdc64a9bcfe0..9b2f80c51de392 100644 --- a/wen-restart/build.rs +++ b/wen-restart/build.rs @@ -1,5 +1,3 @@ -extern crate rustc_version; - use std::io::Result; fn main() -> Result<()> { From 776544c47a22fbd31ccec2fea0796dfaed629a4a Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 8 Aug 2024 14:20:45 -0500 Subject: [PATCH 053/529] TransactionView: InstructionsMeta (#2476) --- transaction-view/src/instructions_meta.rs | 167 ++++++++++++++++++++++ transaction-view/src/lib.rs | 2 + 2 files changed, 169 insertions(+) create mode 100644 transaction-view/src/instructions_meta.rs diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_meta.rs new file mode 100644 index 00000000000000..ad380f1724548a --- /dev/null +++ b/transaction-view/src/instructions_meta.rs @@ -0,0 +1,167 @@ +use crate::{ + bytes::{advance_offset_for_array, check_remaining, optimized_read_compressed_u16, read_byte}, + result::Result, +}; + +/// Contains metadata about the instructions in a transaction packet. +#[derive(Default)] +pub struct InstructionsMeta { + /// The number of instructions in the transaction. + pub(crate) num_instructions: u16, + /// The offset to the first instruction in the transaction. + pub(crate) offset: u16, +} + +impl InstructionsMeta { + /// Get the number of instructions and offset to the first instruction. + /// The offset will be updated to point to the first byte after the last + /// instruction. + /// This function will parse each individual instruction to ensure the + /// instruction data is well-formed, but will not cache data related to + /// these instructions. + pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { + // Read the number of instructions at the current offset. + // Each instruction needs at least 3 bytes, so do a sanity check here to + // ensure we have enough bytes to read the number of instructions. + let num_instructions = optimized_read_compressed_u16(bytes, offset)?; + check_remaining( + bytes, + *offset, + 3usize.wrapping_mul(usize::from(num_instructions)), + )?; + + // We know the offset does not exceed packet length, and our packet + // length is less than u16::MAX, so we can safely cast to u16. + let instructions_offset = *offset as u16; + + // The instructions do not have a fixed size. So we must iterate over + // each instruction to find the total size of the instructions, + // and check for any malformed instructions or buffer overflows. + for _index in 0..num_instructions { + // Each instruction has 3 pieces: + // 1. Program ID index (u8) + // 2. Accounts indexes ([u8]) + // 3. Data ([u8]) + + // Read the program ID index. 
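+            // (The account-count and data-length fields read below use the
+            // compact-u16 / ShortU16 encoding, a little-endian base-128
+            // varint, which is why `optimized_read_compressed_u16` is used
+            // rather than a fixed-width read.)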
+            let _program_id_index = read_byte(bytes, offset)?;
+
+            // Read the number of account indexes, and then update the offset
+            // to skip over the account indexes.
+            let num_accounts = optimized_read_compressed_u16(bytes, offset)?;
+            advance_offset_for_array::<u8>(bytes, offset, num_accounts)?;
+
+            // Read the length of the data, and then update the offset to skip
+            // over the data.
+            let data_len = optimized_read_compressed_u16(bytes, offset)?;
+            advance_offset_for_array::<u8>(bytes, offset, data_len)?
+        }
+
+        Ok(Self {
+            num_instructions,
+            offset: instructions_offset,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        solana_sdk::{instruction::CompiledInstruction, short_vec::ShortVec},
+    };
+
+    #[test]
+    fn test_zero_instructions() {
+        let bytes = bincode::serialize(&ShortVec(Vec::<CompiledInstruction>::new())).unwrap();
+        let mut offset = 0;
+        let instructions_meta = InstructionsMeta::try_new(&bytes, &mut offset).unwrap();
+
+        assert_eq!(instructions_meta.num_instructions, 0);
+        assert_eq!(instructions_meta.offset, 1);
+        assert_eq!(offset, bytes.len());
+    }
+
+    #[test]
+    fn test_num_instructions_too_high() {
+        let mut bytes = bincode::serialize(&ShortVec(vec![CompiledInstruction {
+            program_id_index: 0,
+            accounts: vec![],
+            data: vec![],
+        }]))
+        .unwrap();
+        // modify the number of instructions to be too high
+        bytes[0] = 0x02;
+        let mut offset = 0;
+        assert!(InstructionsMeta::try_new(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_single_instruction() {
+        let bytes = bincode::serialize(&ShortVec(vec![CompiledInstruction {
+            program_id_index: 0,
+            accounts: vec![1, 2, 3],
+            data: vec![4, 5, 6, 7, 8, 9, 10],
+        }]))
+        .unwrap();
+        let mut offset = 0;
+        let instructions_meta = InstructionsMeta::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(instructions_meta.num_instructions, 1);
+        assert_eq!(instructions_meta.offset, 1);
+        assert_eq!(offset, bytes.len());
+    }
+
+    #[test]
+    fn test_multiple_instructions() {
+        let bytes = bincode::serialize(&ShortVec(vec![
+            CompiledInstruction {
+                program_id_index: 0,
+                accounts: vec![1, 2, 3],
+                data: vec![4, 5, 6, 7, 8, 9, 10],
+            },
+            CompiledInstruction {
+                program_id_index: 1,
+                accounts: vec![4, 5, 6],
+                data: vec![7, 8, 9, 10, 11, 12, 13],
+            },
+        ]))
+        .unwrap();
+        let mut offset = 0;
+        let instructions_meta = InstructionsMeta::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(instructions_meta.num_instructions, 2);
+        assert_eq!(instructions_meta.offset, 1);
+        assert_eq!(offset, bytes.len());
+    }
+
+    #[test]
+    fn test_invalid_instruction_accounts_vec() {
+        let mut bytes = bincode::serialize(&ShortVec(vec![CompiledInstruction {
+            program_id_index: 0,
+            accounts: vec![1, 2, 3],
+            data: vec![4, 5, 6, 7, 8, 9, 10],
+        }]))
+        .unwrap();
+
+        // modify the number of accounts to be too high
+        bytes[2] = 127;
+
+        let mut offset = 0;
+        assert!(InstructionsMeta::try_new(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_invalid_instruction_data_vec() {
+        let mut bytes = bincode::serialize(&ShortVec(vec![CompiledInstruction {
+            program_id_index: 0,
+            accounts: vec![1, 2, 3],
+            data: vec![4, 5, 6, 7, 8, 9, 10],
+        }]))
+        .unwrap();
+
+        // modify the number of data bytes to be too high
+        bytes[6] = 127;
+
+        let mut offset = 0;
+        assert!(InstructionsMeta::try_new(&bytes, &mut offset).is_err());
+    }
+}
diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs
index aa16270fb7313d..145b4df16ed06c 100644
--- a/transaction-view/src/lib.rs
+++ b/transaction-view/src/lib.rs
@@ -6,6 +6,8 @@ pub mod bytes;
 #[allow(dead_code)]
 mod bytes;
+#[allow(dead_code)] +mod instructions_meta; #[allow(dead_code)] mod message_header_meta; pub mod result; From fd3ee545e4f0f68eab0f574a0ff00fa1ea5e939c Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 8 Aug 2024 16:09:23 -0400 Subject: [PATCH 054/529] hash-cache-tool: Scan files with mmap and bins (#2504) --- .../accounts-hash-cache-tool/src/main.rs | 117 ++++++++++++++---- accounts-db/src/lib.rs | 2 +- accounts-db/src/pubkey_bins.rs | 4 +- 3 files changed, 93 insertions(+), 30 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 5d8d8b9e9fd049..98b9f914143f11 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -7,14 +7,16 @@ use { }, memmap2::Mmap, solana_accounts_db::{ - accounts_hash::AccountHash, parse_cache_hash_data_filename, CacheHashDataFileEntry, - CacheHashDataFileHeader, ParsedCacheHashDataFilename, + accounts_hash::AccountHash, parse_cache_hash_data_filename, + pubkey_bins::PubkeyBinCalculator24, CacheHashDataFileEntry, CacheHashDataFileHeader, + ParsedCacheHashDataFilename, }, solana_program::pubkey::Pubkey, std::{ cmp::{self, Ordering}, fs::{self, File, Metadata}, io::{self, BufReader, Read}, + iter, mem::size_of, num::Saturating, path::{Path, PathBuf}, @@ -295,10 +297,7 @@ fn do_diff_dirs( dir2: impl AsRef, then_diff_files: bool, ) -> Result<(), String> { - let _timer = ElapsedOnDrop { - message: "diffing directories took ".to_string(), - start: Instant::now(), - }; + let _timer = ElapsedOnDrop::new("diffing directories took "); let files1 = get_cache_files_in(dir1) .map_err(|err| format!("failed to get cache files in dir1: {err}"))?; @@ -358,10 +357,10 @@ fn do_diff_dirs( } // if the file headers have different entry counts, they are not equal - let Ok((mmap1, header1)) = map_file(&file1.path, false) else { + let Ok((mmap1, header1)) = mmap_file(&file1.path, false) else { return false; }; - let Ok((mmap2, header2)) = map_file(&file2.path, false) else { + let Ok((mmap2, header2)) = mmap_file(&file2.path, false) else { return false; }; if header1.count != header2.count { @@ -490,33 +489,81 @@ fn get_cache_files_in(dir: impl AsRef) -> Result, io::E /// /// If there are multiple entries for a pubkey, only the latest is returned. fn extract_latest_entries_in(file: impl AsRef) -> Result { - let force = false; // skipping sanity checks is not supported when extracting entries - let (reader, header) = open_file(&file, force).map_err(|err| { - format!( - "failed to open accounts hash cache file '{}': {err}", - file.as_ref().display(), - ) - })?; + const NUM_BINS: usize = 1; + let BinnedLatestEntriesInfo { + latest_entries, + capitalization, + } = extract_binned_latest_entries_in(iter::once(file), NUM_BINS)?; + assert_eq!(latest_entries.len(), NUM_BINS); + let mut latest_entries = Vec::from(latest_entries); + let latest_entries = latest_entries.pop().unwrap(); - // entries in the file are sorted by pubkey then slot, - // so we want to keep the *last* entry (if there are duplicates) + Ok(LatestEntriesInfo { + latest_entries, + capitalization, + }) +} + +/// Returns the entries in `files`, binned by pubkey, and the capitalization +/// +/// If there are multiple entries for a pubkey, only the latest is returned. +/// +/// Note: `files` must be sorted in ascending order, as insertion order is +/// relied on to guarantee the latest entry is returned. 
+fn extract_binned_latest_entries_in(
+    files: impl IntoIterator<Item = impl AsRef<Path>>,
+    bins: usize,
+) -> Result<BinnedLatestEntriesInfo, String> {
+    let binner = PubkeyBinCalculator24::new(bins);
+    let mut entries: Box<_> = iter::repeat_with(HashMap::default).take(bins).collect();
     let mut capitalization = Saturating(0);
-    let mut entries = HashMap::default();
-    scan_file(reader, header.count, |entry| {
-        capitalization += entry.lamports;
-        let old_value = entries.insert(entry.pubkey, (entry.hash, entry.lamports));
-        if let Some((_, old_lamports)) = old_value {
-            // back out the old value's lamports, so we only keep the latest's for capitalization
-            capitalization -= old_lamports;
+
+    for file in files.into_iter() {
+        let force = false; // skipping sanity checks is not supported when extracting entries
+        let (mmap, header) = mmap_file(&file, force).map_err(|err| {
+            format!(
+                "failed to open accounts hash cache file '{}': {err}",
+                file.as_ref().display(),
+            )
+        })?;
+
+        let num_entries = scan_mmap(&mmap, |entry| {
+            capitalization += entry.lamports;
+            let bin = binner.bin_from_pubkey(&entry.pubkey);
+            let old_value = entries[bin].insert(entry.pubkey, (entry.hash, entry.lamports));
+            if let Some((_, old_lamports)) = old_value {
+                // back out the old value's lamports, so we only keep the latest's for capitalization
+                capitalization -= old_lamports;
+            }
+        });
+
+        if num_entries != header.count {
+            return Err(format!(
+                "mismatched number of entries when scanning '{}': expected: {}, actual: {num_entries}",
+                file.as_ref().display(), header.count,
+            ));
         }
-    })?;
+    }

-    Ok(LatestEntriesInfo {
+    Ok(BinnedLatestEntriesInfo {
         latest_entries: entries,
         capitalization: capitalization.0,
     })
 }

+/// Scans `mmap` and applies `user_fn` to each entry
+fn scan_mmap(mmap: &Mmap, mut user_fn: impl FnMut(&CacheHashDataFileEntry)) -> usize {
+    const SIZE_OF_ENTRY: usize = size_of::<CacheHashDataFileEntry>();
+    let bytes = &mmap[size_of::<CacheHashDataFileHeader>()..];
+    let mut num_entries = Saturating(0);
+    for chunk in bytes.chunks_exact(SIZE_OF_ENTRY) {
+        let entry = bytemuck::from_bytes(chunk);
+        user_fn(entry);
+        num_entries += 1;
+    }
+    num_entries.0
+}
+
 /// Scans file with `reader` and applies `user_fn` to each entry
 ///
 /// NOTE: `reader`'s cursor must already be at the first entry; i.e. *past* the header.
@@ -551,7 +598,7 @@ fn scan_file(
     Ok(())
 }

-fn map_file(
+fn mmap_file(
     path: impl AsRef<Path>,
     force: bool,
 ) -> Result<(Mmap, CacheHashDataFileHeader), String> {
@@ -612,12 +659,28 @@ struct LatestEntriesInfo {
     capitalization: u64, // lamports
 }

+#[derive(Debug)]
+struct BinnedLatestEntriesInfo {
+    latest_entries: Box<[HashMap<Pubkey, (AccountHash, u64)>]>,
+    capitalization: u64, // lamports
+}
+
 #[derive(Debug)]
 struct ElapsedOnDrop {
     message: String,
     start: Instant,
 }

+impl ElapsedOnDrop {
+    #[must_use]
+    fn new(message: impl Into<String>) -> Self {
+        Self {
+            message: message.into(),
+            start: Instant::now(),
+        }
+    }
+}
+
 impl Drop for ElapsedOnDrop {
     fn drop(&mut self) {
         let elapsed = self.start.elapsed();
diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs
index 3cc8f686eff2e0..fccd1d43695732 100644
--- a/accounts-db/src/lib.rs
+++ b/accounts-db/src/lib.rs
@@ -31,7 +31,7 @@ pub mod epoch_accounts_hash;
 mod file_io;
 pub mod hardened_unpack;
 pub mod partitioned_rewards;
-mod pubkey_bins;
+pub mod pubkey_bins;
 mod read_only_accounts_cache;
 mod rolling_bit_field;
 pub mod secondary_index;
diff --git a/accounts-db/src/pubkey_bins.rs b/accounts-db/src/pubkey_bins.rs
index becfca39dd8e71..ec1d2ad62ea880 100644
--- a/accounts-db/src/pubkey_bins.rs
+++ b/accounts-db/src/pubkey_bins.rs
@@ -16,7 +16,7 @@ impl PubkeyBinCalculator24 {
         Self::num_bits::<u32>() as u32 - x.leading_zeros() - 1
     }

-    pub(crate) fn new(bins: usize) -> Self {
+    pub fn new(bins: usize) -> Self {
         const MAX_BITS: u32 = 24;
         assert!(bins > 0);
         let max_plus_1 = 1 << MAX_BITS;
@@ -29,7 +29,7 @@ impl PubkeyBinCalculator24 {
     }

     #[inline]
-    pub(crate) fn bin_from_pubkey(&self, pubkey: &Pubkey) -> usize {
+    pub fn bin_from_pubkey(&self, pubkey: &Pubkey) -> usize {
         let as_ref = pubkey.as_ref();
         ((as_ref[0] as usize) << 16 | (as_ref[1] as usize) << 8 | (as_ref[2] as usize))
             >> self.shift_bits

From 1d825df4e1fce7649ca20bb6f8faba0c87a410e0 Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Thu, 8 Aug 2024 21:01:28 +0000
Subject: [PATCH 055/529] checks for duplicate instances using the new
 ContactInfo (#2506)

Working towards deprecating NodeInstance CRDS value, the commit adds a
check for duplicate instances using the new ContactInfo.

---
 gossip/src/cluster_info.rs | 21 ++++++++-----
 gossip/src/contact_info.rs | 63 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index 0f11489333644d..46b505014bebc3 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -2475,16 +2475,21 @@ impl ClusterInfo {

         // Check if there is a duplicate instance of
         // this node with more recent timestamp.
- let instance = self.instance.read().unwrap(); - let check_duplicate_instance = |values: &[CrdsValue]| { - if should_check_duplicate_instance { - for value in values { - if instance.check_duplicate(value) { - return Err(GossipError::DuplicateNodeInstance); - } + let check_duplicate_instance = { + let instance = self.instance.read().unwrap(); + let my_contact_info = self.my_contact_info(); + move |values: &[CrdsValue]| { + if should_check_duplicate_instance + && values.iter().any(|value| { + instance.check_duplicate(value) + || matches!(&value.data, CrdsData::ContactInfo(other) + if my_contact_info.check_duplicate(other)) + }) + { + return Err(GossipError::DuplicateNodeInstance); } + Ok(()) } - Ok(()) }; let mut pings = Vec::new(); let mut rng = rand::thread_rng(); diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 9a5c1ce495813b..395f485f12516d 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -435,6 +435,14 @@ impl ContactInfo { node.set_serve_repair_quic((addr, port + 4)).unwrap(); node } + + // Returns true if the other contact-info is a duplicate instance of this + // node, with a more recent `outset` timestamp. + #[inline] + #[must_use] + pub(crate) fn check_duplicate(&self, other: &ContactInfo) -> bool { + self.pubkey == other.pubkey && self.outset < other.outset + } } impl Default for ContactInfo { @@ -1016,4 +1024,59 @@ mod tests { Err(Error::InvalidPort(0)) ); } + + #[test] + fn test_check_duplicate() { + let mut rng = rand::thread_rng(); + let mut node = ContactInfo::new( + Keypair::new().pubkey(), + rng.gen(), // wallclock + rng.gen(), // shred_version + ); + // Same contact-info is not a duplicate instance. + { + let other = node.clone(); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Updated socket address is not a duplicate instance. + { + let mut other = node.clone(); + other.set_gossip(new_rand_socket(&mut rng)).unwrap(); + other.set_serve_repair(new_rand_socket(&mut rng)).unwrap(); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + other.remove_serve_repair(); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Updated wallclock is not a duplicate instance. + { + let other = node.clone(); + node.set_wallclock(rng.gen()); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Different pubkey is not a duplicate instance. + { + let other = ContactInfo::new( + Keypair::new().pubkey(), + rng.gen(), // wallclock + rng.gen(), // shred_version + ); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Same pubkey, more recent outset timestamp is a duplicate instance. + { + let other = ContactInfo::new( + node.pubkey, + rng.gen(), // wallclock + rng.gen(), // shred_version + ); + assert!(node.outset < other.outset); + assert!(node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + } } From 39af6c092d375f7d602dfe2a92c9ad1bff5a5548 Mon Sep 17 00:00:00 2001 From: Jon C Date: Thu, 8 Aug 2024 23:25:52 +0200 Subject: [PATCH 056/529] test-validator: Add flag to clone feature set from a cluster (#2480) * test-validator: Add flag to clone feature set from a cluster #### Problem Program devs run into issues when testing, where everything works great with a solana-test-validator, and then fails once they deploy to a real cluster. 
Most of the time this happens because the test validator enables all
features by default, and the program depended on a new feature without
the developer's knowledge.

#### Summary of changes

To make local development much easier, add a `--clone-feature-set` flag
to solana-test-validator to easily mimic the feature set of the cluster
targeted with `--url`.

* Add changelog entry

---
 CHANGELOG.md                               |  1 +
 test-validator/src/lib.rs                  | 27 ++++++++++++++++++++++
 validator/src/bin/solana-test-validator.rs | 13 +++++++++++
 validator/src/cli.rs                       | 11 +++++++++
 4 files changed, 52 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5ab97ef8658dd..0cd6e2b22698d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@ Release channels have their own copy of this changelog:
 * Changes
   * SDK: removed the `respan` macro. This was marked as "internal use only" and was no longer used internally.
   * `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447)
+  * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480)

 ## [2.0.0]
 * Breaking
diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs
index 39616c1fdd4936..218cf9e4141f18 100644
--- a/test-validator/src/lib.rs
+++ b/test-validator/src/lib.rs
@@ -43,6 +43,7 @@ use {
         commitment_config::CommitmentConfig,
         epoch_schedule::EpochSchedule,
         exit::Exit,
+        feature,
         feature_set::FEATURE_NAMES,
         fee_calculator::FeeRateGovernor,
         instruction::{AccountMeta, Instruction},
@@ -428,6 +429,32 @@ impl TestValidatorGenesis {
         Ok(self)
     }

+    pub fn clone_feature_set(&mut self, rpc_client: &RpcClient) -> Result<&mut Self, String> {
+        for feature_ids in FEATURE_NAMES
+            .keys()
+            .cloned()
+            .collect::<Vec<_>>()
+            .chunks(MAX_MULTIPLE_ACCOUNTS)
+        {
+            rpc_client
+                .get_multiple_accounts(feature_ids)
+                .map_err(|err| format!("Failed to fetch: {err}"))?
+                .into_iter()
+                .zip(feature_ids)
+                .for_each(|(maybe_account, feature_id)| {
+                    if maybe_account
+                        .as_ref()
+                        .and_then(feature::from_account)
+                        .and_then(|feature| feature.activated_at)
+                        .is_none()
+                    {
+                        self.deactivate_feature_set.insert(*feature_id);
+                    }
+                });
+        }
+        Ok(self)
+    }
+
     pub fn add_accounts_from_json_files(
         &mut self,
         accounts: &[AccountInfo],
diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs
index 7f7865d8ac18e8..bba5a359093370 100644
--- a/validator/src/bin/solana-test-validator.rs
+++ b/validator/src/bin/solana-test-validator.rs
@@ -281,6 +281,8 @@ fn main() {
         .map(|v| v.into_iter().collect())
         .unwrap_or_default();

+    let clone_feature_set = matches.is_present("clone_feature_set");
+
     let warp_slot = if matches.is_present("warp_slot") {
         Some(match matches.value_of("warp_slot") {
             Some(_) => value_t_or_exit!(matches, "warp_slot", Slot),
@@ -511,6 +513,17 @@ fn main() {
         }
     }

+    if clone_feature_set {
+        if let Err(e) = genesis.clone_feature_set(
+            cluster_rpc_client
+                .as_ref()
+                .expect("bug: --url argument missing?"),
+        ) {
+            println!("Error: clone_feature_set failed: {e}");
+            exit(1);
+        }
+    }
+
     if let Some(warp_slot) = warp_slot {
         genesis.warp_slot(warp_slot);
     }
diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index e1665383e777e3..9f4276d8d67ee0 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -2811,6 +2811,17 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App<
                 .validator(is_parsable::<u64>)
                 .takes_value(true)
                 .help("Override the runtime's account lock limit per transaction"),
+        )
+        .arg(
+            Arg::with_name("clone_feature_set")
+                .long("clone-feature-set")
+                .takes_value(false)
+                .requires("json_rpc_url")
+                .help(
+                    "Copy a feature set from the cluster referenced by the --url \
+                     argument in the genesis configuration. If the ledger \
+                     already exists then this parameter is silently ignored",
+                ),
         );
 }

From 5aaa334c12f5fd174c0417ecf1ce023d6fe38184 Mon Sep 17 00:00:00 2001
From: behzad nouri
Date: Thu, 8 Aug 2024 21:28:35 +0000
Subject: [PATCH 057/529] uses lazy LRU cache for ClusterNodesCache in Turbine
 (#2507)

The current LRU cache implementation requires an exclusive lock even on
the read path due to its &mut self receiver:
https://docs.rs/lru/latest/lru/struct.LruCache.html#method.get

Most reads do not update the cache, so this write lock can unnecessarily
exacerbate lock contention. The commit switches to a lazy LRU cache,
which allows a shared lock on the read path and, additionally, is
generally faster.
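For illustration, a minimal standalone sketch of the double-checked
read/write locking pattern the change below adopts (a plain `HashMap`
behind an `RwLock` stands in for the cache type; all names here are
illustrative only, not part of the patch):

```rust
use std::{collections::HashMap, sync::RwLock};

struct Cache {
    inner: RwLock<HashMap<u64, String>>,
}

impl Cache {
    fn get_or_insert_with(&self, key: u64, make: impl FnOnce() -> String) -> String {
        // Fast path: readers only take the shared lock. With the old
        // lru::LruCache, get() takes `&mut self`, forcing an exclusive
        // lock here even when nothing is modified.
        if let Some(value) = self.inner.read().unwrap().get(&key) {
            return value.clone();
        }
        // Slow path: take the exclusive lock and recheck, since another
        // thread may have inserted between the two lock acquisitions.
        let mut inner = self.inner.write().unwrap();
        inner.entry(key).or_insert_with(make).clone()
    }
}

fn main() {
    let cache = Cache {
        inner: RwLock::new(HashMap::new()),
    };
    assert_eq!(cache.get_or_insert_with(1, || "epoch-1".to_string()), "epoch-1");
    // Second call hits the fast path and never invokes the closure.
    assert_eq!(cache.get_or_insert_with(1, || unreachable!()), "epoch-1");
}
```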
---
 Cargo.lock                   |  1 +
 programs/sbf/Cargo.lock      |  1 +
 turbine/Cargo.toml           |  1 +
 turbine/src/cluster_nodes.rs | 28 ++++++++++++++++------------
 4 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 1f2780297b29fb..1844da987264ac 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7890,6 +7890,7 @@ dependencies = [
  "crossbeam-channel",
  "futures 0.3.30",
  "itertools 0.12.1",
+ "lazy-lru",
  "log",
  "lru",
  "quinn",
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index c447d919b6211b..bdc95ba46949b0 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -6527,6 +6527,7 @@ dependencies = [
  "crossbeam-channel",
  "futures 0.3.30",
  "itertools 0.12.1",
+ "lazy-lru",
  "log",
  "lru",
  "quinn",
diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml
index 7b29f085fa0db9..a03d384d94516c 100644
--- a/turbine/Cargo.toml
+++ b/turbine/Cargo.toml
@@ -15,6 +15,7 @@ bytes = { workspace = true }
 crossbeam-channel = { workspace = true }
 futures = { workspace = true }
 itertools = { workspace = true }
+lazy-lru = { workspace = true }
 log = { workspace = true }
 lru = { workspace = true }
 quinn = { workspace = true }
diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs
index 42236d908da90a..be54df44a601fa 100644
--- a/turbine/src/cluster_nodes.rs
+++ b/turbine/src/cluster_nodes.rs
@@ -1,7 +1,7 @@
 use {
     crate::{broadcast_stage::BroadcastStage, retransmit_stage::RetransmitStage},
     itertools::Itertools,
-    lru::LruCache,
+    lazy_lru::LruCache,
     rand::{seq::SliceRandom, Rng, SeedableRng},
     rand_chacha::ChaChaRng,
     solana_gossip::{
@@ -31,7 +31,7 @@ use {
         iter::repeat_with,
         marker::PhantomData,
         net::{IpAddr, SocketAddr},
-        sync::{Arc, Mutex, RwLock},
+        sync::{Arc, RwLock},
         time::{Duration, Instant},
     },
     thiserror::Error,
@@ -78,7 +78,7 @@ type CacheEntry<T> = Option<(/*as of:*/ Instant, Arc<ClusterNodes<T>>)>;
 pub struct ClusterNodesCache<T> {
     // Cache entries are wrapped in Arc<RwLock<CacheEntry<T>>>, so that, when needed, only
     // one thread does the computations to update the entry for the epoch.
-    cache: Mutex<LruCache<Epoch, Arc<RwLock<CacheEntry<T>>>>>,
+    cache: RwLock<LruCache<Epoch, Arc<RwLock<CacheEntry<T>>>>>,
     ttl: Duration, // Time to live.
 }
@@ -434,7 +434,7 @@ impl<T> ClusterNodesCache<T> {
         ttl: Duration,
     ) -> Self {
         Self {
-            cache: Mutex::new(LruCache::new(cap)),
+            cache: RwLock::new(LruCache::new(cap)),
             ttl,
         }
     }
 }
@@ -442,15 +442,19 @@ impl<T: 'static> ClusterNodesCache<T> {
     fn get_cache_entry(&self, epoch: Epoch) -> Arc<RwLock<CacheEntry<T>>> {
-        let mut cache = self.cache.lock().unwrap();
-        match cache.get(&epoch) {
-            Some(entry) => Arc::clone(entry),
-            None => {
-                let entry = Arc::default();
-                cache.put(epoch, Arc::clone(&entry));
-                entry
-            }
+        if let Some(entry) = self.cache.read().unwrap().get(&epoch) {
+            return Arc::clone(entry);
+        }
+        let mut cache = self.cache.write().unwrap();
+        // Have to recheck again here because the cache might have been updated
+        // by another thread in between the time this thread releases the read
+        // lock and obtains the write lock.
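+        // Without the recheck, two threads racing past the read miss could
+        // each insert a fresh default entry, and one of them would be
+        // silently overwritten by the other's `put`.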
+ if let Some(entry) = cache.get(&epoch) { + return Arc::clone(entry); } + let entry = Arc::default(); + cache.put(epoch, Arc::clone(&entry)); + entry } pub(crate) fn get( From 605acd118f23a5232ffa8fc7b282f9b43393ebf0 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 8 Aug 2024 15:50:59 -0600 Subject: [PATCH 058/529] EpochRewards: decode points and rewards fields as Strings (#2501) Decode points and rewards fields as Strings --- account-decoder/src/parse_sysvar.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs index 71226c9ba885c6..2c3622778c4288 100644 --- a/account-decoder/src/parse_sysvar.rs +++ b/account-decoder/src/parse_sysvar.rs @@ -245,9 +245,9 @@ pub struct UiEpochRewards { pub distribution_starting_block_height: u64, pub num_partitions: u64, pub parent_blockhash: String, - pub total_points: u128, - pub total_rewards: u64, - pub distributed_rewards: u64, + pub total_points: String, + pub total_rewards: String, + pub distributed_rewards: String, pub active: bool, } @@ -257,9 +257,9 @@ impl From for UiEpochRewards { distribution_starting_block_height: epoch_rewards.distribution_starting_block_height, num_partitions: epoch_rewards.num_partitions, parent_blockhash: epoch_rewards.parent_blockhash.to_string(), - total_points: epoch_rewards.total_points, - total_rewards: epoch_rewards.total_rewards, - distributed_rewards: epoch_rewards.distributed_rewards, + total_points: epoch_rewards.total_points.to_string(), + total_rewards: epoch_rewards.total_rewards.to_string(), + distributed_rewards: epoch_rewards.distributed_rewards.to_string(), active: epoch_rewards.active, } } From 6051e0be39c262b795396d45e3ad2fd2d4a2ac2e Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 8 Aug 2024 18:25:34 -0400 Subject: [PATCH 059/529] SVM: Examples: Add PayTube Example (#2474) * SVM: examples: add paytube example * fix comment * fix tests --- Cargo.lock | 22 ++- Cargo.toml | 2 + svm/examples/paytube/Cargo.toml | 22 +++ svm/examples/paytube/README.md | 18 ++ svm/examples/paytube/src/lib.rs | 202 +++++++++++++++++++++++ svm/examples/paytube/src/loader.rs | 58 +++++++ svm/examples/paytube/src/log.rs | 47 ++++++ svm/examples/paytube/src/processor.rs | 122 ++++++++++++++ svm/examples/paytube/src/settler.rs | 175 ++++++++++++++++++++ svm/examples/paytube/src/transaction.rs | 83 ++++++++++ svm/examples/paytube/tests/native_sol.rs | 72 ++++++++ svm/examples/paytube/tests/setup.rs | 87 ++++++++++ svm/examples/paytube/tests/spl_tokens.rs | 105 ++++++++++++ 13 files changed, 1013 insertions(+), 2 deletions(-) create mode 100644 svm/examples/paytube/Cargo.toml create mode 100644 svm/examples/paytube/README.md create mode 100644 svm/examples/paytube/src/lib.rs create mode 100644 svm/examples/paytube/src/loader.rs create mode 100644 svm/examples/paytube/src/log.rs create mode 100644 svm/examples/paytube/src/processor.rs create mode 100644 svm/examples/paytube/src/settler.rs create mode 100644 svm/examples/paytube/src/transaction.rs create mode 100644 svm/examples/paytube/tests/native_sol.rs create mode 100644 svm/examples/paytube/tests/setup.rs create mode 100644 svm/examples/paytube/tests/spl_tokens.rs diff --git a/Cargo.lock b/Cargo.lock index 1844da987264ac..446fdd4a96437d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7660,6 +7660,24 @@ dependencies = [ "prost-types", ] +[[package]] +name = "solana-svm-example-paytube" +version = "2.1.0" +dependencies = [ + "solana-bpf-loader-program", + "solana-client", + 
"solana-compute-budget", + "solana-logger", + "solana-program-runtime", + "solana-sdk", + "solana-svm", + "solana-system-program", + "solana-test-validator", + "spl-associated-token-account", + "spl-token", + "termcolor", +] + [[package]] name = "solana-svm-transaction" version = "2.1.0" @@ -8676,9 +8694,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] diff --git a/Cargo.toml b/Cargo.toml index ca61d6806ae86b..1e4266216a5e21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,6 +120,7 @@ members = [ "svm", "svm-conformance", "svm-transaction", + "svm/examples/paytube", "test-validator", "thin-client", "timings", @@ -422,6 +423,7 @@ solana-storage-proto = { path = "storage-proto", version = "=2.1.0" } solana-streamer = { path = "streamer", version = "=2.1.0" } solana-svm = { path = "svm", version = "=2.1.0" } solana-svm-conformance = { path = "svm-conformance", version = "=2.1.0" } +solana-svm-example-paytube = { path = "svm/examples/paytube", version = "=2.1.0" } solana-svm-transaction = { path = "svm-transaction", version = "=2.1.0" } solana-system-program = { path = "programs/system", version = "=2.1.0" } solana-test-validator = { path = "test-validator", version = "=2.1.0" } diff --git a/svm/examples/paytube/Cargo.toml b/svm/examples/paytube/Cargo.toml new file mode 100644 index 00000000000000..ceb30ff55516d7 --- /dev/null +++ b/svm/examples/paytube/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "solana-svm-example-paytube" +description = "Reference example using Solana SVM API" +version = { workspace = true } +edition = { workspace = true } +publish = false + +[dependencies] +solana-bpf-loader-program = { workspace = true } +solana-client = { workspace = true } +solana-compute-budget = { workspace = true } +solana-logger = { workspace = true } +solana-program-runtime = { workspace = true } +solana-sdk = { workspace = true } +solana-svm = { workspace = true } +solana-system-program = { workspace = true } +spl-associated-token-account = { workspace = true } +spl-token = { workspace = true } +termcolor = "1.4.1" + +[dev-dependencies] +solana-test-validator = { workspace = true } diff --git a/svm/examples/paytube/README.md b/svm/examples/paytube/README.md new file mode 100644 index 00000000000000..c361783a038bc5 --- /dev/null +++ b/svm/examples/paytube/README.md @@ -0,0 +1,18 @@ +# PayTube + +A reference implementation of an off-chain [state channel](https://ethereum.org/en/developers/docs/scaling/state-channels/) +built using [Anza's SVM API](https://www.anza.xyz/blog/anzas-new-svm-api). + +With the release of Agave 2.0, we've decoupled the SVM API from the rest of the +runtime, which means it can be used outside the validator. This unlocks +SVM-based solutions such as sidecars, channels, rollups, and more. This project +demonstrates everything you need to know about boostrapping with this new API. + +PayTube is a state channel (more specifically a payment channel), designed to +allow multiple parties to transact amongst each other in SOL or SPL tokens +off-chain. When the channel is closed, the resulting changes in each user's +balances are posted to the base chain (Solana). 
+ +Although this project is for demonstration purposes, a payment channel similar +to PayTube could be created that scales to handle massive bandwidth of +transfers, saving the overhead of posting transactions to the chain for last. diff --git a/svm/examples/paytube/src/lib.rs b/svm/examples/paytube/src/lib.rs new file mode 100644 index 00000000000000..7549e0261c6866 --- /dev/null +++ b/svm/examples/paytube/src/lib.rs @@ -0,0 +1,202 @@ +//! PayTube. A simple SPL payment channel. +//! +//! PayTube is an SVM-based payment channel that allows two parties to exchange +//! tokens off-chain. The channel is opened by invoking the PayTube "VM", +//! running on some arbitrary server(s). When transacting has concluded, the +//! channel is closed by submitting the final payment ledger to Solana. +//! +//! The final ledger tracks debits and credits to all registered token accounts +//! or system accounts (native SOL) during the lifetime of a channel. It is +//! then used to to craft a batch of transactions to submit to the settlement +//! chain (Solana). +//! +//! Users opt-in to using a PayTube channel by "registering" their token +//! accounts to the channel. This is done by delegating a token account to the +//! PayTube on-chain program on Solana. This delegation is temporary, and +//! released immediately after channel settlement. +//! +//! Note: This opt-in solution is for demonstration purposes only. +//! +//! ```text +//! +//! PayTube "VM" +//! +//! Bob Alice Bob Alice Will +//! | | | | | +//! | --o--o--o-> | | --o--o--o-> | | +//! | | | | --o--o--o-> | <--- PayTube +//! | <-o--o--o-- | | <-o--o--o-- | | Transactions +//! | | | | | +//! | --o--o--o-> | | -----o--o--o-----> | +//! | | | | +//! | --o--o--o-> | | <----o--o--o------ | +//! +//! \ / \ | / +//! +//! ------ ------ +//! Alice: x Alice: x +//! Bob: x Bob: x <--- Solana Transaction +//! Will: x with final ledgers +//! ------ ------ +//! +//! \\ \\ +//! x x +//! +//! Solana Solana <--- Settled to Solana +//! ``` +//! +//! The Solana SVM's `TransactionBatchProcessor` requires projects to provide a +//! "loader" plugin, which implements the `TransactionProcessingCallback` +//! interface. +//! +//! PayTube defines a `PayTubeAccountLoader` that implements the +//! `TransactionProcessingCallback` interface, and provides it to the +//! `TransactionBatchProcessor` to process PayTube transactions. + +mod loader; +mod log; +mod processor; +mod settler; +pub mod transaction; + +use { + crate::{ + loader::PayTubeAccountLoader, settler::PayTubeSettler, transaction::PayTubeTransaction, + }, + processor::{ + create_transaction_batch_processor, get_transaction_check_results, PayTubeForkGraph, + }, + solana_client::rpc_client::RpcClient, + solana_compute_budget::compute_budget::ComputeBudget, + solana_sdk::{ + feature_set::FeatureSet, fee::FeeStructure, hash::Hash, rent_collector::RentCollector, + signature::Keypair, + }, + solana_svm::transaction_processor::{ + TransactionProcessingConfig, TransactionProcessingEnvironment, + }, + std::sync::{Arc, RwLock}, + transaction::create_svm_transactions, +}; + +/// A PayTube channel instance. +/// +/// Facilitates native SOL or SPL token transfers amongst various channel +/// participants, settling the final changes in balances to the base chain. +pub struct PayTubeChannel { + /// I think you know why this is a bad idea... + keys: Vec, + rpc_client: RpcClient, +} + +impl PayTubeChannel { + pub fn new(keys: Vec, rpc_client: RpcClient) -> Self { + Self { keys, rpc_client } + } + + /// The PayTube API. 
Processes a batch of PayTube transactions. + /// + /// Obviously this is a very simple implementation, but one could imagine + /// a more complex service that employs custom functionality, such as: + /// + /// * Increased throughput for individual P2P transfers. + /// * Custom Solana transaction ordering (e.g. MEV). + /// + /// The general scaffold of the PayTube API would remain the same. + pub fn process_paytube_transfers(&self, transactions: &[PayTubeTransaction]) { + log::setup_solana_logging(); + log::creating_paytube_channel(); + + // PayTube default configs. + // + // These can be configurable for channel customization, including + // imposing resource or feature restrictions, but more commonly they + // would likely be hoisted from the cluster. + // + // For example purposes, they are provided as defaults here. + let compute_budget = ComputeBudget::default(); + let feature_set = FeatureSet::all_enabled(); + let fee_structure = FeeStructure::default(); + let lamports_per_signature = fee_structure.lamports_per_signature; + let rent_collector = RentCollector::default(); + + // PayTube loader/callback implementation. + // + // Required to provide the SVM API with a mechanism for loading + // accounts. + let account_loader = PayTubeAccountLoader::new(&self.rpc_client); + + // Solana SVM transaction batch processor. + // + // Creates an instance of `TransactionBatchProcessor`, which can be + // used by PayTube to process transactions using the SVM. + // + // This allows programs such as the System and Token programs to be + // translated and executed within a provisioned virtual machine, as + // well as offers many of the same functionality as the lower-level + // Solana runtime. + let fork_graph = Arc::new(RwLock::new(PayTubeForkGraph {})); + let processor = create_transaction_batch_processor( + &account_loader, + &feature_set, + &compute_budget, + Arc::clone(&fork_graph), + ); + + // The PayTube transaction processing runtime environment. + // + // Again, these can be configurable or hoisted from the cluster. + let processing_environment = TransactionProcessingEnvironment { + blockhash: Hash::default(), + epoch_total_stake: None, + epoch_vote_accounts: None, + feature_set: Arc::new(feature_set), + fee_structure: Some(&fee_structure), + lamports_per_signature, + rent_collector: Some(&rent_collector), + }; + + // The PayTube transaction processing config for Solana SVM. + // + // Extended configurations for even more customization of the SVM API. + let processing_config = TransactionProcessingConfig { + compute_budget: Some(compute_budget), + ..Default::default() + }; + + // Step 1: Convert the batch of PayTube transactions into + // SVM-compatible transactions for processing. + // + // In the future, the SVM API may allow for trait-based transactions. + // In this case, `PayTubeTransaction` could simply implement the + // interface, and avoid this conversion entirely. + let svm_transactions = create_svm_transactions(transactions); + + // Step 2: Process the SVM-compatible transactions with the SVM API. + log::processing_transactions(svm_transactions.len()); + let results = processor.load_and_execute_sanitized_transactions( + &account_loader, + &svm_transactions, + get_transaction_check_results(svm_transactions.len(), lamports_per_signature), + &processing_environment, + &processing_config, + ); + + // Step 3: Convert the SVM API processor results into a final ledger + // using `PayTubeSettler`, and settle the resulting balance differences + // to the Solana base chain. 
+        //
+        // Here the settler is basically iterating over the transaction results
+        // to track debits and credits, but only for those transactions which
+        // were executed successfully.
+        //
+        // The final ledger of debits and credits to each participant can then
+        // be packaged into a minimal number of settlement transactions for
+        // submission.
+        let settler = PayTubeSettler::new(&self.rpc_client, transactions, results, &self.keys);
+        log::settling_to_base_chain(settler.num_transactions());
+        settler.process_settle();
+
+        log::channel_closed();
+    }
+}
diff --git a/svm/examples/paytube/src/loader.rs b/svm/examples/paytube/src/loader.rs
new file mode 100644
index 00000000000000..676598216200f5
--- /dev/null
+++ b/svm/examples/paytube/src/loader.rs
@@ -0,0 +1,58 @@
+//! PayTube's "account loader" component, which provides the SVM API with the
+//! ability to load accounts for PayTube channels.
+//!
+//! The account loader is a simple example of an RPC client that can first load
+//! an account from the base chain, then cache it locally within the protocol
+//! for the duration of the channel.
+
+use {
+    solana_client::rpc_client::RpcClient,
+    solana_sdk::{
+        account::{AccountSharedData, ReadableAccount},
+        pubkey::Pubkey,
+    },
+    solana_svm::transaction_processing_callback::TransactionProcessingCallback,
+    std::{collections::HashMap, sync::RwLock},
+};
+
+/// An account loading mechanism to hoist accounts from the base chain up to
+/// an active PayTube channel.
+///
+/// Employs a simple cache mechanism to ensure accounts are only loaded once.
+pub struct PayTubeAccountLoader<'a> {
+    cache: RwLock<HashMap<Pubkey, AccountSharedData>>,
+    rpc_client: &'a RpcClient,
+}
+
+impl<'a> PayTubeAccountLoader<'a> {
+    pub fn new(rpc_client: &'a RpcClient) -> Self {
+        Self {
+            cache: RwLock::new(HashMap::new()),
+            rpc_client,
+        }
+    }
+}
+
+/// Implementation of the SVM API's `TransactionProcessingCallback` interface.
+///
+/// The SVM API requires this plugin be provided to provide the SVM with the
+/// ability to load accounts.
+///
+/// In the Agave validator, this implementation is Bank, powered by AccountsDB.
+impl TransactionProcessingCallback for PayTubeAccountLoader<'_> {
+    fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
+        if let Some(account) = self.cache.read().unwrap().get(pubkey) {
+            return Some(account.clone());
+        }
+
+        let account: AccountSharedData = self.rpc_client.get_account(pubkey).ok()?.into();
+        self.cache.write().unwrap().insert(*pubkey, account.clone());
+
+        Some(account)
+    }
+
+    fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
+        self.get_account_shared_data(account)
+            .and_then(|account| owners.iter().position(|key| account.owner().eq(key)))
+    }
+}
diff --git a/svm/examples/paytube/src/log.rs b/svm/examples/paytube/src/log.rs
new file mode 100644
index 00000000000000..92c75573080741
--- /dev/null
+++ b/svm/examples/paytube/src/log.rs
@@ -0,0 +1,47 @@
+//! Just logging!
+
+use {
+    std::io::Write,
+    termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor},
+};
+
+fn log_magenta(msg: &str) {
+    let mut stdout = StandardStream::stdout(ColorChoice::Always);
+
+    stdout
+        .set_color(ColorSpec::new().set_fg(Some(Color::Magenta)).set_bold(true))
+        .unwrap();
+
+    writeln!(&mut stdout, "\n[PAYTUBE]: INFO: {}\n", msg).unwrap();
+
+    stdout.reset().unwrap();
+}
+
+pub(crate) fn setup_solana_logging() {
+    #[rustfmt::skip]
+    solana_logger::setup_with_default(
+        "solana_rbpf::vm=debug,\
+        solana_runtime::message_processor=debug,\
+        solana_runtime::system_instruction_processor=trace",
+    );
+}
+
+pub(crate) fn creating_paytube_channel() {
+    log_magenta("Creating PayTube channel...");
+}
+
+pub(crate) fn processing_transactions(num_transactions: usize) {
+    log_magenta("Processing PayTube transactions with the SVM API...");
+    log_magenta(&format!("Number of transactions: {}", num_transactions));
+}
+
+pub(crate) fn settling_to_base_chain(num_transactions: usize) {
+    log_magenta("Settling results from PayTube to the base chain...");
+    log_magenta(&format!(
+        "Number of settlement transactions: {}",
+        num_transactions
+    ));
+}
+
+pub(crate) fn channel_closed() {
+    log_magenta("PayTube channel closed.");
+}
diff --git a/svm/examples/paytube/src/processor.rs b/svm/examples/paytube/src/processor.rs
new file mode 100644
index 00000000000000..71eaccc956826b
--- /dev/null
+++ b/svm/examples/paytube/src/processor.rs
@@ -0,0 +1,122 @@
+//! A helper to initialize Solana SVM API's `TransactionBatchProcessor`.
+
+use {
+    solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1,
+    solana_compute_budget::compute_budget::ComputeBudget,
+    solana_program_runtime::loaded_programs::{
+        BlockRelation, ForkGraph, LoadProgramMetrics, ProgramCacheEntry,
+    },
+    solana_sdk::{account::ReadableAccount, clock::Slot, feature_set::FeatureSet, transaction},
+    solana_svm::{
+        account_loader::CheckedTransactionDetails,
+        transaction_processing_callback::TransactionProcessingCallback,
+        transaction_processor::TransactionBatchProcessor,
+    },
+    solana_system_program::system_processor,
+    std::sync::{Arc, RwLock},
+};
+
+/// In order to use the `TransactionBatchProcessor`, another trait - Solana
+/// Program Runtime's `ForkGraph` - must be implemented, to tell the batch
+/// processor how to work across forks.
+///
+/// Since PayTube doesn't use slots or forks, this implementation is mocked.
+pub(crate) struct PayTubeForkGraph {}
+
+impl ForkGraph for PayTubeForkGraph {
+    fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation {
+        BlockRelation::Unknown
+    }
+}
+
+/// This function encapsulates some initial setup required to tweak the
+/// `TransactionBatchProcessor` for use within PayTube.
+///
+/// We're simply configuring the mocked fork graph on the SVM API's program
+/// cache, then adding the System program to the processor's builtins.
+pub(crate) fn create_transaction_batch_processor<CB: TransactionProcessingCallback>(
+    callbacks: &CB,
+    feature_set: &FeatureSet,
+    compute_budget: &ComputeBudget,
+    fork_graph: Arc<RwLock<PayTubeForkGraph>>,
+) -> TransactionBatchProcessor<PayTubeForkGraph> {
+    let processor = TransactionBatchProcessor::<PayTubeForkGraph>::default();
+
+    {
+        let mut cache = processor.program_cache.write().unwrap();
+
+        // Initialize the mocked fork graph.
+        // let fork_graph = Arc::new(RwLock::new(PayTubeForkGraph {}));
+        cache.fork_graph = Some(Arc::downgrade(&fork_graph));
+
+        // Initialize a proper cache environment.
+ // (Use Loader v4 program to initialize runtime v2 if desired) + cache.environments.program_runtime_v1 = Arc::new( + create_program_runtime_environment_v1(feature_set, compute_budget, false, false) + .unwrap(), + ); + + // Add the SPL Token program to the cache. + if let Some(program_account) = callbacks.get_account_shared_data(&spl_token::id()) { + let elf_bytes = program_account.data(); + let program_runtime_environment = cache.environments.program_runtime_v1.clone(); + cache.assign_program( + spl_token::id(), + Arc::new( + ProgramCacheEntry::new( + &solana_sdk::bpf_loader::id(), + program_runtime_environment, + 0, + 0, + elf_bytes, + elf_bytes.len(), + &mut LoadProgramMetrics::default(), + ) + .unwrap(), + ), + ); + } + } + + // Add the system program builtin. + processor.add_builtin( + callbacks, + solana_system_program::id(), + "system_program", + ProgramCacheEntry::new_builtin( + 0, + b"system_program".len(), + system_processor::Entrypoint::vm, + ), + ); + + // Add the BPF Loader v2 builtin, for the SPL Token program. + processor.add_builtin( + callbacks, + solana_sdk::bpf_loader::id(), + "solana_bpf_loader_program", + ProgramCacheEntry::new_builtin( + 0, + b"solana_bpf_loader_program".len(), + solana_bpf_loader_program::Entrypoint::vm, + ), + ); + + processor +} + +/// This function is also a mock. In the Agave validator, the bank pre-checks +/// transactions before providing them to the SVM API. We mock this step in +/// PayTube, since we don't need to perform such pre-checks. +pub(crate) fn get_transaction_check_results( + len: usize, + lamports_per_signature: u64, +) -> Vec> { + vec![ + transaction::Result::Ok(CheckedTransactionDetails { + nonce: None, + lamports_per_signature, + }); + len + ] +} diff --git a/svm/examples/paytube/src/settler.rs b/svm/examples/paytube/src/settler.rs new file mode 100644 index 00000000000000..5db63c4e675809 --- /dev/null +++ b/svm/examples/paytube/src/settler.rs @@ -0,0 +1,175 @@ +//! PayTube's "settler" component for settling the final ledgers across all +//! channel participants. +//! +//! When users are finished transacting, the resulting ledger is used to craft +//! a batch of transactions to settle all state changes to the base chain +//! (Solana). +//! +//! The interesting piece here is that there can be hundreds or thousands of +//! transactions across a handful of users, but only the resulting difference +//! between their balance when the channel opened and their balance when the +//! channel is about to close are needed to create the settlement transaction. + +use { + crate::transaction::PayTubeTransaction, + solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig}, + solana_sdk::{ + commitment_config::CommitmentConfig, instruction::Instruction as SolanaInstruction, + pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction, + transaction::Transaction as SolanaTransaction, + }, + solana_svm::transaction_processor::LoadAndExecuteSanitizedTransactionsOutput, + spl_associated_token_account::get_associated_token_address, + std::collections::HashMap, +}; + +/// The key used for storing ledger entries. +/// +/// Each entry in the ledger represents the movement of SOL or tokens between +/// two parties. The two keys of the two parties are stored in a sorted array +/// of length two, and the value's sign determines the direction of transfer. +/// +/// This design allows the ledger to combine transfers from a -> b and b -> a +/// in the same entry, calculating the final delta between two parties. 
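+///
+/// For example (a sketch of the sign convention): a transfer A -> B of 5 and
+/// a transfer B -> A of 2 fold into one entry keyed by the sorted pair, and
+/// if A sorts first the stored value is +5 - 2 = +3, i.e. a net transfer of
+/// 3 from A to B at settlement time.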
+///
+/// Note that this design could be even _further_ optimized to minimize the
+/// number of required settlement transactions in a few ways, including
+/// combining transfers across parties, ignoring zero-balance changes, and
+/// more. An on-chain program on the base chain could even facilitate
+/// multi-party transfers, further reducing the number of required
+/// settlement transactions.
+#[derive(PartialEq, Eq, Hash)]
+struct LedgerKey {
+    mint: Option<Pubkey>,
+    keys: [Pubkey; 2],
+}
+
+/// A ledger of PayTube transactions, used to deconstruct into base chain
+/// transactions.
+///
+/// The value is stored as a signed `i128`, in order to include a sign but also
+/// provide enough room to store `u64::MAX`.
+struct Ledger {
+    ledger: HashMap<LedgerKey, i128>,
+}
+
+impl Ledger {
+    fn new(
+        paytube_transactions: &[PayTubeTransaction],
+        svm_output: LoadAndExecuteSanitizedTransactionsOutput,
+    ) -> Self {
+        let mut ledger: HashMap<LedgerKey, i128> = HashMap::new();
+        paytube_transactions
+            .iter()
+            .zip(svm_output.execution_results)
+            .for_each(|(transaction, result)| {
+                // Only append to the ledger if the PayTube transaction was
+                // successful.
+                if result.was_executed_successfully() {
+                    let mint = transaction.mint;
+                    let mut keys = [transaction.from, transaction.to];
+                    keys.sort();
+                    let amount = if keys.iter().position(|k| k.eq(&transaction.from)).unwrap() == 0
+                    {
+                        transaction.amount as i128
+                    } else {
+                        (transaction.amount as i128)
+                            .checked_neg()
+                            .unwrap_or_default()
+                    };
+                    ledger
+                        .entry(LedgerKey { mint, keys })
+                        .and_modify(|e| *e = e.checked_add(amount).unwrap())
+                        .or_insert(amount);
+                }
+            });
+        Self { ledger }
+    }
+
+    fn generate_base_chain_instructions(&self) -> Vec<SolanaInstruction> {
+        self.ledger
+            .iter()
+            .map(|(key, amount)| {
+                let (from, to, amount) = if *amount < 0 {
+                    (key.keys[1], key.keys[0], (amount * -1) as u64)
+                } else {
+                    (key.keys[0], key.keys[1], *amount as u64)
+                };
+                if let Some(mint) = key.mint {
+                    let source_pubkey = get_associated_token_address(&from, &mint);
+                    let destination_pubkey = get_associated_token_address(&to, &mint);
+                    return spl_token::instruction::transfer(
+                        &spl_token::id(),
+                        &source_pubkey,
+                        &destination_pubkey,
+                        &from,
+                        &[],
+                        amount,
+                    )
+                    .unwrap();
+                }
+                system_instruction::transfer(&from, &to, amount)
+            })
+            .collect::<Vec<_>>()
+    }
+}
+
+const CHUNK_SIZE: usize = 10;
+
+/// PayTube final transaction settler.
+pub struct PayTubeSettler<'a> {
+    instructions: Vec<SolanaInstruction>,
+    keys: &'a [Keypair],
+    rpc_client: &'a RpcClient,
+}
+
+impl<'a> PayTubeSettler<'a> {
+    /// Create a new instance of a `PayTubeSettler` by tallying up all
+    /// transfers into a ledger.
+    pub fn new(
+        rpc_client: &'a RpcClient,
+        paytube_transactions: &[PayTubeTransaction],
+        svm_output: LoadAndExecuteSanitizedTransactionsOutput,
+        keys: &'a [Keypair],
+    ) -> Self {
+        // Build the ledger from the processed PayTube transactions.
+        let ledger = Ledger::new(paytube_transactions, svm_output);
+
+        // Build the Solana instructions from the ledger.
+        let instructions = ledger.generate_base_chain_instructions();
+
+        Self {
+            instructions,
+            keys,
+            rpc_client,
+        }
+    }
+
+    /// Count how many settlement transactions are estimated to be required.
+    pub(crate) fn num_transactions(&self) -> usize {
+        self.instructions.len().div_ceil(CHUNK_SIZE)
+    }
+
+    /// Settle the payment channel results to the Solana blockchain.
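+    ///
+    /// Note: instructions are submitted in batches of `CHUNK_SIZE` (10) per
+    /// settlement transaction, signed by all of the channel's keys.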
+ pub fn process_settle(&self) { + let recent_blockhash = self.rpc_client.get_latest_blockhash().unwrap(); + self.instructions.chunks(CHUNK_SIZE).for_each(|chunk| { + let transaction = SolanaTransaction::new_signed_with_payer( + chunk, + Some(&self.keys[0].pubkey()), + self.keys, + recent_blockhash, + ); + self.rpc_client + .send_and_confirm_transaction_with_spinner_and_config( + &transaction, + CommitmentConfig::processed(), + RpcSendTransactionConfig { + skip_preflight: true, + ..Default::default() + }, + ) + .unwrap(); + }); + } +} diff --git a/svm/examples/paytube/src/transaction.rs b/svm/examples/paytube/src/transaction.rs new file mode 100644 index 00000000000000..8e27a3dbb31f03 --- /dev/null +++ b/svm/examples/paytube/src/transaction.rs @@ -0,0 +1,83 @@ +//! PayTube's custom transaction format, tailored specifically for SOL or SPL +//! token transfers. +//! +//! Mostly for demonstration purposes, to show how projects may use completely +//! different transactions in their protocol, then convert the resulting state +//! transitions into the necessary transactions for the base chain - in this +//! case Solana. + +use { + solana_sdk::{ + instruction::Instruction as SolanaInstruction, + pubkey::Pubkey, + system_instruction, + transaction::{ + SanitizedTransaction as SolanaSanitizedTransaction, Transaction as SolanaTransaction, + }, + }, + spl_associated_token_account::get_associated_token_address, + std::collections::HashSet, +}; + +/// A simple PayTube transaction. Transfers SPL tokens or SOL from one account +/// to another. +/// +/// A `None` value for `mint` represents native SOL. +pub struct PayTubeTransaction { + pub mint: Option, + pub from: Pubkey, + pub to: Pubkey, + pub amount: u64, +} + +impl From<&PayTubeTransaction> for SolanaInstruction { + fn from(value: &PayTubeTransaction) -> Self { + let PayTubeTransaction { + mint, + from, + to, + amount, + } = value; + if let Some(mint) = mint { + let source_pubkey = get_associated_token_address(from, mint); + let destination_pubkey = get_associated_token_address(to, mint); + return spl_token::instruction::transfer( + &spl_token::id(), + &source_pubkey, + &destination_pubkey, + from, + &[], + *amount, + ) + .unwrap(); + } + system_instruction::transfer(from, to, *amount) + } +} + +impl From<&PayTubeTransaction> for SolanaTransaction { + fn from(value: &PayTubeTransaction) -> Self { + SolanaTransaction::new_with_payer(&[SolanaInstruction::from(value)], Some(&value.from)) + } +} + +impl From<&PayTubeTransaction> for SolanaSanitizedTransaction { + fn from(value: &PayTubeTransaction) -> Self { + SolanaSanitizedTransaction::try_from_legacy_transaction( + SolanaTransaction::from(value), + &HashSet::new(), + ) + .unwrap() + } +} + +/// Create a batch of Solana transactions, for the Solana SVM's transaction +/// processor, from a batch of PayTube instructions. 
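+///
+/// A usage sketch (assuming `transfers` is a slice of `PayTubeTransaction`):
+///
+/// ```ignore
+/// let svm_transactions = create_svm_transactions(&transfers);
+/// // `svm_transactions` is now ready for the SVM API's
+/// // load_and_execute_sanitized_transactions.
+/// ```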
+pub fn create_svm_transactions( + paytube_transactions: &[PayTubeTransaction], +) -> Vec { + paytube_transactions + .iter() + .map(SolanaSanitizedTransaction::from) + .collect() +} diff --git a/svm/examples/paytube/tests/native_sol.rs b/svm/examples/paytube/tests/native_sol.rs new file mode 100644 index 00000000000000..98a6dd30670e0e --- /dev/null +++ b/svm/examples/paytube/tests/native_sol.rs @@ -0,0 +1,72 @@ +mod setup; + +use { + setup::{system_account, TestValidatorContext}, + solana_sdk::{signature::Keypair, signer::Signer}, + solana_svm_example_paytube::{transaction::PayTubeTransaction, PayTubeChannel}, +}; + +#[test] +fn test_native_sol() { + let alice = Keypair::new(); + let bob = Keypair::new(); + let will = Keypair::new(); + + let alice_pubkey = alice.pubkey(); + let bob_pubkey = bob.pubkey(); + let will_pubkey = will.pubkey(); + + let accounts = vec![ + (alice_pubkey, system_account(10_000_000)), + (bob_pubkey, system_account(10_000_000)), + (will_pubkey, system_account(10_000_000)), + ]; + + let context = TestValidatorContext::start_with_accounts(accounts); + let test_validator = &context.test_validator; + let payer = context.payer.insecure_clone(); + + let rpc_client = test_validator.get_rpc_client(); + + let paytube_channel = PayTubeChannel::new(vec![payer, alice, bob, will], rpc_client); + + paytube_channel.process_paytube_transfers(&[ + // Alice -> Bob 2_000_000 + PayTubeTransaction { + from: alice_pubkey, + to: bob_pubkey, + amount: 2_000_000, + mint: None, + }, + // Bob -> Will 5_000_000 + PayTubeTransaction { + from: bob_pubkey, + to: will_pubkey, + amount: 5_000_000, + mint: None, + }, + // Alice -> Bob 2_000_000 + PayTubeTransaction { + from: alice_pubkey, + to: bob_pubkey, + amount: 2_000_000, + mint: None, + }, + // Will -> Alice 1_000_000 + PayTubeTransaction { + from: will_pubkey, + to: alice_pubkey, + amount: 1_000_000, + mint: None, + }, + ]); + + // Ledger: + // Alice: 10_000_000 - 2_000_000 - 2_000_000 + 1_000_000 = 7_000_000 + // Bob: 10_000_000 + 2_000_000 - 5_000_000 + 2_000_000 = 9_000_000 + // Will: 10_000_000 + 5_000_000 - 1_000_000 = 14_000_000 + let rpc_client = test_validator.get_rpc_client(); + assert_eq!(rpc_client.get_balance(&alice_pubkey).unwrap(), 7_000_000); + assert_eq!(rpc_client.get_balance(&bob_pubkey).unwrap(), 9_000_000); + assert_eq!(rpc_client.get_balance(&will_pubkey).unwrap(), 14_000_000); +} diff --git a/svm/examples/paytube/tests/setup.rs b/svm/examples/paytube/tests/setup.rs new file mode 100644 index 00000000000000..87f9b920b63f39 --- /dev/null +++ b/svm/examples/paytube/tests/setup.rs @@ -0,0 +1,87 @@ +#![allow(unused)] + +use { + solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount}, + epoch_schedule::EpochSchedule, + program_pack::Pack, + pubkey::Pubkey, + signature::Keypair, + system_program, + }, + solana_test_validator::{TestValidator, TestValidatorGenesis}, + spl_token::state::{Account as TokenAccount, Mint}, +}; + +const SLOTS_PER_EPOCH: u64 = 50; + +pub struct TestValidatorContext { + pub test_validator: TestValidator, + pub payer: Keypair, +} + +impl TestValidatorContext { + pub fn start_with_accounts(accounts: Vec<(Pubkey, AccountSharedData)>) -> Self { + let epoch_schedule = EpochSchedule::custom(SLOTS_PER_EPOCH, SLOTS_PER_EPOCH, false); + + let (test_validator, payer) = TestValidatorGenesis::default() + .epoch_schedule(epoch_schedule) + .add_accounts(accounts) + .start(); + + Self { + test_validator, + payer, + } + } +} + +pub fn get_token_account_balance(token_account: Account) -> u64 { + let state = 
TokenAccount::unpack(token_account.data()).unwrap(); + state.amount +} + +pub fn mint_account() -> AccountSharedData { + let data = { + let mut data = [0; Mint::LEN]; + Mint::pack( + Mint { + supply: 100_000_000, + decimals: 0, + is_initialized: true, + ..Default::default() + }, + &mut data, + ) + .unwrap(); + data + }; + let mut account = AccountSharedData::new(100_000_000, data.len(), &spl_token::id()); + account.set_data_from_slice(&data); + account +} + +pub fn system_account(lamports: u64) -> AccountSharedData { + AccountSharedData::new(lamports, 0, &system_program::id()) +} + +pub fn token_account(owner: &Pubkey, mint: &Pubkey, amount: u64) -> AccountSharedData { + let data = { + let mut data = [0; TokenAccount::LEN]; + TokenAccount::pack( + TokenAccount { + mint: *mint, + owner: *owner, + amount, + state: spl_token::state::AccountState::Initialized, + ..Default::default() + }, + &mut data, + ) + .unwrap(); + data + }; + let mut account = AccountSharedData::new(100_000_000, data.len(), &spl_token::id()); + account.set_data_from_slice(&data); + account +} diff --git a/svm/examples/paytube/tests/spl_tokens.rs b/svm/examples/paytube/tests/spl_tokens.rs new file mode 100644 index 00000000000000..88ab5db7e365ad --- /dev/null +++ b/svm/examples/paytube/tests/spl_tokens.rs @@ -0,0 +1,105 @@ +mod setup; + +use { + setup::{ + get_token_account_balance, mint_account, system_account, token_account, + TestValidatorContext, + }, + solana_sdk::{pubkey::Pubkey, signature::Keypair, signer::Signer}, + solana_svm_example_paytube::{transaction::PayTubeTransaction, PayTubeChannel}, + spl_associated_token_account::get_associated_token_address, +}; + +#[test] +fn test_spl_tokens() { + let mint = Pubkey::new_unique(); + + let alice = Keypair::new(); + let bob = Keypair::new(); + let will = Keypair::new(); + + let alice_pubkey = alice.pubkey(); + let alice_token_account_pubkey = get_associated_token_address(&alice_pubkey, &mint); + + let bob_pubkey = bob.pubkey(); + let bob_token_account_pubkey = get_associated_token_address(&bob_pubkey, &mint); + + let will_pubkey = will.pubkey(); + let will_token_account_pubkey = get_associated_token_address(&will_pubkey, &mint); + + let accounts = vec![ + (mint, mint_account()), + (alice_pubkey, system_account(10_000_000)), + ( + alice_token_account_pubkey, + token_account(&alice_pubkey, &mint, 10), + ), + (bob_pubkey, system_account(10_000_000)), + ( + bob_token_account_pubkey, + token_account(&bob_pubkey, &mint, 10), + ), + (will_pubkey, system_account(10_000_000)), + ( + will_token_account_pubkey, + token_account(&will_pubkey, &mint, 10), + ), + ]; + + let context = TestValidatorContext::start_with_accounts(accounts); + let test_validator = &context.test_validator; + let payer = context.payer.insecure_clone(); + + let rpc_client = test_validator.get_rpc_client(); + + let paytube_channel = PayTubeChannel::new(vec![payer, alice, bob, will], rpc_client); + + paytube_channel.process_paytube_transfers(&[ + // Alice -> Bob 2 + PayTubeTransaction { + from: alice_pubkey, + to: bob_pubkey, + amount: 2, + mint: Some(mint), + }, + // Bob -> Will 5 + PayTubeTransaction { + from: bob_pubkey, + to: will_pubkey, + amount: 5, + mint: Some(mint), + }, + // Alice -> Bob 2 + PayTubeTransaction { + from: alice_pubkey, + to: bob_pubkey, + amount: 2, + mint: Some(mint), + }, + // Will -> Alice 1 + PayTubeTransaction { + from: will_pubkey, + to: alice_pubkey, + amount: 1, + mint: Some(mint), + }, + ]); + + // Ledger: + // Alice: 10 - 2 - 2 + 1 = 7 + // Bob: 10 + 2 - 5 + 2 = 9 + // Will: 10 
+ 5 - 1 = 14 + let rpc_client = test_validator.get_rpc_client(); + assert_eq!( + get_token_account_balance(rpc_client.get_account(&alice_token_account_pubkey).unwrap()), + 7 + ); + assert_eq!( + get_token_account_balance(rpc_client.get_account(&bob_token_account_pubkey).unwrap()), + 9 + ); + assert_eq!( + get_token_account_balance(rpc_client.get_account(&will_token_account_pubkey).unwrap()), + 14 + ); +} From 33119c5df7727e5333d69d2b4a3be52caf7f80d9 Mon Sep 17 00:00:00 2001 From: Joe C Date: Thu, 8 Aug 2024 18:26:23 -0400 Subject: [PATCH 060/529] Runtime: Core BPF Migration: Add checks for executable program account (#2483) * Runtime: Core BPF: check `executable` on program load * Runtime: Core BPF: set `executable` on migration --- .../bank/builtins/core_bpf_migration/error.rs | 3 ++ .../bank/builtins/core_bpf_migration/mod.rs | 8 +++- .../core_bpf_migration/target_core_bpf.rs | 39 +++++++++++++++++-- 3 files changed, 46 insertions(+), 4 deletions(-) diff --git a/runtime/src/bank/builtins/core_bpf_migration/error.rs b/runtime/src/bank/builtins/core_bpf_migration/error.rs index f3059ed4509fe7..1bf64d003518c0 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/error.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/error.rs @@ -21,6 +21,9 @@ pub enum CoreBpfMigrationError { /// Incorrect account owner #[error("Incorrect account owner for {0:?}")] IncorrectOwner(Pubkey), + /// Program account not executable + #[error("Program account not executable for program {0:?}")] + ProgramAccountNotExecutable(Pubkey), /// Program has a data account #[error("Data account exists for program {0:?}")] ProgramHasDataAccount(Pubkey), diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 871b173e5fa772..6fecbdeba3640e 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -88,7 +88,9 @@ impl Bank { }; let lamports = self.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::size_of_program()); - let account = AccountSharedData::new_data(lamports, &state, &bpf_loader_upgradeable::id())?; + let mut account = + AccountSharedData::new_data(lamports, &state, &bpf_loader_upgradeable::id())?; + account.set_executable(true); Ok(account) } @@ -557,6 +559,9 @@ pub(crate) mod tests { // Program account is owned by the upgradeable loader. assert_eq!(program_account.owner(), &bpf_loader_upgradeable::id()); + // Program account is executable. + assert!(program_account.executable()); + // Program account has the correct state, with a pointer to its program // data address. let program_account_state: UpgradeableLoaderState = program_account.state().unwrap(); @@ -887,6 +892,7 @@ pub(crate) mod tests { let owner = &bpf_loader_upgradeable::id(); let mut account = AccountSharedData::new(lamports, space, owner); + account.set_executable(true); account.data_as_mut_slice().copy_from_slice(&data); bank.store_account_and_update_capitalization(program_address, &account); account diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_core_bpf.rs b/runtime/src/bank/builtins/core_bpf_migration/target_core_bpf.rs index a98a72528bb567..2e1e3ff484a82a 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/target_core_bpf.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/target_core_bpf.rs @@ -20,7 +20,8 @@ pub(crate) struct TargetCoreBpf { impl TargetCoreBpf { /// Collects the details of a Core BPF program and verifies it is properly /// configured. 
- /// The program account should exist with a pointer to its data account. + /// The program account should exist with a pointer to its data account + /// and it should be marked as executable. /// The program data account should exist with the correct state /// (a ProgramData header and the program ELF). pub(crate) fn new_checked( @@ -39,6 +40,13 @@ impl TargetCoreBpf { return Err(CoreBpfMigrationError::IncorrectOwner(*program_address)); } + // The program account should be executable. + if !program_account.executable() { + return Err(CoreBpfMigrationError::ProgramAccountNotExecutable( + *program_address, + )); + } + // The program account should have a pointer to its data account. match program_account.deserialize_data::()? { UpgradeableLoaderState::Program { @@ -94,11 +102,11 @@ mod tests { solana_sdk::{account::WritableAccount, bpf_loader_upgradeable}, }; - fn store_account(bank: &Bank, address: &Pubkey, data: &[u8], owner: &Pubkey) { + fn store_account(bank: &Bank, address: &Pubkey, data: &[u8], owner: &Pubkey, executable: bool) { let space = data.len(); let lamports = bank.get_minimum_balance_for_rent_exemption(space); let mut account = AccountSharedData::new(lamports, space, owner); - account.set_executable(true); + account.set_executable(executable); account.data_as_mut_slice().copy_from_slice(data); bank.store_account_and_update_capitalization(address, &account); } @@ -125,18 +133,36 @@ mod tests { }) .unwrap(), &Pubkey::new_unique(), // Not the upgradeable loader + true, ); assert_matches!( TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), CoreBpfMigrationError::IncorrectOwner(..) ); + // Fail if the program account is not executable. + store_account( + &bank, + &program_address, + &bincode::serialize(&UpgradeableLoaderState::Program { + programdata_address: program_data_address, + }) + .unwrap(), + &bpf_loader_upgradeable::id(), + false, // Not executable + ); + assert_matches!( + TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), + CoreBpfMigrationError::ProgramAccountNotExecutable(..) + ); + // Fail if the program account does not have the correct state. store_account( &bank, &program_address, &[4u8; 200], // Not the correct state &bpf_loader_upgradeable::id(), + true, ); assert_matches!( TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), @@ -154,6 +180,7 @@ mod tests { }) .unwrap(), &bpf_loader_upgradeable::id(), + true, ); assert_matches!( TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), @@ -171,6 +198,7 @@ mod tests { }) .unwrap(), &bpf_loader_upgradeable::id(), + true, ); // Store the proper program account. @@ -182,6 +210,7 @@ mod tests { }) .unwrap(), &bpf_loader_upgradeable::id(), + true, ); // Fail if the program data account does not exist. 
@@ -200,6 +229,7 @@ mod tests { }) .unwrap(), &Pubkey::new_unique(), // Not the upgradeable loader + false, ); assert_matches!( TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), @@ -212,6 +242,7 @@ mod tests { &program_data_address, &[4u8; 200], // Not the correct state &bpf_loader_upgradeable::id(), + false, ); assert_matches!( TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), @@ -228,6 +259,7 @@ mod tests { }) .unwrap(), &bpf_loader_upgradeable::id(), + false, ); assert_matches!( TargetCoreBpf::new_checked(&bank, &program_address).unwrap_err(), @@ -257,6 +289,7 @@ mod tests { &program_data_address, &data, &bpf_loader_upgradeable::id(), + false, ); let target_core_bpf = TargetCoreBpf::new_checked(&bank, &program_address).unwrap(); From c5e8d16a8deb83c0209337a234629980632b2ef3 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Thu, 8 Aug 2024 18:39:31 -0500 Subject: [PATCH 061/529] Bump rbpf version to 0.8.5 (#2515) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- programs/sbf/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 446fdd4a96437d..8fe1a845f15f33 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8198,9 +8198,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381f595f78accb55aeea018a90e3acf6048f960d932002737d249e3294bd58fe" +checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" dependencies = [ "byteorder", "combine", diff --git a/Cargo.toml b/Cargo.toml index 1e4266216a5e21..67a0d73b87e818 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -443,7 +443,7 @@ solana-zk-keygen = { path = "zk-keygen", version = "=2.1.0" } solana-zk-sdk = { path = "zk-sdk", version = "=2.1.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=2.1.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=2.1.0" } -solana_rbpf = "=0.8.2" +solana_rbpf = "=0.8.5" spl-associated-token-account = "=4.0.0" spl-instruction-padding = "0.2" spl-memo = "=5.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bdc95ba46949b0..137387b34eabef 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6749,9 +6749,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381f595f78accb55aeea018a90e3acf6048f960d932002737d249e3294bd58fe" +checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" dependencies = [ "byteorder 1.5.0", "combine", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 4c21253716b46a..15d1775ab6559b 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -60,7 +60,7 @@ solana-transaction-status = { path = "../../transaction-status", version = "=2.1 solana-type-overrides = { path = "../../type-overrides", version = "=2.1.0" } agave-validator = { path = "../../validator", version = "=2.1.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=2.1.0" } -solana_rbpf = "=0.8.2" +solana_rbpf = "=0.8.5" thiserror = "1.0" [package] From 2792e91d84f0b61cf4a0f0bf964c3e0ad9e3b91b Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 9 Aug 2024 09:33:29 +0800 Subject: [PATCH 062/529] Improve type safety in stake state module (#2482) * improve type safety in stake state module * feedback * fix new method --- 
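[Review note] The substance of this change is replacing wildcard `_` match
arms with an explicit list of the remaining enum variants, enforced by the
new `#![deny(clippy::wildcard_enum_match_arm)]` crate attribute. A minimal
sketch of why this helps, using a hypothetical `Status` enum that is not
part of this patch:

    #![deny(clippy::wildcard_enum_match_arm)]

    enum Status {
        Active,
        Paused,
        Retired,
    }

    fn is_running(status: &Status) -> bool {
        match status {
            Status::Active => true,
            // Enumerating the remaining variants instead of writing `_`
            // means that adding a new variant to `Status` later becomes a
            // compile error here, forcing every match site to decide how
            // the new case should be handled.
            Status::Paused | Status::Retired => false,
        }
    }

    fn main() {
        assert!(is_running(&Status::Active));
        assert!(!is_running(&Status::Paused));
    }

The accessors in the diff below get the same treatment: `Self::Uninitialized`,
`Self::Initialized(_)`, and `Self::RewardsPool` are named explicitly in every
`None` arm instead of being folded into `_`.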
 sdk/program/src/stake/state.rs | 60 +++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 27 deletions(-)

diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs
index 685b134b15c262..139df906fc12b2 100644
--- a/sdk/program/src/stake/state.rs
+++ b/sdk/program/src/stake/state.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::arithmetic_side_effects)]
+#![deny(clippy::wildcard_enum_match_arm)]
 // Remove the following `allow` when `StakeState` is removed, required to avoid
 // warnings from uses of deprecated types during trait derivations.
 #![allow(deprecated)]
@@ -105,23 +106,23 @@ impl StakeState {

     pub fn stake(&self) -> Option<Stake> {
         match self {
-            StakeState::Stake(_meta, stake) => Some(*stake),
-            _ => None,
+            Self::Stake(_meta, stake) => Some(*stake),
+            Self::Uninitialized | Self::Initialized(_) | Self::RewardsPool => None,
         }
     }

     pub fn delegation(&self) -> Option<Delegation> {
         match self {
-            StakeState::Stake(_meta, stake) => Some(stake.delegation),
-            _ => None,
+            Self::Stake(_meta, stake) => Some(stake.delegation),
+            Self::Uninitialized | Self::Initialized(_) | Self::RewardsPool => None,
         }
     }

     pub fn authorized(&self) -> Option<Authorized> {
         match self {
-            StakeState::Stake(meta, _stake) => Some(meta.authorized),
-            StakeState::Initialized(meta) => Some(meta.authorized),
-            _ => None,
+            Self::Stake(meta, _stake) => Some(meta.authorized),
+            Self::Initialized(meta) => Some(meta.authorized),
+            Self::Uninitialized | Self::RewardsPool => None,
         }
     }

@@ -131,9 +132,9 @@
     pub fn meta(&self) -> Option<Meta> {
         match self {
-            StakeState::Stake(meta, _stake) => Some(*meta),
-            StakeState::Initialized(meta) => Some(*meta),
-            _ => None,
+            Self::Stake(meta, _stake) => Some(*meta),
+            Self::Initialized(meta) => Some(*meta),
+            Self::Uninitialized | Self::RewardsPool => None,
         }
     }
 }
@@ -208,37 +209,37 @@ impl StakeStateV2 {

     pub fn stake(&self) -> Option<Stake> {
         match self {
-            StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(*stake),
-            _ => None,
+            Self::Stake(_meta, stake, _stake_flags) => Some(*stake),
+            Self::Uninitialized | Self::Initialized(_) | Self::RewardsPool => None,
         }
     }

     pub fn stake_ref(&self) -> Option<&Stake> {
         match self {
-            StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(stake),
-            _ => None,
+            Self::Stake(_meta, stake, _stake_flags) => Some(stake),
+            Self::Uninitialized | Self::Initialized(_) | Self::RewardsPool => None,
         }
     }

     pub fn delegation(&self) -> Option<Delegation> {
         match self {
-            StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(stake.delegation),
-            _ => None,
+            Self::Stake(_meta, stake, _stake_flags) => Some(stake.delegation),
+            Self::Uninitialized | Self::Initialized(_) | Self::RewardsPool => None,
         }
     }

     pub fn delegation_ref(&self) -> Option<&Delegation> {
         match self {
             StakeStateV2::Stake(_meta, stake, _stake_flags) => Some(&stake.delegation),
-            _ => None,
+            Self::Uninitialized | Self::Initialized(_) | Self::RewardsPool => None,
         }
     }

     pub fn authorized(&self) -> Option<Authorized> {
         match self {
-            StakeStateV2::Stake(meta, _stake, _stake_flags) => Some(meta.authorized),
-            StakeStateV2::Initialized(meta) => Some(meta.authorized),
-            _ => None,
+            Self::Stake(meta, _stake, _stake_flags) => Some(meta.authorized),
+            Self::Initialized(meta) => Some(meta.authorized),
+            Self::Uninitialized | Self::RewardsPool => None,
         }
     }

@@ -248,9 +249,9 @@
     pub fn meta(&self) -> Option<Meta> {
         match self {
-            StakeStateV2::Stake(meta, _stake, _stake_flags) => Some(*meta),
-            StakeStateV2::Initialized(meta) => Some(*meta),
-            _ => None,
+            Self::Stake(meta, _stake, _stake_flags) => Some(*meta),
+
Self::Initialized(meta) => Some(*meta), + Self::Uninitialized | Self::RewardsPool => None, } } } @@ -375,10 +376,15 @@ impl Authorized { signers: &HashSet, stake_authorize: StakeAuthorize, ) -> Result<(), InstructionError> { - match stake_authorize { - StakeAuthorize::Staker if signers.contains(&self.staker) => Ok(()), - StakeAuthorize::Withdrawer if signers.contains(&self.withdrawer) => Ok(()), - _ => Err(InstructionError::MissingRequiredSignature), + let authorized_signer = match stake_authorize { + StakeAuthorize::Staker => &self.staker, + StakeAuthorize::Withdrawer => &self.withdrawer, + }; + + if signers.contains(authorized_signer) { + Ok(()) + } else { + Err(InstructionError::MissingRequiredSignature) } } From 15c5dcbee3eeba27a0714c7538323fc2ad000ef2 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 9 Aug 2024 15:07:01 +0800 Subject: [PATCH 063/529] Revert "checks for duplicate instances using the new ContactInfo (#2506)" (#2521) --- gossip/src/cluster_info.rs | 21 +++++-------- gossip/src/contact_info.rs | 63 -------------------------------------- 2 files changed, 8 insertions(+), 76 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 46b505014bebc3..0f11489333644d 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2475,21 +2475,16 @@ impl ClusterInfo { // Check if there is a duplicate instance of // this node with more recent timestamp. - let check_duplicate_instance = { - let instance = self.instance.read().unwrap(); - let my_contact_info = self.my_contact_info(); - move |values: &[CrdsValue]| { - if should_check_duplicate_instance - && values.iter().any(|value| { - instance.check_duplicate(value) - || matches!(&value.data, CrdsData::ContactInfo(other) - if my_contact_info.check_duplicate(other)) - }) - { - return Err(GossipError::DuplicateNodeInstance); + let instance = self.instance.read().unwrap(); + let check_duplicate_instance = |values: &[CrdsValue]| { + if should_check_duplicate_instance { + for value in values { + if instance.check_duplicate(value) { + return Err(GossipError::DuplicateNodeInstance); + } } - Ok(()) } + Ok(()) }; let mut pings = Vec::new(); let mut rng = rand::thread_rng(); diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 395f485f12516d..9a5c1ce495813b 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -435,14 +435,6 @@ impl ContactInfo { node.set_serve_repair_quic((addr, port + 4)).unwrap(); node } - - // Returns true if the other contact-info is a duplicate instance of this - // node, with a more recent `outset` timestamp. - #[inline] - #[must_use] - pub(crate) fn check_duplicate(&self, other: &ContactInfo) -> bool { - self.pubkey == other.pubkey && self.outset < other.outset - } } impl Default for ContactInfo { @@ -1024,59 +1016,4 @@ mod tests { Err(Error::InvalidPort(0)) ); } - - #[test] - fn test_check_duplicate() { - let mut rng = rand::thread_rng(); - let mut node = ContactInfo::new( - Keypair::new().pubkey(), - rng.gen(), // wallclock - rng.gen(), // shred_version - ); - // Same contact-info is not a duplicate instance. - { - let other = node.clone(); - assert!(!node.check_duplicate(&other)); - assert!(!other.check_duplicate(&node)); - } - // Updated socket address is not a duplicate instance. 
- { - let mut other = node.clone(); - other.set_gossip(new_rand_socket(&mut rng)).unwrap(); - other.set_serve_repair(new_rand_socket(&mut rng)).unwrap(); - assert!(!node.check_duplicate(&other)); - assert!(!other.check_duplicate(&node)); - other.remove_serve_repair(); - assert!(!node.check_duplicate(&other)); - assert!(!other.check_duplicate(&node)); - } - // Updated wallclock is not a duplicate instance. - { - let other = node.clone(); - node.set_wallclock(rng.gen()); - assert!(!node.check_duplicate(&other)); - assert!(!other.check_duplicate(&node)); - } - // Different pubkey is not a duplicate instance. - { - let other = ContactInfo::new( - Keypair::new().pubkey(), - rng.gen(), // wallclock - rng.gen(), // shred_version - ); - assert!(!node.check_duplicate(&other)); - assert!(!other.check_duplicate(&node)); - } - // Same pubkey, more recent outset timestamp is a duplicate instance. - { - let other = ContactInfo::new( - node.pubkey, - rng.gen(), // wallclock - rng.gen(), // shred_version - ); - assert!(node.outset < other.outset); - assert!(node.check_duplicate(&other)); - assert!(!other.check_duplicate(&node)); - } - } } From 2cc0b4ac0fec0388a9cb972ce15e875c21e995f6 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 9 Aug 2024 15:09:35 +0800 Subject: [PATCH 064/529] clippy: byte_char_slices (#2494) fix byte_char_slices --- zk-sdk/src/range_proof/generators.rs | 4 ++-- zk-token-sdk/src/range_proof/generators.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/zk-sdk/src/range_proof/generators.rs b/zk-sdk/src/range_proof/generators.rs index f67baf5fef2d4b..901f4f83027a41 100644 --- a/zk-sdk/src/range_proof/generators.rs +++ b/zk-sdk/src/range_proof/generators.rs @@ -95,13 +95,13 @@ impl RangeProofGens { } self.G_vec.extend( - &mut GeneratorsChain::new(&[b'G']) + &mut GeneratorsChain::new(b"G") .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); self.H_vec.extend( - &mut GeneratorsChain::new(&[b'H']) + &mut GeneratorsChain::new(b"H") .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); diff --git a/zk-token-sdk/src/range_proof/generators.rs b/zk-token-sdk/src/range_proof/generators.rs index da2dcbcf7cb734..b33ce32001a359 100644 --- a/zk-token-sdk/src/range_proof/generators.rs +++ b/zk-token-sdk/src/range_proof/generators.rs @@ -96,13 +96,13 @@ impl BulletproofGens { } self.G_vec.extend( - &mut GeneratorsChain::new(&[b'G']) + &mut GeneratorsChain::new(b"G") .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); self.H_vec.extend( - &mut GeneratorsChain::new(&[b'H']) + &mut GeneratorsChain::new(b"H") .fast_forward(self.gens_capacity) .take(new_capacity - self.gens_capacity), ); From 4ac13008735fe65d82fde35fb6517a6e376b664e Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 9 Aug 2024 15:10:15 +0800 Subject: [PATCH 065/529] clippy: manual_inspect (#2496) fix manual_inspect --- install/src/command.rs | 3 +-- ledger/src/ancestor_iterator.rs | 3 +-- poh/src/poh_recorder.rs | 3 +-- remote-wallet/src/bin/ledger-udev.rs | 3 +-- svm/src/transaction_processor.rs | 3 +-- test-validator/src/lib.rs | 3 +-- turbine/src/retransmit_stage.rs | 10 ++++------ 7 files changed, 10 insertions(+), 18 deletions(-) diff --git a/install/src/command.rs b/install/src/command.rs index 8a81e1d72337ec..30996f564c1220 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -130,9 +130,8 @@ fn download_to_temp( impl Read for DownloadProgress { fn read(&mut self, buf: &mut [u8]) -> io::Result { - 
self.response.read(buf).map(|n| { + self.response.read(buf).inspect(|&n| { self.progress_bar.inc(n as u64); - n }) } } diff --git a/ledger/src/ancestor_iterator.rs b/ledger/src/ancestor_iterator.rs index dc1abc774114bc..50761e63d6286a 100644 --- a/ledger/src/ancestor_iterator.rs +++ b/ledger/src/ancestor_iterator.rs @@ -35,7 +35,7 @@ impl<'a> Iterator for AncestorIterator<'a> { fn next(&mut self) -> Option { let current = self.current; - current.map(|slot| { + current.inspect(|&slot| { if slot != 0 { self.current = self .blockstore @@ -45,7 +45,6 @@ impl<'a> Iterator for AncestorIterator<'a> { } else { self.current = None; } - slot }) } } diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index fab2d9f62559e6..4bbd0ad3eb8214 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -992,11 +992,10 @@ impl PohRecorder { self.send_entry_us += send_entry_us; send_entry_res?; let starting_transaction_index = - working_bank.transaction_index.map(|transaction_index| { + working_bank.transaction_index.inspect(|transaction_index| { let next_starting_transaction_index = transaction_index.saturating_add(num_transactions); working_bank.transaction_index = Some(next_starting_transaction_index); - transaction_index }); return Ok(starting_transaction_index); } diff --git a/remote-wallet/src/bin/ledger-udev.rs b/remote-wallet/src/bin/ledger-udev.rs index fef91aeb518158..0f7de7098914e9 100644 --- a/remote-wallet/src/bin/ledger-udev.rs +++ b/remote-wallet/src/bin/ledger-udev.rs @@ -27,9 +27,8 @@ fn main() -> Result<(), Box> { .append(true) .create(true) .open(LEDGER_UDEV_RULES_LOCATION) - .map_err(|e| { + .inspect_err(|_e| { println!("Could not write to file; this script requires sudo privileges"); - e })?; file.write_all(LEDGER_UDEV_RULES.as_bytes())?; diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index f7862724e7f314..fa2bd505948a24 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -407,9 +407,8 @@ impl TransactionBatchProcessor { let compute_budget_limits = process_compute_budget_instructions( message.program_instructions_iter(), ) - .map_err(|err| { + .inspect_err(|_err| { error_counters.invalid_compute_budget += 1; - err })?; let fee_payer_address = message.fee_payer(); diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 218cf9e4141f18..a86b5c79d87070 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -629,14 +629,13 @@ impl TestValidatorGenesis { socket_addr_space, rpc_to_plugin_manager_receiver, ) - .map(|test_validator| { + .inspect(|test_validator| { let runtime = tokio::runtime::Builder::new_current_thread() .enable_io() .enable_time() .build() .unwrap(); runtime.block_on(test_validator.wait_for_nonzero_fees()); - test_validator }) } diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index 32537db6b9abe9..d8d13e7f935cad 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -265,9 +265,8 @@ fn retransmit( quic_endpoint_sender, stats, ) - .map_err(|err| { - stats.record_error(&err); - err + .inspect_err(|err| { + stats.record_error(err); }) .ok()?; Some((key.slot(), root_distance, num_nodes)) @@ -290,9 +289,8 @@ fn retransmit( quic_endpoint_sender, stats, ) - .map_err(|err| { - stats.record_error(&err); - err + .inspect_err(|err| { + stats.record_error(err); }) .ok()?; Some((key.slot(), root_distance, num_nodes)) From aee7e84ebee8bf0ce134904a96dcbc1d3654f075 Mon Sep 17 00:00:00 2001 From: Yihau Chen 
Date: Fri, 9 Aug 2024 15:10:28 +0800
Subject: [PATCH 066/529] clippy: needless_borrows_for_generic_args (#2495)

fix needless_borrows_for_generic_args
---
 keygen/src/keygen.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs
index 0965b4ce8ad04b..faf3a493ff7a7b 100644
--- a/keygen/src/keygen.rs
+++ b/keygen/src/keygen.rs
@@ -703,7 +703,7 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                         .count
                         .fetch_sub(1, Ordering::Relaxed);
                     if !no_outfile {
-                        write_keypair_file(&keypair, &format!("{}.json", keypair.pubkey()))
+                        write_keypair_file(&keypair, format!("{}.json", keypair.pubkey()))
                             .unwrap();
                         println!(
                             "Wrote keypair to {}",

From c63fe65af657d59b9b7a31953d7eddf0867b2f40 Mon Sep 17 00:00:00 2001
From: Yihau Chen
Date: Fri, 9 Aug 2024 15:12:34 +0800
Subject: [PATCH 067/529] removed unused struct, MockInstruction (#2491)

removed unused struct
---
 svm/src/message_processor.rs | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs
index 21348bc1ae7d95..4bcc7d37492ee8 100644
--- a/svm/src/message_processor.rs
+++ b/svm/src/message_processor.rs
@@ -170,15 +170,6 @@ mod tests {
         std::sync::Arc,
     };

-    #[derive(Debug, serde_derive::Serialize, serde_derive::Deserialize)]
-    enum MockInstruction {
-        NoopSuccess,
-        NoopFail,
-        ModifyOwned,
-        ModifyNotOwned,
-        ModifyReadonly,
-    }
-
     fn new_sanitized_message(message: Message) -> SanitizedMessage {
         SanitizedMessage::try_from_legacy_message(message, &ReservedAccountKeys::empty_key_set())
             .unwrap()

From c20b1d64a6db418264da2ec71a7f6939f272ac55 Mon Sep 17 00:00:00 2001
From: Yihau Chen
Date: Fri, 9 Aug 2024 15:12:45 +0800
Subject: [PATCH 068/529] fix unused-must-use (#2490)

unused-must-use
---
 install/src/command.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/install/src/command.rs b/install/src/command.rs
index 30996f564c1220..014753128fcec6 100644
--- a/install/src/command.rs
+++ b/install/src/command.rs
@@ -507,7 +507,7 @@ fn add_to_path(new_path: &str) -> bool {
             Ok(())
         }
         append_file(&rcfile, &shell_export_string).unwrap_or_else(|err| {
-            format!("Unable to append to {rcfile:?}: {err}");
+            println!("Unable to append to {rcfile:?}: {err}");
         });
         modified_rcfiles = true;
     }

From e769f3a3944c398a11daebcfb6428893c7449f6d Mon Sep 17 00:00:00 2001
From: Yihau Chen
Date: Fri, 9 Aug 2024 15:12:55 +0800
Subject: [PATCH 069/529] clippy: manual_unwrap_or (#2489)

---
 rpc-client/src/mock_sender.rs | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs
index 9730a6ff24a983..6654804cc32c25 100644
--- a/rpc-client/src/mock_sender.rs
+++ b/rpc-client/src/mock_sender.rs
@@ -242,11 +242,7 @@ impl RpcSender for MockSender {
                             range: RpcBlockProductionRange {
                                 first_slot: config_range.first_slot,
                                 last_slot: {
-                                    if let Some(last_slot) = config_range.last_slot {
-                                        last_slot
-                                    } else {
-                                        2
-                                    }
+                                    config_range.last_slot.unwrap_or(2)
                                 },
                             },
                         },

From adbc39cb51b05a9569cbd18cfd91c1b83b961aaf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 9 Aug 2024 19:46:39 +0800
Subject: [PATCH 070/529] build(deps): bump lazy-lru from 0.1.2 to 0.1.3 (#2522)

* build(deps): bump lazy-lru from 0.1.2 to 0.1.3

Bumps [lazy-lru](https://github.com/behzadnouri/lazy-lru) from 0.1.2 to 0.1.3.
- [Commits](https://github.com/behzadnouri/lazy-lru/commits) --- updated-dependencies: - dependency-name: lazy-lru dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fe1a845f15f33..18ae0c7ee0c91b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3174,9 +3174,9 @@ dependencies = [ [[package]] name = "lazy-lru" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81b33bc1276f3df38e938ed17bbb3d5c5eef758aa1a9997ec8388799ba3eef1" +checksum = "8b031495510a5a17bfb14e9f1fc00f6efdebfaa9ab04a876a4e153b042a3fe06" dependencies = [ "hashbrown 0.14.3", ] diff --git a/Cargo.toml b/Cargo.toml index 67a0d73b87e818..a23e2c27bc3e0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -261,7 +261,7 @@ jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" -lazy-lru = "0.1.2" +lazy-lru = "0.1.3" lazy_static = "1.5.0" libc = "0.2.155" libloading = "0.7.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 137387b34eabef..05d1ae23ac8eb8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2500,9 +2500,9 @@ dependencies = [ [[package]] name = "lazy-lru" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81b33bc1276f3df38e938ed17bbb3d5c5eef758aa1a9997ec8388799ba3eef1" +checksum = "8b031495510a5a17bfb14e9f1fc00f6efdebfaa9ab04a876a4e153b042a3fe06" dependencies = [ "hashbrown 0.14.3", ] From 82a8dcc1b78ac1107e827082889a0b3cb758e054 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 9 Aug 2024 19:48:21 +0800 Subject: [PATCH 071/529] refactor: serde stakes (#2519) --- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/epoch_stakes.rs | 4 +-- runtime/src/serde_snapshot.rs | 4 +-- runtime/src/stakes.rs | 2 +- runtime/src/stakes/serde_stakes.rs | 42 +++++++++++++++--------------- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 9159ea0ca45d36..70ab7e01a473a8 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -536,7 +536,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "DnUdXXELygo14vA8d6QoXo5bkJAQbTWqWW5Qf9RXXWgZ") + frozen_abi(digest = "HRBDXrGrHMZU4cNebKHT7jEmhrgd3h1c2qUMMywrGPiq") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index d38695f642471c..6400e05c3b2e5d 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -1,7 +1,7 @@ use { crate::{ stake_account::StakeAccount, - stakes::{Stakes, StakesEnum}, + stakes::{serde_stakes_to_delegation_format, Stakes, StakesEnum}, }, serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_sdk::{clock::Epoch, pubkey::Pubkey, stake::state::Stake}, @@ -24,7 +24,7 @@ pub struct NodeVoteAccounts { #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))] pub struct EpochStakes { - #[serde(with = "crate::stakes::serde_stakes_enum_compat")] + 
#[serde(with = "serde_stakes_to_delegation_format")] stakes: Arc, total_stake: u64, node_id_to_vote_accounts: Arc, diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 3c923d72893a72..b49e336ac4aa27 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -10,7 +10,7 @@ use { runtime_config::RuntimeConfig, serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{SnapshotError, StorageAndNextAccountsFileId}, - stakes::{serde_stakes_enum_compat, Stakes, StakesEnum}, + stakes::{serde_stakes_to_delegation_format, Stakes, StakesEnum}, }, bincode::{self, config::Options, Error}, log::*, @@ -237,7 +237,7 @@ struct SerializableVersionedBank { rent_collector: RentCollector, epoch_schedule: EpochSchedule, inflation: Inflation, - #[serde(serialize_with = "serde_stakes_enum_compat::serialize")] + #[serde(serialize_with = "serde_stakes_to_delegation_format::serialize")] stakes: StakesEnum, unused_accounts: UnusedAccounts, epoch_stakes: HashMap, diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 085289ef95d932..22dcd3931d65c9 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -27,7 +27,7 @@ use { }; mod serde_stakes; -pub(crate) use serde_stakes::serde_stakes_enum_compat; +pub(crate) use serde_stakes::serde_stakes_to_delegation_format; #[derive(Debug, Error)] pub enum Error { diff --git a/runtime/src/stakes/serde_stakes.rs b/runtime/src/stakes/serde_stakes.rs index c96cef1b4327ae..b7c260b29426f1 100644 --- a/runtime/src/stakes/serde_stakes.rs +++ b/runtime/src/stakes/serde_stakes.rs @@ -11,7 +11,7 @@ use { // In order to maintain backward compatibility, the StakesEnum in EpochStakes // and SerializableVersionedBank should be serialized as Stakes. -pub(crate) mod serde_stakes_enum_compat { +pub(crate) mod serde_stakes_to_delegation_format { use { super::*, serde::{Deserialize, Deserializer, Serialize, Serializer}, @@ -23,9 +23,9 @@ pub(crate) mod serde_stakes_enum_compat { { match stakes { StakesEnum::Delegations(stakes) => stakes.serialize(serializer), - StakesEnum::Stakes(stakes) => serialize_stakes_as_delegations(stakes, serializer), + StakesEnum::Stakes(stakes) => serialize_stakes_to_delegation_format(stakes, serializer), StakesEnum::Accounts(stakes) => { - serialize_stake_accounts_as_delegations(stakes, serializer) + serialize_stake_accounts_to_delegation_format(stakes, serializer) } } } @@ -39,21 +39,21 @@ pub(crate) mod serde_stakes_enum_compat { } } -fn serialize_stakes_as_delegations( +fn serialize_stakes_to_delegation_format( stakes: &Stakes, serializer: S, ) -> Result { - SerdeStakeVariantStakes::from(stakes.clone()).serialize(serializer) + SerdeStakesToDelegationFormat::from(stakes.clone()).serialize(serializer) } -fn serialize_stake_accounts_as_delegations( +fn serialize_stake_accounts_to_delegation_format( stakes: &Stakes, serializer: S, ) -> Result { - SerdeStakeAccountVariantStakes::from(stakes.clone()).serialize(serializer) + SerdeStakeAccountsToDelegationFormat::from(stakes.clone()).serialize(serializer) } -impl From> for SerdeStakeVariantStakes { +impl From> for SerdeStakesToDelegationFormat { fn from(stakes: Stakes) -> Self { let Stakes { vote_accounts, @@ -65,7 +65,7 @@ impl From> for SerdeStakeVariantStakes { Self { vote_accounts, - stake_delegations: SerdeStakeMapWrapper(stake_delegations), + stake_delegations: SerdeStakeMapToDelegationFormat(stake_delegations), unused, epoch, stake_history, @@ -73,7 +73,7 @@ impl From> for SerdeStakeVariantStakes { } } -impl From> for 
SerdeStakeAccountVariantStakes { +impl From> for SerdeStakeAccountsToDelegationFormat { fn from(stakes: Stakes) -> Self { let Stakes { vote_accounts, @@ -85,7 +85,7 @@ impl From> for SerdeStakeAccountVariantStakes { Self { vote_accounts, - stake_delegations: SerdeStakeAccountMapWrapper(stake_delegations), + stake_delegations: SerdeStakeAccountMapToDelegationFormat(stake_delegations), unused, epoch, stake_history, @@ -95,9 +95,9 @@ impl From> for SerdeStakeAccountVariantStakes { #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Serialize)] -struct SerdeStakeVariantStakes { +struct SerdeStakesToDelegationFormat { vote_accounts: VoteAccounts, - stake_delegations: SerdeStakeMapWrapper, + stake_delegations: SerdeStakeMapToDelegationFormat, unused: u64, epoch: Epoch, stake_history: StakeHistory, @@ -105,17 +105,17 @@ struct SerdeStakeVariantStakes { #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Serialize)] -struct SerdeStakeAccountVariantStakes { +struct SerdeStakeAccountsToDelegationFormat { vote_accounts: VoteAccounts, - stake_delegations: SerdeStakeAccountMapWrapper, + stake_delegations: SerdeStakeAccountMapToDelegationFormat, unused: u64, epoch: Epoch, stake_history: StakeHistory, } #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -struct SerdeStakeMapWrapper(ImHashMap); -impl Serialize for SerdeStakeMapWrapper { +struct SerdeStakeMapToDelegationFormat(ImHashMap); +impl Serialize for SerdeStakeMapToDelegationFormat { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -129,8 +129,8 @@ impl Serialize for SerdeStakeMapWrapper { } #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -struct SerdeStakeAccountMapWrapper(ImHashMap); -impl Serialize for SerdeStakeAccountMapWrapper { +struct SerdeStakeAccountMapToDelegationFormat(ImHashMap); +impl Serialize for SerdeStakeAccountMapToDelegationFormat { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -151,11 +151,11 @@ mod tests { }; #[test] - fn test_serde_stakes_enum_compat() { + fn test_serde_stakes_to_delegation_format() { #[derive(Debug, PartialEq, Deserialize, Serialize)] struct Dummy { head: String, - #[serde(with = "serde_stakes_enum_compat")] + #[serde(with = "serde_stakes_to_delegation_format")] stakes: Arc, tail: String, } From 029463166afc073351bda560c8c32b6502260063 Mon Sep 17 00:00:00 2001 From: Jon C Date: Fri, 9 Aug 2024 14:12:49 +0200 Subject: [PATCH 072/529] ci: Add rule for Firedancer review on native programs (#2233) * ci: Add rule for Firedancer review on native programs The Firedancer team maintains a line-for-line reimplementation of the native programs, and until native programs are moved to BPF, those implementations must exactly match their Agave counterparts. In the past, the Firedancer team has requested to be included in reviews that touch code in the native programs. To make it harder to forget, add an automated to any PRs that touch native programs. I've omitted test crates and and the ZK Token Proof program, since I believe that won't be included in Firedancer. * Add stake and vote in SDK * Update language --- .mergify.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index d50fd5e1e277ba..ae3b1a27d4565a 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -183,6 +183,29 @@ pull_request_rules: 2. 
Open a follow-up PR to update the JavaScript client `@solana/web3.js` ([example](https://github.com/solana-labs/solana-web3.js/pull/2868/files)) Thank you for keeping the RPC clients in sync with the server API @{{author}}. + - name: Reminder to add Firedancer team to changes in `programs/` and `sdk/` + conditions: + - or: + - files~=^programs/address-lookup-table/src/.*\.rs$ + - files~=^programs/bpf_loader/src/.*\.rs$ + - files~=^programs/compute_budget/src/.*\.rs$ + - files~=^programs/config/src/.*\.rs$ + - files~=^programs/loader-v4/src/.*\.rs$ + - files~=^programs/stake/src/.*\.rs$ + - files~=^programs/system/src/.*\.rs$ + - files~=^programs/vote/src/.*\.rs$ + - files~=^programs/zk-elgamal-proof/src/.*\.rs$ + - files~=^sdk/program/src/stake.*\.rs$ # includes stake_history.rs + - files~=^sdk/program/src/vote/.*\.rs$ + actions: + comment: + message: | + The Firedancer team maintains a line-for-line reimplementation of the + native programs, and until native programs are moved to BPF, those + implementations must exactly match their Agave counterparts. + If this PR represents a change to a native program implementation (not + tests), please include a reviewer from the Firedancer team. And please + keep refactors to a minimum. commands_restrictions: # The author of copied PRs is the Mergify user. From 8b95e7debd9858b8d55fb71215a0b5005afafa14 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 9 Aug 2024 09:36:44 -0400 Subject: [PATCH 073/529] hash-cache-tool: Adds command to diff state (#2505) --- Cargo.lock | 1 + .../accounts-hash-cache-tool/Cargo.toml | 1 + .../accounts-hash-cache-tool/src/main.rs | 211 ++++++++++++++++++ 3 files changed, 213 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 18ae0c7ee0c91b..fed5e828bdbe1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -71,6 +71,7 @@ dependencies = [ "bytemuck", "clap 2.33.3", "memmap2", + "rayon", "solana-accounts-db", "solana-program", "solana-version", diff --git a/accounts-db/accounts-hash-cache-tool/Cargo.toml b/accounts-db/accounts-hash-cache-tool/Cargo.toml index dc41b8212c97b2..908875a4662ceb 100644 --- a/accounts-db/accounts-hash-cache-tool/Cargo.toml +++ b/accounts-db/accounts-hash-cache-tool/Cargo.toml @@ -14,6 +14,7 @@ ahash = { workspace = true } bytemuck = { workspace = true } clap = { workspace = true } memmap2 = { workspace = true } +rayon = { workspace = true } solana-accounts-db = { workspace = true } solana-program = { workspace = true } solana-version = { workspace = true } diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 98b9f914143f11..7817b35c4fac3d 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -6,6 +6,7 @@ use { SubCommand, }, memmap2::Mmap, + rayon::prelude::*, solana_accounts_db::{ accounts_hash::AccountHash, parse_cache_hash_data_filename, pubkey_bins::PubkeyBinCalculator24, CacheHashDataFileEntry, CacheHashDataFileHeader, @@ -20,6 +21,7 @@ use { mem::size_of, num::Saturating, path::{Path, PathBuf}, + sync::RwLock, time::Instant, }, }; @@ -28,6 +30,7 @@ const CMD_INSPECT: &str = "inspect"; const CMD_DIFF: &str = "diff"; const CMD_DIFF_FILES: &str = "files"; const CMD_DIFF_DIRS: &str = "directories"; +const CMD_DIFF_STATE: &str = "state"; fn main() { let matches = App::new(crate_name!()) @@ -102,6 +105,29 @@ fn main() { .takes_value(false) .help("After diff-ing the directories, diff the files that were found to have mismatches"), ), + ) + .subcommand( + 
SubCommand::with_name(CMD_DIFF_STATE) + .about("Diff the final state of two accounts hash cache directories") + .long_about( + "Diff the final state of two accounts hash cache directories. \ + Load all the latest entries from each directory, then compare \ + the final states for anything missing or mismatching." + ) + .arg( + Arg::with_name("path1") + .index(1) + .takes_value(true) + .value_name("PATH1") + .help("Accounts hash cache directory 1 to diff"), + ) + .arg( + Arg::with_name("path2") + .index(2) + .takes_value(true) + .value_name("PATH2") + .help("Accounts hash cache directory 2 to diff"), + ), ), ) .get_matches(); @@ -119,6 +145,9 @@ fn main() { (CMD_DIFF_DIRS, Some(diff_subcommand_matches)) => { cmd_diff_dirs(&matches, diff_subcommand_matches) } + (CMD_DIFF_STATE, Some(diff_subcommand_matches)) => { + cmd_diff_state(&matches, diff_subcommand_matches) + } _ => unreachable!(), } } @@ -158,6 +187,15 @@ fn cmd_diff_dirs( do_diff_dirs(path1, path2, then_diff_files) } +fn cmd_diff_state( + _app_matches: &ArgMatches<'_>, + subcommand_matches: &ArgMatches<'_>, +) -> Result<(), String> { + let path1 = value_t_or_exit!(subcommand_matches, "path1", String); + let path2 = value_t_or_exit!(subcommand_matches, "path2", String); + do_diff_state(path1, path2) +} + fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { let (reader, header) = open_file(&file, force).map_err(|err| { format!( @@ -440,6 +478,159 @@ fn do_diff_dirs( Ok(()) } +fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), String> { + const NUM_BINS: usize = 8192; + let extract = |dir: &Path| -> Result<_, String> { + let files = + get_cache_files_in(dir).map_err(|err| format!("failed to get cache files: {err}"))?; + let BinnedLatestEntriesInfo { + latest_entries, + capitalization, + } = extract_binned_latest_entries_in(files.iter().map(|file| &file.path), NUM_BINS) + .map_err(|err| format!("failed to extract entries: {err}"))?; + let num_accounts: usize = latest_entries.iter().map(|bin| bin.len()).sum(); + let entries = Vec::from(latest_entries); + let state: Box<_> = entries.into_iter().map(RwLock::new).collect(); + Ok((state, capitalization, num_accounts)) + }; + + let timer = LoggingTimer::new("Reconstructing state"); + let dir1 = dir1.as_ref(); + let dir2 = dir2.as_ref(); + let (state1, state2) = rayon::join(|| extract(dir1), || extract(dir2)); + let (state1, capitalization1, num_accounts1) = state1 + .map_err(|err| format!("failed to get state for dir 1 '{}': {err}", dir1.display()))?; + let (state2, capitalization2, num_accounts2) = state2 + .map_err(|err| format!("failed to get state for dir 2 '{}': {err}", dir2.display()))?; + drop(timer); + + let timer = LoggingTimer::new("Diffing state"); + let (mut mismatch_entries, mut unique_entries1) = (0..NUM_BINS) + .into_par_iter() + .map(|bindex| { + let mut bin1 = state1[bindex].write().unwrap(); + let mut bin2 = state2[bindex].write().unwrap(); + + let mut mismatch_entries = Vec::new(); + let mut unique_entries1 = Vec::new(); + for entry1 in bin1.drain() { + let (key1, value1) = entry1; + match bin2.remove(&key1) { + Some(value2) => { + // the pubkey was found in both states, so compare the hashes and lamports + if value1 == value2 { + // hashes and lamports are equal, so nothing to do + } else { + // otherwise we have a mismatch; note it + mismatch_entries.push((key1, value1, value2)); + } + } + None => { + // this pubkey was *not* found in state2, so its a unique entry in state1 + unique_entries1.push((key1, value1)); + } + } + } + 
(mismatch_entries, unique_entries1) + }) + .reduce( + || (Vec::new(), Vec::new()), + |mut accum, elem| { + accum.0.extend(elem.0); + accum.1.extend(elem.1); + accum + }, + ); + drop(timer); + + // all the remaining entries in state2 are the ones *not* found in state1 + let mut unique_entries2 = Vec::new(); + for bin in Vec::from(state2).into_iter() { + let mut bin = bin.write().unwrap(); + unique_entries2.extend(bin.drain()); + } + + // sort all the results by pubkey to make them saner to view + let timer = LoggingTimer::new("Sorting results"); + unique_entries1.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + unique_entries2.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + mismatch_entries.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + drop(timer); + + let num_accounts_width = { + let width1 = (num_accounts1 as f64).log10().ceil() as usize; + let width2 = (num_accounts2 as f64).log10().ceil() as usize; + cmp::max(width1, width2) + }; + let lamports_width = { + let width1 = (capitalization1 as f64).log10().ceil() as usize; + let width2 = (capitalization2 as f64).log10().ceil() as usize; + cmp::max(width1, width2) + }; + + println!("State 1: total number of accounts: {num_accounts1:num_accounts_width$}, total capitalization: {capitalization1:lamports_width$} lamports"); + println!("State 2: total number of accounts: {num_accounts2:num_accounts_width$}, total capitalization: {capitalization2:lamports_width$} lamports"); + + println!("Unique entries in state 1:"); + if unique_entries1.is_empty() { + println!("(none)"); + } else { + let count_width = (unique_entries1.len() as f64).log10().ceil() as usize; + let mut total_lamports = Saturating(0); + for (i, entry) in unique_entries1.iter().enumerate() { + total_lamports += entry.1 .1; + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", + entry.0.to_string(), + entry.1 .0 .0.to_string(), + entry.1 .1, + ); + } + println!("total lamports: {}", total_lamports.0); + } + + println!("Unique entries in state 2:"); + if unique_entries1.is_empty() { + println!("(none)"); + } else { + let count_width = (unique_entries2.len() as f64).log10().ceil() as usize; + let mut total_lamports = Saturating(0); + for (i, entry) in unique_entries2.iter().enumerate() { + total_lamports += entry.1 .1; + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", + entry.0.to_string(), + entry.1 .0 .0.to_string(), + entry.1 .1, + ); + } + println!("total lamports: {}", total_lamports.0); + } + + println!("Mismatch values:"); + let count_width = (mismatch_entries.len() as f64).log10().ceil() as usize; + if mismatch_entries.is_empty() { + println!("(none)"); + } else { + for (i, (pubkey, value1, value2)) in mismatch_entries.iter().enumerate() { + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", + pubkey.to_string(), + value1.0 .0.to_string(), + value1.1, + ); + println!( + "{i:count_width$}: {:52}, hash: {:44}, lamports: {:lamports_width$}", + "(state 2 same)", + value2.0 .0.to_string(), + value2.1, + ); + } + } + + Ok(()) +} + /// Returns all the cache hash data files in `dir`, sorted in ascending slot-and-bin-range order fn get_cache_files_in(dir: impl AsRef) -> Result, io::Error> { fn get_files_in(dir: impl AsRef) -> Result, io::Error> { @@ -665,6 +856,26 @@ struct BinnedLatestEntriesInfo { capitalization: u64, // lamports } +#[derive(Debug)] +struct LoggingTimer { + _elapsed_on_drop: ElapsedOnDrop, +} + +impl LoggingTimer { + #[must_use] + fn new(message: impl Into) -> Self { + 
let message = message.into(); + let elapsed_on_drop = ElapsedOnDrop { + message: format!("{message}... Done in "), + start: Instant::now(), + }; + println!("{message}..."); + Self { + _elapsed_on_drop: elapsed_on_drop, + } + } +} + #[derive(Debug)] struct ElapsedOnDrop { message: String, From 69c72b2da7e546adaabbba7842dfefda21f748e7 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 9 Aug 2024 09:45:40 -0500 Subject: [PATCH 074/529] TransactionView: use u8 to store lengths that are cannot validly exceed u8 (#2508) --- transaction-view/src/signature_meta.rs | 30 +++++++++---------- .../src/static_account_keys_meta.rs | 23 +++++++------- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/transaction-view/src/signature_meta.rs b/transaction-view/src/signature_meta.rs index 9e511068c02147..2d8a8231f1cd0b 100644 --- a/transaction-view/src/signature_meta.rs +++ b/transaction-view/src/signature_meta.rs @@ -6,10 +6,20 @@ use { solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature}, }; +// The packet has a maximum length of 1232 bytes. +// Each signature must be paired with a unique static pubkey, so each +// signature really requires 96 bytes. This means the maximum number of +// signatures in a **valid** transaction packet is 12. +// In our u16 encoding scheme, 12 would be encoded as a single byte. +// Rather than using the u16 decoding, we can simply read the byte and +// verify that the MSB is not set. +const MAX_SIGNATURES_PER_PACKET: u8 = + (PACKET_DATA_SIZE / (core::mem::size_of::() + core::mem::size_of::())) as u8; + /// Meta data for accessing transaction-level signatures in a transaction view. pub(crate) struct SignatureMeta { /// The number of signatures in the transaction. - pub(crate) num_signatures: u16, + pub(crate) num_signatures: u8, /// Offset to the first signature in the transaction packet. pub(crate) offset: u16, } @@ -18,27 +28,17 @@ impl SignatureMeta { /// Get the number of signatures and the offset to the first signature in /// the transaction packet, starting at the given `offset`. pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result { - // The packet has a maximum length of 1232 bytes. - // Each signature must be paired with a unique static pubkey, so each - // signature really requires 96 bytes. This means the maximum number of - // signatures in a **valid** transaction packet is 12. - // In our u16 encoding scheme, 12 would be encoded as a single byte. - // Rather than using the u16 decoding, we can simply read the byte and - // verify that the MSB is not set. - const MAX_SIGNATURES_PER_PACKET: u16 = (PACKET_DATA_SIZE - / (core::mem::size_of::() + core::mem::size_of::())) - as u16; // Maximum number of signatures should be represented by a single byte, // thus the MSB should not be set. const _: () = assert!(MAX_SIGNATURES_PER_PACKET & 0b1000_0000 == 0); - let num_signatures = read_byte(bytes, offset)? 
as u16; + let num_signatures = read_byte(bytes, offset)?; if num_signatures == 0 || num_signatures > MAX_SIGNATURES_PER_PACKET { return Err(TransactionParsingError); } let signature_offset = *offset as u16; - advance_offset_for_array::(bytes, offset, num_signatures)?; + advance_offset_for_array::(bytes, offset, u16::from(num_signatures))?; Ok(Self { num_signatures, @@ -70,7 +70,7 @@ mod tests { #[test] fn test_max_signatures() { - let signatures = vec![Signature::default(); 12]; + let signatures = vec![Signature::default(); usize::from(MAX_SIGNATURES_PER_PACKET)]; let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); let mut offset = 0; let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap(); @@ -92,7 +92,7 @@ mod tests { #[test] fn test_too_many_signatures() { - let signatures = vec![Signature::default(); 13]; + let signatures = vec![Signature::default(); usize::from(MAX_SIGNATURES_PER_PACKET) + 1]; let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); let mut offset = 0; assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err()); diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_meta.rs index 46bf95c15b5e58..9dc26fd2d354fd 100644 --- a/transaction-view/src/static_account_keys_meta.rs +++ b/transaction-view/src/static_account_keys_meta.rs @@ -6,27 +6,28 @@ use { solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey}, }; +// The packet has a maximum length of 1232 bytes. +// This means the maximum number of 32 byte keys is 38. +// 38 as an min-sized encoded u16 is 1 byte. +// We can simply read this byte, if it's >38 we can return None. +const MAX_STATIC_ACCOUNTS_PER_PACKET: u8 = + (PACKET_DATA_SIZE / core::mem::size_of::()) as u8; + /// Contains meta-data about the static account keys in a transaction packet. #[derive(Default)] pub struct StaticAccountKeysMeta { /// The number of static accounts in the transaction. - pub(crate) num_static_accounts: u16, + pub(crate) num_static_accounts: u8, /// The offset to the first static account in the transaction. pub(crate) offset: u16, } impl StaticAccountKeysMeta { pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { - // The packet has a maximum length of 1232 bytes. - // This means the maximum number of 32 byte keys is 38. - // 38 as an min-sized encoded u16 is 1 byte. - // We can simply read this byte, if it's >38 we can return None. - const MAX_STATIC_ACCOUNTS_PER_PACKET: u16 = - (PACKET_DATA_SIZE / core::mem::size_of::()) as u16; // Max size must not have the MSB set so that it is size 1. const _: () = assert!(MAX_STATIC_ACCOUNTS_PER_PACKET & 0b1000_0000 == 0); - let num_static_accounts = read_byte(bytes, offset)? as u16; + let num_static_accounts = read_byte(bytes, offset)?; if num_static_accounts == 0 || num_static_accounts > MAX_STATIC_ACCOUNTS_PER_PACKET { return Err(TransactionParsingError); } @@ -36,7 +37,7 @@ impl StaticAccountKeysMeta { // check if the offset is greater than u16::MAX. let static_accounts_offset = *offset as u16; // Update offset for array of static accounts. 
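// (Note: `num_static_accounts` is stored as a u8 after this change, and the
// guard above already rejected counts over MAX_STATIC_ACCOUNTS_PER_PACKET,
// so the `u16::from` widening at the call below is lossless.)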
- advance_offset_for_array::(bytes, offset, num_static_accounts)?; + advance_offset_for_array::(bytes, offset, u16::from(num_static_accounts))?; Ok(Self { num_static_accounts, @@ -68,7 +69,7 @@ mod tests { #[test] fn test_max_accounts() { - let signatures = vec![Pubkey::default(); 38]; + let signatures = vec![Pubkey::default(); usize::from(MAX_STATIC_ACCOUNTS_PER_PACKET)]; let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); let mut offset = 0; let meta = StaticAccountKeysMeta::try_new(&bytes, &mut offset).unwrap(); @@ -79,7 +80,7 @@ mod tests { #[test] fn test_too_many_accounts() { - let signatures = vec![Pubkey::default(); 39]; + let signatures = vec![Pubkey::default(); usize::from(MAX_STATIC_ACCOUNTS_PER_PACKET) + 1]; let bytes = bincode::serialize(&ShortVec(signatures)).unwrap(); let mut offset = 0; assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err()); From 289a36691e35d589ffc7f036ecf8f46d5a0ad01f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 17:00:51 +0000 Subject: [PATCH 075/529] build(deps): bump serde from 1.0.204 to 1.0.205 (#2486) * build(deps): bump serde from 1.0.204 to 1.0.205 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.204 to 1.0.205. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.204...v1.0.205) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files * bump serde_derive to 1.0.205 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: yihau --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fed5e828bdbe1e..4bd42822c86cf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5016,9 +5016,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.204" +version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "e33aedb1a7135da52b7c21791455563facbbcc43d0f0f66165b42c21b3dfb150" dependencies = [ "serde_derive", ] @@ -5034,9 +5034,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index a23e2c27bc3e0e..5cc529ea29ab86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -319,9 +319,9 @@ rustls = { version = "0.21.12", default-features = false, features = ["quic"] } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" -serde = "1.0.204" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.205" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" -serde_derive = "1.0.204" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_derive = "1.0.205" # must match the serde version, see 
https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.122" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 05d1ae23ac8eb8..b340cf6da24faa 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4170,9 +4170,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.204" +version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "e33aedb1a7135da52b7c21791455563facbbcc43d0f0f66165b42c21b3dfb150" dependencies = [ "serde_derive", ] @@ -4188,9 +4188,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.205" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" dependencies = [ "proc-macro2", "quote", From 935b45aa6b6e32b7712f43c13ff66628605b3719 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Sat, 10 Aug 2024 01:12:01 +0800 Subject: [PATCH 076/529] Rename types and metrics from executed to processed (#2435) --- core/src/banking_stage/committer.rs | 22 +- core/src/banking_stage/consume_worker.rs | 36 +-- core/src/banking_stage/consumer.rs | 124 +++++----- core/src/banking_stage/leader_slot_metrics.rs | 60 ++--- .../leader_slot_timing_metrics.rs | 4 +- ledger/src/blockstore_processor.rs | 4 +- poh/src/poh_recorder.rs | 6 +- runtime/src/bank.rs | 225 +++++++++--------- runtime/src/bank/tests.rs | 31 +-- svm/doc/spec.md | 6 +- svm/examples/paytube/src/settler.rs | 9 +- svm/src/account_saver.rs | 74 +++--- svm/src/lib.rs | 1 + svm/src/transaction_commit_result.rs | 4 +- svm/src/transaction_execution_result.rs | 38 --- svm/src/transaction_processing_result.rs | 48 ++++ svm/src/transaction_processor.rs | 25 +- svm/tests/conformance.rs | 18 +- svm/tests/integration_test.rs | 24 +- 19 files changed, 394 insertions(+), 365 deletions(-) create mode 100644 svm/src/transaction_processing_result.rs diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index d91900299107c8..c020c92866dec0 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -6,7 +6,7 @@ use { }, solana_measure::measure_us, solana_runtime::{ - bank::{Bank, ExecutedTransactionCounts, TransactionBalancesSet}, + bank::{Bank, ProcessedTransactionCounts, TransactionBalancesSet}, bank_utils, prioritization_fee_cache::PrioritizationFeeCache, transaction_batch::TransactionBatch, @@ -15,7 +15,9 @@ use { solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign}, solana_svm::{ transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, - transaction_execution_result::TransactionExecutionResult, + transaction_processing_result::{ + TransactionProcessingResult, TransactionProcessingResultExtensions, + }, }, solana_transaction_status::{ token_balances::TransactionTokenBalancesSet, TransactionTokenBalance, @@ -67,27 +69,27 @@ impl Committer { pub(super) fn commit_transactions( &self, batch: &TransactionBatch, - execution_results: Vec, + processing_results: Vec, last_blockhash: Hash, lamports_per_signature: u64, starting_transaction_index: Option, bank: &Arc, pre_balance_info: &mut PreBalanceInfo, execute_and_commit_timings: &mut LeaderExecuteAndCommitTimings, - 
execution_counts: &ExecutedTransactionCounts, + processed_counts: &ProcessedTransactionCounts, ) -> (u64, Vec) { - let executed_transactions = execution_results + let processed_transactions = processing_results .iter() .zip(batch.sanitized_transactions()) - .filter_map(|(execution_result, tx)| execution_result.was_executed().then_some(tx)) + .filter_map(|(processing_result, tx)| processing_result.was_processed().then_some(tx)) .collect_vec(); let (commit_results, commit_time_us) = measure_us!(bank.commit_transactions( batch.sanitized_transactions(), - execution_results, + processing_results, last_blockhash, lamports_per_signature, - execution_counts, + processed_counts, &mut execute_and_commit_timings.execute_timings, )); execute_and_commit_timings.commit_us = commit_time_us; @@ -122,7 +124,7 @@ impl Committer { starting_transaction_index, ); self.prioritization_fee_cache - .update(bank, executed_transactions.into_iter()); + .update(bank, processed_transactions.into_iter()); }); execute_and_commit_timings.find_and_send_votes_us = find_and_send_votes_us; (commit_time_us, commit_transaction_statuses) @@ -145,7 +147,7 @@ impl Committer { let batch_transaction_indexes: Vec<_> = commit_results .iter() .map(|commit_result| { - if commit_result.was_executed() { + if commit_result.was_committed() { let this_transaction_index = transaction_index; saturating_add_assign!(transaction_index, 1); this_transaction_index diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 449ea9ab963a39..3902ce8829f163 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -234,18 +234,18 @@ impl ConsumeWorkerMetrics { }: &ExecuteAndCommitTransactionsOutput, ) { self.count_metrics - .transactions_attempted_execution_count + .transactions_attempted_processing_count .fetch_add( - transaction_counts.attempted_execution_count, + transaction_counts.attempted_processing_count, Ordering::Relaxed, ); self.count_metrics - .executed_transactions_count - .fetch_add(transaction_counts.executed_count, Ordering::Relaxed); + .processed_transactions_count + .fetch_add(transaction_counts.processed_count, Ordering::Relaxed); self.count_metrics - .executed_with_successful_result_count + .processed_with_successful_result_count .fetch_add( - transaction_counts.executed_with_successful_result_count, + transaction_counts.processed_with_successful_result_count, Ordering::Relaxed, ); self.count_metrics @@ -410,9 +410,9 @@ impl ConsumeWorkerMetrics { } struct ConsumeWorkerCountMetrics { - transactions_attempted_execution_count: AtomicU64, - executed_transactions_count: AtomicU64, - executed_with_successful_result_count: AtomicU64, + transactions_attempted_processing_count: AtomicU64, + processed_transactions_count: AtomicU64, + processed_with_successful_result_count: AtomicU64, retryable_transaction_count: AtomicUsize, retryable_expired_bank_count: AtomicUsize, cost_model_throttled_transactions_count: AtomicU64, @@ -423,9 +423,9 @@ struct ConsumeWorkerCountMetrics { impl Default for ConsumeWorkerCountMetrics { fn default() -> Self { Self { - transactions_attempted_execution_count: AtomicU64::default(), - executed_transactions_count: AtomicU64::default(), - executed_with_successful_result_count: AtomicU64::default(), + transactions_attempted_processing_count: AtomicU64::default(), + processed_transactions_count: AtomicU64::default(), + processed_with_successful_result_count: AtomicU64::default(), retryable_transaction_count: AtomicUsize::default(), 
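// (Note: the counters in this struct are drained with
// swap(0, Ordering::Relaxed) each time the worker emits its
// "banking_stage_worker_counts" datapoint, as shown further below; the
// rename changes their names only, not that behavior.)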
retryable_expired_bank_count: AtomicUsize::default(), cost_model_throttled_transactions_count: AtomicU64::default(), @@ -441,19 +441,19 @@ impl ConsumeWorkerCountMetrics { "banking_stage_worker_counts", "id" => id, ( - "transactions_attempted_execution_count", - self.transactions_attempted_execution_count + "transactions_attempted_processing_count", + self.transactions_attempted_processing_count .swap(0, Ordering::Relaxed), i64 ), ( - "executed_transactions_count", - self.executed_transactions_count.swap(0, Ordering::Relaxed), + "processed_transactions_count", + self.processed_transactions_count.swap(0, Ordering::Relaxed), i64 ), ( - "executed_with_successful_result_count", - self.executed_with_successful_result_count + "processed_with_successful_result_count", + self.processed_with_successful_result_count .swap(0, Ordering::Relaxed), i64 ), diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 9965b1c3214c3d..5d1d7c1637c40c 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -3,7 +3,7 @@ use { committer::{CommitTransactionDetails, Committer, PreBalanceInfo}, immutable_deserialized_packet::ImmutableDeserializedPacket, leader_slot_metrics::{ - LeaderSlotMetricsTracker, ProcessTransactionsCounts, ProcessTransactionsSummary, + CommittedTransactionsCounts, LeaderSlotMetricsTracker, ProcessTransactionsSummary, }, leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, qos_service::QosService, @@ -34,6 +34,7 @@ use { solana_svm::{ account_loader::{validate_fee_payer, TransactionCheckResult}, transaction_error_metrics::TransactionErrorMetrics, + transaction_processing_result::TransactionProcessingResultExtensions, transaction_processor::{ExecutionRecordingConfig, TransactionProcessingConfig}, }, solana_timings::ExecuteTimings, @@ -57,7 +58,7 @@ pub struct ProcessTransactionBatchOutput { pub struct ExecuteAndCommitTransactionsOutput { // Transactions counts reported to `ConsumeWorkerMetrics` and then // accumulated later for `LeaderSlotMetrics` - pub(crate) transaction_counts: ExecuteAndCommitTransactionsCounts, + pub(crate) transaction_counts: LeaderProcessedTransactionCounts, // Transactions that either were not executed, or were executed and failed to be committed due // to the block ending. pub(crate) retryable_transaction_indexes: Vec, @@ -71,15 +72,15 @@ pub struct ExecuteAndCommitTransactionsOutput { } #[derive(Debug, Default, PartialEq)] -pub struct ExecuteAndCommitTransactionsCounts { - // Total number of transactions that were passed as candidates for execution - pub(crate) attempted_execution_count: u64, - // The number of transactions of that were executed. See description of in `ProcessTransactionsSummary` +pub struct LeaderProcessedTransactionCounts { + // Total number of transactions that were passed as candidates for processing + pub(crate) attempted_processing_count: u64, + // The number of transactions of that were processed. See description of in `ProcessTransactionsSummary` // for possible outcomes of execution. - pub(crate) executed_count: u64, - // Total number of the executed transactions that returned success/not + pub(crate) processed_count: u64, + // Total number of the processed transactions that returned success/not // an error. 
- pub(crate) executed_with_successful_result_count: u64, + pub(crate) processed_with_successful_result_count: u64, } pub struct Consumer { @@ -284,7 +285,7 @@ impl Consumer { ) -> ProcessTransactionsSummary { let mut chunk_start = 0; let mut all_retryable_tx_indexes = vec![]; - let mut total_transaction_counts = ProcessTransactionsCounts::default(); + let mut total_transaction_counts = CommittedTransactionsCounts::default(); let mut total_cost_model_throttled_transactions_count: u64 = 0; let mut total_cost_model_us: u64 = 0; let mut total_execute_and_commit_timings = LeaderExecuteAndCommitTimings::default(); @@ -622,22 +623,23 @@ impl Consumer { execute_and_commit_timings.load_execute_us = load_execute_us; let LoadAndExecuteTransactionsOutput { - execution_results, - execution_counts, + processing_results, + processed_counts, } = load_and_execute_transactions_output; - let transaction_counts = ExecuteAndCommitTransactionsCounts { - executed_count: execution_counts.executed_transactions_count, - executed_with_successful_result_count: execution_counts.executed_successfully_count, - attempted_execution_count: execution_results.len() as u64, + let transaction_counts = LeaderProcessedTransactionCounts { + processed_count: processed_counts.processed_transactions_count, + processed_with_successful_result_count: processed_counts + .processed_with_successful_result_count, + attempted_processing_count: processing_results.len() as u64, }; - let (executed_transactions, execution_results_to_transactions_us) = - measure_us!(execution_results + let (processed_transactions, processing_results_to_transactions_us) = + measure_us!(processing_results .iter() .zip(batch.sanitized_transactions()) - .filter_map(|(execution_result, tx)| { - if execution_result.was_executed() { + .filter_map(|(processing_result, tx)| { + if processing_result.was_processed() { Some(tx.to_versioned_transaction()) } else { None @@ -661,7 +663,7 @@ impl Consumer { let (record_transactions_summary, record_us) = measure_us!(self .transaction_recorder - .record_transactions(bank.slot(), executed_transactions)); + .record_transactions(bank.slot(), processed_transactions)); execute_and_commit_timings.record_us = record_us; let RecordTransactionsSummary { @@ -670,13 +672,13 @@ impl Consumer { starting_transaction_index, } = record_transactions_summary; execute_and_commit_timings.record_transactions_timings = RecordTransactionsTimings { - execution_results_to_transactions_us, + processing_results_to_transactions_us, ..record_transactions_timings }; if let Err(recorder_err) = record_transactions_result { - retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map( - |(index, execution_result)| execution_result.was_executed().then_some(index), + retryable_transaction_indexes.extend(processing_results.iter().enumerate().filter_map( + |(index, processing_result)| processing_result.was_processed().then_some(index), )); return ExecuteAndCommitTransactionsOutput { @@ -691,22 +693,22 @@ impl Consumer { } let (commit_time_us, commit_transaction_statuses) = - if execution_counts.executed_transactions_count != 0 { + if processed_counts.processed_transactions_count != 0 { self.committer.commit_transactions( batch, - execution_results, + processing_results, last_blockhash, lamports_per_signature, starting_transaction_index, bank, &mut pre_balance_info, &mut execute_and_commit_timings, - &execution_counts, + &processed_counts, ) } else { ( 0, - vec![CommitTransactionDetails::NotCommitted; execution_results.len()], + 
vec![CommitTransactionDetails::NotCommitted; processing_results.len()], ) }; @@ -727,7 +729,7 @@ impl Consumer { ); debug_assert_eq!( - transaction_counts.attempted_execution_count, + transaction_counts.attempted_processing_count, commit_transaction_statuses.len() as u64, ); @@ -1120,10 +1122,10 @@ mod tests { assert_eq!( transaction_counts, - ExecuteAndCommitTransactionsCounts { - attempted_execution_count: 1, - executed_count: 1, - executed_with_successful_result_count: 1, + LeaderProcessedTransactionCounts { + attempted_processing_count: 1, + processed_count: 1, + processed_with_successful_result_count: 1, } ); assert!(commit_transactions_result.is_ok()); @@ -1168,11 +1170,11 @@ mod tests { } = process_transactions_batch_output.execute_and_commit_transactions_output; assert_eq!( transaction_counts, - ExecuteAndCommitTransactionsCounts { - attempted_execution_count: 1, - // Transactions was still executed, just wasn't committed, so should be counted here. - executed_count: 1, - executed_with_successful_result_count: 1, + LeaderProcessedTransactionCounts { + attempted_processing_count: 1, + // Transaction was still processed, just wasn't committed, so should be counted here. + processed_count: 1, + processed_with_successful_result_count: 1, } ); assert_eq!(retryable_transaction_indexes, vec![0]); @@ -1309,10 +1311,10 @@ mod tests { assert_eq!( transaction_counts, - ExecuteAndCommitTransactionsCounts { - attempted_execution_count: 1, - executed_count: 1, - executed_with_successful_result_count: 0, + LeaderProcessedTransactionCounts { + attempted_processing_count: 1, + processed_count: 1, + processed_with_successful_result_count: 0, } ); assert!(commit_transactions_result.is_ok()); @@ -1415,10 +1417,10 @@ mod tests { assert_eq!( transaction_counts, - ExecuteAndCommitTransactionsCounts { - attempted_execution_count: 1, - executed_count: 0, - executed_with_successful_result_count: 0, + LeaderProcessedTransactionCounts { + attempted_processing_count: 1, + processed_count: 0, + processed_with_successful_result_count: 0, } ); assert!(retryable_transaction_indexes.is_empty()); @@ -1506,7 +1508,7 @@ mod tests { commit_transactions_result, .. } = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transaction_counts.executed_with_successful_result_count, 1); + assert_eq!(transaction_counts.processed_with_successful_result_count, 1); assert!(commit_transactions_result.is_ok()); let block_cost = get_block_cost(); @@ -1537,7 +1539,7 @@ mod tests { retryable_transaction_indexes, .. 
} = process_transactions_batch_output.execute_and_commit_transactions_output; - assert_eq!(transaction_counts.executed_with_successful_result_count, 1); + assert_eq!(transaction_counts.processed_with_successful_result_count, 1); assert!(commit_transactions_result.is_ok()); // first one should have been committed, second one not committed due to AccountInUse error during @@ -1664,10 +1666,10 @@ mod tests { assert_eq!( transaction_counts, - ExecuteAndCommitTransactionsCounts { - attempted_execution_count: 2, - executed_count: 1, - executed_with_successful_result_count: 1, + LeaderProcessedTransactionCounts { + attempted_processing_count: 2, + processed_count: 1, + processed_with_successful_result_count: 1, } ); assert_eq!(retryable_transaction_indexes, vec![1]); @@ -1725,13 +1727,13 @@ mod tests { assert!(!reached_max_poh_height); assert_eq!( transaction_counts, - ProcessTransactionsCounts { - attempted_execution_count: transactions_len as u64, + CommittedTransactionsCounts { + attempted_processing_count: transactions_len as u64, // Both transactions should have been committed, even though one was an error, // because InstructionErrors are committed committed_transactions_count: 2, committed_transactions_with_successful_result_count: 1, - executed_but_failed_commit: 0, + processed_but_failed_commit: 0, } ); assert_eq!( @@ -1786,11 +1788,11 @@ mod tests { assert!(!reached_max_poh_height); assert_eq!( transaction_counts, - ProcessTransactionsCounts { - attempted_execution_count: transactions_len as u64, + CommittedTransactionsCounts { + attempted_processing_count: transactions_len as u64, committed_transactions_count: 2, committed_transactions_with_successful_result_count: 2, - executed_but_failed_commit: 0, + processed_but_failed_commit: 0, } ); @@ -1862,12 +1864,12 @@ mod tests { assert!(reached_max_poh_height); assert_eq!( transaction_counts, - ProcessTransactionsCounts { - attempted_execution_count: 1, + CommittedTransactionsCounts { + attempted_processing_count: 1, // MaxHeightReached error does not commit, should be zero here committed_transactions_count: 0, committed_transactions_with_successful_result_count: 0, - executed_but_failed_commit: 1, + processed_but_failed_commit: 1, } ); diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index 4e290600a4de3c..98cf4d72f92c91 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -1,6 +1,6 @@ use { super::{ - consumer::ExecuteAndCommitTransactionsCounts, + consumer::LeaderProcessedTransactionCounts, leader_slot_timing_metrics::{LeaderExecuteAndCommitTimings, LeaderSlotTimingMetrics}, packet_deserializer::PacketReceiverStats, unprocessed_transaction_storage::{ @@ -13,15 +13,15 @@ use { std::time::Instant, }; -/// A summary of what happened to transactions passed to the execution pipeline. +/// A summary of what happened to transactions passed to the processing pipeline. /// Transactions can -/// 1) Did not even make it to execution due to being filtered out by things like AccountInUse +/// 1) Did not even make it to processing due to being filtered out by things like AccountInUse /// lock conflicts or CostModel compute limits. These types of errors are retryable and /// counted in `Self::retryable_transaction_indexes`. -/// 2) Did not execute due to some fatal error like too old, or duplicate signature. These +/// 2) Did not process due to some fatal error like too old, or duplicate signature. 
These /// will be dropped from the transactions queue and not counted in `Self::retryable_transaction_indexes` -/// 3) Were executed and committed, captured by `transaction_counts` below. -/// 4) Were executed and failed commit, captured by `transaction_counts` below. +/// 3) Were processed and committed, captured by `transaction_counts` below. +/// 4) Were processed and failed commit, captured by `transaction_counts` below. pub(crate) struct ProcessTransactionsSummary { /// Returns true if we hit the end of the block/max PoH height for the block /// before processing all the transactions in the batch. @@ -29,7 +29,7 @@ pub(crate) struct ProcessTransactionsSummary { /// Total transaction counts tracked for reporting `LeaderSlotMetrics`. See /// description of struct above for possible outcomes for these transactions - pub transaction_counts: ProcessTransactionsCounts, + pub transaction_counts: CommittedTransactionsCounts, /// Indexes of transactions in the transactions slice that were not /// committed but are retryable @@ -53,42 +53,42 @@ pub(crate) struct ProcessTransactionsSummary { } #[derive(Debug, Default, PartialEq)] -pub struct ProcessTransactionsCounts { - /// Total number of transactions that were passed as candidates for execution - pub attempted_execution_count: u64, +pub struct CommittedTransactionsCounts { + /// Total number of transactions that were passed as candidates for processing + pub attempted_processing_count: u64, /// Total number of transactions that made it into the block pub committed_transactions_count: u64, - /// Total number of transactions that made it into the block where the - /// transactions output from execution was success/no error. + /// Total number of transactions that made it into the block where the transactions + /// output from processing was success/no error. pub committed_transactions_with_successful_result_count: u64, - /// All transactions that were executed but then failed record because the + /// All transactions that were processed but then failed record because the /// slot ended - pub executed_but_failed_commit: u64, + pub processed_but_failed_commit: u64, } -impl ProcessTransactionsCounts { +impl CommittedTransactionsCounts { pub fn accumulate( &mut self, - transaction_counts: &ExecuteAndCommitTransactionsCounts, + transaction_counts: &LeaderProcessedTransactionCounts, committed: bool, ) { saturating_add_assign!( - self.attempted_execution_count, - transaction_counts.attempted_execution_count + self.attempted_processing_count, + transaction_counts.attempted_processing_count ); if committed { saturating_add_assign!( self.committed_transactions_count, - transaction_counts.executed_count + transaction_counts.processed_count ); saturating_add_assign!( self.committed_transactions_with_successful_result_count, - transaction_counts.executed_with_successful_result_count + transaction_counts.processed_with_successful_result_count ); } else { saturating_add_assign!( - self.executed_but_failed_commit, - transaction_counts.executed_count + self.processed_but_failed_commit, + transaction_counts.processed_count ); } } @@ -173,10 +173,10 @@ struct LeaderSlotPacketCountMetrics { // duplicate signature checks retryable_packets_filtered_count: u64, - // total number of transactions that attempted execution in this slot. Should equal the sum + // total number of transactions that attempted processing in this slot. 
Should equal the sum // of `committed_transactions_count`, `retryable_errored_transaction_count`, and // `nonretryable_errored_transactions_count`. - transactions_attempted_execution_count: u64, + transactions_attempted_processing_count: u64, // total number of transactions that were executed and committed into the block // on this thread @@ -305,8 +305,8 @@ impl LeaderSlotPacketCountMetrics { i64 ), ( - "transactions_attempted_execution_count", - self.transactions_attempted_execution_count, + "transactions_attempted_processing_count", + self.transactions_attempted_processing_count, i64 ), ( @@ -607,8 +607,8 @@ impl LeaderSlotMetricsTracker { saturating_add_assign!( leader_slot_metrics .packet_count_metrics - .transactions_attempted_execution_count, - transaction_counts.attempted_execution_count + .transactions_attempted_processing_count, + transaction_counts.attempted_processing_count ); saturating_add_assign!( @@ -629,7 +629,7 @@ impl LeaderSlotMetricsTracker { leader_slot_metrics .packet_count_metrics .executed_transactions_failed_commit_count, - transaction_counts.executed_but_failed_commit + transaction_counts.processed_but_failed_commit ); saturating_add_assign!( @@ -644,7 +644,7 @@ impl LeaderSlotMetricsTracker { .packet_count_metrics .nonretryable_errored_transactions_count, transaction_counts - .attempted_execution_count + .attempted_processing_count .saturating_sub(transaction_counts.committed_transactions_count) .saturating_sub(retryable_transaction_indexes.len() as u64) ); diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 6dbd697a956010..0de9296ce91aac 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -55,9 +55,9 @@ impl LeaderExecuteAndCommitTimings { "id" => id, ("slot", slot as i64, i64), ( - "execution_results_to_transactions_us", + "processing_results_to_transactions_us", self.record_transactions_timings - .execution_results_to_transactions_us as i64, + .processing_results_to_transactions_us as i64, i64 ), ( diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 24ff5ce8fb7baa..8d8211e02a07c1 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -57,7 +57,7 @@ use { }, }, solana_svm::{ - transaction_commit_result::TransactionCommitResult, + transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, transaction_processor::ExecutionRecordingConfig, }, solana_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings}, @@ -190,7 +190,7 @@ pub fn execute_batch( let committed_transactions = commit_results .iter() .zip(batch.sanitized_transactions()) - .filter_map(|(commit_result, tx)| commit_result.is_ok().then_some(tx)) + .filter_map(|(commit_result, tx)| commit_result.was_committed().then_some(tx)) .collect_vec(); let first_err = get_first_error(batch, &commit_results); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 4bbd0ad3eb8214..5c6f5d26c7f4f5 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -113,7 +113,7 @@ impl Record { #[derive(Default, Debug)] pub struct RecordTransactionsTimings { - pub execution_results_to_transactions_us: u64, + pub processing_results_to_transactions_us: u64, pub hash_us: u64, pub poh_record_us: u64, } @@ -121,8 +121,8 @@ pub struct RecordTransactionsTimings { impl RecordTransactionsTimings { pub fn accumulate(&mut self, other: 
&RecordTransactionsTimings) { saturating_add_assign!( - self.execution_results_to_transactions_us, - other.execution_results_to_transactions_us + self.processing_results_to_transactions_us, + other.processing_results_to_transactions_us ); saturating_add_assign!(self.hash_us, other.hash_us); saturating_add_assign!(self.poh_record_us, other.poh_record_us); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 70fe81e9326063..6b93ec643ee031 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -155,9 +155,12 @@ use { transaction_commit_result::{CommittedTransaction, TransactionCommitResult}, transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::{ - TransactionExecutionDetails, TransactionExecutionResult, TransactionLoadedAccountsStats, + TransactionExecutionDetails, TransactionLoadedAccountsStats, }, transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_result::{ + TransactionProcessingResult, TransactionProcessingResultExtensions, + }, transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingConfig, TransactionProcessingEnvironment, @@ -315,12 +318,12 @@ impl BankRc { } pub struct LoadAndExecuteTransactionsOutput { - // Vector of results indicating whether a transaction was executed or could not - // be executed. Note executed transactions can still have failed! - pub execution_results: Vec, - // Executed transaction counts used to update bank transaction counts and + // Vector of results indicating whether a transaction was processed or could not + // be processed. Note processed transactions can still have failed! + pub processing_results: Vec, + // Processed transaction counts used to update bank transaction counts and // for metrics reporting. - pub execution_counts: ExecutedTransactionCounts, + pub processed_counts: ProcessedTransactionCounts, } pub struct TransactionSimulationResult { @@ -883,10 +886,10 @@ struct PrevEpochInflationRewards { } #[derive(Debug, Default, PartialEq)] -pub struct ExecutedTransactionCounts { - pub executed_transactions_count: u64, - pub executed_successfully_count: u64, - pub executed_non_vote_transactions_count: u64, +pub struct ProcessedTransactionCounts { + pub processed_transactions_count: u64, + pub processed_non_vote_transactions_count: u64, + pub processed_with_successful_result_count: u64, pub signature_count: u64, } @@ -3082,12 +3085,13 @@ impl Bank { fn update_transaction_statuses( &self, sanitized_txs: &[SanitizedTransaction], - execution_results: &[TransactionExecutionResult], + processing_results: &[TransactionProcessingResult], ) { let mut status_cache = self.status_cache.write().unwrap(); - assert_eq!(sanitized_txs.len(), execution_results.len()); - for (tx, execution_result) in sanitized_txs.iter().zip(execution_results) { - if let Some(details) = execution_result.details() { + assert_eq!(sanitized_txs.len(), processing_results.len()); + for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) { + if let Ok(processed_tx) = &processing_result { + let details = &processed_tx.execution_details; // Add the message hash to the status cache to ensure that this message // won't be processed again with a different signature. status_cache.insert( @@ -3315,7 +3319,7 @@ impl Bank { let mut timings = ExecuteTimings::default(); let LoadAndExecuteTransactionsOutput { - mut execution_results, + mut processing_results, .. 
} = self.load_and_execute_transactions( &batch, @@ -3352,18 +3356,15 @@ impl Bank { debug!("simulate_transaction: {:?}", timings); - let execution_result = - execution_results - .pop() - .unwrap_or(TransactionExecutionResult::NotExecuted( - TransactionError::InvalidProgramForExecution, - )); - let flattened_result = execution_result.flattened_result(); + let processing_result = processing_results + .pop() + .unwrap_or(Err(TransactionError::InvalidProgramForExecution)); + let flattened_result = processing_result.flattened_result(); let (post_simulation_accounts, logs, return_data, inner_instructions) = - match execution_result { - TransactionExecutionResult::Executed(executed_tx) => { - let details = executed_tx.execution_details; - let post_simulation_accounts = executed_tx + match processing_result { + Ok(processed_tx) => { + let details = processed_tx.execution_details; + let post_simulation_accounts = processed_tx .loaded_transaction .accounts .into_iter() @@ -3376,7 +3377,7 @@ impl Bank { details.inner_instructions, ) } - TransactionExecutionResult::NotExecuted(_) => (vec![], None, None, None), + Err(_) => (vec![], None, None, None), }; let logs = logs.unwrap_or_default(); @@ -3491,39 +3492,43 @@ impl Bank { timings.accumulate(&sanitized_output.execute_timings); let ((), collect_logs_us) = - measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.execution_results)); + measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.processing_results)); timings.saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_us); - let mut execution_counts = ExecutedTransactionCounts::default(); + let mut processed_counts = ProcessedTransactionCounts::default(); let err_count = &mut error_counters.total; - for (execution_result, tx) in sanitized_output.execution_results.iter().zip(sanitized_txs) { + for (processing_result, tx) in sanitized_output + .processing_results + .iter() + .zip(sanitized_txs) + { if let Some(debug_keys) = &self.transaction_debug_keys { for key in tx.message().account_keys().iter() { if debug_keys.contains(key) { - let result = execution_result.flattened_result(); + let result = processing_result.flattened_result(); info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx); break; } } } - if execution_result.was_executed() { + if processing_result.was_processed() { // Signature count must be accumulated only if the transaction - // is executed, otherwise a mismatched count between banking and - // replay could occur - execution_counts.signature_count += + // is processed, otherwise a mismatched count between banking + // and replay could occur + processed_counts.signature_count += u64::from(tx.message().header().num_required_signatures); - execution_counts.executed_transactions_count += 1; + processed_counts.processed_transactions_count += 1; if !tx.is_simple_vote_transaction() { - execution_counts.executed_non_vote_transactions_count += 1; + processed_counts.processed_non_vote_transactions_count += 1; } } - match execution_result.flattened_result() { + match processing_result.flattened_result() { Ok(()) => { - execution_counts.executed_successfully_count += 1; + processed_counts.processed_with_successful_result_count += 1; } Err(err) => { if *err_count == 0 { @@ -3535,15 +3540,15 @@ impl Bank { } LoadAndExecuteTransactionsOutput { - execution_results: sanitized_output.execution_results, - execution_counts, + processing_results: sanitized_output.processing_results, + processed_counts, } } fn collect_logs( &self, transactions: 
&[SanitizedTransaction], - execution_results: &[TransactionExecutionResult], + processing_results: &[TransactionProcessingResult], ) { let transaction_log_collector_config = self.transaction_log_collector_config.read().unwrap(); @@ -3551,12 +3556,13 @@ impl Bank { return; } - let collected_logs: Vec<_> = execution_results + let collected_logs: Vec<_> = processing_results .iter() .zip(transactions) - .filter_map(|(execution_result, transaction)| { + .filter_map(|(processing_result, transaction)| { // Skip log collection for unprocessed transactions - let execution_details = execution_result.details()?; + let processed_tx = processing_result.processed_transaction()?; + let execution_details = &processed_tx.execution_details; Self::collect_transaction_logs( &transaction_log_collector_config, transaction, @@ -3698,17 +3704,17 @@ impl Bank { fn filter_program_errors_and_collect_fee( &self, - execution_results: &[TransactionExecutionResult], + processing_results: &[TransactionProcessingResult], ) { let mut fees = 0; - execution_results + processing_results .iter() - .for_each(|execution_result| match execution_result { - TransactionExecutionResult::Executed(executed_tx) => { - fees += executed_tx.loaded_transaction.fee_details.total_fee(); + .for_each(|processing_result| match processing_result { + Ok(processed_tx) => { + fees += processed_tx.loaded_transaction.fee_details.total_fee(); } - TransactionExecutionResult::NotExecuted(_) => {} + Err(_) => {} }); self.collector_fees.fetch_add(fees, Relaxed); @@ -3717,17 +3723,18 @@ impl Bank { // Note: this function is not yet used; next PR will call it behind a feature gate fn filter_program_errors_and_collect_fee_details( &self, - execution_results: &[TransactionExecutionResult], + processing_results: &[TransactionProcessingResult], ) { let mut accumulated_fee_details = FeeDetails::default(); - execution_results + processing_results .iter() - .for_each(|execution_result| match execution_result { - TransactionExecutionResult::Executed(executed_tx) => { - accumulated_fee_details.accumulate(&executed_tx.loaded_transaction.fee_details); + .for_each(|processing_result| match processing_result { + Ok(processed_tx) => { + accumulated_fee_details + .accumulate(&processed_tx.loaded_transaction.fee_details); } - TransactionExecutionResult::NotExecuted(_) => {} + Err(_) => {} }); self.collector_fee_details @@ -3739,10 +3746,10 @@ impl Bank { pub fn commit_transactions( &self, sanitized_txs: &[SanitizedTransaction], - mut execution_results: Vec, + mut processing_results: Vec, last_blockhash: Hash, lamports_per_signature: u64, - execution_counts: &ExecutedTransactionCounts, + processed_counts: &ProcessedTransactionCounts, timings: &mut ExecuteTimings, ) -> Vec { assert!( @@ -3750,39 +3757,36 @@ impl Bank { "commit_transactions() working on a bank that is already frozen or is undergoing freezing!" 
); - let ExecutedTransactionCounts { - executed_transactions_count, - executed_non_vote_transactions_count, - executed_successfully_count, + let ProcessedTransactionCounts { + processed_transactions_count, + processed_non_vote_transactions_count, + processed_with_successful_result_count, signature_count, - } = *execution_counts; + } = *processed_counts; - self.increment_transaction_count(executed_transactions_count); + self.increment_transaction_count(processed_transactions_count); self.increment_non_vote_transaction_count_since_restart( - executed_non_vote_transactions_count, + processed_non_vote_transactions_count, ); self.increment_signature_count(signature_count); - let executed_with_failure_result_count = - executed_transactions_count.saturating_sub(executed_successfully_count); - if executed_with_failure_result_count > 0 { - self.transaction_error_count - .fetch_add(executed_with_failure_result_count, Relaxed); - } + let processed_with_failure_result_count = + processed_transactions_count.saturating_sub(processed_with_successful_result_count); + self.transaction_error_count + .fetch_add(processed_with_failure_result_count, Relaxed); - // Should be equivalent to checking `executed_transactions_count > 0` - if execution_results.iter().any(|result| result.was_executed()) { + if processed_transactions_count > 0 { self.is_delta.store(true, Relaxed); self.transaction_entries_count.fetch_add(1, Relaxed); self.transactions_per_entry_max - .fetch_max(executed_transactions_count, Relaxed); + .fetch_max(processed_transactions_count, Relaxed); } let ((), store_accounts_us) = measure_us!({ let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); let (accounts_to_store, transactions) = collect_accounts_to_store( sanitized_txs, - &mut execution_results, + &mut processing_results, &durable_nonce, lamports_per_signature, ); @@ -3791,17 +3795,17 @@ impl Bank { .store_cached((self.slot(), accounts_to_store.as_slice()), &transactions); }); - self.collect_rent(&execution_results); + self.collect_rent(&processing_results); // Cached vote and stake accounts are synchronized with accounts-db // after each transaction. 
let ((), update_stakes_cache_us) = - measure_us!(self.update_stakes_cache(sanitized_txs, &execution_results)); + measure_us!(self.update_stakes_cache(sanitized_txs, &processing_results)); let ((), update_executors_us) = measure_us!({ let mut cache = None; - for execution_result in &execution_results { - if let TransactionExecutionResult::Executed(executed_tx) = execution_result { + for processing_result in &processing_results { + if let Some(executed_tx) = processing_result.processed_transaction() { let programs_modified_by_tx = &executed_tx.programs_modified_by_tx; if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() { cache @@ -3814,9 +3818,10 @@ impl Bank { } }); - let accounts_data_len_delta = execution_results + let accounts_data_len_delta = processing_results .iter() - .filter_map(TransactionExecutionResult::details) + .filter_map(|processing_result| processing_result.processed_transaction()) + .map(|processed_tx| &processed_tx.execution_details) .filter_map(|details| { details .status @@ -3827,12 +3832,12 @@ impl Bank { self.update_accounts_data_size_delta_on_chain(accounts_data_len_delta); let ((), update_transaction_statuses_us) = - measure_us!(self.update_transaction_statuses(sanitized_txs, &execution_results)); + measure_us!(self.update_transaction_statuses(sanitized_txs, &processing_results)); if self.feature_set.is_active(&reward_full_priority_fee::id()) { - self.filter_program_errors_and_collect_fee_details(&execution_results) + self.filter_program_errors_and_collect_fee_details(&processing_results) } else { - self.filter_program_errors_and_collect_fee(&execution_results) + self.filter_program_errors_and_collect_fee(&processing_results) }; timings.saturating_add_in_place(ExecuteTimingType::StoreUs, store_accounts_us); @@ -3846,45 +3851,45 @@ impl Bank { update_transaction_statuses_us, ); - Self::create_commit_results(execution_results) + Self::create_commit_results(processing_results) } fn create_commit_results( - execution_results: Vec, + processing_results: Vec, ) -> Vec { - execution_results + processing_results .into_iter() - .map(|execution_result| match execution_result { - TransactionExecutionResult::Executed(executed_tx) => { - let loaded_tx = &executed_tx.loaded_transaction; + .map(|processing_result| match processing_result { + Ok(processed_tx) => { + let loaded_tx = &processed_tx.loaded_transaction; let loaded_account_stats = TransactionLoadedAccountsStats { loaded_accounts_data_size: loaded_tx.loaded_accounts_data_size, loaded_accounts_count: loaded_tx.accounts.len(), }; // Rent is only collected for successfully executed transactions - let rent_debits = if executed_tx.was_successful() { - executed_tx.loaded_transaction.rent_debits + let rent_debits = if processed_tx.was_successful() { + processed_tx.loaded_transaction.rent_debits } else { RentDebits::default() }; Ok(CommittedTransaction { loaded_account_stats, - execution_details: executed_tx.execution_details, - fee_details: executed_tx.loaded_transaction.fee_details, + execution_details: processed_tx.execution_details, + fee_details: processed_tx.loaded_transaction.fee_details, rent_debits, }) } - TransactionExecutionResult::NotExecuted(err) => Err(err), + Err(err) => Err(err), }) .collect() } - fn collect_rent(&self, execution_results: &[TransactionExecutionResult]) { - let collected_rent = execution_results + fn collect_rent(&self, processing_results: &[TransactionProcessingResult]) { + let collected_rent = processing_results .iter() - .filter_map(|executed_result| 
executed_result.executed_transaction()) + .filter_map(|processing_result| processing_result.processed_transaction()) .filter(|executed_tx| executed_tx.was_successful()) .map(|executed_tx| executed_tx.loaded_transaction.rent) .sum(); @@ -4557,8 +4562,8 @@ impl Bank { }; let LoadAndExecuteTransactionsOutput { - execution_results, - execution_counts, + processing_results, + processed_counts, } = self.load_and_execute_transactions( batch, max_age, @@ -4579,10 +4584,10 @@ impl Bank { self.last_blockhash_and_lamports_per_signature(); let commit_results = self.commit_transactions( batch.sanitized_transactions(), - execution_results, + processing_results, last_blockhash, lamports_per_signature, - &execution_counts, + &processed_counts, timings, ); let post_balances = if collect_balances { @@ -5822,16 +5827,16 @@ impl Bank { fn update_stakes_cache( &self, txs: &[SanitizedTransaction], - execution_results: &[TransactionExecutionResult], + processing_results: &[TransactionProcessingResult], ) { - debug_assert_eq!(txs.len(), execution_results.len()); + debug_assert_eq!(txs.len(), processing_results.len()); let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); txs.iter() - .zip(execution_results) - .filter_map(|(tx, execution_result)| { - execution_result - .executed_transaction() - .map(|executed_tx| (tx, executed_tx)) + .zip(processing_results) + .filter_map(|(tx, processing_result)| { + processing_result + .processed_transaction() + .map(|processed_tx| (tx, processed_tx)) }) .filter(|(_, executed_tx)| executed_tx.was_successful()) .flat_map(|(tx, executed_tx)| { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 288d0b8bb23a1b..a46ae1f8578342 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -232,8 +232,11 @@ fn test_race_register_tick_freeze() { } } -fn new_execution_result(status: Result<()>, fee_details: FeeDetails) -> TransactionExecutionResult { - TransactionExecutionResult::Executed(Box::new(ExecutedTransaction { +fn new_processing_result( + status: Result<()>, + fee_details: FeeDetails, +) -> TransactionProcessingResult { + Ok(ExecutedTransaction { loaded_transaction: LoadedTransaction { fee_details, ..LoadedTransaction::default() @@ -247,7 +250,7 @@ fn new_execution_result(status: Result<()>, fee_details: FeeDetails) -> Transact accounts_data_len_delta: 0, }, programs_modified_by_tx: HashMap::new(), - })) + }) } impl Bank { @@ -2874,9 +2877,9 @@ fn test_filter_program_errors_and_collect_fee() { let tx_fee = 42; let fee_details = FeeDetails::new(tx_fee, 0, false); - let execution_results = vec![ - new_execution_result(Ok(()), fee_details), - new_execution_result( + let processing_results = vec![ + new_processing_result(Ok(()), fee_details), + new_processing_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), @@ -2886,7 +2889,7 @@ fn test_filter_program_errors_and_collect_fee() { ]; let initial_balance = bank.get_balance(&leader); - bank.filter_program_errors_and_collect_fee(&execution_results); + bank.filter_program_errors_and_collect_fee(&processing_results); bank.freeze(); assert_eq!( bank.get_balance(&leader), @@ -2905,9 +2908,9 @@ fn test_filter_program_errors_and_collect_priority_fee() { let priority_fee = 42; let fee_details: FeeDetails = FeeDetails::new(0, priority_fee, false); - let execution_results = vec![ - new_execution_result(Ok(()), fee_details), - new_execution_result( + let processing_results = vec![ + new_processing_result(Ok(()), fee_details), + 
new_processing_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), @@ -2917,7 +2920,7 @@ fn test_filter_program_errors_and_collect_priority_fee() { ]; let initial_balance = bank.get_balance(&leader); - bank.filter_program_errors_and_collect_fee(&execution_results); + bank.filter_program_errors_and_collect_fee(&processing_results); bank.freeze(); assert_eq!( bank.get_balance(&leader), @@ -12800,9 +12803,9 @@ fn test_filter_program_errors_and_collect_fee_details() { let bank = Bank::new_for_tests(&genesis_config); let results = vec![ - TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound), - new_execution_result(Ok(()), tx_fee_details), - new_execution_result( + Err(TransactionError::AccountNotFound), + new_processing_result(Ok(()), tx_fee_details), + new_processing_result( Err(TransactionError::InstructionError( 0, SystemError::ResultWithNegativeLamports.into(), diff --git a/svm/doc/spec.md b/svm/doc/spec.md index a851b051bb909a..c40a928e4c4e4d 100644 --- a/svm/doc/spec.md +++ b/svm/doc/spec.md @@ -206,9 +206,9 @@ The output of the transaction batch processor's - `error_metrics`: Error metrics for transactions that were processed. - `execute_timings`: Timings for transaction batch execution. -- `execution_results`: Vector of results indicating whether a transaction was - executed or could not be executed. Note executed transactions can still have - failed! +- `processing_results`: Vector of results indicating whether a transaction was + processed or could not be processed for some reason. Note that processed + transactions can still have failed! # Functional Model diff --git a/svm/examples/paytube/src/settler.rs b/svm/examples/paytube/src/settler.rs index 5db63c4e675809..8923512ecd6915 100644 --- a/svm/examples/paytube/src/settler.rs +++ b/svm/examples/paytube/src/settler.rs @@ -18,7 +18,10 @@ use { pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction, transaction::Transaction as SolanaTransaction, }, - solana_svm::transaction_processor::LoadAndExecuteSanitizedTransactionsOutput, + solana_svm::{ + transaction_processing_result::TransactionProcessingResultExtensions, + transaction_processor::LoadAndExecuteSanitizedTransactionsOutput, + }, spl_associated_token_account::get_associated_token_address, std::collections::HashMap, }; @@ -61,11 +64,11 @@ impl Ledger { let mut ledger: HashMap = HashMap::new(); paytube_transactions .iter() - .zip(svm_output.execution_results) + .zip(svm_output.processing_results) .for_each(|(transaction, result)| { // Only append to the ledger if the PayTube transaction was // successful. - if result.was_executed_successfully() { + if result.was_processed_with_successful_result() { let mint = transaction.mint; let mut keys = [transaction.from, transaction.to]; keys.sort(); diff --git a/svm/src/account_saver.rs b/svm/src/account_saver.rs index 4ba2ec259fd87b..2657e7a7cb9717 100644 --- a/svm/src/account_saver.rs +++ b/svm/src/account_saver.rs @@ -1,7 +1,9 @@ use { crate::{ rollback_accounts::RollbackAccounts, - transaction_execution_result::TransactionExecutionResult, + transaction_processing_result::{ + TransactionProcessingResult, TransactionProcessingResultExtensions, + }, }, solana_sdk::{ account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey, @@ -14,20 +16,20 @@ use { // optimization edge cases where some write locked accounts have skip storage. 
fn max_number_of_accounts_to_collect( txs: &[SanitizedTransaction], - execution_results: &[TransactionExecutionResult], + processing_results: &[TransactionProcessingResult], ) -> usize { - execution_results + processing_results .iter() .zip(txs) - .filter_map(|(execution_result, tx)| { - execution_result - .executed_transaction() - .map(|executed_tx| (executed_tx, tx)) + .filter_map(|(processing_result, tx)| { + processing_result + .processed_transaction() + .map(|processed_tx| (processed_tx, tx)) }) .map( - |(executed_tx, tx)| match executed_tx.execution_details.status { + |(processed_tx, tx)| match processed_tx.execution_details.status { Ok(_) => tx.message().num_write_locks() as usize, - Err(_) => executed_tx.loaded_transaction.rollback_accounts.count(), + Err(_) => processed_tx.loaded_transaction.rollback_accounts.count(), }, ) .sum() @@ -35,35 +37,35 @@ fn max_number_of_accounts_to_collect( pub fn collect_accounts_to_store<'a>( txs: &'a [SanitizedTransaction], - execution_results: &'a mut [TransactionExecutionResult], + processing_results: &'a mut [TransactionProcessingResult], durable_nonce: &DurableNonce, lamports_per_signature: u64, ) -> ( Vec<(&'a Pubkey, &'a AccountSharedData)>, Vec>, ) { - let collect_capacity = max_number_of_accounts_to_collect(txs, execution_results); + let collect_capacity = max_number_of_accounts_to_collect(txs, processing_results); let mut accounts = Vec::with_capacity(collect_capacity); let mut transactions = Vec::with_capacity(collect_capacity); - for (execution_result, tx) in execution_results.iter_mut().zip(txs) { - let Some(executed_tx) = execution_result.executed_transaction_mut() else { + for (processing_result, transaction) in processing_results.iter_mut().zip(txs) { + let Some(processed_tx) = processing_result.processed_transaction_mut() else { // Don't store any accounts if tx wasn't executed continue; }; - if executed_tx.execution_details.status.is_ok() { + if processed_tx.execution_details.status.is_ok() { collect_accounts_for_successful_tx( &mut accounts, &mut transactions, - tx, - &executed_tx.loaded_transaction.accounts, + transaction, + &processed_tx.loaded_transaction.accounts, ); } else { collect_accounts_for_failed_tx( &mut accounts, &mut transactions, - tx, - &mut executed_tx.loaded_transaction.rollback_accounts, + transaction, + &mut processed_tx.loaded_transaction.rollback_accounts, durable_nonce, lamports_per_signature, ); @@ -180,11 +182,11 @@ mod tests { )) } - fn new_execution_result( + fn new_processing_result( status: Result<()>, loaded_transaction: LoadedTransaction, - ) -> TransactionExecutionResult { - TransactionExecutionResult::Executed(Box::new(ExecutedTransaction { + ) -> TransactionProcessingResult { + Ok(ExecutedTransaction { execution_details: TransactionExecutionDetails { status, log_messages: None, @@ -195,7 +197,7 @@ mod tests { }, loaded_transaction, programs_modified_by_tx: HashMap::new(), - })) + }) } #[test] @@ -260,14 +262,14 @@ mod tests { }; let txs = vec![tx0.clone(), tx1.clone()]; - let mut execution_results = vec![ - new_execution_result(Ok(()), loaded0), - new_execution_result(Ok(()), loaded1), + let mut processing_results = vec![ + new_processing_result(Ok(()), loaded0), + new_processing_result(Ok(()), loaded1), ]; - let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 2); let (collected_accounts, transactions) = - 
collect_accounts_to_store(&txs, &mut execution_results, &DurableNonce::default(), 0); + collect_accounts_to_store(&txs, &mut processing_results, &DurableNonce::default(), 0); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts .iter() @@ -314,18 +316,18 @@ mod tests { }; let txs = vec![tx]; - let mut execution_results = vec![new_execution_result( + let mut processing_results = vec![new_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), loaded, )]; - let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut execution_results, &durable_nonce, 0); + collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); assert_eq!(collected_accounts.len(), 1); assert_eq!( collected_accounts @@ -400,17 +402,17 @@ mod tests { let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let txs = vec![tx]; - let mut execution_results = vec![new_execution_result( + let mut processing_results = vec![new_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), loaded, )]; - let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 2); let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut execution_results, &durable_nonce, 0); + collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); assert_eq!(collected_accounts.len(), 2); assert_eq!( collected_accounts @@ -498,17 +500,17 @@ mod tests { let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let txs = vec![tx]; - let mut execution_results = vec![new_execution_result( + let mut processing_results = vec![new_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), loaded, )]; - let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut execution_results, &durable_nonce, 0); + collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts .iter() diff --git a/svm/src/lib.rs b/svm/src/lib.rs index cbfef2305e41f9..b031ce7d6e1c53 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -15,6 +15,7 @@ pub mod transaction_commit_result; pub mod transaction_error_metrics; pub mod transaction_execution_result; pub mod transaction_processing_callback; +pub mod transaction_processing_result; pub mod transaction_processor; #[macro_use] diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs index 5cc413d7b175f9..53590c0c5d2b50 100644 --- a/svm/src/transaction_commit_result.rs +++ b/svm/src/transaction_commit_result.rs @@ -18,13 +18,13 @@ pub struct CommittedTransaction { } pub trait TransactionCommitResultExtensions { - fn was_executed(&self) -> bool; + fn was_committed(&self) -> bool; fn was_executed_successfully(&self) -> bool; fn 
transaction_result(&self) -> TransactionResult<()>; } impl TransactionCommitResultExtensions for TransactionCommitResult { - fn was_executed(&self) -> bool { + fn was_committed(&self) -> bool { self.is_ok() } diff --git a/svm/src/transaction_execution_result.rs b/svm/src/transaction_execution_result.rs index a7c965fbed01bd..2ac684d4cc219c 100644 --- a/svm/src/transaction_execution_result.rs +++ b/svm/src/transaction_execution_result.rs @@ -51,44 +51,6 @@ impl ExecutedTransaction { } } -impl TransactionExecutionResult { - pub fn was_executed_successfully(&self) -> bool { - self.executed_transaction() - .map(|executed_tx| executed_tx.was_successful()) - .unwrap_or(false) - } - - pub fn was_executed(&self) -> bool { - self.executed_transaction().is_some() - } - - pub fn details(&self) -> Option<&TransactionExecutionDetails> { - self.executed_transaction() - .map(|executed_tx| &executed_tx.execution_details) - } - - pub fn flattened_result(&self) -> transaction::Result<()> { - match self { - Self::Executed(executed_tx) => executed_tx.execution_details.status.clone(), - Self::NotExecuted(err) => Err(err.clone()), - } - } - - pub fn executed_transaction(&self) -> Option<&ExecutedTransaction> { - match self { - Self::Executed(executed_tx) => Some(executed_tx.as_ref()), - Self::NotExecuted { .. } => None, - } - } - - pub fn executed_transaction_mut(&mut self) -> Option<&mut ExecutedTransaction> { - match self { - Self::Executed(executed_tx) => Some(executed_tx.as_mut()), - Self::NotExecuted { .. } => None, - } - } -} - #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct TransactionExecutionDetails { pub status: transaction::Result<()>, diff --git a/svm/src/transaction_processing_result.rs b/svm/src/transaction_processing_result.rs new file mode 100644 index 00000000000000..0ad68e0d18a803 --- /dev/null +++ b/svm/src/transaction_processing_result.rs @@ -0,0 +1,48 @@ +use { + crate::transaction_execution_result::ExecutedTransaction, + solana_sdk::transaction::Result as TransactionResult, +}; + +pub type TransactionProcessingResult = TransactionResult; +pub type ProcessedTransaction = ExecutedTransaction; + +pub trait TransactionProcessingResultExtensions { + fn was_processed(&self) -> bool; + fn was_processed_with_successful_result(&self) -> bool; + fn processed_transaction(&self) -> Option<&ProcessedTransaction>; + fn processed_transaction_mut(&mut self) -> Option<&mut ProcessedTransaction>; + fn flattened_result(&self) -> TransactionResult<()>; +} + +impl TransactionProcessingResultExtensions for TransactionProcessingResult { + fn was_processed(&self) -> bool { + self.is_ok() + } + + fn was_processed_with_successful_result(&self) -> bool { + match self { + Ok(processed_tx) => processed_tx.was_successful(), + Err(_) => false, + } + } + + fn processed_transaction(&self) -> Option<&ProcessedTransaction> { + match self { + Ok(processed_tx) => Some(processed_tx), + Err(_) => None, + } + } + + fn processed_transaction_mut(&mut self) -> Option<&mut ProcessedTransaction> { + match self { + Ok(processed_tx) => Some(processed_tx), + Err(_) => None, + } + } + + fn flattened_result(&self) -> TransactionResult<()> { + self.as_ref() + .map_err(|err| err.clone()) + .and_then(|processed_tx| processed_tx.execution_details.status.clone()) + } +} diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index fa2bd505948a24..54b1abb6740661 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -13,10 +13,9 @@ use { 
rollback_accounts::RollbackAccounts, transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, - transaction_execution_result::{ - ExecutedTransaction, TransactionExecutionDetails, TransactionExecutionResult, - }, + transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_result::TransactionProcessingResult, }, log::debug, percentage::Percentage, @@ -69,9 +68,10 @@ pub struct LoadAndExecuteSanitizedTransactionsOutput { pub error_metrics: TransactionErrorMetrics, /// Timings for transaction batch execution. pub execute_timings: ExecuteTimings, - // Vector of results indicating whether a transaction was executed or could not - // be executed. Note executed transactions can still have failed! - pub execution_results: Vec, + /// Vector of results indicating whether a transaction was processed or + /// could not be processed. Note processed transactions can still have a + /// failure result meaning that the transaction will be rolled back. + pub processing_results: Vec, } /// Configuration of the recording capabilities for transaction execution @@ -269,12 +269,11 @@ impl TransactionBatchProcessor { if program_cache_for_tx_batch.hit_max_limit { const ERROR: TransactionError = TransactionError::ProgramCacheHitMaxLimit; - let execution_results = - vec![TransactionExecutionResult::NotExecuted(ERROR); sanitized_txs.len()]; + let processing_results = vec![Err(ERROR); sanitized_txs.len()]; return LoadAndExecuteSanitizedTransactionsOutput { error_metrics, execute_timings, - execution_results, + processing_results, }; } @@ -294,12 +293,12 @@ impl TransactionBatchProcessor { &program_cache_for_tx_batch, )); - let (execution_results, execution_us): (Vec, u64) = + let (processing_results, execution_us): (Vec, u64) = measure_us!(loaded_transactions .into_iter() .zip(sanitized_txs.iter()) .map(|(load_result, tx)| match load_result { - Err(e) => TransactionExecutionResult::NotExecuted(e.clone()), + Err(e) => Err(e.clone()), Ok(loaded_transaction) => { let executed_tx = self.execute_loaded_transaction( tx, @@ -317,7 +316,7 @@ impl TransactionBatchProcessor { program_cache_for_tx_batch.merge(&executed_tx.programs_modified_by_tx); } - TransactionExecutionResult::Executed(Box::new(executed_tx)) + Ok(executed_tx) } }) .collect()); @@ -354,7 +353,7 @@ impl TransactionBatchProcessor { LoadAndExecuteSanitizedTransactionsOutput { error_metrics, execute_timings, - execution_results, + processing_results, } } diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs index 8e82cf98623837..5b32c2e164c2d2 100644 --- a/svm/tests/conformance.rs +++ b/svm/tests/conformance.rs @@ -36,6 +36,7 @@ use { account_loader::CheckedTransactionDetails, program_loader, transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_result::TransactionProcessingResultExtensions, transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingConfig, TransactionProcessingEnvironment, @@ -311,17 +312,8 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool ); // Assert that the transaction has worked without errors. - if !result.execution_results[0].was_executed() - || result.execution_results[0] - .details() - .unwrap() - .status - .is_err() - { - if matches!( - result.execution_results[0].flattened_result(), - Err(TransactionError::InsufficientFundsForRent { .. 
}) - ) { + if let Err(err) = result.processing_results[0].flattened_result() { + if matches!(err, TransactionError::InsufficientFundsForRent { .. }) { // This is a transaction error not an instruction error, so execute the instruction // instead. execute_fixture_as_instr( @@ -344,7 +336,9 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool return; } - let executed_tx = result.execution_results[0].executed_transaction().unwrap(); + let executed_tx = result.processing_results[0] + .processed_transaction() + .unwrap(); let execution_details = &executed_tx.execution_details; let loaded_accounts = &executed_tx.loaded_transaction.accounts; verify_accounts_and_data( diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 8d8c80b8e89422..5070bca08e0907 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -33,8 +33,8 @@ use { }, solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, - transaction_execution_result::TransactionExecutionResult, transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_result::TransactionProcessingResultExtensions, transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingConfig, TransactionProcessingEnvironment, @@ -429,9 +429,11 @@ fn svm_integration() { &processing_config, ); - assert_eq!(result.execution_results.len(), 5); + assert_eq!(result.processing_results.len(), 5); - let executed_tx_0 = result.execution_results[0].executed_transaction().unwrap(); + let executed_tx_0 = result.processing_results[0] + .processed_transaction() + .unwrap(); assert!(executed_tx_0.was_successful()); let logs = executed_tx_0 .execution_details @@ -440,7 +442,9 @@ fn svm_integration() { .unwrap(); assert!(logs.contains(&"Program log: Hello, Solana!".to_string())); - let executed_tx_1 = result.execution_results[1].executed_transaction().unwrap(); + let executed_tx_1 = result.processing_results[1] + .processed_transaction() + .unwrap(); assert!(executed_tx_1.was_successful()); // The SVM does not commit the account changes in MockBank @@ -453,7 +457,9 @@ fn svm_integration() { .unwrap(); assert_eq!(recipient_data.1.lamports(), 900010); - let executed_tx_2 = result.execution_results[2].executed_transaction().unwrap(); + let executed_tx_2 = result.processing_results[2] + .processed_transaction() + .unwrap(); let return_data = executed_tx_2 .execution_details .return_data @@ -464,7 +470,9 @@ fn svm_integration() { let clock_info: Clock = bincode::deserialize(clock_data.data()).unwrap(); assert_eq!(clock_info.unix_timestamp, time); - let executed_tx_3 = result.execution_results[3].executed_transaction().unwrap(); + let executed_tx_3 = result.processing_results[3] + .processed_transaction() + .unwrap(); assert!(executed_tx_3.execution_details.status.is_err()); assert!(executed_tx_3 .execution_details @@ -474,7 +482,7 @@ fn svm_integration() { .contains(&"Transfer: insufficient lamports 900000, need 900050".to_string())); assert!(matches!( - result.execution_results[4], - TransactionExecutionResult::NotExecuted(TransactionError::BlockhashNotFound) + result.processing_results[4], + Err(TransactionError::BlockhashNotFound) )); } From f1a227959f7994cc5dfff7495e290aaa7fa4233d Mon Sep 17 00:00:00 2001 From: Brennan Date: Fri, 9 Aug 2024 10:36:29 -0700 Subject: [PATCH 077/529] reduce tss indent (#2533) --- rpc/src/transaction_status_service.rs | 165 +++++++++++++------------- 1 file changed, 83 insertions(+), 82 
deletions(-) diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 49a78f22db7752..8a44c37229db2c 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -94,93 +94,94 @@ impl TransactionStatusService { token_balances.post_token_balances, transaction_indexes, ) { - if let Ok(committed_tx) = commit_result { - let CommittedTransaction { - execution_details: - TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - return_data, - executed_units, - .. - }, - fee_details, - rent_debits, - .. - } = committed_tx; - let tx_account_locks = transaction.get_account_locks_unchecked(); - - let fee = fee_details.total_fee(); - let inner_instructions = inner_instructions.map(|inner_instructions| { - map_inner_instructions(inner_instructions).collect() - }); - - let pre_token_balances = Some(pre_token_balances); - let post_token_balances = Some(post_token_balances); - let rewards = Some( - rent_debits - .into_unordered_rewards_iter() - .map(|(pubkey, reward_info)| Reward { - pubkey: pubkey.to_string(), - lamports: reward_info.lamports, - post_balance: reward_info.post_balance, - reward_type: Some(reward_info.reward_type), - commission: reward_info.commission, - }) - .collect(), + let Ok(committed_tx) = commit_result else { + continue; + }; + + let CommittedTransaction { + execution_details: + TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + return_data, + executed_units, + .. + }, + fee_details, + rent_debits, + .. + } = committed_tx; + let tx_account_locks = transaction.get_account_locks_unchecked(); + + let fee = fee_details.total_fee(); + let inner_instructions = inner_instructions.map(|inner_instructions| { + map_inner_instructions(inner_instructions).collect() + }); + + let pre_token_balances = Some(pre_token_balances); + let post_token_balances = Some(post_token_balances); + let rewards = Some( + rent_debits + .into_unordered_rewards_iter() + .map(|(pubkey, reward_info)| Reward { + pubkey: pubkey.to_string(), + lamports: reward_info.lamports, + post_balance: reward_info.post_balance, + reward_type: Some(reward_info.reward_type), + commission: reward_info.commission, + }) + .collect(), + ); + let loaded_addresses = transaction.get_loaded_addresses(); + let mut transaction_status_meta = TransactionStatusMeta { + status, + fee, + pre_balances, + post_balances, + inner_instructions, + log_messages, + pre_token_balances, + post_token_balances, + rewards, + loaded_addresses, + return_data, + compute_units_consumed: Some(executed_units), + }; + + if let Some(transaction_notifier) = transaction_notifier.as_ref() { + transaction_notifier.notify_transaction( + slot, + transaction_index, + transaction.signature(), + &transaction_status_meta, + &transaction, ); - let loaded_addresses = transaction.get_loaded_addresses(); - let mut transaction_status_meta = TransactionStatusMeta { - status, - fee, - pre_balances, - post_balances, - inner_instructions, - log_messages, - pre_token_balances, - post_token_balances, - rewards, - loaded_addresses, - return_data, - compute_units_consumed: Some(executed_units), - }; - - if let Some(transaction_notifier) = transaction_notifier.as_ref() { - transaction_notifier.notify_transaction( - slot, - transaction_index, - transaction.signature(), - &transaction_status_meta, - &transaction, - ); - } - - if !(enable_extended_tx_metadata_storage || transaction_notifier.is_some()) - { - transaction_status_meta.log_messages.take(); - 
transaction_status_meta.inner_instructions.take(); - transaction_status_meta.return_data.take(); - } + } - if enable_rpc_transaction_history { - if let Some(memos) = extract_and_fmt_memos(transaction.message()) { - blockstore - .write_transaction_memos(transaction.signature(), slot, memos) - .expect("Expect database write to succeed: TransactionMemos"); - } + if !(enable_extended_tx_metadata_storage || transaction_notifier.is_some()) { + transaction_status_meta.log_messages.take(); + transaction_status_meta.inner_instructions.take(); + transaction_status_meta.return_data.take(); + } + if enable_rpc_transaction_history { + if let Some(memos) = extract_and_fmt_memos(transaction.message()) { blockstore - .write_transaction_status( - slot, - *transaction.signature(), - tx_account_locks.writable, - tx_account_locks.readonly, - transaction_status_meta, - transaction_index, - ) - .expect("Expect database write to succeed: TransactionStatus"); + .write_transaction_memos(transaction.signature(), slot, memos) + .expect("Expect database write to succeed: TransactionMemos"); } + + blockstore + .write_transaction_status( + slot, + *transaction.signature(), + tx_account_locks.writable, + tx_account_locks.readonly, + transaction_status_meta, + transaction_index, + ) + .expect("Expect database write to succeed: TransactionStatus"); } } } From 497941f0f6a0c46b8388efaa705df396dff652e0 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 9 Aug 2024 18:07:16 +0000 Subject: [PATCH 078/529] Reverts "Revert "checks for duplicate instances using the new ContactInfo (#2506)"" (#2532) * Reapply "checks for duplicate instances using the new ContactInfo (#2506)" (#2521) This reverts commit 15c5dcbee3eeba27a0714c7538323fc2ad000ef2. * removes unwrap --- gossip/src/cluster_info.rs | 21 ++++++++----- gossip/src/contact_info.rs | 63 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 8 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 0f11489333644d..46b505014bebc3 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2475,16 +2475,21 @@ impl ClusterInfo { // Check if there is a duplicate instance of // this node with more recent timestamp. - let instance = self.instance.read().unwrap(); - let check_duplicate_instance = |values: &[CrdsValue]| { - if should_check_duplicate_instance { - for value in values { - if instance.check_duplicate(value) { - return Err(GossipError::DuplicateNodeInstance); - } + let check_duplicate_instance = { + let instance = self.instance.read().unwrap(); + let my_contact_info = self.my_contact_info(); + move |values: &[CrdsValue]| { + if should_check_duplicate_instance + && values.iter().any(|value| { + instance.check_duplicate(value) + || matches!(&value.data, CrdsData::ContactInfo(other) + if my_contact_info.check_duplicate(other)) + }) + { + return Err(GossipError::DuplicateNodeInstance); } + Ok(()) } - Ok(()) }; let mut pings = Vec::new(); let mut rng = rand::thread_rng(); diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 9a5c1ce495813b..5e4f5b27cac04a 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -435,6 +435,14 @@ impl ContactInfo { node.set_serve_repair_quic((addr, port + 4)).unwrap(); node } + + // Returns true if the other contact-info is a duplicate instance of this + // node, with a more recent `outset` timestamp. 
+ #[inline] + #[must_use] + pub(crate) fn check_duplicate(&self, other: &ContactInfo) -> bool { + self.pubkey == other.pubkey && self.outset < other.outset + } } impl Default for ContactInfo { @@ -1016,4 +1024,59 @@ mod tests { Err(Error::InvalidPort(0)) ); } + + #[test] + fn test_check_duplicate() { + let mut rng = rand::thread_rng(); + let mut node = ContactInfo::new( + Keypair::new().pubkey(), + rng.gen(), // wallclock + rng.gen(), // shred_version + ); + // Same contact-info is not a duplicate instance. + { + let other = node.clone(); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Updated socket address is not a duplicate instance. + { + let mut other = node.clone(); + while other.set_gossip(new_rand_socket(&mut rng)).is_err() {} + while other.set_serve_repair(new_rand_socket(&mut rng)).is_err() {} + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + other.remove_serve_repair(); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Updated wallclock is not a duplicate instance. + { + let other = node.clone(); + node.set_wallclock(rng.gen()); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Different pubkey is not a duplicate instance. + { + let other = ContactInfo::new( + Keypair::new().pubkey(), + rng.gen(), // wallclock + rng.gen(), // shred_version + ); + assert!(!node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + // Same pubkey, more recent outset timestamp is a duplicate instance. + { + let other = ContactInfo::new( + node.pubkey, + rng.gen(), // wallclock + rng.gen(), // shred_version + ); + assert!(node.outset < other.outset); + assert!(node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + } + } } From 2b9b50afd32cb277df35b44587169b75edaf0726 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Fri, 9 Aug 2024 14:34:13 -0500 Subject: [PATCH 079/529] only propagate staked `NodeInstances` (#2511) only propagate staked NodeInstances Co-authored-by: greg --- gossip/src/cluster_info.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 46b505014bebc3..d3b0a9857481cb 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -433,13 +433,13 @@ fn retain_staked(values: &mut Vec, stakes: &HashMap) { // Otherwise unstaked voting nodes will show up with no version in // the various dashboards. 
CrdsData::Version(_) => true, - CrdsData::NodeInstance(_) => true, CrdsData::AccountsHashes(_) => true, CrdsData::LowestSlot(_, _) | CrdsData::LegacyVersion(_) | CrdsData::DuplicateShred(_, _) | CrdsData::RestartHeaviestFork(_) - | CrdsData::RestartLastVotedForkSlots(_) => { + | CrdsData::RestartLastVotedForkSlots(_) + | CrdsData::NodeInstance(_) => { let stake = stakes.get(&value.pubkey()).copied(); stake.unwrap_or_default() >= MIN_STAKE_FOR_GOSSIP } From 8fa957189ca9feb92166f7d03b2ec835b40ce147 Mon Sep 17 00:00:00 2001 From: Brennan Date: Fri, 9 Aug 2024 12:39:07 -0700 Subject: [PATCH 080/529] Pass slot instead of Bank to TransactionStatusService (#2534) pass slot instead of bank --- core/src/banking_stage/committer.rs | 2 +- ledger-tool/src/main.rs | 6 ++---- ledger/src/blockstore_processor.rs | 10 ++++------ rpc/src/transaction_status_service.rs | 5 ++--- 4 files changed, 9 insertions(+), 14 deletions(-) diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index c020c92866dec0..7beb06a4d2de39 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -157,7 +157,7 @@ impl Committer { }) .collect(); transaction_status_sender.send_transaction_status_batch( - bank.clone(), + bank.slot(), txs, commit_results, TransactionBalancesSet::new( diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 1005f30a71cd65..60154480f4dd3e 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -683,8 +683,6 @@ fn record_transactions( ) { for tsm in recv { if let TransactionStatusMessage::Batch(batch) = tsm { - let slot = batch.bank.slot(); - assert_eq!(batch.transactions.len(), batch.commit_results.len()); let transactions: Vec<_> = batch @@ -725,11 +723,11 @@ fn record_transactions( let mut slots = slots.lock().unwrap(); - if let Some(recorded_slot) = slots.iter_mut().find(|f| f.slot == slot) { + if let Some(recorded_slot) = slots.iter_mut().find(|f| f.slot == batch.slot) { recorded_slot.transactions.extend(transactions); } else { slots.push(SlotDetails { - slot, + slot: batch.slot, transactions, ..Default::default() }); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 8d8211e02a07c1..9bd863908d98dd 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -207,7 +207,7 @@ pub fn execute_batch( TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances); transaction_status_sender.send_transaction_status_batch( - bank.clone(), + bank.slot(), transactions, commit_results, balances, @@ -2108,7 +2108,7 @@ pub enum TransactionStatusMessage { } pub struct TransactionStatusBatch { - pub bank: Arc, + pub slot: Slot, pub transactions: Vec, pub commit_results: Vec, pub balances: TransactionBalancesSet, @@ -2124,19 +2124,17 @@ pub struct TransactionStatusSender { impl TransactionStatusSender { pub fn send_transaction_status_batch( &self, - bank: Arc, + slot: Slot, transactions: Vec, commit_results: Vec, balances: TransactionBalancesSet, token_balances: TransactionTokenBalancesSet, transaction_indexes: Vec, ) { - let slot = bank.slot(); - if let Err(e) = self .sender .send(TransactionStatusMessage::Batch(TransactionStatusBatch { - bank, + slot, transactions, commit_results, balances, diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 8a44c37229db2c..96e2d88a13c550 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -69,14 +69,13 @@ impl 
TransactionStatusService { ) -> Result<(), RecvTimeoutError> { match write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))? { TransactionStatusMessage::Batch(TransactionStatusBatch { - bank, + slot, transactions, commit_results, balances, token_balances, transaction_indexes, }) => { - let slot = bank.slot(); for ( transaction, commit_result, @@ -384,7 +383,7 @@ pub(crate) mod tests { let signature = *transaction.signature(); let transaction_index: usize = bank.transaction_count().try_into().unwrap(); let transaction_status_batch = TransactionStatusBatch { - bank, + slot, transactions: vec![transaction], commit_results: vec![commit_result], balances, From 0fda3049f4c6315f30e88ec1ee0279ba313c3b9c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 9 Aug 2024 19:33:15 -0400 Subject: [PATCH 081/529] hash-cache-tool: Refactors width calculation (#2530) --- .../accounts-hash-cache-tool/src/main.rs | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 7817b35c4fac3d..c6118f01797e70 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -203,7 +203,7 @@ fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { file.as_ref().display(), ) })?; - let count_width = (header.count as f64).log10().ceil() as usize; + let count_width = width10(header.count as u64); let mut count = Saturating(0); scan_file(reader, header.count, |entry| { @@ -235,13 +235,13 @@ fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), let num_accounts1 = entries1.len(); let num_accounts2 = entries2.len(); let num_accounts_width = { - let width1 = (num_accounts1 as f64).log10().ceil() as usize; - let width2 = (num_accounts2 as f64).log10().ceil() as usize; + let width1 = width10(num_accounts1 as u64); + let width2 = width10(num_accounts2 as u64); cmp::max(width1, width2) }; let lamports_width = { - let width1 = (capitalization1 as f64).log10().ceil() as usize; - let width2 = (capitalization2 as f64).log10().ceil() as usize; + let width1 = width10(capitalization1); + let width2 = width10(capitalization2); cmp::max(width1, width2) }; println!("File 1: number of accounts: {num_accounts1:num_accounts_width$}, capitalization: {capitalization1:lamports_width$} lamports"); @@ -284,7 +284,7 @@ fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), // display the unique entries in each file let do_print = |entries: &[CacheHashDataFileEntry]| { - let count_width = (entries.len() as f64).log10().ceil() as usize; + let count_width = width10(entries.len() as u64); if entries.is_empty() { println!("(none)"); } else { @@ -307,7 +307,7 @@ fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), do_print(&unique_entries2); println!("Mismatch values:"); - let count_width = (mismatch_entries.len() as f64).log10().ceil() as usize; + let count_width = width10(mismatch_entries.len() as u64); if mismatch_entries.is_empty() { println!("(none)"); } else { @@ -435,7 +435,7 @@ fn do_diff_dirs( } let do_print = |entries: &[&CacheFileInfo]| { - let count_width = (entries.len() as f64).log10().ceil() as usize; + let count_width = width10(entries.len() as u64); if entries.is_empty() { println!("(none)"); } else { @@ -450,7 +450,7 @@ fn do_diff_dirs( do_print(&uniques2); println!("Mismatch files:"); - let count_width = (mismatches.len() as f64).log10().ceil() as usize; + let count_width = 
width10(mismatches.len() as u64); if mismatches.is_empty() { println!("(none)"); } else { @@ -558,13 +558,13 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S drop(timer); let num_accounts_width = { - let width1 = (num_accounts1 as f64).log10().ceil() as usize; - let width2 = (num_accounts2 as f64).log10().ceil() as usize; + let width1 = width10(num_accounts1 as u64); + let width2 = width10(num_accounts2 as u64); cmp::max(width1, width2) }; let lamports_width = { - let width1 = (capitalization1 as f64).log10().ceil() as usize; - let width2 = (capitalization2 as f64).log10().ceil() as usize; + let width1 = width10(capitalization1); + let width2 = width10(capitalization2); cmp::max(width1, width2) }; @@ -575,7 +575,7 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S if unique_entries1.is_empty() { println!("(none)"); } else { - let count_width = (unique_entries1.len() as f64).log10().ceil() as usize; + let count_width = width10(unique_entries1.len() as u64); let mut total_lamports = Saturating(0); for (i, entry) in unique_entries1.iter().enumerate() { total_lamports += entry.1 .1; @@ -593,7 +593,7 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S if unique_entries1.is_empty() { println!("(none)"); } else { - let count_width = (unique_entries2.len() as f64).log10().ceil() as usize; + let count_width = width10(unique_entries2.len() as u64); let mut total_lamports = Saturating(0); for (i, entry) in unique_entries2.iter().enumerate() { total_lamports += entry.1 .1; @@ -608,7 +608,7 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S } println!("Mismatch values:"); - let count_width = (mismatch_entries.len() as f64).log10().ceil() as usize; + let count_width = width10(mismatch_entries.len() as u64); if mismatch_entries.is_empty() { println!("(none)"); } else { @@ -837,6 +837,11 @@ fn open_file( Ok((reader, header)) } +/// Returns the number of characters required to print `x` in base-10 +fn width10(x: u64) -> usize { + (x as f64).log10().ceil() as usize +} + #[derive(Debug)] struct CacheFileInfo { path: PathBuf, From 12e98b4dcd8a4456510638c804389f7a6af6382d Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 9 Aug 2024 21:19:01 -0400 Subject: [PATCH 082/529] hash-cache-tool: Refactors printing of unique entries (#2536) --- .../accounts-hash-cache-tool/src/main.rs | 52 +++++++------------ 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index c6118f01797e70..040a45c2db3a19 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -571,41 +571,29 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S println!("State 1: total number of accounts: {num_accounts1:num_accounts_width$}, total capitalization: {capitalization1:lamports_width$} lamports"); println!("State 2: total number of accounts: {num_accounts2:num_accounts_width$}, total capitalization: {capitalization2:lamports_width$} lamports"); - println!("Unique entries in state 1:"); - if unique_entries1.is_empty() { - println!("(none)"); - } else { - let count_width = width10(unique_entries1.len() as u64); - let mut total_lamports = Saturating(0); - for (i, entry) in unique_entries1.iter().enumerate() { - total_lamports += entry.1 .1; - println!( - "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", - entry.0.to_string(), - entry.1 .0 
.0.to_string(), - entry.1 .1, - ); + let do_print = |entries: &[(Pubkey, (AccountHash, /* lamports */ u64))]| { + if entries.is_empty() { + println!("(none)"); + } else { + let count_width = width10(entries.len() as u64); + let mut total_lamports = Saturating(0); + for (i, entry) in entries.iter().enumerate() { + total_lamports += entry.1 .1; + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", + entry.0.to_string(), + entry.1 .0 .0.to_string(), + entry.1 .1, + ); + } + println!("total lamports: {}", total_lamports.0); } - println!("total lamports: {}", total_lamports.0); - } + }; + println!("Unique entries in state 1:"); + do_print(&unique_entries1); println!("Unique entries in state 2:"); - if unique_entries1.is_empty() { - println!("(none)"); - } else { - let count_width = width10(unique_entries2.len() as u64); - let mut total_lamports = Saturating(0); - for (i, entry) in unique_entries2.iter().enumerate() { - total_lamports += entry.1 .1; - println!( - "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", - entry.0.to_string(), - entry.1 .0 .0.to_string(), - entry.1 .1, - ); - } - println!("total lamports: {}", total_lamports.0); - } + do_print(&unique_entries2); println!("Mismatch values:"); let count_width = width10(mismatch_entries.len() as u64); From 584b072b9962d1e2531564bb6cfd209746210a91 Mon Sep 17 00:00:00 2001 From: Brooks Date: Sat, 10 Aug 2024 07:04:48 -0400 Subject: [PATCH 083/529] hash-cache-tool: Refactors printing unique entries (#2537) --- .../accounts-hash-cache-tool/src/main.rs | 85 +++++++++---------- 1 file changed, 39 insertions(+), 46 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 040a45c2db3a19..3561d9d3d4c1d6 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -282,29 +282,10 @@ fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), let (unique_entries1, mismatch_entries) = do_compute(&entries1, &entries2); let (unique_entries2, _) = do_compute(&entries2, &entries1); - // display the unique entries in each file - let do_print = |entries: &[CacheHashDataFileEntry]| { - let count_width = width10(entries.len() as u64); - if entries.is_empty() { - println!("(none)"); - } else { - let mut total_lamports = Saturating(0); - for (i, entry) in entries.iter().enumerate() { - total_lamports += entry.lamports; - println!( - "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", - entry.pubkey.to_string(), - entry.hash.0.to_string(), - entry.lamports, - ); - } - println!("total lamports: {}", total_lamports.0); - } - }; println!("Unique entries in file 1:"); - do_print(&unique_entries1); + print_unique_entries(&unique_entries1, lamports_width); println!("Unique entries in file 2:"); - do_print(&unique_entries2); + print_unique_entries(&unique_entries2, lamports_width); println!("Mismatch values:"); let count_width = width10(mismatch_entries.len() as u64); @@ -527,7 +508,11 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S } None => { // this pubkey was *not* found in state2, so its a unique entry in state1 - unique_entries1.push((key1, value1)); + unique_entries1.push(CacheHashDataFileEntry { + pubkey: key1, + hash: value1.0, + lamports: value1.1, + }); } } } @@ -547,13 +532,19 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S let mut unique_entries2 = Vec::new(); for bin in 
Vec::from(state2).into_iter() { let mut bin = bin.write().unwrap(); - unique_entries2.extend(bin.drain()); + unique_entries2.extend(bin.drain().map(|(pubkey, (hash, lamports))| { + CacheHashDataFileEntry { + pubkey, + hash, + lamports, + } + })); } // sort all the results by pubkey to make them saner to view let timer = LoggingTimer::new("Sorting results"); - unique_entries1.sort_unstable_by(|a, b| a.0.cmp(&b.0)); - unique_entries2.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + unique_entries1.sort_unstable_by(|a, b| a.pubkey.cmp(&b.pubkey)); + unique_entries2.sort_unstable_by(|a, b| a.pubkey.cmp(&b.pubkey)); mismatch_entries.sort_unstable_by(|a, b| a.0.cmp(&b.0)); drop(timer); @@ -571,29 +562,10 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S println!("State 1: total number of accounts: {num_accounts1:num_accounts_width$}, total capitalization: {capitalization1:lamports_width$} lamports"); println!("State 2: total number of accounts: {num_accounts2:num_accounts_width$}, total capitalization: {capitalization2:lamports_width$} lamports"); - let do_print = |entries: &[(Pubkey, (AccountHash, /* lamports */ u64))]| { - if entries.is_empty() { - println!("(none)"); - } else { - let count_width = width10(entries.len() as u64); - let mut total_lamports = Saturating(0); - for (i, entry) in entries.iter().enumerate() { - total_lamports += entry.1 .1; - println!( - "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", - entry.0.to_string(), - entry.1 .0 .0.to_string(), - entry.1 .1, - ); - } - println!("total lamports: {}", total_lamports.0); - } - }; - println!("Unique entries in state 1:"); - do_print(&unique_entries1); + print_unique_entries(&unique_entries1, lamports_width); println!("Unique entries in state 2:"); - do_print(&unique_entries2); + print_unique_entries(&unique_entries2, lamports_width); println!("Mismatch values:"); let count_width = width10(mismatch_entries.len() as u64); @@ -825,6 +797,27 @@ fn open_file( Ok((reader, header)) } +/// Prints unique entries +fn print_unique_entries(entries: &[CacheHashDataFileEntry], lamports_width: usize) { + if entries.is_empty() { + println!("(none)"); + return; + } + + let count_width = width10(entries.len() as u64); + let mut total_lamports = Saturating(0); + for (i, entry) in entries.iter().enumerate() { + total_lamports += entry.lamports; + println!( + "{i:count_width$}: pubkey: {:44}, hash: {:44}, lamports: {:lamports_width$}", + entry.pubkey.to_string(), + entry.hash.0.to_string(), + entry.lamports, + ); + } + println!("total lamports: {}", total_lamports.0); +} + /// Returns the number of characters required to print `x` in base-10 fn width10(x: u64) -> usize { (x as f64).log10().ceil() as usize From 3838ee094c02052ba8268126414598e4674eafdb Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 11 Aug 2024 17:14:34 +0800 Subject: [PATCH 084/529] fix docs format (#2497) * fix docs format * fix wrong indent * fix wrong indent * fix wrong indent * fix wrong indent * fix wrong indent * fix wrong indent * revert bigtable docs update --- core/src/banking_stage/leader_slot_metrics.rs | 6 +-- .../transaction_state_container.rs | 6 +-- core/src/repair/duplicate_repair_status.rs | 8 +-- dos/src/main.rs | 49 ++++++++++--------- gossip/src/crds.rs | 1 + ledger/src/blockstore.rs | 17 +++---- ledger/src/blockstore_cleanup_service.rs | 4 +- ledger/src/blockstore_db.rs | 6 +-- local-cluster/src/integration_tests.rs | 4 +- programs/stake/src/rewards.rs | 1 + runtime/src/bank.rs | 2 +- sdk/gen-headers/src/main.rs | 8 
+-- turbine/src/broadcast_stage.rs | 1 + zk-sdk/src/encryption/elgamal.rs | 1 + zk-sdk/src/encryption/grouped_elgamal.rs | 1 + zk-sdk/src/encryption/mod.rs | 8 +-- zk-sdk/src/range_proof/mod.rs | 6 +-- .../ciphertext_ciphertext_equality.rs | 2 +- .../src/sigma_proofs/percentage_with_cap.rs | 2 +- zk-sdk/src/sigma_proofs/pubkey_validity.rs | 2 +- .../zk_elgamal_proof_program/instruction.rs | 4 +- zk-token-sdk/src/encryption/elgamal.rs | 1 + .../src/encryption/grouped_elgamal.rs | 6 +-- zk-token-sdk/src/encryption/mod.rs | 8 +-- zk-token-sdk/src/range_proof/mod.rs | 6 +-- .../ciphertext_ciphertext_equality_proof.rs | 2 +- zk-token-sdk/src/sigma_proofs/fee_proof.rs | 10 ++-- .../handles_2.rs | 4 +- zk-token-sdk/src/sigma_proofs/pubkey_proof.rs | 2 +- .../src/zk_token_proof_instruction.rs | 4 +- 30 files changed, 94 insertions(+), 88 deletions(-) diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index 98cf4d72f92c91..e305ded2d468e3 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -16,10 +16,10 @@ use { /// A summary of what happened to transactions passed to the processing pipeline. /// Transactions can /// 1) Did not even make it to processing due to being filtered out by things like AccountInUse -/// lock conflicts or CostModel compute limits. These types of errors are retryable and -/// counted in `Self::retryable_transaction_indexes`. +/// lock conflicts or CostModel compute limits. These types of errors are retryable and +/// counted in `Self::retryable_transaction_indexes`. /// 2) Did not process due to some fatal error like too old, or duplicate signature. These -/// will be dropped from the transactions queue and not counted in `Self::retryable_transaction_indexes` +/// will be dropped from the transactions queue and not counted in `Self::retryable_transaction_indexes` /// 3) Were processed and committed, captured by `transaction_counts` below. /// 4) Were processed and failed commit, captured by `transaction_counts` below. pub(crate) struct ProcessTransactionsSummary { diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index 3f804f662652ab..ed78b41983fa2a 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -20,9 +20,9 @@ use { /// 2. Inserted into `TransactionStateContainer` by `BankingStage` /// 3. Popped in priority-order by scheduler, and transitioned to `Pending` state /// 4. Processed by `ConsumeWorker` -/// a. If consumed, remove `Pending` state from the `TransactionStateContainer` -/// b. If retryable, transition back to `Unprocessed` state. -/// Re-insert to the queue, and return to step 3. +/// a. If consumed, remove `Pending` state from the `TransactionStateContainer` +/// b. If retryable, transition back to `Unprocessed` state. +/// Re-insert to the queue, and return to step 3. /// /// The structure is composed of two main components: /// 1. 
A priority queue of wrapped `TransactionId`s, which are used to diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs index 53c2bd64761858..75956a64d6e58f 100644 --- a/core/src/repair/duplicate_repair_status.rs +++ b/core/src/repair/duplicate_repair_status.rs @@ -193,10 +193,10 @@ impl AncestorRequestStatus { /// Record the response from `from_addr`. Returns Some(DuplicateAncestorDecision) /// if we have finalized a decision based on the responses. We can finalize a decision when /// one of the following conditions is met: - /// 1) We have heard from all the validators, OR - /// 2) >= MINIMUM_ANCESTOR_AGREEMENT_SIZE have agreed that we have the correct versions - /// of nth ancestor, for some `n>0`, AND >= MINIMUM_ANCESTOR_AGREEMENT_SIZE have - /// agreed we have the wrong version of the `n-1` ancestor. + /// 1. We have heard from all the validators + /// 2. Or >= MINIMUM_ANCESTOR_AGREEMENT_SIZE have agreed that we have the correct versions + /// of nth ancestor, for some `n>0`, AND >= MINIMUM_ANCESTOR_AGREEMENT_SIZE have + /// agreed we have the wrong version of the `n-1` ancestor. pub fn add_response( &mut self, from_addr: &SocketAddr, diff --git a/dos/src/main.rs b/dos/src/main.rs index 0b299718467134..2d06c9d6632195 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -5,6 +5,7 @@ //! //! * `mode` argument defines interface to use (e.g. rpc, tvu, tpu) //! * `data-type` argument specifies the type of the request. +//! //! Some request types might be used only with particular `mode` value. //! For example, `get-account-info` is valid only with `mode=rpc`. //! @@ -16,27 +17,27 @@ //! The following configurations are suggested: //! Let `COMMON="--mode tpu --data-type transaction --unique-transactions"` //! 1. Without blockhash or payer: -//! 1.1 With invalid signatures -//! ```bash -//! solana-dos $COMMON --num-signatures 8 -//! ``` -//! 1.2 With valid signatures -//! ```bash -//! solana-dos $COMMON --valid-signatures --num-signatures 8 -//! ``` +//! 1.1 With invalid signatures +//! ```bash +//! solana-dos $COMMON --num-signatures 8 +//! ``` +//! 1.2 With valid signatures +//! ```bash +//! solana-dos $COMMON --valid-signatures --num-signatures 8 +//! ``` //! 2. With blockhash and payer: -//! 2.1 Single-instruction transaction -//! ```bash -//! solana-dos $COMMON --valid-blockhash --transaction-type transfer --num-instructions 1 -//! ``` -//! 2.2 Multi-instruction transaction -//! ```bash -//! solana-dos $COMMON --valid-blockhash --transaction-type transfer --num-instructions 8 -//! ``` -//! 2.3 Account-creation transaction -//! ```bash -//! solana-dos $COMMON --valid-blockhash --transaction-type account-creation -//! ``` +//! 2.1 Single-instruction transaction +//! ```bash +//! solana-dos $COMMON --valid-blockhash --transaction-type transfer --num-instructions 1 +//! ``` +//! 2.2 Multi-instruction transaction +//! ```bash +//! solana-dos $COMMON --valid-blockhash --transaction-type transfer --num-instructions 8 +//! ``` +//! 2.3 Account-creation transaction +//! ```bash +//! solana-dos $COMMON --valid-blockhash --transaction-type account-creation +//! ``` //! #![allow(clippy::arithmetic_side_effects)] #![allow(deprecated)] @@ -89,12 +90,12 @@ fn compute_rate_per_second(count: usize) -> usize { /// Provide functionality to generate several types of transactions: /// /// 1. 
Without blockhash -/// 1.1 With valid signatures (number of signatures is configurable) -/// 1.2 With invalid signatures (number of signatures is configurable) +/// 1.1 With valid signatures (number of signatures is configurable) +/// 1.2 With invalid signatures (number of signatures is configurable) /// /// 2. With blockhash (but still deliberately invalid): -/// 2.1 Transfer from 1 payer to multiple destinations (many instructions per transaction) -/// 2.2 Create an account +/// 2.1 Transfer from 1 payer to multiple destinations (many instructions per transaction) +/// 2.2 Create an account /// #[derive(Clone)] struct TransactionGenerator { diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index 73d2dd0d1c9f26..e0205e14e62988 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -6,6 +6,7 @@ //! that is identified by a Pubkey. //! * 1 Pubkey maps many CrdsValueLabels //! * 1 CrdsValueLabel maps to 1 CrdsValue +//! //! The Label, the record Pubkey, and all the record labels can be derived //! from a single CrdsValue. //! diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 569b2bdd8b7cfe..55377cb31557eb 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -912,8 +912,8 @@ impl Blockstore { /// but N.index() is less than the current slot_meta.received /// for slot S. /// - The slot is not currently full - /// It means there's an alternate version of this slot. See - /// `check_insert_data_shred` for more details. + /// It means there's an alternate version of this slot. See + /// `check_insert_data_shred` for more details. /// - [`cf::ShredData`]: stores data shreds (in check_insert_data_shreds). /// - [`cf::ShredCode`]: stores coding shreds (in check_insert_coding_shreds). /// - [`cf::SlotMeta`]: the SlotMeta of the input `shreds` and their related @@ -935,8 +935,7 @@ impl Blockstore { /// shreds inside `shreds` will be updated and committed to /// `cf::MerkleRootMeta`. /// - [`cf::Index`]: stores (slot id, index to the index_working_set_entry) - /// pair to the `cf::Index` column family for each index_working_set_entry - /// which insert did occur in this function call. + /// pair to the `cf::Index` column family for each index_working_set_entry which insert did occur in this function call. /// /// Arguments: /// - `shreds`: the shreds to be inserted. @@ -4301,17 +4300,17 @@ impl Blockstore { /// it handles the following two things: /// /// 1. based on the `SlotMetaWorkingSetEntry` for `slot`, check if `slot` - /// did not previously have a parent slot but does now. If `slot` satisfies - /// this condition, update the Orphan property of both `slot` and its parent - /// slot based on their current orphan status. Specifically: + /// did not previously have a parent slot but does now. If `slot` satisfies + /// this condition, update the Orphan property of both `slot` and its parent + /// slot based on their current orphan status. Specifically: /// - updates the orphan property of slot to no longer be an orphan because /// it has a parent. /// - adds the parent to the orphan column family if the parent's parent is /// currently unknown. /// /// 2. if the `SlotMetaWorkingSetEntry` for `slot` indicates this slot - /// is newly connected to a parent slot, then this function will update - /// the is_connected property of all its direct and indirect children slots. + /// is newly connected to a parent slot, then this function will update + /// the is_connected property of all its direct and indirect children slots. 
/// /// This function may update column family [`cf::Orphans`] and indirectly /// update SlotMeta from its output parameter `new_chained_slots`. diff --git a/ledger/src/blockstore_cleanup_service.rs b/ledger/src/blockstore_cleanup_service.rs index 4c6068236e7269..728ca217c94350 100644 --- a/ledger/src/blockstore_cleanup_service.rs +++ b/ledger/src/blockstore_cleanup_service.rs @@ -93,8 +93,8 @@ impl BlockstoreCleanupService { /// /// Return value (bool, Slot, u64): /// - `slots_to_clean` (bool): a boolean value indicating whether there - /// are any slots to clean. If true, then `cleanup_ledger` function - /// will then proceed with the ledger cleanup. + /// are any slots to clean. If true, then `cleanup_ledger` function + /// will then proceed with the ledger cleanup. /// - `lowest_slot_to_purge` (Slot): the lowest slot to purge. Any /// slot which is older or equal to `lowest_slot_to_purge` will be /// cleaned up. diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 00eea6f811ebcb..8c96403f20e9da 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -2109,9 +2109,9 @@ fn new_cf_descriptor_fifo( /// instead. /// /// - [`max_cf_size`]: the maximum allowed column family size. Note that -/// rocksdb will start deleting the oldest SST file when the column family -/// size reaches `max_cf_size` - `FIFO_WRITE_BUFFER_SIZE` to strictly -/// maintain the size limit. +/// rocksdb will start deleting the oldest SST file when the column family +/// size reaches `max_cf_size` - `FIFO_WRITE_BUFFER_SIZE` to strictly +/// maintain the size limit. fn get_cf_options_fifo( max_cf_size: &u64, column_options: &LedgerColumnOptions, diff --git a/local-cluster/src/integration_tests.rs b/local-cluster/src/integration_tests.rs index db394cd394adbd..719005cd27df49 100644 --- a/local-cluster/src/integration_tests.rs +++ b/local-cluster/src/integration_tests.rs @@ -298,9 +298,9 @@ pub fn create_custom_leader_schedule_with_random_keys( /// continues to achieve consensus /// # Arguments /// * `partitions` - A slice of partition configurations, where each partition -/// configuration is a usize representing a node's stake +/// configuration is a usize representing a node's stake /// * `leader_schedule` - An option that specifies whether the cluster should -/// run with a fixed, predetermined leader schedule +/// run with a fixed, predetermined leader schedule #[allow(clippy::cognitive_complexity)] pub fn run_cluster_partition( partitions: &[usize], diff --git a/programs/stake/src/rewards.rs b/programs/stake/src/rewards.rs index 9895bd525ac96e..34bfd0dcc12564 100644 --- a/programs/stake/src/rewards.rs +++ b/programs/stake/src/rewards.rs @@ -123,6 +123,7 @@ fn redeem_stake_rewards( /// * staker_rewards to be distributed /// * voter_rewards to be distributed /// * new value for credits_observed in the stake +/// /// returns None if there's no payout or if any deserved payout is < 1 lamport fn calculate_stake_rewards( rewarded_epoch: Epoch, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6b93ec643ee031..e269272e787b21 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1433,7 +1433,7 @@ impl Bank { /// Like `new_from_parent` but additionally: /// * Doesn't assume that the parent is anywhere near `slot`, parent could be millions of slots - /// in the past + /// in the past /// * Adjusts the new bank's tick height to avoid having to run PoH for millions of slots /// * Freezes the new bank, assuming that the user will `Bank::new_from_parent` from this bank 
/// * Calculates and sets the epoch accounts hash from the parent diff --git a/sdk/gen-headers/src/main.rs b/sdk/gen-headers/src/main.rs index a0a90a4355c820..80e8e5d3bf00c9 100644 --- a/sdk/gen-headers/src/main.rs +++ b/sdk/gen-headers/src/main.rs @@ -16,10 +16,10 @@ use { * 1. process every inc file in syscalls header file * * 2. in every such file replace the syscall declaration by a new - * declaration with a new extended name, and a static function - * definition that computes a hash of the original name and uses the - * hash to initialize a function pointer, the function pointer then is - * used to call the syscall function. + declaration with a new extended name, and a static function + * definition that computes a hash of the original name and uses the + * hash to initialize a function pointer, the function pointer then is + * used to call the syscall function. */ fn main() { let syscalls_inc_path = PathBuf::from("sdk/sbf/c/inc/sol/inc"); diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index cce43f4fa5aabd..2f9d21980372e3 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -265,6 +265,7 @@ impl BroadcastStage { /// * `window` - Cache of Shreds that we have broadcast /// * `receiver` - Receive channel for Shreds to be retransmitted to all the layer 1 nodes. /// * `exit_sender` - Set to true when this service exits, allows rest of Tpu to exit cleanly. + /// /// Otherwise, when a Tpu closes, it only closes the stages that come after it. The stages /// that come before could be blocked on a receive, and never notice that they need to /// exit. Now, if any stage of the Tpu closes, it will lead to closing the WriteStage (b/c diff --git a/zk-sdk/src/encryption/elgamal.rs b/zk-sdk/src/encryption/elgamal.rs index 3d950f75f6cc1b..c0d90fb148d26c 100644 --- a/zk-sdk/src/encryption/elgamal.rs +++ b/zk-sdk/src/encryption/elgamal.rs @@ -6,6 +6,7 @@ //! A twisted ElGamal ciphertext consists of two components: //! - A Pedersen commitment that encodes a message to be encrypted //! - A "decryption handle" that binds the Pedersen opening to a specific public key +//! //! In contrast to the traditional ElGamal encryption scheme, the twisted ElGamal encodes messages //! directly as a Pedersen commitment. Therefore, proof systems that are designed specifically for //! Pedersen commitments can be used on the twisted ElGamal ciphertexts. diff --git a/zk-sdk/src/encryption/grouped_elgamal.rs b/zk-sdk/src/encryption/grouped_elgamal.rs index 920a91f4293943..b786d251973c38 100644 --- a/zk-sdk/src/encryption/grouped_elgamal.rs +++ b/zk-sdk/src/encryption/grouped_elgamal.rs @@ -6,6 +6,7 @@ //! A regular twisted ElGamal ciphertext consists of two components: //! - A Pedersen commitment that encodes a message to be encrypted //! - A "decryption handle" that binds the Pedersen opening to a specific public key +//! //! The ciphertext can be generalized to hold not a single decryption handle, but multiple handles //! pertaining to multiple ElGamal public keys. These ciphertexts are referred to as a "grouped" //! ElGamal ciphertext. diff --git a/zk-sdk/src/encryption/mod.rs b/zk-sdk/src/encryption/mod.rs index 55eda670576712..8cad6217fc4c68 100644 --- a/zk-sdk/src/encryption/mod.rs +++ b/zk-sdk/src/encryption/mod.rs @@ -3,12 +3,12 @@ //! //! The module contains implementations of the following cryptographic objects: //! - Pedersen commitments that uses the prime-order Ristretto representation of Curve25519. -//!
[curve25519-dalek](https://docs.rs/curve25519-dalek/latest/curve25519_dalek/ristretto/index.html) -//! is used for the Ristretto group implementation. +//! [curve25519-dalek](https://docs.rs/curve25519-dalek/latest/curve25519_dalek/ristretto/index.html) +//! is used for the Ristretto group implementation. //! - The twisted ElGamal scheme, which converts Pedersen commitments into a public-key encryption -//! scheme. +//! scheme. //! - Basic type-wrapper around the AES-GCM-SIV symmetric authenticated encryption scheme -//! implemented by [aes-gcm-siv](https://docs.rs/aes-gcm-siv/latest/aes_gcm_siv/) crate. +//! implemented by [aes-gcm-siv](https://docs.rs/aes-gcm-siv/latest/aes_gcm_siv/) crate. use crate::{RISTRETTO_POINT_LEN, SCALAR_LEN}; diff --git a/zk-sdk/src/range_proof/mod.rs b/zk-sdk/src/range_proof/mod.rs index 9a4939845ae847..3cae18a8bb0f3c 100644 --- a/zk-sdk/src/range_proof/mod.rs +++ b/zk-sdk/src/range_proof/mod.rs @@ -4,10 +4,10 @@ //! [implementation](https://github.com/dalek-cryptography/bulletproofs). Compared to the original //! implementation by dalek-cryptography: //! - This implementation focuses on the range proof implementation, while the dalek-cryptography -//! crate additionally implements the general bulletproofs implementation for languages that can be -//! represented by arithmetic circuits as well as MPC. +//! crate additionally implements the general bulletproofs implementation for languages that can be +//! represented by arithmetic circuits as well as MPC. //! - This implementation implements a non-interactive range proof aggregation that is specified in -//! the original Bulletproofs [paper](https://eprint.iacr.org/2017/1066) (Section 4.3). +//! the original Bulletproofs [paper](https://eprint.iacr.org/2017/1066) (Section 4.3). //! #![allow(dead_code)] diff --git a/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs b/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs index 9ff9529e4a52e8..910d0a9a3ea556 100644 --- a/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs +++ b/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs @@ -62,7 +62,7 @@ impl CiphertextCiphertextEqualityProof { /// * `first_keypair` - The ElGamal keypair associated with the first ciphertext to be proved /// * `second_pubkey` - The ElGamal pubkey associated with the second ElGamal ciphertext /// * `first_ciphertext` - The first ElGamal ciphertext for which the prover knows a - /// decryption key for + /// decryption key for /// * `second_opening` - The opening (randomness) associated with the second ElGamal ciphertext /// * `amount` - The message associated with the ElGamal ciphertext and Pedersen commitment /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic diff --git a/zk-sdk/src/sigma_proofs/percentage_with_cap.rs b/zk-sdk/src/sigma_proofs/percentage_with_cap.rs index d53c118c858e53..18a8e1efe5cadb 100644 --- a/zk-sdk/src/sigma_proofs/percentage_with_cap.rs +++ b/zk-sdk/src/sigma_proofs/percentage_with_cap.rs @@ -69,7 +69,7 @@ impl PercentageWithCapProof { /// A typical percentage-with-cap application is defined with respect to the following values: /// - a commitment encoding a `base_amount` and a commitment encoding a `percentage_amount` /// - two constants `percentage_rate_basis_point`, which defines the percentage rate in units - /// of 0.01% and `max_value`, which defines the max cap amount. + /// of 0.01% and `max_value`, which defines the max cap amount. 
/// /// This setting requires that the `percentage_amount` is either a certain percentage of the /// `base_amount` (determined by the `percentage_rate_basis_point`) or is equal to the max cap diff --git a/zk-sdk/src/sigma_proofs/pubkey_validity.rs b/zk-sdk/src/sigma_proofs/pubkey_validity.rs index 97e6281e913d92..4166a543b77e76 100644 --- a/zk-sdk/src/sigma_proofs/pubkey_validity.rs +++ b/zk-sdk/src/sigma_proofs/pubkey_validity.rs @@ -57,7 +57,7 @@ impl PubkeyValidityProof { /// invertible). /// /// * `elgamal_keypair` = The ElGamal keypair that pertains to the ElGamal public key to be - /// proved + /// proved /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic pub fn new(elgamal_keypair: &ElGamalKeypair, transcript: &mut Transcript) -> Self { transcript.pubkey_proof_domain_separator(); diff --git a/zk-sdk/src/zk_elgamal_proof_program/instruction.rs b/zk-sdk/src/zk_elgamal_proof_program/instruction.rs index 2723bf1d6c07b7..ecfd207f30f7c5 100644 --- a/zk-sdk/src/zk_elgamal_proof_program/instruction.rs +++ b/zk-sdk/src/zk_elgamal_proof_program/instruction.rs @@ -253,7 +253,7 @@ pub enum ProofInstruction { /// 2. `[]` The proof context account owner /// /// * Otherwise - /// None + /// None /// /// The instruction expects either: /// i. `GroupedCiphertext3HandlesValidityProofData` if proof is provided as instruction data @@ -276,7 +276,7 @@ pub enum ProofInstruction { /// 2. `[]` The proof context account owner /// /// * Otherwise - /// None + /// None /// /// The instruction expects either: /// i. `BatchedGroupedCiphertext3HandlesValidityProofData` if proof is provided as instruction data diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index 130aacef669545..e90b98920abfd4 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -6,6 +6,7 @@ //! A twisted ElGamal ciphertext consists of two components: //! - A Pedersen commitment that encodes a message to be encrypted //! - A "decryption handle" that binds the Pedersen opening to a specific public key +//! //! In contrast to the traditional ElGamal encryption scheme, the twisted ElGamal encodes messages //! directly as a Pedersen commitment. Therefore, proof systems that are designed specifically for //! Pedersen commitments can be used on the twisted ElGamal ciphertexts. diff --git a/zk-token-sdk/src/encryption/grouped_elgamal.rs b/zk-token-sdk/src/encryption/grouped_elgamal.rs index c73e7bf2772ddb..0d894b9f4655fb 100644 --- a/zk-token-sdk/src/encryption/grouped_elgamal.rs +++ b/zk-token-sdk/src/encryption/grouped_elgamal.rs @@ -6,9 +6,9 @@ //! A regular twisted ElGamal ciphertext consists of two components: //! - A Pedersen commitment that encodes a message to be encrypted //! - A "decryption handle" that binds the Pedersen opening to a specific public key -//! The ciphertext can be generalized to hold not a single decryption handle, but multiple handles -//! pertaining to multiple ElGamal public keys. These ciphertexts are referred to as a "grouped" -//! ElGamal ciphertext. +//! The ciphertext can be generalized to hold not a single decryption handle, but multiple handles +//! pertaining to multiple ElGamal public keys. These ciphertexts are referred to as a "grouped" +//! ElGamal ciphertext. //! 
use { diff --git a/zk-token-sdk/src/encryption/mod.rs b/zk-token-sdk/src/encryption/mod.rs index 7cf53dd0f06167..2eeb6e1c544709 100644 --- a/zk-token-sdk/src/encryption/mod.rs +++ b/zk-token-sdk/src/encryption/mod.rs @@ -3,12 +3,12 @@ //! //! The module contains implementations of the following cryptographic objects: //! - Pedersen commitments that uses the prime-order Ristretto representation of Curve25519. -//! [curve25519-dalek](https://docs.rs/curve25519-dalek/latest/curve25519_dalek/ristretto/index.html) -//! is used for the Ristretto group implementation. +//! [curve25519-dalek](https://docs.rs/curve25519-dalek/latest/curve25519_dalek/ristretto/index.html) +//! is used for the Ristretto group implementation. //! - The twisted ElGamal scheme, which converts Pedersen commitments into a public-key encryption -//! scheme. +//! scheme. //! - Basic type-wrapper around the AES-GCM-SIV symmetric authenticated encryption scheme -//! implemented by [aes-gcm-siv](https://docs.rs/aes-gcm-siv/latest/aes_gcm_siv/) crate. +//! implemented by [aes-gcm-siv](https://docs.rs/aes-gcm-siv/latest/aes_gcm_siv/) crate. pub mod auth_encryption; pub mod discrete_log; diff --git a/zk-token-sdk/src/range_proof/mod.rs b/zk-token-sdk/src/range_proof/mod.rs index d7c7774d469baf..32dac961f507df 100644 --- a/zk-token-sdk/src/range_proof/mod.rs +++ b/zk-token-sdk/src/range_proof/mod.rs @@ -4,10 +4,10 @@ //! [implementation](https://github.com/dalek-cryptography/bulletproofs). Compared to the original //! implementation by dalek-cryptography: //! - This implementation focuses on the range proof implementation, while the dalek-cryptography -//! crate additionally implements the general bulletproofs implementation for languages that can be -//! represented by arithmetic circuits as well as MPC. +//! crate additionally implements the general bulletproofs implementation for languages that can be +//! represented by arithmetic circuits as well as MPC. //! - This implementation implements a non-interactive range proof aggregation that is specified in -//! the original Bulletproofs [paper](https://eprint.iacr.org/2017/1066) (Section 4.3). +//! the original Bulletproofs [paper](https://eprint.iacr.org/2017/1066) (Section 4.3). //! 
#[cfg(not(target_os = "solana"))] diff --git a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs index 70a5de9c4c5efb..565ba9b4360920 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs @@ -62,7 +62,7 @@ impl CiphertextCiphertextEqualityProof { /// * `source_keypair` - The ElGamal keypair associated with the first ciphertext to be proved /// * `destination_pubkey` - The ElGamal pubkey associated with the second ElGamal ciphertext /// * `source_ciphertext` - The first ElGamal ciphertext for which the prover knows a - /// decryption key for + /// decryption key for /// * `destination_opening` - The opening (randomness) associated with the second ElGamal ciphertext /// * `amount` - The message associated with the ElGamal ciphertext and Pedersen commitment /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic diff --git a/zk-token-sdk/src/sigma_proofs/fee_proof.rs b/zk-token-sdk/src/sigma_proofs/fee_proof.rs index c3a431768f1226..5cb67553e26276 100644 --- a/zk-token-sdk/src/sigma_proofs/fee_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/fee_proof.rs @@ -68,7 +68,7 @@ impl FeeSigmaProof { /// `fee_amount` must satisfy the relation `transfer_amount * (fee_rate_basis_point / /// 10_000) = fee_amount` or equivalently, `(transfer_amount * fee_rate_basis_point) - (10_000 /// * fee_amount) = 0`. More generally, let `delta_fee = (transfer_amount * - /// fee_rate_basis_point) - (10_000 * fee_amount)`. Then assuming that a division rounding + /// fee_rate_basis_point) - (10_000 * fee_amount)`. Then assuming that a division rounding /// could occur, the `delta_fee` must satisfy the bound `0 <= delta_fee < 10_000`. /// /// If `fee_amount >= max_fee`, then `fee_amount = max_fee` and therefore, the prover can @@ -89,11 +89,11 @@ impl FeeSigmaProof { /// and `create_proof_fee_below_max` to enforce that the function executes in constant time. 
/// /// * `(fee_amount, fee_commitment, fee_opening)` - The amount, Pedersen commitment, and - /// opening of the transfer fee + /// opening of the transfer fee /// * `(delta_fee, delta_commitment, delta_opening)` - The amount, Pedersen commitment, and - /// opening of the "real" delta amount + /// opening of the "real" delta amount /// * `(claimed_commitment, claimed_opening)` - The Pedersen commitment and opening of the - /// "claimed" delta amount + /// "claimed" delta amount /// * `max_fee` - The maximum fee bound /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic pub fn new( @@ -226,7 +226,7 @@ impl FeeSigmaProof { /// /// * `fee_commitment` - The Pedersen commitment of the transfer fee /// * `(delta_fee, delta_opening)` - The Pedersen commitment and opening of the "real" delta - /// value + /// value /// * `claimed_opening` - The opening of the Pedersen commitment of the "claimed" delta value /// * `max_fee` - The maximum fee bound /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic diff --git a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs index 1c1a57997e4740..be45e969cf1257 100644 --- a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs +++ b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs @@ -66,7 +66,7 @@ impl GroupedCiphertext2HandlesValidityProof { /// handles as input; it only takes the associated Pedersen opening instead. /// /// * `(destination_pubkey, auditor_pubkey)` - The ElGamal public keys associated with the decryption - /// handles + /// handles /// * `amount` - The committed message in the commitment /// * `opening` - The opening associated with the Pedersen commitment /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic @@ -121,7 +121,7 @@ impl GroupedCiphertext2HandlesValidityProof { /// /// * `commitment` - The Pedersen commitment /// * `(destination_pubkey, auditor_pubkey)` - The ElGamal pubkeys associated with the decryption - /// handles + /// handles /// * `(destination_handle, auditor_handle)` - The decryption handles /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic pub fn verify( diff --git a/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs b/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs index e0d80f2a528ef8..16f6c3b25d81ef 100644 --- a/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs @@ -57,7 +57,7 @@ impl PubkeyValidityProof { /// invertible). /// /// * `elgamal_keypair` = The ElGamal keypair that pertains to the ElGamal public key to be - /// proved + /// proved /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic pub fn new(elgamal_keypair: &ElGamalKeypair, transcript: &mut Transcript) -> Self { transcript.pubkey_proof_domain_separator(); diff --git a/zk-token-sdk/src/zk_token_proof_instruction.rs b/zk-token-sdk/src/zk_token_proof_instruction.rs index 48b6ec39c19bf4..afcf4f36a3e211 100644 --- a/zk-token-sdk/src/zk_token_proof_instruction.rs +++ b/zk-token-sdk/src/zk_token_proof_instruction.rs @@ -321,7 +321,7 @@ pub enum ProofInstruction { /// 2. `[]` The proof context account owner /// /// * Otherwise - /// None + /// None /// /// The instruction expects either: /// i. 
`GroupedCiphertext3HandlesValidityProofData` if proof is provided as instruction data @@ -344,7 +344,7 @@ pub enum ProofInstruction { /// 2. `[]` The proof context account owner /// /// * Otherwise - /// None + /// None /// /// The instruction expects either: /// i. `BatchedGroupedCiphertext3HandlesValidityProofData` if proof is provided as instruction data From dcee6df1772fef9be0af080cb58896727bb55018 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 11 Aug 2024 17:14:45 +0800 Subject: [PATCH 085/529] clippy: arithmetic-side-effects (#2493) * possible arithmetic_side_effects * feedback * fix test expected message * fmt --- sdk/program/src/epoch_rewards.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/sdk/program/src/epoch_rewards.rs b/sdk/program/src/epoch_rewards.rs index 9060f3e208090f..bec4b101fee9fa 100644 --- a/sdk/program/src/epoch_rewards.rs +++ b/sdk/program/src/epoch_rewards.rs @@ -6,7 +6,7 @@ //! //! [`sysvar::epoch_rewards`]: crate::sysvar::epoch_rewards -use {crate::hash::Hash, solana_sdk_macro::CloneZeroed, std::ops::AddAssign}; +use {crate::hash::Hash, solana_sdk_macro::CloneZeroed}; #[repr(C, align(16))] #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] @@ -42,9 +42,9 @@ pub struct EpochRewards { impl EpochRewards { pub fn distribute(&mut self, amount: u64) { - assert!(self.distributed_rewards.saturating_add(amount) <= self.total_rewards); - - self.distributed_rewards.add_assign(amount); + let new_distributed_rewards = self.distributed_rewards.saturating_add(amount); + assert!(new_distributed_rewards <= self.total_rewards); + self.distributed_rewards = new_distributed_rewards; } } @@ -86,9 +86,7 @@ mod tests { } #[test] - #[should_panic( - expected = "self.distributed_rewards.saturating_add(amount) <= self.total_rewards" - )] + #[should_panic(expected = "new_distributed_rewards <= self.total_rewards")] fn test_epoch_rewards_distribute_panic() { let mut epoch_rewards = EpochRewards::new(100, 0, 64); epoch_rewards.distribute(200); From 5980c082d794818d5e26c0f161b5e632c50ee8f6 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 11 Aug 2024 17:14:56 +0800 Subject: [PATCH 086/529] fix docs format (#2492) * docs fix: accounts-db * Update accounts-db/src/account_storage.rs Co-authored-by: Brooks * Update accounts-db/src/ancient_append_vecs.rs Co-authored-by: Brooks * remove redundant indent * remove redundant indent --------- Co-authored-by: Brooks --- accounts-db/src/account_storage.rs | 2 ++ accounts-db/src/accounts_db.rs | 2 ++ accounts-db/src/accounts_index.rs | 1 + accounts-db/src/ancient_append_vecs.rs | 6 ++++-- 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs index 738d7d958e9ce2..da25092a59be09 100644 --- a/accounts-db/src/account_storage.rs +++ b/accounts-db/src/account_storage.rs @@ -43,8 +43,10 @@ impl AccountStorage { /// Callers to this function have 2 choices: /// 1. hold the account index read lock for the pubkey so that the account index entry cannot be changed prior to or during this call. (scans do this) /// 2. expect to be ready to start over and read the index again if this function returns None + /// /// Operations like shrinking or write cache flushing may have updated the index between when the caller read the index and called this function to /// load from the append vec specified in the index. + /// /// In practice, this fn will return the entry from the map in the very first lookup unless a shrink is in progress. 
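The `EpochRewards::distribute` hunk earlier in this patch shows the usual recipe for clippy's arithmetic-side-effects lint: compute the saturated result once, assert the invariant on the named value (which also yields the readable panic message the updated test expects), then assign. A distilled sketch, with `total` and `distributed` standing in for the struct fields:

fn distribute(total: u64, distributed: &mut u64, amount: u64) {
    // saturating_add cannot overflow; the assert enforces the real invariant.
    let new_distributed = distributed.saturating_add(amount);
    assert!(new_distributed <= total);
    *distributed = new_distributed;
}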
/// The third lookup will only be called if a requesting thread exactly interposes itself between the 2 map manipulations in the drop of 'shrink_in_progress'. pub(crate) fn get_account_storage_entry( diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2a9b0c8b1c7331..1474b421f4acfa 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -325,6 +325,7 @@ pub enum StoreReclaims { /// 3. use it (slot, append_vec, etc.) /// 4. re-create it sometimes /// 5. goto 3 +/// /// If a caller uses it before initializing it, it will be a runtime unwrap() error, similar to an assert. /// That condition is an illegal use pattern and is justifiably an assertable condition. #[derive(Default)] @@ -9176,6 +9177,7 @@ impl AccountsDb { /// 1. get the _duplicate_ accounts data len from the given pubkeys /// 2. get the slots that contained duplicate pubkeys /// 3. update rent stats + /// /// Note this should only be used when ALL entries in the accounts index are roots. /// returns (data len sum of all older duplicates, slots that contained duplicate pubkeys) fn visit_duplicate_pubkeys_during_startup( diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index dc10df29d67e35..3b4ddbe3927e5d 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -376,6 +376,7 @@ impl PreAllocatedAccountMapEntry { /// create an entry that is equivalent to this process: /// 1. new empty (refcount=0, slot_list={}) /// 2. update(slot, account_info) + /// /// This code is called when the first entry [ie. (slot,account_info)] for a pubkey is inserted into the index. pub fn new + Into>( slot: Slot, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index ffe025e7474a39..095c7dd21881eb 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -2,6 +2,7 @@ //! an ancient append vec is: //! 1. a slot that is older than an epoch old //! 2. multiple 'slots' squashed into a single older (ie. ancient) slot for convenience and performance +//! //! Otherwise, an ancient append vec is the same as any other append vec use { crate::{ @@ -717,8 +718,9 @@ impl AccountsDb { /// given all accounts per ancient slot, in slots that we want to combine together: /// 1. Look up each pubkey in the index /// 2. separate, by slot, into: - /// 2a. pubkeys with refcount = 1. This means this pubkey exists NOWHERE else in accounts db. - /// 2b. pubkeys with refcount > 1 + /// 2a. pubkeys with refcount = 1. This means this pubkey exists NOWHERE else in accounts db. + /// 2b. pubkeys with refcount > 1 + /// /// Note that the return value can contain fewer items than 'accounts_per_storage' if we find storages which won't be affected. 
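The docs-format changes in this patch all follow one pattern: insert a blank `///` line after a Markdown list so the paragraph that follows is not lazily folded into the last list item, which is what rustdoc's lazy-continuation handling (and the corresponding clippy lint) complains about. A minimal sketch of the shape being fixed:

/// Steps:
/// 1. look up the key in the index
/// 2. retry if the storage moved
///
/// Without the blank doc line above it, this paragraph would render as a
/// continuation of item 2 rather than as standalone prose.
fn lookup_example() {}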
/// 'accounts_per_storage' should be sorted by slot fn calc_accounts_to_combine<'a>( From c86dba1cda651b7df47195e9dd3b78e217fcf712 Mon Sep 17 00:00:00 2001 From: Brooks Date: Sun, 11 Aug 2024 06:41:41 -0400 Subject: [PATCH 087/529] hash-cache-tool: Adds cli arg for number of bins when diffing state (#2540) --- .../accounts-hash-cache-tool/src/main.rs | 30 +++++++++++++++---- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 3561d9d3d4c1d6..6cd525b97b2673 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -32,6 +32,8 @@ const CMD_DIFF_FILES: &str = "files"; const CMD_DIFF_DIRS: &str = "directories"; const CMD_DIFF_STATE: &str = "state"; +const DEFAULT_BINS: &str = "8192"; + fn main() { let matches = App::new(crate_name!()) .about(crate_description!()) @@ -127,6 +129,20 @@ fn main() { .takes_value(true) .value_name("PATH2") .help("Accounts hash cache directory 2 to diff"), + ) + .arg( + Arg::with_name("bins") + .long("bins") + .takes_value(true) + .value_name("NUM") + .default_value(DEFAULT_BINS) + .help("Sets the number of bins to split the entries into") + .long_help( + "Sets the number of bins to split the entries into. \ + The binning is based on each entry's pubkey. \ + Must be a power of two, greater than 0, \ + and less-than-or-equal-to 16,777,216 (2^24)" + ), ), ), ) @@ -193,7 +209,8 @@ fn cmd_diff_state( ) -> Result<(), String> { let path1 = value_t_or_exit!(subcommand_matches, "path1", String); let path2 = value_t_or_exit!(subcommand_matches, "path2", String); - do_diff_state(path1, path2) + let num_bins = value_t_or_exit!(subcommand_matches, "bins", usize); + do_diff_state(path1, path2, num_bins) } fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { @@ -459,15 +476,18 @@ fn do_diff_dirs( Ok(()) } -fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), String> { - const NUM_BINS: usize = 8192; +fn do_diff_state( + dir1: impl AsRef, + dir2: impl AsRef, + num_bins: usize, +) -> Result<(), String> { let extract = |dir: &Path| -> Result<_, String> { let files = get_cache_files_in(dir).map_err(|err| format!("failed to get cache files: {err}"))?; let BinnedLatestEntriesInfo { latest_entries, capitalization, - } = extract_binned_latest_entries_in(files.iter().map(|file| &file.path), NUM_BINS) + } = extract_binned_latest_entries_in(files.iter().map(|file| &file.path), num_bins) .map_err(|err| format!("failed to extract entries: {err}"))?; let num_accounts: usize = latest_entries.iter().map(|bin| bin.len()).sum(); let entries = Vec::from(latest_entries); @@ -486,7 +506,7 @@ fn do_diff_state(dir1: impl AsRef, dir2: impl AsRef) -> Result<(), S drop(timer); let timer = LoggingTimer::new("Diffing state"); - let (mut mismatch_entries, mut unique_entries1) = (0..NUM_BINS) + let (mut mismatch_entries, mut unique_entries1) = (0..num_bins) .into_par_iter() .map(|bindex| { let mut bin1 = state1[bindex].write().unwrap(); From d34b0b3feec880136c3a8b9706ba8a4df39753d8 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 12 Aug 2024 10:39:35 +0800 Subject: [PATCH 088/529] fix docs format (#2542) fix docs format for replay_stage.rs --- core/src/replay_stage.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index d128289cb7bb93..cca977768c0436 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1999,6 +1999,7 @@ impl 
ReplayStage { /// - Bank forks already contains a bank for this leader slot /// - We have not landed a vote yet and the `wait_for_vote_to_start_leader` flag is set /// - We have failed the propagated check + /// /// Returns whether a new working bank was created and inserted into bank forks. #[allow(clippy::too_many_arguments)] fn maybe_start_leader( From a99c916b36bcee8ca2f8568b14db3d42ff79edb1 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 12 Aug 2024 10:39:46 +0800 Subject: [PATCH 089/529] clippy: manual_inspect (#2541) map => inspect --- core/src/banking_stage/latest_unprocessed_votes.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index e7aaf7d561e6d4..52b520b8a0322b 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -354,9 +354,8 @@ impl LatestUnprocessedVotes { if !Self::is_valid_for_our_fork(&latest_vote, &slot_hashes) { return None; } - latest_vote.take_vote().map(|vote| { + latest_vote.take_vote().inspect(|_vote| { self.num_unprocessed_votes.fetch_sub(1, Ordering::Relaxed); - vote }) }) }) From 2f193266e094fbcb7504e28572c6e06f8871dd26 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 12 Aug 2024 09:11:30 -0500 Subject: [PATCH 090/529] AccountLocks: validate_account_locks (#2448) --- accounts-db/src/account_locks.rs | 142 +++++++++++++++++++++++- accounts-db/src/accounts.rs | 13 +-- sdk/program/src/message/account_keys.rs | 5 + 3 files changed, 149 insertions(+), 11 deletions(-) diff --git a/accounts-db/src/account_locks.rs b/accounts-db/src/account_locks.rs index dcd512e7d72984..fc2fba7292b10e 100644 --- a/accounts-db/src/account_locks.rs +++ b/accounts-db/src/account_locks.rs @@ -2,8 +2,12 @@ use qualifier_attr::qualifiers; use { ahash::{AHashMap, AHashSet}, - solana_sdk::{pubkey::Pubkey, transaction::TransactionError}, - std::collections::hash_map, + solana_sdk::{ + message::AccountKeys, + pubkey::Pubkey, + transaction::{TransactionError, MAX_TX_ACCOUNT_LOCKS}, + }, + std::{cell::RefCell, collections::hash_map}, }; #[derive(Debug, Default)] @@ -110,9 +114,51 @@ impl AccountLocks { } } +/// Validate account locks before locking. +pub fn validate_account_locks( + account_keys: AccountKeys, + tx_account_lock_limit: usize, +) -> Result<(), TransactionError> { + if account_keys.len() > tx_account_lock_limit { + Err(TransactionError::TooManyAccountLocks) + } else if has_duplicates(account_keys) { + Err(TransactionError::AccountLoadedTwice) + } else { + Ok(()) + } +} + +thread_local! { + static HAS_DUPLICATES_SET: RefCell<AHashSet<Pubkey>> = RefCell::new(AHashSet::with_capacity(MAX_TX_ACCOUNT_LOCKS)); +} + +/// Check for duplicate account keys. +fn has_duplicates(account_keys: AccountKeys) -> bool { + // Benchmarking has shown that for sets of 32 or more keys, it is faster to + // use a HashSet to check for duplicates. + // For smaller sets a brute-force O(n^2) check seems to be faster. 
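The crossover described in the comment above is the heart of the new `has_duplicates`. A self-contained sketch of the same strategy, using std's `HashSet` in place of the patch's thread-local `AHashSet` and raw 32-byte arrays in place of `Pubkey`:

use std::collections::HashSet;

fn has_duplicate_keys(keys: &[[u8; 32]]) -> bool {
    const CROSSOVER: usize = 32;
    if keys.len() >= CROSSOVER {
        // Set-based check: any failed insert means a repeated key.
        let mut seen = HashSet::with_capacity(keys.len());
        keys.iter().any(|key| !seen.insert(*key))
    } else {
        // Quadratic scan: cheaper than hashing for small key counts.
        keys.iter()
            .enumerate()
            .any(|(idx, key)| keys[idx + 1..].contains(key))
    }
}

The thread-local set in the patch additionally avoids re-allocating on every call; clearing it after use keeps its capacity warm.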
+ const USE_ACCOUNT_LOCK_SET_SIZE: usize = 32; + if account_keys.len() >= USE_ACCOUNT_LOCK_SET_SIZE { + HAS_DUPLICATES_SET.with_borrow_mut(|set| { + let has_duplicates = account_keys.iter().any(|key| !set.insert(*key)); + set.clear(); + has_duplicates + }) + } else { + for (idx, key) in account_keys.iter().enumerate() { + for jdx in idx + 1..account_keys.len() { + if key == &account_keys[jdx] { + return true; + } + } + } + false + } +} + #[cfg(test)] mod tests { - use super::*; + use {super::*, solana_sdk::message::v0::LoadedAddresses}; #[test] fn test_account_locks() { @@ -152,4 +198,94 @@ mod tests { account_locks.unlock_accounts([(&key2, false)].into_iter()); assert!(!account_locks.is_locked_readonly(&key2)); } + + #[test] + fn test_validate_account_locks_valid_no_dynamic() { + let static_keys = &[Pubkey::new_unique(), Pubkey::new_unique()]; + let account_keys = AccountKeys::new(static_keys, None); + assert!(validate_account_locks(account_keys, MAX_TX_ACCOUNT_LOCKS).is_ok()); + } + + #[test] + fn test_validate_account_locks_too_many_no_dynamic() { + let static_keys = &[Pubkey::new_unique(), Pubkey::new_unique()]; + let account_keys = AccountKeys::new(static_keys, None); + assert_eq!( + validate_account_locks(account_keys, 1), + Err(TransactionError::TooManyAccountLocks) + ); + } + + #[test] + fn test_validate_account_locks_duplicate_no_dynamic() { + let duplicate_key = Pubkey::new_unique(); + let static_keys = &[duplicate_key, Pubkey::new_unique(), duplicate_key]; + let account_keys = AccountKeys::new(static_keys, None); + assert_eq!( + validate_account_locks(account_keys, MAX_TX_ACCOUNT_LOCKS), + Err(TransactionError::AccountLoadedTwice) + ); + } + + #[test] + fn test_validate_account_locks_valid_dynamic() { + let static_keys = &[Pubkey::new_unique(), Pubkey::new_unique()]; + let dynamic_keys = LoadedAddresses { + writable: vec![Pubkey::new_unique()], + readonly: vec![Pubkey::new_unique()], + }; + let account_keys = AccountKeys::new(static_keys, Some(&dynamic_keys)); + assert!(validate_account_locks(account_keys, MAX_TX_ACCOUNT_LOCKS).is_ok()); + } + + #[test] + fn test_validate_account_locks_too_many_dynamic() { + let static_keys = &[Pubkey::new_unique()]; + let dynamic_keys = LoadedAddresses { + writable: vec![Pubkey::new_unique()], + readonly: vec![Pubkey::new_unique()], + }; + let account_keys = AccountKeys::new(static_keys, Some(&dynamic_keys)); + assert_eq!( + validate_account_locks(account_keys, 2), + Err(TransactionError::TooManyAccountLocks) + ); + } + + #[test] + fn test_validate_account_locks_duplicate_dynamic() { + let duplicate_key = Pubkey::new_unique(); + let static_keys = &[duplicate_key]; + let dynamic_keys = LoadedAddresses { + writable: vec![Pubkey::new_unique()], + readonly: vec![duplicate_key], + }; + let account_keys = AccountKeys::new(static_keys, Some(&dynamic_keys)); + assert_eq!( + validate_account_locks(account_keys, MAX_TX_ACCOUNT_LOCKS), + Err(TransactionError::AccountLoadedTwice) + ); + } + + #[test] + fn test_has_duplicates_small() { + let mut keys = (0..16).map(|_| Pubkey::new_unique()).collect::>(); + let account_keys = AccountKeys::new(&keys, None); + assert!(!has_duplicates(account_keys)); + + keys[14] = keys[3]; // Duplicate key + let account_keys = AccountKeys::new(&keys, None); + assert!(has_duplicates(account_keys)); + } + + #[test] + fn test_has_duplicates_large() { + let mut keys = (0..64).map(|_| Pubkey::new_unique()).collect::>(); + let account_keys = AccountKeys::new(&keys, None); + assert!(!has_duplicates(account_keys)); + + keys[47] = 
keys[3]; // Duplicate key + let account_keys = AccountKeys::new(&keys, None); + assert!(has_duplicates(account_keys)); + } } diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index d781bb45e11b8a..9033ceea6e6da5 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -1,6 +1,6 @@ use { crate::{ - account_locks::AccountLocks, + account_locks::{validate_account_locks, AccountLocks}, accounts_db::{ AccountStorageEntry, AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData, ScanStorageResult, VerifyAccountsHashAndLamportsConfig, @@ -520,7 +520,7 @@ impl Accounts { // Validate the account locks, then get iterator if successful validation. let tx_account_locks_results: Vec> = txs .map(|tx| { - SanitizedTransaction::validate_account_locks(tx.message(), tx_account_lock_limit) + validate_account_locks(tx.account_keys(), tx_account_lock_limit) .map(|_| TransactionAccountLocksIterator::new(tx)) }) .collect(); @@ -530,7 +530,7 @@ impl Accounts { #[must_use] pub fn lock_accounts_with_results<'a>( &self, - txs: impl Iterator, + txs: impl Iterator, results: impl Iterator>, tx_account_lock_limit: usize, ) -> Vec> { @@ -538,11 +538,8 @@ impl Accounts { let tx_account_locks_results: Vec> = txs .zip(results) .map(|(tx, result)| match result { - Ok(()) => SanitizedTransaction::validate_account_locks( - tx.message(), - tx_account_lock_limit, - ) - .map(|_| TransactionAccountLocksIterator::new(tx)), + Ok(()) => validate_account_locks(tx.account_keys(), tx_account_lock_limit) + .map(|_| TransactionAccountLocksIterator::new(tx)), Err(err) => Err(err), }) .collect(); diff --git a/sdk/program/src/message/account_keys.rs b/sdk/program/src/message/account_keys.rs index e7bb569d03643b..e7dbd061cede76 100644 --- a/sdk/program/src/message/account_keys.rs +++ b/sdk/program/src/message/account_keys.rs @@ -17,6 +17,7 @@ pub struct AccountKeys<'a> { impl Index for AccountKeys<'_> { type Output = Pubkey; + #[inline] fn index(&self, index: usize) -> &Self::Output { self.get(index).expect("index is invalid") } @@ -33,6 +34,7 @@ impl<'a> AccountKeys<'a> { /// Returns an iterator of account key segments. The ordering of segments /// affects how account indexes from compiled instructions are resolved and /// so should not be changed. + #[inline] fn key_segment_iter(&self) -> impl Iterator + Clone { if let Some(dynamic_keys) = self.dynamic_keys { [ @@ -51,6 +53,7 @@ impl<'a> AccountKeys<'a> { /// message account keys constructed from static keys, followed by dynamically /// loaded writable addresses, and lastly the list of dynamically loaded /// readonly addresses. 
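The `#[inline]` hints above annotate `AccountKeys`' segment-walking accessors. The underlying idea, sketched with byte arrays standing in for `Pubkey` and plain slices standing in for the static/writable/readonly segments:

fn get<'a>(segments: &[&'a [[u8; 32]]], mut index: usize) -> Option<&'a [u8; 32]> {
    for segment in segments {
        if index < segment.len() {
            return segment.get(index);
        }
        // The real code uses saturating_sub; plain subtraction is safe here
        // because of the bounds check above.
        index -= segment.len();
    }
    None
}

Indexing and iteration walk the segments in a fixed order, so compiled instruction indexes resolve consistently without materializing one combined Vec.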
+ #[inline] pub fn get(&self, mut index: usize) -> Option<&'a Pubkey> { for key_segment in self.key_segment_iter() { if index < key_segment.len() { @@ -63,6 +66,7 @@ impl<'a> AccountKeys<'a> { } /// Returns the total length of loaded accounts for a message + #[inline] pub fn len(&self) -> usize { let mut len = 0usize; for key_segment in self.key_segment_iter() { @@ -77,6 +81,7 @@ impl<'a> AccountKeys<'a> { } /// Iterator for the addresses of the loaded accounts for a message + #[inline] pub fn iter(&self) -> impl Iterator + Clone { self.key_segment_iter().flatten() } From 8223dfc1132869187e45df338b884655d50fd678 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 12 Aug 2024 22:17:23 +0800 Subject: [PATCH 091/529] fix: dependency_on_unit_never_type_fallback (#2551) --- client/src/send_and_confirm_transactions_in_parallel.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/src/send_and_confirm_transactions_in_parallel.rs b/client/src/send_and_confirm_transactions_in_parallel.rs index aa65ef42a9d93e..571e3566a4782a 100644 --- a/client/src/send_and_confirm_transactions_in_parallel.rs +++ b/client/src/send_and_confirm_transactions_in_parallel.rs @@ -309,7 +309,10 @@ async fn sign_all_messages_and_send( }); } // collect to convert Vec> to Result> - join_all(futures).await.into_iter().collect::>()?; + join_all(futures) + .await + .into_iter() + .collect::>>()?; Ok(()) } From 23529db19def247c047da3006e95b5a3507036c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 22:17:51 +0800 Subject: [PATCH 092/529] build(deps): bump serde_json from 1.0.122 to 1.0.124 (#2548) * build(deps): bump serde_json from 1.0.122 to 1.0.124 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.122 to 1.0.124. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.122...v1.0.124) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4bd42822c86cf6..472d0d59806ccc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5045,9 +5045,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 5cc529ea29ab86..2e4e857533db0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -322,7 +322,7 @@ seqlock = "0.2.0" serde = "1.0.205" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" serde_derive = "1.0.205" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.122" +serde_json = "1.0.124" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b340cf6da24faa..c08a7d45d0302b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4199,9 +4199,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "itoa", "memchr", From f46d3408b255764ff5e413bb27714c1cea630614 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 22:18:15 +0800 Subject: [PATCH 093/529] build(deps): bump syn from 2.0.72 to 2.0.74 (#2549) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.72 to 2.0.74. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.72...2.0.74) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 68 +++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 472d0d59806ccc..6702d3c94e08cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -722,7 +722,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -875,7 +875,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1032,7 +1032,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "syn_derive", ] @@ -1164,7 +1164,7 @@ checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1781,7 +1781,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1792,7 +1792,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1854,7 +1854,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1978,7 +1978,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2084,7 +2084,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2354,7 +2354,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3642,7 +3642,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3715,7 +3715,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -4340,7 +4340,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -5040,7 +5040,7 @@ checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -5095,7 +5095,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -5145,7 +5145,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -6368,7 +6368,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -6791,7 +6791,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "toml 0.8.12", ] @@ -7426,7 +7426,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -8264,7 +8264,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", 
"spl-discriminator-syn", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -8276,7 +8276,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.72", + "syn 2.0.74", "thiserror", ] @@ -8335,7 +8335,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -8523,9 +8523,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" dependencies = [ "proc-macro2", "quote", @@ -8541,7 +8541,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -8727,7 +8727,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -8739,7 +8739,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "test-case-core", ] @@ -8775,7 +8775,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -8912,7 +8912,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -9156,7 +9156,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -9465,7 +9465,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "wasm-bindgen-shared", ] @@ -9499,7 +9499,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9849,7 +9849,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -9869,7 +9869,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] From e8ddd9ca2933ba56766545646eff190fd3f21088 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 12 Aug 2024 09:56:30 -0500 Subject: [PATCH 094/529] TransactionView: AddressTableLookupMeta (#2479) --- .../src/address_table_lookup_meta.rs | 201 ++++++++++++++++++ transaction-view/src/lib.rs | 2 + 2 files changed, 203 insertions(+) create mode 100644 transaction-view/src/address_table_lookup_meta.rs diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs new file mode 100644 index 00000000000000..4f92547ef2ef7c --- /dev/null +++ b/transaction-view/src/address_table_lookup_meta.rs @@ -0,0 +1,201 @@ +use { + crate::{ + bytes::{ + advance_offset_for_array, advance_offset_for_type, check_remaining, + optimized_read_compressed_u16, read_byte, + }, + result::Result, + }, + solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature}, +}; + +// Each ATL has at least a Pubkey, one byte for the number of write indexes, +// and one byte for the number of read indexes. 
Additionally, for validity +// the ATL must have at least one write or read index giving a minimum size +// of 35 bytes. +const MIN_SIZED_ATL: usize = { + core::mem::size_of::<Pubkey>() // account key + + 1 // writable indexes length + + 1 // readonly indexes length + + 1 // single account (either write or read) +}; + +// A valid packet with ATLs has: +// 1. At least 1 signature +// 2. 1 message prefix byte +// 3. 3 bytes for the message header +// 4. 1 static account key +// 5. 1 recent blockhash +// 6. 1 byte for the number of instructions (0) +// 7. 1 byte for the number of ATLS +const MIN_SIZED_PACKET_WITH_ATLS: usize = { + 1 // signatures count + + core::mem::size_of::<Signature>() // signature + + 1 // message prefix + + 3 // message header + + 1 // static account keys count + + core::mem::size_of::<Pubkey>() // static account key + + core::mem::size_of::<Hash>() // recent blockhash + + 1 // number of instructions + + 1 // number of ATLS +}; + +/// The maximum number of ATLS that can fit in a valid packet. +const MAX_ATLS_PER_PACKET: usize = (PACKET_DATA_SIZE - MIN_SIZED_PACKET_WITH_ATLS) / MIN_SIZED_ATL; + +/// Contains metadata about the address table lookups in a transaction packet. +pub struct AddressTableLookupMeta { + /// The number of address table lookups in the transaction. + pub(crate) num_address_table_lookup: u8, + /// The offset to the first address table lookup in the transaction. + pub(crate) offset: u16, +} + +impl AddressTableLookupMeta { + /// Get the number of address table lookups (ATL) and offset to the first. + /// The offset will be updated to point to the first byte after the last + /// ATL. + /// This function will parse each ATL to ensure the data is well-formed, + /// but will not cache data related to these ATLs. + pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> { + // Maximum number of ATLs should be represented by a single byte, + // thus the MSB should not be set. + const _: () = assert!(MAX_ATLS_PER_PACKET & 0b1000_0000 == 0); + let num_address_table_lookups = read_byte(bytes, offset)?; + + // Check that the remaining bytes are enough to hold the ATLs. + check_remaining( + bytes, + *offset, + MIN_SIZED_ATL.wrapping_mul(usize::from(num_address_table_lookups)), + )?; + + // We know the offset does not exceed packet length, and our packet + // length is less than u16::MAX, so we can safely cast to u16. + let address_table_lookups_offset = *offset as u16; + + // The ATLs do not have a fixed size. So we must iterate over + // each ATL to find the total size of the ATLs in the packet, + // and check for any malformed ATLs or buffer overflows. + for _index in 0..num_address_table_lookups { + // Each ATL has 3 pieces: + // 1. Address (Pubkey) + // 2. write indexes ([u8]) + // 3. read indexes ([u8]) + + // Advance offset for address of the lookup table. + advance_offset_for_type::<Pubkey>(bytes, offset)?; + + // Read the number of write indexes, and then update the offset. + let num_accounts = optimized_read_compressed_u16(bytes, offset)?; + advance_offset_for_array::<u8>(bytes, offset, num_accounts)?; + + // Read the number of read indexes, and then update the offset. + let data_len = optimized_read_compressed_u16(bytes, offset)?; + advance_offset_for_array::<u8>(bytes, offset, data_len)?; 
+ } + + Ok(Self { + num_address_table_lookup: num_address_table_lookups, + offset: address_table_lookups_offset, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{message::v0::MessageAddressTableLookup, short_vec::ShortVec}, + }; + + #[test] + fn test_zero_atls() { + let bytes = bincode::serialize(&ShortVec::(vec![])).unwrap(); + let mut offset = 0; + let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap(); + assert_eq!(meta.num_address_table_lookup, 0); + assert_eq!(meta.offset, 1); + assert_eq!(offset, bytes.len()); + } + + #[test] + fn test_length_too_high() { + let mut bytes = bincode::serialize(&ShortVec::(vec![])).unwrap(); + let mut offset = 0; + // modify the number of atls to be too high + bytes[0] = 5; + assert!(AddressTableLookupMeta::try_new(&bytes, &mut offset).is_err()); + } + + #[test] + fn test_single_atl() { + let bytes = bincode::serialize(&ShortVec::(vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], + }, + ])) + .unwrap(); + let mut offset = 0; + let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap(); + assert_eq!(meta.num_address_table_lookup, 1); + assert_eq!(meta.offset, 1); + assert_eq!(offset, bytes.len()); + } + + #[test] + fn test_multiple_atls() { + let bytes = bincode::serialize(&ShortVec::(vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], + }, + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], + }, + ])) + .unwrap(); + let mut offset = 0; + let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap(); + assert_eq!(meta.num_address_table_lookup, 2); + assert_eq!(meta.offset, 1); + assert_eq!(offset, bytes.len()); + } + + #[test] + fn test_invalid_writable_indexes_vec() { + let mut bytes = bincode::serialize(&ShortVec(vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], + }])) + .unwrap(); + + // modify the number of accounts to be too high + bytes[33] = 127; + + let mut offset = 0; + assert!(AddressTableLookupMeta::try_new(&bytes, &mut offset).is_err()); + } + + #[test] + fn test_invalid_readonly_indexes_vec() { + let mut bytes = bincode::serialize(&ShortVec(vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], + }])) + .unwrap(); + + // modify the number of accounts to be too high + bytes[37] = 127; + + let mut offset = 0; + assert!(AddressTableLookupMeta::try_new(&bytes, &mut offset).is_err()); + } +} diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index 145b4df16ed06c..a9ed7eb9b8f17c 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -6,6 +6,8 @@ pub mod bytes; #[allow(dead_code)] mod bytes; +#[allow(dead_code)] +mod address_table_lookup_meta; #[allow(dead_code)] mod instructions_meta; #[allow(dead_code)] From ea10d2e5347d12b7966529529004e7a34a43e215 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 12 Aug 2024 18:58:23 +0000 Subject: [PATCH 095/529] rolls out chained Merkle shreds to ~21% of testnet slots (#2503) --- turbine/src/broadcast_stage/standard_broadcast_run.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs 
b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 0ddbe1020f5f98..4bcdebf27ec066 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -511,8 +511,8 @@ fn should_chain_merkle_shreds(slot: Slot, cluster_type: ClusterType) -> bool { ClusterType::Development => true, ClusterType::Devnet => false, ClusterType::MainnetBeta => false, - // Roll out chained Merkle shreds to ~5% of testnet. - ClusterType::Testnet => slot % 19 == 1, + // Roll out chained Merkle shreds to ~21% of testnet. + ClusterType::Testnet => slot % 19 < 4, } } From 71c8433cd550a3bb76794834fdbec0e13f23fee1 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 13 Aug 2024 07:06:33 +0800 Subject: [PATCH 096/529] refactor: committed tx may not have been executed (#2525) --- core/src/banking_stage/committer.rs | 2 +- ledger-tool/src/main.rs | 6 +-- ledger/src/blockstore_processor.rs | 23 ++++------ programs/sbf/tests/programs.rs | 21 ++++----- rpc/src/transaction_status_service.rs | 34 +++++--------- runtime/src/bank.rs | 59 ++++++++++++++----------- runtime/src/bank/bank_hash_details.rs | 31 ++++++++++++- runtime/src/bank/tests.rs | 11 +---- svm/src/transaction_commit_result.rs | 24 +++++----- svm/src/transaction_execution_result.rs | 9 +++- 10 files changed, 113 insertions(+), 107 deletions(-) diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 7beb06a4d2de39..4c5bf8bbf5382b 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -101,7 +101,7 @@ impl Committer { // transaction committed to block. qos_service uses these information to adjust // reserved block space. Ok(committed_tx) => CommitTransactionDetails::Committed { - compute_units: committed_tx.execution_details.executed_units, + compute_units: committed_tx.executed_units, loaded_accounts_data_size: committed_tx .loaded_account_stats .loaded_accounts_data_size, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 60154480f4dd3e..06f17a55e03a2a 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -706,16 +706,14 @@ fn record_transactions( .collect(); let is_simple_vote_tx = tx.is_simple_vote_transaction(); - let execution_results = commit_result - .ok() - .map(|committed_tx| committed_tx.execution_details); + let commit_details = commit_result.ok().map(|committed_tx| committed_tx.into()); TransactionDetails { signature: tx.signature().to_string(), accounts, instructions, is_simple_vote_tx, - execution_results, + commit_details, index, } }) diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 9bd863908d98dd..98a7f1c72d46c3 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -238,7 +238,7 @@ fn check_block_cost_limits( if let Ok(committed_tx) = commit_result { Some(CostModel::calculate_cost_for_executed_transaction( tx, - committed_tx.execution_details.executed_units, + committed_tx.executed_units, committed_tx.loaded_account_stats.loaded_accounts_data_size, &bank.feature_set, )) @@ -2243,9 +2243,7 @@ pub mod tests { }, solana_svm::{ transaction_commit_result::CommittedTransaction, - transaction_execution_result::{ - TransactionExecutionDetails, TransactionLoadedAccountsStats, - }, + transaction_execution_result::TransactionLoadedAccountsStats, transaction_processor::ExecutionRecordingConfig, }, solana_vote::vote_account::VoteAccount, @@ -5077,20 +5075,17 @@ pub mod tests { let txs = vec![tx.clone(), 
tx]; let commit_results = vec![ Ok(CommittedTransaction { + status: Ok(()), + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: actual_execution_cu, + fee_details: FeeDetails::default(), + rent_debits: RentDebits::default(), loaded_account_stats: TransactionLoadedAccountsStats { loaded_accounts_data_size: actual_loaded_accounts_data_size, loaded_accounts_count: 2, }, - execution_details: TransactionExecutionDetails { - status: Ok(()), - log_messages: None, - inner_instructions: None, - return_data: None, - executed_units: actual_execution_cu, - accounts_data_len_delta: 0, - }, - fee_details: FeeDetails::default(), - rent_debits: RentDebits::default(), }), Err(TransactionError::AccountNotFound), ]; diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index f9afcf03013566..2290c5e5cc1c19 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -63,7 +63,7 @@ use { }, solana_svm::{ transaction_commit_result::CommittedTransaction, - transaction_execution_result::{InnerInstruction, TransactionExecutionDetails}, + transaction_execution_result::InnerInstruction, transaction_processor::ExecutionRecordingConfig, }, solana_timings::ExecuteTimings, @@ -107,12 +107,12 @@ fn process_transaction_and_record_inner( None, ) .0; - let TransactionExecutionDetails { + let CommittedTransaction { inner_instructions, log_messages, status, .. - } = commit_results.swap_remove(0).unwrap().execution_details; + } = commit_results.swap_remove(0).unwrap(); let inner_instructions = inner_instructions.expect("cpi recording should be enabled"); let log_messages = log_messages.expect("log recording should be enabled"); (status, inner_instructions, log_messages) @@ -163,16 +163,12 @@ fn execute_transactions( )| { commit_result.map(|committed_tx| { let CommittedTransaction { + status, + log_messages, + inner_instructions, + return_data, + executed_units, fee_details, - execution_details: - TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - return_data, - executed_units, - .. - }, .. } = committed_tx; @@ -5244,7 +5240,6 @@ fn test_function_call_args() { let return_data = &result[0] .as_ref() .unwrap() - .execution_details .return_data .as_ref() .unwrap() diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 96e2d88a13c550..43ce83c0966de6 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -6,10 +6,7 @@ use { blockstore::Blockstore, blockstore_processor::{TransactionStatusBatch, TransactionStatusMessage}, }, - solana_svm::{ - transaction_commit_result::CommittedTransaction, - transaction_execution_result::TransactionExecutionDetails, - }, + solana_svm::transaction_commit_result::CommittedTransaction, solana_transaction_status::{ extract_and_fmt_memos, map_inner_instructions, Reward, TransactionStatusMeta, }, @@ -98,15 +95,11 @@ impl TransactionStatusService { }; let CommittedTransaction { - execution_details: - TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - return_data, - executed_units, - .. - }, + status, + log_messages, + inner_instructions, + return_data, + executed_units, fee_details, rent_debits, .. 
@@ -332,17 +325,14 @@ pub(crate) mod tests { rent_debits.insert(&pubkey, 123, 456); let commit_result = Ok(CommittedTransaction { - loaded_account_stats: TransactionLoadedAccountsStats::default(), - execution_details: TransactionExecutionDetails { - status: Ok(()), - log_messages: None, - inner_instructions: None, - return_data: None, - executed_units: 0, - accounts_data_len_delta: 0, - }, + status: Ok(()), + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: 0, fee_details: FeeDetails::default(), rent_debits, + loaded_account_stats: TransactionLoadedAccountsStats::default(), }); let balances = TransactionBalancesSet { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e269272e787b21..4fab9ce7405d5f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -149,7 +149,7 @@ use { stake_state::StakeStateV2, }, solana_svm::{ - account_loader::collect_rent_from_account, + account_loader::{collect_rent_from_account, LoadedTransaction}, account_overrides::AccountOverrides, account_saver::collect_accounts_to_store, transaction_commit_result::{CommittedTransaction, TransactionCommitResult}, @@ -3859,29 +3859,37 @@ impl Bank { ) -> Vec { processing_results .into_iter() - .map(|processing_result| match processing_result { - Ok(processed_tx) => { - let loaded_tx = &processed_tx.loaded_transaction; - let loaded_account_stats = TransactionLoadedAccountsStats { - loaded_accounts_data_size: loaded_tx.loaded_accounts_data_size, - loaded_accounts_count: loaded_tx.accounts.len(), - }; - - // Rent is only collected for successfully executed transactions - let rent_debits = if processed_tx.was_successful() { - processed_tx.loaded_transaction.rent_debits - } else { - RentDebits::default() - }; + .map(|processing_result| { + let processed_tx = processing_result?; + let execution_details = processed_tx.execution_details; + let LoadedTransaction { + rent_debits, + accounts: loaded_accounts, + loaded_accounts_data_size, + fee_details, + .. + } = processed_tx.loaded_transaction; + + // Rent is only collected for successfully executed transactions + let rent_debits = if execution_details.was_successful() { + rent_debits + } else { + RentDebits::default() + }; - Ok(CommittedTransaction { - loaded_account_stats, - execution_details: processed_tx.execution_details, - fee_details: processed_tx.loaded_transaction.fee_details, - rent_debits, - }) - } - Err(err) => Err(err), + Ok(CommittedTransaction { + status: execution_details.status, + log_messages: execution_details.log_messages, + inner_instructions: execution_details.inner_instructions, + return_data: execution_details.return_data, + executed_units: execution_details.executed_units, + fee_details, + rent_debits, + loaded_account_stats: TransactionLoadedAccountsStats { + loaded_accounts_count: loaded_accounts.len(), + loaded_accounts_data_size, + }, + }) }) .collect() } @@ -4615,7 +4623,7 @@ impl Bank { pub fn process_transaction_with_metadata( &self, tx: impl Into, - ) -> Result { + ) -> Result { let txs = vec![tx.into()]; let batch = self.prepare_entry_batch(txs)?; @@ -4632,8 +4640,7 @@ impl Bank { Some(1000 * 1000), ); - let committed_tx = commit_results.remove(0)?; - Ok(committed_tx.execution_details) + commit_results.remove(0) } /// Process multiple transaction in a single batch. This is used for benches and unit tests. 
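The bank-side refactor above flattens the nested execution details onto `CommittedTransaction`, so downstream readers no longer reach through `execution_details`. A simplified sketch of the shape of that change, with stand-in types, including the rule that rent debits are only kept for successful executions:

struct ExecutionDetails { status: Result<(), String>, executed_units: u64 }
struct Committed { status: Result<(), String>, executed_units: u64, rent_debits: u64 }

fn commit(details: ExecutionDetails, rent_debits: u64) -> Committed {
    // Rent is only recorded when execution succeeded.
    let rent_debits = if details.status.is_ok() { rent_debits } else { 0 };
    Committed {
        status: details.status,
        executed_units: details.executed_units,
        rent_debits,
    }
}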
diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index a6c449a50c72b8..5ab13d85c4d89b 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -15,10 +15,14 @@ use { solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount}, clock::{Epoch, Slot}, + fee::FeeDetails, hash::Hash, + inner_instruction::InnerInstructionsList, pubkey::Pubkey, + transaction::Result as TransactionResult, + transaction_context::TransactionReturnData, }, - solana_svm::transaction_execution_result::TransactionExecutionDetails, + solana_svm::transaction_commit_result::CommittedTransaction, solana_transaction_status::UiInstruction, std::str::FromStr, }; @@ -74,7 +78,30 @@ pub struct TransactionDetails { pub accounts: Vec, pub instructions: Vec, pub is_simple_vote_tx: bool, - pub execution_results: Option, + pub commit_details: Option, +} + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct TransactionCommitDetails { + pub status: TransactionResult<()>, + pub log_messages: Option>, + pub inner_instructions: Option, + pub return_data: Option, + pub executed_units: u64, + pub fee_details: FeeDetails, +} + +impl From for TransactionCommitDetails { + fn from(committed_tx: CommittedTransaction) -> Self { + Self { + status: committed_tx.status, + log_messages: committed_tx.log_messages, + inner_instructions: committed_tx.inner_instructions, + return_data: committed_tx.return_data, + executed_units: committed_tx.executed_units, + fee_details: committed_tx.fee_details, + } + } } /// The components that go into a bank hash calculation for a single bank/slot. diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index a46ae1f8578342..85d51a10f8e2e9 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -5732,7 +5732,7 @@ fn test_pre_post_transaction_balances() { // Failed transactions still produce balance sets // This is an InstructionError - fees charged assert_eq!( - commit_results[2].transaction_result(), + commit_results[2].as_ref().unwrap().status, Err(TransactionError::InstructionError( 0, InstructionError::Custom(1), @@ -9090,7 +9090,6 @@ fn test_tx_log_order() { assert!(commit_results[0] .as_ref() .unwrap() - .execution_details .log_messages .as_ref() .unwrap()[1] @@ -9099,7 +9098,6 @@ fn test_tx_log_order() { assert!(commit_results[1] .as_ref() .unwrap() - .execution_details .log_messages .as_ref() .unwrap()[2] @@ -9193,12 +9191,7 @@ fn test_tx_return_data() { None, ) .0; - let return_data = commit_results[0] - .as_ref() - .unwrap() - .execution_details - .return_data - .clone(); + let return_data = commit_results[0].as_ref().unwrap().return_data.clone(); if let Some(index) = index { let return_data = return_data.unwrap(); assert_eq!(return_data.program_id, mock_program_id); diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs index 53590c0c5d2b50..8bbada73634ad9 100644 --- a/svm/src/transaction_commit_result.rs +++ b/svm/src/transaction_commit_result.rs @@ -1,9 +1,8 @@ use { - crate::transaction_execution_result::{ - TransactionExecutionDetails, TransactionLoadedAccountsStats, - }, + crate::transaction_execution_result::TransactionLoadedAccountsStats, solana_sdk::{ - fee::FeeDetails, rent_debits::RentDebits, transaction::Result as TransactionResult, + fee::FeeDetails, inner_instruction::InnerInstructionsList, rent_debits::RentDebits, + transaction::Result as TransactionResult, transaction_context::TransactionReturnData, }, }; @@ -11,16 
+10,19 @@ pub type TransactionCommitResult = TransactionResult; #[derive(Clone, Debug)] pub struct CommittedTransaction { - pub loaded_account_stats: TransactionLoadedAccountsStats, - pub execution_details: TransactionExecutionDetails, + pub status: TransactionResult<()>, + pub log_messages: Option>, + pub inner_instructions: Option, + pub return_data: Option, + pub executed_units: u64, pub fee_details: FeeDetails, pub rent_debits: RentDebits, + pub loaded_account_stats: TransactionLoadedAccountsStats, } pub trait TransactionCommitResultExtensions { fn was_committed(&self) -> bool; fn was_executed_successfully(&self) -> bool; - fn transaction_result(&self) -> TransactionResult<()>; } impl TransactionCommitResultExtensions for TransactionCommitResult { @@ -30,14 +32,8 @@ impl TransactionCommitResultExtensions for TransactionCommitResult { fn was_executed_successfully(&self) -> bool { match self { - Ok(committed_tx) => committed_tx.execution_details.status.is_ok(), + Ok(committed_tx) => committed_tx.status.is_ok(), Err(_) => false, } } - - fn transaction_result(&self) -> TransactionResult<()> { - self.as_ref() - .map_err(|err| err.clone()) - .and_then(|committed_tx| committed_tx.execution_details.status.clone()) - } } diff --git a/svm/src/transaction_execution_result.rs b/svm/src/transaction_execution_result.rs index 2ac684d4cc219c..6a41294ddb975e 100644 --- a/svm/src/transaction_execution_result.rs +++ b/svm/src/transaction_execution_result.rs @@ -6,7 +6,6 @@ pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList}; use { crate::account_loader::LoadedTransaction, - serde::{Deserialize, Serialize}, solana_program_runtime::loaded_programs::ProgramCacheEntry, solana_sdk::{ pubkey::Pubkey, @@ -51,7 +50,7 @@ impl ExecutedTransaction { } } -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +#[derive(Clone, Debug, Eq, PartialEq)] pub struct TransactionExecutionDetails { pub status: transaction::Result<()>, pub log_messages: Option>, @@ -62,3 +61,9 @@ pub struct TransactionExecutionDetails { /// NOTE: This value is valid IFF `status` is `Ok`. 
pub accounts_data_len_delta: i64, } + +impl TransactionExecutionDetails { + pub fn was_successful(&self) -> bool { + self.status.is_ok() + } +} From 5b54ff2ac87428fbd90e7346d161dfb71546b86b Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 13 Aug 2024 07:08:24 +0800 Subject: [PATCH 097/529] Use less memory when serializing versioned epoch stakes (#2520) --- runtime/src/bank/serde_snapshot.rs | 9 +- runtime/src/epoch_stakes.rs | 92 +++--------------- runtime/src/stakes.rs | 18 +--- runtime/src/stakes/serde_stakes.rs | 148 ++++++++++++++++++++++++++++- 4 files changed, 167 insertions(+), 100 deletions(-) diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 70ab7e01a473a8..258c49593f0e12 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -6,8 +6,7 @@ mod tests { epoch_accounts_hash_utils, test_utils as bank_test_utils, Bank, EpochRewardStatus, }, epoch_stakes::{ - EpochAuthorizedVoters, EpochStakes, NodeIdToVoteAccounts, StakesSerdeWrapper, - VersionedEpochStakes, + EpochAuthorizedVoters, EpochStakes, NodeIdToVoteAccounts, VersionedEpochStakes, }, genesis_utils::activate_all_features, runtime_config::RuntimeConfig, @@ -20,7 +19,7 @@ mod tests { create_tmp_accounts_dir_for_tests, get_storages_to_serialize, ArchiveFormat, StorageAndNextAccountsFileId, }, - stakes::{Stakes, StakesEnum}, + stakes::{SerdeStakesToStakeFormat, Stakes, StakesEnum}, }, solana_accounts_db::{ account_storage::{AccountStorageMap, AccountStorageReference}, @@ -307,7 +306,7 @@ mod tests { bank.epoch_stakes.insert( 42, EpochStakes::from(VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Stake(Stakes::::default()), + stakes: SerdeStakesToStakeFormat::Stake(Stakes::::default()), total_stake: 42, node_id_to_vote_accounts: Arc::::default(), epoch_authorized_voters: Arc::::default(), @@ -536,7 +535,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "HRBDXrGrHMZU4cNebKHT7jEmhrgd3h1c2qUMMywrGPiq") + frozen_abi(digest = "J7MnnLU99fYk2hfZPjdqyTYxgHstwRUDk2Yr8fFnXxFp") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 6400e05c3b2e5d..664c2044903d51 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -1,11 +1,7 @@ use { - crate::{ - stake_account::StakeAccount, - stakes::{serde_stakes_to_delegation_format, Stakes, StakesEnum}, - }, - serde::{Deserialize, Deserializer, Serialize, Serializer}, - solana_sdk::{clock::Epoch, pubkey::Pubkey, stake::state::Stake}, - solana_stake_program::stake_state::Delegation, + crate::stakes::{serde_stakes_to_delegation_format, SerdeStakesToStakeFormat, StakesEnum}, + serde::{Deserialize, Serialize}, + solana_sdk::{clock::Epoch, pubkey::Pubkey}, solana_vote::vote_account::VoteAccountsHashMap, std::{collections::HashMap, sync::Arc}, }; @@ -139,74 +135,13 @@ impl EpochStakes { #[derive(Debug, Clone, Serialize, Deserialize)] pub enum VersionedEpochStakes { Current { - stakes: StakesSerdeWrapper, + stakes: SerdeStakesToStakeFormat, total_stake: u64, node_id_to_vote_accounts: Arc, epoch_authorized_voters: Arc, }, } -/// Wrapper struct with custom serialization to support serializing -/// `Stakes` as `Stakes` without doing a full deep clone of -/// the stake data. Serialization works by building a `Stakes<&Stake>` map which -/// borrows `&Stake` from `StakeAccount` entries in `Stakes`. 
Note -/// that `Stakes<&Stake>` still copies `Pubkey` keys so the `Stakes<&Stake>` -/// data structure still allocates a fair amount of memory but the memory only -/// remains allocated during serialization. -#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] -#[derive(Debug, Clone)] -pub enum StakesSerdeWrapper { - Stake(Stakes), - Account(Stakes>), -} - -#[cfg(feature = "dev-context-only-utils")] -impl PartialEq for StakesSerdeWrapper { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::Stake(stakes), Self::Stake(other)) => stakes == other, - (Self::Account(stakes), Self::Account(other)) => stakes == other, - (Self::Stake(stakes), Self::Account(other)) => { - stakes == &Stakes::::from(other.clone()) - } - (Self::Account(stakes), Self::Stake(other)) => { - other == &Stakes::::from(stakes.clone()) - } - } - } -} - -impl From for StakesEnum { - fn from(stakes: StakesSerdeWrapper) -> Self { - match stakes { - StakesSerdeWrapper::Stake(stakes) => Self::Stakes(stakes), - StakesSerdeWrapper::Account(stakes) => Self::Accounts(stakes), - } - } -} - -impl Serialize for StakesSerdeWrapper { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match self { - Self::Stake(stakes) => stakes.serialize(serializer), - Self::Account(stakes) => Stakes::<&Stake>::from(stakes).serialize(serializer), - } - } -} - -impl<'de> Deserialize<'de> for StakesSerdeWrapper { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let stakes = Stakes::::deserialize(deserializer)?; - Ok(Self::Stake(stakes)) - } -} - impl From for EpochStakes { fn from(versioned: VersionedEpochStakes) -> Self { let VersionedEpochStakes::Current { @@ -262,7 +197,7 @@ pub(crate) fn split_epoch_stakes( versioned_epoch_stakes.insert( epoch, VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Account(stakes.clone()), + stakes: SerdeStakesToStakeFormat::Account(stakes.clone()), total_stake, node_id_to_vote_accounts, epoch_authorized_voters, @@ -273,7 +208,7 @@ pub(crate) fn split_epoch_stakes( versioned_epoch_stakes.insert( epoch, VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Stake(stakes.clone()), + stakes: SerdeStakesToStakeFormat::Stake(stakes.clone()), total_stake, node_id_to_vote_accounts, epoch_authorized_voters, @@ -289,10 +224,13 @@ pub(crate) fn split_epoch_stakes( pub(crate) mod tests { use { super::*, - crate::{stake_account::StakeAccount, stakes::StakesCache}, + crate::{ + stake_account::StakeAccount, + stakes::{Stakes, StakesCache}, + }, im::HashMap as ImHashMap, solana_sdk::{account::AccountSharedData, rent::Rent}, - solana_stake_program::stake_state::{self, Delegation}, + solana_stake_program::stake_state::{self, Delegation, Stake}, solana_vote::vote_account::{VoteAccount, VoteAccounts}, solana_vote_program::vote_state::{self, create_account_with_authorized}, std::iter, @@ -492,7 +430,7 @@ pub(crate) mod tests { assert_eq!( versioned.get(&epoch), Some(&VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Account(test_stakes), + stakes: SerdeStakesToStakeFormat::Account(test_stakes), total_stake: epoch_stakes.total_stake, node_id_to_vote_accounts: epoch_stakes.node_id_to_vote_accounts, epoch_authorized_voters: epoch_stakes.epoch_authorized_voters, @@ -521,7 +459,7 @@ pub(crate) mod tests { assert_eq!( versioned.get(&epoch), Some(&VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Stake(test_stakes), + stakes: SerdeStakesToStakeFormat::Stake(test_stakes), total_stake: epoch_stakes.total_stake, 
node_id_to_vote_accounts: epoch_stakes.node_id_to_vote_accounts, epoch_authorized_voters: epoch_stakes.epoch_authorized_voters, @@ -575,7 +513,7 @@ pub(crate) mod tests { assert_eq!( versioned.get(&epoch2), Some(&VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Account(Stakes::default()), + stakes: SerdeStakesToStakeFormat::Account(Stakes::default()), total_stake: 200, node_id_to_vote_accounts: Arc::default(), epoch_authorized_voters: Arc::default(), @@ -584,7 +522,7 @@ pub(crate) mod tests { assert_eq!( versioned.get(&epoch3), Some(&VersionedEpochStakes::Current { - stakes: StakesSerdeWrapper::Stake(Stakes::default()), + stakes: SerdeStakesToStakeFormat::Stake(Stakes::default()), total_stake: 300, node_id_to_vote_accounts: Arc::default(), epoch_authorized_voters: Arc::default(), diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 22dcd3931d65c9..ff8bdddc3da563 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -28,6 +28,7 @@ use { mod serde_stakes; pub(crate) use serde_stakes::serde_stakes_to_delegation_format; +pub use serde_stakes::SerdeStakesToStakeFormat; #[derive(Debug, Error)] pub enum Error { @@ -570,23 +571,6 @@ impl From> for Stakes { } } -impl<'a> From<&'a Stakes> for Stakes<&'a Stake> { - fn from(stakes: &'a Stakes) -> Self { - let stake_delegations = stakes - .stake_delegations - .iter() - .map(|(pubkey, stake_account)| (*pubkey, stake_account.stake())) - .collect(); - Self { - vote_accounts: stakes.vote_accounts.clone(), - stake_delegations, - unused: stakes.unused, - epoch: stakes.epoch, - stake_history: stakes.stake_history.clone(), - } - } -} - /// This conversion is memory intensive so should only be used in development /// contexts. #[cfg(feature = "dev-context-only-utils")] diff --git a/runtime/src/stakes/serde_stakes.rs b/runtime/src/stakes/serde_stakes.rs index b7c260b29426f1..ddcff5d6b410dc 100644 --- a/runtime/src/stakes/serde_stakes.rs +++ b/runtime/src/stakes/serde_stakes.rs @@ -2,13 +2,70 @@ use { super::{StakeAccount, Stakes, StakesEnum}, crate::stake_history::StakeHistory, im::HashMap as ImHashMap, - serde::{ser::SerializeMap, Serialize, Serializer}, + serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}, solana_sdk::{clock::Epoch, pubkey::Pubkey, stake::state::Delegation}, solana_stake_program::stake_state::Stake, solana_vote::vote_account::VoteAccounts, std::sync::Arc, }; +/// Wrapper struct with custom serialization to support serializing +/// `Stakes` as `Stakes` without doing an intermediate +/// clone of the stake data. 
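A minimal, self-contained sketch of the entry-by-entry serialization trick described above (not part of the patch; the toy `Stake`/`StakeAccount` types and the `u64` key are stand-ins for the real Solana types, and serde's derive feature is assumed):

use serde::{ser::SerializeMap, Serialize, Serializer};
use std::collections::HashMap;

#[derive(Clone, Copy, Serialize)]
struct Stake { delegated_lamports: u64 } // stand-in for the real Stake state

struct StakeAccount { stake: Stake } // the real type also carries full account data
impl StakeAccount {
    fn stake(&self) -> Stake { self.stake }
}

// Serializes a map of StakeAccount as if it were a map of Stake, one entry
// at a time, so no intermediate HashMap of Stake values is ever allocated.
struct AsStakeMap<'a>(&'a HashMap<u64, StakeAccount>);

impl Serialize for AsStakeMap<'_> {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut map = serializer.serialize_map(Some(self.0.len()))?;
        for (pubkey, stake_account) in self.0.iter() {
            map.serialize_entry(pubkey, &stake_account.stake())?;
        }
        map.end()
    }
}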
+#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] +#[derive(Debug, Clone)] +pub enum SerdeStakesToStakeFormat { + Stake(Stakes), + Account(Stakes), +} + +#[cfg(feature = "dev-context-only-utils")] +impl PartialEq for SerdeStakesToStakeFormat { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Stake(stakes), Self::Stake(other)) => stakes == other, + (Self::Account(stakes), Self::Account(other)) => stakes == other, + (Self::Stake(stakes), Self::Account(other)) => { + stakes == &Stakes::::from(other.clone()) + } + (Self::Account(stakes), Self::Stake(other)) => { + other == &Stakes::::from(stakes.clone()) + } + } + } +} + +impl From for StakesEnum { + fn from(stakes: SerdeStakesToStakeFormat) -> Self { + match stakes { + SerdeStakesToStakeFormat::Stake(stakes) => Self::Stakes(stakes), + SerdeStakesToStakeFormat::Account(stakes) => Self::Accounts(stakes), + } + } +} + +impl Serialize for SerdeStakesToStakeFormat { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + Self::Stake(stakes) => stakes.serialize(serializer), + Self::Account(stakes) => serialize_stake_accounts_to_stake_format(stakes, serializer), + } + } +} + +impl<'de> Deserialize<'de> for SerdeStakesToStakeFormat { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let stakes = Stakes::::deserialize(deserializer)?; + Ok(Self::Stake(stakes)) + } +} + // In order to maintain backward compatibility, the StakesEnum in EpochStakes // and SerializableVersionedBank should be serialized as Stakes. pub(crate) mod serde_stakes_to_delegation_format { @@ -53,6 +110,13 @@ fn serialize_stake_accounts_to_delegation_format( SerdeStakeAccountsToDelegationFormat::from(stakes.clone()).serialize(serializer) } +fn serialize_stake_accounts_to_stake_format( + stakes: &Stakes, + serializer: S, +) -> Result { + SerdeStakeAccountsToStakeFormat::from(stakes.clone()).serialize(serializer) +} + impl From> for SerdeStakesToDelegationFormat { fn from(stakes: Stakes) -> Self { let Stakes { @@ -93,6 +157,26 @@ impl From> for SerdeStakeAccountsToDelegationFormat { } } +impl From> for SerdeStakeAccountsToStakeFormat { + fn from(stakes: Stakes) -> Self { + let Stakes { + vote_accounts, + stake_delegations, + unused, + epoch, + stake_history, + } = stakes; + + Self { + vote_accounts, + stake_delegations: SerdeStakeAccountMapToStakeFormat(stake_delegations), + unused, + epoch, + stake_history, + } + } +} + #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Serialize)] struct SerdeStakesToDelegationFormat { @@ -113,6 +197,16 @@ struct SerdeStakeAccountsToDelegationFormat { stake_history: StakeHistory, } +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Serialize)] +struct SerdeStakeAccountsToStakeFormat { + vote_accounts: VoteAccounts, + stake_delegations: SerdeStakeAccountMapToStakeFormat, + unused: u64, + epoch: Epoch, + stake_history: StakeHistory, +} + #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] struct SerdeStakeMapToDelegationFormat(ImHashMap); impl Serialize for SerdeStakeMapToDelegationFormat { @@ -143,6 +237,21 @@ impl Serialize for SerdeStakeAccountMapToDelegationFormat { } } +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +struct SerdeStakeAccountMapToStakeFormat(ImHashMap); +impl Serialize for SerdeStakeAccountMapToStakeFormat { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut s = serializer.serialize_map(Some(self.0.len()))?; + for (pubkey, stake_account) in 
self.0.iter() { + s.serialize_entry(pubkey, stake_account.stake())?; + } + s.end() + } +} + #[cfg(test)] mod tests { use { @@ -150,6 +259,43 @@ mod tests { solana_stake_program::stake_state, solana_vote_program::vote_state, }; + #[test] + fn test_serde_stakes_to_stake_format() { + let mut stake_delegations = ImHashMap::new(); + stake_delegations.insert( + Pubkey::new_unique(), + StakeAccount::try_from(stake_state::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &vote_state::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 0, + 1_000_000_000, + ), + &Rent::default(), + 1_000_000_000, + )) + .unwrap(), + ); + + let stake_account_stakes = Stakes { + vote_accounts: VoteAccounts::default(), + stake_delegations, + unused: 0, + epoch: 0, + stake_history: StakeHistory::default(), + }; + + let wrapped_stakes = SerdeStakesToStakeFormat::Account(stake_account_stakes.clone()); + let serialized_stakes = bincode::serialize(&wrapped_stakes).unwrap(); + let stake_stakes = bincode::deserialize::>(&serialized_stakes).unwrap(); + assert_eq!( + StakesEnum::Stakes(stake_stakes), + StakesEnum::Accounts(stake_account_stakes) + ); + } + #[test] fn test_serde_stakes_to_delegation_format() { #[derive(Debug, PartialEq, Deserialize, Serialize)] From cb62b6a4e6fc0d4b336febb5612241a812e5534d Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 13 Aug 2024 07:47:00 +0800 Subject: [PATCH 098/529] refactor: load transaction accounts (#2442) * refactor: load transaction accounts * feedback * fix account found * fix clippy --- svm/src/account_loader.rs | 164 +++++++++++++++++++---------------- svm/src/account_overrides.rs | 4 +- 2 files changed, 91 insertions(+), 77 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index b62833e404004b..2b2e31aacb8fd1 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -198,6 +198,7 @@ fn load_transaction_accounts( ) -> Result { let mut tx_rent: TransactionRent = 0; let account_keys = message.account_keys(); + let mut accounts = Vec::with_capacity(account_keys.len()); let mut accounts_found = Vec::with_capacity(account_keys.len()); let mut rent_debits = RentDebits::default(); let mut accumulated_accounts_data_size: u32 = 0; @@ -208,85 +209,96 @@ fn load_transaction_accounts( .unique() .collect::>(); - let mut accounts = account_keys - .iter() - .enumerate() - .map(|(i, key)| { - let mut account_found = true; - #[allow(clippy::collapsible_else_if)] - let account = if solana_sdk::sysvar::instructions::check_id(key) { - construct_instructions_account(message) - } else { - let is_fee_payer = i == 0; - let instruction_account = u8::try_from(i) - .map(|i| instruction_accounts.contains(&&i)) - .unwrap_or(false); - let (account_size, account, rent) = if is_fee_payer { - ( - tx_details.fee_payer_account.data().len(), - tx_details.fee_payer_account.clone(), - tx_details.fee_payer_rent_debit, - ) - } else if let Some(account_override) = - account_overrides.and_then(|overrides| overrides.get(key)) - { - (account_override.data().len(), account_override.clone(), 0) - } else if let Some(program) = (!instruction_account && !message.is_writable(i)) - .then_some(()) - .and_then(|_| loaded_programs.find(key)) - { - callbacks - .get_account_shared_data(key) - .ok_or(TransactionError::AccountNotFound)?; - // Optimization to skip loading of accounts which are only used as - // programs in top-level instructions and not passed as instruction accounts. 
- let program_account = account_shared_data_from_program(&program); - (program.account_size, program_account, 0) - } else { - callbacks - .get_account_shared_data(key) - .map(|mut account| { - if message.is_writable(i) { - let rent_due = collect_rent_from_account( - feature_set, - rent_collector, - key, - &mut account, - ) - .rent_amount; - - (account.data().len(), account, rent_due) - } else { - (account.data().len(), account, 0) - } - }) - .unwrap_or_else(|| { - account_found = false; - let mut default_account = AccountSharedData::default(); - // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). - // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account - // with this field already set would allow us to skip rent collection for these accounts. - default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - (default_account.data().len(), default_account, 0) - }) - }; - accumulate_and_check_loaded_account_data_size( - &mut accumulated_accounts_data_size, - account_size, - tx_details.compute_budget_limits.loaded_accounts_bytes, - error_metrics, - )?; - - tx_rent += rent; - rent_debits.insert(key, rent, account.lamports()); + let mut collect_account = + |key, account_size, account: AccountSharedData, rent, account_found| -> Result<()> { + accumulate_and_check_loaded_account_data_size( + &mut accumulated_accounts_data_size, + account_size, + tx_details.compute_budget_limits.loaded_accounts_bytes, + error_metrics, + )?; - account - }; + tx_rent += rent; + rent_debits.insert(key, rent, account.lamports()); + accounts.push((*key, account)); accounts_found.push(account_found); - Ok((*key, account)) - }) - .collect::>>()?; + Ok(()) + }; + + // Since the fee payer is always the first account, collect it first. Note + // that account overrides are already applied during fee payer validation so + // it's fine to use the fee payer directly here rather than checking account + // overrides again. + collect_account( + message.fee_payer(), + tx_details.fee_payer_account.data().len(), + tx_details.fee_payer_account, + tx_details.fee_payer_rent_debit, + true, // account_found + )?; + + // Attempt to load and collect remaining non-fee payer accounts + for (i, key) in account_keys.iter().enumerate().skip(1) { + let mut account_found = true; + let is_instruction_account = u8::try_from(i) + .map(|i| instruction_accounts.contains(&&i)) + .unwrap_or(false); + let (account_size, account, rent) = if solana_sdk::sysvar::instructions::check_id(key) { + // Since the instructions sysvar is constructed by the SVM + // and modified for each transaction instruction, it cannot + // be overridden. + ( + 0, /* loaded size */ + construct_instructions_account(message), + 0, /* collected rent */ + ) + } else if let Some(account_override) = + account_overrides.and_then(|overrides| overrides.get(key)) + { + (account_override.data().len(), account_override.clone(), 0) + } else if let Some(program) = (!is_instruction_account && !message.is_writable(i)) + .then_some(()) + .and_then(|_| loaded_programs.find(key)) + { + callbacks + .get_account_shared_data(key) + .ok_or(TransactionError::AccountNotFound)?; + // Optimization to skip loading of accounts which are only used as + // programs in top-level instructions and not passed as instruction accounts. 
+ let program_account = account_shared_data_from_program(&program); + (program.account_size, program_account, 0) + } else { + callbacks + .get_account_shared_data(key) + .map(|mut account| { + if message.is_writable(i) { + let rent_due = collect_rent_from_account( + feature_set, + rent_collector, + key, + &mut account, + ) + .rent_amount; + + (account.data().len(), account, rent_due) + } else { + (account.data().len(), account, 0) + } + }) + .unwrap_or_else(|| { + account_found = false; + let mut default_account = AccountSharedData::default(); + // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). + // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account + // with this field already set would allow us to skip rent collection for these accounts. + default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + (default_account.data().len(), default_account, 0) + }) + }; + + collect_account(key, account_size, account, rent, account_found)?; + } let builtins_start_index = accounts.len(); let program_indices = message diff --git a/svm/src/account_overrides.rs b/svm/src/account_overrides.rs index 8a205a798f66b1..7628b82f85d88e 100644 --- a/svm/src/account_overrides.rs +++ b/svm/src/account_overrides.rs @@ -3,7 +3,9 @@ use { std::collections::HashMap, }; -/// Encapsulates overridden accounts, typically used for transaction simulations +/// Encapsulates overridden accounts, typically used for transaction +/// simulations. Account overrides are currently not used when loading the +/// durable nonce account or when constructing the instructions sysvar account. #[derive(Default)] pub struct AccountOverrides { accounts: HashMap, From bda1c45e6bf4ed6d683d7fe60ee8b622eb0caab2 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:35:10 -0700 Subject: [PATCH 099/529] wen_restart: split bank_forks lock inside find_bankhash_of_heaviest_fork(). (#2544) Replace big write lock with smaller locks, otherwise replay of block will get stuck. --- wen-restart/src/wen_restart.rs | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 5e794ce2f43560..67b148b17149b5 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -529,15 +529,17 @@ pub(crate) fn find_bankhash_of_heaviest_fork( let recyclers = VerifyRecyclers::default(); let mut timing = ExecuteTimings::default(); let opts = ProcessOptions::default(); - // Grab one write lock until end of function because we are the only one touching bankforks now. - let mut my_bankforks = bank_forks.write().unwrap(); // Now replay all the missing blocks. 
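A toy sketch of the scoped-lock pattern this change adopts (not from the patch; a `HashMap` behind `RwLock` stands in for `BankForks`). Each guard lives for a single statement, so other lock users, such as block replay, are never blocked for the duration of the whole loop:

use std::{collections::HashMap, sync::RwLock};

type Slot = u64;

fn replay_missing(bank_forks: &RwLock<HashMap<Slot, String>>, slots: &[Slot]) {
    for slot in slots {
        // Short-lived read lock: the guard is dropped at the end of this statement.
        let existing = bank_forks.read().unwrap().get(slot).cloned();
        if existing.is_none() {
            // Take the write lock only for the insert itself, then release it,
            // so concurrent readers can make progress between iterations.
            bank_forks
                .write()
                .unwrap()
                .insert(*slot, format!("replayed bank for slot {slot}"));
        }
    }
}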
let mut parent_bank = root_bank; for slot in slots { if exit.load(Ordering::Relaxed) { return Err(WenRestartError::Exiting.into()); } - let bank = match my_bankforks.get(slot) { + let saved_bank; + { + saved_bank = bank_forks.read().unwrap().get(slot); + } + let bank = match saved_bank { Some(cur_bank) => { if !cur_bank.is_frozen() { return Err(WenRestartError::BlockNotFrozenAfterReplay(slot, None).into()); @@ -552,7 +554,10 @@ pub(crate) fn find_bankhash_of_heaviest_fork( .unwrap(), slot, ); - let bank_with_scheduler = my_bankforks.insert_from_ledger(new_bank); + let bank_with_scheduler; + { + bank_with_scheduler = bank_forks.write().unwrap().insert_from_ledger(new_bank); + } let mut progress = ConfirmationProgress::new(parent_bank.last_blockhash()); if let Err(e) = process_single_slot( &blockstore, @@ -573,7 +578,15 @@ pub(crate) fn find_bankhash_of_heaviest_fork( ) .into()); } - my_bankforks.get(slot).unwrap() + let cur_bank; + { + cur_bank = bank_forks + .read() + .unwrap() + .get(slot) + .expect("bank should have been just inserted"); + } + cur_bank } }; parent_bank = bank; From 41dc227f8b75734ed4a674a45b15b238eb4e6620 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 13 Aug 2024 11:28:50 +0800 Subject: [PATCH 100/529] clippy: allow all clippy for mod google in storage-bigtable (#2543) clippy: allow all for mod google in storage-bigtable --- storage-bigtable/src/bigtable.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index b4bfe040a30963..d473429a9744be 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -16,7 +16,7 @@ use { tonic::{codegen::InterceptedService, transport::ClientTlsConfig, Request, Status}, }; -#[allow(clippy::enum_variant_names)] +#[allow(clippy::all)] mod google { mod rpc { include!(concat!( From e25a999ea465e92f23cc9e479a9206a522f44e2c Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 13 Aug 2024 11:29:03 +0800 Subject: [PATCH 101/529] clippy: needless_maybe_sized (#2545) --- frozen-abi/src/abi_example.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index d0de2845776619..d9187d16c04333 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -534,7 +534,7 @@ impl AbiEnumVisitor for T { } } -impl AbiEnumVisitor for T { +impl AbiEnumVisitor for T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { info!("AbiEnumVisitor for T: {}", type_name::()); // not calling self.serialize(...) 
is intentional here as the most generic impl From 3aa43d25484bc471fc1b0bf021a8b1c79c8e770c Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 13 Aug 2024 08:59:59 -0400 Subject: [PATCH 102/529] hash-cache-tool: Adds cli arg to limit state to single bin (#2557) --- .../accounts-hash-cache-tool/src/main.rs | 67 ++++++++++++++++--- 1 file changed, 59 insertions(+), 8 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 6cd525b97b2673..076568b4759498 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -21,6 +21,7 @@ use { mem::size_of, num::Saturating, path::{Path, PathBuf}, + str, sync::RwLock, time::Instant, }, @@ -143,6 +144,20 @@ fn main() { Must be a power of two, greater than 0, \ and less-than-or-equal-to 16,777,216 (2^24)" ), + ) + .arg( + Arg::with_name("bin_of_interest") + .long("bin-of-interest") + .takes_value(true) + .value_name("INDEX") + .help("Specifies a single bin to diff") + .long_help( + "Specifies a single bin to diff. \ + When diffing large state that does not fit in memory, \ + it may be necessary to diff a subset at a time. \ + Use this arg to limit the state to a single bin. \ + The INDEX must be less than --bins." + ), ), ), ) @@ -210,7 +225,22 @@ fn cmd_diff_state( let path1 = value_t_or_exit!(subcommand_matches, "path1", String); let path2 = value_t_or_exit!(subcommand_matches, "path2", String); let num_bins = value_t_or_exit!(subcommand_matches, "bins", usize); - do_diff_state(path1, path2, num_bins) + let bin_of_interest = + if let Some(bin_of_interest) = subcommand_matches.value_of("bin_of_interest") { + let bin_of_interest = bin_of_interest + .parse() + .map_err(|err| format!("argument 'bin-of-interest' is not a valid value: {err}"))?; + if bin_of_interest >= num_bins { + return Err(format!( + "argument 'bin-of-interest' must be less than 'bins', \ + bins: {num_bins}, bin-of-interest: {bin_of_interest}", + )); + } + Some(bin_of_interest) + } else { + None + }; + do_diff_state(path1, path2, num_bins, bin_of_interest) } fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { @@ -480,6 +510,7 @@ fn do_diff_state( dir1: impl AsRef, dir2: impl AsRef, num_bins: usize, + bin_of_interest: Option, ) -> Result<(), String> { let extract = |dir: &Path| -> Result<_, String> { let files = @@ -487,8 +518,12 @@ fn do_diff_state( let BinnedLatestEntriesInfo { latest_entries, capitalization, - } = extract_binned_latest_entries_in(files.iter().map(|file| &file.path), num_bins) - .map_err(|err| format!("failed to extract entries: {err}"))?; + } = extract_binned_latest_entries_in( + files.iter().map(|file| &file.path), + num_bins, + bin_of_interest, + ) + .map_err(|err| format!("failed to extract entries: {err}"))?; let num_accounts: usize = latest_entries.iter().map(|bin| bin.len()).sum(); let entries = Vec::from(latest_entries); let state: Box<_> = entries.into_iter().map(RwLock::new).collect(); @@ -664,7 +699,7 @@ fn extract_latest_entries_in(file: impl AsRef) -> Result) -> Result>, - bins: usize, + num_bins: usize, + bin_of_interest: Option, ) -> Result { - let binner = PubkeyBinCalculator24::new(bins); - let mut entries: Box<_> = iter::repeat_with(HashMap::default).take(bins).collect(); + if let Some(bin_of_interest) = bin_of_interest { + assert!(bin_of_interest < num_bins); + } + + let binner = PubkeyBinCalculator24::new(num_bins); + let mut entries: Box<_> = iter::repeat_with(HashMap::default).take(num_bins).collect();
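For orientation, the binning operates on a 24-bit pubkey prefix, which is presumably why `--bins` is capped at 2^24. The helper below is a hypothetical re-derivation of that mapping, not the actual `PubkeyBinCalculator24` source:

fn bin_from_pubkey(pubkey: &[u8; 32], num_bins: usize) -> usize {
    assert!(num_bins.is_power_of_two() && num_bins <= 1 << 24);
    // The first three bytes of the pubkey form a 24-bit prefix...
    let prefix =
        ((pubkey[0] as u32) << 16) | ((pubkey[1] as u32) << 8) | (pubkey[2] as u32);
    // ...of which only the top log2(num_bins) bits select the bin.
    (prefix >> (24 - num_bins.trailing_zeros())) as usize
}

fn main() {
    let mut key = [0u8; 32];
    key[0] = 0x80; // top bit set, so the key falls in the upper half of the keyspace
    assert_eq!(bin_from_pubkey(&key, 2), 1);
    assert_eq!(bin_from_pubkey(&[0u8; 32], 1 << 24), 0);
}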
let mut capitalization = Saturating(0); for file in files.into_iter() { @@ -699,8 +743,15 @@ fn extract_binned_latest_entries_in( })?; let num_entries = scan_mmap(&mmap, |entry| { - capitalization += entry.lamports; let bin = binner.bin_from_pubkey(&entry.pubkey); + if let Some(bin_of_interest) = bin_of_interest { + // Is this the bin of interest? If not, skip it. + if bin != bin_of_interest { + return; + } + } + + capitalization += entry.lamports; let old_value = entries[bin].insert(entry.pubkey, (entry.hash, entry.lamports)); if let Some((_, old_lamports)) = old_value { // back out the old value's lamports, so we only keep the latest's for capitalization From 8e747d52c2ceaccf2c1f32ba411f88f84c9e6fa5 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 13 Aug 2024 21:51:48 +0800 Subject: [PATCH 103/529] refactor: load transaction result supports fee only transactions (#2527) --- svm/src/account_loader.rs | 324 ++++++++++++++++++++----------- svm/src/transaction_processor.rs | 7 +- 2 files changed, 213 insertions(+), 118 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 2b2e31aacb8fd1..e7a2c9749e8147 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -35,7 +35,19 @@ pub(crate) type TransactionRent = u64; pub(crate) type TransactionProgramIndices = Vec>; pub type TransactionCheckResult = Result; pub type TransactionValidationResult = Result; -pub type TransactionLoadResult = Result; + +#[derive(PartialEq, Eq, Debug)] +pub enum TransactionLoadResult { + /// All transaction accounts were loaded successfully + Loaded(LoadedTransaction), + /// Some transaction accounts needed for execution were unable to be loaded + /// but the fee payer and any nonce account needed for fee collection were + /// loaded successfully + FeesOnly(FeesOnlyTransaction), + /// Some transaction accounts needed for fee collection were unable to be + /// loaded + NotLoaded(TransactionError), +} #[derive(PartialEq, Eq, Debug, Clone)] pub struct CheckedTransactionDetails { @@ -66,6 +78,13 @@ pub struct LoadedTransaction { pub loaded_accounts_data_size: u32, } +#[derive(PartialEq, Eq, Debug, Clone)] +pub struct FeesOnlyTransaction { + pub load_error: TransactionError, + pub rollback_accounts: RollbackAccounts, + pub fee_details: FeeDetails, +} + /// Collect rent from an account if rent is still enabled and regardless of /// whether rent is enabled, set the rent epoch to u64::MAX if the account is /// rent exempt. 
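A sketch of how a caller can fold the new three-way `TransactionLoadResult` above back into the old `Result`-shaped handling; this mirrors the `transaction_processor.rs` hunk later in this patch, with the patch's own types assumed in scope:

fn to_legacy_result(
    load_result: TransactionLoadResult,
) -> Result<LoadedTransaction, TransactionError> {
    match load_result {
        // Everything loaded: proceed to execution.
        TransactionLoadResult::Loaded(loaded_tx) => Ok(loaded_tx),
        // Fee payer (and nonce, if any) loaded: fees can still be charged,
        // but execution is skipped; surface the underlying load error.
        TransactionLoadResult::FeesOnly(fees_only_tx) => Err(fees_only_tx.load_error),
        // Not even the fee payer could be loaded: nothing to charge.
        TransactionLoadResult::NotLoaded(err) => Err(err),
    }
}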
@@ -167,35 +186,90 @@ pub(crate) fn load_accounts( ) -> Vec { txs.iter() .zip(validation_results) - .map(|etx| match etx { - (tx, Ok(tx_details)) => { - // load transactions - load_transaction_accounts( - callbacks, - tx, - tx_details, - error_metrics, - account_overrides, - feature_set, - rent_collector, - loaded_programs, - ) - } - (_, Err(e)) => Err(e), + .map(|(transaction, validation_result)| { + load_transaction( + callbacks, + transaction, + validation_result, + error_metrics, + account_overrides, + feature_set, + rent_collector, + loaded_programs, + ) }) .collect() } +fn load_transaction( + callbacks: &CB, + message: &impl SVMMessage, + validation_result: TransactionValidationResult, + error_metrics: &mut TransactionErrorMetrics, + account_overrides: Option<&AccountOverrides>, + feature_set: &FeatureSet, + rent_collector: &RentCollector, + loaded_programs: &ProgramCacheForTxBatch, +) -> TransactionLoadResult { + match validation_result { + Err(e) => TransactionLoadResult::NotLoaded(e), + Ok(tx_details) => { + let load_result = load_transaction_accounts( + callbacks, + message, + tx_details.fee_payer_account, + tx_details.fee_payer_rent_debit, + &tx_details.compute_budget_limits, + error_metrics, + account_overrides, + feature_set, + rent_collector, + loaded_programs, + ); + + match load_result { + Ok(loaded_tx_accounts) => TransactionLoadResult::Loaded(LoadedTransaction { + accounts: loaded_tx_accounts.accounts, + program_indices: loaded_tx_accounts.program_indices, + fee_details: tx_details.fee_details, + rent: loaded_tx_accounts.rent, + rent_debits: loaded_tx_accounts.rent_debits, + rollback_accounts: tx_details.rollback_accounts, + compute_budget_limits: tx_details.compute_budget_limits, + loaded_accounts_data_size: loaded_tx_accounts.loaded_accounts_data_size, + }), + Err(err) => TransactionLoadResult::FeesOnly(FeesOnlyTransaction { + load_error: err, + fee_details: tx_details.fee_details, + rollback_accounts: tx_details.rollback_accounts, + }), + } + } + } +} + +#[derive(PartialEq, Eq, Debug, Clone)] +struct LoadedTransactionAccounts { + pub accounts: Vec, + pub program_indices: TransactionProgramIndices, + pub rent: TransactionRent, + pub rent_debits: RentDebits, + pub loaded_accounts_data_size: u32, +} + +#[allow(clippy::too_many_arguments)] fn load_transaction_accounts( callbacks: &CB, message: &impl SVMMessage, - tx_details: ValidatedTransactionDetails, + fee_payer_account: AccountSharedData, + fee_payer_rent_debit: u64, + compute_budget_limits: &ComputeBudgetLimits, error_metrics: &mut TransactionErrorMetrics, account_overrides: Option<&AccountOverrides>, feature_set: &FeatureSet, rent_collector: &RentCollector, loaded_programs: &ProgramCacheForTxBatch, -) -> Result { +) -> Result { let mut tx_rent: TransactionRent = 0; let account_keys = message.account_keys(); let mut accounts = Vec::with_capacity(account_keys.len()); @@ -214,7 +288,7 @@ fn load_transaction_accounts( accumulate_and_check_loaded_account_data_size( &mut accumulated_accounts_data_size, account_size, - tx_details.compute_budget_limits.loaded_accounts_bytes, + compute_budget_limits.loaded_accounts_bytes, error_metrics, )?; @@ -232,9 +306,9 @@ fn load_transaction_accounts( // overrides again. 
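As a self-contained illustration of the pattern just described (one closure owns the shared bookkeeping, the fee payer is collected first, then the loop visits the rest with `.skip(1)`), here is a toy version; the key and size types are made up, and the input is assumed non-empty since the fee payer is always account 0:

fn collect_accounts(
    keys: &[u64],
    sizes: &[usize],
    size_limit: usize,
) -> Result<Vec<u64>, &'static str> {
    let mut accumulated_size = 0usize;
    let mut collected = Vec::with_capacity(keys.len());
    let mut collect = |key: u64, size: usize| -> Result<(), &'static str> {
        // Enforce the loaded-accounts data size cap as each account is added.
        accumulated_size = accumulated_size.saturating_add(size);
        if accumulated_size > size_limit {
            return Err("max loaded accounts data size exceeded");
        }
        collected.push(key);
        Ok(())
    };
    collect(keys[0], sizes[0])?; // fee payer first
    for (i, key) in keys.iter().enumerate().skip(1) {
        collect(*key, sizes[i])?;
    }
    Ok(collected)
}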
collect_account( message.fee_payer(), - tx_details.fee_payer_account.data().len(), - tx_details.fee_payer_account, - tx_details.fee_payer_rent_debit, + fee_payer_account.data().len(), + fee_payer_account, + fee_payer_rent_debit, true, // account_found )?; @@ -345,7 +419,7 @@ fn load_transaction_accounts( accumulate_and_check_loaded_account_data_size( &mut accumulated_accounts_data_size, owner_account.data().len(), - tx_details.compute_budget_limits.loaded_accounts_bytes, + compute_budget_limits.loaded_accounts_bytes, error_metrics, )?; accounts.push((*owner_id, owner_account)); @@ -358,12 +432,9 @@ fn load_transaction_accounts( }) .collect::>>>()?; - Ok(LoadedTransaction { + Ok(LoadedTransactionAccounts { accounts, program_indices, - fee_details: tx_details.fee_details, - rollback_accounts: tx_details.rollback_accounts, - compute_budget_limits: tx_details.compute_budget_limits, rent: tx_rent, rent_debits, loaded_accounts_data_size: accumulated_accounts_data_size, @@ -587,14 +658,17 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_metrics); + let load_results = load_accounts_aux_test(tx, &accounts, &mut error_metrics); assert_eq!(error_metrics.account_not_found, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - Err(TransactionError::ProgramAccountNotFound) - ); + assert_eq!(load_results.len(), 1); + assert!(matches!( + load_results[0], + TransactionLoadResult::FeesOnly(FeesOnlyTransaction { + load_error: TransactionError::ProgramAccountNotFound, + .. + }), + )); } #[test] @@ -629,13 +703,14 @@ mod tests { assert_eq!(error_metrics.account_not_found, 0); assert_eq!(loaded_accounts.len(), 1); match &loaded_accounts[0] { - Ok(loaded_transaction) => { + TransactionLoadResult::Loaded(loaded_transaction) => { assert_eq!(loaded_transaction.accounts.len(), 3); assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); assert_eq!(loaded_transaction.program_indices.len(), 1); assert_eq!(loaded_transaction.program_indices[0].len(), 0); } - Err(e) => panic!("{e}"), + TransactionLoadResult::FeesOnly(fees_only_tx) => panic!("{}", fees_only_tx.load_error), + TransactionLoadResult::NotLoaded(e) => panic!("{e}"), } } @@ -665,14 +740,17 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_metrics); + let load_results = load_accounts_aux_test(tx, &accounts, &mut error_metrics); assert_eq!(error_metrics.account_not_found, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - Err(TransactionError::ProgramAccountNotFound) - ); + assert_eq!(load_results.len(), 1); + assert!(matches!( + load_results[0], + TransactionLoadResult::FeesOnly(FeesOnlyTransaction { + load_error: TransactionError::ProgramAccountNotFound, + .. + }), + )); } #[test] @@ -699,14 +777,17 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_aux_test(tx, &accounts, &mut error_metrics); + let load_results = load_accounts_aux_test(tx, &accounts, &mut error_metrics); assert_eq!(error_metrics.invalid_program_for_execution, 1); - assert_eq!(loaded_accounts.len(), 1); - assert_eq!( - loaded_accounts[0], - Err(TransactionError::InvalidProgramForExecution) - ); + assert_eq!(load_results.len(), 1); + assert!(matches!( + load_results[0], + TransactionLoadResult::FeesOnly(FeesOnlyTransaction { + load_error: TransactionError::InvalidProgramForExecution, + .. 
+ }), + )); } #[test] @@ -753,14 +834,15 @@ mod tests { assert_eq!(error_metrics.account_not_found, 0); assert_eq!(loaded_accounts.len(), 1); match &loaded_accounts[0] { - Ok(loaded_transaction) => { + TransactionLoadResult::Loaded(loaded_transaction) => { assert_eq!(loaded_transaction.accounts.len(), 4); assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); assert_eq!(loaded_transaction.program_indices.len(), 2); assert_eq!(loaded_transaction.program_indices[0], &[1]); assert_eq!(loaded_transaction.program_indices[1], &[2]); } - Err(e) => panic!("{e}"), + TransactionLoadResult::FeesOnly(fees_only_tx) => panic!("{}", fees_only_tx.load_error), + TransactionLoadResult::NotLoaded(e) => panic!("{e}"), } } @@ -803,9 +885,15 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(&[], tx, None); - assert_eq!(loaded_accounts.len(), 1); - assert!(loaded_accounts[0].is_err()); + let load_results = load_accounts_no_store(&[], tx, None); + assert_eq!(load_results.len(), 1); + assert!(matches!( + load_results[0], + TransactionLoadResult::FeesOnly(FeesOnlyTransaction { + load_error: TransactionError::ProgramAccountNotFound, + .. + }), + )); } #[test] @@ -831,10 +919,15 @@ mod tests { let loaded_accounts = load_accounts_no_store(&[(keypair.pubkey(), account)], tx, Some(&account_overrides)); assert_eq!(loaded_accounts.len(), 1); - let loaded_transaction = loaded_accounts[0].as_ref().unwrap(); - assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey()); - assert_eq!(loaded_transaction.accounts[1].0, slot_history_id); - assert_eq!(loaded_transaction.accounts[1].1.lamports(), 42); + match &loaded_accounts[0] { + TransactionLoadResult::Loaded(loaded_transaction) => { + assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey()); + assert_eq!(loaded_transaction.accounts[1].0, slot_history_id); + assert_eq!(loaded_transaction.accounts[1].1.lamports(), 42); + } + TransactionLoadResult::FeesOnly(fees_only_tx) => panic!("{}", fees_only_tx.load_error), + TransactionLoadResult::NotLoaded(e) => panic!("{e}"), + } } #[test] @@ -1059,11 +1152,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails { - fee_payer_account: fee_payer_account_data.clone(), - fee_payer_rent_debit, - ..ValidatedTransactionDetails::default() - }, + fee_payer_account_data.clone(), + fee_payer_rent_debit, + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1078,12 +1169,9 @@ mod tests { }; assert_eq!( result.unwrap(), - LoadedTransaction { + LoadedTransactionAccounts { accounts: vec![(fee_payer_address, fee_payer_account_data),], program_indices: vec![], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: ComputeBudgetLimits::default(), rent: fee_payer_rent_debit, rent_debits: expected_rent_debits, loaded_accounts_data_size: 0, @@ -1127,10 +1215,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails { - fee_payer_account: fee_payer_account_data.clone(), - ..ValidatedTransactionDetails::default() - }, + fee_payer_account_data.clone(), + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1140,7 +1227,7 @@ mod tests { assert_eq!( result.unwrap(), - LoadedTransaction { + LoadedTransactionAccounts { accounts: vec![ (key1.pubkey(), fee_payer_account_data), ( @@ -1149,9 +1236,6 @@ mod tests { ) ], 
program_indices: vec![vec![]], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1193,7 +1277,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails::default(), + AccountSharedData::default(), // fee_payer_account + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1237,7 +1323,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails::default(), + AccountSharedData::default(), // fee_payer_account + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1281,7 +1369,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails::default(), + AccountSharedData::default(), // fee_payer_account + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1334,10 +1424,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails { - fee_payer_account: fee_payer_account_data.clone(), - ..ValidatedTransactionDetails::default() - }, + fee_payer_account_data.clone(), + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1347,7 +1436,7 @@ mod tests { assert_eq!( result.unwrap(), - LoadedTransaction { + LoadedTransactionAccounts { accounts: vec![ (key2.pubkey(), fee_payer_account_data), ( @@ -1355,9 +1444,6 @@ mod tests { mock_bank.accounts_map[&key1.pubkey()].clone() ), ], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: ComputeBudgetLimits::default(), program_indices: vec![vec![1]], rent: 0, rent_debits: RentDebits::default(), @@ -1402,7 +1488,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails::default(), + AccountSharedData::default(), // fee_payer_account + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1455,7 +1543,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails::default(), + AccountSharedData::default(), // fee_payer_account + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1515,10 +1605,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails { - fee_payer_account: fee_payer_account_data.clone(), - ..ValidatedTransactionDetails::default() - }, + fee_payer_account_data.clone(), + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1528,7 +1617,7 @@ mod tests { assert_eq!( result.unwrap(), - LoadedTransaction { + LoadedTransactionAccounts { accounts: vec![ (key2.pubkey(), fee_payer_account_data), ( @@ -1541,9 +1630,6 @@ mod tests { ), ], program_indices: vec![vec![1]], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: 
ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1605,10 +1691,9 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - ValidatedTransactionDetails { - fee_payer_account: fee_payer_account_data.clone(), - ..ValidatedTransactionDetails::default() - }, + fee_payer_account_data.clone(), + 0, // fee_payer_rent_debit + &ComputeBudgetLimits::default(), &mut error_metrics, None, &FeatureSet::default(), @@ -1620,7 +1705,7 @@ mod tests { account_data.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); assert_eq!( result.unwrap(), - LoadedTransaction { + LoadedTransactionAccounts { accounts: vec![ (key2.pubkey(), fee_payer_account_data), ( @@ -1634,9 +1719,6 @@ mod tests { ), ], program_indices: vec![vec![1], vec![1]], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: ComputeBudgetLimits::default(), rent: 0, rent_debits: RentDebits::default(), loaded_accounts_data_size: 0, @@ -1673,7 +1755,7 @@ mod tests { let num_accounts = tx.message().account_keys.len(); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); let mut error_metrics = TransactionErrorMetrics::default(); - let loaded_txs = load_accounts( + let mut load_results = load_accounts( &bank, &[sanitized_tx.clone()], vec![Ok(ValidatedTransactionDetails::default())], @@ -1684,11 +1766,15 @@ mod tests { &ProgramCacheForTxBatch::default(), ); + let TransactionLoadResult::Loaded(loaded_transaction) = load_results.swap_remove(0) else { + panic!("transaction loading failed"); + }; + let compute_budget = ComputeBudget::new(u64::from( compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, )); let transaction_context = TransactionContext::new( - loaded_txs[0].as_ref().unwrap().accounts.clone(), + loaded_transaction.accounts, Rent::default(), compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, @@ -1761,7 +1847,7 @@ mod tests { ..ValidatedTransactionDetails::default() }); - let results = load_accounts( + let mut load_results = load_accounts( &mock_bank, &[sanitized_transaction], vec![validation_result], @@ -1775,10 +1861,12 @@ mod tests { let mut account_data = AccountSharedData::default(); account_data.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - assert_eq!(results.len(), 1); - let loaded_result = results[0].clone(); + assert_eq!(load_results.len(), 1); + let TransactionLoadResult::Loaded(loaded_transaction) = load_results.swap_remove(0) else { + panic!("transaction loading failed"); + }; assert_eq!( - loaded_result.unwrap(), + loaded_transaction, LoadedTransaction { accounts: vec![ ( @@ -1831,7 +1919,7 @@ mod tests { ); let validation_result = Ok(ValidatedTransactionDetails::default()); - let result = load_accounts( + let load_results = load_accounts( &mock_bank, &[sanitized_transaction.clone()], vec![validation_result.clone()], @@ -1842,14 +1930,17 @@ mod tests { &ProgramCacheForTxBatch::default(), ); - assert_eq!( - result, - vec![Err(TransactionError::InvalidProgramForExecution)] - ); + assert!(matches!( + load_results[0], + TransactionLoadResult::FeesOnly(FeesOnlyTransaction { + load_error: TransactionError::InvalidProgramForExecution, + .. 
+ }), + )); let validation_result = Err(TransactionError::InvalidWritableAccount); - let result = load_accounts( + let load_results = load_accounts( &mock_bank, &[sanitized_transaction.clone()], vec![validation_result], @@ -1860,7 +1951,10 @@ mod tests { &ProgramCacheForTxBatch::default(), ); - assert_eq!(result, vec![Err(TransactionError::InvalidWritableAccount)]); + assert!(matches!( + load_results[0], + TransactionLoadResult::NotLoaded(TransactionError::InvalidWritableAccount), + )); } #[test] diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 54b1abb6740661..b43fb3429557e4 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -5,7 +5,7 @@ use { account_loader::{ collect_rent_from_account, load_accounts, validate_fee_payer, CheckedTransactionDetails, LoadedTransaction, TransactionCheckResult, - TransactionValidationResult, ValidatedTransactionDetails, + TransactionLoadResult, TransactionValidationResult, ValidatedTransactionDetails, }, account_overrides::AccountOverrides, message_processor::MessageProcessor, @@ -298,8 +298,9 @@ impl TransactionBatchProcessor { .into_iter() .zip(sanitized_txs.iter()) .map(|(load_result, tx)| match load_result { - Err(e) => Err(e.clone()), - Ok(loaded_transaction) => { + TransactionLoadResult::NotLoaded(err) => Err(err), + TransactionLoadResult::FeesOnly(fees_only_tx) => Err(fees_only_tx.load_error), + TransactionLoadResult::Loaded(loaded_transaction) => { let executed_tx = self.execute_loaded_transaction( tx, loaded_transaction, From 223cca374dee7d429a186d02ed71b405a678a27e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 22:32:47 +0800 Subject: [PATCH 104/529] build(deps): bump wasm-bindgen from 0.2.92 to 0.2.93 (#2573) * build(deps): bump wasm-bindgen from 0.2.92 to 0.2.93 Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.92 to 0.2.93. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/compare/0.2.92...0.2.93) --- updated-dependencies: - dependency-name: wasm-bindgen dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 21 +++++++++++---------- programs/sbf/Cargo.lock | 21 +++++++++++---------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6702d3c94e08cf..09365fe64c71b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9446,19 +9446,20 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if 1.0.0", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", @@ -9483,9 +9484,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9493,9 +9494,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", @@ -9506,9 +9507,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c08a7d45d0302b..b8af134b63c15d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7877,19 +7877,20 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if 1.0.0", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", @@ -7914,9 +7915,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7924,9 +7925,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", @@ -7937,9 +7938,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" From fbae3a694e29c13e3adfa010bd53436f036f886d Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 13 Aug 2024 10:45:07 -0400 Subject: [PATCH 105/529] ledger-tool: add option to print contents of duplicate slot proofs (#2153) * ledger-tool: add option to print contents of duplicate slot proofs * pr feedback: Add CliDuplicateSlotProof type for display --- ledger-tool/src/blockstore.rs | 9 ++- ledger-tool/src/output.rs | 116 +++++++++++++++++++++++++++++++++- ledger/src/shred.rs | 2 +- 3 files changed, 124 insertions(+), 3 deletions(-) diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs index 6b69fcda0d382e..903c232a7e230b 100644 --- a/ledger-tool/src/blockstore.rs +++ b/ledger-tool/src/blockstore.rs @@ -5,7 +5,7 @@ use { error::{LedgerToolError, Result}, ledger_path::canonicalize_ledger_path, ledger_utils::{get_program_ids, get_shred_storage_type}, - output::{output_ledger, output_slot, SlotBounds, SlotInfo}, + output::{output_ledger, output_slot, CliDuplicateSlotProof, SlotBounds, SlotInfo}, }, chrono::{DateTime, Utc}, clap::{ @@ -707,8 +707,15 @@ fn do_blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) - let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let output_format = + OutputFormat::from_matches(arg_matches, "output_format", verbose_level > 1); for slot in blockstore.duplicate_slots_iterator(starting_slot)? 
{ println!("{slot}"); + if verbose_level > 0 { + let proof = blockstore.get_duplicate_slot(slot).unwrap(); + let cli_duplicate_proof = CliDuplicateSlotProof::from(proof); + println!("{}", output_format.formatted_string(&cli_duplicate_proof)); + } } } ("latest-optimistic-slots", Some(arg_matches)) => { diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index fd0b64eb58c84a..dbe7ff3ef726cb 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -12,7 +12,11 @@ use { display::writeln_transaction, CliAccount, CliAccountNewConfig, OutputFormat, QuietDisplay, VerboseDisplay, }, - solana_ledger::blockstore::{Blockstore, BlockstoreError}, + solana_ledger::{ + blockstore::{Blockstore, BlockstoreError}, + blockstore_meta::{DuplicateSlotProof, ErasureMeta}, + shred::{Shred, ShredType}, + }, solana_runtime::bank::{Bank, TotalAccountsStats}, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -296,6 +300,116 @@ impl fmt::Display for CliBlockWithEntries { } } +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliDuplicateSlotProof { + shred1: CliDuplicateShred, + shred2: CliDuplicateShred, + erasure_consistency: Option, +} + +impl QuietDisplay for CliDuplicateSlotProof {} + +impl VerboseDisplay for CliDuplicateSlotProof { + fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result { + write!(w, " Shred1 ")?; + VerboseDisplay::write_str(&self.shred1, w)?; + write!(w, " Shred2 ")?; + VerboseDisplay::write_str(&self.shred2, w)?; + if let Some(erasure_consistency) = self.erasure_consistency { + writeln!(w, " Erasure consistency {}", erasure_consistency)?; + } + Ok(()) + } +} + +impl fmt::Display for CliDuplicateSlotProof { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, " Shred1 {}", self.shred1)?; + write!(f, " Shred2 {}", self.shred2)?; + if let Some(erasure_consistency) = self.erasure_consistency { + writeln!(f, " Erasure consistency {}", erasure_consistency)?; + } + Ok(()) + } +} + +impl From for CliDuplicateSlotProof { + fn from(proof: DuplicateSlotProof) -> Self { + let shred1 = Shred::new_from_serialized_shred(proof.shred1).unwrap(); + let shred2 = Shred::new_from_serialized_shred(proof.shred2).unwrap(); + let erasure_consistency = (shred1.shred_type() == ShredType::Code + && shred2.shred_type() == ShredType::Code) + .then(|| ErasureMeta::check_erasure_consistency(&shred1, &shred2)); + + Self { + shred1: CliDuplicateShred::from(shred1), + shred2: CliDuplicateShred::from(shred2), + erasure_consistency, + } + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliDuplicateShred { + fec_set_index: u32, + index: u32, + shred_type: ShredType, + version: u16, + merkle_root: Option, + chained_merkle_root: Option, + last_in_slot: bool, + payload: Vec, +} + +impl CliDuplicateShred { + fn write_common(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result { + writeln!( + w, + "fec_set_index {}, index {}, shred_type {:?}\n \ + version {}, merkle_root {:?}, chained_merkle_root {:?}, last_in_slot {}", + self.fec_set_index, + self.index, + self.shred_type, + self.version, + self.merkle_root, + self.chained_merkle_root, + self.last_in_slot, + ) + } +} + +impl QuietDisplay for CliDuplicateShred {} + +impl VerboseDisplay for CliDuplicateShred { + fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result { + self.write_common(w)?; + writeln!(w, " payload: {:?}", self.payload) + } +} + +impl fmt::Display for CliDuplicateShred { + fn fmt(&self, f: &mut 
Formatter<'_>) -> fmt::Result { + self.write_common(f) + } +} + +impl From for CliDuplicateShred { + fn from(shred: Shred) -> Self { + Self { + fec_set_index: shred.fec_set_index(), + index: shred.index(), + shred_type: shred.shred_type(), + version: shred.version(), + merkle_root: shred.merkle_root().ok(), + chained_merkle_root: shred.chained_merkle_root().ok(), + last_in_slot: shred.last_in_slot(), + payload: shred.payload().clone(), + } + } +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EncodedConfirmedBlockWithEntries { diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 814ec2b5bf303a..7525fd5258e442 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -348,7 +348,7 @@ impl Shred { dispatch!(fn set_signature(&mut self, signature: Signature)); dispatch!(fn signed_data(&self) -> Result); - dispatch!(pub(crate) fn chained_merkle_root(&self) -> Result); + dispatch!(pub fn chained_merkle_root(&self) -> Result); // Returns the portion of the shred's payload which is erasure coded. dispatch!(pub(crate) fn erasure_shard(self) -> Result, Error>); // Like Shred::erasure_shard but returning a slice. From 926e06fec4dac2d7a1a42ad920ef3030257c015c Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 13 Aug 2024 10:45:34 -0400 Subject: [PATCH 106/529] gossip: remove vote buffer, send all verified votes to banking_stage (#2509) --- core/src/cluster_info_vote_listener.rs | 284 +------- core/src/lib.rs | 1 - core/src/tpu.rs | 1 - core/src/verified_vote_packets.rs | 930 ------------------------- 4 files changed, 12 insertions(+), 1204 deletions(-) delete mode 100644 core/src/verified_vote_packets.rs diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index ae90a97b88377f..a4306dcbea2ea4 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -6,9 +6,6 @@ use { replay_stage::DUPLICATE_THRESHOLD, result::{Error, Result}, sigverify, - verified_vote_packets::{ - ValidatorGossipVotesIterator, VerifiedVoteMetadata, VerifiedVotePackets, - }, }, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Select, Sender}, log::*, @@ -19,8 +16,7 @@ use { solana_ledger::blockstore::Blockstore, solana_measure::measure::Measure, solana_metrics::inc_new_counter_debug, - solana_perf::packet, - solana_poh::poh_recorder::PohRecorder, + solana_perf::packet::{self, PacketBatch}, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, rpc_subscriptions::RpcSubscriptions, @@ -30,11 +26,10 @@ use { epoch_stakes::EpochStakes, vote_sender_types::ReplayVoteReceiver, }, solana_sdk::{ - clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, + clock::{Slot, DEFAULT_MS_PER_SLOT}, hash::Hash, pubkey::Pubkey, signature::Signature, - slot_hashes, timing::AtomicInterval, transaction::Transaction, }, @@ -44,7 +39,7 @@ use { }, std::{ cmp::max, - collections::{HashMap, HashSet}, + collections::HashMap, iter::repeat, sync::{ atomic::{AtomicBool, Ordering}, @@ -57,8 +52,6 @@ use { // Map from a vote account to the authorized voter for an epoch pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>; -pub type VerifiedLabelVotePacketsSender = Sender>; -pub type VerifiedLabelVotePacketsReceiver = Receiver>; pub type VerifiedVoteTransactionsSender = Sender>; pub type VerifiedVoteTransactionsReceiver = Receiver>; pub type VerifiedVoteSender = Sender<(Pubkey, Vec)>; @@ -69,7 +62,6 @@ pub type DuplicateConfirmedSlotsSender = Sender; pub type 
DuplicateConfirmedSlotsReceiver = Receiver; const THRESHOLDS_TO_CHECK: [f64; 2] = [DUPLICATE_THRESHOLD, VOTE_THRESHOLD_SIZE]; -const BANK_SEND_VOTES_LOOP_SLEEP_MS: u128 = 10; #[derive(Default)] pub struct SlotVoteTracker { @@ -144,45 +136,6 @@ impl VoteTracker { } } -struct BankVoteSenderState { - bank: Arc, - previously_sent_to_bank_votes: HashSet, - bank_send_votes_stats: BankSendVotesStats, -} - -impl BankVoteSenderState { - fn new(bank: Arc) -> Self { - Self { - bank, - previously_sent_to_bank_votes: HashSet::new(), - bank_send_votes_stats: BankSendVotesStats::default(), - } - } - - fn report_metrics(&self) { - self.bank_send_votes_stats.report_metrics(self.bank.slot()); - } -} - -#[derive(Default)] -struct BankSendVotesStats { - num_votes_sent: usize, - num_batches_sent: usize, - total_elapsed: u64, -} - -impl BankSendVotesStats { - fn report_metrics(&self, slot: Slot) { - datapoint_info!( - "cluster_info_vote_listener-bank-send-vote-stats", - ("slot", slot, i64), - ("num_votes_sent", self.num_votes_sent, i64), - ("total_elapsed", self.total_elapsed, i64), - ("num_batches_sent", self.num_batches_sent, i64), - ); - } -} - #[derive(Default)] struct VoteProcessingTiming { gossip_txn_processing_time_us: u64, @@ -234,7 +187,6 @@ impl ClusterInfoVoteListener { exit: Arc, cluster_info: Arc, verified_packets_sender: BankingPacketSender, - poh_recorder: Arc>, vote_tracker: Arc, bank_forks: Arc>, subscriptions: Arc, @@ -245,8 +197,6 @@ impl ClusterInfoVoteListener { bank_notification_sender: Option, duplicate_confirmed_slot_sender: DuplicateConfirmedSlotsSender, ) -> Self { - let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) = - unbounded(); let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded(); let listen_thread = { let exit = exit.clone(); @@ -258,28 +208,14 @@ impl ClusterInfoVoteListener { exit, &cluster_info, &bank_forks, - verified_vote_label_packets_sender, + verified_packets_sender, verified_vote_transactions_sender, ); }) .unwrap() }; - let bank_send_thread = { - let exit = exit.clone(); - Builder::new() - .name("solCiBankSend".to_string()) - .spawn(move || { - let _ = Self::bank_send_loop( - exit, - verified_vote_label_packets_receiver, - poh_recorder, - &verified_packets_sender, - ); - }) - .unwrap() - }; - let send_thread = Builder::new() + let process_thread = Builder::new() .name("solCiProcVotes".to_string()) .spawn(move || { let _ = Self::process_votes_loop( @@ -299,7 +235,7 @@ impl ClusterInfoVoteListener { .unwrap(); Self { - thread_hdls: vec![listen_thread, send_thread, bank_send_thread], + thread_hdls: vec![listen_thread, process_thread], } } @@ -311,7 +247,7 @@ impl ClusterInfoVoteListener { exit: Arc, cluster_info: &ClusterInfo, bank_forks: &RwLock, - verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender, + verified_packets_sender: BankingPacketSender, verified_vote_transactions_sender: VerifiedVoteTransactionsSender, ) -> Result<()> { let mut cursor = Cursor::default(); @@ -321,7 +257,7 @@ impl ClusterInfoVoteListener { if !votes.is_empty() { let (vote_txs, packets) = Self::verify_votes(votes, bank_forks); verified_vote_transactions_sender.send(vote_txs)?; - verified_vote_label_packets_sender.send(packets)?; + verified_packets_sender.send(BankingPacketBatch::new((packets, None)))?; } sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS)); } @@ -332,7 +268,7 @@ impl ClusterInfoVoteListener { fn verify_votes( votes: Vec, bank_forks: &RwLock, - ) -> (Vec, Vec) { + ) -> (Vec, Vec) { let mut 
packet_batches = packet::to_packet_batches(&votes, 1); // Votes should already be filtered by this point. @@ -363,126 +299,11 @@ impl ClusterInfoVoteListener { if !keys.any(|(i, key)| tx.message.is_signer(i) && key == authorized_voter) { return None; } - let verified_vote_metadata = VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch, - signature: *tx.signatures.first()?, - }; - Some((tx, verified_vote_metadata)) + Some((tx, packet_batch)) }) .unzip() } - fn bank_send_loop( - exit: Arc, - verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver, - poh_recorder: Arc>, - verified_packets_sender: &BankingPacketSender, - ) -> Result<()> { - let mut verified_vote_packets = VerifiedVotePackets::default(); - let mut time_since_lock = Instant::now(); - let mut bank_vote_sender_state_option: Option = None; - - loop { - if exit.load(Ordering::Relaxed) { - return Ok(()); - } - - let would_be_leader = poh_recorder - .read() - .unwrap() - .would_be_leader(3 * slot_hashes::MAX_ENTRIES as u64 * DEFAULT_TICKS_PER_SLOT); - - if let Err(e) = verified_vote_packets.receive_and_process_vote_packets( - &verified_vote_label_packets_receiver, - would_be_leader, - ) { - match e { - Error::RecvTimeout(RecvTimeoutError::Disconnected) - | Error::RecvTimeout(RecvTimeoutError::Timeout) => (), - _ => { - error!("thread {:?} error {:?}", thread::current().name(), e); - } - } - } - - if time_since_lock.elapsed().as_millis() > BANK_SEND_VOTES_LOOP_SLEEP_MS { - // Always set this to avoid taking the poh lock too often - time_since_lock = Instant::now(); - // We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS` - Self::check_for_leader_bank_and_send_votes( - &mut bank_vote_sender_state_option, - poh_recorder.read().unwrap().bank(), - verified_packets_sender, - &verified_vote_packets, - )?; - } - } - } - - fn check_for_leader_bank_and_send_votes( - bank_vote_sender_state_option: &mut Option, - current_working_bank: Option>, - verified_packets_sender: &BankingPacketSender, - verified_vote_packets: &VerifiedVotePackets, - ) -> Result<()> { - let Some(current_working_bank) = current_working_bank else { - // We are not the leader! 
- if let Some(bank_vote_sender_state) = bank_vote_sender_state_option { - // This ensures we report the last slot's metrics - bank_vote_sender_state.report_metrics(); - *bank_vote_sender_state_option = None; - } - return Ok(()); - }; - // We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS` - if let Some(bank_vote_sender_state) = bank_vote_sender_state_option { - if bank_vote_sender_state.bank.slot() != current_working_bank.slot() { - bank_vote_sender_state.report_metrics(); - *bank_vote_sender_state_option = - Some(BankVoteSenderState::new(current_working_bank)); - } - } else { - *bank_vote_sender_state_option = Some(BankVoteSenderState::new(current_working_bank)); - } - - let bank_vote_sender_state = bank_vote_sender_state_option.as_mut().unwrap(); - let BankVoteSenderState { - ref bank, - ref mut bank_send_votes_stats, - ref mut previously_sent_to_bank_votes, - } = bank_vote_sender_state; - - // This logic may run multiple times for the same leader bank, - // we just have to ensure that the same votes are not sent - // to the bank multiple times, which is guaranteed by - // `previously_sent_to_bank_votes` - let gossip_votes_iterator = ValidatorGossipVotesIterator::new( - bank.clone(), - verified_vote_packets, - previously_sent_to_bank_votes, - ); - - let mut filter_gossip_votes_timing = Measure::start("filter_gossip_votes"); - - // Send entire batch at a time so that there is no partial processing of - // a single validator's votes by two different banks. This might happen - // if we sent each vote individually, for instance if we created two different - // leader banks from the same common parent, one leader bank may process - // only the later votes and ignore the earlier votes. - for single_validator_votes in gossip_votes_iterator { - bank_send_votes_stats.num_votes_sent += single_validator_votes.len(); - bank_send_votes_stats.num_batches_sent += 1; - verified_packets_sender - .send(BankingPacketBatch::new((single_validator_votes, None)))?; - } - filter_gossip_votes_timing.stop(); - bank_send_votes_stats.total_elapsed += filter_gossip_votes_timing.as_us(); - - Ok(()) - } - #[allow(clippy::too_many_arguments)] fn process_votes_loop( exit: Arc, @@ -896,7 +717,6 @@ impl ClusterInfoVoteListener { mod tests { use { super::*, - crate::banking_trace::BankingTracer, itertools::Itertools, solana_perf::packet, solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, @@ -1629,11 +1449,8 @@ mod tests { assert!(packets.is_empty()); } - fn verify_packets_len(packets: &[VerifiedVoteMetadata], ref_value: usize) { - let num_packets: usize = packets - .iter() - .map(|vote_metadata| vote_metadata.packet_batch.len()) - .sum(); + fn verify_packets_len(packets: &[PacketBatch], ref_value: usize) { + let num_packets: usize = packets.iter().map(|pb| pb.len()).sum(); assert_eq!(num_packets, ref_value); } @@ -1723,83 +1540,6 @@ mod tests { run_test_bad_vote(Some(Hash::default())); } - #[test] - fn test_check_for_leader_bank_and_send_votes() { - let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(1000); - let current_leader_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let mut bank_vote_sender_state_option: Option = None; - let verified_vote_packets = VerifiedVotePackets::default(); - let (verified_packets_sender, _verified_packets_receiver) = - BankingTracer::channel_for_test(); - - // 1) If we hand over a `current_leader_bank`, vote sender state should be updated - ClusterInfoVoteListener::check_for_leader_bank_and_send_votes( - &mut bank_vote_sender_state_option, - Some(current_leader_bank.clone()), - &verified_packets_sender, - &verified_vote_packets, - ) - .unwrap(); - - assert_eq!( - bank_vote_sender_state_option.as_ref().unwrap().bank.slot(), - current_leader_bank.slot() - ); - bank_vote_sender_state_option - .as_mut() - .unwrap() - .previously_sent_to_bank_votes - .insert(Signature::new_unique()); - - // 2) Handing over the same leader bank again should not update the state - ClusterInfoVoteListener::check_for_leader_bank_and_send_votes( - &mut bank_vote_sender_state_option, - Some(current_leader_bank.clone()), - &verified_packets_sender, - &verified_vote_packets, - ) - .unwrap(); - // If we hand over a `current_leader_bank`, vote sender state should be updated - assert_eq!( - bank_vote_sender_state_option.as_ref().unwrap().bank.slot(), - current_leader_bank.slot() - ); - assert_eq!( - bank_vote_sender_state_option - .as_ref() - .unwrap() - .previously_sent_to_bank_votes - .len(), - 1 - ); - - let slot = current_leader_bank.slot() + 1; - let current_leader_bank = Arc::new(Bank::new_from_parent( - current_leader_bank, - &Pubkey::default(), - slot, - )); - ClusterInfoVoteListener::check_for_leader_bank_and_send_votes( - &mut bank_vote_sender_state_option, - Some(current_leader_bank.clone()), - &verified_packets_sender, - &verified_vote_packets, - ) - .unwrap(); - - // 3) If we hand over a new `current_leader_bank`, vote sender state should be updated - // to the new bank - assert_eq!( - bank_vote_sender_state_option.as_ref().unwrap().bank.slot(), - current_leader_bank.slot() - ); - assert!(bank_vote_sender_state_option - .as_ref() - .unwrap() - .previously_sent_to_bank_votes - .is_empty()); - } - #[test] fn test_track_new_votes_filter() { let validator_keypairs: Vec<_> = diff --git a/core/src/lib.rs b/core/src/lib.rs index c6ab7b7e9c8b3d..da9d69ed508875 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -44,7 +44,6 @@ pub mod tracer_packet_stats; pub mod tvu; pub mod unfrozen_gossip_verified_vote_hashes; pub mod validator; -pub mod verified_vote_packets; pub mod vote_simulator; pub mod voting_service; pub mod warm_quic_cache_service; diff --git a/core/src/tpu.rs b/core/src/tpu.rs index c76d2dd4d0094e..05982c9c3edf29 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -222,7 +222,6 @@ impl Tpu { exit.clone(), cluster_info.clone(), gossip_vote_sender, - poh_recorder.clone(), vote_tracker, bank_forks.clone(), subscriptions.clone(), diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs deleted file mode 100644 index 0840c57b809d22..00000000000000 --- a/core/src/verified_vote_packets.rs +++ /dev/null @@ -1,930 +0,0 @@ -use { - crate::{cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, result::Result}, - itertools::Itertools, - solana_perf::packet::PacketBatch, - solana_runtime::bank::Bank, - solana_sdk::{ - account::from_account, - clock::{Slot, UnixTimestamp}, - hash::Hash, - pubkey::Pubkey, - signature::Signature, - slot_hashes::SlotHashes, - sysvar, - }, - 
solana_vote::vote_transaction::VoteTransaction, - std::{ - collections::{BTreeMap, HashMap, HashSet}, - sync::Arc, - time::Duration, - }, -}; - -const MAX_VOTES_PER_VALIDATOR: usize = 1000; - -pub struct VerifiedVoteMetadata { - pub vote_account_key: Pubkey, - pub vote: VoteTransaction, - pub packet_batch: PacketBatch, - pub signature: Signature, -} - -pub struct ValidatorGossipVotesIterator<'a> { - my_leader_bank: Arc, - slot_hashes: SlotHashes, - verified_vote_packets: &'a VerifiedVotePackets, - vote_account_keys: Vec, - previously_sent_to_bank_votes: &'a mut HashSet, -} - -impl<'a> ValidatorGossipVotesIterator<'a> { - pub fn new( - my_leader_bank: Arc, - verified_vote_packets: &'a VerifiedVotePackets, - previously_sent_to_bank_votes: &'a mut HashSet, - ) -> Self { - let slot_hashes_account = my_leader_bank.get_account(&sysvar::slot_hashes::id()); - - if slot_hashes_account.is_none() { - warn!( - "Slot hashes sysvar doesn't exist on bank {}", - my_leader_bank.slot() - ); - } - - let slot_hashes_account = slot_hashes_account.unwrap_or_default(); - let slot_hashes = from_account::(&slot_hashes_account).unwrap_or_default(); - - // TODO: my_leader_bank.vote_accounts() may not contain zero-staked validators - // in this epoch, but those validators may have stake warming up in the next epoch - // Sort by stake weight so heavier validators' votes are sent first - let vote_account_keys: Vec = my_leader_bank - .vote_accounts() - .iter() - .map(|(pubkey, &(stake, _))| (pubkey, stake)) - .sorted_unstable_by_key(|&(_, stake)| std::cmp::Reverse(stake)) - .map(|(&pubkey, _)| pubkey) - .collect(); - Self { - my_leader_bank, - slot_hashes, - verified_vote_packets, - vote_account_keys, - previously_sent_to_bank_votes, - } - } - - fn filter_vote( - &mut self, - slot: &Slot, - hash: &Hash, - packet: &PacketBatch, - tx_signature: &Signature, - ) -> Option { - // Don't send the same vote to the same bank multiple times - if self.previously_sent_to_bank_votes.contains(tx_signature) { - return None; - } - self.previously_sent_to_bank_votes.insert(*tx_signature); - // Filter out votes on the wrong fork (or too old to be) - // on this fork - if self - .slot_hashes - .get(slot) - .map(|found_hash| found_hash == hash) - .unwrap_or(false) - { - Some(packet.clone()) - } else { - None - } - } -} - -/// Each iteration returns all of the missing votes for a single validator, the votes -/// ordered from smallest to largest. -/// -/// Iterator is done after iterating through all vote accounts -impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { - type Item = Vec; - - fn next(&mut self) -> Option { - use SingleValidatorVotes::*; - while let Some(vote_account_key) = self.vote_account_keys.pop() { - // Get all the gossip votes we've queued up for this validator - // that are: - // 1) missing from the current leader bank - // 2) on the same fork - let validator_votes = self - .verified_vote_packets - .0 - .get(&vote_account_key) - .and_then(|validator_gossip_votes| { - // Fetch the validator's vote state from the bank - self.my_leader_bank - .vote_accounts() - .get(&vote_account_key) - .and_then(|(_stake, vote_account)| { - vote_account.vote_state().as_ref().ok().map(|vote_state| { - let start_vote_slot = - vote_state.last_voted_slot().map(|x| x + 1).unwrap_or(0); - match validator_gossip_votes { - FullTowerVote(GossipVote { - slot, - hash, - packet_batch, - signature, - .. 
- }) => self - .filter_vote(slot, hash, packet_batch, signature) - .map(|packet| vec![packet]) - .unwrap_or_default(), - IncrementalVotes(validator_gossip_votes) => { - validator_gossip_votes - .range((start_vote_slot, Hash::default())..) - .filter_map(|((slot, hash), (packet, tx_signature))| { - self.filter_vote(slot, hash, packet, tx_signature) - }) - .collect::>() - } - } - }) - }) - }); - if let Some(validator_votes) = validator_votes { - if !validator_votes.is_empty() { - return Some(validator_votes); - } - } - } - None - } -} - -#[derive(Debug, Default, Clone)] -pub struct GossipVote { - slot: Slot, - hash: Hash, - packet_batch: PacketBatch, - signature: Signature, - timestamp: Option, -} - -pub enum SingleValidatorVotes { - FullTowerVote(GossipVote), - IncrementalVotes(BTreeMap<(Slot, Hash), (PacketBatch, Signature)>), -} - -impl SingleValidatorVotes { - fn get_latest_gossip_slot(&self) -> Slot { - match self { - Self::FullTowerVote(vote) => vote.slot, - _ => 0, - } - } - - fn get_latest_timestamp(&self) -> Option { - match self { - Self::FullTowerVote(vote) => vote.timestamp, - _ => None, - } - } - - #[cfg(test)] - fn len(&self) -> usize { - match self { - Self::IncrementalVotes(votes) => votes.len(), - _ => 1, - } - } -} - -#[derive(Default)] -pub struct VerifiedVotePackets(HashMap); - -impl VerifiedVotePackets { - pub fn receive_and_process_vote_packets( - &mut self, - vote_packets_receiver: &VerifiedLabelVotePacketsReceiver, - would_be_leader: bool, - ) -> Result<()> { - use SingleValidatorVotes::*; - const RECV_TIMEOUT: Duration = Duration::from_millis(200); - let vote_packets = vote_packets_receiver.recv_timeout(RECV_TIMEOUT)?; - let vote_packets = std::iter::once(vote_packets).chain(vote_packets_receiver.try_iter()); - - // No need to process any votes if we will not be the leader soon. But, - // return early only after draining the channel to avoid accumulating - // votes that will be stale by the time we do become leader - if !would_be_leader { - return Ok(()); - } - - for gossip_votes in vote_packets { - for verfied_vote_metadata in gossip_votes { - let VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch, - signature, - } = verfied_vote_metadata; - if vote.is_empty() { - error!("Empty votes should have been filtered out earlier in the pipeline"); - continue; - } - let slot = vote.last_voted_slot().unwrap(); - let hash = vote.hash(); - let timestamp = vote.timestamp(); - - match vote { - VoteTransaction::VoteStateUpdate(_) | VoteTransaction::TowerSync(_) => { - let (latest_gossip_slot, latest_timestamp) = - self.0.get(&vote_account_key).map_or((0, None), |vote| { - (vote.get_latest_gossip_slot(), vote.get_latest_timestamp()) - }); - // Since votes are not incremental, we keep only the latest vote - // If the vote is for the same slot we will only allow it if - // it has a later timestamp (refreshed vote) - // - // Timestamp can be None if something was wrong with the senders clock. - // We directly compare as Options to ensure that votes with proper - // timestamps have precedence (Some is > None). - if slot > latest_gossip_slot - || ((slot == latest_gossip_slot) && (timestamp > latest_timestamp)) - { - self.0.insert( - vote_account_key, - FullTowerVote(GossipVote { - slot, - hash, - packet_batch, - signature, - timestamp, - }), - ); - } - } - _ => { - if let Some(FullTowerVote(gossip_vote)) = self.0.get_mut(&vote_account_key) - { - if slot > gossip_vote.slot { - warn!( - "Originally {} submitted full tower votes, but now has reverted to incremental votes. 
Converting back to old format.", - vote_account_key - ); - let mut votes = BTreeMap::new(); - let GossipVote { - slot, - hash, - packet_batch, - signature, - .. - } = std::mem::take(gossip_vote); - votes.insert((slot, hash), (packet_batch, signature)); - self.0.insert(vote_account_key, IncrementalVotes(votes)); - } else { - continue; - } - }; - let validator_votes: &mut BTreeMap<(Slot, Hash), (PacketBatch, Signature)> = - match self - .0 - .entry(vote_account_key) - .or_insert(IncrementalVotes(BTreeMap::new())) - { - IncrementalVotes(votes) => votes, - FullTowerVote(_) => continue, // Should never happen - }; - validator_votes.insert((slot, hash), (packet_batch, signature)); - if validator_votes.len() > MAX_VOTES_PER_VALIDATOR { - let smallest_key = validator_votes.keys().next().cloned().unwrap(); - validator_votes.remove(&smallest_key).unwrap(); - } - } - } - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use { - super::{SingleValidatorVotes::*, *}, - crate::{result::Error, vote_simulator::VoteSimulator}, - crossbeam_channel::{unbounded, Receiver, Sender}, - solana_perf::packet::Packet, - solana_sdk::slot_hashes::MAX_ENTRIES, - solana_vote_program::vote_state::{Lockout, TowerSync, Vote}, - std::collections::VecDeque, - }; - - #[test] - fn test_verified_vote_packets_receive_and_process_vote_packets() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - - // Construct the buffer - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - - // Send a vote from `vote_account_key`, check that it was inserted - let vote_slot = 0; - let vote_hash = Hash::new_unique(); - let vote = Vote::new(vec![vote_slot], vote_hash); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote.clone()), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .len(), - 1 - ); - - // Same slot, same hash, should not be inserted - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .len(), - 1 - ); - - // Same slot, different hash, should still be inserted - let new_vote_hash = Hash::new_unique(); - let vote = Vote::new(vec![vote_slot], new_vote_hash); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .len(), - 2 - ); - - // Different vote slot, should be inserted - let vote_slot = 1; - let vote_hash = Hash::new_unique(); - let vote = Vote::new(vec![vote_slot], vote_hash); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([2u8; 64]), - }]) - .unwrap(); - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - verified_vote_packets - .0 - .get(&vote_account_key) - 
.unwrap() - .len(), - 3 - ); - - // No new messages, should time out - assert_matches!( - verified_vote_packets.receive_and_process_vote_packets(&r, true), - Err(Error::RecvTimeout(_)) - ); - } - - #[test] - fn test_verified_vote_packets_receive_and_process_vote_packets_max_len() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - - // Construct the buffer - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - - // Send many more votes than the upper limit per validator - for _ in 0..2 * MAX_VOTES_PER_VALIDATOR { - let vote_slot = 0; - let vote_hash = Hash::new_unique(); - let vote = Vote::new(vec![vote_slot], vote_hash); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - } - - // At most `MAX_VOTES_PER_VALIDATOR` should be stored per validator - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .len(), - MAX_VOTES_PER_VALIDATOR - ); - } - - #[test] - fn test_verified_vote_packets_validator_gossip_votes_iterator_wrong_fork() { - let (s, r) = unbounded(); - let vote_simulator = VoteSimulator::new(1); - let my_leader_bank = vote_simulator.bank_forks.read().unwrap().root_bank(); - let vote_account_key = vote_simulator.vote_pubkeys[0]; - - // Create a bunch of votes with random vote hashes, which should all be ignored - // since they are not on the same fork as `my_leader_bank`, i.e. their hashes do - // not exist in the SlotHashes sysvar for `my_leader_bank` - for _ in 0..MAX_VOTES_PER_VALIDATOR { - let vote_slot = 0; - let vote_hash = Hash::new_unique(); - let vote = Vote::new(vec![vote_slot], vote_hash); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::new_unique(), - }]) - .unwrap(); - } - - // Ingest the votes into the buffer - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - - // Create tracker for previously sent bank votes - let mut previously_sent_to_bank_votes = HashSet::new(); - let mut gossip_votes_iterator = ValidatorGossipVotesIterator::new( - my_leader_bank, - &verified_vote_packets, - &mut previously_sent_to_bank_votes, - ); - - // Wrong fork, we should get no hashes - assert!(gossip_votes_iterator.next().is_none()); - } - - #[test] - fn test_verified_vote_packets_validator_gossip_votes_iterator_correct_fork() { - let (s, r) = unbounded(); - let num_validators = 2; - let vote_simulator = VoteSimulator::new(num_validators); - let mut my_leader_bank = vote_simulator.bank_forks.read().unwrap().root_bank(); - - // Create a set of valid ancestor hashes for this fork - for _ in 0..MAX_ENTRIES { - let slot = my_leader_bank.slot() + 1; - my_leader_bank = Arc::new(Bank::new_from_parent( - my_leader_bank, - &Pubkey::default(), - slot, - )); - } - let slot_hashes_account = my_leader_bank - .get_account(&sysvar::slot_hashes::id()) - .expect("Slot hashes sysvar must exist"); - let slot_hashes = from_account::(&slot_hashes_account).unwrap(); - - // Create valid votes - for i in 0..num_validators { - let vote_account_key = vote_simulator.vote_pubkeys[i]; - // Used to uniquely identify the packets for each validator - let num_packets = i + 1; - for (vote_slot, vote_hash) in 
slot_hashes.slot_hashes().iter() { - let vote = Vote::new(vec![*vote_slot], *vote_hash); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::new(vec![Packet::default(); num_packets]), - signature: Signature::new_unique(), - }]) - .unwrap(); - } - } - - // Ingest the votes into the buffer - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - - // One batch of vote packets per validator - assert_eq!(verified_vote_packets.0.len(), num_validators); - // Each validator should have one vote per slot - assert!(verified_vote_packets - .0 - .values() - .all(|validator_votes| validator_votes.len() == slot_hashes.slot_hashes().len())); - - let mut previously_sent_to_bank_votes = HashSet::new(); - let mut gossip_votes_iterator = ValidatorGossipVotesIterator::new( - my_leader_bank.clone(), - &verified_vote_packets, - &mut previously_sent_to_bank_votes, - ); - - // Get and verify batches - for _ in 0..num_validators { - let validator_batch: Vec = gossip_votes_iterator.next().unwrap(); - assert_eq!(validator_batch.len(), slot_hashes.slot_hashes().len()); - let expected_len = validator_batch[0].len(); - assert!(validator_batch - .iter() - .all(|batch| batch.len() == expected_len)); - } - - // Should be empty now - assert!(gossip_votes_iterator.next().is_none()); - - // If we construct another iterator, should return nothing because `previously_sent_to_bank_votes` - // should filter out everything - let mut gossip_votes_iterator = ValidatorGossipVotesIterator::new( - my_leader_bank.clone(), - &verified_vote_packets, - &mut previously_sent_to_bank_votes, - ); - assert!(gossip_votes_iterator.next().is_none()); - - // If we add a new vote, we should return it - my_leader_bank.freeze(); - let vote_slot = my_leader_bank.slot(); - let vote_hash = my_leader_bank.hash(); - let new_leader_slot = my_leader_bank.slot() + 1; - let my_leader_bank = Arc::new(Bank::new_from_parent( - my_leader_bank, - &Pubkey::default(), - new_leader_slot, - )); - let vote_account_key = vote_simulator.vote_pubkeys[1]; - let vote = VoteTransaction::from(Vote::new(vec![vote_slot], vote_hash)); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::new_unique(), - }]) - .unwrap(); - // Ingest the votes into the buffer - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - let mut gossip_votes_iterator = ValidatorGossipVotesIterator::new( - my_leader_bank, - &verified_vote_packets, - &mut previously_sent_to_bank_votes, - ); - assert!(gossip_votes_iterator.next().is_some()); - assert!(gossip_votes_iterator.next().is_none()); - } - - #[test] - fn test_only_latest_vote_is_sent_with_feature() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - - // Send three tower syncs that are out of order - let first_vote = TowerSync::from(vec![(2, 4), (4, 3), (6, 2), (7, 1)]); - let second_vote = TowerSync::from(vec![(2, 4), (4, 3), (11, 1)]); - let third_vote = TowerSync::from(vec![(2, 5), (4, 4), (11, 3), (12, 2), (13, 1)]); - - for vote in [second_vote, first_vote] { - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - } - - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - 
verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - - // second_vote should be kept and first_vote ignored - let slot = verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .get_latest_gossip_slot(); - assert_eq!(11, slot); - - // Now send the third_vote, it should overwrite second_vote - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(third_vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - let slot = verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .get_latest_gossip_slot(); - assert_eq!(13, slot); - } - - fn send_tower_sync_and_process( - s: &Sender>, - r: &Receiver>, - vote: TowerSync, - vote_account_key: Pubkey, - verified_vote_packets: &mut VerifiedVotePackets, - ) -> GossipVote { - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote: VoteTransaction::from(vote), - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - verified_vote_packets - .receive_and_process_vote_packets(r, true) - .unwrap(); - match verified_vote_packets.0.get(&vote_account_key).unwrap() { - SingleValidatorVotes::FullTowerVote(gossip_vote) => gossip_vote.clone(), - _ => panic!("Received incremental vote"), - } - } - - #[test] - fn test_latest_vote_tie_break_with_feature() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - - // Send identical tower syncs with different timestamps - let mut vote = TowerSync::from(vec![(2, 4), (4, 3), (6, 2), (7, 1)]); - vote.timestamp = Some(5); - - let mut vote_later_ts = vote.clone(); - vote_later_ts.timestamp = Some(6); - - let mut vote_earlier_ts = vote.clone(); - vote_earlier_ts.timestamp = Some(4); - - let mut vote_no_ts = vote.clone(); - vote_no_ts.timestamp = None; - - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - - // Original vote - let GossipVote { - slot, timestamp, .. - } = send_tower_sync_and_process( - &s, - &r, - vote.clone(), - vote_account_key, - &mut verified_vote_packets, - ); - assert_eq!(slot, vote.last_voted_slot().unwrap()); - assert_eq!(timestamp, vote.timestamp); - - // Same vote with later timestamp should override - let GossipVote { - slot, timestamp, .. - } = send_tower_sync_and_process( - &s, - &r, - vote_later_ts.clone(), - vote_account_key, - &mut verified_vote_packets, - ); - assert_eq!(slot, vote_later_ts.last_voted_slot().unwrap()); - assert_eq!(timestamp, vote_later_ts.timestamp); - - // Same vote with earlier timestamp should not override - let GossipVote { - slot, timestamp, .. - } = send_tower_sync_and_process( - &s, - &r, - vote_earlier_ts, - vote_account_key, - &mut verified_vote_packets, - ); - assert_eq!(slot, vote_later_ts.last_voted_slot().unwrap()); - assert_eq!(timestamp, vote_later_ts.timestamp); - - // Same vote with no timestamp should not override - let GossipVote { - slot, timestamp, .. 
- } = send_tower_sync_and_process( - &s, - &r, - vote_no_ts, - vote_account_key, - &mut verified_vote_packets, - ); - assert_eq!(slot, vote_later_ts.last_voted_slot().unwrap()); - assert_eq!(timestamp, vote_later_ts.timestamp); - } - - #[test] - fn test_latest_vote_feature_upgrade() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - - // Send incremental votes - for i in 0..100 { - let vote = VoteTransaction::from(Vote::new(vec![i], Hash::new_unique())); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - } - - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - // Receive votes without the feature active - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - 100, - verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .len() - ); - - // Now send some new votes - for i in 101..201 { - let slots = std::iter::zip((i - 30)..(i + 1), (1..32).rev()) - .map(|(slot, confirmation_count)| { - Lockout::new_with_confirmation_count(slot, confirmation_count) - }) - .collect::>(); - let vote = VoteTransaction::from(TowerSync::new( - slots, - Some(i - 32), - Hash::new_unique(), - Hash::new_unique(), - )); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - } - - // Receive votes with the feature active - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - if let FullTowerVote(vote) = verified_vote_packets.0.get(&vote_account_key).unwrap() { - assert_eq!(200, vote.slot); - } else { - panic!("Feature active but incremental votes are present"); - } - } - - #[test] - fn test_incremental_votes_with_feature_active() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - - let hash = Hash::new_unique(); - let vote = VoteTransaction::from(Vote::new(vec![42], hash)); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - - // Receive incremental votes with the feature active - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - - // Should still store as incremental votes - if let IncrementalVotes(votes) = verified_vote_packets.0.get(&vote_account_key).unwrap() { - assert!(votes.contains_key(&(42, hash))); - } else { - panic!("Although feature is active, incremental votes should not be stored as full tower votes"); - } - } - - #[test] - fn test_latest_votes_downgrade_full_to_incremental() { - let (s, r) = unbounded(); - let vote_account_key = solana_sdk::pubkey::new_rand(); - let mut verified_vote_packets = VerifiedVotePackets(HashMap::new()); - - let vote = VoteTransaction::from(TowerSync::from(vec![(42, 1)])); - let hash_42 = vote.hash(); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - - // Receive full votes - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - assert_eq!( - 42, - verified_vote_packets - .0 - .get(&vote_account_key) - .unwrap() - .get_latest_gossip_slot() - ); - - // Try to send an old incremental vote from pre feature activation - let 
vote = VoteTransaction::from(Vote::new(vec![34], Hash::new_unique())); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - - // Try to receive nothing should happen - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - if let FullTowerVote(vote) = verified_vote_packets.0.get(&vote_account_key).unwrap() { - assert_eq!(42, vote.slot); - } else { - panic!("Old vote triggered a downgrade conversion"); - } - - // Now try to send an incremental vote - let vote = VoteTransaction::from(Vote::new(vec![43], Hash::new_unique())); - let hash_43 = vote.hash(); - s.send(vec![VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch: PacketBatch::default(), - signature: Signature::from([1u8; 64]), - }]) - .unwrap(); - - // Try to receive and vote lands as well as the conversion back to incremental votes - verified_vote_packets - .receive_and_process_vote_packets(&r, true) - .unwrap(); - if let IncrementalVotes(votes) = verified_vote_packets.0.get(&vote_account_key).unwrap() { - assert!(votes.contains_key(&(42, hash_42))); - assert!(votes.contains_key(&(43, hash_43))); - assert_eq!(2, votes.len()); - } else { - panic!("Conversion back to incremental votes failed"); - } - } -} From 65f64663963cbeee8b72eba289f25851e70f3ecf Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 13 Aug 2024 11:23:51 -0400 Subject: [PATCH 107/529] Flushes hash cache data file's mmap after done writing (#2567) --- accounts-db/src/cache_hash_data.rs | 11 +++++++++-- accounts-db/src/cache_hash_data_stats.rs | 6 ++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index 350fdec560caed..e3aa1fabee01ec 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -5,7 +5,7 @@ use { crate::{accounts_hash::CalculateHashIntermediate, cache_hash_data_stats::CacheHashDataStats}, bytemuck_derive::{Pod, Zeroable}, memmap2::MmapMut, - solana_measure::measure::Measure, + solana_measure::{measure::Measure, measure_us}, solana_sdk::clock::Slot, std::{ collections::HashSet, @@ -369,10 +369,17 @@ impl CacheHashData { }); assert_eq!(i, entries); m2.stop(); + // We must flush the mmap after writing, since we're about to turn around and load it for + // reading *not* via the mmap. If the mmap is never flushed to disk, it is possible the + // entries will *not* be visible when the reader comes along. 
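+ // Per the memmap2 documentation, MmapMut::flush() is a synchronous flush
+ // (msync on Unix), so once it returns, an ordinary read of the file is
+ // expected to observe the writes made through the mmap.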
+ let (_, measure_flush_us) = measure_us!(cache_file.mmap.flush()?); + m.stop(); self.stats .write_to_mmap_us .fetch_add(m2.as_us(), Ordering::Relaxed); - m.stop(); + self.stats + .flush_mmap_us + .fetch_add(measure_flush_us, Ordering::Relaxed); self.stats.save_us.fetch_add(m.as_us(), Ordering::Relaxed); self.stats.saved_to_cache.fetch_add(1, Ordering::Relaxed); Ok(()) diff --git a/accounts-db/src/cache_hash_data_stats.rs b/accounts-db/src/cache_hash_data_stats.rs index ba134dc226a288..f59c513a048488 100644 --- a/accounts-db/src/cache_hash_data_stats.rs +++ b/accounts-db/src/cache_hash_data_stats.rs @@ -11,6 +11,7 @@ pub struct CacheHashDataStats { pub save_us: AtomicU64, pub saved_to_cache: AtomicUsize, pub write_to_mmap_us: AtomicU64, + pub flush_mmap_us: AtomicU64, pub create_save_us: AtomicU64, pub load_us: AtomicU64, pub read_us: AtomicU64, @@ -61,6 +62,11 @@ impl CacheHashDataStats { self.write_to_mmap_us.load(Ordering::Relaxed), i64 ), + ( + "flush_mmap_us", + self.flush_mmap_us.load(Ordering::Relaxed), + i64 + ), ( "create_save_us", self.create_save_us.load(Ordering::Relaxed), From fc208a011f47d417e90bc0c445f7f25924179c58 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 13 Aug 2024 10:37:19 -0500 Subject: [PATCH 108/529] unified_scheduler_logic: replace get_account_locks_unchecked (#2554) --- unified-scheduler-logic/src/lib.rs | 68 ++++++++++++++++++------------ 1 file changed, 42 insertions(+), 26 deletions(-) diff --git a/unified-scheduler-logic/src/lib.rs b/unified-scheduler-logic/src/lib.rs index 50647904ca61c2..da748ef401d1fe 100644 --- a/unified-scheduler-logic/src/lib.rs +++ b/unified-scheduler-logic/src/lib.rs @@ -772,35 +772,51 @@ impl SchedulingStateMachine { index: usize, usage_queue_loader: &mut impl FnMut(Pubkey) -> UsageQueue, ) -> Task { - // Calling the _unchecked() version here is safe for faster operation, because - // `get_account_locks()` (the safe variant) is ensured to be called in - // DefaultTransactionHandler::handle() via Bank::prepare_unlocked_batch_from_single_tx(). + // It's crucial for tasks to be validated with + // `account_locks::validate_account_locks()` prior to the creation. + // That's because it's part of protocol consensus regarding the + // rejection of blocks containing malformed transactions + // (`AccountLoadedTwice` and `TooManyAccountLocks`). Even more, + // `SchedulingStateMachine` can't properly handle transactions with + // duplicate addresses (those falling under `AccountLoadedTwice`). // - // The safe variant has additional account-locking related verifications, which is crucial. + // However, it's okay for now not to call `::validate_account_locks()` + // here. // - // Currently the replaying stage is redundantly calling `get_account_locks()` when unified - // scheduler is enabled on the given transaction at the blockstore. This will be relaxed - // for optimization in the future. As for banking stage with unified scheduler, it will - // need to run .get_account_locks() at least once somewhere in the code path. In the - // distant future, this function `create_task()` should be adjusted so that both stages do - // the checks before calling this (say, with some ad-hoc type like - // `SanitizedTransactionWithCheckedAccountLocks`) or do the checks here, resulting in - // eliminating the redundant one in the replaying stage and in the handler. 
- let locks = transaction.get_account_locks_unchecked(); - - let writable_locks = locks - .writable - .iter() - .map(|address| (address, RequestedUsage::Writable)); - let readonly_locks = locks - .readonly + // Currently `replay_stage` is always calling + //`::validate_account_locks()` regardless of whether unified-scheduler + // is enabled or not at the blockstore + // (`Bank::prepare_sanitized_batch()` is called in + // `process_entries()`). This verification will be hoisted for + // optimization when removing + // `--block-verification-method=blockstore-processor`. + // + // As for `banking_stage` with unified scheduler, it will need to run + // `validate_account_locks()` at least once somewhere in the code path. + // In the distant future, this function (`create_task()`) should be + // adjusted so that both stages do the checks before calling this or do + // the checks here, to simplify the two code paths regarding the + // essential `validate_account_locks` validation. + // + // Lastly, `validate_account_locks()` is currently called in + // `DefaultTransactionHandler::handle()` via + // `Bank::prepare_unlocked_batch_from_single_tx()` as well. + // This redundancy is known. It was just left as-is out of abundance + // of caution. + let lock_contexts = transaction + .message() + .account_keys() .iter() - .map(|address| (address, RequestedUsage::Readonly)); - - let lock_contexts = writable_locks - .chain(readonly_locks) - .map(|(address, requested_usage)| { - LockContext::new(usage_queue_loader(**address), requested_usage) + .enumerate() + .map(|(index, address)| { + LockContext::new( + usage_queue_loader(*address), + if transaction.message().is_writable(index) { + RequestedUsage::Writable + } else { + RequestedUsage::Readonly + }, + ) }) .collect(); From 26f9bfc30945f1e8f92cd1c70d4dff3aa22e965a Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 13 Aug 2024 23:47:15 +0800 Subject: [PATCH 109/529] docs: fix coption doc test (#2571) * docs: fix coption doc test * Update sdk/program/src/program_option.rs Co-authored-by: Brooks * Update sdk/program/src/program_option.rs Co-authored-by: Brooks --------- Co-authored-by: Brooks --- sdk/program/src/program_option.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sdk/program/src/program_option.rs b/sdk/program/src/program_option.rs index b1a7c18d8a75a0..3496e5c282a804 100644 --- a/sdk/program/src/program_option.rs +++ b/sdk/program/src/program_option.rs @@ -171,7 +171,8 @@ impl COption { /// assert_eq!(x.expect("the world is ending"), "value"); /// ``` /// - /// ```ignore{.should_panic} + /// ```should_panic + /// # use solana_program::program_option::COption; /// let x: COption<&str> = COption::None; /// x.expect("the world is ending"); // panics with `the world is ending` /// ``` @@ -203,7 +204,8 @@ impl COption { /// assert_eq!(x.unwrap(), "air"); /// ``` /// - /// ```ignore{.should_panic} + /// ```should_panic + /// # use solana_program::program_option::COption; /// let x: COption<&str> = COption::None; /// assert_eq!(x.unwrap(), "air"); // fails /// ``` From 38a2dc76e4c2a5f4191fafef24091fbe93e95f76 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Tue, 13 Aug 2024 12:04:34 -0400 Subject: [PATCH 110/529] chore: remove unused solana-metrics dependencies (#2565) chore: remove unused solana-metrics crate dependencies. 
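Since edition 2018, Rust resolves dependencies declared in Cargo.toml without
`extern crate` items, so a crate that is never referenced in code can be dropped
from both the manifest and the crate root. A minimal sketch of the pattern being
removed (illustrative only, not the exact crate contents):

    // Before (edition-2015 style): pulls the crate into scope even when unused.
    extern crate solana_metrics;

    // After: nothing at the crate root. Modules that actually use the crate
    // import items directly where needed, e.g.:
    use solana_metrics::datapoint_info;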
--- Cargo.lock | 3 --- client-test/Cargo.toml | 1 - client/Cargo.toml | 1 - client/src/lib.rs | 2 -- programs/sbf/Cargo.lock | 2 -- tpu-client/Cargo.toml | 1 - tpu-client/src/lib.rs | 2 -- 7 files changed, 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09365fe64c71b9..ab43623d0cb84a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5989,7 +5989,6 @@ dependencies = [ "rayon", "solana-connection-cache", "solana-measure", - "solana-metrics", "solana-pubsub-client", "solana-quic-client", "solana-rpc-client", @@ -6016,7 +6015,6 @@ dependencies = [ "solana-logger", "solana-measure", "solana-merkle-tree", - "solana-metrics", "solana-perf", "solana-pubsub-client", "solana-rayon-threadlimit", @@ -7824,7 +7822,6 @@ dependencies = [ "rayon", "solana-connection-cache", "solana-measure", - "solana-metrics", "solana-pubsub-client", "solana-rpc-client", "solana-rpc-client-api", diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 514d99ada1ca10..18c81ba2ae84e5 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -18,7 +18,6 @@ solana-client = { workspace = true } solana-ledger = { workspace = true } solana-measure = { workspace = true } solana-merkle-tree = { workspace = true } -solana-metrics = { workspace = true } solana-perf = { workspace = true } solana-pubsub-client = { workspace = true } solana-rayon-threadlimit = { workspace = true } diff --git a/client/Cargo.toml b/client/Cargo.toml index ece0bc82a9cacb..d68ca8c22e36bf 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -22,7 +22,6 @@ quinn = { workspace = true } rayon = { workspace = true } solana-connection-cache = { workspace = true } solana-measure = { workspace = true } -solana-metrics = { workspace = true } solana-pubsub-client = { workspace = true } solana-quic-client = { workspace = true } solana-rpc-client = { workspace = true, features = ["default"] } diff --git a/client/src/lib.rs b/client/src/lib.rs index f5e045ff531604..e1951521d1d761 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -7,8 +7,6 @@ pub mod thin_client; pub mod tpu_client; pub mod transaction_executor; -extern crate solana_metrics; - pub use solana_rpc_client::mock_sender_for_cli; pub mod blockhash_query { diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b8af134b63c15d..155dc71441b875 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4746,7 +4746,6 @@ dependencies = [ "rayon", "solana-connection-cache", "solana-measure", - "solana-metrics", "solana-pubsub-client", "solana-quic-client", "solana-rpc-client", @@ -6470,7 +6469,6 @@ dependencies = [ "rayon", "solana-connection-cache", "solana-measure", - "solana-metrics", "solana-pubsub-client", "solana-rpc-client", "solana-rpc-client-api", diff --git a/tpu-client/Cargo.toml b/tpu-client/Cargo.toml index 63fbc65583841b..77bb2674008036 100644 --- a/tpu-client/Cargo.toml +++ b/tpu-client/Cargo.toml @@ -19,7 +19,6 @@ log = { workspace = true } rayon = { workspace = true } solana-connection-cache = { workspace = true } solana-measure = { workspace = true } -solana-metrics = { workspace = true } solana-pubsub-client = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } diff --git a/tpu-client/src/lib.rs b/tpu-client/src/lib.rs index 54adaec935a213..db01a6f0bb4635 100644 --- a/tpu-client/src/lib.rs +++ b/tpu-client/src/lib.rs @@ -2,5 +2,3 @@ pub mod nonblocking; pub mod tpu_client; - -extern crate solana_metrics; From b09567c1c355d4ec24d749a4e6f03bdde02c1319 Mon Sep 17 00:00:00 2001 
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 16:25:52 +0000 Subject: [PATCH 111/529] build(deps): bump serde from 1.0.205 to 1.0.207 (#2572) * build(deps): bump serde from 1.0.205 to 1.0.207 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.205 to 1.0.207. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.205...v1.0.207) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files * sync the version --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: yihau --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab43623d0cb84a..a70e10d9031b55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5016,9 +5016,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.205" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33aedb1a7135da52b7c21791455563facbbcc43d0f0f66165b42c21b3dfb150" +checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" dependencies = [ "serde_derive", ] @@ -5034,9 +5034,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.205" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" +checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 2e4e857533db0d..482ef6a0c4e21e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -319,9 +319,9 @@ rustls = { version = "0.21.12", default-features = false, features = ["quic"] } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" -serde = "1.0.205" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.207" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" -serde_derive = "1.0.205" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_derive = "1.0.207" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.124" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 155dc71441b875..251f3565969319 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4170,9 +4170,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.205" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33aedb1a7135da52b7c21791455563facbbcc43d0f0f66165b42c21b3dfb150" +checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" dependencies = [ "serde_derive", ] @@ -4188,9 +4188,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.205" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692d6f5ac90220161d6774db30c662202721e64aed9058d2c394f451261420c1" +checksum = 
"6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" dependencies = [ "proc-macro2", "quote", From e181194ad75a05627d1a8169a0e01a1999460e54 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 13 Aug 2024 13:32:13 -0500 Subject: [PATCH 112/529] use new `validate_account_locks` (#2553) --- accounts-db/src/lib.rs | 2 +- .../transaction_scheduler/scheduler_controller.rs | 5 +++-- core/src/banking_stage/unprocessed_transaction_storage.rs | 5 +++-- runtime/src/bank.rs | 7 +++---- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index fccd1d43695732..68ffe068bec6f2 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -5,7 +5,7 @@ extern crate lazy_static; pub mod account_info; -mod account_locks; +pub mod account_locks; pub mod account_storage; pub mod accounts; mod accounts_cache; diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index cb945cf37ab189..5563f84d1c2204 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -23,6 +23,7 @@ use { }, arrayvec::ArrayVec, crossbeam_channel::RecvTimeoutError, + solana_accounts_db::account_locks::validate_account_locks, solana_cost_model::cost_model::CostModel, solana_measure::measure_us, solana_runtime::{bank::Bank, bank_forks::BankForks}, @@ -526,8 +527,8 @@ impl SchedulerController { }) .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) .filter(|(_packet, tx)| { - SanitizedTransaction::validate_account_locks( - tx.message(), + validate_account_locks( + tx.message().account_keys(), transaction_account_lock_limit, ) .is_ok() diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index ce6638016433c6..428e871bcac6cf 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -17,6 +17,7 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, + solana_accounts_db::account_locks::validate_account_locks, solana_measure::measure_us, solana_runtime::bank::Bank, solana_sdk::{ @@ -171,8 +172,8 @@ fn consume_scan_should_process_packet( let message = sanitized_transaction.message(); // Check the number of locks and whether there are duplicates - if SanitizedTransaction::validate_account_locks( - message, + if validate_account_locks( + message.account_keys(), bank.get_transaction_account_lock_limit(), ) .is_err() diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4fab9ce7405d5f..798f4c84d957cf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -66,6 +66,7 @@ use { }, serde::Serialize, solana_accounts_db::{ + account_locks::validate_account_locks, accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot}, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, @@ -3281,10 +3282,8 @@ impl Bank { transaction: &'a SanitizedTransaction, ) -> TransactionBatch<'_, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); - let lock_result = SanitizedTransaction::validate_account_locks( - transaction.message(), - tx_account_lock_limit, - ); + let lock_result = + validate_account_locks(transaction.message().account_keys(), tx_account_lock_limit); let mut batch = TransactionBatch::new( vec![lock_result], self, From 
8b22b92fc1b2d9402a13e3d5b3932c53d93696c1 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 13 Aug 2024 13:32:53 -0500 Subject: [PATCH 113/529] prioritization_fee_cache: remove get_account_locks (#2556) --- runtime/src/prioritization_fee_cache.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index 332449e81c04c1..bafbde9f7411aa 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -2,6 +2,7 @@ use { crate::{bank::Bank, prioritization_fee::*}, crossbeam_channel::{unbounded, Receiver, Sender}, log::*, + solana_accounts_db::account_locks::validate_account_locks, solana_measure::measure_us, solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ @@ -205,10 +206,14 @@ impl PrioritizationFeeCache { let compute_budget_limits = process_compute_budget_instructions( sanitized_transaction.message().program_instructions_iter(), ); - let account_locks = sanitized_transaction - .get_account_locks(bank.get_transaction_account_lock_limit()); - if compute_budget_limits.is_err() || account_locks.is_err() { + let message = sanitized_transaction.message(); + let lock_result = validate_account_locks( + message.account_keys(), + bank.get_transaction_account_lock_limit(), + ); + + if compute_budget_limits.is_err() || lock_result.is_err() { continue; } let compute_budget_limits = compute_budget_limits.unwrap(); @@ -219,12 +224,13 @@ impl PrioritizationFeeCache { continue; } - let writable_accounts = account_locks - .unwrap() - .writable + let writable_accounts = message + .account_keys() .iter() - .map(|key| **key) - .collect::>(); + .enumerate() + .filter(|(index, _)| message.is_writable(*index)) + .map(|(_, key)| *key) + .collect(); self.sender .send(CacheServiceUpdate::TransactionUpdate { From 955cf6c3f42082700ec36e69b00a8f4a4bc4b195 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 13 Aug 2024 13:33:55 -0500 Subject: [PATCH 114/529] TransactionView: TransactionMeta (#2558) --- transaction-view/src/bytes.rs | 1 + transaction-view/src/lib.rs | 4 +- transaction-view/src/transaction_meta.rs | 394 +++++++++++++++++++++++ 3 files changed, 397 insertions(+), 2 deletions(-) create mode 100644 transaction-view/src/transaction_meta.rs diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs index 563acf0e9ae5e0..e66442efbb2a93 100644 --- a/transaction-view/src/bytes.rs +++ b/transaction-view/src/bytes.rs @@ -31,6 +31,7 @@ pub fn read_byte(bytes: &[u8], offset: &mut usize) -> Result { /// /// Assumptions: /// - The current offset is not greater than `bytes.len()`. +#[allow(dead_code)] #[inline(always)] pub fn read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result { let mut result = 0u16; diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index a9ed7eb9b8f17c..40cea3da25393c 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -1,9 +1,7 @@ // Parsing helpers only need to be public for benchmarks. 
#[cfg(feature = "dev-context-only-utils")] -#[allow(dead_code)] pub mod bytes; #[cfg(not(feature = "dev-context-only-utils"))] -#[allow(dead_code)] mod bytes; #[allow(dead_code)] @@ -17,3 +15,5 @@ pub mod result; mod signature_meta; #[allow(dead_code)] mod static_account_keys_meta; +#[allow(dead_code)] +pub mod transaction_meta; diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs new file mode 100644 index 00000000000000..6547823a2c35ea --- /dev/null +++ b/transaction-view/src/transaction_meta.rs @@ -0,0 +1,394 @@ +use { + crate::{ + address_table_lookup_meta::AddressTableLookupMeta, + bytes::advance_offset_for_type, + instructions_meta::InstructionsMeta, + message_header_meta::{MessageHeaderMeta, TransactionVersion}, + result::{Result, TransactionParsingError}, + signature_meta::SignatureMeta, + static_account_keys_meta::StaticAccountKeysMeta, + }, + solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature}, +}; + +pub struct TransactionMeta { + /// Signature metadata. + pub(crate) signature: SignatureMeta, + /// Message header metadata. + pub(crate) message_header: MessageHeaderMeta, + /// Static account keys metadata. + pub(crate) static_account_keys: StaticAccountKeysMeta, + /// Recent blockhash offset. + pub(crate) recent_blockhash_offset: u16, + /// Instructions metadata. + pub(crate) instructions: InstructionsMeta, + /// Address table lookup metadata. + pub(crate) address_table_lookup: AddressTableLookupMeta, +} + +impl TransactionMeta { + /// Parse a serialized transaction and verify basic structure. + /// The `bytes` parameter must have no trailing data. + pub fn try_new(bytes: &[u8]) -> Result { + let mut offset = 0; + let signature = SignatureMeta::try_new(bytes, &mut offset)?; + let message_header = MessageHeaderMeta::try_new(bytes, &mut offset)?; + let static_account_keys = StaticAccountKeysMeta::try_new(bytes, &mut offset)?; + + // The recent blockhash is the first account key after the static + // account keys. The recent blockhash is always present in a valid + // transaction and has a fixed size of 32 bytes. + let recent_blockhash_offset = offset as u16; + advance_offset_for_type::(bytes, &mut offset)?; + + let instructions = InstructionsMeta::try_new(bytes, &mut offset)?; + let address_table_lookup = match message_header.version { + TransactionVersion::Legacy => AddressTableLookupMeta { + num_address_table_lookup: 0, + offset: 0, + }, + TransactionVersion::V0 => AddressTableLookupMeta::try_new(bytes, &mut offset)?, + }; + + // Verify that the entire transaction was parsed. + if offset != bytes.len() { + return Err(TransactionParsingError); + } + + Ok(Self { + signature, + message_header, + static_account_keys, + recent_blockhash_offset, + instructions, + address_table_lookup, + }) + } + + /// Return the number of signatures in the transaction. + pub fn num_signatures(&self) -> u8 { + self.signature.num_signatures + } + + /// Return the version of the transaction. + pub fn version(&self) -> TransactionVersion { + self.message_header.version + } + + /// Return the number of required signatures in the transaction. + pub fn num_required_signatures(&self) -> u8 { + self.message_header.num_required_signatures + } + + /// Return the number of readonly signed accounts in the transaction. + pub fn num_readonly_signed_accounts(&self) -> u8 { + self.message_header.num_readonly_signed_accounts + } + + /// Return the number of readonly unsigned accounts in the transaction. 
+ pub fn num_readonly_unsigned_accounts(&self) -> u8 { + self.message_header.num_readonly_unsigned_accounts + } + + /// Return the number of static account keys in the transaction. + pub fn num_static_account_keys(&self) -> u8 { + self.static_account_keys.num_static_accounts + } + + /// Return the number of instructions in the transaction. + pub fn num_instructions(&self) -> u16 { + self.instructions.num_instructions + } + + /// Return the number of address table lookups in the transaction. + pub fn num_address_table_lookups(&self) -> u8 { + self.address_table_lookup.num_address_table_lookup + } +} + +// Separate implementation for `unsafe` accessor methods. +impl TransactionMeta { + /// Return the slice of signatures in the transaction. + /// # Safety + /// - This function must be called with the same `bytes` slice that was + /// used to create the `TransactionMeta` instance. + pub unsafe fn signatures(&self, bytes: &[u8]) -> &[Signature] { + core::slice::from_raw_parts( + bytes.as_ptr().add(usize::from(self.signature.offset)) as *const Signature, + usize::from(self.signature.num_signatures), + ) + } + + /// Return the slice of static account keys in the transaction. + /// + /// # Safety + /// - This function must be called with the same `bytes` slice that was + /// used to create the `TransactionMeta` instance. + pub unsafe fn static_account_keys(&self, bytes: &[u8]) -> &[Pubkey] { + core::slice::from_raw_parts( + bytes + .as_ptr() + .add(usize::from(self.static_account_keys.offset)) as *const Pubkey, + usize::from(self.static_account_keys.num_static_accounts), + ) + } + + /// Return the recent blockhash in the transaction. + /// # Safety + /// - This function must be called with the same `bytes` slice that was + /// used to create the `TransactionMeta` instance. + pub unsafe fn recent_blockhash(&self, bytes: &[u8]) -> &Hash { + &*(bytes + .as_ptr() + .add(usize::from(self.recent_blockhash_offset)) as *const Hash) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + address_lookup_table::AddressLookupTableAccount, + message::{v0, Message, MessageHeader, VersionedMessage}, + pubkey::Pubkey, + signature::Signature, + system_instruction, + transaction::VersionedTransaction, + }, + }; + + fn verify_transaction_view_meta(tx: &VersionedTransaction) { + let bytes = bincode::serialize(tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + assert_eq!(meta.signature.num_signatures, tx.signatures.len() as u8); + assert_eq!(meta.signature.offset as usize, 1); + + assert_eq!( + meta.message_header.num_required_signatures, + tx.message.header().num_required_signatures + ); + assert_eq!( + meta.message_header.num_readonly_signed_accounts, + tx.message.header().num_readonly_signed_accounts + ); + assert_eq!( + meta.message_header.num_readonly_unsigned_accounts, + tx.message.header().num_readonly_unsigned_accounts + ); + + assert_eq!( + meta.static_account_keys.num_static_accounts, + tx.message.static_account_keys().len() as u8 + ); + assert_eq!( + meta.instructions.num_instructions, + tx.message.instructions().len() as u16 + ); + assert_eq!( + meta.address_table_lookup.num_address_table_lookup, + tx.message + .address_table_lookups() + .map(|x| x.len() as u8) + .unwrap_or(0) + ); + } + + fn minimally_sized_transaction() -> VersionedTransaction { + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. 
+ message: VersionedMessage::Legacy(Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + account_keys: vec![Pubkey::default()], + recent_blockhash: Hash::default(), + instructions: vec![], + }), + } + } + + fn simple_transfer() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. + message: VersionedMessage::Legacy(Message::new( + &[system_instruction::transfer( + &payer, + &Pubkey::new_unique(), + 1, + )], + Some(&payer), + )), + } + } + + fn simple_transfer_v0() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. + message: VersionedMessage::V0( + v0::Message::try_compile( + &payer, + &[system_instruction::transfer( + &payer, + &Pubkey::new_unique(), + 1, + )], + &[], + Hash::default(), + ) + .unwrap(), + ), + } + } + + fn v0_with_lookup() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + let to = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. + message: VersionedMessage::V0( + v0::Message::try_compile( + &payer, + &[system_instruction::transfer(&payer, &to, 1)], + &[AddressLookupTableAccount { + key: Pubkey::new_unique(), + addresses: vec![to], + }], + Hash::default(), + ) + .unwrap(), + ), + } + } + + #[test] + fn test_minimal_sized_transaction() { + verify_transaction_view_meta(&minimally_sized_transaction()); + } + + #[test] + fn test_simple_transfer() { + verify_transaction_view_meta(&simple_transfer()); + } + + #[test] + fn test_simple_transfer_v0() { + verify_transaction_view_meta(&simple_transfer_v0()); + } + + #[test] + fn test_v0_with_lookup() { + verify_transaction_view_meta(&v0_with_lookup()); + } + + #[test] + fn test_trailing_byte() { + let tx = simple_transfer(); + let mut bytes = bincode::serialize(&tx).unwrap(); + bytes.push(0); + assert!(TransactionMeta::try_new(&bytes).is_err()); + } + + #[test] + fn test_insufficient_bytes() { + let tx = simple_transfer(); + let bytes = bincode::serialize(&tx).unwrap(); + assert!(TransactionMeta::try_new(&bytes[..bytes.len().wrapping_sub(1)]).is_err()); + } + + #[test] + fn test_signature_overflow() { + let tx = simple_transfer(); + let mut bytes = bincode::serialize(&tx).unwrap(); + // Set the number of signatures to u16::MAX + bytes[0] = 0xff; + bytes[1] = 0xff; + bytes[2] = 0xff; + assert!(TransactionMeta::try_new(&bytes).is_err()); + } + + #[test] + fn test_account_key_overflow() { + let tx = simple_transfer(); + let mut bytes = bincode::serialize(&tx).unwrap(); + // Set the number of accounts to u16::MAX + let offset = 1 + core::mem::size_of::() + 3; + bytes[offset] = 0xff; + bytes[offset + 1] = 0xff; + bytes[offset + 2] = 0xff; + assert!(TransactionMeta::try_new(&bytes).is_err()); + } + + #[test] + fn test_instructions_overflow() { + let tx = simple_transfer(); + let mut bytes = bincode::serialize(&tx).unwrap(); + // Set the number of instructions to u16::MAX + let offset = 1 + + core::mem::size_of::() + + 3 + + 1 + + 3 * core::mem::size_of::() + + core::mem::size_of::(); + bytes[offset] = 0xff; + bytes[offset + 1] = 0xff; + bytes[offset + 2] = 0xff; + assert!(TransactionMeta::try_new(&bytes).is_err()); + } + + #[test] + fn test_alt_overflow() { + let tx = simple_transfer_v0(); + let ix_bytes = tx.message.instructions()[0].data.len(); + let mut bytes 
= bincode::serialize(&tx).unwrap(); + // Set the number of address table lookups to 1 without appending any lookup data + let offset = 1 // byte for num signatures + + core::mem::size_of::<Signature>() // signature + + 1 // version byte + + 3 // message header + + 1 // byte for num account keys + + 3 * core::mem::size_of::<Pubkey>() // account keys + + core::mem::size_of::<Hash>() // recent blockhash + + 1 // byte for num instructions + + 1 // program index + + 1 // byte for num accounts + + 2 // bytes for account index + + 1 // byte for data length + + ix_bytes; + bytes[offset] = 0x01; + assert!(TransactionMeta::try_new(&bytes).is_err()); + } + + #[test] + fn test_basic_accessors() { + let tx = simple_transfer(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + assert_eq!(meta.num_signatures(), 1); + assert!(matches!(meta.version(), TransactionVersion::Legacy)); + assert_eq!(meta.num_required_signatures(), 1); + assert_eq!(meta.num_readonly_signed_accounts(), 0); + assert_eq!(meta.num_readonly_unsigned_accounts(), 1); + assert_eq!(meta.num_static_account_keys(), 3); + assert_eq!(meta.num_instructions(), 1); + assert_eq!(meta.num_address_table_lookups(), 0); + + // SAFETY: `bytes` is the same slice used to create `meta`. + unsafe { + let signatures = meta.signatures(&bytes); + assert_eq!(signatures, &tx.signatures); + + let static_account_keys = meta.static_account_keys(&bytes); + assert_eq!(static_account_keys, tx.message.static_account_keys()); + + let recent_blockhash = meta.recent_blockhash(&bytes); + assert_eq!(recent_blockhash, tx.message.recent_blockhash()); + } + } +} From 272cddbefe2c16e8a86062520ac486f0fe2a9e2d Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 13 Aug 2024 16:55:36 -0500 Subject: [PATCH 115/529] Remove tower_save-ms metric counter (#2581) The metric tracked the amount of time taken to store the tower to disk. Moreover, it updated the counter every time VotingService received a VoteOp::PushVote message, which is excessive for this metric. Additionally, the process exits if the operation fails, so the metric isn't useful as a heartbeat either. Replace the metric with a trace log statement --- core/src/voting_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 47d63a4b63a6eb..31ccf5c6885ad5 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -69,13 +69,13 @@ impl VotingService { vote_op: VoteOp, ) { if let VoteOp::PushVote { saved_tower, .. } = &vote_op { - let mut measure = Measure::start("tower_save-ms"); + let mut measure = Measure::start("tower storage save"); if let Err(err) = tower_storage.store(saved_tower) { error!("Unable to save tower to storage: {:?}", err); std::process::exit(1); } measure.stop(); - inc_new_counter_info!("tower_save-ms", measure.as_ms() as usize); + trace!("{measure}"); } let _ = cluster_info.send_transaction( From 743c34542fd979dbbee780f30b2e8389146b6ffc Mon Sep 17 00:00:00 2001 From: Jon C Date: Wed, 14 Aug 2024 00:08:51 +0200 Subject: [PATCH 116/529] test-validator: Improve `expect` messages (#2583) #### Problem As pointed out at https://github.com/anza-xyz/agave/pull/2480#discussion_r1710053543, the expect messages in solana-test-validator are a bit strange since clap ensures that the rpc client will be there. #### Summary of changes Improve the messages with some more information.
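For background, the reason clap can guarantee the RPC client exists is the `requires` relationship declared on the cloning arguments. A minimal sketch of that pattern, written against the clap 2-style builder API with illustrative argument names (not copied from the actual CLI definition):

use clap::{App, Arg};

fn app() -> App<'static, 'static> {
    App::new("solana-test-validator")
        .arg(
            Arg::with_name("json_rpc_url")
                .long("url")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("clone_account")
                .long("clone")
                .takes_value(true)
                // clap rejects `--clone` unless `--url` is also supplied, so
                // code that runs after parsing may `expect` the RPC client.
                .requires("json_rpc_url"),
        )
}

Because clap enforces the dependency at argument-parsing time, the `expect` messages below describe a CLI requirement rather than an internal bug.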
--- validator/src/bin/solana-test-validator.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index bba5a359093370..3d1d47990895bc 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -480,7 +480,7 @@ fn main() { accounts_to_clone, cluster_rpc_client .as_ref() - .expect("bug: --url argument missing?"), + .expect("--clone-account requires --json-rpc-url argument"), false, ) { println!("Error: clone_accounts failed: {e}"); @@ -493,7 +493,7 @@ fn main() { accounts_to_maybe_clone, cluster_rpc_client .as_ref() - .expect("bug: --url argument missing?"), + .expect("--maybe-clone requires --json-rpc-url argument"), true, ) { println!("Error: clone_accounts failed: {e}"); @@ -506,7 +506,7 @@ fn main() { upgradeable_programs_to_clone, cluster_rpc_client .as_ref() - .expect("bug: --url argument missing?"), + .expect("--clone-upgradeable-program requires --json-rpc-url argument"), ) { println!("Error: clone_upgradeable_programs failed: {e}"); exit(1); @@ -517,7 +517,7 @@ fn main() { if let Err(e) = genesis.clone_feature_set( cluster_rpc_client .as_ref() - .expect("bug: --url argument missing?"), + .expect("--clone-feature-set requires --json-rpc-url argument"), ) { println!("Error: clone_feature_set failed: {e}"); exit(1); From e2b7d0ffaf1f566032c33a0fe9a1e2672fa45c54 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Tue, 13 Aug 2024 20:19:28 -0400 Subject: [PATCH 117/529] Replace purges_old_accounts by a boolean field in CleaningInfo (#2566) * Replace purges_old_accounts by a boolean field in CleaningInfo * comments * Move check for purgeable accounts existence to caller * Corrections to Atomic * correction. * Remove checks for no-existent purge old accounts --- accounts-db/src/accounts_db.rs | 253 ++++++++++++++++----------------- 1 file changed, 119 insertions(+), 134 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1474b421f4acfa..feeadee16c623b 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1349,6 +1349,8 @@ impl StoreAccountsTiming { struct CleaningInfo { slot_list: SlotList, ref_count: u64, + /// True for pubkeys mapping to older versions of accounts that should be purged. + should_purge: bool, } /// This is the return type of AccountsDb::construct_candidate_clean_keys. @@ -2684,41 +2686,33 @@ impl AccountsDb { /// These should NOT be unref'd later from the accounts index. fn clean_accounts_older_than_root( &self, - purges: Vec, + candidates: &[RwLock>], max_clean_root_inclusive: Option, ancient_account_cleans: &AtomicU64, epoch_schedule: &EpochSchedule, ) -> (ReclaimResult, PubkeysRemovedFromAccountsIndex) { let pubkeys_removed_from_accounts_index = HashSet::default(); - if purges.is_empty() { - return ( - ReclaimResult::default(), - pubkeys_removed_from_accounts_index, - ); - } - // This number isn't carefully chosen; just guessed randomly such that - // the hot loop will be the order of ~Xms. 
- const INDEX_CLEAN_BULK_COUNT: usize = 4096; - let one_epoch_old = self.get_oldest_non_ancient_slot(epoch_schedule); let pubkeys_removed_from_accounts_index = Mutex::new(pubkeys_removed_from_accounts_index); let mut clean_rooted = Measure::start("clean_old_root-ms"); - let reclaim_vecs = purges - .par_chunks(INDEX_CLEAN_BULK_COUNT) - .filter_map(|pubkeys: &[Pubkey]| { + let reclaim_vecs = candidates + .par_iter() + .filter_map(|candidates_bin| { let mut reclaims = Vec::new(); - for pubkey in pubkeys { - let removed_from_index = self.accounts_index.clean_rooted_entries( - pubkey, - &mut reclaims, - max_clean_root_inclusive, - ); - if removed_from_index { - pubkeys_removed_from_accounts_index - .lock() - .unwrap() - .insert(*pubkey); + for (pubkey, cleaning_info) in candidates_bin.read().unwrap().iter() { + if cleaning_info.should_purge { + let removed_from_index = self.accounts_index.clean_rooted_entries( + pubkey, + &mut reclaims, + max_clean_root_inclusive, + ); + if removed_from_index { + pubkeys_removed_from_accounts_index + .lock() + .unwrap() + .insert(*pubkey); + } } } @@ -2791,6 +2785,7 @@ impl AccountsDb { CleaningInfo { slot_list, ref_count, + should_purge: _, }, ) in bin.iter().filter(|x| !x.1.slot_list.is_empty()) { @@ -3286,128 +3281,112 @@ impl AccountsDb { let useful_accum = AtomicU64::new(0); // parallel scan the index. - let purges_old_accounts = { - let do_clean_scan = || { - candidates - .par_iter() - .map(|candidates_bin| { - let mut purges_old_accounts = Vec::new(); - let mut found_not_zero = 0; - let mut not_found_on_fork = 0; - let mut missing = 0; - let mut useful = 0; - let mut candidates_bin = candidates_bin.write().unwrap(); - // Iterate over each HashMap entry to - // avoid capturing the HashMap in the - // closure passed to scan thus making - // conflicting read and write borrows. - candidates_bin - .iter_mut() - .for_each(|(candidate_pubkey, candidate_info)| { - self.accounts_index.scan( - [*candidate_pubkey].iter(), - |candidate_pubkey, slot_list_and_ref_count, _entry| { - let mut useless = true; - if let Some((slot_list, ref_count)) = - slot_list_and_ref_count - { - // find the highest rooted slot in the slot list - let index_in_slot_list = - self.accounts_index.latest_slot( - None, - slot_list, - max_clean_root_inclusive, - ); + let do_clean_scan = || { + candidates.par_iter().for_each(|candidates_bin| { + let mut found_not_zero = 0; + let mut not_found_on_fork = 0; + let mut missing = 0; + let mut useful = 0; + let mut candidates_bin = candidates_bin.write().unwrap(); + // Iterate over each HashMap entry to + // avoid capturing the HashMap in the + // closure passed to scan thus making + // conflicting read and write borrows. + candidates_bin + .iter_mut() + .for_each(|(candidate_pubkey, candidate_info)| { + self.accounts_index.scan( + [*candidate_pubkey].iter(), + |_candidate_pubkey, slot_list_and_ref_count, _entry| { + let mut useless = true; + if let Some((slot_list, ref_count)) = slot_list_and_ref_count { + // find the highest rooted slot in the slot list + let index_in_slot_list = self.accounts_index.latest_slot( + None, + slot_list, + max_clean_root_inclusive, + ); - match index_in_slot_list { - Some(index_in_slot_list) => { - // found info relative to max_clean_root - let (slot, account_info) = - &slot_list[index_in_slot_list]; - if account_info.is_zero_lamport() { - useless = false; - // The latest one is zero lamports. We may be able to purge it. - // Add all the rooted entries that contain this pubkey. 
- // We know the highest rooted entry is zero lamports. - candidate_info.slot_list = - self.accounts_index.get_rooted_entries( - slot_list, - max_clean_root_inclusive, - ); - candidate_info.ref_count = ref_count; - } else { - found_not_zero += 1; - } - if uncleaned_roots.contains(slot) { - // Assertion enforced by `accounts_index.get()`, the latest slot - // will not be greater than the given `max_clean_root` - if let Some(max_clean_root_inclusive) = - max_clean_root_inclusive - { - assert!( - slot <= &max_clean_root_inclusive - ); - } - if slot_list.len() > 1 { - // no need to purge old accounts if there is only 1 slot in the slot list - purges_old_accounts - .push(*candidate_pubkey); - useless = false; - } else { - self.clean_accounts_stats - .uncleaned_roots_slot_list_1 - .fetch_add(1, Ordering::Relaxed); - } - } + match index_in_slot_list { + Some(index_in_slot_list) => { + // found info relative to max_clean_root + let (slot, account_info) = + &slot_list[index_in_slot_list]; + if account_info.is_zero_lamport() { + useless = false; + // The latest one is zero lamports. We may be able to purge it. + // Add all the rooted entries that contain this pubkey. + // We know the highest rooted entry is zero lamports. + candidate_info.slot_list = + self.accounts_index.get_rooted_entries( + slot_list, + max_clean_root_inclusive, + ); + candidate_info.ref_count = ref_count; + } else { + found_not_zero += 1; + } + if uncleaned_roots.contains(slot) { + // Assertion enforced by `accounts_index.get()`, the latest slot + // will not be greater than the given `max_clean_root` + if let Some(max_clean_root_inclusive) = + max_clean_root_inclusive + { + assert!(slot <= &max_clean_root_inclusive); } - None => { - // This pubkey is in the index but not in a root slot, so clean - // it up by adding it to the to-be-purged list. - // - // Also, this pubkey must have been touched by some slot since - // it was in the dirty list, so we assume that the slot it was - // touched in must be unrooted. - not_found_on_fork += 1; + if slot_list.len() > 1 { + // no need to purge old accounts if there is only 1 slot in the slot list + candidate_info.should_purge = true; useless = false; - purges_old_accounts.push(*candidate_pubkey); + } else { + self.clean_accounts_stats + .uncleaned_roots_slot_list_1 + .fetch_add(1, Ordering::Relaxed); } } - } else { - missing += 1; } - if !useless { - useful += 1; + None => { + // This pubkey is in the index but not in a root slot, so clean + // it up by adding it to the to-be-purged list. + // + // Also, this pubkey must have been touched by some slot since + // it was in the dirty list, so we assume that the slot it was + // touched in must be unrooted. + not_found_on_fork += 1; + candidate_info.should_purge = true; + useless = false; } - AccountsIndexScanResult::OnlyKeepInMemoryIfDirty - }, - None, - false, - ); - }); - found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed); - not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed); - missing_accum.fetch_add(missing, Ordering::Relaxed); - useful_accum.fetch_add(useful, Ordering::Relaxed); - purges_old_accounts - }) - .reduce(Vec::new, |mut a, b| { - // Collapse down the vecs into one. 
- a.extend(b); - a - }) - }; - if is_startup { - do_clean_scan() - } else { - self.thread_pool_clean.install(do_clean_scan) - } + } + } else { + missing += 1; + } + if !useless { + useful += 1; + } + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty + }, + None, + false, + ); + }); + found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed); + not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed); + missing_accum.fetch_add(missing, Ordering::Relaxed); + useful_accum.fetch_add(useful, Ordering::Relaxed); + }); }; + if is_startup { + do_clean_scan(); + } else { + self.thread_pool_clean.install(do_clean_scan); + } + accounts_scan.stop(); let mut clean_old_rooted = Measure::start("clean_old_roots"); let ((purged_account_slots, removed_accounts), mut pubkeys_removed_from_accounts_index) = self.clean_accounts_older_than_root( - purges_old_accounts, + &candidates, max_clean_root_inclusive, &ancient_account_cleans, epoch_schedule, @@ -3427,6 +3406,7 @@ impl AccountsDb { CleaningInfo { slot_list, ref_count, + should_purge: _, }, ) in candidates_bin.write().unwrap().iter_mut() { @@ -3508,6 +3488,7 @@ impl AccountsDb { let CleaningInfo { slot_list, ref_count: _, + should_purge: _, } = cleaning_info; (!slot_list.is_empty()).then_some(( *pubkey, @@ -3784,6 +3765,7 @@ impl AccountsDb { let CleaningInfo { slot_list, ref_count: _, + should_purge: _, } = cleaning_info; if slot_list.is_empty() { return false; @@ -12817,6 +12799,7 @@ pub mod tests { CleaningInfo { slot_list: rooted_entries, ref_count, + should_purge: false, }, ); } @@ -12827,6 +12810,7 @@ pub mod tests { CleaningInfo { slot_list: list, ref_count, + should_purge: _, }, ) in candidates_bin.iter() { @@ -15131,6 +15115,7 @@ pub mod tests { CleaningInfo { slot_list: vec![(slot, account_info)], ref_count: 1, + should_purge: false, }, ); let accounts_db = AccountsDb::new_single_for_tests(); From f5ea392dfee9550cf8cfae09371d74d40946d143 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 14 Aug 2024 01:12:11 -0500 Subject: [PATCH 118/529] sdk: Use Duration methods in duration_as_*() (#2584) The functions manually computed seconds/millis/micros/nanos from a Duration. The functions look to have been written before Duration natively supported methods to calculate these different units. So, remove our manual integer math and defer to whatever std::time::Duration does --- sdk/src/timing.rs | 81 ++++------------------------------------------- 1 file changed, 7 insertions(+), 74 deletions(-) diff --git a/sdk/src/timing.rs b/sdk/src/timing.rs index f6a72fcb7c8213..beccb37841ce01 100644 --- a/sdk/src/timing.rs +++ b/sdk/src/timing.rs @@ -1,35 +1,23 @@ //! The `timing` module provides std::time utility functions. 
-use { - crate::unchecked_div_by_const, - std::{ - sync::atomic::{AtomicU64, Ordering}, - time::{Duration, SystemTime, UNIX_EPOCH}, - }, +use std::{ + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, SystemTime, UNIX_EPOCH}, }; pub fn duration_as_ns(d: &Duration) -> u64 { - d.as_secs() - .saturating_mul(1_000_000_000) - .saturating_add(u64::from(d.subsec_nanos())) + d.as_nanos() as u64 } pub fn duration_as_us(d: &Duration) -> u64 { - d.as_secs() - .saturating_mul(1_000_000) - .saturating_add(unchecked_div_by_const!(u64::from(d.subsec_nanos()), 1_000)) + d.as_micros() as u64 } pub fn duration_as_ms(d: &Duration) -> u64 { - d.as_secs() - .saturating_mul(1000) - .saturating_add(unchecked_div_by_const!( - u64::from(d.subsec_nanos()), - 1_000_000 - )) + d.as_millis() as u64 } pub fn duration_as_s(d: &Duration) -> f32 { - d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0) + d.as_secs_f32() } /// return timestamp as ms @@ -167,59 +155,4 @@ mod test { Duration::from_millis(1000) * ticks_per_slot ); } - - #[test] - fn test_duration_as() { - // zero - let test_zero = Duration::from_nanos(0); - assert_eq!(duration_as_ns(&test_zero), 0); - assert_eq!(duration_as_us(&test_zero), 0); - assert_eq!(duration_as_ms(&test_zero), 0); - assert!((duration_as_s(&test_zero) - 0f32) <= f32::EPSILON); - // min non-zero for each unit - let test_1ns = Duration::from_nanos(1); - assert_eq!(duration_as_ns(&test_1ns), 1); - assert_eq!(duration_as_us(&test_1ns), 0); - assert_eq!(duration_as_ms(&test_1ns), 0); - assert!((duration_as_s(&test_1ns) - 0.000_000_001f32) <= f32::EPSILON); - let test_1ns = Duration::from_micros(1); - assert_eq!(duration_as_ns(&test_1ns), 1_000); - assert_eq!(duration_as_us(&test_1ns), 1); - assert_eq!(duration_as_ms(&test_1ns), 0); - assert!((duration_as_s(&test_1ns) - 0.000_001f32) <= f32::EPSILON); - let test_1ns = Duration::from_millis(1); - assert_eq!(duration_as_ns(&test_1ns), 1_000_000); - assert_eq!(duration_as_us(&test_1ns), 1_000); - assert_eq!(duration_as_ms(&test_1ns), 1); - assert!((duration_as_s(&test_1ns) - 0.001f32) <= f32::EPSILON); - let test_1ns = Duration::from_secs(1); - assert_eq!(duration_as_ns(&test_1ns), 1_000_000_000); - assert_eq!(duration_as_us(&test_1ns), 1_000_000); - assert_eq!(duration_as_ms(&test_1ns), 1_000); - assert!((duration_as_s(&test_1ns) - 1f32) <= f32::EPSILON); - // max without error for each unit (except secs, 'cause if you use floats - // you deserve to get got) - const DUR_MAX_SECS: u64 = Duration::MAX.as_secs(); - const NS_PER_SEC: u64 = 1_000_000_000; - let max_as_ns_secs = DUR_MAX_SECS / NS_PER_SEC; - let max_as_ns_ns = (DUR_MAX_SECS % NS_PER_SEC) as u32; - let max_as_ns = Duration::new(max_as_ns_secs, max_as_ns_ns); - assert_eq!(max_as_ns_secs, 18_446_744_073); - assert_eq!(max_as_ns_ns, 709_551_615); - assert_eq!(duration_as_ns(&max_as_ns), u64::MAX); - const US_PER_SEC: u64 = 1_000_000; - let max_as_us_secs = DUR_MAX_SECS / US_PER_SEC; - let max_as_us_ns = (DUR_MAX_SECS % US_PER_SEC) as u32; - let max_as_us = Duration::new(max_as_us_secs, max_as_us_ns * 1_000); - assert_eq!(max_as_us_secs, 18_446_744_073_709); - assert_eq!(max_as_us_ns, 551_615); - assert_eq!(duration_as_us(&max_as_us), u64::MAX); - const MS_PER_SEC: u64 = 1_000; - let max_as_ms_secs = DUR_MAX_SECS / MS_PER_SEC; - let max_as_ms_ns = (DUR_MAX_SECS % MS_PER_SEC) as u32; - let max_as_ms = Duration::new(max_as_ms_secs, max_as_ms_ns * 1_000_000); - assert_eq!(max_as_ms_secs, 18_446_744_073_709_551); - assert_eq!(max_as_ms_ns, 615); - 
assert_eq!(duration_as_ms(&max_as_ms), u64::MAX); - } } From b8de432065d2a36853f9a839e542f7dd4386b541 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Wed, 14 Aug 2024 10:47:00 -0400 Subject: [PATCH 119/529] Refactor process_compute_budget_instructions to separate processing and sanitizing parts (#2582) --- .../src/compute_budget_instruction_details.rs | 518 ++++++++++++++++++ .../src/instructions_processor.rs | 101 +--- runtime-transaction/src/lib.rs | 1 + 3 files changed, 527 insertions(+), 93 deletions(-) create mode 100644 runtime-transaction/src/compute_budget_instruction_details.rs diff --git a/runtime-transaction/src/compute_budget_instruction_details.rs b/runtime-transaction/src/compute_budget_instruction_details.rs new file mode 100644 index 00000000000000..638d8d8bc3afaa --- /dev/null +++ b/runtime-transaction/src/compute_budget_instruction_details.rs @@ -0,0 +1,518 @@ +use { + solana_compute_budget::compute_budget_limits::*, + solana_sdk::{ + borsh1::try_from_slice_unchecked, + compute_budget::{self, ComputeBudgetInstruction}, + instruction::{CompiledInstruction, InstructionError}, + pubkey::Pubkey, + saturating_add_assign, + transaction::{Result, TransactionError}, + }, + std::num::NonZeroU32, +}; + +#[cfg_attr(test, derive(Eq, PartialEq))] +#[derive(Default, Debug)] +pub(crate) struct ComputeBudgetInstructionDetails { + // compute-budget instruction details: + // the first field in tuple is instruction index, second field is the unsanitized value set by user + requested_compute_unit_limit: Option<(u8, u32)>, + requested_compute_unit_price: Option<(u8, u64)>, + requested_heap_size: Option<(u8, u32)>, + requested_loaded_accounts_data_size_limit: Option<(u8, u32)>, + num_non_compute_budget_instructions: u32, +} + +impl ComputeBudgetInstructionDetails { + pub fn try_from<'a>( + instructions: impl Iterator, + ) -> Result { + let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); + for (i, (program_id, instruction)) in instructions.enumerate() { + compute_budget_instruction_details.process_instruction( + i as u8, + program_id, + instruction, + )?; + } + + Ok(compute_budget_instruction_details) + } + + pub fn sanitize_and_convert_to_compute_budget_limits(&self) -> Result { + // Sanitize requested heap size + let updated_heap_bytes = + if let Some((index, requested_heap_size)) = self.requested_heap_size { + if Self::sanitize_requested_heap_size(requested_heap_size) { + requested_heap_size + } else { + return Err(TransactionError::InstructionError( + index, + InstructionError::InvalidInstructionData, + )); + } + } else { + MIN_HEAP_FRAME_BYTES + } + .min(MAX_HEAP_FRAME_BYTES); + + // Calculate compute unit limit + let compute_unit_limit = self + .requested_compute_unit_limit + .map_or_else( + || { + self.num_non_compute_budget_instructions + .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) + }, + |(_index, requested_compute_unit_limit)| requested_compute_unit_limit, + ) + .min(MAX_COMPUTE_UNIT_LIMIT); + + let compute_unit_price = self + .requested_compute_unit_price + .map_or(0, |(_index, requested_compute_unit_price)| { + requested_compute_unit_price + }); + + let loaded_accounts_bytes = + if let Some((_index, requested_loaded_accounts_data_size_limit)) = + self.requested_loaded_accounts_data_size_limit + { + NonZeroU32::new(requested_loaded_accounts_data_size_limit) + .ok_or(TransactionError::InvalidLoadedAccountsDataSizeLimit)? 
+ } else { + MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + } + .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); + + Ok(ComputeBudgetLimits { + updated_heap_bytes, + compute_unit_limit, + compute_unit_price, + loaded_accounts_bytes, + }) + } + + fn process_instruction<'a>( + &mut self, + index: u8, + program_id: &'a Pubkey, + instruction: &'a CompiledInstruction, + ) -> Result<()> { + if compute_budget::check_id(program_id) { + let invalid_instruction_data_error = + TransactionError::InstructionError(index, InstructionError::InvalidInstructionData); + let duplicate_instruction_error = TransactionError::DuplicateInstruction(index); + + match try_from_slice_unchecked(&instruction.data) { + Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { + if self.requested_heap_size.is_some() { + return Err(duplicate_instruction_error); + } + self.requested_heap_size = Some((index, bytes)); + } + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { + if self.requested_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); + } + self.requested_compute_unit_limit = Some((index, compute_unit_limit)); + } + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { + if self.requested_compute_unit_price.is_some() { + return Err(duplicate_instruction_error); + } + self.requested_compute_unit_price = Some((index, micro_lamports)); + } + Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) => { + if self.requested_loaded_accounts_data_size_limit.is_some() { + return Err(duplicate_instruction_error); + } + self.requested_loaded_accounts_data_size_limit = Some((index, bytes)); + } + _ => return Err(invalid_instruction_data_error), + } + } else { + saturating_add_assign!(self.num_non_compute_budget_instructions, 1); + } + + Ok(()) + } + + fn sanitize_requested_heap_size(bytes: u32) -> bool { + (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes % 1024 == 0 + } +} + +#[cfg(test)] +mod test { + use {super::*, solana_sdk::instruction::Instruction}; + + fn setup_test_instruction( + index: u8, + instruction: Instruction, + ) -> (Pubkey, CompiledInstruction) { + ( + instruction.program_id, + CompiledInstruction { + program_id_index: index, + data: instruction.data.clone(), + accounts: vec![], + }, + ) + } + + #[test] + fn test_process_instruction_request_heap() { + let mut index = 0; + let mut expected_details = ComputeBudgetInstructionDetails::default(); + let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); + + // irrelevant instruction makes no change + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions = 1; + assert_eq!(compute_budget_instruction_details, expected_details); + + // valid instruction + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + ); + expected_details.requested_heap_size = Some((index, 40 * 1024)); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + assert_eq!(compute_budget_instruction_details, expected_details); + + // duplicate instruction results error + index += 1; + let expected_err = Err(TransactionError::DuplicateInstruction(index)); + let (program_id, ix) = setup_test_instruction( + index, 
+ ComputeBudgetInstruction::request_heap_frame(50 * 1024), + ); + assert_eq!( + compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + expected_err + ); + assert_eq!(compute_budget_instruction_details, expected_details); + + // irrelevant instruction makes no change + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions += 1; + assert_eq!(compute_budget_instruction_details, expected_details); + } + + #[test] + fn test_process_instruction_compute_unit_limit() { + let mut index = 0; + let mut expected_details = ComputeBudgetInstructionDetails::default(); + let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); + + // irrelevant instruction makes no change + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions = 1; + assert_eq!(compute_budget_instruction_details, expected_details); + + // valid instruction, + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX), + ); + expected_details.requested_compute_unit_limit = Some((index, u32::MAX)); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + assert_eq!(compute_budget_instruction_details, expected_details); + + // duplicate instruction results error + index += 1; + let expected_err = Err(TransactionError::DuplicateInstruction(index)); + let (program_id, ix) = setup_test_instruction( + index, + ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), + ); + assert_eq!( + compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + expected_err + ); + assert_eq!(compute_budget_instruction_details, expected_details); + + // irrelevant instruction makes no change + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions += 1; + assert_eq!(compute_budget_instruction_details, expected_details); + } + + #[test] + fn test_process_instruction_compute_unit_price() { + let mut index = 0; + let mut expected_details = ComputeBudgetInstructionDetails::default(); + let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); + + // irrelevant instruction makes no change + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions = 1; + assert_eq!(compute_budget_instruction_details, expected_details); + + // valid instruction, + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ); + expected_details.requested_compute_unit_price = Some((index, u64::MAX)); + 
assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + assert_eq!(compute_budget_instruction_details, expected_details); + + // duplicate instruction results error + index += 1; + let expected_err = Err(TransactionError::DuplicateInstruction(index)); + let (program_id, ix) = + setup_test_instruction(index, ComputeBudgetInstruction::set_compute_unit_price(0)); + assert_eq!( + compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + expected_err + ); + assert_eq!(compute_budget_instruction_details, expected_details); + + // irrelevant instruction makes no change + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions += 1; + assert_eq!(compute_budget_instruction_details, expected_details); + } + + #[test] + fn test_process_instruction_loaded_accounts_data_size_limit() { + let mut index = 0; + let mut expected_details = ComputeBudgetInstructionDetails::default(); + let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); + + // irrelevant instruction makes no change + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions = 1; + assert_eq!(compute_budget_instruction_details, expected_details); + + // valid instruction, + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(u32::MAX), + ); + expected_details.requested_loaded_accounts_data_size_limit = Some((index, u32::MAX)); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + assert_eq!(compute_budget_instruction_details, expected_details); + + // duplicate instruction results error + index += 1; + let expected_err = Err(TransactionError::DuplicateInstruction(index)); + let (program_id, ix) = setup_test_instruction( + index, + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0), + ); + assert_eq!( + compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + expected_err + ); + assert_eq!(compute_budget_instruction_details, expected_details); + + // irrelevant instruction makes no change + index += 1; + let (program_id, ix) = setup_test_instruction( + index, + Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + ); + assert!(compute_budget_instruction_details + .process_instruction(index, &program_id, &ix) + .is_ok()); + expected_details.num_non_compute_budget_instructions += 1; + assert_eq!(compute_budget_instruction_details, expected_details); + } + + #[test] + fn test_sanitize_and_convert_to_compute_budget_limits() { + // empty details, default ComputeBudgetLimits with 0 compute_unit_limits + let instruction_details = ComputeBudgetInstructionDetails::default(); + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + Ok(ComputeBudgetLimits { + compute_unit_limit: 0, + ..ComputeBudgetLimits::default() + }) + ); + + let num_non_compute_budget_instructions = 4; + + // no compute-budget instructions, all default ComputeBudgetLimits except 
cu-limit + let instruction_details = ComputeBudgetInstructionDetails { + num_non_compute_budget_instructions, + ..ComputeBudgetInstructionDetails::default() + }; + let expected_compute_unit_limit = + num_non_compute_budget_instructions * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + Ok(ComputeBudgetLimits { + compute_unit_limit: expected_compute_unit_limit, + ..ComputeBudgetLimits::default() + }) + ); + + let expected_heap_size_err = Err(TransactionError::InstructionError( + 3, + InstructionError::InvalidInstructionData, + )); + // invalid: requested_heap_size can't be zero + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, 0)), + requested_compute_unit_price: Some((2, 0)), + requested_heap_size: Some((3, 0)), + requested_loaded_accounts_data_size_limit: Some((4, 1024)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + expected_heap_size_err + ); + + // invalid: requested_heap_size can't be less than MIN_HEAP_FRAME_BYTES + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, 0)), + requested_compute_unit_price: Some((2, 0)), + requested_heap_size: Some((3, MIN_HEAP_FRAME_BYTES - 1)), + requested_loaded_accounts_data_size_limit: Some((4, 1024)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + expected_heap_size_err + ); + + // invalid: requested_heap_size can't be more than MAX_HEAP_FRAME_BYTES + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, 0)), + requested_compute_unit_price: Some((2, 0)), + requested_heap_size: Some((3, MAX_HEAP_FRAME_BYTES + 1)), + requested_loaded_accounts_data_size_limit: Some((4, 1024)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + expected_heap_size_err + ); + + // invalid: requested_heap_size must be round by 1024 + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, 0)), + requested_compute_unit_price: Some((2, 0)), + requested_heap_size: Some((3, MIN_HEAP_FRAME_BYTES + 1024 + 1)), + requested_loaded_accounts_data_size_limit: Some((4, 1024)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + expected_heap_size_err + ); + + // invalid: loaded_account_data_size can't be zero + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, 0)), + requested_compute_unit_price: Some((2, 0)), + requested_heap_size: Some((3, 40 * 1024)), + requested_loaded_accounts_data_size_limit: Some((4, 0)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + Err(TransactionError::InvalidLoadedAccountsDataSizeLimit) + ); + + // valid: acceptable MAX + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, u32::MAX)), + requested_compute_unit_price: Some((2, u64::MAX)), + requested_heap_size: Some((3, MAX_HEAP_FRAME_BYTES)), + requested_loaded_accounts_data_size_limit: Some((4, u32::MAX)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + 
Ok(ComputeBudgetLimits { + updated_heap_bytes: MAX_HEAP_FRAME_BYTES, + compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, + compute_unit_price: u64::MAX, + loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + }) + ); + + // valid + let val: u32 = 1024 * 40; + let instruction_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, val)), + requested_compute_unit_price: Some((2, val as u64)), + requested_heap_size: Some((3, val)), + requested_loaded_accounts_data_size_limit: Some((4, val)), + num_non_compute_budget_instructions, + }; + assert_eq!( + instruction_details.sanitize_and_convert_to_compute_budget_limits(), + Ok(ComputeBudgetLimits { + updated_heap_bytes: val, + compute_unit_limit: val, + compute_unit_price: val as u64, + loaded_accounts_bytes: NonZeroU32::new(val).unwrap(), + }) + ); + } +} diff --git a/runtime-transaction/src/instructions_processor.rs b/runtime-transaction/src/instructions_processor.rs index 06c0f265712e7c..d220f1a2f36e88 100644 --- a/runtime-transaction/src/instructions_processor.rs +++ b/runtime-transaction/src/instructions_processor.rs @@ -1,13 +1,7 @@ use { + crate::compute_budget_instruction_details::*, solana_compute_budget::compute_budget_limits::*, - solana_sdk::{ - borsh1::try_from_slice_unchecked, - compute_budget::{self, ComputeBudgetInstruction}, - instruction::{CompiledInstruction, InstructionError}, - pubkey::Pubkey, - transaction::TransactionError, - }, - std::num::NonZeroU32, + solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey, transaction::TransactionError}, }; /// Processing compute_budget could be part of tx sanitizing, failed to process @@ -18,89 +12,8 @@ use { pub fn process_compute_budget_instructions<'a>( instructions: impl Iterator, ) -> Result { - let mut num_non_compute_budget_instructions: u32 = 0; - let mut updated_compute_unit_limit = None; - let mut updated_compute_unit_price = None; - let mut requested_heap_size = None; - let mut updated_loaded_accounts_data_size_limit = None; - - for (i, (program_id, instruction)) in instructions.enumerate() { - if compute_budget::check_id(program_id) { - let invalid_instruction_data_error = TransactionError::InstructionError( - i as u8, - InstructionError::InvalidInstructionData, - ); - let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); - - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - if sanitize_requested_heap_size(bytes) { - requested_heap_size = Some(bytes); - } else { - return Err(invalid_instruction_data_error); - } - } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if updated_compute_unit_price.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_price = Some(micro_lamports); - } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) => { - if updated_loaded_accounts_data_size_limit.is_some() { - return Err(duplicate_instruction_error); - } - updated_loaded_accounts_data_size_limit = Some( - NonZeroU32::new(bytes) - .ok_or(TransactionError::InvalidLoadedAccountsDataSizeLimit)?, - ); - } - _ => return Err(invalid_instruction_data_error), - } - } else { - // only include 
non-request instructions in default max calc - num_non_compute_budget_instructions = - num_non_compute_budget_instructions.saturating_add(1); - } - } - - // sanitize limits - let updated_heap_bytes = requested_heap_size - .unwrap_or(MIN_HEAP_FRAME_BYTES) // loader's default heap_size - .min(MAX_HEAP_FRAME_BYTES); - - let compute_unit_limit = updated_compute_unit_limit - .unwrap_or_else(|| { - num_non_compute_budget_instructions - .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) - }) - .min(MAX_COMPUTE_UNIT_LIMIT); - - let compute_unit_price = updated_compute_unit_price.unwrap_or(0); - - let loaded_accounts_bytes = updated_loaded_accounts_data_size_limit - .unwrap_or(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES) - .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); - - Ok(ComputeBudgetLimits { - updated_heap_bytes, - compute_unit_limit, - compute_unit_price, - loaded_accounts_bytes, - }) -} - -fn sanitize_requested_heap_size(bytes: u32) -> bool { - (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes % 1024 == 0 + ComputeBudgetInstructionDetails::try_from(instructions)? + .sanitize_and_convert_to_compute_budget_limits() } #[cfg(test)] @@ -108,15 +21,17 @@ mod tests { use { super::*, solana_sdk::{ + compute_budget::ComputeBudgetInstruction, hash::Hash, - instruction::Instruction, + instruction::{Instruction, InstructionError}, message::Message, pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction::{self}, - transaction::{SanitizedTransaction, Transaction}, + transaction::{SanitizedTransaction, Transaction, TransactionError}, }, + std::num::NonZeroU32, }; macro_rules! test { diff --git a/runtime-transaction/src/lib.rs b/runtime-transaction/src/lib.rs index 4b980ce5dd92bc..011df606d59cf3 100644 --- a/runtime-transaction/src/lib.rs +++ b/runtime-transaction/src/lib.rs @@ -1,6 +1,7 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] +mod compute_budget_instruction_details; pub mod instructions_processor; pub mod runtime_transaction; pub mod transaction_meta; From 472d8484ebb57f4998670cfc15ff5fb7aa0ac2c9 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 14 Aug 2024 10:50:17 -0500 Subject: [PATCH 120/529] TransactionView: InstructionsIterator (#2580) --- Cargo.lock | 1 + transaction-view/Cargo.toml | 1 + transaction-view/src/instructions_meta.rs | 73 ++++++++++++++++++++++- transaction-view/src/transaction_meta.rs | 37 +++++++++++- 4 files changed, 107 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a70e10d9031b55..1c361111cea12e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -227,6 +227,7 @@ dependencies = [ "bincode", "criterion", "solana-sdk", + "solana-svm-transaction", ] [[package]] diff --git a/transaction-view/Cargo.toml b/transaction-view/Cargo.toml index 0b3f4e828c969d..25fef46304e595 100644 --- a/transaction-view/Cargo.toml +++ b/transaction-view/Cargo.toml @@ -11,6 +11,7 @@ edition = { workspace = true } [dependencies] solana-sdk = { workspace = true } +solana-svm-transaction = { workspace = true } [dev-dependencies] # See order-crates-for-publishing.py for using this unusual `path = "."` diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_meta.rs index ad380f1724548a..9a6d5e3dd72c0a 100644 --- a/transaction-view/src/instructions_meta.rs +++ b/transaction-view/src/instructions_meta.rs @@ -1,6 +1,11 @@ -use crate::{ - bytes::{advance_offset_for_array, check_remaining, optimized_read_compressed_u16, read_byte}, - result::Result, 
+use { + crate::{ + bytes::{ + advance_offset_for_array, check_remaining, optimized_read_compressed_u16, read_byte, + }, + result::Result, + }, + solana_svm_transaction::instruction::SVMInstruction, }; /// Contains metadata about the instructions in a transaction packet. @@ -64,6 +69,68 @@ impl InstructionsMeta { } } +pub struct InstructionsIterator<'a> { + pub(crate) bytes: &'a [u8], + pub(crate) offset: usize, + pub(crate) num_instructions: u16, + pub(crate) index: u16, +} + +impl<'a> Iterator for InstructionsIterator<'a> { + type Item = SVMInstruction<'a>; + + fn next(&mut self) -> Option<Self::Item> { + if self.index < self.num_instructions { + // Each instruction has 3 pieces: + // 1. Program ID index (u8) + // 2. Accounts indexes ([u8]) + // 3. Data ([u8]) + + // Read the program ID index. + let program_id_index = read_byte(self.bytes, &mut self.offset).ok()?; + + // Read the number of account indexes, and then update the offset + // to skip over the account indexes. + let num_accounts = optimized_read_compressed_u16(self.bytes, &mut self.offset).ok()?; + // SAFETY: Only returned after we check that there are enough bytes. + let accounts = unsafe { + core::slice::from_raw_parts( + self.bytes.as_ptr().add(self.offset), + usize::from(num_accounts), + ) + }; + advance_offset_for_array::<u8>(self.bytes, &mut self.offset, num_accounts).ok()?; + + // Read the length of the data, and then update the offset to skip + // over the data. + let data_len = optimized_read_compressed_u16(self.bytes, &mut self.offset).ok()?; + // SAFETY: Only returned after we check that there are enough bytes. + let data = unsafe { + core::slice::from_raw_parts( + self.bytes.as_ptr().add(self.offset), + usize::from(data_len), + ) + }; + advance_offset_for_array::<u8>(self.bytes, &mut self.offset, data_len).ok()?; + self.index = self.index.wrapping_add(1); + + Some(SVMInstruction { + program_id_index, + accounts, + data, + }) + } else { + None + } + } +} + +impl ExactSizeIterator for InstructionsIterator<'_> { + fn len(&self) -> usize { + usize::from(self.num_instructions.wrapping_sub(self.index)) + } +} + #[cfg(test)] mod tests { use { diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs index 6547823a2c35ea..d467448c2ff500 100644 --- a/transaction-view/src/transaction_meta.rs +++ b/transaction-view/src/transaction_meta.rs @@ -2,7 +2,7 @@ use { crate::{ address_table_lookup_meta::AddressTableLookupMeta, bytes::advance_offset_for_type, - instructions_meta::InstructionsMeta, + instructions_meta::{InstructionsIterator, InstructionsMeta}, message_header_meta::{MessageHeaderMeta, TransactionVersion}, result::{Result, TransactionParsingError}, signature_meta::SignatureMeta, @@ -142,6 +142,19 @@ impl TransactionMeta { .as_ptr() .add(usize::from(self.recent_blockhash_offset)) as *const Hash) } + + /// Return an iterator over the instructions in the transaction. + /// # Safety + /// - This function must be called with the same `bytes` slice that was + /// used to create the `TransactionMeta` instance.
+ pub unsafe fn instructions_iter<'a>(&self, bytes: &'a [u8]) -> InstructionsIterator<'a> { + InstructionsIterator { + bytes, + offset: usize::from(self.instructions.offset), + num_instructions: self.instructions.num_instructions, + index: 0, + } + } } #[cfg(test)] @@ -153,7 +166,7 @@ mod tests { use { super::*, solana_sdk::{ address_lookup_table::AddressLookupTableAccount, message::{v0, Message, MessageHeader, VersionedMessage}, pubkey::Pubkey, signature::Signature, - system_instruction, + system_instruction::{self, SystemInstruction}, transaction::VersionedTransaction, }, }; @@ -391,4 +404,24 @@ mod tests { assert_eq!(recent_blockhash, tx.message.recent_blockhash()); } } + + #[test] + fn test_instructions_iter() { + let tx = simple_transfer(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + // SAFETY: `bytes` is the same slice used to create `meta`. + unsafe { + let mut iter = meta.instructions_iter(&bytes); + let ix = iter.next().unwrap(); + assert_eq!(ix.program_id_index, 2); + assert_eq!(ix.accounts, &[0, 1]); + assert_eq!( + ix.data, + &bincode::serialize(&SystemInstruction::Transfer { lamports: 1 }).unwrap() + ); + assert!(iter.next().is_none()); + } + } } From aa2d35116de1a0c0e6f5552dfff4042d03b60a06 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 14 Aug 2024 10:50:33 -0500 Subject: [PATCH 121/529] TransactionView: benchmark TransactionMeta (#2568) --- transaction-view/Cargo.toml | 4 + transaction-view/benches/transaction_meta.rs | 213 +++++++++++++++++++ 2 files changed, 217 insertions(+) create mode 100644 transaction-view/benches/transaction_meta.rs diff --git a/transaction-view/Cargo.toml b/transaction-view/Cargo.toml index 25fef46304e595..cbe16529521cb6 100644 --- a/transaction-view/Cargo.toml +++ b/transaction-view/Cargo.toml @@ -25,3 +25,7 @@ dev-context-only-utils = [] [[bench]] name = "bytes" harness = false + +[[bench]] name = "transaction_meta" +harness = false diff --git a/transaction-view/benches/transaction_meta.rs b/transaction-view/benches/transaction_meta.rs new file mode 100644 index 00000000000000..89dd07600ba4b1 --- /dev/null +++ b/transaction-view/benches/transaction_meta.rs @@ -0,0 +1,213 @@ +use { + agave_transaction_view::transaction_meta::TransactionMeta, + criterion::{ + black_box, criterion_group, criterion_main, measurement::Measurement, BenchmarkGroup, + Criterion, Throughput, + }, + solana_sdk::{ + hash::Hash, + instruction::Instruction, + message::{ + v0::{self, MessageAddressTableLookup}, + Message, MessageHeader, VersionedMessage, + }, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction, + transaction::VersionedTransaction, + }, +}; + +const NUM_TRANSACTIONS: usize = 1024; + +fn serialize_transactions(transactions: Vec<VersionedTransaction>) -> Vec<Vec<u8>> { + transactions + .into_iter() + .map(|transaction| bincode::serialize(&transaction).unwrap()) + .collect() +} + +fn bench_transactions_parsing( + group: &mut BenchmarkGroup<impl Measurement>, + serialized_transactions: Vec<Vec<u8>>, +) { + // Legacy Transaction Parsing + group.bench_function("VersionedTransaction", |c| { + c.iter(|| { + for bytes in serialized_transactions.iter() { + let _ = bincode::deserialize::<VersionedTransaction>(black_box(bytes)).unwrap(); + } + }); + }); + + // New Transaction Parsing + group.bench_function("TransactionMeta", |c| { + c.iter(|| { + for bytes in serialized_transactions.iter() { + let _ = TransactionMeta::try_new(black_box(bytes)).unwrap(); + } + }); + }); +} + +fn minimum_sized_transactions() -> Vec<VersionedTransaction> { + (0..NUM_TRANSACTIONS) + .map(|_| { + let keypair = Keypair::new(); + VersionedTransaction::try_new(
+fn minimum_sized_transactions() -> Vec<VersionedTransaction> {
+    (0..NUM_TRANSACTIONS)
+        .map(|_| {
+            let keypair = Keypair::new();
+            VersionedTransaction::try_new(
+                VersionedMessage::Legacy(Message::new_with_blockhash(
+                    &[],
+                    Some(&keypair.pubkey()),
+                    &Hash::default(),
+                )),
+                &[&keypair],
+            )
+            .unwrap()
+        })
+        .collect()
+}
+
+fn simple_transfers() -> Vec<VersionedTransaction> {
+    (0..NUM_TRANSACTIONS)
+        .map(|_| {
+            let keypair = Keypair::new();
+            VersionedTransaction::try_new(
+                VersionedMessage::Legacy(Message::new_with_blockhash(
+                    &[system_instruction::transfer(
+                        &keypair.pubkey(),
+                        &Pubkey::new_unique(),
+                        1,
+                    )],
+                    Some(&keypair.pubkey()),
+                    &Hash::default(),
+                )),
+                &[&keypair],
+            )
+            .unwrap()
+        })
+        .collect()
+}
+
+fn packed_transfers() -> Vec<VersionedTransaction> {
+    // Creating transfer instructions between same keys to maximize the number
+    // of transfers per transaction. We can fit up to 60 transfers.
+    const MAX_TRANSFERS_PER_TX: usize = 60;
+
+    (0..NUM_TRANSACTIONS)
+        .map(|_| {
+            let keypair = Keypair::new();
+            let to_pubkey = Pubkey::new_unique();
+            let ixs = system_instruction::transfer_many(
+                &keypair.pubkey(),
+                &vec![(to_pubkey, 1); MAX_TRANSFERS_PER_TX],
+            );
+            VersionedTransaction::try_new(
+                VersionedMessage::Legacy(Message::new(&ixs, Some(&keypair.pubkey()))),
+                &[&keypair],
+            )
+            .unwrap()
+        })
+        .collect()
+}
+
+fn packed_noops() -> Vec<VersionedTransaction> {
+    // Creating noop instructions to maximize the number of instructions per
+    // transaction. We can fit up to 355 noops.
+    const MAX_INSTRUCTIONS_PER_TRANSACTION: usize = 355;
+
+    (0..NUM_TRANSACTIONS)
+        .map(|_| {
+            let keypair = Keypair::new();
+            let program_id = Pubkey::new_unique();
+            let ixs = (0..MAX_INSTRUCTIONS_PER_TRANSACTION)
+                .map(|_| Instruction::new_with_bytes(program_id, &[], vec![]));
+            VersionedTransaction::try_new(
+                VersionedMessage::Legacy(Message::new(
+                    &ixs.collect::<Vec<_>>(),
+                    Some(&keypair.pubkey()),
+                )),
+                &[&keypair],
+            )
+            .unwrap()
+        })
+        .collect()
+}
+
+fn packed_atls() -> Vec<VersionedTransaction> {
+    // Creating ATLs to maximize the number of ATLs per transaction. We can fit
+    // up to 31.
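+    // (Illustrative arithmetic for the bound above, assuming the standard
+    // 1232-byte packet: each MessageAddressTableLookup built here serializes
+    // to 35 bytes, i.e. a 32-byte table key plus a 2-byte writable-indexes
+    // shortvec plus a 1-byte empty readonly-indexes shortvec. After one
+    // signature, the message header, one static account key, the recent
+    // blockhash, and the containing shortvec lengths, 31 lookups fit in the
+    // packet while 32 would not.)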
+ const MAX_ATLS_PER_TRANSACTION: usize = 31; + + (0..NUM_TRANSACTIONS) + .map(|_| { + let keypair = Keypair::new(); + VersionedTransaction::try_new( + VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + account_keys: vec![keypair.pubkey()], + recent_blockhash: Hash::default(), + instructions: vec![], + address_table_lookups: Vec::from_iter((0..MAX_ATLS_PER_TRANSACTION).map( + |_| MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![], + }, + )), + }), + &[&keypair], + ) + .unwrap() + }) + .collect() +} + +fn bench_parse_min_sized_transactions(c: &mut Criterion) { + let serialized_transactions = serialize_transactions(minimum_sized_transactions()); + let mut group = c.benchmark_group("min sized transactions"); + group.throughput(Throughput::Elements(serialized_transactions.len() as u64)); + bench_transactions_parsing(&mut group, serialized_transactions); +} + +fn bench_parse_simple_transfers(c: &mut Criterion) { + let serialized_transactions = serialize_transactions(simple_transfers()); + let mut group = c.benchmark_group("simple transfers"); + group.throughput(Throughput::Elements(serialized_transactions.len() as u64)); + bench_transactions_parsing(&mut group, serialized_transactions); +} + +fn bench_parse_packed_transfers(c: &mut Criterion) { + let serialized_transactions = serialize_transactions(packed_transfers()); + let mut group = c.benchmark_group("packed transfers"); + group.throughput(Throughput::Elements(serialized_transactions.len() as u64)); + bench_transactions_parsing(&mut group, serialized_transactions); +} + +fn bench_parse_packed_noops(c: &mut Criterion) { + let serialized_transactions = serialize_transactions(packed_noops()); + let mut group = c.benchmark_group("packed noops"); + group.throughput(Throughput::Elements(serialized_transactions.len() as u64)); + bench_transactions_parsing(&mut group, serialized_transactions); +} + +fn bench_parse_packed_atls(c: &mut Criterion) { + let serialized_transactions = serialize_transactions(packed_atls()); + let mut group = c.benchmark_group("packed atls"); + group.throughput(Throughput::Elements(serialized_transactions.len() as u64)); + bench_transactions_parsing(&mut group, serialized_transactions); +} + +criterion_group!( + benches, + bench_parse_min_sized_transactions, + bench_parse_simple_transfers, + bench_parse_packed_transfers, + bench_parse_packed_noops, + bench_parse_packed_atls +); +criterion_main!(benches); From a058a231f614690d0d087fc68d7201eb77796eb1 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 14 Aug 2024 09:50:09 -0700 Subject: [PATCH 122/529] Use governor for rate limiter (#2547) * use governor rate limiter * removed naive rate limter implementations * clippy issue * missing cargo.lock changes for programs crate * panic on u64 to u32 overflow error * safe check on u64 to u32 conversion --- Cargo.lock | 76 ++++++++++++- Cargo.toml | 1 + programs/sbf/Cargo.lock | 76 ++++++++++++- streamer/Cargo.toml | 1 + .../nonblocking/connection_rate_limiter.rs | 28 +++-- .../src/nonblocking/keyed_rate_limiter.rs | 103 ------------------ streamer/src/nonblocking/mod.rs | 2 - streamer/src/nonblocking/quic.rs | 7 +- streamer/src/nonblocking/rate_limiter.rs | 74 ------------- 9 files changed, 172 insertions(+), 196 deletions(-) delete mode 100644 streamer/src/nonblocking/keyed_rate_limiter.rs delete 
mode 100644 streamer/src/nonblocking/rate_limiter.rs diff --git a/Cargo.lock b/Cargo.lock index 1c361111cea12e..2362c0e8de3801 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2370,6 +2370,12 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.30" @@ -2542,6 +2548,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if 1.0.0", + "dashmap", + "futures 0.3.30", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot 0.12.3", + "portable-atomic", + "quanta", + "rand 0.8.5", + "smallvec", + "spinning_top", +] + [[package]] name = "h2" version = "0.3.26" @@ -3573,6 +3599,12 @@ dependencies = [ "memoffset 0.9.1", ] +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + [[package]] name = "nom" version = "7.0.0" @@ -3584,6 +3616,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "normalize-line-endings" version = "0.3.0" @@ -4120,9 +4158,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.3.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59d1bcc64fc5d021d67521f818db868368028108d37f0e98d74e33f68297b5" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "ppv-lite86" @@ -4344,6 +4382,21 @@ dependencies = [ "syn 2.0.74", ] +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 0.11.0+wasi-snapshot-preview1", + "web-sys", + "winapi 0.3.9", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -4539,6 +4592,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "raw-cpuid" +version = "11.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "rayon" version = "1.10.0" @@ -7585,6 +7647,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", + "governor", "histogram", "indexmap 2.3.0", "itertools 0.12.1", @@ -8227,6 +8290,15 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + [[package]] name = 
"spl-associated-token-account" version = "4.0.0" diff --git a/Cargo.toml b/Cargo.toml index 482ef6a0c4e21e..ae808b2ae00012 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -237,6 +237,7 @@ generic-array = { version = "0.14.7", default-features = false } gethostname = "0.2.3" getrandom = "0.2.10" goauth = "0.13.1" +governor = "0.6.3" hex = "0.4.3" hidapi = { version = "2.6.3", default-features = false } histogram = "0.6.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 251f3565969319..d83fab753de125 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1794,6 +1794,12 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.30" @@ -1903,6 +1909,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if 1.0.0", + "dashmap", + "futures 0.3.30", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot 0.12.2", + "portable-atomic", + "quanta", + "rand 0.8.5", + "smallvec", + "spinning_top", +] + [[package]] name = "h2" version = "0.3.26" @@ -2942,6 +2968,12 @@ dependencies = [ "memoffset 0.9.0", ] +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + [[package]] name = "nom" version = "7.1.3" @@ -2952,6 +2984,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "normalize-line-endings" version = "0.3.0" @@ -3424,9 +3462,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.3.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59d1bcc64fc5d021d67521f818db868368028108d37f0e98d74e33f68297b5" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "powerfmt" @@ -3626,6 +3664,21 @@ dependencies = [ "syn 2.0.58", ] +[[package]] +name = "quanta" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 0.11.0+wasi-snapshot-preview1", + "web-sys", + "winapi 0.3.9", +] + [[package]] name = "quinn" version = "0.10.2" @@ -3763,6 +3816,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "raw-cpuid" +version = "11.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "rayon" version = "1.10.0" @@ -6331,6 +6393,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", + "governor", "histogram", "indexmap 2.3.0", "itertools 0.12.1", @@ -6775,6 +6838,15 @@ version = "0.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c530c2b0d0bf8b69304b39fe2001993e267461948b890cd037d8ad4293fa1a0d" +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + [[package]] name = "spl-associated-token-account" version = "4.0.0" diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index b6051bc604451f..89ce80c910c80f 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -16,6 +16,7 @@ crossbeam-channel = { workspace = true } dashmap = { workspace = true } futures = { workspace = true } futures-util = { workspace = true } +governor = { workspace = true } histogram = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } diff --git a/streamer/src/nonblocking/connection_rate_limiter.rs b/streamer/src/nonblocking/connection_rate_limiter.rs index b14b88f6ee3af0..fa781f8d6e0d44 100644 --- a/streamer/src/nonblocking/connection_rate_limiter.rs +++ b/streamer/src/nonblocking/connection_rate_limiter.rs @@ -1,25 +1,27 @@ use { - crate::nonblocking::{keyed_rate_limiter::KeyedRateLimiter, rate_limiter::RateLimiter}, - std::{net::IpAddr, time::Duration}, + governor::{DefaultDirectRateLimiter, DefaultKeyedRateLimiter, Quota, RateLimiter}, + std::{net::IpAddr, num::NonZeroU32}, }; pub struct ConnectionRateLimiter { - limiter: KeyedRateLimiter, + limiter: DefaultKeyedRateLimiter, } impl ConnectionRateLimiter { /// Create a new rate limiter per IpAddr. The rate is specified as the count per minute to allow for /// less frequent connections. pub fn new(limit_per_minute: u64) -> Self { + let quota = + Quota::per_minute(NonZeroU32::new(u32::try_from(limit_per_minute).unwrap()).unwrap()); Self { - limiter: KeyedRateLimiter::new(limit_per_minute, Duration::from_secs(60)), + limiter: DefaultKeyedRateLimiter::keyed(quota), } } /// Check if the connection from the said `ip` is allowed. pub fn is_allowed(&self, ip: &IpAddr) -> bool { // Acquire a permit from the rate limiter for the given IP address - if self.limiter.check_and_update(*ip) { + if self.limiter.check_key(ip).is_ok() { debug!("Request from IP {:?} allowed", ip); true // Request allowed } else { @@ -48,20 +50,26 @@ impl ConnectionRateLimiter { /// Connection rate limiter for enforcing connection rates from /// all clients. pub struct TotalConnectionRateLimiter { - limiter: RateLimiter, + limiter: DefaultDirectRateLimiter, } impl TotalConnectionRateLimiter { /// Create a new rate limiter. The rate is specified as the count per second. pub fn new(limit_per_second: u64) -> Self { + let quota = + Quota::per_second(NonZeroU32::new(u32::try_from(limit_per_second).unwrap()).unwrap()); Self { - limiter: RateLimiter::new(limit_per_second, Duration::from_secs(1)), + limiter: RateLimiter::direct(quota), } } /// Check if a connection is allowed. 
diff --git a/streamer/src/nonblocking/keyed_rate_limiter.rs b/streamer/src/nonblocking/keyed_rate_limiter.rs
deleted file mode 100644
index c73682c8add542..00000000000000
--- a/streamer/src/nonblocking/keyed_rate_limiter.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-use {
-    crate::nonblocking::rate_limiter::RateLimiter,
-    dashmap::DashMap,
-    std::{hash::Hash, time::Duration},
-};
-
-pub struct KeyedRateLimiter<K> {
-    limiters: DashMap<K, RateLimiter>,
-    interval: Duration,
-    limit: u64,
-}
-
-impl<K> KeyedRateLimiter<K>
-where
-    K: Eq + Hash,
-{
-    /// Create a keyed rate limiter with `limit` count with a rate limit `interval`
-    pub fn new(limit: u64, interval: Duration) -> Self {
-        Self {
-            limiters: DashMap::default(),
-            interval,
-            limit,
-        }
-    }
-
-    /// Check if the connection from the said `key` is allowed to pass through the rate limiter.
-    /// When it is allowed, the rate limiter state is updated to reflect it has been
-    /// allowed. For a unique request, the caller should call it only once when it is allowed.
-    pub fn check_and_update(&self, key: K) -> bool {
-        let allowed = match self.limiters.entry(key) {
-            dashmap::mapref::entry::Entry::Occupied(mut entry) => {
-                let limiter = entry.get_mut();
-                limiter.check_and_update()
-            }
-            dashmap::mapref::entry::Entry::Vacant(entry) => entry
-                .insert(RateLimiter::new(self.limit, self.interval))
-                .value_mut()
-                .check_and_update(),
-        };
-        allowed
-    }
-
-    /// retain only keys whose rate-limiting start date is within the set up interval.
-    /// Otherwise drop them as inactive
-    pub fn retain_recent(&self) {
-        let now = tokio::time::Instant::now();
-        self.limiters
-            .retain(|_key, limiter| now.duration_since(*limiter.start_instant()) <= self.interval);
-    }
-
-    /// Returns the number of "live" keys in the rate limiter.
-    pub fn len(&self) -> usize {
-        self.limiters.len()
-    }
-
-    /// Returns `true` if the rate limiter has no keys in it.
-    pub fn is_empty(&self) -> bool {
-        self.limiters.is_empty()
-    }
-}
-
-#[cfg(test)]
-pub mod test {
-    use {super::*, tokio::time::sleep};
-
-    #[allow(clippy::len_zero)]
-    #[tokio::test]
-    async fn test_rate_limiter() {
-        let limiter = KeyedRateLimiter::<u64>::new(2, Duration::from_millis(100));
-        assert!(limiter.len() == 0);
-        assert!(limiter.is_empty());
-        assert!(limiter.check_and_update(1));
-        assert!(limiter.check_and_update(1));
-        assert!(!limiter.check_and_update(1));
-        assert!(limiter.len() == 1);
-        assert!(limiter.check_and_update(2));
-        assert!(limiter.check_and_update(2));
-        assert!(!limiter.check_and_update(2));
-        assert!(limiter.len() == 2);
-
-        // sleep 150 ms, the rate-limiting parameters should have been reset.
- sleep(Duration::from_millis(150)).await; - assert!(limiter.len() == 2); - - assert!(limiter.check_and_update(1)); - assert!(limiter.check_and_update(1)); - assert!(!limiter.check_and_update(1)); - - assert!(limiter.check_and_update(2)); - assert!(limiter.check_and_update(2)); - assert!(!limiter.check_and_update(2)); - assert!(limiter.len() == 2); - - // sleep another 150 and clean outdatated, key 2 will be removed - sleep(Duration::from_millis(150)).await; - assert!(limiter.check_and_update(1)); - assert!(limiter.check_and_update(1)); - assert!(!limiter.check_and_update(1)); - - limiter.retain_recent(); - assert!(limiter.len() == 1); - } -} diff --git a/streamer/src/nonblocking/mod.rs b/streamer/src/nonblocking/mod.rs index d7205e42468235..61bd021ae0651e 100644 --- a/streamer/src/nonblocking/mod.rs +++ b/streamer/src/nonblocking/mod.rs @@ -1,7 +1,5 @@ pub mod connection_rate_limiter; -pub mod keyed_rate_limiter; pub mod quic; -pub mod rate_limiter; pub mod recvmmsg; pub mod sendmmsg; mod stream_throttle; diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 4d5c2326f5a0a6..f1b0a5a7efd5ed 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -253,7 +253,7 @@ async fn run_server( coalesce: Duration, ) { let rate_limiter = ConnectionRateLimiter::new(max_connections_per_ipaddr_per_min); - let mut overall_connection_rate_limiter = + let overall_connection_rate_limiter = TotalConnectionRateLimiter::new(TOTAL_CONNECTIONS_PER_SECOND); const WAIT_FOR_CONNECTION_TIMEOUT: Duration = Duration::from_secs(1); @@ -340,9 +340,9 @@ async fn run_server( stats .connection_rate_limiter_length .store(rate_limiter.len(), Ordering::Relaxed); - info!("Got a connection {remote_address:?}"); + debug!("Got a connection {remote_address:?}"); if !rate_limiter.is_allowed(&remote_address.ip()) { - info!( + debug!( "Reject connection from {:?} -- rate limiting exceeded", remote_address ); @@ -351,6 +351,7 @@ async fn run_server( .fetch_add(1, Ordering::Relaxed); continue; } + stats .outstanding_incoming_connection_attempts .fetch_add(1, Ordering::Relaxed); diff --git a/streamer/src/nonblocking/rate_limiter.rs b/streamer/src/nonblocking/rate_limiter.rs deleted file mode 100644 index 96ce89391fa1ac..00000000000000 --- a/streamer/src/nonblocking/rate_limiter.rs +++ /dev/null @@ -1,74 +0,0 @@ -use {std::time::Duration, tokio::time::Instant}; - -#[derive(Debug)] -pub struct RateLimiter { - /// count of requests in an interval - pub(crate) count: u64, - - /// Rate limit start time - start_instant: Instant, - interval: Duration, - limit: u64, -} - -/// A naive rate limiter, to be replaced by using governor which has more even -/// distribution of requests passing through using GCRA algorithm. -impl RateLimiter { - pub fn new(limit: u64, interval: Duration) -> Self { - Self { - count: 0, - start_instant: Instant::now(), - interval, - limit, - } - } - - /// Reset the counter and start instant if needed. - pub fn reset_params_if_needed(&mut self) { - if Instant::now().duration_since(self.start_instant) > self.interval { - self.start_instant = Instant::now(); - self.count = 0; - } - } - - /// Check if a single request should be allowed to pass through the rate limiter - /// When it is allowed, the rate limiter state is updated to reflect it has been - /// allowed. For a unique request, the caller should call it only once when it is allowed. 
- pub fn check_and_update(&mut self) -> bool { - self.reset_params_if_needed(); - if self.count >= self.limit { - return false; - } - - self.count = self.count.saturating_add(1); - true - } - - /// Return the start instant for the current rate-limiting interval. - pub fn start_instant(&self) -> &Instant { - &self.start_instant - } -} - -#[cfg(test)] -pub mod test { - use {super::*, tokio::time::sleep}; - - #[tokio::test] - async fn test_rate_limiter() { - let mut limiter = RateLimiter::new(2, Duration::from_millis(100)); - assert!(limiter.check_and_update()); - assert!(limiter.check_and_update()); - assert!(!limiter.check_and_update()); - let instant1 = *limiter.start_instant(); - - // sleep 150 ms, the rate-limiting parameters should have been reset. - sleep(Duration::from_millis(150)).await; - assert!(limiter.check_and_update()); - assert!(limiter.check_and_update()); - assert!(!limiter.check_and_update()); - - let instant2 = *limiter.start_instant(); - assert!(instant2 > instant1); - } -} From 939d7f65ef4bb9ac9e229be912ea47916d5360b6 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Wed, 14 Aug 2024 13:24:15 -0400 Subject: [PATCH 123/529] add benches for process_compute_budget_instructions (#2498) * add benches for process_compute_budget_instructions --- Cargo.lock | 2 + runtime-transaction/Cargo.toml | 8 +- .../process_compute_budget_instructions.rs | 168 ++++++++++++++++++ 3 files changed, 177 insertions(+), 1 deletion(-) create mode 100644 runtime-transaction/benches/process_compute_budget_instructions.rs diff --git a/Cargo.lock b/Cargo.lock index 2362c0e8de3801..9aedd912f45fc5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7404,9 +7404,11 @@ name = "solana-runtime-transaction" version = "2.1.0" dependencies = [ "bincode", + "criterion", "log", "rand 0.8.5", "rustc_version 0.4.0", + "solana-builtins-default-costs", "solana-compute-budget", "solana-program", "solana-sdk", diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index 22635d6c121159..2965564e63d3dc 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -21,11 +21,17 @@ name = "solana_runtime_transaction" [dev-dependencies] bincode = { workspace = true } +criterion = { workspace = true } rand = { workspace = true } -solana-program ={ workspace = true } +solana-builtins-default-costs = { workspace = true } +solana-program = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] rustc_version = { workspace = true, optional = true } + +[[bench]] +name = "process_compute_budget_instructions" +harness = false diff --git a/runtime-transaction/benches/process_compute_budget_instructions.rs b/runtime-transaction/benches/process_compute_budget_instructions.rs new file mode 100644 index 00000000000000..463a4cda596c3a --- /dev/null +++ b/runtime-transaction/benches/process_compute_budget_instructions.rs @@ -0,0 +1,168 @@ +use { + criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, + solana_sdk::{ + compute_budget::ComputeBudgetInstruction, + instruction::Instruction, + message::Message, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction::{self}, + transaction::{SanitizedTransaction, Transaction}, + }, +}; + +const NUM_TRANSACTIONS_PER_ITER: usize = 1024; +const DUMMY_PROGRAM_ID: &str = "dummmy1111111111111111111111111111111111111"; + +fn 
build_sanitized_transaction( + payer_keypair: &Keypair, + instructions: &[Instruction], +) -> SanitizedTransaction { + SanitizedTransaction::from_transaction_for_tests(Transaction::new_unsigned(Message::new( + instructions, + Some(&payer_keypair.pubkey()), + ))) +} + +fn bench_process_compute_budget_instructions_empty(c: &mut Criterion) { + c.benchmark_group("bench_process_compute_budget_instructions_empty") + .throughput(Throughput::Elements(NUM_TRANSACTIONS_PER_ITER as u64)) + .bench_function("0 instructions", |bencher| { + let tx = build_sanitized_transaction(&Keypair::new(), &[]); + bencher.iter(|| { + (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { + assert!(process_compute_budget_instructions(black_box( + tx.message().program_instructions_iter() + )) + .is_ok()) + }) + }); + }); +} + +fn bench_process_compute_budget_instructions_no_builtins(c: &mut Criterion) { + let num_instructions = 4; + c.benchmark_group("bench_process_compute_budget_instructions_no_builtins") + .throughput(Throughput::Elements(NUM_TRANSACTIONS_PER_ITER as u64)) + .bench_function( + format!("{num_instructions} dummy Instructions"), + |bencher| { + let ixs: Vec<_> = (0..num_instructions) + .map(|_| { + Instruction::new_with_bincode( + DUMMY_PROGRAM_ID.parse().unwrap(), + &(), + vec![], + ) + }) + .collect(); + let tx = build_sanitized_transaction(&Keypair::new(), &ixs); + bencher.iter(|| { + (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { + assert!(process_compute_budget_instructions(black_box( + tx.message().program_instructions_iter() + )) + .is_ok()) + }) + }); + }, + ); +} + +fn bench_process_compute_budget_instructions_compute_budgets(c: &mut Criterion) { + c.benchmark_group("bench_process_compute_budget_instructions_compute_budgets") + .throughput(Throughput::Elements(NUM_TRANSACTIONS_PER_ITER as u64)) + .bench_function("4 compute-budget instructions", |bencher| { + let ixs = vec![ + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(u32::MAX), + ]; + let tx = build_sanitized_transaction(&Keypair::new(), &ixs); + bencher.iter(|| { + (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { + assert!(process_compute_budget_instructions(black_box( + tx.message().program_instructions_iter() + )) + .is_ok()) + }) + }); + }); +} + +fn bench_process_compute_budget_instructions_builtins(c: &mut Criterion) { + c.benchmark_group("bench_process_compute_budget_instructions_builtins") + .throughput(Throughput::Elements(NUM_TRANSACTIONS_PER_ITER as u64)) + .bench_function("4 dummy builtins", |bencher| { + let ixs = vec![ + Instruction::new_with_bincode(solana_sdk::bpf_loader::id(), &(), vec![]), + Instruction::new_with_bincode(solana_sdk::secp256k1_program::id(), &(), vec![]), + Instruction::new_with_bincode( + solana_sdk::address_lookup_table::program::id(), + &(), + vec![], + ), + Instruction::new_with_bincode(solana_sdk::loader_v4::id(), &(), vec![]), + ]; + let tx = build_sanitized_transaction(&Keypair::new(), &ixs); + bencher.iter(|| { + (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { + assert!(process_compute_budget_instructions(black_box( + tx.message().program_instructions_iter() + )) + .is_ok()) + }) + }); + }); +} + +fn bench_process_compute_budget_instructions_mixed(c: &mut Criterion) { + let num_instructions = 355; + c.benchmark_group("bench_process_compute_budget_instructions_mixed") + 
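+        // Throughput::Elements below makes Criterion report elements per
+        // second, i.e. transactions processed per second (1024 per iteration
+        // here, as with the other groups in this file).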
.throughput(Throughput::Elements(NUM_TRANSACTIONS_PER_ITER as u64)) + .bench_function( + format!("{num_instructions} mixed instructions"), + |bencher| { + let payer_keypair = Keypair::new(); + let mut ixs: Vec<_> = (0..num_instructions) + .map(|_| { + Instruction::new_with_bincode( + DUMMY_PROGRAM_ID.parse().unwrap(), + &(), + vec![], + ) + }) + .collect(); + ixs.extend(vec![ + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(u32::MAX), + system_instruction::transfer(&payer_keypair.pubkey(), &Pubkey::new_unique(), 1), + ]); + let tx = build_sanitized_transaction(&payer_keypair, &ixs); + + bencher.iter(|| { + (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { + assert!(process_compute_budget_instructions(black_box( + tx.message().program_instructions_iter() + )) + .is_ok()) + }) + }); + }, + ); +} + +criterion_group!( + benches, + bench_process_compute_budget_instructions_empty, + bench_process_compute_budget_instructions_no_builtins, + bench_process_compute_budget_instructions_compute_budgets, + bench_process_compute_budget_instructions_builtins, + bench_process_compute_budget_instructions_mixed, +); +criterion_main!(benches); From ae18213c19ea5335dfc75e6b6116def0f0910aff Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 14 Aug 2024 15:16:31 -0500 Subject: [PATCH 124/529] sdk: Deprecate timing::duration_as_*() functions (#2586) These functions are from a time when Duration did not natively support these operations. Duration has now supported these methods for some time, so we should do away with our functions and encourage any callers to use the methods on Duration directly instead --- accounts-db/src/append_vec.rs | 10 +++------- banking-bench/src/main.rs | 8 ++++---- bench-tps/src/bench.rs | 14 +++++++------- bench-tps/src/perf_utils.rs | 4 ++-- core/benches/banking_stage.rs | 4 ++-- core/benches/sigverify_stage.rs | 3 +-- core/src/repair/serve_repair.rs | 4 ++-- core/src/sigverify_stage.rs | 7 ++----- entry/src/entry.rs | 11 +++++------ genesis/src/main.rs | 7 +++---- ledger/src/blockstore_processor.rs | 4 +--- local-cluster/src/cluster_tests.rs | 4 ++-- measure/src/macros.rs | 2 +- measure/src/measure.rs | 19 ++++++++----------- poh/src/poh_service.rs | 4 ++-- runtime/src/bank/tests.rs | 8 ++++---- runtime/src/bank_forks.rs | 5 ++--- sdk/src/timing.rs | 12 ++++++++---- thin-client/src/thin_client.rs | 10 ++++++---- .../broadcast_stage/standard_broadcast_run.rs | 11 ++++------- 20 files changed, 69 insertions(+), 82 deletions(-) diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index d0dcee18cb3ac5..52f72477566459 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -1206,7 +1206,6 @@ pub mod tests { solana_sdk::{ account::{Account, AccountSharedData}, clock::Slot, - timing::duration_as_ms, }, std::{mem::ManuallyDrop, time::Instant}, test_case::test_case, @@ -1535,7 +1534,7 @@ pub mod tests { indexes.push(pos); assert_eq!(sizes, av.get_account_sizes(&indexes)); } - trace!("append time: {} ms", duration_as_ms(&now.elapsed()),); + trace!("append time: {} ms", now.elapsed().as_millis()); let now = Instant::now(); for _ in 0..size { @@ -1543,7 +1542,7 @@ pub mod tests { let account = create_test_account(sample + 1); assert_eq!(av.get_account_test(indexes[sample]).unwrap(), account); } - trace!("random read time: {} ms", 
duration_as_ms(&now.elapsed()),); + trace!("random read time: {} ms", now.elapsed().as_millis()); let now = Instant::now(); assert_eq!(indexes.len(), size); @@ -1556,10 +1555,7 @@ pub mod tests { assert_eq!(recovered, account.1); sample += 1; }); - trace!( - "sequential read time: {} ms", - duration_as_ms(&now.elapsed()), - ); + trace!("sequential read time: {} ms", now.elapsed().as_millis()); } #[test_case(StorageAccess::Mmap)] diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 7d194d044aa85c..c80e96005c8829 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -31,7 +31,7 @@ use { pubkey::{self, Pubkey}, signature::{Keypair, Signature, Signer}, system_instruction, system_transaction, - timing::{duration_as_us, timestamp}, + timing::timestamp, transaction::Transaction, }, solana_streamer::socket::SocketAddrSpace, @@ -534,7 +534,7 @@ fn main() { bank.slot(), bank.transaction_count(), ); - tx_total_us += duration_as_us(&now.elapsed()); + tx_total_us += now.elapsed().as_micros() as u64; let mut poh_time = Measure::start("poh_time"); poh_recorder @@ -578,14 +578,14 @@ fn main() { bank.slot(), bank.transaction_count(), ); - tx_total_us += duration_as_us(&now.elapsed()); + tx_total_us += now.elapsed().as_micros() as u64; } // This signature clear may not actually clear the signatures // in this chunk, but since we rotate between CHUNKS then // we should clear them by the time we come around again to re-use that chunk. bank.clear_signatures(); - total_us += duration_as_us(&now.elapsed()); + total_us += now.elapsed().as_micros() as u64; total_sent += sent; if current_iteration_index % num_chunks == 0 { diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index d8c550c9312e01..0c85af917965ca 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -25,7 +25,7 @@ use { pubkey::Pubkey, signature::{Keypair, Signer}, system_instruction, - timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp}, + timing::timestamp, transaction::Transaction, }, solana_tps_client::*, @@ -233,12 +233,12 @@ where "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time, {:?}", bsps * 1_000_000_f64, nsps / 1_000_f64, - duration_as_ms(&duration), + duration.as_millis(), blockhash, ); datapoint_info!( "bench-tps-generate_txs", - ("duration", duration_as_us(&duration), i64) + ("duration", duration.as_micros() as i64, i64) ); transactions @@ -1029,12 +1029,12 @@ fn do_tx_transfers( total_tx_sent_count.fetch_add(num_txs, Ordering::Relaxed); info!( "Tx send done. 
{} ms {} tps", - duration_as_ms(&transfer_start.elapsed()), - num_txs as f32 / duration_as_s(&transfer_start.elapsed()), + transfer_start.elapsed().as_millis(), + num_txs as f32 / transfer_start.elapsed().as_secs_f32(), ); datapoint_info!( "bench-tps-do_tx_transfers", - ("duration", duration_as_us(&transfer_start.elapsed()), i64), + ("duration", transfer_start.elapsed().as_micros() as i64, i64), ("count", num_txs, i64) ); } @@ -1107,7 +1107,7 @@ fn compute_and_report_stats( ); info!( "\tAverage TPS: {}", - max_tx_count as f32 / duration_as_s(tx_send_elapsed) + max_tx_count as f32 / tx_send_elapsed.as_secs_f32() ); } diff --git a/bench-tps/src/perf_utils.rs b/bench-tps/src/perf_utils.rs index bb7cd725c37013..cd2bc373935b7a 100644 --- a/bench-tps/src/perf_utils.rs +++ b/bench-tps/src/perf_utils.rs @@ -1,6 +1,6 @@ use { log::*, - solana_sdk::{commitment_config::CommitmentConfig, timing::duration_as_s}, + solana_sdk::commitment_config::CommitmentConfig, solana_tps_client::TpsClient, std::{ sync::{ @@ -60,7 +60,7 @@ pub fn sample_txs( let sample_txs = txs - last_txs; last_txs = txs; - let tps = sample_txs as f32 / duration_as_s(&elapsed); + let tps = sample_txs as f32 / elapsed.as_secs_f32(); if tps > max_tps { max_tps = tps; } diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index d0efbfafddfc0b..3e2d5572e4e761 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -49,7 +49,7 @@ use { pubkey, signature::{Keypair, Signature, Signer}, system_instruction, system_transaction, - timing::{duration_as_us, timestamp}, + timing::timestamp, transaction::{Transaction, VersionedTransaction}, }, solana_streamer::socket::SocketAddrSpace, @@ -355,7 +355,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { bank.clear_signatures(); trace!( "time: {} checked: {} sent: {}", - duration_as_us(&now.elapsed()), + now.elapsed().as_micros(), txes / CHUNKS, sent, ); diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index 7013f718e4ab2e..3f11cc150574d3 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -26,7 +26,6 @@ use { packet::PacketFlags, signature::{Keypair, Signer}, system_transaction, - timing::duration_as_ms, }, std::time::{Duration, Instant}, test::Bencher, @@ -167,7 +166,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { let batches = gen_batches(use_same_tx); trace!( "starting... 
generation took: {} ms batches: {}", - duration_as_ms(&now.elapsed()), + now.elapsed().as_millis(), batches.len() ); diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 44c221f2a97877..ad123ea8562957 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -41,7 +41,7 @@ use { pubkey::{Pubkey, PUBKEY_BYTES}, signature::{Signable, Signature, Signer, SIGNATURE_BYTES}, signer::keypair::Keypair, - timing::{duration_as_ms, timestamp}, + timing::timestamp, }, solana_streamer::{ sendmmsg::{batch_send, SendPktsError}, @@ -517,7 +517,7 @@ impl ServeRepair { } fn report_time_spent(label: &str, time: &Duration, extra: &str) { - let count = duration_as_ms(time); + let count = time.as_millis(); if count > 5 { info!("{} took: {} ms {}", label, count, extra); } diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index 5fcf60ba5d471b..ac7d9889db0ed8 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -466,10 +466,7 @@ impl SigVerifyStage { mod tests { use { super::*, - crate::{ - banking_trace::BankingTracer, sigverify::TransactionSigVerifier, - sigverify_stage::timing::duration_as_ms, - }, + crate::{banking_trace::BankingTracer, sigverify::TransactionSigVerifier}, crossbeam_channel::unbounded, solana_perf::{ packet::{to_packet_batches, Packet}, @@ -563,7 +560,7 @@ mod tests { let batches = gen_batches(use_same_tx, packets_per_batch, total_packets); trace!( "starting... generation took: {} ms batches: {}", - duration_as_ms(&now.elapsed()), + now.elapsed().as_millis(), batches.len() ); diff --git a/entry/src/entry.rs b/entry/src/entry.rs index da4fda5914a363..75057db1630f9b 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -24,7 +24,6 @@ use { solana_sdk::{ hash::Hash, packet::Meta, - timing, transaction::{ Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, VersionedTransaction, @@ -670,7 +669,7 @@ impl EntrySlice for [Entry] { r }) }); - let poh_duration_us = timing::duration_as_us(&now.elapsed()); + let poh_duration_us = now.elapsed().as_micros() as u64; EntryVerificationState { verification_status: if res { EntryVerificationStatus::Success @@ -756,7 +755,7 @@ impl EntrySlice for [Entry] { }) }) }); - let poh_duration_us = timing::duration_as_us(&now.elapsed()); + let poh_duration_us = now.elapsed().as_micros() as u64; EntryVerificationState { verification_status: if res { EntryVerificationStatus::Success @@ -849,9 +848,9 @@ impl EntrySlice for [Entry] { assert!(res == 0, "GPU PoH verify many failed"); inc_new_counter_info!( "entry_verify-gpu_thread", - timing::duration_as_us(&gpu_wait.elapsed()) as usize + gpu_wait.elapsed().as_micros() as usize ); - timing::duration_as_us(&gpu_wait.elapsed()) + gpu_wait.elapsed().as_micros() as u64 }) .unwrap(); @@ -879,7 +878,7 @@ impl EntrySlice for [Entry] { }); EntryVerificationState { verification_status: EntryVerificationStatus::Pending, - poh_duration_us: timing::duration_as_us(&start.elapsed()), + poh_duration_us: start.elapsed().as_micros() as u64, device_verification_data, } } diff --git a/genesis/src/main.rs b/genesis/src/main.rs index 9edf3a49cc51e6..dc9f2ba7031531 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -32,7 +32,7 @@ use { signature::{Keypair, Signer}, signer::keypair::read_keypair_file, stake::state::StakeStateV2, - system_program, timing, + system_program, }, solana_stake_program::stake_state, solana_vote_program::vote_state::{self, VoteState}, @@ -144,8 +144,7 @@ fn main() -> 
Result<(), Box> { .max(rent.minimum_balance(StakeStateV2::size_of())) .to_string(); - let default_target_tick_duration = - timing::duration_as_us(&PohConfig::default().target_tick_duration); + let default_target_tick_duration = PohConfig::default().target_tick_duration; let default_ticks_per_slot = &clock::DEFAULT_TICKS_PER_SLOT.to_string(); let default_cluster_type = "mainnet-beta"; let default_genesis_archive_unpacked_size = MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string(); @@ -473,7 +472,7 @@ fn main() -> Result<(), Box> { target_tick_duration: if matches.is_present("target_tick_duration") { Duration::from_micros(value_t_or_exit!(matches, "target_tick_duration", u64)) } else { - Duration::from_micros(default_target_tick_duration) + default_target_tick_duration }, ..PohConfig::default() }; diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 98a7f1c72d46c3..ec099b4ecfedab 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -50,7 +50,6 @@ use { pubkey::Pubkey, saturating_add_assign, signature::{Keypair, Signature}, - timing, transaction::{ Result, SanitizedTransaction, TransactionError, TransactionVerificationMode, VersionedTransaction, @@ -1572,8 +1571,7 @@ fn confirm_slot_entries( recyclers.clone(), Arc::new(verify_transaction), ); - let transaction_cpu_duration_us = - timing::duration_as_us(&transaction_verification_start.elapsed()); + let transaction_cpu_duration_us = transaction_verification_start.elapsed().as_micros() as u64; let mut transaction_verification_result = match transaction_verification_result { Ok(transaction_verification_result) => transaction_verification_result, diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index 9b80c15824f494..b46fd67023d649 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -30,7 +30,7 @@ use { pubkey::Pubkey, signature::{Keypair, Signature, Signer}, system_transaction, - timing::{duration_as_ms, timestamp}, + timing::timestamp, transaction::Transaction, transport::TransportError, }, @@ -231,7 +231,7 @@ pub fn sleep_n_epochs( ticks_per_slot: u64, slots_per_epoch: u64, ) { - let num_ticks_per_second = (1000 / duration_as_ms(&config.target_tick_duration)) as f64; + let num_ticks_per_second = config.target_tick_duration.as_secs_f64().recip(); let num_ticks_to_sleep = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64; let secs = ((num_ticks_to_sleep + num_ticks_per_second - 1.0) / num_ticks_per_second) as u64; warn!("sleep_n_epochs: {} seconds", secs); diff --git a/measure/src/macros.rs b/measure/src/macros.rs index 1dddcbb4074072..f9ec0702db4d21 100644 --- a/measure/src/macros.rs +++ b/measure/src/macros.rs @@ -85,7 +85,7 @@ macro_rules! 
measure_us { ($val:expr) => {{ let start = std::time::Instant::now(); let result = $val; - (result, solana_sdk::timing::duration_as_us(&start.elapsed())) + (result, start.elapsed().as_micros() as u64) }}; } diff --git a/measure/src/measure.rs b/measure/src/measure.rs index 1e96d68fda1f0a..190abb30cb568e 100644 --- a/measure/src/measure.rs +++ b/measure/src/measure.rs @@ -1,9 +1,6 @@ -use { - solana_sdk::timing::{duration_as_ms, duration_as_ns, duration_as_s, duration_as_us}, - std::{ - fmt, - time::{Duration, Instant}, - }, +use std::{ + fmt, + time::{Duration, Instant}, }; #[derive(Debug)] @@ -23,7 +20,7 @@ impl Measure { } pub fn stop(&mut self) { - self.duration = duration_as_ns(&self.start.elapsed()); + self.duration = self.start.elapsed().as_nanos() as u64; } pub fn as_ns(&self) -> u64 { @@ -47,19 +44,19 @@ impl Measure { } pub fn end_as_ns(self) -> u64 { - duration_as_ns(&self.start.elapsed()) + self.start.elapsed().as_nanos() as u64 } pub fn end_as_us(self) -> u64 { - duration_as_us(&self.start.elapsed()) + self.start.elapsed().as_micros() as u64 } pub fn end_as_ms(self) -> u64 { - duration_as_ms(&self.start.elapsed()) + self.start.elapsed().as_millis() as u64 } pub fn end_as_s(self) -> f32 { - duration_as_s(&self.start.elapsed()) + self.start.elapsed().as_secs_f32() } pub fn end_as_duration(self) -> Duration { diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index 231ec623fd454a..c387abf2f93944 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -386,7 +386,7 @@ mod tests { solana_measure::measure::Measure, solana_perf::test_tx::test_tx, solana_runtime::bank::Bank, - solana_sdk::{clock, hash::hash, timing, transaction::VersionedTransaction}, + solana_sdk::{clock, hash::hash, transaction::VersionedTransaction}, std::{thread::sleep, time::Duration}, }; @@ -402,7 +402,7 @@ mod tests { .expect("Expected to be able to open database ledger"); let default_target_tick_duration = - timing::duration_as_us(&PohConfig::default().target_tick_duration); + PohConfig::default().target_tick_duration.as_micros() as u64; let target_tick_duration = Duration::from_micros(default_target_tick_duration); let poh_config = PohConfig { hashes_per_tick: Some(clock::DEFAULT_HASHES_PER_TICK), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 85d51a10f8e2e9..6d05ab5010d0d0 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -92,7 +92,7 @@ use { MAX_PERMITTED_DATA_LENGTH, }, system_program, system_transaction, sysvar, - timing::{duration_as_s, years_as_slots}, + timing::years_as_slots, transaction::{ Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, @@ -268,9 +268,9 @@ fn test_bank_unix_timestamp_from_genesis() { genesis_config.creation_time, bank.unix_timestamp_from_genesis() ); - let slots_per_sec = 1.0 - / (duration_as_s(&genesis_config.poh_config.target_tick_duration) - * genesis_config.ticks_per_slot as f32); + let slots_per_sec = (genesis_config.poh_config.target_tick_duration.as_secs_f32() + * genesis_config.ticks_per_slot as f32) + .recip(); for _i in 0..slots_per_sec as usize + 1 { bank = Arc::new(new_from_parent(bank)); diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 3dd82c1fe85c98..884fdddcfa616e 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -16,7 +16,6 @@ use { solana_sdk::{ clock::{Epoch, Slot}, hash::Hash, - timing, }, std::{ collections::{hash_map::Entry, HashMap, HashSet}, @@ -494,7 +493,7 @@ impl BankForks { "bank-forks_set_root", ( 
"elapsed_ms", - timing::duration_as_ms(&set_root_start.elapsed()) as usize, + set_root_start.elapsed().as_millis() as usize, i64 ), ("slot", root, i64), @@ -569,7 +568,7 @@ impl BankForks { ), ( "program_cache_prune_ms", - timing::duration_as_ms(&program_cache_prune_start.elapsed()), + program_cache_prune_start.elapsed().as_millis() as i64, i64 ), ("dropped_banks_len", set_root_metrics.dropped_banks_len, i64), diff --git a/sdk/src/timing.rs b/sdk/src/timing.rs index beccb37841ce01..bcca611bd1dc04 100644 --- a/sdk/src/timing.rs +++ b/sdk/src/timing.rs @@ -4,28 +4,32 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; +#[deprecated(since = "2.1.0", note = "Use `Duration::as_nanos()` directly")] pub fn duration_as_ns(d: &Duration) -> u64 { d.as_nanos() as u64 } +#[deprecated(since = "2.1.0", note = "Use `Duration::as_micros()` directly")] pub fn duration_as_us(d: &Duration) -> u64 { d.as_micros() as u64 } +#[deprecated(since = "2.1.0", note = "Use `Duration::as_millis()` directly")] pub fn duration_as_ms(d: &Duration) -> u64 { d.as_millis() as u64 } +#[deprecated(since = "2.1.0", note = "Use `Duration::as_secs_f32()` directly")] pub fn duration_as_s(d: &Duration) -> f32 { d.as_secs_f32() } /// return timestamp as ms pub fn timestamp() -> u64 { - let now = SystemTime::now() + SystemTime::now() .duration_since(UNIX_EPOCH) - .expect("create timestamp in timing"); - duration_as_ms(&now) + .expect("create timestamp in timing") + .as_millis() as u64 } pub const SECONDS_PER_YEAR: f64 = 365.242_199 * 24.0 * 60.0 * 60.0; @@ -37,7 +41,7 @@ pub fn years_as_slots(years: f64, tick_duration: &Duration, ticks_per_slot: u64) // slots/year is seconds/year ... SECONDS_PER_YEAR // * (ns/s)/(ns/tick) / ticks/slot = 1/s/1/tick = ticks/s - * (1_000_000_000.0 / duration_as_ns(tick_duration) as f64) + * (1_000_000_000.0 / tick_duration.as_nanos() as f64) // / ticks/slot / ticks_per_slot as f64 } diff --git a/thin-client/src/thin_client.rs b/thin-client/src/thin_client.rs index f53ae499a8b68f..fc994ee9b5e094 100644 --- a/thin-client/src/thin_client.rs +++ b/thin-client/src/thin_client.rs @@ -27,7 +27,6 @@ use { signature::{Keypair, Signature, Signer}, signers::Signers, system_instruction, - timing::duration_as_ms, transaction::{self, Transaction, VersionedTransaction}, transport::Result as TransportResult, }, @@ -480,7 +479,8 @@ where let now = Instant::now(); match self.rpc_client().get_transaction_count() { Ok(transaction_count) => { - self.optimizer.report(index, duration_as_ms(&now.elapsed())); + self.optimizer + .report(index, now.elapsed().as_millis() as u64); Ok(transaction_count) } Err(e) => { @@ -501,7 +501,8 @@ where .get_transaction_count_with_commitment(commitment_config) { Ok(transaction_count) => { - self.optimizer.report(index, duration_as_ms(&now.elapsed())); + self.optimizer + .report(index, now.elapsed().as_millis() as u64); Ok(transaction_count) } Err(e) => { @@ -542,7 +543,8 @@ where let now = Instant::now(); match self.rpc_clients[index].get_latest_blockhash_with_commitment(commitment_config) { Ok((blockhash, last_valid_block_height)) => { - self.optimizer.report(index, duration_as_ms(&now.elapsed())); + self.optimizer + .report(index, now.elapsed().as_millis() as u64); Ok((blockhash, last_valid_block_height)) } Err(e) => { diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 4bcdebf27ec066..0808f9c2532236 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ 
b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -12,10 +12,7 @@ use { shred::{shred_code, ProcessShredsStats, ReedSolomonCache, Shred, ShredFlags, Shredder}, }, solana_sdk::{ - genesis_config::ClusterType, - hash::Hash, - signature::Keypair, - timing::{duration_as_us, AtomicInterval}, + genesis_config::ClusterType, hash::Hash, signature::Keypair, timing::AtomicInterval, }, std::{sync::RwLock, time::Duration}, tokio::sync::mpsc::Sender as AsyncSender, @@ -344,8 +341,8 @@ impl StandardBroadcastRun { process_stats.shredding_elapsed = to_shreds_time.as_us(); process_stats.get_leader_schedule_elapsed = get_leader_schedule_time.as_us(); - process_stats.receive_elapsed = duration_as_us(&receive_elapsed); - process_stats.coalesce_elapsed = duration_as_us(&coalesce_elapsed); + process_stats.receive_elapsed = receive_elapsed.as_micros() as u64; + process_stats.coalesce_elapsed = coalesce_elapsed.as_micros() as u64; process_stats.coding_send_elapsed = coding_send_time.as_us(); self.process_shreds_stats += process_stats; @@ -382,7 +379,7 @@ impl StandardBroadcastRun { .expect("Failed to insert shreds in blockstore"); let insert_shreds_elapsed = insert_shreds_start.elapsed(); let new_insert_shreds_stats = InsertShredsStats { - insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed), + insert_shreds_elapsed: insert_shreds_elapsed.as_micros() as u64, num_shreds, }; self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info); From de4cb11ac23cb7d8bdcfd748c9ef22532c9b5d72 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 15 Aug 2024 07:52:16 +0800 Subject: [PATCH 125/529] refactor: new load_transaction_account function (#2574) --- svm/src/account_loader.rs | 318 ++++++++++++++++++------------- svm/src/transaction_processor.rs | 33 +++- 2 files changed, 209 insertions(+), 142 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index e7a2c9749e8147..5d14c35ec0677c 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -61,8 +61,15 @@ pub struct ValidatedTransactionDetails { pub rollback_accounts: RollbackAccounts, pub compute_budget_limits: ComputeBudgetLimits, pub fee_details: FeeDetails, - pub fee_payer_account: AccountSharedData, - pub fee_payer_rent_debit: u64, + pub loaded_fee_payer_account: LoadedTransactionAccount, +} + +#[derive(PartialEq, Eq, Debug, Clone)] +#[cfg_attr(feature = "dev-context-only-utils", derive(Default))] +pub struct LoadedTransactionAccount { + pub(crate) account: AccountSharedData, + pub(crate) loaded_size: usize, + pub(crate) rent_collected: u64, } #[derive(PartialEq, Eq, Debug, Clone)] @@ -217,8 +224,7 @@ fn load_transaction( let load_result = load_transaction_accounts( callbacks, message, - tx_details.fee_payer_account, - tx_details.fee_payer_rent_debit, + tx_details.loaded_fee_payer_account, &tx_details.compute_budget_limits, error_metrics, account_overrides, @@ -257,12 +263,10 @@ struct LoadedTransactionAccounts { pub loaded_accounts_data_size: u32, } -#[allow(clippy::too_many_arguments)] fn load_transaction_accounts( callbacks: &CB, message: &impl SVMMessage, - fee_payer_account: AccountSharedData, - fee_payer_rent_debit: u64, + loaded_fee_payer_account: LoadedTransactionAccount, compute_budget_limits: &ComputeBudgetLimits, error_metrics: &mut TransactionErrorMetrics, account_overrides: Option<&AccountOverrides>, @@ -283,95 +287,48 @@ fn load_transaction_accounts( .unique() .collect::>(); - let mut collect_account = - |key, account_size, account: AccountSharedData, rent, 
account_found| -> Result<()> { - accumulate_and_check_loaded_account_data_size( - &mut accumulated_accounts_data_size, - account_size, - compute_budget_limits.loaded_accounts_bytes, - error_metrics, - )?; + let mut collect_loaded_account = |key, (loaded_account, found)| -> Result<()> { + let LoadedTransactionAccount { + account, + loaded_size, + rent_collected, + } = loaded_account; + + accumulate_and_check_loaded_account_data_size( + &mut accumulated_accounts_data_size, + loaded_size, + compute_budget_limits.loaded_accounts_bytes, + error_metrics, + )?; - tx_rent += rent; - rent_debits.insert(key, rent, account.lamports()); + tx_rent += rent_collected; + rent_debits.insert(key, rent_collected, account.lamports()); - accounts.push((*key, account)); - accounts_found.push(account_found); - Ok(()) - }; + accounts.push((*key, account)); + accounts_found.push(found); + Ok(()) + }; // Since the fee payer is always the first account, collect it first. Note // that account overrides are already applied during fee payer validation so // it's fine to use the fee payer directly here rather than checking account // overrides again. - collect_account( - message.fee_payer(), - fee_payer_account.data().len(), - fee_payer_account, - fee_payer_rent_debit, - true, // account_found - )?; + collect_loaded_account(message.fee_payer(), (loaded_fee_payer_account, true))?; // Attempt to load and collect remaining non-fee payer accounts - for (i, key) in account_keys.iter().enumerate().skip(1) { - let mut account_found = true; - let is_instruction_account = u8::try_from(i) - .map(|i| instruction_accounts.contains(&&i)) - .unwrap_or(false); - let (account_size, account, rent) = if solana_sdk::sysvar::instructions::check_id(key) { - // Since the instructions sysvar is constructed by the SVM - // and modified for each transaction instruction, it cannot - // be overridden. - ( - 0, /* loaded size */ - construct_instructions_account(message), - 0, /* collected rent */ - ) - } else if let Some(account_override) = - account_overrides.and_then(|overrides| overrides.get(key)) - { - (account_override.data().len(), account_override.clone(), 0) - } else if let Some(program) = (!is_instruction_account && !message.is_writable(i)) - .then_some(()) - .and_then(|_| loaded_programs.find(key)) - { - callbacks - .get_account_shared_data(key) - .ok_or(TransactionError::AccountNotFound)?; - // Optimization to skip loading of accounts which are only used as - // programs in top-level instructions and not passed as instruction accounts. - let program_account = account_shared_data_from_program(&program); - (program.account_size, program_account, 0) - } else { - callbacks - .get_account_shared_data(key) - .map(|mut account| { - if message.is_writable(i) { - let rent_due = collect_rent_from_account( - feature_set, - rent_collector, - key, - &mut account, - ) - .rent_amount; - - (account.data().len(), account, rent_due) - } else { - (account.data().len(), account, 0) - } - }) - .unwrap_or_else(|| { - account_found = false; - let mut default_account = AccountSharedData::default(); - // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). - // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account - // with this field already set would allow us to skip rent collection for these accounts. 
- default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - (default_account.data().len(), default_account, 0) - }) - }; - - collect_account(key, account_size, account, rent, account_found)?; + for (account_index, account_key) in account_keys.iter().enumerate().skip(1) { + let (loaded_account, account_found) = load_transaction_account( + callbacks, + message, + account_key, + account_index, + &instruction_accounts[..], + account_overrides, + feature_set, + rent_collector, + loaded_programs, + )?; + collect_loaded_account(account_key, (loaded_account, account_found))?; } let builtins_start_index = accounts.len(); @@ -441,6 +398,91 @@ fn load_transaction_accounts( }) } +fn load_transaction_account( + callbacks: &CB, + message: &impl SVMMessage, + account_key: &Pubkey, + account_index: usize, + instruction_accounts: &[&u8], + account_overrides: Option<&AccountOverrides>, + feature_set: &FeatureSet, + rent_collector: &RentCollector, + loaded_programs: &ProgramCacheForTxBatch, +) -> Result<(LoadedTransactionAccount, bool)> { + let mut account_found = true; + let is_instruction_account = u8::try_from(account_index) + .map(|i| instruction_accounts.contains(&&i)) + .unwrap_or(false); + let loaded_account = if solana_sdk::sysvar::instructions::check_id(account_key) { + // Since the instructions sysvar is constructed by the SVM and modified + // for each transaction instruction, it cannot be overridden. + LoadedTransactionAccount { + loaded_size: 0, + account: construct_instructions_account(message), + rent_collected: 0, + } + } else if let Some(account_override) = + account_overrides.and_then(|overrides| overrides.get(account_key)) + { + LoadedTransactionAccount { + loaded_size: account_override.data().len(), + account: account_override.clone(), + rent_collected: 0, + } + } else if let Some(program) = (!is_instruction_account && !message.is_writable(account_index)) + .then_some(()) + .and_then(|_| loaded_programs.find(account_key)) + { + callbacks + .get_account_shared_data(account_key) + .ok_or(TransactionError::AccountNotFound)?; + // Optimization to skip loading of accounts which are only used as + // programs in top-level instructions and not passed as instruction accounts. + LoadedTransactionAccount { + loaded_size: program.account_size, + account: account_shared_data_from_program(&program), + rent_collected: 0, + } + } else { + callbacks + .get_account_shared_data(account_key) + .map(|mut account| { + let rent_collected = if message.is_writable(account_index) { + collect_rent_from_account( + feature_set, + rent_collector, + account_key, + &mut account, + ) + .rent_amount + } else { + 0 + }; + + LoadedTransactionAccount { + loaded_size: account.data().len(), + account, + rent_collected, + } + }) + .unwrap_or_else(|| { + account_found = false; + let mut default_account = AccountSharedData::default(); + // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). + // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account + // with this field already set would allow us to skip rent collection for these accounts. + default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + LoadedTransactionAccount { + loaded_size: default_account.data().len(), + account: default_account, + rent_collected: 0, + } + }) + }; + + Ok((loaded_account, account_found)) +} + fn account_shared_data_from_program(loaded_program: &ProgramCacheEntry) -> AccountSharedData { // It's an executable program account. The program is already loaded in the cache. 
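// Illustrative sketch (not part of this patch): the refactor replaces the old
// (size, account, rent) tuple threading with one bundled value, so the fee
// payer validated earlier can be handed straight to account loading. The
// struct and field names are the ones introduced above; the values shown are
// placeholders.
let loaded_fee_payer = LoadedTransactionAccount {
    loaded_size: fee_payer_account.data().len(),
    account: fee_payer_account,
    rent_collected: fee_payer_rent_debit,
};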
// So the account data is not needed. Return a dummy AccountSharedData with meta @@ -577,7 +619,10 @@ mod tests { &callbacks, &[sanitized_tx], vec![Ok(ValidatedTransactionDetails { - fee_payer_account, + loaded_fee_payer_account: LoadedTransactionAccount { + account: fee_payer_account, + ..LoadedTransactionAccount::default() + }, ..ValidatedTransactionDetails::default() })], error_metrics, @@ -1134,11 +1179,11 @@ mod tests { let mut mock_bank = TestCallbacks::default(); let fee_payer_balance = 200; - let mut fee_payer_account_data = AccountSharedData::default(); - fee_payer_account_data.set_lamports(fee_payer_balance); + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(fee_payer_balance); mock_bank .accounts_map - .insert(fee_payer_address, fee_payer_account_data.clone()); + .insert(fee_payer_address, fee_payer_account.clone()); let fee_payer_rent_debit = 42; let mut error_metrics = TransactionErrorMetrics::default(); @@ -1152,8 +1197,11 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - fee_payer_account_data.clone(), - fee_payer_rent_debit, + LoadedTransactionAccount { + loaded_size: fee_payer_account.data().len(), + account: fee_payer_account.clone(), + rent_collected: fee_payer_rent_debit, + }, &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1170,7 +1218,7 @@ mod tests { assert_eq!( result.unwrap(), LoadedTransactionAccounts { - accounts: vec![(fee_payer_address, fee_payer_account_data),], + accounts: vec![(fee_payer_address, fee_payer_account)], program_indices: vec![], rent: fee_payer_rent_debit, rent_debits: expected_rent_debits, @@ -1198,11 +1246,11 @@ mod tests { mock_bank .accounts_map .insert(native_loader::id(), AccountSharedData::default()); - let mut fee_payer_account_data = AccountSharedData::default(); - fee_payer_account_data.set_lamports(200); + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(200); mock_bank .accounts_map - .insert(key1.pubkey(), fee_payer_account_data.clone()); + .insert(key1.pubkey(), fee_payer_account.clone()); let mut error_metrics = TransactionErrorMetrics::default(); let loaded_programs = ProgramCacheForTxBatch::default(); @@ -1215,8 +1263,10 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - fee_payer_account_data.clone(), - 0, // fee_payer_rent_debit + LoadedTransactionAccount { + account: fee_payer_account.clone(), + ..LoadedTransactionAccount::default() + }, &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1229,7 +1279,7 @@ mod tests { result.unwrap(), LoadedTransactionAccounts { accounts: vec![ - (key1.pubkey(), fee_payer_account_data), + (key1.pubkey(), fee_payer_account), ( native_loader::id(), mock_bank.accounts_map[&native_loader::id()].clone() @@ -1277,8 +1327,7 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - AccountSharedData::default(), // fee_payer_account - 0, // fee_payer_rent_debit + LoadedTransactionAccount::default(), &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1323,8 +1372,7 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - AccountSharedData::default(), // fee_payer_account - 0, // fee_payer_rent_debit + LoadedTransactionAccount::default(), &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1369,8 +1417,7 @@ mod tests { let result = load_transaction_accounts( &mock_bank, 
sanitized_transaction.message(), - AccountSharedData::default(), // fee_payer_account - 0, // fee_payer_rent_debit + LoadedTransactionAccount::default(), &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1408,11 +1455,11 @@ mod tests { account_data.set_executable(true); mock_bank.accounts_map.insert(key1.pubkey(), account_data); - let mut fee_payer_account_data = AccountSharedData::default(); - fee_payer_account_data.set_lamports(200); + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(200); mock_bank .accounts_map - .insert(key2.pubkey(), fee_payer_account_data.clone()); + .insert(key2.pubkey(), fee_payer_account.clone()); let mut error_metrics = TransactionErrorMetrics::default(); let loaded_programs = ProgramCacheForTxBatch::default(); @@ -1424,8 +1471,10 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - fee_payer_account_data.clone(), - 0, // fee_payer_rent_debit + LoadedTransactionAccount { + account: fee_payer_account.clone(), + ..LoadedTransactionAccount::default() + }, &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1438,7 +1487,7 @@ mod tests { result.unwrap(), LoadedTransactionAccounts { accounts: vec![ - (key2.pubkey(), fee_payer_account_data), + (key2.pubkey(), fee_payer_account), ( key1.pubkey(), mock_bank.accounts_map[&key1.pubkey()].clone() @@ -1488,8 +1537,7 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - AccountSharedData::default(), // fee_payer_account - 0, // fee_payer_rent_debit + LoadedTransactionAccount::default(), &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1543,8 +1591,7 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - AccountSharedData::default(), // fee_payer_account - 0, // fee_payer_rent_debit + LoadedTransactionAccount::default(), &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1583,11 +1630,11 @@ mod tests { account_data.set_owner(key3.pubkey()); mock_bank.accounts_map.insert(key1.pubkey(), account_data); - let mut fee_payer_account_data = AccountSharedData::default(); - fee_payer_account_data.set_lamports(200); + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(200); mock_bank .accounts_map - .insert(key2.pubkey(), fee_payer_account_data.clone()); + .insert(key2.pubkey(), fee_payer_account.clone()); let mut account_data = AccountSharedData::default(); account_data.set_executable(true); @@ -1605,8 +1652,10 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - fee_payer_account_data.clone(), - 0, // fee_payer_rent_debit + LoadedTransactionAccount { + account: fee_payer_account.clone(), + ..LoadedTransactionAccount::default() + }, &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1619,7 +1668,7 @@ mod tests { result.unwrap(), LoadedTransactionAccounts { accounts: vec![ - (key2.pubkey(), fee_payer_account_data), + (key2.pubkey(), fee_payer_account), ( key1.pubkey(), mock_bank.accounts_map[&key1.pubkey()].clone() @@ -1669,11 +1718,11 @@ mod tests { account_data.set_owner(key3.pubkey()); mock_bank.accounts_map.insert(key1.pubkey(), account_data); - let mut fee_payer_account_data = AccountSharedData::default(); - fee_payer_account_data.set_lamports(200); + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(200); mock_bank .accounts_map - .insert(key2.pubkey(), 
fee_payer_account_data.clone()); + .insert(key2.pubkey(), fee_payer_account.clone()); let mut account_data = AccountSharedData::default(); account_data.set_executable(true); @@ -1691,8 +1740,10 @@ mod tests { let result = load_transaction_accounts( &mock_bank, sanitized_transaction.message(), - fee_payer_account_data.clone(), - 0, // fee_payer_rent_debit + LoadedTransactionAccount { + account: fee_payer_account.clone(), + ..LoadedTransactionAccount::default() + }, &ComputeBudgetLimits::default(), &mut error_metrics, None, @@ -1707,7 +1758,7 @@ mod tests { result.unwrap(), LoadedTransactionAccounts { accounts: vec![ - (key2.pubkey(), fee_payer_account_data), + (key2.pubkey(), fee_payer_account), ( key1.pubkey(), mock_bank.accounts_map[&key1.pubkey()].clone() @@ -1823,11 +1874,11 @@ mod tests { account_data.set_owner(key3.pubkey()); mock_bank.accounts_map.insert(key1.pubkey(), account_data); - let mut fee_payer_account_data = AccountSharedData::default(); - fee_payer_account_data.set_lamports(200); + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(200); mock_bank .accounts_map - .insert(key2.pubkey(), fee_payer_account_data.clone()); + .insert(key2.pubkey(), fee_payer_account.clone()); let mut account_data = AccountSharedData::default(); account_data.set_executable(true); @@ -1843,7 +1894,10 @@ mod tests { false, ); let validation_result = Ok(ValidatedTransactionDetails { - fee_payer_account: fee_payer_account_data, + loaded_fee_payer_account: LoadedTransactionAccount { + account: fee_payer_account, + ..LoadedTransactionAccount::default() + }, ..ValidatedTransactionDetails::default() }); diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index b43fb3429557e4..bc83f4b0b86ba8 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -4,8 +4,9 @@ use { crate::{ account_loader::{ collect_rent_from_account, load_accounts, validate_fee_payer, - CheckedTransactionDetails, LoadedTransaction, TransactionCheckResult, - TransactionLoadResult, TransactionValidationResult, ValidatedTransactionDetails, + CheckedTransactionDetails, LoadedTransaction, LoadedTransactionAccount, + TransactionCheckResult, TransactionLoadResult, TransactionValidationResult, + ValidatedTransactionDetails, }, account_overrides::AccountOverrides, message_processor::MessageProcessor, @@ -467,10 +468,13 @@ impl TransactionBatchProcessor { Ok(ValidatedTransactionDetails { fee_details, - fee_payer_account, - fee_payer_rent_debit, rollback_accounts, compute_budget_limits, + loaded_fee_payer_account: LoadedTransactionAccount { + loaded_size: fee_payer_account.data().len(), + account: fee_payer_account, + rent_collected: fee_payer_rent_debit, + }, }) } @@ -1874,8 +1878,11 @@ mod tests { ), compute_budget_limits, fee_details: FeeDetails::new(transaction_fee, priority_fee, false), - fee_payer_rent_debit, - fee_payer_account: post_validation_fee_payer_account, + loaded_fee_payer_account: LoadedTransactionAccount { + loaded_size: fee_payer_account.data().len(), + account: post_validation_fee_payer_account, + rent_collected: fee_payer_rent_debit, + }, }) ); } @@ -1947,8 +1954,11 @@ mod tests { ), compute_budget_limits, fee_details: FeeDetails::new(transaction_fee, 0, false), - fee_payer_rent_debit, - fee_payer_account: post_validation_fee_payer_account, + loaded_fee_payer_account: LoadedTransactionAccount { + loaded_size: fee_payer_account.data().len(), + account: post_validation_fee_payer_account, + rent_collected: fee_payer_rent_debit, + } 
}) ); } @@ -2194,8 +2204,11 @@ mod tests { ), compute_budget_limits, fee_details: FeeDetails::new(transaction_fee, priority_fee, false), - fee_payer_rent_debit: 0, // rent due - fee_payer_account: post_validation_fee_payer_account, + loaded_fee_payer_account: LoadedTransactionAccount { + loaded_size: fee_payer_account.data().len(), + account: post_validation_fee_payer_account, + rent_collected: 0, + } }) ); } From 86d60206328100664b1f33beb8fbea4bac8d7975 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 14 Aug 2024 22:16:16 -0400 Subject: [PATCH 126/529] ff cleanup: deprecate_unused_legacy_vote_plumbing (#2585) --- programs/vote/benches/process_vote.rs | 2 +- programs/vote/src/vote_state/mod.rs | 57 +++++++-------------------- sdk/program/src/vote/state/mod.rs | 30 +++----------- 3 files changed, 21 insertions(+), 68 deletions(-) diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index aea812237140dc..2e19923597fb53 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -49,7 +49,7 @@ fn create_accounts() -> (Slot, SlotHashes, Vec, Vec = vec![0; VoteState::size_of()]; let versioned = VoteStateVersions::new_current(vote_state); diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 21a5c0426beffc..d9485c47d10384 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -626,9 +626,6 @@ pub fn process_new_vote_state( let timely_vote_credits = feature_set.map_or(false, |f| { f.is_active(&feature_set::timely_vote_credits::id()) }); - let deprecate_unused_legacy_vote_plumbing = feature_set.map_or(false, |f| { - f.is_active(&feature_set::deprecate_unused_legacy_vote_plumbing::id()) - }); let mut earned_credits = if timely_vote_credits { 0_u64 } else { 1_u64 }; if let Some(new_root) = new_root { @@ -641,7 +638,6 @@ pub fn process_new_vote_state( .checked_add(vote_state.credits_for_vote_at_index( current_vote_state_index, timely_vote_credits, - deprecate_unused_legacy_vote_plumbing, )) .expect("`earned_credits` does not overflow"); } @@ -756,17 +752,10 @@ pub fn process_vote_unfiltered( epoch: Epoch, current_slot: Slot, timely_vote_credits: bool, - deprecate_unused_legacy_vote_plumbing: bool, ) -> Result<(), VoteError> { check_slots_are_valid(vote_state, vote_slots, &vote.hash, slot_hashes)?; vote_slots.iter().for_each(|s| { - vote_state.process_next_vote_slot( - *s, - epoch, - current_slot, - timely_vote_credits, - deprecate_unused_legacy_vote_plumbing, - ) + vote_state.process_next_vote_slot(*s, epoch, current_slot, timely_vote_credits) }); Ok(()) } @@ -778,7 +767,6 @@ pub fn process_vote( epoch: Epoch, current_slot: Slot, timely_vote_credits: bool, - deprecate_unused_legacy_vote_plumbing: bool, ) -> Result<(), VoteError> { if vote.slots.is_empty() { return Err(VoteError::EmptySlots); @@ -801,7 +789,6 @@ pub fn process_vote( epoch, current_slot, timely_vote_credits, - deprecate_unused_legacy_vote_plumbing, ) } @@ -819,7 +806,6 @@ pub fn process_vote_unchecked(vote_state: &mut VoteState, vote: Vote) -> Result< vote_state.current_epoch(), 0, true, - true, ) } @@ -1095,8 +1081,6 @@ pub fn process_vote_with_account( let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; let timely_vote_credits = feature_set.is_active(&feature_set::timely_vote_credits::id()); - let deprecate_unused_legacy_vote_plumbing = - feature_set.is_active(&feature_set::deprecate_unused_legacy_vote_plumbing::id()); process_vote( &mut vote_state, 
vote, @@ -1104,7 +1088,6 @@ pub fn process_vote_with_account( clock.epoch, clock.slot, timely_vote_credits, - deprecate_unused_legacy_vote_plumbing, )?; if let Some(timestamp) = vote.timestamp { vote.slots @@ -1342,7 +1325,7 @@ mod tests { 134, 135, ] .into_iter() - .for_each(|v| vote_state.process_next_vote_slot(v, 4, 0, false, true)); + .for_each(|v| vote_state.process_next_vote_slot(v, 4, 0, false)); let version1_14_11_serialized = bincode::serialize(&VoteStateVersions::V1_14_11(Box::new( VoteState1_14_11::from(vote_state.clone()), @@ -1823,11 +1806,11 @@ mod tests { let slot_hashes: Vec<_> = vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); assert_eq!( - process_vote(&mut vote_state_a, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state_a, &vote, &slot_hashes, 0, 0, true), Ok(()) ); assert_eq!( - process_vote(&mut vote_state_b, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state_b, &vote, &slot_hashes, 0, 0, true), Ok(()) ); assert_eq!(recent_votes(&vote_state_a), recent_votes(&vote_state_b)); @@ -1840,12 +1823,12 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true), Ok(()) ); let recent = recent_votes(&vote_state); assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true), Err(VoteError::VoteTooOld) ); assert_eq!(recent, recent_votes(&vote_state)); @@ -1905,7 +1888,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true), Ok(()) ); assert_eq!( @@ -1921,7 +1904,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true), Ok(()) ); @@ -1940,7 +1923,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true), Ok(()) ); @@ -1957,7 +1940,7 @@ mod tests { let vote = Vote::new(vec![], Hash::default()); assert_eq!( - process_vote(&mut vote_state, &vote, &[], 0, 0, true, true), + process_vote(&mut vote_state, &vote, &[], 0, 0, true), Err(VoteError::EmptySlots) ); } @@ -2232,7 +2215,6 @@ mod tests { let mut feature_set = FeatureSet::default(); feature_set.activate(&feature_set::timely_vote_credits::id(), 1); - feature_set.activate(&feature_set::deprecate_unused_legacy_vote_plumbing::id(), 1); // For each vote group, process all vote groups leading up to it and it itself, and ensure that the number of // credits earned is correct for both regular votes and vote state updates @@ -2257,7 +2239,6 @@ mod tests { 0, vote_group.1, // vote_group.1 is the slot in which the vote was cast true, - true ), Ok(()) ); @@ -2367,7 +2348,6 @@ mod tests { let mut feature_set = FeatureSet::default(); feature_set.activate(&feature_set::timely_vote_credits::id(), 1); - 
feature_set.activate(&feature_set::deprecate_unused_legacy_vote_plumbing::id(), 1); // Initial vote state let mut vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); @@ -3141,7 +3121,7 @@ mod tests { // error with `VotesTooOldAllFiltered` let slot_hashes = vec![(3, Hash::new_unique()), (2, Hash::new_unique())]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true), Err(VoteError::VotesTooOldAllFiltered) ); @@ -3155,7 +3135,7 @@ mod tests { .1; let vote = Vote::new(vec![old_vote_slot, vote_slot], vote_slot_hash); - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true).unwrap(); + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true).unwrap(); assert_eq!( vote_state .votes @@ -3184,17 +3164,8 @@ mod tests { .unwrap() .1; let vote = Vote::new(vote_slots, vote_hash); - process_vote_unfiltered( - &mut vote_state, - &vote.slots, - &vote, - slot_hashes, - 0, - 0, - true, - true, - ) - .unwrap(); + process_vote_unfiltered(&mut vote_state, &vote.slots, &vote, slot_hashes, 0, 0, true) + .unwrap(); } vote_state diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index abac8f5abff61f..630e9a0354befe 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -52,9 +52,6 @@ pub const VOTE_CREDITS_GRACE_SLOTS: u8 = 2; // Maximum number of credits to award for a vote; this number of credits is awarded to votes on slots that land within the grace period. After that grace period, vote credits are reduced. pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 16; -// Previous max per slot -pub const VOTE_CREDITS_MAXIMUM_PER_SLOT_OLD: u8 = 8; - #[cfg_attr( feature = "frozen-abi", frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH"), @@ -668,7 +665,6 @@ impl VoteState { epoch: Epoch, current_slot: Slot, timely_vote_credits: bool, - deprecate_unused_legacy_vote_plumbing: bool, ) { // Ignore votes for slots earlier than we already have votes for if self @@ -681,7 +677,7 @@ impl VoteState { self.pop_expired_votes(next_vote_slot); let landed_vote = LandedVote { - latency: if timely_vote_credits || !deprecate_unused_legacy_vote_plumbing { + latency: if timely_vote_credits { Self::compute_vote_latency(next_vote_slot, current_slot) } else { 0 @@ -691,11 +687,7 @@ impl VoteState { // Once the stack is full, pop the oldest lockout and distribute rewards if self.votes.len() == MAX_LOCKOUT_HISTORY { - let credits = self.credits_for_vote_at_index( - 0, - timely_vote_credits, - deprecate_unused_legacy_vote_plumbing, - ); + let credits = self.credits_for_vote_at_index(0, timely_vote_credits); let landed_vote = self.votes.pop_front().unwrap(); self.root_slot = Some(landed_vote.slot()); @@ -740,37 +732,27 @@ impl VoteState { } /// Returns the credits to award for a vote at the given lockout slot index - pub fn credits_for_vote_at_index( - &self, - index: usize, - timely_vote_credits: bool, - deprecate_unused_legacy_vote_plumbing: bool, - ) -> u64 { + pub fn credits_for_vote_at_index(&self, index: usize, timely_vote_credits: bool) -> u64 { let latency = self .votes .get(index) .map_or(0, |landed_vote| landed_vote.latency); - let max_credits = if deprecate_unused_legacy_vote_plumbing { - VOTE_CREDITS_MAXIMUM_PER_SLOT - } else { - VOTE_CREDITS_MAXIMUM_PER_SLOT_OLD - }; // If latency is 0, this means that the Lockout was created and stored from a software version that did not // store vote latencies; in this case, 1 credit is 
awarded - if latency == 0 || (deprecate_unused_legacy_vote_plumbing && !timely_vote_credits) { + if latency == 0 || !timely_vote_credits { 1 } else { match latency.checked_sub(VOTE_CREDITS_GRACE_SLOTS) { None | Some(0) => { // latency was <= VOTE_CREDITS_GRACE_SLOTS, so maximum credits are awarded - max_credits as u64 + VOTE_CREDITS_MAXIMUM_PER_SLOT as u64 } Some(diff) => { // diff = latency - VOTE_CREDITS_GRACE_SLOTS, and diff > 0 // Subtract diff from VOTE_CREDITS_MAXIMUM_PER_SLOT which is the number of credits to award - match max_credits.checked_sub(diff) { + match VOTE_CREDITS_MAXIMUM_PER_SLOT.checked_sub(diff) { // If diff >= VOTE_CREDITS_MAXIMUM_PER_SLOT, 1 credit is awarded None | Some(0) => 1, From 01782ccfcbc1722eaccb5094a73cedcc37ce2571 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 15 Aug 2024 11:38:01 +0800 Subject: [PATCH 127/529] ci: split local-cluster test into 10 jobs (#2576) * ci: split local-cluster test into 10 jobs * ci: reduce timeout minutes for local-cluster --- .buildkite/scripts/build-stable.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/scripts/build-stable.sh b/.buildkite/scripts/build-stable.sh index f20ca1db358402..e9799f11884ce2 100755 --- a/.buildkite/scripts/build-stable.sh +++ b/.buildkite/scripts/build-stable.sh @@ -26,9 +26,9 @@ local_cluster_partitions=$( { "name": "local-cluster", "command": "ci/docker-run-default-image.sh ci/stable/run-local-cluster-partially.sh", - "timeout_in_minutes": 30, + "timeout_in_minutes": 15, "agent": "$agent", - "parallelism": 5, + "parallelism": 10, "retry": 3 } EOF From 060dfcceb0c3231409134017e1f6f64ed4a30c7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 11:38:15 +0800 Subject: [PATCH 128/529] build(deps): bump indexmap from 2.3.0 to 2.4.0 (#2590) * build(deps): bump indexmap from 2.3.0 to 2.4.0 Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.3.0 to 2.4.0. - [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.3.0...2.4.0) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9aedd912f45fc5..251353a6c1ebfc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2580,7 +2580,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.3.0", + "indexmap 2.4.0", "slab", "tokio", "tokio-util 0.7.11", @@ -2951,9 +2951,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -5179,7 +5179,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "itoa", "ryu", "serde", @@ -5542,7 +5542,7 @@ dependencies = [ "dashmap", "ed25519-dalek", "index_list", - "indexmap 2.3.0", + "indexmap 2.4.0", "itertools 0.12.1", "lazy_static", "libsecp256k1", @@ -6045,7 +6045,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.3.0", + "indexmap 2.4.0", "indicatif", "log", "quinn", @@ -6135,7 +6135,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.3.0", + "indexmap 2.4.0", "indicatif", "log", "rand 0.8.5", @@ -6503,7 +6503,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.3.0", + "indexmap 2.4.0", "itertools 0.12.1", "log", "lru", @@ -7651,7 +7651,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.3.0", + "indexmap 2.4.0", "itertools 0.12.1", "libc", "log", @@ -7831,7 +7831,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.3.0", + "indexmap 2.4.0", "indicatif", "pickledb", "serde", @@ -7882,7 +7882,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.3.0", + "indexmap 2.4.0", "indicatif", "log", "rayon", @@ -9114,7 +9114,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "toml_datetime", "winnow 0.5.16", ] @@ -9125,7 +9125,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index ae808b2ae00012..9537a7debf3d38 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -248,7 +248,7 @@ hyper = "0.14.30" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.13" -indexmap = "2.3.0" +indexmap = "2.4.0" indicatif = "0.17.8" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d83fab753de125..fb3026c0d02eb2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1941,7 +1941,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - 
"indexmap 2.3.0", + "indexmap 2.4.0", "slab", "tokio", "tokio-util 0.7.1", @@ -2286,9 +2286,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4311,7 +4311,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "itoa", "ryu", "serde", @@ -4556,7 +4556,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "index_list", - "indexmap 2.3.0", + "indexmap 2.4.0", "itertools 0.12.1", "lazy_static", "log", @@ -4801,7 +4801,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.3.0", + "indexmap 2.4.0", "indicatif", "log", "quinn", @@ -4859,7 +4859,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.3.0", + "indexmap 2.4.0", "log", "rand 0.8.5", "rayon", @@ -5097,7 +5097,7 @@ dependencies = [ "clap", "crossbeam-channel", "flate2", - "indexmap 2.3.0", + "indexmap 2.4.0", "itertools 0.12.1", "log", "lru", @@ -6395,7 +6395,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.3.0", + "indexmap 2.4.0", "itertools 0.12.1", "libc", "log", @@ -6526,7 +6526,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.3.0", + "indexmap 2.4.0", "indicatif", "log", "rayon", @@ -7575,7 +7575,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "toml_datetime", "winnow", ] From fd93c43e9c6fc35b2f9e21f23b79b0336e9d08f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 11:38:27 +0800 Subject: [PATCH 129/529] build(deps): bump js-sys from 0.3.69 to 0.3.70 (#2589) * build(deps): bump js-sys from 0.3.69 to 0.3.70 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.69 to 0.3.70. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 251353a6c1ebfc..df9d1d37ccbc4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3043,9 +3043,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] diff --git a/Cargo.toml b/Cargo.toml index 9537a7debf3d38..8ab0a611841c61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -254,7 +254,7 @@ itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.69" +js-sys = "0.3.70" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index fb3026c0d02eb2..829ec1e3bcbb1f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2367,9 +2367,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] From 92acf9426daddbb5f7bc1af51804f177cf0c234b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 11:38:38 +0800 Subject: [PATCH 130/529] build(deps): bump ctrlc from 3.4.4 to 3.4.5 (#2588) Bumps [ctrlc](https://github.com/Detegr/rust-ctrlc) from 3.4.4 to 3.4.5. - [Release notes](https://github.com/Detegr/rust-ctrlc/releases) - [Commits](https://github.com/Detegr/rust-ctrlc/compare/3.4.4...3.4.5) --- updated-dependencies: - dependency-name: ctrlc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 97 +++++++++++++++++++++++++++++++++--------------------- Cargo.toml | 2 +- 2 files changed, 60 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df9d1d37ccbc4b..12d05d7a2de96b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,7 +132,7 @@ dependencies = [ "dirs-next", "indicatif", "lazy_static", - "nix", + "nix 0.28.0", "reqwest", "scopeguard", "semver 1.0.23", @@ -1319,7 +1319,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -1740,12 +1740,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.4" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ - "nix", - "windows-sys 0.52.0", + "nix 0.29.0", + "windows-sys 0.59.0", ] [[package]] @@ -3599,6 +3599,18 @@ dependencies = [ "memoffset 0.9.1", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.6.0", + "cfg-if 1.0.0", + "cfg_aliases 0.2.1", + "libc", +] + [[package]] name = "no-std-compat" version = "0.4.1" @@ -6818,7 +6830,7 @@ dependencies = [ "clap 3.2.23", "crossbeam-channel", "log", - "nix", + "nix 0.28.0", "rand 0.8.5", "serde", "serde_derive", @@ -6871,7 +6883,7 @@ dependencies = [ "lazy_static", "libc", "log", - "nix", + "nix 0.28.0", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", @@ -7655,7 +7667,7 @@ dependencies = [ "itertools 0.12.1", "libc", "log", - "nix", + "nix 0.28.0", "pem", "percentage", "quinn", @@ -9669,7 +9681,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" dependencies = [ "windows-core", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -9679,7 +9691,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" dependencies = [ "windows-result", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -9688,7 +9700,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -9706,7 +9718,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -9726,18 +9747,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = 
"9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -9748,9 +9769,9 @@ checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -9760,9 +9781,9 @@ checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -9772,15 +9793,15 @@ checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -9790,9 +9811,9 @@ checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -9802,9 +9823,9 @@ checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -9814,9 +9835,9 @@ checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ 
-9826,9 +9847,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" diff --git a/Cargo.toml b/Cargo.toml index 8ab0a611841c61..9852e973507b70 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -209,7 +209,7 @@ criterion = "0.5.1" criterion-stats = "0.3.0" crossbeam-channel = "0.5.13" csv = "1.3.0" -ctrlc = "3.4.4" +ctrlc = "3.4.5" curve25519-dalek = "3.2.1" dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } From a5bde5fbdc061e30763530049625590265b26de5 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 15 Aug 2024 13:10:42 +0000 Subject: [PATCH 131/529] customizes override logic for gossip ContactInfo (#2579) If there are two running instances of the same node, we want the ContactInfo with more recent start time to be propagated through gossip regardless of wallclocks. The commit adds custom override logic for ContactInfo to first compare by outset timestamp. --- gossip/src/contact_info.rs | 43 ++++++++++++++++++++++++++++ gossip/src/crds.rs | 52 ++++++++++++++++++---------------- gossip/src/crds_gossip_pull.rs | 2 +- 3 files changed, 71 insertions(+), 26 deletions(-) diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 5e4f5b27cac04a..bf9a25241b25a4 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -14,6 +14,7 @@ use { solana_streamer::socket::SocketAddrSpace, static_assertions::const_assert_eq, std::{ + cmp::Ordering, collections::HashSet, net::{IpAddr, Ipv4Addr, SocketAddr}, time::{SystemTime, UNIX_EPOCH}, @@ -443,6 +444,24 @@ impl ContactInfo { pub(crate) fn check_duplicate(&self, other: &ContactInfo) -> bool { self.pubkey == other.pubkey && self.outset < other.outset } + + // Returns None if the contact-infos have different pubkey. + // Otherwise returns true if (self.outset, self.wallclock) tuple is larger + // than (other.outset, other.wallclock). + // If the tuples are equal it returns None. + #[inline] + #[must_use] + pub(crate) fn overrides(&self, other: &ContactInfo) -> Option { + if self.pubkey != other.pubkey { + return None; + } + let other = (other.outset, other.wallclock); + match (self.outset, self.wallclock).cmp(&other) { + Ordering::Less => Some(false), + Ordering::Greater => Some(true), + Ordering::Equal => None, + } + } } impl Default for ContactInfo { @@ -1038,6 +1057,8 @@ mod tests { let other = node.clone(); assert!(!node.check_duplicate(&other)); assert!(!other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), None); + assert_eq!(other.overrides(&node), None); } // Updated socket address is not a duplicate instance. { @@ -1046,9 +1067,13 @@ mod tests { while other.set_serve_repair(new_rand_socket(&mut rng)).is_err() {} assert!(!node.check_duplicate(&other)); assert!(!other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), None); + assert_eq!(other.overrides(&node), None); other.remove_serve_repair(); assert!(!node.check_duplicate(&other)); assert!(!other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), None); + assert_eq!(other.overrides(&node), None); } // Updated wallclock is not a duplicate instance. 
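// Illustrative recap (not part of this patch): for two instances of the same
// node, `overrides` orders them by the (outset, wallclock) tuple, so the
// restarted instance (larger outset) wins regardless of wallclock skew.
// `older`, `newer`, and `unrelated_node` are hypothetical ContactInfo values
// here; the assertions mirror the tests in this file.
assert_eq!(older.overrides(&newer), Some(false)); // same pubkey, smaller outset
assert_eq!(newer.overrides(&older), Some(true)); // same pubkey, larger outset
assert_eq!(older.overrides(&older.clone()), None); // identical (outset, wallclock)
assert_eq!(older.overrides(&unrelated_node), None); // different pubkeys never compare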
{ @@ -1056,6 +1081,14 @@ mod tests { node.set_wallclock(rng.gen()); assert!(!node.check_duplicate(&other)); assert!(!other.check_duplicate(&node)); + assert_eq!( + node.overrides(&other), + Some(other.wallclock < node.wallclock) + ); + assert_eq!( + other.overrides(&node), + Some(node.wallclock < other.wallclock) + ); } // Different pubkey is not a duplicate instance. { @@ -1066,6 +1099,8 @@ mod tests { ); assert!(!node.check_duplicate(&other)); assert!(!other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), None); + assert_eq!(other.overrides(&node), None); } // Same pubkey, more recent outset timestamp is a duplicate instance. { @@ -1077,6 +1112,14 @@ mod tests { assert!(node.outset < other.outset); assert!(node.check_duplicate(&other)); assert!(!other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), Some(false)); + assert_eq!(other.overrides(&node), Some(true)); + node.set_wallclock(other.wallclock); + assert!(node.outset < other.outset); + assert!(node.check_duplicate(&other)); + assert!(!other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), Some(false)); + assert_eq!(other.overrides(&node), Some(true)); } } } diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index e0205e14e62988..087ce66a7455b6 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -196,14 +196,21 @@ impl Default for Crds { // Both values should have the same key/label. fn overrides(value: &CrdsValue, other: &VersionedCrdsValue) -> bool { assert_eq!(value.label(), other.value.label(), "labels mismatch!"); - // Node instances are special cased so that if there are two running - // instances of the same node, the more recent start is propagated through - // gossip regardless of wallclocks. + // Contact-infos and node instances are special cased so that if there are + // two running instances of the same node, the more recent start is + // propagated through gossip regardless of wallclocks. if let CrdsData::NodeInstance(value) = &value.data { if let Some(out) = value.overrides(&other.value) { return out; } } + if let CrdsData::ContactInfo(value) = &value.data { + if let CrdsData::ContactInfo(other) = &other.value.data { + if let Some(out) = value.overrides(other) { + return out; + } + } + } match value.wallclock().cmp(&other.value.wallclock()) { Ordering::Less => false, Ordering::Greater => true, @@ -1334,15 +1341,17 @@ mod tests { let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey)); let wallclock = node.wallclock(); node.set_shred_version(42); - let node = CrdsData::ContactInfo(node); - let node = CrdsValue::new_unsigned(node); - assert_eq!( - crds.insert(node, timestamp(), GossipRoute::LocalMessage), - Ok(()) - ); + { + let node = CrdsData::ContactInfo(node.clone()); + let node = CrdsValue::new_unsigned(node); + assert_eq!( + crds.insert(node, timestamp(), GossipRoute::LocalMessage), + Ok(()) + ); + } assert_eq!(crds.get_shred_version(&pubkey), Some(42)); // An outdated value should not update shred-version: - let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey)); + let mut node = node.clone(); node.set_wallclock(wallclock - 1); // outdated. 
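// Worked scenario for the crds-level change above (illustrative, not part of
// this patch): after a restart, a node's new ContactInfo carries a larger
// `outset` but, with clock skew, possibly a smaller `wallclock`. The plain
// wallclock rule would keep the stale value; the ContactInfo special case
// defers to ContactInfo::overrides, which returns Some(true) for the
// restarted instance, so the fresh value still propagates through gossip.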
node.set_shred_version(8); let node = CrdsData::ContactInfo(node); @@ -1481,20 +1490,17 @@ mod tests { #[test] #[allow(clippy::neg_cmp_op_on_partial_ord)] fn test_hash_order() { + let mut node = ContactInfo::new_localhost(&Pubkey::default(), 0); let v1 = VersionedCrdsValue::new( - CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &Pubkey::default(), - 0, - ))), + CrdsValue::new_unsigned(CrdsData::ContactInfo(node.clone())), Cursor::default(), 1, // local_timestamp GossipRoute::LocalMessage, ); let v2 = VersionedCrdsValue::new( { - let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0); - contact_info.set_rpc((Ipv4Addr::LOCALHOST, 1244)).unwrap(); - CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info)) + node.set_rpc((Ipv4Addr::LOCALHOST, 1244)).unwrap(); + CrdsValue::new_unsigned(CrdsData::ContactInfo(node)) }, Cursor::default(), 1, // local_timestamp @@ -1517,20 +1523,16 @@ mod tests { #[test] #[allow(clippy::neg_cmp_op_on_partial_ord)] fn test_wallclock_order() { + let mut node = ContactInfo::new_localhost(&Pubkey::default(), 1); let v1 = VersionedCrdsValue::new( - CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &Pubkey::default(), - 1, - ))), + CrdsValue::new_unsigned(CrdsData::ContactInfo(node.clone())), Cursor::default(), 1, // local_timestamp GossipRoute::LocalMessage, ); + node.set_wallclock(0); let v2 = VersionedCrdsValue::new( - CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &Pubkey::default(), - 0, - ))), + CrdsValue::new_unsigned(CrdsData::ContactInfo(node)), Cursor::default(), 1, // local_timestamp GossipRoute::LocalMessage, diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index c708f595343968..b227ed99734e1b 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -1148,6 +1148,7 @@ pub(crate) mod tests { let mut dest_crds = Crds::default(); let new_id = solana_sdk::pubkey::new_rand(); + let same_key = ContactInfo::new_localhost(&new_id, 0); let new = ContactInfo::new_localhost(&new_id, 1); ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now()); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); @@ -1157,7 +1158,6 @@ pub(crate) mod tests { let dest_crds = RwLock::new(dest_crds); // node contains a key from the dest node, but at an older local timestamp - let same_key = ContactInfo::new_localhost(&new_id, 0); ping_cache.mock_pong( *same_key.pubkey(), same_key.gossip().unwrap(), From 94bfbd56eeaec46c829a0824e06e512ae17f4174 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 15 Aug 2024 08:41:51 -0500 Subject: [PATCH 132/529] inline try_new for internal meta (#2593) --- transaction-view/src/address_table_lookup_meta.rs | 1 + transaction-view/src/instructions_meta.rs | 1 + transaction-view/src/message_header_meta.rs | 1 + transaction-view/src/signature_meta.rs | 1 + transaction-view/src/static_account_keys_meta.rs | 1 + 5 files changed, 5 insertions(+) diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs index 4f92547ef2ef7c..0d5e93b2023635 100644 --- a/transaction-view/src/address_table_lookup_meta.rs +++ b/transaction-view/src/address_table_lookup_meta.rs @@ -57,6 +57,7 @@ impl AddressTableLookupMeta { /// ATL. /// This function will parse each ATL to ensure the data is well-formed, /// but will not cache data related to these ATLs. 
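// Hypothetical sketch of the parse pass these helpers serve (the caller is
// not shown in this patch): each meta parser consumes bytes at a shared
// cursor and is called once per packet, so #[inline(always)] folds the small
// parsers into a single outer parsing frame. Recent-blockhash handling is
// elided.
let mut offset = 0;
let signature_meta = SignatureMeta::try_new(bytes, &mut offset)?;
let header_meta = MessageHeaderMeta::try_new(bytes, &mut offset)?;
let static_keys_meta = StaticAccountKeysMeta::try_new(bytes, &mut offset)?;
let instructions_meta = InstructionsMeta::try_new(bytes, &mut offset)?;
let atl_meta = AddressTableLookupMeta::try_new(bytes, &mut offset)?;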
+ #[inline(always)] pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Maximum number of ATLs should be represented by a single byte, // thus the MSB should not be set. diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_meta.rs index 9a6d5e3dd72c0a..8d68019f932c66 100644 --- a/transaction-view/src/instructions_meta.rs +++ b/transaction-view/src/instructions_meta.rs @@ -24,6 +24,7 @@ impl InstructionsMeta { /// This function will parse each individual instruction to ensure the /// instruction data is well-formed, but will not cache data related to /// these instructions. + #[inline(always)] pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Read the number of instructions at the current offset. // Each instruction needs at least 3 bytes, so do a sanity check here to diff --git a/transaction-view/src/message_header_meta.rs b/transaction-view/src/message_header_meta.rs index c144db2f4e8edc..2b83571d154494 100644 --- a/transaction-view/src/message_header_meta.rs +++ b/transaction-view/src/message_header_meta.rs @@ -33,6 +33,7 @@ pub(crate) struct MessageHeaderMeta { } impl MessageHeaderMeta { + #[inline(always)] pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Get the message offset. // We know the offset does not exceed packet length, and our packet diff --git a/transaction-view/src/signature_meta.rs b/transaction-view/src/signature_meta.rs index 2d8a8231f1cd0b..8d98554e195a11 100644 --- a/transaction-view/src/signature_meta.rs +++ b/transaction-view/src/signature_meta.rs @@ -27,6 +27,7 @@ pub(crate) struct SignatureMeta { impl SignatureMeta { /// Get the number of signatures and the offset to the first signature in /// the transaction packet, starting at the given `offset`. + #[inline(always)] pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Maximum number of signatures should be represented by a single byte, // thus the MSB should not be set. diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_meta.rs index 9dc26fd2d354fd..bea6a3e7394442 100644 --- a/transaction-view/src/static_account_keys_meta.rs +++ b/transaction-view/src/static_account_keys_meta.rs @@ -23,6 +23,7 @@ pub struct StaticAccountKeysMeta { } impl StaticAccountKeysMeta { + #[inline(always)] pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Max size must not have the MSB set so that it is size 1. const _: () = assert!(MAX_STATIC_ACCOUNTS_PER_PACKET & 0b1000_0000 == 0); From f46fda34a92f9dbd18cdc141f79f6e9ca0ed1d46 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 15 Aug 2024 08:42:04 -0500 Subject: [PATCH 133/529] AddressTableLookupMeta::try_new fix variable names (#2595) --- transaction-view/src/address_table_lookup_meta.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs index 0d5e93b2023635..28ecc67adc51ed 100644 --- a/transaction-view/src/address_table_lookup_meta.rs +++ b/transaction-view/src/address_table_lookup_meta.rs @@ -88,12 +88,12 @@ impl AddressTableLookupMeta { advance_offset_for_type::(bytes, offset)?; // Read the number of write indexes, and then update the offset. 
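// Aside (illustrative): these counts use Solana's compact-u16 encoding,
// seven value bits per byte with the high bit as a continuation flag:
//   127 -> [0x7f]       (fits one byte, MSB clear)
//   128 -> [0x80, 0x01] (continuation byte required)
// which is why a count required to fit one byte must never have its MSB set.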
- let num_accounts = optimized_read_compressed_u16(bytes, offset)?; - advance_offset_for_array::(bytes, offset, num_accounts)?; + let num_write_accounts = optimized_read_compressed_u16(bytes, offset)?; + advance_offset_for_array::(bytes, offset, num_write_accounts)?; // Read the number of read indexes, and then update the offset. - let data_len = optimized_read_compressed_u16(bytes, offset)?; - advance_offset_for_array::(bytes, offset, data_len)? + let num_read_accounts = optimized_read_compressed_u16(bytes, offset)?; + advance_offset_for_array::(bytes, offset, num_read_accounts)? } Ok(Self { From ecb44d7bd7bcfcbf1f342eb7eaf2728e7b6eefc5 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 15 Aug 2024 22:51:32 +0800 Subject: [PATCH 134/529] fix: ensure vote packets can be retried (#2605) --- .../banking_stage/latest_unprocessed_votes.rs | 114 +++++++++++++----- .../unprocessed_transaction_storage.rs | 86 ++++++++++--- 2 files changed, 152 insertions(+), 48 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 52b520b8a0322b..6bff6fa34eb06c 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -18,6 +18,7 @@ use { }, solana_vote_program::vote_instruction::VoteInstruction, std::{ + cmp, collections::HashMap, ops::DerefMut, sync::{ @@ -174,12 +175,13 @@ impl LatestUnprocessedVotes { pub(crate) fn insert_batch( &self, votes: impl Iterator, + should_replenish_taken_votes: bool, ) -> VoteBatchInsertionMetrics { let mut num_dropped_gossip = 0; let mut num_dropped_tpu = 0; for vote in votes { - if let Some(vote) = self.update_latest_vote(vote) { + if let Some(vote) = self.update_latest_vote(vote, should_replenish_taken_votes) { match vote.vote_source { VoteSource::Gossip => num_dropped_gossip += 1, VoteSource::Tpu => num_dropped_tpu += 1, @@ -207,26 +209,41 @@ impl LatestUnprocessedVotes { pub fn update_latest_vote( &self, vote: LatestValidatorVotePacket, + should_replenish_taken_votes: bool, ) -> Option { let pubkey = vote.pubkey(); let slot = vote.slot(); let timestamp = vote.timestamp(); + // Allow votes for later slots or the same slot with later timestamp (refreshed votes) + // We directly compare as options to prioritize votes for same slot with timestamp as + // Some > None + let allow_update = |latest_vote: &LatestValidatorVotePacket| -> bool { + match slot.cmp(&latest_vote.slot()) { + cmp::Ordering::Less => return false, + cmp::Ordering::Greater => return true, + cmp::Ordering::Equal => {} + }; + + // Slots are equal, now check timestamp + match timestamp.cmp(&latest_vote.timestamp()) { + cmp::Ordering::Less => return false, + cmp::Ordering::Greater => return true, + cmp::Ordering::Equal => {} + }; + + // Timestamps are equal, lastly check if vote was taken previously + // and should be replenished + should_replenish_taken_votes && latest_vote.is_vote_taken() + }; + let with_latest_vote = |latest_vote: &RwLock, vote: LatestValidatorVotePacket| -> Option { - let (latest_slot, latest_timestamp) = latest_vote - .read() - .map(|vote| (vote.slot(), vote.timestamp())) - .unwrap(); - // Allow votes for later slots or the same slot with later timestamp (refreshed votes) - // We directly compare as options to prioritize votes for same slot with timestamp as - // Some > None - if slot > latest_slot || ((slot == latest_slot) && (timestamp > latest_timestamp)) { + let should_try_update = allow_update(&latest_vote.read().unwrap()); + if 
should_try_update { let mut latest_vote = latest_vote.write().unwrap(); - let latest_slot = latest_vote.slot(); - let latest_timestamp = latest_vote.timestamp(); - if slot > latest_slot || ((slot == latest_slot) && (timestamp > latest_timestamp)) { + if allow_update(&latest_vote) { let old_vote = std::mem::replace(latest_vote.deref_mut(), vote); if old_vote.is_vote_taken() { self.num_unprocessed_votes.fetch_add(1, Ordering::Relaxed); @@ -573,10 +590,10 @@ mod tests { ); assert!(latest_unprocessed_votes - .update_latest_vote(vote_a) + .update_latest_vote(vote_a, false /* should replenish */) .is_none()); assert!(latest_unprocessed_votes - .update_latest_vote(vote_b) + .update_latest_vote(vote_b, false /* should replenish */) .is_none()); assert_eq!(2, latest_unprocessed_votes.len()); @@ -606,7 +623,7 @@ mod tests { assert_eq!( 1, latest_unprocessed_votes - .update_latest_vote(vote_a) + .update_latest_vote(vote_a, false /* should replenish */) .unwrap() .slot ); @@ -614,7 +631,7 @@ mod tests { assert_eq!( 6, latest_unprocessed_votes - .update_latest_vote(vote_b) + .update_latest_vote(vote_b, false /* should replenish */) .unwrap() .slot ); @@ -634,8 +651,8 @@ mod tests { &keypair_b, None, ); - latest_unprocessed_votes.update_latest_vote(vote_a); - latest_unprocessed_votes.update_latest_vote(vote_b); + latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); + latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( @@ -664,8 +681,8 @@ mod tests { &keypair_b, Some(2), ); - latest_unprocessed_votes.update_latest_vote(vote_a); - latest_unprocessed_votes.update_latest_vote(vote_b); + latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); + latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( @@ -690,8 +707,8 @@ mod tests { &keypair_b, Some(6), ); - latest_unprocessed_votes.update_latest_vote(vote_a); - latest_unprocessed_votes.update_latest_vote(vote_b); + latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); + latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( @@ -716,8 +733,10 @@ mod tests { &keypair_b, Some(3), ); - latest_unprocessed_votes.update_latest_vote(vote_a); - latest_unprocessed_votes.update_latest_vote(vote_b); + latest_unprocessed_votes + .update_latest_vote(vote_a.clone(), false /* should replenish */); + latest_unprocessed_votes + .update_latest_vote(vote_b.clone(), false /* should replenish */); assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( @@ -728,6 +747,33 @@ mod tests { Some(6), latest_unprocessed_votes.get_latest_timestamp(keypair_b.node_keypair.pubkey()) ); + + // Drain all latest votes + for packet in latest_unprocessed_votes + .latest_votes_per_pubkey + .read() + .unwrap() + .values() + { + packet.write().unwrap().take_vote().inspect(|_vote| { + latest_unprocessed_votes + .num_unprocessed_votes + .fetch_sub(1, Ordering::Relaxed); + }); + } + assert_eq!(0, latest_unprocessed_votes.len()); + + // Same votes with same timestamps should not replenish without flag + latest_unprocessed_votes + .update_latest_vote(vote_a.clone(), false /* should replenish */); + latest_unprocessed_votes + .update_latest_vote(vote_b.clone(), false /* should replenish */); + assert_eq!(0, latest_unprocessed_votes.len()); + + // Same votes 
with same timestamps should replenish with the flag
+        latest_unprocessed_votes.update_latest_vote(vote_a, true /* should replenish */);
+        latest_unprocessed_votes.update_latest_vote(vote_b, true /* should replenish */);
+        assert_eq!(2, latest_unprocessed_votes.len());
     }
 
     #[test]
@@ -748,7 +794,7 @@ mod tests {
                            keypairs: &Arc<Vec<ValidatorVoteKeypairs>>,
                            i: usize| {
             let vote = from_slots(vec![(i as u64, 1)], VoteSource::Gossip, &keypairs[i], None);
-            latest_unprocessed_votes.update_latest_vote(vote);
+            latest_unprocessed_votes.update_latest_vote(vote, false /* should replenish */);
         };
 
         let hdl = Builder::new()
@@ -793,7 +839,8 @@ mod tests {
                         &keypairs[rng.gen_range(0..10)],
                         None,
                     );
-                    latest_unprocessed_votes.update_latest_vote(vote);
+                    latest_unprocessed_votes
+                        .update_latest_vote(vote, false /* should replenish */);
                 }
             })
             .unwrap();
@@ -808,7 +855,8 @@ mod tests {
                         &keypairs_tpu[rng.gen_range(0..10)],
                         None,
                     );
-                    latest_unprocessed_votes_tpu.update_latest_vote(vote);
+                    latest_unprocessed_votes_tpu
+                        .update_latest_vote(vote, false /* should replenish */);
                     if i % 214 == 0 {
                         // Simulate draining and processing packets
                         let latest_votes_per_pubkey = latest_unprocessed_votes_tpu
@@ -844,8 +892,8 @@ mod tests {
         let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None);
         let vote_b = from_slots(vec![(2, 1)], VoteSource::Tpu, &keypair_b, None);
 
-        latest_unprocessed_votes.update_latest_vote(vote_a);
-        latest_unprocessed_votes.update_latest_vote(vote_b);
+        latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */);
+        latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */);
 
         // Don't forward 0 stake accounts
         let forwarded = latest_unprocessed_votes
@@ -939,10 +987,10 @@ mod tests {
         let vote_c = from_slots(vec![(3, 1)], VoteSource::Tpu, &keypair_c, None);
         let vote_d = from_slots(vec![(4, 1)], VoteSource::Gossip, &keypair_d, None);
 
-        latest_unprocessed_votes.update_latest_vote(vote_a);
-        latest_unprocessed_votes.update_latest_vote(vote_b);
-        latest_unprocessed_votes.update_latest_vote(vote_c);
-        latest_unprocessed_votes.update_latest_vote(vote_d);
+        latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */);
+        latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */);
+        latest_unprocessed_votes.update_latest_vote(vote_c, false /* should replenish */);
+        latest_unprocessed_votes.update_latest_vote(vote_d, false /* should replenish */);
         assert_eq!(4, latest_unprocessed_votes.len());
 
         latest_unprocessed_votes.clear_forwarded_packets();
diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs
index 428e871bcac6cf..212b7f4f2f48d8 100644
--- a/core/src/banking_stage/unprocessed_transaction_storage.rs
+++ b/core/src/banking_stage/unprocessed_transaction_storage.rs
@@ -443,18 +443,18 @@ impl VoteStorage {
         &mut self,
         deserialized_packets: Vec<ImmutableDeserializedPacket>,
     ) -> VoteBatchInsertionMetrics {
-        self.latest_unprocessed_votes
-            .insert_batch(
-                deserialized_packets
-                    .into_iter()
-                    .filter_map(|deserialized_packet| {
-                        LatestValidatorVotePacket::new_from_immutable(
-                            Arc::new(deserialized_packet),
-                            self.vote_source,
-                        )
-                        .ok()
-                    }),
-            )
+        self.latest_unprocessed_votes.insert_batch(
+            deserialized_packets
+                .into_iter()
+                .filter_map(|deserialized_packet| {
+                    LatestValidatorVotePacket::new_from_immutable(
+                        Arc::new(deserialized_packet),
+                        self.vote_source,
+                    )
+                    .ok()
+                }),
+            false, // should_replenish_taken_votes
+        )
     }
 
     fn filter_forwardable_packets_and_add_batches(
@@
-525,12 +525,15 @@ impl VoteStorage { ) .ok() }), + true, // should_replenish_taken_votes ); } else { - self.latest_unprocessed_votes - .insert_batch(vote_packets.into_iter().filter_map(|packet| { + self.latest_unprocessed_votes.insert_batch( + vote_packets.into_iter().filter_map(|packet| { LatestValidatorVotePacket::new_from_immutable(packet, self.vote_source).ok() - })); + }), + true, // should_replenish_taken_votes + ); } } @@ -988,6 +991,7 @@ mod tests { super::*, solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_perf::packet::{Packet, PacketFlags}, + solana_runtime::genesis_utils, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, @@ -1256,6 +1260,58 @@ mod tests { Ok(()) } + #[test] + fn test_process_packets_retryable_indexes_reinserted() -> Result<(), Box> { + let node_keypair = Keypair::new(); + let genesis_config = + genesis_utils::create_genesis_config_with_leader(100, &node_keypair.pubkey(), 200) + .genesis_config; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let vote_keypair = Keypair::new(); + let mut vote = Packet::from_data( + None, + new_tower_sync_transaction( + TowerSync::default(), + Hash::new_unique(), + &node_keypair, + &vote_keypair, + &vote_keypair, + None, + ), + )?; + vote.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true); + + let mut transaction_storage = UnprocessedTransactionStorage::new_vote_storage( + Arc::new(LatestUnprocessedVotes::new()), + VoteSource::Tpu, + ); + + transaction_storage.insert_batch(vec![ImmutableDeserializedPacket::new(vote.clone())?]); + assert_eq!(1, transaction_storage.len()); + + // When processing packets, return all packets as retryable so that they + // are reinserted into storage + let _ = transaction_storage.process_packets( + bank.clone(), + &BankingStageStats::default(), + &mut LeaderSlotMetricsTracker::new(0), + |packets, _payload| { + // Return all packets indexes as retryable + Some( + packets + .iter() + .enumerate() + .map(|(index, _packet)| index) + .collect_vec(), + ) + }, + ); + + // All packets should remain in the transaction storage + assert_eq!(1, transaction_storage.len()); + Ok(()) + } + #[test] fn test_prepare_packets_to_forward() { solana_logger::setup(); From 6476d5fac0c30d1f49d13eae118b89be78fb15d2 Mon Sep 17 00:00:00 2001 From: Jon C Date: Thu, 15 Aug 2024 12:34:44 -0400 Subject: [PATCH 135/529] genesis: Make "cluster-type" aware of features (#2587) * genesis: Make "cluster-type" aware of features #### Problem The `--cluster-type` parameter in solana-genesis mentions cluster features, but refers to things like epoch length and hashes per tick. This behavior is confusing because it doesn't include the feature set. #### Summary of changes For "mainnet-beta", "testnet", and "devnet", clone the cluster feature set, and deactivate the appropriate features. 
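For illustration, a hypothetical invocation (all other required genesis
arguments elided) that mirrors testnet's currently active feature set:

    solana-genesis --cluster-type testnet --url testnet ...

The new `--url`/`-u` argument (added below) selects the RPC endpoint whose
feature activation state is cloned; when omitted, it falls back to the
`--cluster-type` value as an RPC moniker.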
* Allow deactivating individual features with --cluster-type --- Cargo.lock | 2 + genesis/Cargo.toml | 2 + genesis/src/main.rs | 111 ++++++++++++++++++++++++++++++++++++++------ 3 files changed, 100 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 12d05d7a2de96b..87962889b38e51 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6461,6 +6461,8 @@ dependencies = [ "solana-entry", "solana-ledger", "solana-logger", + "solana-rpc-client", + "solana-rpc-client-api", "solana-runtime", "solana-sdk", "solana-stake-program", diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index cd8e652a86b7ad..010f34a5bff997 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -23,6 +23,8 @@ solana-cli-config = { workspace = true } solana-entry = { workspace = true } solana-ledger = { workspace = true } solana-logger = { workspace = true } +solana-rpc-client = { workspace = true } +solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } diff --git a/genesis/src/main.rs b/genesis/src/main.rs index dc9f2ba7031531..46b0ceadd77614 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -11,17 +11,23 @@ use { cluster_type_of, pubkey_of, pubkeys_of, unix_timestamp_from_rfc3339_datetime, }, input_validators::{ - is_pubkey, is_pubkey_or_keypair, is_rfc3339_datetime, is_slot, is_valid_percentage, + is_pubkey, is_pubkey_or_keypair, is_rfc3339_datetime, is_slot, is_url_or_moniker, + is_valid_percentage, normalize_to_url_if_moniker, }, }, solana_entry::poh::compute_hashes_per_tick, solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account}, solana_ledger::{blockstore::create_new_ledger, blockstore_options::LedgerColumnOptions}, + solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client_api::request::MAX_MULTIPLE_ACCOUNTS, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable::UpgradeableLoaderState, clock, + commitment_config::CommitmentConfig, epoch_schedule::EpochSchedule, + feature, + feature_set::FEATURE_NAMES, fee_calculator::FeeRateGovernor, genesis_config::{ClusterType, GenesisConfig}, inflation::Inflation, @@ -106,6 +112,70 @@ pub fn load_genesis_accounts(file: &str, genesis_config: &mut GenesisConfig) -> Ok(lamports) } +fn check_rpc_genesis_hash( + cluster_type: &ClusterType, + rpc_client: &RpcClient, +) -> Result<(), Box> { + if let Some(genesis_hash) = cluster_type.get_genesis_hash() { + let rpc_genesis_hash = rpc_client.get_genesis_hash()?; + if rpc_genesis_hash != genesis_hash { + return Err(format!( + "The genesis hash for the specified cluster {cluster_type:?} does not match the \ + genesis hash reported by the specified RPC. 
Cluster genesis hash: \ + {genesis_hash}, RPC reported genesis hash: {rpc_genesis_hash}" + ) + .into()); + } + } + Ok(()) +} + +fn features_to_deactivate_for_cluster( + cluster_type: &ClusterType, + matches: &ArgMatches<'_>, +) -> Result, Box> { + let mut features_to_deactivate = pubkeys_of(matches, "deactivate_feature").unwrap_or_default(); + if cluster_type == &ClusterType::Development { + return Ok(features_to_deactivate); + } + + // if we're here, the cluster type must be one of "mainnet-beta", "testnet", or "devnet" + assert!(matches!( + cluster_type, + ClusterType::MainnetBeta | ClusterType::Testnet | ClusterType::Devnet + )); + let json_rpc_url = normalize_to_url_if_moniker( + matches + .value_of("json_rpc_url") + .unwrap_or(matches.value_of("cluster_type").unwrap()), + ); + let rpc_client = RpcClient::new_with_commitment(json_rpc_url, CommitmentConfig::confirmed()); + check_rpc_genesis_hash(cluster_type, &rpc_client)?; + for feature_ids in FEATURE_NAMES + .keys() + .cloned() + .collect::>() + .chunks(MAX_MULTIPLE_ACCOUNTS) + { + rpc_client + .get_multiple_accounts(feature_ids) + .map_err(|err| format!("Failed to fetch: {err}"))? + .into_iter() + .zip(feature_ids) + .for_each(|(maybe_account, feature_id)| { + if maybe_account + .as_ref() + .and_then(feature::from_account) + .and_then(|feature| feature.activated_at) + .is_none() + { + features_to_deactivate.push(*feature_id); + } + }); + } + Ok(features_to_deactivate) +} + #[allow(clippy::cognitive_complexity)] fn main() -> Result<(), Box> { let default_faucet_pubkey = solana_cli_config::Config::default().keypair_path; @@ -407,6 +477,20 @@ fn main() -> Result<(), Box> { .possible_values(&["pico", "full", "none"]) .help("Selects inflation"), ) + .arg( + Arg::with_name("json_rpc_url") + .short("u") + .long("url") + .value_name("URL_OR_MONIKER") + .takes_value(true) + .global(true) + .validator(is_url_or_moniker) + .help( + "URL for Solana's JSON RPC or moniker (or their first letter): \ + [mainnet-beta, testnet, devnet, localhost]. 
Used for cloning \ + feature sets", + ), + ) .get_matches(); let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap()); @@ -480,12 +564,11 @@ fn main() -> Result<(), Box> { let cluster_type = cluster_type_of(&matches, "cluster_type").unwrap(); // Get the features to deactivate if provided - let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); - - if cluster_type != ClusterType::Development && !features_to_deactivate.is_empty() { - eprintln!("Error: The --deativate-feature argument cannot be used with --cluster-type={cluster_type:?}"); - std::process::exit(1); - } + let features_to_deactivate = features_to_deactivate_for_cluster(&cluster_type, &matches) + .unwrap_or_else(|e| { + eprintln!("{e}"); + std::process::exit(1); + }); match matches.value_of("hashes_per_tick").unwrap() { "auto" => match cluster_type { @@ -594,14 +677,12 @@ fn main() -> Result<(), Box> { } solana_stake_program::add_genesis_accounts(&mut genesis_config); - if genesis_config.cluster_type == ClusterType::Development { - solana_runtime::genesis_utils::activate_all_features(&mut genesis_config); - if !features_to_deactivate.is_empty() { - solana_runtime::genesis_utils::deactivate_features( - &mut genesis_config, - &features_to_deactivate, - ); - } + solana_runtime::genesis_utils::activate_all_features(&mut genesis_config); + if !features_to_deactivate.is_empty() { + solana_runtime::genesis_utils::deactivate_features( + &mut genesis_config, + &features_to_deactivate, + ); } if let Some(files) = matches.values_of("primordial_accounts_file") { From 993d223410608b0dceb3d65340f4f9e62569451b Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 15 Aug 2024 14:25:25 -0400 Subject: [PATCH 136/529] vote: deprecate legacy vote instructions (#587) * vote: deprecate legacy vote instructions * fix tests --- .../banking_stage/latest_unprocessed_votes.rs | 44 +------ core/src/banking_stage/qos_service.rs | 12 +- .../unprocessed_packet_batches.rs | 7 +- core/src/cluster_info_vote_listener.rs | 45 ++++--- core/src/commitment_service.rs | 27 +++-- core/src/replay_stage.rs | 14 +-- core/src/vote_simulator.rs | 39 +++++- cost-model/src/cost_tracker.rs | 7 +- cost-model/src/transaction_cost.rs | 9 +- ledger/src/blockstore_processor.rs | 32 +++-- local-cluster/src/cluster_tests.rs | 8 +- local-cluster/tests/local_cluster.rs | 7 +- perf/src/test_tx.rs | 8 +- programs/vote/src/vote_processor.rs | 112 ++++++++++++------ rpc/src/rpc.rs | 18 +-- .../src/bank/partitioned_epoch_rewards/mod.rs | 11 +- runtime/tests/stake.rs | 32 ++++- sdk/program/src/vote/state/mod.rs | 33 ++++++ sdk/src/feature_set.rs | 5 + 19 files changed, 275 insertions(+), 195 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 6bff6fa34eb06c..29ab17b7b3d11c 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -424,8 +424,7 @@ mod tests { }, solana_sdk::{hash::Hash, signature::Signer, system_transaction::transfer}, solana_vote_program::{ - vote_state::TowerSync, - vote_transaction::{new_tower_sync_transaction, new_vote_transaction}, + vote_state::TowerSync, vote_transaction::new_tower_sync_transaction, }, std::{sync::Arc, thread::Builder}, }; @@ -467,40 +466,8 @@ mod tests { #[test] fn test_deserialize_vote_packets() { let keypairs = ValidatorVoteKeypairs::new_rand(); - let bankhash = Hash::new_unique(); let blockhash = Hash::new_unique(); let switch_proof = 
Hash::new_unique(); - let mut vote = Packet::from_data( - None, - new_vote_transaction( - vec![0, 1, 2], - bankhash, - blockhash, - &keypairs.node_keypair, - &keypairs.vote_keypair, - &keypairs.vote_keypair, - None, - ), - ) - .unwrap(); - vote.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true); - let mut vote_switch = Packet::from_data( - None, - new_vote_transaction( - vec![0, 1, 2], - bankhash, - blockhash, - &keypairs.node_keypair, - &keypairs.vote_keypair, - &keypairs.vote_keypair, - Some(switch_proof), - ), - ) - .unwrap(); - vote_switch - .meta_mut() - .flags - .set(PacketFlags::SIMPLE_VOTE_TX, true); let mut tower_sync = Packet::from_data( None, new_tower_sync_transaction( @@ -543,13 +510,8 @@ mod tests { ), ) .unwrap(); - let packet_batch = PacketBatch::new(vec![ - vote, - vote_switch, - tower_sync, - tower_sync_switch, - random_transaction, - ]); + let packet_batch = + PacketBatch::new(vec![tower_sync, tower_sync_switch, random_transaction]); let deserialized_packets = deserialize_packets( &packet_batch, diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index afa871277cce42..6fe35c46f54e03 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -598,7 +598,7 @@ mod tests { signature::{Keypair, Signer}, system_transaction, }, - solana_vote_program::vote_transaction, + solana_vote_program::{vote_state::TowerSync, vote_transaction}, std::sync::Arc, }; @@ -612,9 +612,8 @@ mod tests { system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default()), ); let vote_tx = SanitizedTransaction::from_transaction_for_tests( - vote_transaction::new_vote_transaction( - vec![42], - Hash::default(), + vote_transaction::new_tower_sync_transaction( + TowerSync::from(vec![(42, 1)]), Hash::default(), &keypair, &keypair, @@ -656,9 +655,8 @@ mod tests { system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default()), ); let vote_tx = SanitizedTransaction::from_transaction_for_tests( - vote_transaction::new_vote_transaction( - vec![42], - Hash::default(), + vote_transaction::new_tower_sync_transaction( + TowerSync::from(vec![(42, 1)]), Hash::default(), &keypair, &keypair, diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index 2bec44dbd0ea5e..f92eeb09c57b54 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -315,7 +315,7 @@ mod tests { system_instruction, system_transaction, transaction::{SimpleAddressLoader, Transaction}, }, - solana_vote_program::vote_transaction, + solana_vote_program::{vote_state::TowerSync, vote_transaction}, }; fn simple_deserialized_packet() -> DeserializedPacket { @@ -467,9 +467,8 @@ mod tests { let keypair = Keypair::new(); let transfer_tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default()); - let vote_tx = vote_transaction::new_vote_transaction( - vec![42], - Hash::default(), + let vote_tx = vote_transaction::new_tower_sync_transaction( + TowerSync::from(vec![(42, 1)]), Hash::default(), &keypair, &keypair, diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index a4306dcbea2ea4..d8a8670f585e56 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -734,7 +734,7 @@ mod tests { signature::{Keypair, Signature, Signer}, }, solana_vote_program::{ - vote_state::{TowerSync, Vote}, + vote_state::{TowerSync, Vote, 
MAX_LOCKOUT_HISTORY}, vote_transaction, }, std::{ @@ -749,11 +749,9 @@ mod tests { solana_logger::setup(); let node_keypair = Keypair::new(); let vote_keypair = Keypair::new(); - let slots: Vec<_> = (0..31).collect(); - - let vote_tx = vote_transaction::new_vote_transaction( - slots, - Hash::default(), + let tower_sync = TowerSync::new_from_slot(MAX_LOCKOUT_HISTORY as u64, Hash::default()); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, Hash::default(), &node_keypair, &vote_keypair, @@ -918,12 +916,12 @@ mod tests { votes_sender: &VerifiedVoteTransactionsSender, replay_votes_sender: &ReplayVoteSender, ) { + let tower_sync = TowerSync::new_from_slots(gossip_vote_slots, Hash::default(), None); validator_voting_keypairs.iter().for_each(|keypairs| { let node_keypair = &keypairs.node_keypair; let vote_keypair = &keypairs.vote_keypair; - let vote_tx = vote_transaction::new_vote_transaction( - gossip_vote_slots.clone(), - Hash::default(), + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync.clone(), Hash::default(), node_keypair, vote_keypair, @@ -1121,9 +1119,10 @@ mod tests { let node_keypair = &keypairs.node_keypair; let vote_keypair = &keypairs.vote_keypair; expected_votes.push((vote_keypair.pubkey(), vec![i as Slot + 1])); - vote_transaction::new_vote_transaction( - vec![i as u64 + 1], - bank_hash, + let tower_sync = + TowerSync::new_from_slots(vec![(i as u64 + 1)], bank_hash, None); + vote_transaction::new_tower_sync_transaction( + tower_sync, Hash::default(), node_keypair, vote_keypair, @@ -1218,9 +1217,10 @@ mod tests { for &e in &events { if e == 0 || e == 2 { // Create vote transaction - let vote_tx = vote_transaction::new_vote_transaction( - vec![vote_slot], - vote_bank_hash, + let tower_sync = + TowerSync::new_from_slots(vec![(vote_slot)], vote_bank_hash, None); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, Hash::default(), node_keypair, vote_keypair, @@ -1315,10 +1315,9 @@ mod tests { // in the tracker let validator0_keypairs = &validator_keypairs[0]; let voted_slot = bank.slot() + 1; - let vote_tx = vec![vote_transaction::new_vote_transaction( + let vote_tx = vec![vote_transaction::new_tower_sync_transaction( // Must vote > root to be processed - vec![voted_slot], - Hash::default(), + TowerSync::from(vec![(voted_slot, 1)]), Hash::default(), &validator0_keypairs.node_keypair, &validator0_keypairs.vote_keypair, @@ -1362,10 +1361,9 @@ mod tests { let vote_txs: Vec<_> = [first_slot_in_new_epoch - 1, first_slot_in_new_epoch] .iter() .map(|slot| { - vote_transaction::new_vote_transaction( + vote_transaction::new_tower_sync_transaction( // Must vote > root to be processed - vec![*slot], - Hash::default(), + TowerSync::from(vec![(*slot, 1)]), Hash::default(), &validator0_keypairs.node_keypair, &validator0_keypairs.vote_keypair, @@ -1462,9 +1460,8 @@ mod tests { let validator_vote_keypair = validator_vote_keypairs.unwrap_or(&other); // TODO authorized_voter_keypair should be different from vote-keypair // but that is what create_genesis_... currently generates. 
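// Editor's note (illustration, not part of the patch): in the test updates
// below, TowerSync::from(vec![(slot, confirmation_count)]) is shorthand for
// a one-entry tower; spelled out with the explicit constructor it is roughly:
//
//     TowerSync::new(
//         VecDeque::from([Lockout::new_with_confirmation_count(0, 1)]),
//         None,            // no root
//         Hash::default(), // bank hash
//         Hash::default(), // block_id
//     )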
- vote_transaction::new_vote_transaction( - vec![0], - Hash::default(), + vote_transaction::new_tower_sync_transaction( + TowerSync::from(vec![(0, 1)]), Hash::default(), &validator_vote_keypair.node_keypair, &validator_vote_keypair.vote_keypair, diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index cae40c587cb572..03c04ad9ab4c96 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -277,7 +277,10 @@ mod tests { solana_sdk::{account::Account, pubkey::Pubkey, signature::Signer}, solana_stake_program::stake_state, solana_vote_program::{ - vote_state::{self, process_slot_vote_unchecked, VoteStateVersions}, + vote_state::{ + self, process_slot_vote_unchecked, TowerSync, VoteStateVersions, + MAX_LOCKOUT_HISTORY, + }, vote_transaction, }, }; @@ -568,9 +571,9 @@ mod tests { &Pubkey::default(), x + 1, ); - let vote = vote_transaction::new_vote_transaction( - vec![x], - previous_bank.hash(), + let tower_sync = TowerSync::new_from_slot(x, previous_bank.hash()); + let vote = vote_transaction::new_tower_sync_transaction( + tower_sync, previous_bank.last_blockhash(), &validator_vote_keypairs.node_keypair, &validator_vote_keypairs.vote_keypair, @@ -601,9 +604,9 @@ mod tests { &Pubkey::default(), 34, ); - let vote33 = vote_transaction::new_vote_transaction( - vec![33], - bank33.hash(), + let tower_sync = TowerSync::new_from_slot(33, bank33.hash()); + let vote33 = vote_transaction::new_tower_sync_transaction( + tower_sync, bank33.last_blockhash(), &validator_vote_keypairs.node_keypair, &validator_vote_keypairs.vote_keypair, @@ -683,9 +686,13 @@ mod tests { &Pubkey::default(), x + 1, ); - let vote = vote_transaction::new_vote_transaction( - vec![x], - previous_bank.hash(), + // Skip 34 as it is not part of this fork. 
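// Editor's worked example (MAX_LOCKOUT_HISTORY is 31 in this codebase): with
// x = 40 the tower built below covers lowest_slot = 40 - 31 = 9 through slot
// 40, filters out slot 34, and declares root Some(8).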
+ let lowest_slot = x - MAX_LOCKOUT_HISTORY as u64; + let slots: Vec<_> = (lowest_slot..(x + 1)).filter(|s| *s != 34).collect(); + let tower_sync = + TowerSync::new_from_slots(slots, previous_bank.hash(), Some(lowest_slot - 1)); + let vote = vote_transaction::new_tower_sync_transaction( + tower_sync, previous_bank.last_blockhash(), &validator_vote_keypairs.node_keypair, &validator_vote_keypairs.vote_keypair, diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index cca977768c0436..7f7d0f61157d9c 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -4501,7 +4501,7 @@ pub(crate) mod tests { solana_streamer::socket::SocketAddrSpace, solana_transaction_status::VersionedTransactionWithStatusMeta, solana_vote_program::{ - vote_state::{self, VoteStateVersions}, + vote_state::{self, TowerSync, VoteStateVersions}, vote_transaction, }, std::{ @@ -5464,9 +5464,9 @@ pub(crate) mod tests { LatestValidatorVotesForFrozenBanks::default(); let bank0 = bank_forks.read().unwrap().get(0).unwrap(); let my_keypairs = keypairs.get(&my_node_pubkey).unwrap(); - let vote_tx = vote_transaction::new_vote_transaction( - vec![0], - bank0.hash(), + let tower_sync = TowerSync::new_from_slots(vec![0], bank0.hash(), None); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, bank0.last_blockhash(), &my_keypairs.node_keypair, &my_keypairs.vote_keypair, @@ -6408,9 +6408,9 @@ pub(crate) mod tests { // Process a vote for slot 0 in bank 5 let validator0_keypairs = &validator_keypairs.get(&sender).unwrap(); let bank0 = bank_forks.read().unwrap().get(0).unwrap(); - let vote_tx = vote_transaction::new_vote_transaction( - vec![0], - bank0.hash(), + let tower_sync = TowerSync::new_from_slots(vec![0], bank0.hash(), None); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, bank0.last_blockhash(), &validator0_keypairs.node_keypair, &validator0_keypairs.vote_keypair, diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 7a09a7111f0b07..f886d2821af4b0 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -26,9 +26,12 @@ use { }, }, solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signer}, - solana_vote_program::vote_transaction, + solana_vote_program::{ + vote_state::{process_vote_unchecked, Lockout, TowerSync}, + vote_transaction, + }, std::{ - collections::{HashMap, HashSet}, + collections::{HashMap, HashSet, VecDeque}, sync::{Arc, RwLock}, }, trees::{tr, Tree, TreeWalk}, @@ -98,10 +101,34 @@ impl VoteSimulator { if vote.contains(&parent) { let keypairs = self.validator_keypairs.get(pubkey).unwrap(); let latest_blockhash = parent_bank.last_blockhash(); - let vote_tx = vote_transaction::new_vote_transaction( - // Must vote > root to be processed - vec![parent], - parent_bank.hash(), + let tower_sync = if let Some(vote_account) = + parent_bank.get_vote_account(&keypairs.vote_keypair.pubkey()) + { + let mut vote_state = vote_account.vote_state().unwrap().clone(); + process_vote_unchecked( + &mut vote_state, + solana_vote_program::vote_state::Vote::new( + vec![parent], + parent_bank.hash(), + ), + ) + .unwrap(); + TowerSync::new( + vote_state.votes.iter().map(|vote| vote.lockout).collect(), + vote_state.root_slot, + parent_bank.hash(), + Hash::default(), + ) + } else { + TowerSync::new( + VecDeque::from([Lockout::new(parent)]), + Some(root), + parent_bank.hash(), + Hash::default(), + ) + }; + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, latest_blockhash, 
&keypairs.node_keypair, &keypairs.vote_keypair, diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 0c731f946ec3b8..23583068fb13b3 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -396,7 +396,7 @@ mod tests { MessageHash, SanitizedTransaction, SimpleAddressLoader, VersionedTransaction, }, }, - solana_vote_program::vote_transaction, + solana_vote_program::{vote_state::TowerSync, vote_transaction}, std::cmp, }; @@ -438,9 +438,8 @@ mod tests { start_hash: &Hash, ) -> (SanitizedTransaction, TransactionCost) { let keypair = Keypair::new(); - let transaction = vote_transaction::new_vote_transaction( - vec![42], - Hash::default(), + let transaction = vote_transaction::new_tower_sync_transaction( + TowerSync::from(vec![(42, 1)]), *start_hash, mint_keypair, &keypair, diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index 4951e50036ca8b..9db5832a114a42 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -207,7 +207,7 @@ mod tests { signer::keypair::Keypair, transaction::{MessageHash, SanitizedTransaction, VersionedTransaction}, }, - solana_vote_program::vote_transaction, + solana_vote_program::{vote_state::TowerSync, vote_transaction}, }; #[test] @@ -216,9 +216,8 @@ mod tests { let node_keypair = Keypair::new(); let vote_keypair = Keypair::new(); let auth_keypair = Keypair::new(); - let transaction = vote_transaction::new_vote_transaction( - vec![], - Hash::default(), + let transaction = vote_transaction::new_tower_sync_transaction( + TowerSync::default(), Hash::default(), &node_keypair, &vote_keypair, @@ -249,7 +248,7 @@ mod tests { // expected vote tx cost: 2 write locks, 1 sig, 1 vote ix, 8cu of loaded accounts size, let expected_vote_cost = SIMPLE_VOTE_USAGE_COST; // expected non-vote tx cost would include default loaded accounts size cost (16384) additionally - let expected_none_vote_cost = 20535; + let expected_none_vote_cost = 20543; let vote_cost = CostModel::calculate_cost(&vote_transaction, &FeatureSet::all_enabled()); let none_vote_cost = diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index ec099b4ecfedab..d34543db73993c 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -2247,7 +2247,7 @@ pub mod tests { solana_vote::vote_account::VoteAccount, solana_vote_program::{ self, - vote_state::{VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY}, + vote_state::{TowerSync, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY}, vote_transaction, }, std::{collections::BTreeSet, sync::RwLock}, @@ -4286,13 +4286,13 @@ pub mod tests { .iter() .enumerate() .map(|(i, validator_keypairs)| { + let tower_sync = TowerSync::new_from_slots(vec![0], bank0.hash(), None); if i % 3 == 0 { // These votes are correct expected_successful_voter_pubkeys .insert(validator_keypairs.vote_keypair.pubkey()); - vote_transaction::new_vote_transaction( - vec![0], - bank0.hash(), + vote_transaction::new_tower_sync_transaction( + tower_sync, bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, @@ -4301,9 +4301,8 @@ pub mod tests { ) } else if i % 3 == 1 { // These have the wrong authorized voter - vote_transaction::new_vote_transaction( - vec![0], - bank0.hash(), + vote_transaction::new_tower_sync_transaction( + tower_sync, bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, @@ -4312,9 +4311,8 @@ pub mod tests { ) } else { // These have an invalid 
vote for non-existent bank 2 - vote_transaction::new_vote_transaction( - vec![bank1.slot() + 1], - bank0.hash(), + vote_transaction::new_tower_sync_transaction( + TowerSync::from(vec![(bank1.slot() + 1, 1)]), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, @@ -4452,10 +4450,9 @@ pub mod tests { .get(last_main_fork_slot - 1) .unwrap() .last_blockhash(); - let slots: Vec<_> = (expected_root_slot..last_main_fork_slot).collect(); - let vote_tx = vote_transaction::new_vote_transaction( - slots, - last_vote_bank_hash, + let tower_sync = TowerSync::new_from_slot(last_main_fork_slot - 1, last_vote_bank_hash); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, last_vote_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, @@ -4513,10 +4510,9 @@ pub mod tests { .get(last_main_fork_slot) .unwrap() .last_blockhash(); - let slots: Vec<_> = vec![last_main_fork_slot]; - let vote_tx = vote_transaction::new_vote_transaction( - slots, - last_vote_bank_hash, + let tower_sync = TowerSync::new_from_slot(last_main_fork_slot, last_vote_bank_hash); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, last_vote_blockhash, &leader_keypair, &validator_keypairs.vote_keypair, diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index b46fd67023d649..65aa539e32c9e6 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -37,7 +37,7 @@ use { solana_streamer::socket::SocketAddrSpace, solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig, TpuSenderError}, solana_vote::vote_transaction::VoteTransaction, - solana_vote_program::vote_transaction, + solana_vote_program::{vote_state::TowerSync, vote_transaction}, std::{ collections::{HashMap, HashSet, VecDeque}, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, @@ -677,9 +677,9 @@ pub fn submit_vote_to_cluster_gossip( gossip_addr: SocketAddr, socket_addr_space: &SocketAddrSpace, ) -> Result<(), GossipError> { - let vote_tx = vote_transaction::new_vote_transaction( - vec![vote_slot], - vote_hash, + let tower_sync = TowerSync::new_from_slots(vec![vote_slot], vote_hash, None); + let vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, blockhash, node_keypair, vote_keypair, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 62f7fd32435205..1e62835f91b1a2 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2698,12 +2698,11 @@ fn test_oc_bad_signatures() { // Add all recent vote slots on this fork to allow cluster to pass // vote threshold checks in replay. Note this will instantly force a // root by this validator. 
- let vote_slots: Vec = vec![vote_slot]; + let tower_sync = TowerSync::new_from_slots(vec![vote_slot], vote_hash, None); let bad_authorized_signer_keypair = Keypair::new(); - let mut vote_tx = vote_transaction::new_vote_transaction( - vote_slots, - vote_hash, + let mut vote_tx = vote_transaction::new_tower_sync_transaction( + tower_sync, leader_vote_tx.message.recent_blockhash, &node_keypair, &vote_keypair, diff --git a/perf/src/test_tx.rs b/perf/src/test_tx.rs index befbc83206b281..118f8de15b4217 100644 --- a/perf/src/test_tx.rs +++ b/perf/src/test_tx.rs @@ -10,7 +10,7 @@ use { system_program, system_transaction, transaction::Transaction, }, - solana_vote_program::vote_transaction, + solana_vote_program::{vote_state::TowerSync, vote_transaction}, }; pub fn test_tx() -> Transaction { @@ -60,9 +60,9 @@ where slots.sort_unstable(); slots.dedup(); let switch_proof_hash = rng.gen_bool(0.5).then(Hash::new_unique); - vote_transaction::new_vote_transaction( - slots, - Hash::new_unique(), // bank_hash + let tower_sync = TowerSync::new_from_slots(slots, Hash::default(), None); + vote_transaction::new_tower_sync_transaction( + tower_sync, Hash::new_unique(), // blockhash &Keypair::new(), // node_keypair &Keypair::new(), // vote_keypair diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 748a8e9d6915d5..ea6514420cd0d2 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -133,6 +133,15 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ) } VoteInstruction::Vote(vote) | VoteInstruction::VoteSwitch(vote, _) => { + if invoke_context + .get_feature_set() + .is_active(&feature_set::deprecate_legacy_vote_ixs::id()) + && invoke_context + .get_feature_set() + .is_active(&feature_set::enable_tower_sync_ix::id()) + { + return Err(InstructionError::InvalidInstructionData); + } let slot_hashes = get_sysvar_with_account_check::slot_hashes(invoke_context, instruction_context, 1)?; let clock = @@ -148,6 +157,15 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } VoteInstruction::UpdateVoteState(vote_state_update) | VoteInstruction::UpdateVoteStateSwitch(vote_state_update, _) => { + if invoke_context + .get_feature_set() + .is_active(&feature_set::deprecate_legacy_vote_ixs::id()) + && invoke_context + .get_feature_set() + .is_active(&feature_set::enable_tower_sync_ix::id()) + { + return Err(InstructionError::InvalidInstructionData); + } let sysvar_cache = invoke_context.get_sysvar_cache(); let slot_hashes = sysvar_cache.get_slot_hashes()?; let clock = sysvar_cache.get_clock()?; @@ -162,6 +180,15 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } VoteInstruction::CompactUpdateVoteState(vote_state_update) | VoteInstruction::CompactUpdateVoteStateSwitch(vote_state_update, _) => { + if invoke_context + .get_feature_set() + .is_active(&feature_set::deprecate_legacy_vote_ixs::id()) + && invoke_context + .get_feature_set() + .is_active(&feature_set::enable_tower_sync_ix::id()) + { + return Err(InstructionError::InvalidInstructionData); + } let sysvar_cache = invoke_context.get_sysvar_cache(); let slot_hashes = sysvar_cache.get_slot_hashes()?; let clock = sysvar_cache.get_clock()?; @@ -451,7 +478,7 @@ mod tests { (vote_pubkey, vote_account_with_epoch_credits) } - /// Returns Vec of serialized VoteInstruction and flag indicating if it is a vote state proposal + /// Returns Vec of serialized VoteInstruction and flag indicating if it is a tower 
sync /// variant, along with the original vote fn create_serialized_votes() -> (Vote, Vec<(Vec, bool)>) { let vote = Vote::new(vec![1], Hash::default()); @@ -464,11 +491,11 @@ mod tests { ( serialize(&VoteInstruction::UpdateVoteState(vote_state_update.clone())) .unwrap(), - true, + false, ), ( serialize(&VoteInstruction::CompactUpdateVoteState(vote_state_update)).unwrap(), - true, + false, ), ( serialize(&VoteInstruction::TowerSync(tower_sync)).unwrap(), @@ -744,20 +771,28 @@ mod tests { }, ]; - for (instruction_data, is_vote_state_update) in instruction_datas { + for (instruction_data, is_tower_sync) in instruction_datas { let mut transaction_accounts = vec![ (vote_pubkey, vote_account.clone()), (sysvar::slot_hashes::id(), slot_hashes_account.clone()), (sysvar::clock::id(), create_default_clock_account()), ]; + let error = |err| { + if !is_tower_sync { + Err(InstructionError::InvalidInstructionData) + } else { + Err(err) + } + }; + // should fail, unsigned instruction_accounts[0].is_signer = false; process_instruction( &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), + error(InstructionError::MissingRequiredSignature), ); instruction_accounts[0].is_signer = true; @@ -766,18 +801,24 @@ mod tests { &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Ok(()), - ); - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); - assert_eq!( - vote_state.votes, - vec![vote_state::LandedVote::from(Lockout::new( - *vote.slots.last().unwrap() - ))] + if is_tower_sync { + Ok(()) + } else { + Err(InstructionError::InvalidInstructionData) + }, ); - assert_eq!(vote_state.credits(), 0); + if is_tower_sync { + let vote_state: VoteState = StateMut::::state(&accounts[0]) + .unwrap() + .convert_to_current(); + assert_eq!( + vote_state.votes, + vec![vote_state::LandedVote::from(Lockout::new( + *vote.slots.last().unwrap() + ))] + ); + assert_eq!(vote_state.credits(), 0); + } // should fail, wrong hash transaction_accounts[1] = ( @@ -791,7 +832,7 @@ mod tests { &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err(VoteError::SlotHashMismatch.into()), + error(VoteError::SlotHashMismatch.into()), ); // should fail, wrong slot @@ -803,7 +844,7 @@ mod tests { &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err(VoteError::SlotsMismatch.into()), + error(VoteError::SlotsMismatch.into()), ); // should fail, empty slot_hashes @@ -815,12 +856,7 @@ mod tests { &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err((if is_vote_state_update { - VoteError::SlotsMismatch - } else { - VoteError::VoteTooOld - }) - .into()), + error(VoteError::SlotsMismatch.into()), ); transaction_accounts[1] = (sysvar::slot_hashes::id(), slot_hashes_account.clone()); @@ -831,7 +867,7 @@ mod tests { &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err(InstructionError::UninitializedAccount), + error(InstructionError::UninitializedAccount), ); } } @@ -941,12 +977,16 @@ mod tests { is_writable: false, }); - for (instruction_data, _) in instruction_datas { + for (instruction_data, is_tower_sync) in instruction_datas { process_instruction( &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), + Err(if is_tower_sync { + InstructionError::MissingRequiredSignature + } else { + 
InstructionError::InvalidInstructionData + }), ); // should pass, signed by authorized voter @@ -954,7 +994,11 @@ mod tests { &instruction_data, transaction_accounts.clone(), authorized_instruction_accounts.clone(), - Ok(()), + if is_tower_sync { + Ok(()) + } else { + Err(InstructionError::InvalidInstructionData) + }, ); } } @@ -1815,7 +1859,7 @@ mod tests { &Pubkey::new_unique(), Vote::default(), ), - Err(InstructionError::InvalidAccountData), + Err(InstructionError::InvalidInstructionData), ); process_instruction_as_one_arg( &vote_switch( @@ -1824,7 +1868,7 @@ mod tests { Vote::default(), Hash::default(), ), - Err(InstructionError::InvalidAccountData), + Err(InstructionError::InvalidInstructionData), ); process_instruction_as_one_arg( &authorize( @@ -1841,7 +1885,7 @@ mod tests { &Pubkey::default(), VoteStateUpdate::default(), ), - Err(InstructionError::InvalidAccountData), + Err(InstructionError::InvalidInstructionData), ); process_instruction_as_one_arg( @@ -1851,7 +1895,7 @@ mod tests { VoteStateUpdate::default(), Hash::default(), ), - Err(InstructionError::InvalidAccountData), + Err(InstructionError::InvalidInstructionData), ); process_instruction_as_one_arg( &compact_update_vote_state( @@ -1859,7 +1903,7 @@ mod tests { &Pubkey::default(), VoteStateUpdate::default(), ), - Err(InstructionError::InvalidAccountData), + Err(InstructionError::InvalidInstructionData), ); process_instruction_as_one_arg( &compact_update_vote_state_switch( @@ -1868,7 +1912,7 @@ mod tests { VoteStateUpdate::default(), Hash::default(), ), - Err(InstructionError::InvalidAccountData), + Err(InstructionError::InvalidInstructionData), ); process_instruction_as_one_arg( &tower_sync(&Pubkey::default(), &Pubkey::default(), TowerSync::default()), diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index d62a61ec81fe00..db4d7bf9e69b53 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -4367,7 +4367,7 @@ pub mod tests { }, solana_vote_program::{ vote_instruction, - vote_state::{self, Vote, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, + vote_state::{self, TowerSync, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, }, spl_pod::optional_keys::OptionalNonZeroPubkey, spl_token_2022::{ @@ -7239,23 +7239,15 @@ pub mod tests { // Votes let instructions = [ - vote_instruction::vote( + vote_instruction::tower_sync( &leader_vote_keypair.pubkey(), &leader_vote_keypair.pubkey(), - Vote { - slots: vec![bank.slot()], - hash: bank.hash(), - timestamp: None, - }, + TowerSync::new_from_slot(bank.slot(), bank.hash()), ), - vote_instruction::vote( + vote_instruction::tower_sync( &alice_vote_keypair.pubkey(), &alice_vote_keypair.pubkey(), - Vote { - slots: vec![bank.slot()], - hash: bank.hash(), - timestamp: None, - }, + TowerSync::new_from_slot(bank.slot(), bank.hash()), ), ]; diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index 49622ba183a396..f8ad09e06f66ec 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -282,7 +282,10 @@ mod tests { transaction::Transaction, vote::state::{VoteStateVersions, MAX_LOCKOUT_HISTORY}, }, - solana_vote_program::{vote_state, vote_transaction}, + solana_vote_program::{ + vote_state::{self, TowerSync}, + vote_transaction, + }, }; impl PartitionedStakeReward { @@ -769,9 +772,9 @@ mod tests { // Fill bank_forks with banks with votes landing in the next slot // So that rewards will be paid out at the epoch boundary, i.e. 
slot = 32 - let vote = vote_transaction::new_vote_transaction( - vec![slot - 1], - previous_bank.hash(), + let tower_sync = TowerSync::new_from_slot(slot - 1, previous_bank.hash()); + let vote = vote_transaction::new_tower_sync_transaction( + tower_sync, previous_bank.last_blockhash(), &validator_vote_keypairs.node_keypair, &validator_vote_keypairs.vote_keypair, diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index 9922b8c9a5d075..edc3bd3a4befb0 100755 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -28,7 +28,7 @@ use { solana_stake_program::stake_state, solana_vote_program::{ vote_instruction, - vote_state::{Vote, VoteInit, VoteState, VoteStateVersions}, + vote_state::{TowerSync, VoteInit, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY}, }, std::sync::{Arc, RwLock}, }; @@ -72,6 +72,7 @@ fn fill_epoch_with_votes( bank_forks: &RwLock, vote_keypair: &Keypair, mint_keypair: &Keypair, + start_slot: Slot, ) -> Arc { let mint_pubkey = mint_keypair.pubkey(); let vote_pubkey = vote_keypair.pubkey(); @@ -83,12 +84,18 @@ fn fill_epoch_with_votes( let bank_client = BankClient::new_shared(bank.clone()); let parent = bank.parent().unwrap(); - + let lowest_slot = u64::max( + (parent.slot() + 1).saturating_sub(MAX_LOCKOUT_HISTORY as u64), + start_slot, + ); + let slots: Vec<_> = (lowest_slot..(parent.slot() + 1)).collect(); + let root = (lowest_slot > start_slot).then(|| lowest_slot - 1); + let tower_sync = TowerSync::new_from_slots(slots, parent.hash(), root); let message = Message::new( - &[vote_instruction::vote( + &[vote_instruction::tower_sync( &vote_pubkey, &vote_pubkey, - Vote::new(vec![parent.slot()], parent.hash()), + tower_sync, )], Some(&mint_pubkey), ); @@ -413,7 +420,14 @@ fn test_stake_account_lifetime() { // Reward redemption // Submit enough votes to generate rewards - bank = fill_epoch_with_votes(bank, bank_forks.as_ref(), &vote_keypair, &mint_keypair); + let start_slot = bank.slot(); + bank = fill_epoch_with_votes( + bank, + bank_forks.as_ref(), + &vote_keypair, + &mint_keypair, + start_slot, + ); // Test that votes and credits are there let account = bank.get_account(&vote_pubkey).expect("account not found"); @@ -426,7 +440,13 @@ fn test_stake_account_lifetime() { // one vote per slot, might be more slots than 32 in the epoch assert!(vote_state.credits() >= 1); - bank = fill_epoch_with_votes(bank, bank_forks.as_ref(), &vote_keypair, &mint_keypair); + bank = fill_epoch_with_votes( + bank, + bank_forks.as_ref(), + &vote_keypair, + &mint_keypair, + start_slot, + ); let pre_staked = get_staked(&bank, &stake_pubkey); let pre_balance = bank.get_balance(&stake_pubkey); diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 630e9a0354befe..8d1efa7468db5f 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -274,6 +274,39 @@ impl TowerSync { } } + /// Creates a tower with consecutive votes for `slot - MAX_LOCKOUT_HISTORY + 1` to `slot` inclusive. + /// If `slot >= MAX_LOCKOUT_HISTORY`, sets the root to `(slot - MAX_LOCKOUT_HISTORY)` + /// Sets the hash to `hash` and leaves `block_id` unset. 
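// Editor's illustration (not part of the patch): with MAX_LOCKOUT_HISTORY = 31,
// `new_from_slot(40, hash)` yields lockouts for slots 10..=40, with confirmation
// counts 31 (slot 10) down to 1 (slot 40), and root `Some(9)`.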
+ pub fn new_from_slot(slot: Slot, hash: Hash) -> Self { + let lowest_slot = slot + .saturating_add(1) + .saturating_sub(MAX_LOCKOUT_HISTORY as u64); + let slots: Vec<_> = (lowest_slot..slot.saturating_add(1)).collect(); + Self::new_from_slots( + slots, + hash, + (lowest_slot > 0).then(|| lowest_slot.saturating_sub(1)), + ) + } + + /// Creates a tower with consecutive confirmation for `slots` + pub fn new_from_slots(slots: Vec, hash: Hash, root: Option) -> Self { + let lockouts: VecDeque = slots + .into_iter() + .rev() + .enumerate() + .map(|(cc, s)| Lockout::new_with_confirmation_count(s, cc.saturating_add(1) as u32)) + .rev() + .collect(); + Self { + lockouts, + hash, + root, + timestamp: None, + block_id: Hash::default(), + } + } + pub fn slots(&self) -> Vec { self.lockouts.iter().map(|lockout| lockout.slot()).collect() } diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 4626240a949de4..7322fdbfde900c 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -845,6 +845,10 @@ pub mod enable_turbine_extended_fanout_experiments { solana_sdk::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); } +pub mod deprecate_legacy_vote_ixs { + solana_sdk::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -1051,6 +1055,7 @@ lazy_static! { (ed25519_precompile_verify_strict::id(), "Use strict verification in ed25519 precompile SIMD-0152"), (vote_only_retransmitter_signed_fec_sets::id(), "vote only on retransmitter signed fec sets"), (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), + (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From d4df4f80528c47df12147e2d5ef4c790db4df5c4 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:10:29 -0400 Subject: [PATCH 137/529] Refactor instruction processor to accept SVMInstruction (#2603) * refactor to make process_compute_budget_instructions accept SVMInstruction --- Cargo.lock | 4 ++ core/Cargo.toml | 1 + core/src/banking_stage/consumer.rs | 9 +-- .../immutable_deserialized_packet.rs | 4 +- .../scheduler_controller.rs | 3 +- cost-model/Cargo.toml | 1 + cost-model/src/cost_model.rs | 10 +-- programs/sbf/Cargo.lock | 5 ++ programs/sbf/Cargo.toml | 2 + programs/sbf/tests/programs.rs | 13 ++-- runtime-transaction/Cargo.toml | 1 + .../process_compute_budget_instructions.rs | 11 +-- .../src/compute_budget_instruction_details.rs | 68 ++++++++++++------- .../src/instructions_processor.rs | 13 ++-- .../src/runtime_transaction.rs | 7 +- runtime/Cargo.toml | 1 + runtime/src/bank.rs | 3 +- runtime/src/bank/tests.rs | 3 +- runtime/src/prioritization_fee_cache.rs | 3 +- svm/src/transaction_processor.rs | 12 ++-- 20 files changed, 117 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87962889b38e51..78c08a25ac6cad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6236,6 +6236,7 @@ dependencies = [ "solana-stake-program", "solana-streamer", "solana-svm", + "solana-svm-transaction", "solana-timings", "solana-tpu-client", "solana-transaction-status", @@ -6276,6 +6277,7 @@ dependencies = [ "solana-metrics", "solana-runtime-transaction", "solana-sdk", + "solana-svm-transaction", "solana-system-program", "solana-vote-program", "static_assertions", @@ -7392,6 +7394,7 @@ dependencies = [ "solana-sdk", 
"solana-stake-program", "solana-svm", + "solana-svm-transaction", "solana-system-program", "solana-timings", "solana-transaction-status", @@ -7426,6 +7429,7 @@ dependencies = [ "solana-compute-budget", "solana-program", "solana-sdk", + "solana-svm-transaction", "thiserror", ] diff --git a/core/Cargo.toml b/core/Cargo.toml index 8d96a2a1b0d1b8..4b318a42c368f2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -75,6 +75,7 @@ solana-send-transaction-service = { workspace = true } solana-short-vec = { workspace = true } solana-streamer = { workspace = true } solana-svm = { workspace = true } +solana-svm-transaction = { workspace = true } solana-timings = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 5d1d7c1637c40c..b773296fc2eb5f 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -37,6 +37,7 @@ use { transaction_processing_result::TransactionProcessingResultExtensions, transaction_processor::{ExecutionRecordingConfig, TransactionProcessingConfig}, }, + solana_svm_transaction::svm_message::SVMMessage, solana_timings::ExecuteTimings, std::{ sync::{atomic::Ordering, Arc}, @@ -555,9 +556,9 @@ impl Consumer { .sanitized_transactions() .iter() .filter_map(|transaction| { - process_compute_budget_instructions( - transaction.message().program_instructions_iter(), - ) + process_compute_budget_instructions(SVMMessage::program_instructions_iter( + transaction, + )) .ok() .map(|limits| limits.compute_unit_price) }) @@ -751,7 +752,7 @@ impl Consumer { ) -> Result<(), TransactionError> { let fee_payer = message.fee_payer(); let fee_budget_limits = FeeBudgetLimits::from(process_compute_budget_instructions( - message.program_instructions_iter(), + SVMMessage::program_instructions_iter(message), )?); let fee = solana_fee::calculate_fee( message, diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index f4a3a7e6623376..a83244171e775d 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -15,6 +15,7 @@ use { }, }, solana_short_vec::decode_shortu16_len, + solana_svm_transaction::instruction::SVMInstruction, std::{cmp::Ordering, collections::HashSet, mem::size_of}, thiserror::Error, }; @@ -64,7 +65,8 @@ impl ImmutableDeserializedPacket { } = process_compute_budget_instructions( sanitized_transaction .get_message() - .program_instructions_iter(), + .program_instructions_iter() + .map(|(pubkey, ix)| (pubkey, SVMInstruction::from(ix))), ) .map_err(|_| DeserializedPacketError::PrioritizationFailure)?; diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 5563f84d1c2204..417d5b95bf5bda 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -36,6 +36,7 @@ use { transaction::SanitizedTransaction, }, solana_svm::transaction_error_metrics::TransactionErrorMetrics, + solana_svm_transaction::svm_message::SVMMessage, std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -534,7 +535,7 @@ impl SchedulerController { .is_ok() }) .filter_map(|(packet, tx)| { - process_compute_budget_instructions(tx.message().program_instructions_iter()) + 
process_compute_budget_instructions(SVMMessage::program_instructions_iter(&tx)) .map(|compute_budget| (packet, tx, compute_budget.into())) .ok() }) diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index c33ba7a5415963..41eb2c18dfe132 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -20,6 +20,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true } solana-metrics = { workspace = true } solana-runtime-transaction = { workspace = true } solana-sdk = { workspace = true } +solana-svm-transaction = { workspace = true } solana-vote-program = { workspace = true } [lib] diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index c57a929d8b9bd1..68162bfcdc5bbd 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -25,6 +25,7 @@ use { system_program, transaction::SanitizedTransaction, }, + solana_svm_transaction::svm_message::SVMMessage, }; pub struct CostModel; @@ -150,7 +151,7 @@ impl CostModel { fn get_transaction_cost( tx_cost: &mut UsageCostDetails, - transaction: &SanitizedTransaction, + transaction: &impl SVMMessage, feature_set: &FeatureSet, ) { let mut programs_execution_costs = 0u64; @@ -159,7 +160,7 @@ impl CostModel { let mut compute_unit_limit_is_set = false; let mut has_user_space_instructions = false; - for (program_id, instruction) in transaction.message().program_instructions_iter() { + for (program_id, instruction) in transaction.program_instructions_iter() { let ix_execution_cost = if let Some(builtin_cost) = BUILTIN_INSTRUCTION_COSTS.get(program_id) { *builtin_cost @@ -177,7 +178,7 @@ impl CostModel { if compute_budget::check_id(program_id) { if let Ok(ComputeBudgetInstruction::SetComputeUnitLimit(_)) = - try_from_slice_unchecked(&instruction.data) + try_from_slice_unchecked(instruction.data) { compute_unit_limit_is_set = true; } @@ -186,8 +187,7 @@ impl CostModel { // if failed to process compute_budget instructions, the transaction will not be executed // by `bank`, therefore it should be considered as no execution cost by cost model. 
- match process_compute_budget_instructions(transaction.message().program_instructions_iter()) - { + match process_compute_budget_instructions(transaction.program_instructions_iter()) { Ok(compute_budget_limits) => { // if tx contained user-space instructions and a more accurate estimate available correct it, // where "user-space instructions" must be specifically checked by diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 829ec1e3bcbb1f..7d84471a678ec7 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4933,6 +4933,7 @@ dependencies = [ "solana-short-vec", "solana-streamer", "solana-svm", + "solana-svm-transaction", "solana-timings", "solana-tpu-client", "solana-transaction-status", @@ -4964,6 +4965,7 @@ dependencies = [ "solana-metrics", "solana-runtime-transaction", "solana-sdk", + "solana-svm-transaction", "solana-vote-program", ] @@ -5715,6 +5717,7 @@ dependencies = [ "solana-sdk", "solana-stake-program", "solana-svm", + "solana-svm-transaction", "solana-system-program", "solana-timings", "solana-transaction-status", @@ -5742,6 +5745,7 @@ dependencies = [ "log", "solana-compute-budget", "solana-sdk", + "solana-svm-transaction", "thiserror", ] @@ -5781,6 +5785,7 @@ dependencies = [ "solana-sbf-rust-realloc-invoke-dep", "solana-sdk", "solana-svm", + "solana-svm-transaction", "solana-timings", "solana-transaction-status", "solana-type-overrides", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 15d1775ab6559b..ae2437ade9f0b9 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -55,6 +55,7 @@ solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version solana-sdk = { path = "../../sdk", version = "=2.1.0" } solana-secp256k1-recover = { path = "../../curves/secp256k1-recover", version = "=2.1.0" } solana-svm = { path = "../../svm", version = "=2.1.0" } +solana-svm-transaction = { path = "../../svm-transaction", version = "=2.1.0" } solana-timings = { path = "../../timings", version = "=2.1.0" } solana-transaction-status = { path = "../../transaction-status", version = "=2.1.0" } solana-type-overrides = { path = "../../type-overrides", version = "=2.1.0" } @@ -117,6 +118,7 @@ solana-sbf-rust-realloc-dep = { workspace = true } solana-sbf-rust-realloc-invoke-dep = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-svm = { workspace = true } +solana-svm-transaction = { workspace = true } solana-timings = { workspace = true } solana-transaction-status = { workspace = true } solana-type-overrides = { workspace = true } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 2290c5e5cc1c19..b36f4d88bda09b 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -66,6 +66,7 @@ use { transaction_execution_result::InnerInstruction, transaction_processor::ExecutionRecordingConfig, }, + solana_svm_transaction::svm_message::SVMMessage, solana_timings::ExecuteTimings, solana_transaction_status::{ map_inner_instructions, ConfirmedTransactionWithStatusMeta, TransactionStatusMeta, @@ -3874,8 +3875,10 @@ fn test_program_fees() { ) .unwrap(); let fee_budget_limits = FeeBudgetLimits::from( - process_compute_budget_instructions(sanitized_message.program_instructions_iter()) - .unwrap_or_default(), + process_compute_budget_instructions(SVMMessage::program_instructions_iter( + &sanitized_message, + )) + .unwrap_or_default(), ); let expected_normal_fee = solana_fee::calculate_fee( &sanitized_message, @@ -3904,8 
+3907,10 @@ fn test_program_fees() { ) .unwrap(); let fee_budget_limits = FeeBudgetLimits::from( - process_compute_budget_instructions(sanitized_message.program_instructions_iter()) - .unwrap_or_default(), + process_compute_budget_instructions(SVMMessage::program_instructions_iter( + &sanitized_message, + )) + .unwrap_or_default(), ); let expected_prioritized_fee = solana_fee::calculate_fee( &sanitized_message, diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index 2965564e63d3dc..dd7c33505216f6 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } log = { workspace = true } solana-compute-budget = { workspace = true } solana-sdk = { workspace = true } +solana-svm-transaction = { workspace = true } thiserror = { workspace = true } [lib] diff --git a/runtime-transaction/benches/process_compute_budget_instructions.rs b/runtime-transaction/benches/process_compute_budget_instructions.rs index 463a4cda596c3a..76f6b590948875 100644 --- a/runtime-transaction/benches/process_compute_budget_instructions.rs +++ b/runtime-transaction/benches/process_compute_budget_instructions.rs @@ -11,6 +11,7 @@ use { system_instruction::{self}, transaction::{SanitizedTransaction, Transaction}, }, + solana_svm_transaction::svm_message::SVMMessage, }; const NUM_TRANSACTIONS_PER_ITER: usize = 1024; @@ -34,7 +35,7 @@ fn bench_process_compute_budget_instructions_empty(c: &mut Criterion) { bencher.iter(|| { (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { assert!(process_compute_budget_instructions(black_box( - tx.message().program_instructions_iter() + SVMMessage::program_instructions_iter(&tx) )) .is_ok()) }) @@ -62,7 +63,7 @@ fn bench_process_compute_budget_instructions_no_builtins(c: &mut Criterion) { bencher.iter(|| { (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { assert!(process_compute_budget_instructions(black_box( - tx.message().program_instructions_iter() + SVMMessage::program_instructions_iter(&tx) )) .is_ok()) }) @@ -85,7 +86,7 @@ fn bench_process_compute_budget_instructions_compute_budgets(c: &mut Criterion) bencher.iter(|| { (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { assert!(process_compute_budget_instructions(black_box( - tx.message().program_instructions_iter() + SVMMessage::program_instructions_iter(&tx) )) .is_ok()) }) @@ -111,7 +112,7 @@ fn bench_process_compute_budget_instructions_builtins(c: &mut Criterion) { bencher.iter(|| { (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { assert!(process_compute_budget_instructions(black_box( - tx.message().program_instructions_iter() + SVMMessage::program_instructions_iter(&tx) )) .is_ok()) }) @@ -148,7 +149,7 @@ fn bench_process_compute_budget_instructions_mixed(c: &mut Criterion) { bencher.iter(|| { (0..NUM_TRANSACTIONS_PER_ITER).for_each(|_| { assert!(process_compute_budget_instructions(black_box( - tx.message().program_instructions_iter() + SVMMessage::program_instructions_iter(&tx) )) .is_ok()) }) diff --git a/runtime-transaction/src/compute_budget_instruction_details.rs b/runtime-transaction/src/compute_budget_instruction_details.rs index 638d8d8bc3afaa..993c3905f6c101 100644 --- a/runtime-transaction/src/compute_budget_instruction_details.rs +++ b/runtime-transaction/src/compute_budget_instruction_details.rs @@ -3,11 +3,12 @@ use { solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - instruction::{CompiledInstruction, InstructionError}, + instruction::InstructionError, pubkey::Pubkey, saturating_add_assign, 
transaction::{Result, TransactionError}, }, + solana_svm_transaction::instruction::SVMInstruction, std::num::NonZeroU32, }; @@ -25,14 +26,14 @@ pub(crate) struct ComputeBudgetInstructionDetails { impl ComputeBudgetInstructionDetails { pub fn try_from<'a>( - instructions: impl Iterator<Item = (&'a Pubkey, &'a CompiledInstruction)>, + instructions: impl Iterator<Item = (&'a Pubkey, SVMInstruction<'a>)>, ) -> Result<Self> { let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); for (i, (program_id, instruction)) in instructions.enumerate() { compute_budget_instruction_details.process_instruction( i as u8, program_id, - instruction, + &instruction, )?; } @@ -93,18 +94,18 @@ impl ComputeBudgetInstructionDetails { }) } - fn process_instruction<'a>( + fn process_instruction( &mut self, index: u8, - program_id: &'a Pubkey, - instruction: &'a CompiledInstruction, + program_id: &Pubkey, + instruction: &SVMInstruction, ) -> Result<()> { if compute_budget::check_id(program_id) { let invalid_instruction_data_error = TransactionError::InstructionError(index, InstructionError::InvalidInstructionData); let duplicate_instruction_error = TransactionError::DuplicateInstruction(index); - match try_from_slice_unchecked(&instruction.data) { + match try_from_slice_unchecked(instruction.data) { Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { if self.requested_heap_size.is_some() { return Err(duplicate_instruction_error); @@ -145,7 +146,10 @@ impl ComputeBudgetInstructionDetails { #[cfg(test)] mod test { - use {super::*, solana_sdk::instruction::Instruction}; + use { + super::*, + solana_sdk::instruction::{CompiledInstruction, Instruction}, + }; fn setup_test_instruction( index: u8, @@ -174,7 +178,7 @@ mod test { Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions = 1; assert_eq!(compute_budget_instruction_details, expected_details); @@ -187,7 +191,7 @@ ); expected_details.requested_heap_size = Some((index, 40 * 1024)); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); assert_eq!(compute_budget_instruction_details, expected_details); @@ -199,7 +203,11 @@ ComputeBudgetInstruction::request_heap_frame(50 * 1024), ); assert_eq!( - compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + compute_budget_instruction_details.process_instruction( index, &program_id, &SVMInstruction::from(&ix) ), expected_err ); assert_eq!(compute_budget_instruction_details, expected_details); @@ -211,7 +219,7 @@ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions += 1; assert_eq!(compute_budget_instruction_details, expected_details); @@ -229,7 +237,7 @@ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions = 1; assert_eq!(compute_budget_instruction_details,
expected_details); @@ -242,7 +250,7 @@ mod test { ); expected_details.requested_compute_unit_limit = Some((index, u32::MAX)); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); assert_eq!(compute_budget_instruction_details, expected_details); @@ -254,7 +262,11 @@ mod test { ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), ); assert_eq!( - compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + compute_budget_instruction_details.process_instruction( + index, + &program_id, + &SVMInstruction::from(&ix) + ), expected_err ); assert_eq!(compute_budget_instruction_details, expected_details); @@ -266,7 +278,7 @@ mod test { Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions += 1; assert_eq!(compute_budget_instruction_details, expected_details); @@ -284,7 +296,7 @@ mod test { Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions = 1; assert_eq!(compute_budget_instruction_details, expected_details); @@ -297,7 +309,7 @@ mod test { ); expected_details.requested_compute_unit_price = Some((index, u64::MAX)); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); assert_eq!(compute_budget_instruction_details, expected_details); @@ -307,7 +319,11 @@ mod test { let (program_id, ix) = setup_test_instruction(index, ComputeBudgetInstruction::set_compute_unit_price(0)); assert_eq!( - compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + compute_budget_instruction_details.process_instruction( + index, + &program_id, + &SVMInstruction::from(&ix) + ), expected_err ); assert_eq!(compute_budget_instruction_details, expected_details); @@ -319,7 +335,7 @@ mod test { Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions += 1; assert_eq!(compute_budget_instruction_details, expected_details); @@ -337,7 +353,7 @@ mod test { Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions = 1; assert_eq!(compute_budget_instruction_details, expected_details); @@ -350,7 +366,7 @@ mod test { ); expected_details.requested_loaded_accounts_data_size_limit = Some((index, u32::MAX)); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); assert_eq!(compute_budget_instruction_details, expected_details); @@ -362,7 +378,11 @@ mod test { 
ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0), ); assert_eq!( - compute_budget_instruction_details.process_instruction(index, &program_id, &ix), + compute_budget_instruction_details.process_instruction( + index, + &program_id, + &SVMInstruction::from(&ix) + ), expected_err ); assert_eq!(compute_budget_instruction_details, expected_details); @@ -374,7 +394,7 @@ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), ); assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &ix) + .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) .is_ok()); expected_details.num_non_compute_budget_instructions += 1; assert_eq!(compute_budget_instruction_details, expected_details); diff --git a/runtime-transaction/src/instructions_processor.rs b/runtime-transaction/src/instructions_processor.rs index d220f1a2f36e88..1edba220096276 100644 --- a/runtime-transaction/src/instructions_processor.rs +++ b/runtime-transaction/src/instructions_processor.rs @@ -1,7 +1,8 @@ use { crate::compute_budget_instruction_details::*, solana_compute_budget::compute_budget_limits::*, - solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey, transaction::TransactionError}, + solana_sdk::{pubkey::Pubkey, transaction::TransactionError}, + solana_svm_transaction::instruction::SVMInstruction, }; /// Processing compute_budget could be part of tx sanitizing, failed to process /// If succeeded, the transaction's specific limits/requests (could be default) /// are retrieved and returned, pub fn process_compute_budget_instructions<'a>( - instructions: impl Iterator<Item = (&'a Pubkey, &'a CompiledInstruction)>, + instructions: impl Iterator<Item = (&'a Pubkey, SVMInstruction<'a>)>, ) -> Result<ComputeBudgetLimits, TransactionError> { ComputeBudgetInstructionDetails::try_from(instructions)? .sanitize_and_convert_to_compute_budget_limits() @@ -31,6 +32,7 @@ mod tests { system_instruction::{self}, transaction::{SanitizedTransaction, Transaction, TransactionError}, }, + solana_svm_transaction::svm_message::SVMMessage, std::num::NonZeroU32, }; @@ -43,7 +45,7 @@ mod tests { Hash::default(), )); let result = - process_compute_budget_instructions(tx.message().program_instructions_iter()); + process_compute_budget_instructions(SVMMessage::program_instructions_iter(&tx)); assert_eq!($expected_result, result); }; } @@ -350,8 +352,9 @@ mod tests { Hash::default(), )); - let result = - process_compute_budget_instructions(transaction.message().program_instructions_iter()); + let result = process_compute_budget_instructions(SVMMessage::program_instructions_iter( + &transaction, + )); // assert process_instructions will be successful with default, // and the default compute_unit_limit is 2 times default: one for bpf ix, one for diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 05bb4e6bbb45f7..9c2f75a0868755 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -23,6 +23,7 @@ use { simple_vote_transaction_checker::is_simple_vote_transaction, transaction::{Result, SanitizedVersionedTransaction}, }, + solana_svm_transaction::instruction::SVMInstruction, std::collections::HashSet, }; @@ -87,7 +88,11 @@ impl RuntimeTransaction<SanitizedVersionedTransaction> { compute_unit_price, loaded_accounts_bytes, ..
- } = process_compute_budget_instructions(message.program_instructions_iter())?; + } = process_compute_budget_instructions( + message + .program_instructions_iter() + .map(|(program_id, ix)| (program_id, SVMInstruction::from(ix))), + )?; meta.set_compute_unit_limit(compute_unit_limit); meta.set_compute_unit_price(compute_unit_price); meta.set_loaded_accounts_bytes(loaded_accounts_bytes.get()); diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index b5df2c8bcded91..f8f5d7661b5fa4 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -68,6 +68,7 @@ solana-runtime-transaction = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-svm = { workspace = true } +solana-svm-transaction = { workspace = true } solana-system-program = { workspace = true } solana-timings = { workspace = true } solana-transaction-status = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 798f4c84d957cf..d514b172f26df4 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -167,6 +167,7 @@ use { TransactionProcessingConfig, TransactionProcessingEnvironment, }, }, + solana_svm_transaction::svm_message::SVMMessage, solana_timings::{ExecuteTimingType, ExecuteTimings}, solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, @@ -3036,7 +3037,7 @@ impl Bank { pub fn get_fee_for_message_with_lamports_per_signature( &self, - message: &SanitizedMessage, + message: &impl SVMMessage, lamports_per_signature: u64, ) -> u64 { let fee_budget_limits = FeeBudgetLimits::from( diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 6d05ab5010d0d0..538e14084675fb 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -105,6 +105,7 @@ use { transaction_commit_result::TransactionCommitResultExtensions, transaction_execution_result::ExecutedTransaction, }, + solana_svm_transaction::svm_message::SVMMessage, solana_timings::ExecuteTimings, solana_vote_program::{ vote_instruction, @@ -9930,7 +9931,7 @@ fn test_call_precomiled_program() { } fn calculate_test_fee( - message: &SanitizedMessage, + message: &impl SVMMessage, lamports_per_signature: u64, fee_structure: &FeeStructure, ) -> u64 { diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index bafbde9f7411aa..e1392e6f66b7f3 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -10,6 +10,7 @@ use { pubkey::Pubkey, transaction::SanitizedTransaction, }, + solana_svm_transaction::svm_message::SVMMessage, std::{ collections::{BTreeMap, HashMap}, sync::{ @@ -204,7 +205,7 @@ impl PrioritizationFeeCache { } let compute_budget_limits = process_compute_budget_instructions( - sanitized_transaction.message().program_instructions_iter(), + SVMMessage::program_instructions_iter(sanitized_transaction), ); let message = sanitized_transaction.message(); diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index bc83f4b0b86ba8..c6ac4079dd33b4 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -49,6 +49,7 @@ use { transaction::{self, SanitizedTransaction, TransactionError}, transaction_context::{ExecutionRecord, TransactionContext}, }, + solana_svm_transaction::svm_message::SVMMessage, solana_timings::{ExecuteTimingType, ExecuteTimings}, solana_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard}, solana_vote::vote_account::VoteAccountsHashMap, @@ -398,7 
+399,7 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> { &self, callbacks: &CB, account_overrides: Option<&AccountOverrides>, - message: &SanitizedMessage, + message: &impl SVMMessage, checked_details: CheckedTransactionDetails, feature_set: &FeatureSet, fee_structure: &FeeStructure, @@ -1812,7 +1813,8 @@ mod tests { &Hash::new_unique(), )); let compute_budget_limits = - process_compute_budget_instructions(message.program_instructions_iter()).unwrap(); + process_compute_budget_instructions(SVMMessage::program_instructions_iter(&message)) + .unwrap(); let fee_payer_address = message.fee_payer(); let current_epoch = 42; let rent_collector = RentCollector { @@ -1896,7 +1898,8 @@ mod tests { &Hash::new_unique(), )); let compute_budget_limits = - process_compute_budget_instructions(message.program_instructions_iter()).unwrap(); + process_compute_budget_instructions(SVMMessage::program_instructions_iter(&message)) + .unwrap(); let fee_payer_address = message.fee_payer(); let mut rent_collector = RentCollector::default(); rent_collector.rent.lamports_per_byte_year = 1_000_000; @@ -2142,7 +2145,8 @@ mod tests { &Hash::new_unique(), )); let compute_budget_limits = - process_compute_budget_instructions(message.program_instructions_iter()).unwrap(); + process_compute_budget_instructions(SVMMessage::program_instructions_iter(&message)) + .unwrap(); let fee_payer_address = message.fee_payer(); let min_balance = Rent::default().minimum_balance(nonce::State::size()); let transaction_fee = lamports_per_signature; From 2e6ca8c1f62db62c1db7f19c9962d4db43d0d550 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Thu, 15 Aug 2024 17:19:30 -0400 Subject: [PATCH 138/529] Count and dump the number of purges_old_accounts in clean_accounts (#2600) --- accounts-db/src/accounts_db.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index feeadee16c623b..ed2f6428c8b257 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3258,6 +3258,7 @@ impl AccountsDb { let _guard = self.active_stats.activate(ActiveStatItem::Clean); let ancient_account_cleans = AtomicU64::default(); + let purges_old_accounts_count = AtomicU64::default(); let mut measure_all = Measure::start("clean_accounts"); let max_clean_root_inclusive = self.max_clean_root(max_clean_root_inclusive); @@ -3287,6 +3288,7 @@ impl AccountsDb { let mut not_found_on_fork = 0; let mut missing = 0; let mut useful = 0; + let mut purges_old_accounts_local = 0; let mut candidates_bin = candidates_bin.write().unwrap(); // Iterate over each HashMap entry to // avoid capturing the HashMap in the @@ -3337,6 +3339,7 @@ impl AccountsDb { if slot_list.len() > 1 { // no need to purge old accounts if there is only 1 slot in the slot list candidate_info.should_purge = true; + purges_old_accounts_local += 1; useless = false; } else { self.clean_accounts_stats @@ -3354,6 +3357,7 @@ impl AccountsDb { // touched in must be unrooted.
not_found_on_fork += 1; candidate_info.should_purge = true; + purges_old_accounts_local += 1; useless = false; } } @@ -3373,6 +3377,7 @@ impl AccountsDb { not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed); missing_accum.fetch_add(missing, Ordering::Relaxed); useful_accum.fetch_add(useful, Ordering::Relaxed); + purges_old_accounts_count.fetch_add(purges_old_accounts_local, Ordering::Relaxed); }); }; if is_startup { @@ -3651,6 +3656,11 @@ impl AccountsDb { ancient_account_cleans.load(Ordering::Relaxed), i64 ), + ( + "purges_old_accounts_count", + purges_old_accounts_count.load(Ordering::Relaxed), + i64 + ), ("next_store_id", self.next_id.load(Ordering::Relaxed), i64), ); } From 5e80a7ca7b4d512c7062d96450fef2ba9b285b29 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Thu, 15 Aug 2024 21:34:23 -0500 Subject: [PATCH 139/529] Update security email to security@anza.xyz (#2598) --- SECURITY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index f778f34c6db19f..b1c898106427d5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -25,11 +25,11 @@ Expect a response as fast as possible in the advisory, typically within 72 hours -- If you do not receive a response in the advisory, send an email to -security@solana.com with the full URL of the advisory you have created. DO NOT +security@anza.xyz with the full URL of the advisory you have created. DO NOT include attachments or provide detail sufficient for exploitation regarding the security issue in this email. **Only provide such details in the advisory**. -If you do not receive a response from security@solana.com please followup with +If you do not receive a response from security@anza.xyz please followup with the team directly. You can do this in the `#core-technology` channel of the [Solana Tech discord server](https://solana.com/discord), by pinging the `Anza` role in the channel and referencing the fact that you submitted a security problem. From f4e2fa9fe293f97df777503e9fd426ed5ac760d9 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Fri, 16 Aug 2024 10:56:33 +0800 Subject: [PATCH 140/529] fix: send votes to the immediate next leader (#2607) * fix: send votes to the immediate next leader * feedback --- core/src/next_leader.rs | 28 ++++++++++++++++++++++++++++ core/src/voting_service.rs | 28 ++++++++++++++++++++++------ 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/core/src/next_leader.rs b/core/src/next_leader.rs index 024f31f0f0adb7..7e77ecd869e4a1 100644 --- a/core/src/next_leader.rs +++ b/core/src/next_leader.rs @@ -1,10 +1,38 @@ use { + itertools::Itertools, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_poh::poh_recorder::PohRecorder, solana_sdk::{clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, pubkey::Pubkey}, std::{net::SocketAddr, sync::RwLock}, }; +/// Returns a list of tpu vote sockets for the leaders of the next N fanout +/// slots. Leaders and sockets are deduped. +pub(crate) fn upcoming_leader_tpu_vote_sockets( + cluster_info: &ClusterInfo, + poh_recorder: &RwLock<PohRecorder>, + fanout_slots: u64, +) -> Vec<SocketAddr> { + let upcoming_leaders = { + let poh_recorder = poh_recorder.read().unwrap(); + (1..=fanout_slots) + .filter_map(|n_slots| poh_recorder.leader_after_n_slots(n_slots)) + .collect_vec() + }; + + upcoming_leaders + .into_iter() + .dedup() + .filter_map(|leader_pubkey| { + cluster_info + .lookup_contact_info(&leader_pubkey, ContactInfo::tpu_vote)?
+ .ok() + }) + // dedup again since leaders could potentially share the same tpu vote socket + .dedup() + .collect() +} + pub(crate) fn next_leader_tpu_vote( cluster_info: &ClusterInfo, poh_recorder: &RwLock, diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 31ccf5c6885ad5..14443ab9c7947c 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -1,13 +1,16 @@ use { crate::{ consensus::tower_storage::{SavedTowerVersions, TowerStorage}, - next_leader::next_leader_tpu_vote, + next_leader::upcoming_leader_tpu_vote_sockets, }, crossbeam_channel::Receiver, solana_gossip::cluster_info::ClusterInfo, solana_measure::measure::Measure, solana_poh::poh_recorder::PohRecorder, - solana_sdk::{clock::Slot, transaction::Transaction}, + solana_sdk::{ + clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET}, + transaction::Transaction, + }, std::{ sync::{Arc, RwLock}, thread::{self, Builder, JoinHandle}, @@ -78,12 +81,25 @@ impl VotingService { trace!("{measure}"); } - let _ = cluster_info.send_transaction( - vote_op.tx(), - next_leader_tpu_vote(cluster_info, poh_recorder) - .map(|(_pubkey, target_addr)| target_addr), + // Attempt to send our vote transaction to the leaders for the next few slots + const UPCOMING_LEADER_FANOUT_SLOTS: u64 = FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET; + #[cfg(test)] + static_assertions::const_assert_eq!(UPCOMING_LEADER_FANOUT_SLOTS, 2); + let upcoming_leader_sockets = upcoming_leader_tpu_vote_sockets( + cluster_info, + poh_recorder, + UPCOMING_LEADER_FANOUT_SLOTS, ); + if !upcoming_leader_sockets.is_empty() { + for tpu_vote_socket in upcoming_leader_sockets { + let _ = cluster_info.send_transaction(vote_op.tx(), Some(tpu_vote_socket)); + } + } else { + // Send to our own tpu vote socket if we cannot find a leader to send to + let _ = cluster_info.send_transaction(vote_op.tx(), None); + } + match vote_op { VoteOp::PushVote { tx, tower_slots, .. 
From d8e95ac15fad1fb66429aa797bf5d193e4e422db Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 16 Aug 2024 11:55:08 +0800 Subject: [PATCH 141/529] bump rust to 1.80.1 / 2024-08-08 (#2487) * bump rust to 1.80 * bump nightly version to 2024-07-21 * bump rust stable to 1.80.1, nightly to 2024-08-08 * clippy: macro_metavars_in_unsafe * fix unexpected tag * run anchor downstream test with their master * add no-entrypoint into workspace level lint * use correct llvm path for coverage test --- .github/workflows/downstream-project-anchor.yml | 2 +- Cargo.toml | 9 +++++++++ account-decoder/Cargo.toml | 3 +++ accounts-db/Cargo.toml | 3 +++ bloom/Cargo.toml | 3 +++ builtins-default-costs/Cargo.toml | 3 +++ ci/docker/Dockerfile | 2 +- ci/rust-version.sh | 2 +- compute-budget/Cargo.toml | 3 +++ core/Cargo.toml | 3 +++ cost-model/Cargo.toml | 3 +++ curves/bn254/Cargo.toml | 3 +++ curves/curve25519/Cargo.toml | 3 +++ curves/secp256k1-recover/Cargo.toml | 3 +++ define-syscall/Cargo.toml | 5 +++++ frozen-abi/Cargo.toml | 3 +++ frozen-abi/macro/Cargo.toml | 3 +++ gossip/Cargo.toml | 3 +++ ledger/Cargo.toml | 3 +++ metrics/src/counter.rs | 5 ++++- perf/Cargo.toml | 9 +++++++++ poseidon/Cargo.toml | 3 +++ program-runtime/Cargo.toml | 3 +++ programs/address-lookup-table/Cargo.toml | 3 +++ programs/sbf/Cargo.toml | 4 ++++ programs/sbf/rust/custom_heap/Cargo.toml | 3 +++ programs/sbf/rust/deprecated_loader/Cargo.toml | 3 +++ programs/sbf/rust/membuiltins/Cargo.toml | 3 +++ programs/sbf/rust/panic/Cargo.toml | 3 +++ programs/sbf/rust/ro_modify/Cargo.toml | 3 +++ programs/sbf/rust/sanity/Cargo.toml | 3 +++ programs/stake/Cargo.toml | 3 +++ programs/vote/Cargo.toml | 3 +++ runtime-transaction/Cargo.toml | 3 +++ runtime/Cargo.toml | 3 +++ rust-toolchain.toml | 2 +- scripts/coverage.sh | 9 +++++++++ sdk/Cargo.toml | 3 +++ sdk/msg/Cargo.toml | 3 +++ sdk/program-memory/Cargo.toml | 3 +++ sdk/program/Cargo.toml | 3 +++ short-vec/Cargo.toml | 3 +++ svm/Cargo.toml | 3 +++ version/Cargo.toml | 3 +++ vote/Cargo.toml | 3 +++ zk-sdk/Cargo.toml | 3 +++ zk-token-sdk/Cargo.toml | 3 +++ 47 files changed, 155 insertions(+), 5 deletions(-) diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index 33ecc632f0b7d5..92ebee20aa217f 100644 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - version: ["v0.29.0", "v0.30.0"] + version: ["master"] steps: - uses: actions/checkout@v4 diff --git a/Cargo.toml b/Cargo.toml index 9852e973507b70..b5e5aca8fb5cc4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,6 +159,15 @@ homepage = "https://anza.xyz/" license = "Apache-2.0" edition = "2021" +[workspace.lints.rust.unexpected_cfgs] +level = "warn" +check-cfg = [ + 'cfg(target_os, values("solana"))', + 'cfg(feature, values("frozen-abi", "no-entrypoint"))', + 'cfg(RUSTC_WITH_SPECIALIZATION)', + 'cfg(RUSTC_WITHOUT_SPECIALIZATION)', +] + [workspace.dependencies] Inflector = "0.11.4" agave-transaction-view = { path = "transaction-view", version = "=2.1.0" } diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 7aee8478b4f126..71674b67ddd23b 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -34,3 +34,6 @@ spl-pod = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 8a6d7e4d32160a..2eb876219da96a 
100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -111,3 +111,6 @@ harness = false [[bench]] name = "bench_lock_accounts" harness = false + +[lints] +workspace = true diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index 5234fb506d90aa..4c9d88893fbacf 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -41,3 +41,6 @@ frozen-abi = [ "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", ] + +[lints] +workspace = true diff --git a/builtins-default-costs/Cargo.toml b/builtins-default-costs/Cargo.toml index ddc5f06a6d68ee..f2173aabed588f 100644 --- a/builtins-default-costs/Cargo.toml +++ b/builtins-default-costs/Cargo.toml @@ -44,3 +44,6 @@ frozen-abi = [ "dep:solana-frozen-abi", "solana-vote-program/frozen-abi", ] + +[lints] +workspace = true diff --git a/ci/docker/Dockerfile b/ci/docker/Dockerfile index fa801cf124314f..622b017146f817 100644 --- a/ci/docker/Dockerfile +++ b/ci/docker/Dockerfile @@ -70,11 +70,11 @@ RUN \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --no-modify-path --profile minimal --default-toolchain $RUST_VERSION -y && \ rustup component add rustfmt && \ rustup component add clippy && \ - rustup component add llvm-tools-preview && \ rustup install $RUST_NIGHTLY_VERSION && \ rustup component add clippy --toolchain=$RUST_NIGHTLY_VERSION && \ rustup component add rustfmt --toolchain=$RUST_NIGHTLY_VERSION && \ rustup component add miri --toolchain=$RUST_NIGHTLY_VERSION && \ + rustup component add llvm-tools-preview --toolchain=$RUST_NIGHTLY_VERSION && \ rustup target add wasm32-unknown-unknown && \ cargo install cargo-audit && \ cargo install cargo-hack && \ diff --git a/ci/rust-version.sh b/ci/rust-version.sh index c902153c4fdb4f..c55d5ba4462998 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -29,7 +29,7 @@ fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2024-05-02 + nightly_version=2024-08-08 fi diff --git a/compute-budget/Cargo.toml b/compute-budget/Cargo.toml index 55229cf808b4f0..bbcaa27b8d57d5 100644 --- a/compute-budget/Cargo.toml +++ b/compute-budget/Cargo.toml @@ -22,3 +22,6 @@ frozen-abi = [ "dep:solana-frozen-abi", "solana-sdk/frozen-abi", ] + +[lints] +workspace = true diff --git a/core/Cargo.toml b/core/Cargo.toml index 4b318a42c368f2..4d3c59a8ada4f8 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -153,3 +153,6 @@ name = "sigverify_stage" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 41eb2c18dfe132..b1d7949bf63a05 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -54,3 +54,6 @@ frozen-abi = [ [[bench]] name = "cost_tracker" + +[lints] +workspace = true diff --git a/curves/bn254/Cargo.toml b/curves/bn254/Cargo.toml index 7182ff53f95b45..c2cc0fe86764d6 100644 --- a/curves/bn254/Cargo.toml +++ b/curves/bn254/Cargo.toml @@ -25,3 +25,6 @@ array-bytes = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } + +[lints] +workspace = true diff --git a/curves/curve25519/Cargo.toml b/curves/curve25519/Cargo.toml index ce77b5ddca04db..6d8991623ee0bd 100644 --- a/curves/curve25519/Cargo.toml +++ b/curves/curve25519/Cargo.toml @@ -19,3 +19,6 @@ solana-program = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] curve25519-dalek = { workspace = true, features = ["serde"] } + +[lints] +workspace = true diff --git 
a/curves/secp256k1-recover/Cargo.toml b/curves/secp256k1-recover/Cargo.toml index 90208106bb5475..6f983ef6d713c4 100644 --- a/curves/secp256k1-recover/Cargo.toml +++ b/curves/secp256k1-recover/Cargo.toml @@ -37,3 +37,6 @@ frozen-abi = ["dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-a [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/define-syscall/Cargo.toml b/define-syscall/Cargo.toml index af8ef66d74450e..7f93af8817f3ff 100644 --- a/define-syscall/Cargo.toml +++ b/define-syscall/Cargo.toml @@ -11,3 +11,8 @@ edition = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(target_feature, values("static-syscalls"))', +] } diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 5c71ec5c2b4596..096662d05956f9 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -36,3 +36,6 @@ rustc_version = { workspace = true } default = ["frozen-abi"] # no reason to deactivate this. It's needed because the build.rs is reused elsewhere frozen-abi = [] + +[lints] +workspace = true diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index fe3c79d4404cbe..f5cb98c1287e45 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -24,3 +24,6 @@ rustc_version = { workspace = true } default = ["frozen-abi"] # no reason to deactivate this. It's needed because the build.rs is reused elsewhere frozen-abi = [] + +[lints] +workspace = true diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 96f54442e7c98d..db46eb8ea1c974 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -94,3 +94,6 @@ path = "src/main.rs" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index c21f8028bdf95e..255efa9c8358dd 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -112,3 +112,6 @@ name = "blockstore" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs index da7d8f9b1d96b1..0431a3538a108b 100644 --- a/metrics/src/counter.rs +++ b/metrics/src/counter.rs @@ -58,7 +58,10 @@ macro_rules! create_counter { #[macro_export] macro_rules! 
inc_counter { ($name:expr, $level:expr, $count:expr) => { - unsafe { $name.inc($level, $count) }; + #[allow(clippy::macro_metavars_in_unsafe)] + unsafe { + $name.inc($level, $count) + }; }; } diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 4cf6e49d2f4f72..0e90fd7ccf231a 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -64,3 +64,12 @@ name = "discard" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints.rust.unexpected_cfgs] +level = "warn" +check-cfg = [ + 'cfg(build_target_feature_avx)', + 'cfg(build_target_feature_avx2)', + 'cfg(RUSTC_WITH_SPECIALIZATION)', + 'cfg(RUSTC_WITHOUT_SPECIALIZATION)', +] diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 09f40b2ac65ecb..021bf76716d03a 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -21,3 +21,6 @@ light-poseidon = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index 1b6b914835c07f..413f7b7665ba42 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -59,3 +59,6 @@ frozen-abi = [ "solana-sdk/frozen-abi", ] shuttle-test = ["solana-type-overrides/shuttle-test", "solana_rbpf/shuttle-test"] + +[lints] +workspace = true diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index 1c61a6bdf466d2..3ff68a686a7521 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -32,3 +32,6 @@ name = "solana_address_lookup_table_program" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index ae2437ade9f0b9..a82a5efa92fa4e 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -7,6 +7,10 @@ homepage = "https://anza.xyz" license = "Apache-2.0" edition = "2021" +[workspace.lints.rust.unexpected_cfgs] +level = "warn" +check-cfg = ['cfg(target_os, values("solana"))'] + [workspace.dependencies] array-bytes = "=1.4.1" bincode = { version = "1.1.4", default-features = false } diff --git a/programs/sbf/rust/custom_heap/Cargo.toml b/programs/sbf/rust/custom_heap/Cargo.toml index d4daadfd4ab423..3386d32547b31f 100644 --- a/programs/sbf/rust/custom_heap/Cargo.toml +++ b/programs/sbf/rust/custom_heap/Cargo.toml @@ -17,3 +17,6 @@ custom-heap = [] [lib] crate-type = ["cdylib"] + +[lints] +workspace = true diff --git a/programs/sbf/rust/deprecated_loader/Cargo.toml b/programs/sbf/rust/deprecated_loader/Cargo.toml index dce97434a03c4b..8a139ba3912199 100644 --- a/programs/sbf/rust/deprecated_loader/Cargo.toml +++ b/programs/sbf/rust/deprecated_loader/Cargo.toml @@ -13,3 +13,6 @@ solana-program = { workspace = true } [lib] crate-type = ["cdylib"] + +[lints] +workspace = true diff --git a/programs/sbf/rust/membuiltins/Cargo.toml b/programs/sbf/rust/membuiltins/Cargo.toml index a2ae21f4c9c37d..405feffd4effb9 100644 --- a/programs/sbf/rust/membuiltins/Cargo.toml +++ b/programs/sbf/rust/membuiltins/Cargo.toml @@ -14,3 +14,6 @@ solana-sbf-rust-mem-dep = { workspace = true } [lib] crate-type = ["cdylib"] + +[lints] +workspace = true diff --git a/programs/sbf/rust/panic/Cargo.toml b/programs/sbf/rust/panic/Cargo.toml index 706bab0c664107..46f99056ac901e 100644 --- a/programs/sbf/rust/panic/Cargo.toml +++ b/programs/sbf/rust/panic/Cargo.toml @@ -17,3 +17,6 @@ custom-panic = [] [lib] crate-type = ["cdylib"] + +[lints] +workspace = true diff --git 
a/programs/sbf/rust/ro_modify/Cargo.toml b/programs/sbf/rust/ro_modify/Cargo.toml index d71ae82da01b4f..566734108df255 100644 --- a/programs/sbf/rust/ro_modify/Cargo.toml +++ b/programs/sbf/rust/ro_modify/Cargo.toml @@ -13,3 +13,6 @@ solana-program = { workspace = true } [lib] crate-type = ["cdylib"] + +[lints] +workspace = true diff --git a/programs/sbf/rust/sanity/Cargo.toml b/programs/sbf/rust/sanity/Cargo.toml index f01dd7501e6906..d5c47f9deb46b2 100644 --- a/programs/sbf/rust/sanity/Cargo.toml +++ b/programs/sbf/rust/sanity/Cargo.toml @@ -13,3 +13,6 @@ solana-program = { workspace = true } [lib] crate-type = ["cdylib"] + +[lints] +workspace = true diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index e14fc532d34bca..16f1e746698e71 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -35,3 +35,6 @@ name = "solana_stake_program" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index 58a21e6fd8443f..2a8462cd178ecf 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -48,3 +48,6 @@ frozen-abi = [ "solana-program-runtime/frozen-abi", "solana-sdk/frozen-abi", ] + +[lints] +workspace = true diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index dd7c33505216f6..32e42cefcf2f29 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -36,3 +36,6 @@ rustc_version = { workspace = true, optional = true } [[bench]] name = "process_compute_budget_instructions" harness = false + +[lints] +workspace = true diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index f8f5d7661b5fa4..44d291725b7419 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -133,3 +133,6 @@ frozen-abi = [ [[bench]] name = "prioritization_fee_cache" + +[lints] +workspace = true diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 51985806fcac46..a56a283d2abc1b 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.78.0" +channel = "1.80.1" diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 9fc43be5eba03b..6a144316ba50c1 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -28,6 +28,14 @@ fi # shellcheck source=ci/rust-version.sh source "$here/../ci/rust-version.sh" nightly +# Check llvm path +llvm_profdata="$(find "$(rustc +"$rust_nightly" --print sysroot)" -name llvm-profdata)" +if [ -z "$llvm_profdata" ]; then + echo "Error: couldn't find llvm-profdata. Try installing the llvm-tools component with \`rustup component add llvm-tools-preview --toolchain=$rust_nightly\`" + exit 1 +fi +llvm_path="$(dirname "$llvm_profdata")" + # get commit hash. it will be used to name output folder if [ -z "$COMMIT_HASH" ]; then COMMIT_HASH=$(git rev-parse --short=9 HEAD) @@ -66,6 +74,7 @@ grcov_common_args=( --source-dir "$here/.." 
--binary-path "$here/../target/cov/debug" --llvm + --llvm-path "$llvm_path" --ignore \*.cargo\* --ignore \*build.rs --ignore bench-tps\* diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index d19d16a5a3fc71..faf38677af31c8 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -119,3 +119,6 @@ targets = ["x86_64-unknown-linux-gnu"] [lib] crate-type = ["cdylib", "rlib"] + +[lints] +workspace = true diff --git a/sdk/msg/Cargo.toml b/sdk/msg/Cargo.toml index afa4a94d07c790..1c6addc97e1dda 100644 --- a/sdk/msg/Cargo.toml +++ b/sdk/msg/Cargo.toml @@ -14,3 +14,6 @@ solana-define-syscall = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/program-memory/Cargo.toml b/sdk/program-memory/Cargo.toml index f47f6adfd792ab..27d848f59a8275 100644 --- a/sdk/program-memory/Cargo.toml +++ b/sdk/program-memory/Cargo.toml @@ -17,3 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] [target.'cfg(target_os = "solana")'.dependencies] solana-define-syscall = { workspace = true } + +[lints] +workspace = true diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index cc220b1711db65..7b96ccf7d1f0e1 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -104,3 +104,6 @@ frozen-abi = [ "dep:solana-frozen-abi-macro", "solana-short-vec/frozen-abi", ] + +[lints] +workspace = true diff --git a/short-vec/Cargo.toml b/short-vec/Cargo.toml index 2b0c7baf5f12ce..2ab13eaad53a34 100644 --- a/short-vec/Cargo.toml +++ b/short-vec/Cargo.toml @@ -27,3 +27,6 @@ frozen-abi = ["dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-a [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/svm/Cargo.toml b/svm/Cargo.toml index e387a21eeedf6c..603f0c8ae8a1d5 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -76,3 +76,6 @@ shuttle-test = [ "solana-bpf-loader-program/shuttle-test", "solana-loader-v4-program/shuttle-test", ] + +[lints] +workspace = true diff --git a/version/Cargo.toml b/version/Cargo.toml index 7c8ae8f6820155..81f37b111b300a 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -36,3 +36,6 @@ targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] rustc_version = { workspace = true, optional = true } + +[lints] +workspace = true diff --git a/vote/Cargo.toml b/vote/Cargo.toml index 89c4808ede0ed7..2eb821eac407a4 100644 --- a/vote/Cargo.toml +++ b/vote/Cargo.toml @@ -41,3 +41,6 @@ frozen-abi = [ "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", ] + +[lints] +workspace = true diff --git a/zk-sdk/Cargo.toml b/zk-sdk/Cargo.toml index a57b994e017d2f..236c7a5e06dbf3 100644 --- a/zk-sdk/Cargo.toml +++ b/zk-sdk/Cargo.toml @@ -39,3 +39,6 @@ zeroize = { workspace = true, features = ["zeroize_derive"] } [lib] crate-type = ["cdylib", "rlib"] + +[lints] +workspace = true diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index d466d2ba0af22d..cc4f785550a73b 100644 --- a/zk-token-sdk/Cargo.toml +++ b/zk-token-sdk/Cargo.toml @@ -41,3 +41,6 @@ zeroize = { workspace = true, features = ["zeroize_derive"] } [lib] crate-type = ["cdylib", "rlib"] + +[lints] +workspace = true From 7d3682797511c03cedfa23cb22f63ae58e99fc65 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Fri, 16 Aug 2024 14:50:05 +0900 Subject: [PATCH 142/529] Fix flaky unified scheduler pool tests (#2606) * Fix flaky unified scheduler pool tests * Correct wrong constant name... 
--- unified-scheduler-pool/src/lib.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index bf1dfe886e2430..1b9c471137ca70 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -1530,7 +1530,7 @@ mod tests { } const SHORTENED_POOL_CLEANER_INTERVAL: Duration = Duration::from_millis(1); - const SHORTENED_MAX_POOLING_DURATION: Duration = Duration::from_millis(10); + const SHORTENED_MAX_POOLING_DURATION: Duration = Duration::from_millis(100); #[test] fn test_scheduler_drop_idle() { @@ -1575,6 +1575,13 @@ mod tests { // See the old (= idle) scheduler gone only after solScCleaner did its job... sleepless_testing::at(&TestCheckPoint::AfterIdleSchedulerCleaned); + + // The following assertion is racy. + // + // We need to make sure new_scheduler isn't treated as idle up to now since being returned + // to the pool after sleep(SHORTENED_MAX_POOLING_DURATION * 10). + // Removing only old_scheduler is the expected behavior. So, make + // SHORTENED_MAX_POOLING_DURATION rather long... assert_eq!(pool_raw.scheduler_inners.lock().unwrap().len(), 1); assert_eq!( pool_raw @@ -2065,7 +2072,14 @@ mod tests { let context = SchedulingContext::new(bank.clone()); let scheduler = pool.do_take_scheduler(context); - for i in 0..10 { + // This test is racy. + // + // That's because the scheduler needs to be aborted quickly as an expected behavior, + // leaving some readily-available work untouched. So, schedule rather large number of tasks + // to make the short-cutting abort code-path win the race easily. + const MAX_TASK_COUNT: usize = 100; + + for i in 0..MAX_TASK_COUNT { let tx = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, @@ -2080,7 +2094,8 @@ mod tests { sleepless_testing::at(TestCheckPoint::BeforeThreadManagerDrop); drop::<PooledScheduler<_>>(scheduler); sleepless_testing::at(TestCheckPoint::AfterSchedulerThreadAborted); - assert!(*TASK_COUNT.lock().unwrap() < 10); + // All of handler threads should have been aborted before processing MAX_TASK_COUNT tasks. + assert!(*TASK_COUNT.lock().unwrap() < MAX_TASK_COUNT); } #[test] From c66a8fbeecbce6fa256643362b04ec4147cf0faa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Aug 2024 14:47:08 +0800 Subject: [PATCH 143/529] build(deps): bump nix from 0.28.0 to 0.29.0 (#2608) * build(deps): bump nix from 0.28.0 to 0.29.0 Bumps [nix](https://github.com/nix-rust/nix) from 0.28.0 to 0.29.0. - [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md) - [Commits](https://github.com/nix-rust/nix/compare/v0.28.0...v0.29.0) --- updated-dependencies: - dependency-name: nix dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 34 ++++++++-------------------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 14 ++++---------- 3 files changed, 13 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78c08a25ac6cad..cd85ad9bc60a00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,7 +132,7 @@ dependencies = [ "dirs-next", "indicatif", "lazy_static", - "nix 0.28.0", + "nix", "reqwest", "scopeguard", "semver 1.0.23", @@ -1007,7 +1007,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive 1.5.1", - "cfg_aliases 0.2.1", + "cfg_aliases", ] [[package]] @@ -1295,12 +1295,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - [[package]] name = "cfg_aliases" version = "0.2.1" @@ -1744,7 +1738,7 @@ version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ - "nix 0.29.0", + "nix", "windows-sys 0.59.0", ] @@ -3586,19 +3580,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "nix" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" -dependencies = [ - "bitflags 2.6.0", - "cfg-if 1.0.0", - "cfg_aliases 0.1.1", - "libc", - "memoffset 0.9.1", -] - [[package]] name = "nix" version = "0.29.0" @@ -3607,8 +3588,9 @@ checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", - "cfg_aliases 0.2.1", + "cfg_aliases", "libc", + "memoffset 0.9.1", ] [[package]] @@ -6834,7 +6816,7 @@ dependencies = [ "clap 3.2.23", "crossbeam-channel", "log", - "nix 0.28.0", + "nix", "rand 0.8.5", "serde", "serde_derive", @@ -6887,7 +6869,7 @@ dependencies = [ "lazy_static", "libc", "log", - "nix 0.28.0", + "nix", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", @@ -7673,7 +7655,7 @@ dependencies = [ "itertools 0.12.1", "libc", "log", - "nix 0.28.0", + "nix", "pem", "percentage", "quinn", diff --git a/Cargo.toml b/Cargo.toml index b5e5aca8fb5cc4..939b9aff789987 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -289,7 +289,7 @@ merlin = "3" min-max-heap = "1.3.0" mockall = "0.11.4" modular-bitfield = "0.11.2" -nix = "0.28.0" +nix = "0.29.0" num-bigint = "0.4.6" num-derive = "0.4" num-traits = "0.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7d84471a678ec7..512aff5dc21cb6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -737,7 +737,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive 1.5.1", - "cfg_aliases 0.2.1", + "cfg_aliases", ] [[package]] @@ -945,12 +945,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = 
"cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - [[package]] name = "cfg_aliases" version = "0.2.1" @@ -2957,13 +2951,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", - "cfg_aliases 0.1.1", + "cfg_aliases", "libc", "memoffset 0.9.0", ] From a7522394efe9a4d4d64786968376bc1194df7ca5 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 16 Aug 2024 07:38:03 -0500 Subject: [PATCH 144/529] SVM account_saver: Use SVMMessage (#2617) --- svm/src/account_saver.rs | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/svm/src/account_saver.rs b/svm/src/account_saver.rs index 2657e7a7cb9717..f0881050dea4bc 100644 --- a/svm/src/account_saver.rs +++ b/svm/src/account_saver.rs @@ -7,15 +7,16 @@ use { }, solana_sdk::{ account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey, - transaction::SanitizedTransaction, transaction_context::TransactionAccount, + transaction_context::TransactionAccount, }, + solana_svm_transaction::svm_message::SVMMessage, }; // Used to approximate how many accounts will be calculated for storage so that // vectors are allocated with an appropriate capacity. Doesn't account for some // optimization edge cases where some write locked accounts have skip storage. fn max_number_of_accounts_to_collect( - txs: &[SanitizedTransaction], + txs: &[impl SVMMessage], processing_results: &[TransactionProcessingResult], ) -> usize { processing_results @@ -28,22 +29,19 @@ fn max_number_of_accounts_to_collect( }) .map( |(processed_tx, tx)| match processed_tx.execution_details.status { - Ok(_) => tx.message().num_write_locks() as usize, + Ok(_) => tx.num_write_locks() as usize, Err(_) => processed_tx.loaded_transaction.rollback_accounts.count(), }, ) .sum() } -pub fn collect_accounts_to_store<'a>( - txs: &'a [SanitizedTransaction], +pub fn collect_accounts_to_store<'a, T: SVMMessage>( + txs: &'a [T], processing_results: &'a mut [TransactionProcessingResult], durable_nonce: &DurableNonce, lamports_per_signature: u64, -) -> ( - Vec<(&'a Pubkey, &'a AccountSharedData)>, - Vec>, -) { +) -> (Vec<(&'a Pubkey, &'a AccountSharedData)>, Vec>) { let collect_capacity = max_number_of_accounts_to_collect(txs, processing_results); let mut accounts = Vec::with_capacity(collect_capacity); let mut transactions = Vec::with_capacity(collect_capacity); @@ -74,22 +72,21 @@ pub fn collect_accounts_to_store<'a>( (accounts, transactions) } -fn collect_accounts_for_successful_tx<'a>( +fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Vec>, - transaction: &'a SanitizedTransaction, + collected_account_transactions: &mut Vec>, + transaction: &'a T, transaction_accounts: &'a [TransactionAccount], ) { - let message = transaction.message(); - for (_, (address, account)) in (0..message.account_keys().len()) + for (_, (address, account)) in (0..transaction.account_keys().len()) .zip(transaction_accounts) .filter(|(i, _)| { - message.is_writable(*i) && { + transaction.is_writable(*i) && { // Accounts that are 
invoked and also not passed as an instruction // account to a program don't need to be stored because it's assumed // to be impossible for a committable transaction to modify an // invoked account if said account isn't passed to some program. - !message.is_invoked(*i) || message.is_instruction_account(*i) + !transaction.is_invoked(*i) || transaction.is_instruction_account(*i) } }) { @@ -98,16 +95,15 @@ fn collect_accounts_for_successful_tx<'a>( } } -fn collect_accounts_for_failed_tx<'a>( +fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Vec>, - transaction: &'a SanitizedTransaction, + collected_account_transactions: &mut Vec>, + transaction: &'a T, rollback_accounts: &'a mut RollbackAccounts, durable_nonce: &DurableNonce, lamports_per_signature: u64, ) { - let message = transaction.message(); - let fee_payer_address = message.fee_payer(); + let fee_payer_address = transaction.fee_payer(); match rollback_accounts { RollbackAccounts::FeePayerOnly { fee_payer_account } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); @@ -165,7 +161,7 @@ mod tests { rent_debits::RentDebits, signature::{keypair_from_seed, signers::Signers, Keypair, Signer}, system_instruction, system_program, - transaction::{Result, Transaction, TransactionError}, + transaction::{Result, SanitizedTransaction, Transaction, TransactionError}, }, std::collections::HashMap, }; From cc18a71fb822faa081100fad864498fdad0b2b95 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 16 Aug 2024 07:38:15 -0500 Subject: [PATCH 145/529] SVM TransactionProcessor: Use SVMTransaction (#2615) --- svm/src/transaction_account_state_info.rs | 4 +-- svm/src/transaction_processor.rs | 34 +++++++++++------------ 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/svm/src/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs index 74d898b81adc10..123b572e758868 100644 --- a/svm/src/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -2,12 +2,12 @@ use { crate::account_rent_state::RentState, solana_sdk::{ account::ReadableAccount, - message::SanitizedMessage, native_loader, rent::Rent, transaction::Result, transaction_context::{IndexOfAccount, TransactionContext}, }, + solana_svm_transaction::svm_message::SVMMessage, }; #[derive(PartialEq, Debug)] @@ -19,7 +19,7 @@ impl TransactionAccountStateInfo { pub(crate) fn new( rent: &Rent, transaction_context: &TransactionContext, - message: &SanitizedMessage, + message: &impl SVMMessage, ) -> Vec { (0..message.account_keys().len()) .map(|i| { diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index c6ac4079dd33b4..8b1a30f1ad913b 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -42,14 +42,13 @@ use { hash::Hash, inner_instruction::{InnerInstruction, InnerInstructionsList}, instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, - message::SanitizedMessage, pubkey::Pubkey, rent_collector::RentCollector, saturating_add_assign, - transaction::{self, SanitizedTransaction, TransactionError}, + transaction::{self, TransactionError}, transaction_context::{ExecutionRecord, TransactionContext}, }, - solana_svm_transaction::svm_message::SVMMessage, + solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction}, solana_timings::{ExecuteTimingType, ExecuteTimings}, solana_type_overrides::sync::{atomic::Ordering, Arc, 
RwLock, RwLockReadGuard}, solana_vote::vote_account::VoteAccountsHashMap, @@ -227,7 +226,7 @@ impl TransactionBatchProcessor { pub fn load_and_execute_sanitized_transactions( &self, callbacks: &CB, - sanitized_txs: &[SanitizedTransaction], + sanitized_txs: &[impl SVMTransaction], check_results: Vec, environment: &TransactionProcessingEnvironment, config: &TransactionProcessingConfig, @@ -360,11 +359,11 @@ impl TransactionBatchProcessor { } } - fn validate_fees( + fn validate_fees( &self, callbacks: &CB, account_overrides: Option<&AccountOverrides>, - sanitized_txs: &[impl core::borrow::Borrow], + sanitized_txs: &[impl core::borrow::Borrow], check_results: Vec, feature_set: &FeatureSet, fee_structure: &FeeStructure, @@ -376,7 +375,7 @@ impl TransactionBatchProcessor { .zip(check_results) .map(|(sanitized_tx, check_result)| { check_result.and_then(|checked_details| { - let message = sanitized_tx.borrow().message(); + let message = sanitized_tx.borrow(); self.validate_transaction_fee_payer( callbacks, account_overrides, @@ -483,15 +482,14 @@ impl TransactionBatchProcessor { /// to their usage counters, for the transactions with a valid blockhash or nonce. fn filter_executable_program_accounts( callbacks: &CB, - txs: &[SanitizedTransaction], + txs: &[impl SVMMessage], validation_results: &[TransactionValidationResult], program_owners: &[Pubkey], ) -> HashMap { let mut result: HashMap = HashMap::new(); validation_results.iter().zip(txs).for_each(|etx| { if let (Ok(_), tx) = etx { - tx.message() - .account_keys() + tx.account_keys() .iter() .for_each(|key| match result.entry(*key) { Entry::Occupied(mut entry) => { @@ -697,7 +695,7 @@ impl TransactionBatchProcessor { #[allow(clippy::too_many_arguments)] fn execute_loaded_transaction( &self, - tx: &SanitizedTransaction, + tx: &impl SVMTransaction, mut loaded_transaction: LoadedTransaction, execute_timings: &mut ExecuteTimings, error_metrics: &mut TransactionErrorMetrics, @@ -709,7 +707,7 @@ impl TransactionBatchProcessor { fn transaction_accounts_lamports_sum( accounts: &[(Pubkey, AccountSharedData)], - message: &SanitizedMessage, + message: &impl SVMMessage, ) -> Option { let mut lamports_sum = 0u128; for i in 0..message.account_keys().len() { @@ -725,7 +723,7 @@ impl TransactionBatchProcessor { .unwrap_or_default(); let lamports_before_tx = - transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); + transaction_accounts_lamports_sum(&transaction_accounts, tx).unwrap_or(0); let compute_budget = config .compute_budget @@ -741,7 +739,7 @@ impl TransactionBatchProcessor { transaction_context.set_signature(tx.signature()); let pre_account_state_info = - TransactionAccountStateInfo::new(&rent, &transaction_context, tx.message()); + TransactionAccountStateInfo::new(&rent, &transaction_context, tx); let log_collector = if config.recording_config.enable_log_recording { match config.log_messages_bytes_limit { @@ -777,7 +775,7 @@ impl TransactionBatchProcessor { let mut process_message_time = Measure::start("process_message_time"); let process_result = MessageProcessor::process_message( - tx.message(), + tx, &loaded_transaction.program_indices, &mut invoke_context, execute_timings, @@ -795,7 +793,7 @@ impl TransactionBatchProcessor { let mut status = process_result .and_then(|info| { let post_account_state_info = - TransactionAccountStateInfo::new(&rent, &transaction_context, tx.message()); + TransactionAccountStateInfo::new(&rent, &transaction_context, tx); TransactionAccountStateInfo::verify_changes( &pre_account_state_info, 
                    &post_account_state_info,
@@ -842,7 +840,7 @@ impl TransactionBatchProcessor {
        } = transaction_context.into();

        if status.is_ok()
-            && transaction_accounts_lamports_sum(&accounts, tx.message())
+            && transaction_accounts_lamports_sum(&accounts, tx)
                .filter(|lamports_after_tx| lamports_before_tx == *lamports_after_tx)
                .is_none()
        {
@@ -993,7 +991,7 @@ mod tests {
        fee::FeeDetails,
        fee_calculator::FeeCalculator,
        hash::Hash,
-        message::{LegacyMessage, Message, MessageHeader},
+        message::{LegacyMessage, Message, MessageHeader, SanitizedMessage},
        nonce,
        rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH},
        rent_debits::RentDebits,

From 92d932e3e25a31c8df1a03b3cbfe9e106c0e91b4 Mon Sep 17 00:00:00 2001
From: Kevin Heavey
Date: Fri, 16 Aug 2024 17:19:43 +0400
Subject: [PATCH 146/529] ignore path dev deps in circular deps check (attempt 2) (#2578)

* filter out path dev deps in order-crates-for-publishing.py

* remove extra backtick

* remove debug prints
---
 ci/order-crates-for-publishing.py | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/ci/order-crates-for-publishing.py b/ci/order-crates-for-publishing.py
index 855f0e89d17407..6bfda2b29df0d0 100755
--- a/ci/order-crates-for-publishing.py
+++ b/ci/order-crates-for-publishing.py
@@ -63,13 +63,34 @@ def is_self_dev_dep_with_dev_context_only_utils(package, dependency, wrong_self_
 
     return is_special_cased
 
+
+# `cargo publish` is fine with circular dev-dependencies if
+# they are path deps.
+# However, cargo still fails if deps are path deps with versions
+# (this happens when you use `workspace = true`): https://github.com/rust-lang/cargo/issues/4242
+# Unlike in is_self_dev_dep_with_dev_context_only_utils(),
+# we don't have a clean way of checking if someone used a workspace dev
+# dep when they probably meant to use a path dev dep,
+# so this function just checks if a dev dep is a path dep
+# and provides no special warnings.
+def is_path_dev_dep(dependency):
+    no_explicit_version = '*'
+    return (
+        dependency['kind'] == 'dev'
+        and 'path' in dependency
+        and dependency['req'] == no_explicit_version
+    )
+
 def should_add(package, dependency, wrong_self_dev_dependencies):
     related_to_solana = dependency['name'].startswith('solana')
     self_dev_dep_with_dev_context_only_utils = is_self_dev_dep_with_dev_context_only_utils(
         package, dependency, wrong_self_dev_dependencies
     )
-
-    return related_to_solana and not self_dev_dep_with_dev_context_only_utils
+    return (
+        related_to_solana
+        and not self_dev_dep_with_dev_context_only_utils
+        and not is_path_dev_dep(dependency)
+    )
 
 def get_packages():
     metadata = load_metadata()

From 78377b1c3f250aa37da323a04b3d31e7656e83b0 Mon Sep 17 00:00:00 2001
From: Andrew Fitzgerald
Date: Fri, 16 Aug 2024 08:26:51 -0500
Subject: [PATCH 147/529] Update SVM docs (#2628)

---
 svm/doc/spec.md | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/svm/doc/spec.md b/svm/doc/spec.md
index c40a928e4c4e4d..a3e0c4b09ac574 100644
--- a/svm/doc/spec.md
+++ b/svm/doc/spec.md
@@ -139,28 +139,25 @@ pub trait TransactionProcessingCallback {
 Consumers can customize this plug-in to use their own Solana account source,
 caching, and more.
 
-### `SanitizedTransaction`
+### `SVMTransaction`
 
-A "sanitized" Solana transaction is a transaction that has undergone the
+An SVM transaction is a transaction that has undergone the
 various checks required to evaluate a transaction against the Solana
 protocol ruleset. Some of these rules include signature verification and
 validation of account indices (`num_readonly_signers`, etc.).
 
-A `SanitizedTransaction` contains:
-
-- `SanitizedMessage`: Enum with two kinds of messages - `LegacyMessage` and
-  `LoadedMessage` - both of which contain:
-  - `MessageHeader`: Vector of `Pubkey` of accounts used in the transaction.
-  - `Hash` of recent block.
-  - Vector of `CompiledInstruction`.
-  - In addition, `LoadedMessage` contains a vector of
-    `MessageAddressTableLookup` - list of address table lookups to
-    load additional accounts for this transaction.
-- A Hash of the message
-- A boolean flag `is_simple_vote_tx` - shortcut for determining if the
-  transaction is merely a simple vote transaction produced by a validator.
-- A vector of `Signature` - the hash of the transaction message encrypted using
+An `SVMTransaction` is a trait that provides access to:
+
+- `signatures`: the hash of the transaction message signed using
  the signing key (for each signer in the transaction).
+- `static_account_keys`: Slice of `Pubkey` of accounts used in the transaction.
+- `account_keys`: Pubkeys of all accounts used in the transaction, including
+  those from address table lookups.
+- `recent_blockhash`: Hash of a recent block.
+- `instructions_iter`: An iterator over the transaction's instructions.
+- `message_address_table_lookups`: An iterator over the transaction's
+  address table lookups. These are only used in V0 transactions; for legacy
+  transactions the iterator is empty.
 
 ### `TransactionCheckResult`
 
@@ -245,10 +242,10 @@ Steps of `load_and_execute_sanitized_transactions`
    - Return the replenished local program cache.
 
 2. Load accounts (call to `load_accounts` function)
-   - For each `SanitizedTransaction` and `TransactionCheckResult`, we:
+   - For each `SVMTransaction` and `TransactionCheckResult`, we:
     - Calculate the number of signatures in transaction and its cost.
    - Call `load_transaction_accounts`
-      - The function is interwined with the struct `CompiledInstruction`
+      - The function is intertwined with the struct `SVMInstruction`
      - Load accounts from accounts DB
      - Extract data from accounts
      - Verify if we've reached the maximum account data size

From 3b9e7a3ca53e73f066be058edae0ccaffda852f3 Mon Sep 17 00:00:00 2001
From: Jon C
Date: Fri, 16 Aug 2024 09:55:22 -0400
Subject: [PATCH 148/529] CI: Disable downstream anchor builds (#2629)

#### Problem

With https://github.com/solana-labs/solana-program-library/pull/7148,
spl-token-2022 has moved from using solana-zk-token-sdk to solana-zk-sdk,
which is a major breaking change. Certain agave and anchor crates depend on
the solana-zk-token-sdk re-export in spl-token-2022, which is no longer
present.

This change is causing the downstream Anchor job to fail, since the patched
version of spl-token-2022 is no longer compatible.

#### Summary of changes

Until new major versions of the SPL crates used by the Agave monorepo are
available, disable the downstream anchor job.
--- .github/workflows/downstream-project-anchor.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index 92ebee20aa217f..89a7acd101942c 100644 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -42,6 +42,7 @@ jobs: strategy: matrix: version: ["master"] + if: false # Re-enable once new major versions for spl-token-2022 and spl-pod are out steps: - uses: actions/checkout@v4 From 1492119857df939f236354d71f7182127d11eca4 Mon Sep 17 00:00:00 2001 From: Andrei Silviu Dragnea Date: Fri, 16 Aug 2024 17:30:12 +0100 Subject: [PATCH 149/529] refactor: use less &mut self and more async-await in BanksClient (#2591) * refactor: use less &mut self in BanksClient * refactor: use async-await syntax instead of explicit Futures --- banks-client/src/lib.rs | 321 ++++++++++++----------- program-test/tests/bpf.rs | 2 +- program-test/tests/builtins.rs | 4 +- program-test/tests/compute_units.rs | 2 +- program-test/tests/core_bpf.rs | 2 +- program-test/tests/cpi.rs | 8 +- program-test/tests/genesis_accounts.rs | 2 +- program-test/tests/lamports.rs | 2 +- program-test/tests/realloc.rs | 2 +- program-test/tests/return_data.rs | 4 +- program-test/tests/spl.rs | 4 +- tpu-client/src/nonblocking/tpu_client.rs | 7 +- 12 files changed, 189 insertions(+), 171 deletions(-) diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index aae81192d3a07b..125aceefa89f89 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -11,7 +11,7 @@ pub use { }; use { borsh::BorshDeserialize, - futures::{future::join_all, Future, FutureExt, TryFutureExt}, + futures::future::join_all, solana_banks_interface::{ BanksRequest, BanksResponse, BanksTransactionResultWithMetadata, BanksTransactionResultWithSimulation, @@ -58,175 +58,185 @@ impl BanksClient { TarpcClient::new(config, transport) } - pub fn send_transaction_with_context( - &mut self, + pub async fn send_transaction_with_context( + &self, ctx: Context, transaction: impl Into, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { self.inner .send_transaction_with_context(ctx, transaction.into()) + .await .map_err(Into::into) } - pub fn get_transaction_status_with_context( - &mut self, + pub async fn get_transaction_status_with_context( + &self, ctx: Context, signature: Signature, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.inner .get_transaction_status_with_context(ctx, signature) + .await .map_err(Into::into) } - pub fn get_slot_with_context( - &mut self, + pub async fn get_slot_with_context( + &self, ctx: Context, commitment: CommitmentLevel, - ) -> impl Future> + '_ { + ) -> Result { self.inner .get_slot_with_context(ctx, commitment) + .await .map_err(Into::into) } - pub fn get_block_height_with_context( - &mut self, + pub async fn get_block_height_with_context( + &self, ctx: Context, commitment: CommitmentLevel, - ) -> impl Future> + '_ { + ) -> Result { self.inner .get_block_height_with_context(ctx, commitment) + .await .map_err(Into::into) } - pub fn process_transaction_with_commitment_and_context( - &mut self, + pub async fn process_transaction_with_commitment_and_context( + &self, ctx: Context, transaction: impl Into, commitment: CommitmentLevel, - ) -> impl Future>, BanksClientError>> + '_ { + ) -> Result>, BanksClientError> { self.inner .process_transaction_with_commitment_and_context(ctx, transaction.into(), commitment) + .await 
.map_err(Into::into) } - pub fn process_transaction_with_preflight_and_commitment_and_context( - &mut self, + pub async fn process_transaction_with_preflight_and_commitment_and_context( + &self, ctx: Context, transaction: impl Into, commitment: CommitmentLevel, - ) -> impl Future> + '_ - { + ) -> Result { self.inner .process_transaction_with_preflight_and_commitment_and_context( ctx, transaction.into(), commitment, ) + .await .map_err(Into::into) } - pub fn process_transaction_with_metadata_and_context( - &mut self, + pub async fn process_transaction_with_metadata_and_context( + &self, ctx: Context, transaction: impl Into, - ) -> impl Future> + '_ - { + ) -> Result { self.inner .process_transaction_with_metadata_and_context(ctx, transaction.into()) + .await .map_err(Into::into) } - pub fn simulate_transaction_with_commitment_and_context( - &mut self, + pub async fn simulate_transaction_with_commitment_and_context( + &self, ctx: Context, transaction: impl Into, commitment: CommitmentLevel, - ) -> impl Future> + '_ - { + ) -> Result { self.inner .simulate_transaction_with_commitment_and_context(ctx, transaction.into(), commitment) + .await .map_err(Into::into) } - pub fn get_account_with_commitment_and_context( - &mut self, + pub async fn get_account_with_commitment_and_context( + &self, ctx: Context, address: Pubkey, commitment: CommitmentLevel, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.inner .get_account_with_commitment_and_context(ctx, address, commitment) + .await .map_err(Into::into) } /// Send a transaction and return immediately. The server will resend the /// transaction until either it is accepted by the cluster or the transaction's /// blockhash expires. - pub fn send_transaction( - &mut self, + pub async fn send_transaction( + &self, transaction: impl Into, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { self.send_transaction_with_context(context::current(), transaction.into()) + .await } /// Return the cluster Sysvar - pub fn get_sysvar( - &mut self, - ) -> impl Future> + '_ { - self.get_account(T::id()).map(|result| { - let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?; - from_account::(&sysvar).ok_or(BanksClientError::ClientError( - "Failed to deserialize sysvar", - )) - }) + pub async fn get_sysvar(&self) -> Result { + let sysvar = self + .get_account(T::id()) + .await? + .ok_or(BanksClientError::ClientError("Sysvar not present"))?; + from_account::(&sysvar).ok_or(BanksClientError::ClientError( + "Failed to deserialize sysvar", + )) } /// Return the cluster rent - pub fn get_rent(&mut self) -> impl Future> + '_ { - self.get_sysvar::() + pub async fn get_rent(&self) -> Result { + self.get_sysvar::().await } /// Send a transaction and return after the transaction has been rejected or /// reached the given level of commitment. - pub fn process_transaction_with_commitment( - &mut self, + pub async fn process_transaction_with_commitment( + &self, transaction: impl Into, commitment: CommitmentLevel, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { let ctx = context::current(); - self.process_transaction_with_commitment_and_context(ctx, transaction, commitment) - .map(|result| match result? { - None => Err(BanksClientError::ClientError( - "invalid blockhash or fee-payer", - )), - Some(transaction_result) => Ok(transaction_result?), - }) + match self + .process_transaction_with_commitment_and_context(ctx, transaction, commitment) + .await? 
+ { + None => Err(BanksClientError::ClientError( + "invalid blockhash or fee-payer", + )), + Some(transaction_result) => Ok(transaction_result?), + } } /// Process a transaction and return the result with metadata. - pub fn process_transaction_with_metadata( - &mut self, + pub async fn process_transaction_with_metadata( + &self, transaction: impl Into, - ) -> impl Future> + '_ - { + ) -> Result { let ctx = context::current(); self.process_transaction_with_metadata_and_context(ctx, transaction.into()) + .await } /// Send a transaction and return any preflight (sanitization or simulation) errors, or return /// after the transaction has been rejected or reached the given level of commitment. - pub fn process_transaction_with_preflight_and_commitment( - &mut self, + pub async fn process_transaction_with_preflight_and_commitment( + &self, transaction: impl Into, commitment: CommitmentLevel, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { let ctx = context::current(); - self.process_transaction_with_preflight_and_commitment_and_context( - ctx, - transaction, - commitment, - ) - .map(|result| match result? { + match self + .process_transaction_with_preflight_and_commitment_and_context( + ctx, + transaction, + commitment, + ) + .await? + { BanksTransactionResultWithSimulation { result: None, simulation_details: _, @@ -246,31 +256,33 @@ impl BanksClient { result: Some(result), simulation_details: _, } => result.map_err(Into::into), - }) + } } /// Send a transaction and return any preflight (sanitization or simulation) errors, or return /// after the transaction has been finalized or rejected. - pub fn process_transaction_with_preflight( - &mut self, + pub async fn process_transaction_with_preflight( + &self, transaction: impl Into, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { self.process_transaction_with_preflight_and_commitment( transaction, CommitmentLevel::default(), ) + .await } /// Send a transaction and return until the transaction has been finalized or rejected. - pub fn process_transaction( - &mut self, + pub async fn process_transaction( + &self, transaction: impl Into, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { self.process_transaction_with_commitment(transaction, CommitmentLevel::default()) + .await } pub async fn process_transactions_with_commitment>( - &mut self, + &self, transactions: Vec, commitment: CommitmentLevel, ) -> Result<(), BanksClientError> { @@ -286,112 +298,115 @@ impl BanksClient { } /// Send transactions and return until the transaction has been finalized or rejected. 
- pub fn process_transactions<'a, T: Into + 'a>( - &'a mut self, + pub async fn process_transactions<'a, T: Into + 'a>( + &'a self, transactions: Vec, - ) -> impl Future> + '_ { + ) -> Result<(), BanksClientError> { self.process_transactions_with_commitment(transactions, CommitmentLevel::default()) + .await } /// Simulate a transaction at the given commitment level - pub fn simulate_transaction_with_commitment( - &mut self, + pub async fn simulate_transaction_with_commitment( + &self, transaction: impl Into, commitment: CommitmentLevel, - ) -> impl Future> + '_ - { + ) -> Result { self.simulate_transaction_with_commitment_and_context( context::current(), transaction, commitment, ) + .await } /// Simulate a transaction at the default commitment level - pub fn simulate_transaction( - &mut self, + pub async fn simulate_transaction( + &self, transaction: impl Into, - ) -> impl Future> + '_ - { + ) -> Result { self.simulate_transaction_with_commitment(transaction, CommitmentLevel::default()) + .await } /// Return the most recent rooted slot. All transactions at or below this slot /// are said to be finalized. The cluster will not fork to a higher slot. - pub fn get_root_slot(&mut self) -> impl Future> + '_ { + pub async fn get_root_slot(&self) -> Result { self.get_slot_with_context(context::current(), CommitmentLevel::default()) + .await } /// Return the most recent rooted block height. All transactions at or below this height /// are said to be finalized. The cluster will not fork to a higher block height. - pub fn get_root_block_height( - &mut self, - ) -> impl Future> + '_ { + pub async fn get_root_block_height(&self) -> Result { self.get_block_height_with_context(context::current(), CommitmentLevel::default()) + .await } /// Return the account at the given address at the slot corresponding to the given /// commitment level. If the account is not found, None is returned. - pub fn get_account_with_commitment( - &mut self, + pub async fn get_account_with_commitment( + &self, address: Pubkey, commitment: CommitmentLevel, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.get_account_with_commitment_and_context(context::current(), address, commitment) + .await } /// Return the account at the given address at the time of the most recent root slot. /// If the account is not found, None is returned. - pub fn get_account( - &mut self, - address: Pubkey, - ) -> impl Future, BanksClientError>> + '_ { + pub async fn get_account(&self, address: Pubkey) -> Result, BanksClientError> { self.get_account_with_commitment(address, CommitmentLevel::default()) + .await } /// Return the unpacked account data at the given address /// If the account is not found, an error is returned - pub fn get_packed_account_data( - &mut self, + pub async fn get_packed_account_data( + &self, address: Pubkey, - ) -> impl Future> + '_ { - self.get_account(address).map(|result| { - let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?; - T::unpack_from_slice(&account.data) - .map_err(|_| BanksClientError::ClientError("Failed to deserialize account")) - }) + ) -> Result { + let account = self + .get_account(address) + .await? 
+ .ok_or(BanksClientError::ClientError("Account not found"))?; + T::unpack_from_slice(&account.data) + .map_err(|_| BanksClientError::ClientError("Failed to deserialize account")) } /// Return the unpacked account data at the given address /// If the account is not found, an error is returned - pub fn get_account_data_with_borsh( - &mut self, + pub async fn get_account_data_with_borsh( + &self, address: Pubkey, - ) -> impl Future> + '_ { - self.get_account(address).map(|result| { - let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?; - T::try_from_slice(&account.data).map_err(Into::into) - }) + ) -> Result { + let account = self + .get_account(address) + .await? + .ok_or(BanksClientError::ClientError("Account not found"))?; + T::try_from_slice(&account.data).map_err(Into::into) } /// Return the balance in lamports of an account at the given address at the slot /// corresponding to the given commitment level. - pub fn get_balance_with_commitment( - &mut self, + pub async fn get_balance_with_commitment( + &self, address: Pubkey, commitment: CommitmentLevel, - ) -> impl Future> + '_ { - self.get_account_with_commitment_and_context(context::current(), address, commitment) - .map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0))) + ) -> Result { + Ok(self + .get_account_with_commitment_and_context(context::current(), address, commitment) + .await? + .map(|x| x.lamports) + .unwrap_or(0)) } /// Return the balance in lamports of an account at the given address at the time /// of the most recent root slot. - pub fn get_balance( - &mut self, - address: Pubkey, - ) -> impl Future> + '_ { + pub async fn get_balance(&self, address: Pubkey) -> Result { self.get_balance_with_commitment(address, CommitmentLevel::default()) + .await } /// Return the status of a transaction with a signature matching the transaction's first @@ -399,16 +414,17 @@ impl BanksClient { /// blockhash was expired or the fee-paying account had insufficient funds to pay the /// transaction fee. Note that servers rarely store the full transaction history. This /// method may return None if the transaction status has been discarded. - pub fn get_transaction_status( - &mut self, + pub async fn get_transaction_status( + &self, signature: Signature, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.get_transaction_status_with_context(context::current(), signature) + .await } /// Same as get_transaction_status, but for multiple transactions. pub async fn get_transaction_statuses( - &mut self, + &self, signatures: Vec, ) -> Result>, BanksClientError> { // tarpc futures oddly hold a mutable reference back to the client so clone the client upfront @@ -427,66 +443,67 @@ impl BanksClient { statuses.into_iter().collect() } - pub fn get_latest_blockhash( - &mut self, - ) -> impl Future> + '_ { + pub async fn get_latest_blockhash(&self) -> Result { self.get_latest_blockhash_with_commitment(CommitmentLevel::default()) - .map(|result| { - result? - .map(|x| x.0) - .ok_or(BanksClientError::ClientError("valid blockhash not found")) - .map_err(Into::into) - }) + .await? 
+ .map(|x| x.0) + .ok_or(BanksClientError::ClientError("valid blockhash not found")) + .map_err(Into::into) } - pub fn get_latest_blockhash_with_commitment( - &mut self, + pub async fn get_latest_blockhash_with_commitment( + &self, commitment: CommitmentLevel, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment) + .await } - pub fn get_latest_blockhash_with_commitment_and_context( - &mut self, + pub async fn get_latest_blockhash_with_commitment_and_context( + &self, ctx: Context, commitment: CommitmentLevel, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.inner .get_latest_blockhash_with_commitment_and_context(ctx, commitment) + .await .map_err(Into::into) } - pub fn get_fee_for_message( - &mut self, + pub async fn get_fee_for_message( + &self, message: Message, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.get_fee_for_message_with_commitment_and_context( context::current(), message, CommitmentLevel::default(), ) + .await } - pub fn get_fee_for_message_with_commitment( - &mut self, + pub async fn get_fee_for_message_with_commitment( + &self, message: Message, commitment: CommitmentLevel, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.get_fee_for_message_with_commitment_and_context( context::current(), message, commitment, ) + .await } - pub fn get_fee_for_message_with_commitment_and_context( - &mut self, + pub async fn get_fee_for_message_with_commitment_and_context( + &self, ctx: Context, message: Message, commitment: CommitmentLevel, - ) -> impl Future, BanksClientError>> + '_ { + ) -> Result, BanksClientError> { self.inner .get_fee_for_message_with_commitment_and_context(ctx, message, commitment) + .await .map_err(Into::into) } } @@ -557,7 +574,7 @@ mod tests { let client_transport = start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1)) .await; - let mut banks_client = start_client(client_transport).await?; + let banks_client = start_client(client_transport).await?; let recent_blockhash = banks_client.get_latest_blockhash().await?; let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash); @@ -596,7 +613,7 @@ mod tests { let client_transport = start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1)) .await; - let mut banks_client = start_client(client_transport).await?; + let banks_client = start_client(client_transport).await?; let (recent_blockhash, last_valid_block_height) = banks_client .get_latest_blockhash_with_commitment(CommitmentLevel::default()) .await? diff --git a/program-test/tests/bpf.rs b/program-test/tests/bpf.rs index af9c19c2037dfc..d601c6c040ef9f 100644 --- a/program-test/tests/bpf.rs +++ b/program-test/tests/bpf.rs @@ -14,7 +14,7 @@ async fn test_add_bpf_program() { program_test.prefer_bpf(true); program_test.add_program("noop_program", program_id, None); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; // Assert the program is a BPF Loader 2 program. 
let program_account = context diff --git a/program-test/tests/builtins.rs b/program-test/tests/builtins.rs index b48724f8800973..4d5ca93ed041c3 100644 --- a/program-test/tests/builtins.rs +++ b/program-test/tests/builtins.rs @@ -13,7 +13,7 @@ use { #[tokio::test] async fn test_bpf_loader_upgradeable_present() { // Arrange - let (mut banks_client, payer, recent_blockhash) = ProgramTest::default().start().await; + let (banks_client, payer, recent_blockhash) = ProgramTest::default().start().await; let buffer_keypair = Keypair::new(); let upgrade_authority_keypair = Keypair::new(); @@ -50,7 +50,7 @@ async fn test_bpf_loader_upgradeable_present() { #[tokio::test] async fn versioned_transaction() { let program_test = ProgramTest::default(); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let program_id = Pubkey::new_unique(); let account = Keypair::new(); diff --git a/program-test/tests/compute_units.rs b/program-test/tests/compute_units.rs index bcbdd0b252f76b..3649e76a14c5d6 100644 --- a/program-test/tests/compute_units.rs +++ b/program-test/tests/compute_units.rs @@ -20,7 +20,7 @@ fn overflow_compute_units() { async fn max_compute_units() { let mut program_test = ProgramTest::default(); program_test.set_compute_max_units(i64::MAX as u64); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; // Invalid compute unit maximums are only triggered by BPF programs, so send // a valid instruction into a BPF program to make sure the issue doesn't diff --git a/program-test/tests/core_bpf.rs b/program-test/tests/core_bpf.rs index bc81a94f8e2f82..961539aace8e5b 100644 --- a/program-test/tests/core_bpf.rs +++ b/program-test/tests/core_bpf.rs @@ -14,7 +14,7 @@ async fn test_add_bpf_program() { let mut program_test = ProgramTest::default(); program_test.add_upgradeable_program_to_genesis("noop_program", &program_id); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; // Assert the program is a BPF Loader Upgradeable program. 
let program_account = context diff --git a/program-test/tests/cpi.rs b/program-test/tests/cpi.rs index 22592a7892e968..12047533926826 100644 --- a/program-test/tests/cpi.rs +++ b/program-test/tests/cpi.rs @@ -129,7 +129,7 @@ async fn cpi() { processor!(invoked_process_instruction), ); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let instructions = vec![Instruction::new_with_bincode( invoker_program_id, &[0], @@ -165,7 +165,7 @@ async fn cpi_dupes() { processor!(invoked_process_instruction), ); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let instructions = vec![Instruction::new_with_bincode( invoker_program_id, &[0], @@ -201,7 +201,7 @@ async fn cpi_create_account() { ); let create_account_keypair = Keypair::new(); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let instructions = vec![Instruction::new_with_bincode( create_account_program_id, &[0], @@ -272,7 +272,7 @@ async fn stack_height() { processor!(invoked_stack_height), ); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let instructions = vec![Instruction::new_with_bytes( invoker_stack_height_program_id, &[], diff --git a/program-test/tests/genesis_accounts.rs b/program-test/tests/genesis_accounts.rs index 41f1f71b923418..9aaa6eb9e7eeae 100644 --- a/program-test/tests/genesis_accounts.rs +++ b/program-test/tests/genesis_accounts.rs @@ -30,7 +30,7 @@ async fn genesis_accounts() { program_test.add_genesis_account(*pubkey, account.clone()); } - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; // Verify the accounts are present. 
for (pubkey, account) in my_genesis_accounts.iter() { diff --git a/program-test/tests/lamports.rs b/program-test/tests/lamports.rs index 783aae7edf4828..a7c2ac0d3becd1 100644 --- a/program-test/tests/lamports.rs +++ b/program-test/tests/lamports.rs @@ -42,7 +42,7 @@ async fn move_lamports() { let lamports = 1_000_000_000; let source = Keypair::new(); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let instructions = vec![ system_instruction::create_account( &context.payer.pubkey(), diff --git a/program-test/tests/realloc.rs b/program-test/tests/realloc.rs index 14a93439e826f1..97b60ae083928f 100644 --- a/program-test/tests/realloc.rs +++ b/program-test/tests/realloc.rs @@ -50,7 +50,7 @@ async fn realloc_smaller_in_cpi() { program_id, processor!(process_instruction), ); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let token_2022_id = solana_inline_spl::token_2022::id(); let mint = Keypair::new(); diff --git a/program-test/tests/return_data.rs b/program-test/tests/return_data.rs index d8c3aa992c4f43..99ec61000c252a 100644 --- a/program-test/tests/return_data.rs +++ b/program-test/tests/return_data.rs @@ -69,7 +69,7 @@ async fn return_data() { processor!(set_return_data_process_instruction), ); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let instructions = vec![Instruction { program_id: get_return_data_program_id, accounts: vec![AccountMeta::new_readonly(set_return_data_program_id, false)], @@ -109,7 +109,7 @@ async fn simulation_return_data() { processor!(error_set_return_data_process_instruction), ); - let mut context = program_test.start_with_context().await; + let context = program_test.start_with_context().await; let expected_data = vec![240, 159, 166, 150]; let instructions = vec![Instruction { program_id: error_set_return_data_program_id, diff --git a/program-test/tests/spl.rs b/program-test/tests/spl.rs index fc6deff7ca61c4..d352bbf93bf667 100644 --- a/program-test/tests/spl.rs +++ b/program-test/tests/spl.rs @@ -14,7 +14,7 @@ use { #[tokio::test] async fn programs_present() { - let (mut banks_client, _, _) = ProgramTest::default().start().await; + let (banks_client, _, _) = ProgramTest::default().start().await; let rent = banks_client.get_rent().await.unwrap(); let token_2022_id = solana_inline_spl::token_2022::id(); let (token_2022_programdata_id, _) = @@ -32,7 +32,7 @@ async fn programs_present() { #[tokio::test] async fn token_2022() { - let (mut banks_client, payer, recent_blockhash) = ProgramTest::default().start().await; + let (banks_client, payer, recent_blockhash) = ProgramTest::default().start().await; let token_2022_id = solana_inline_spl::token_2022::id(); let mint = Keypair::new(); diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index 47d443af20a4ca..8c21ea4f510fc0 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -3,7 +3,7 @@ use { crate::tpu_client::{RecentLeaderSlots, TpuClientConfig, MAX_FANOUT_SLOTS}, bincode::serialize, futures_util::{ - future::{join_all, FutureExt, TryFutureExt}, + future::{join_all, FutureExt}, stream::StreamExt, }, log::*, @@ -308,11 +308,12 @@ where // // Useful for end-users who don't need a persistent connection to each validator, // and want to abort more quickly. 
-fn timeout_future<'a, Fut: Future<Output = TransportResult<()>> + 'a>(
+async fn timeout_future<Fut: Future<Output = TransportResult<()>>>(
     timeout_duration: Duration,
     future: Fut,
-) -> impl Future<Output = TransportResult<()>> + 'a {
+) -> TransportResult<()> {
     timeout(timeout_duration, future)
+        .await
         .unwrap_or_else(|_| Err(TransportError::Custom("Timed out".to_string())))
 }

From a2be99c517a867c91b858f266ea51a4f8b164b86 Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Fri, 16 Aug 2024 12:40:54 -0400
Subject: [PATCH 150/529] ff cleanup: merkle_conflict_duplicate_proofs (#2601)

---
 core/src/window_service.rs | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index 14ffcf6277a890..511da236958af3 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -159,11 +159,6 @@ fn run_check_duplicate(
             root_bank = bank_forks.read().unwrap().root_bank();
         }
         let shred_slot = shred.slot();
-        let merkle_conflict_duplicate_proofs = cluster_nodes::check_feature_activation(
-            &feature_set::merkle_conflict_duplicate_proofs::id(),
-            shred_slot,
-            &root_bank,
-        );
         let chained_merkle_conflict_duplicate_proofs = cluster_nodes::check_feature_activation(
             &feature_set::chained_merkle_conflict_duplicate_proofs::id(),
             shred_slot,
@@ -173,22 +168,18 @@ fn run_check_duplicate(
             PossibleDuplicateShred::LastIndexConflict(shred, conflict)
             | PossibleDuplicateShred::ErasureConflict(shred, conflict) => (shred, conflict),
             PossibleDuplicateShred::MerkleRootConflict(shred, conflict) => {
-                if merkle_conflict_duplicate_proofs {
-                    // Although this proof can be immediately stored on detection, we wait until
-                    // here in order to check the feature flag, as storage in blockstore can
-                    // preclude the detection of other duplicate proofs in this slot
-                    if blockstore.has_duplicate_shreds_in_slot(shred_slot) {
-                        return Ok(());
-                    }
-                    blockstore.store_duplicate_slot(
-                        shred_slot,
-                        conflict.clone(),
-                        shred.clone().into_payload(),
-                    )?;
-                    (shred, conflict)
-                } else {
+                // Although this proof can be immediately stored on detection, we wait until
+                // here in order to first check for an existing proof, as storage in blockstore
+                // can preclude the detection of other duplicate proofs in this slot
+                if blockstore.has_duplicate_shreds_in_slot(shred_slot) {
                     return Ok(());
                 }
+                blockstore.store_duplicate_slot(
+                    shred_slot,
+                    conflict.clone(),
+                    shred.clone().into_payload(),
+                )?;
+                (shred, conflict)
             }
             PossibleDuplicateShred::ChainedMerkleRootConflict(shred, conflict) => {
                 if chained_merkle_conflict_duplicate_proofs {

From 459983c8f8175b1490392bbfb7fb26b5fe90fe6f Mon Sep 17 00:00:00 2001
From: Jon C
Date: Fri, 16 Aug 2024 12:45:53 -0400
Subject: [PATCH 151/529] Add missing CHANGELOG entries (#2634)

#### Problem

There have been a couple of changes not reported in the CHANGELOG:
banks-client using `&self`, and solana-genesis cloning the feature set from
clusters.

#### Summary of changes

Add changelog entries for these PRs.
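To illustrate what the `&self` relaxation enables, a sketch (not code from the patch): one `BanksClient` can be shared across concurrent tasks behind an `Arc`, with no mutex, using only methods whose signatures appear in the diff above.

```rust
use std::sync::Arc;

use futures::future::join_all;
use solana_banks_client::BanksClient;
use solana_sdk::pubkey::Pubkey;

// Fan out balance queries over one shared client. This relies only on
// `get_balance` taking `&self`, as introduced by the banks-client refactor.
async fn fetch_balances(client: Arc<BanksClient>, addresses: Vec<Pubkey>) -> Vec<u64> {
    let queries = addresses.into_iter().map(|address| {
        let client = Arc::clone(&client);
        async move { client.get_balance(address).await.unwrap_or(0) }
    });
    join_all(queries).await
}
```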
* `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447) * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480) + * `solana-genesis`: the `--cluster-type` parameter now clones the feature set from the target cluster (#2587) ## [2.0.0] * Breaking From 0df0aaa685d0b5b5c661a0498c6511d607253647 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Fri, 16 Aug 2024 14:39:51 -0300 Subject: [PATCH 152/529] Implement concurrent test for the SVM (#2610) * Implement concurrent test for the SVM * Remove debug print and optimize lock --- svm/tests/concurrent_tests.rs | 192 +++++++++++++++++- .../transfer-from-account/Cargo.toml | 12 ++ .../transfer-from-account/src/lib.rs | 28 +++ .../transfer_from_account_program.so | Bin 0 -> 67888 bytes svm/tests/integration_test.rs | 180 +--------------- svm/tests/mock_bank.rs | 162 ++++++++++++++- 6 files changed, 394 insertions(+), 180 deletions(-) create mode 100644 svm/tests/example-programs/transfer-from-account/Cargo.toml create mode 100644 svm/tests/example-programs/transfer-from-account/src/lib.rs create mode 100755 svm/tests/example-programs/transfer-from-account/transfer_from_account_program.so diff --git a/svm/tests/concurrent_tests.rs b/svm/tests/concurrent_tests.rs index cfe9f2233afceb..e11019be16a346 100644 --- a/svm/tests/concurrent_tests.rs +++ b/svm/tests/concurrent_tests.rs @@ -1,20 +1,40 @@ #![cfg(feature = "shuttle-test")] use { - crate::mock_bank::{deploy_program, MockForkGraph}, + crate::{ + mock_bank::{ + create_executable_environment, deploy_program, register_builtins, MockForkGraph, + }, + transaction_builder::SanitizedTransactionBuilder, + }, mock_bank::MockBankCallback, shuttle::{ sync::{Arc, RwLock}, thread, Runner, }, solana_program_runtime::loaded_programs::ProgramCacheEntryType, - solana_sdk::pubkey::Pubkey, - solana_svm::transaction_processor::TransactionBatchProcessor, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + hash::Hash, + instruction::AccountMeta, + pubkey::Pubkey, + signature::Signature, + }, + solana_svm::{ + account_loader::{CheckedTransactionDetails, TransactionCheckResult}, + transaction_processing_result::TransactionProcessingResultExtensions, + transaction_processor::{ + ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingConfig, + TransactionProcessingEnvironment, + }, + }, std::collections::{HashMap, HashSet}, }; mod mock_bank; +mod transaction_builder; + fn program_cache_execution(threads: usize) { let mut mock_bank = MockBankCallback::default(); let batch_processor = TransactionBatchProcessor::::new(5, 5, HashSet::new()); @@ -97,3 +117,169 @@ fn test_program_cache_with_exhaustive_scheduler() { let runner = Runner::new(scheduler, Default::default()); runner.run(move || program_cache_execution(4)); } + +// This test executes multiple transactions in parallel where all read from the same data account, +// but write to different accounts. Given that there are no locks in this case, SVM must behave +// correctly. 
+fn svm_concurrent() { + let mock_bank = Arc::new(MockBankCallback::default()); + let batch_processor = Arc::new(TransactionBatchProcessor::::new( + 5, + 2, + HashSet::new(), + )); + let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); + + create_executable_environment( + fork_graph.clone(), + &mock_bank, + &mut batch_processor.program_cache.write().unwrap(), + ); + + batch_processor.fill_missing_sysvar_cache_entries(&*mock_bank); + register_builtins(&mock_bank, &batch_processor); + + let mut transaction_builder = SanitizedTransactionBuilder::default(); + let program_id = deploy_program("transfer-from-account".to_string(), 0, &mock_bank); + + const THREADS: usize = 4; + const TRANSACTIONS_PER_THREAD: usize = 3; + const AMOUNT: u64 = 50; + const CAPACITY: usize = THREADS * TRANSACTIONS_PER_THREAD; + const BALANCE: u64 = 500000; + + let mut transactions = vec![Vec::new(); THREADS]; + let mut check_data = vec![Vec::new(); THREADS]; + let read_account = Pubkey::new_unique(); + let mut account_data = AccountSharedData::default(); + account_data.set_data(AMOUNT.to_le_bytes().to_vec()); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(read_account, account_data); + + #[derive(Clone)] + struct CheckTxData { + sender: Pubkey, + recipient: Pubkey, + fee_payer: Pubkey, + } + + for idx in 0..CAPACITY { + let sender = Pubkey::new_unique(); + let recipient = Pubkey::new_unique(); + let fee_payer = Pubkey::new_unique(); + let system_account = Pubkey::from([0u8; 32]); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(BALANCE); + + { + let shared_data = &mut mock_bank.account_shared_data.write().unwrap(); + shared_data.insert(sender, account_data.clone()); + shared_data.insert(recipient, account_data.clone()); + shared_data.insert(fee_payer, account_data); + } + + transaction_builder.create_instruction( + program_id, + vec![ + AccountMeta { + pubkey: sender, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey: recipient, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: read_account, + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: system_account, + is_signer: false, + is_writable: false, + }, + ], + HashMap::from([(sender, Signature::new_unique())]), + vec![0], + ); + + let sanitized_transaction = + transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique()), true); + transactions[idx % THREADS].push(sanitized_transaction.unwrap()); + check_data[idx % THREADS].push(CheckTxData { + fee_payer, + recipient, + sender, + }); + } + + let ths: Vec<_> = (0..THREADS) + .map(|idx| { + let local_batch = batch_processor.clone(); + let local_bank = mock_bank.clone(); + let th_txs = std::mem::take(&mut transactions[idx]); + let check_results = vec![ + Ok(CheckedTransactionDetails { + nonce: None, + lamports_per_signature: 20 + }) as TransactionCheckResult; + TRANSACTIONS_PER_THREAD + ]; + let processing_config = TransactionProcessingConfig { + recording_config: ExecutionRecordingConfig { + enable_log_recording: true, + enable_return_data_recording: false, + enable_cpi_recording: false, + }, + ..Default::default() + }; + let check_tx_data = std::mem::take(&mut check_data[idx]); + + thread::spawn(move || { + let result = local_batch.load_and_execute_sanitized_transactions( + &*local_bank, + &th_txs, + check_results, + &TransactionProcessingEnvironment::default(), + &processing_config, + ); + + for (idx, item) in result.processing_results.iter().enumerate() { + 
assert!(item.was_processed()); + let inserted_accounts = &check_tx_data[idx]; + for (key, account_data) in &item.as_ref().unwrap().loaded_transaction.accounts { + if *key == inserted_accounts.fee_payer { + assert_eq!(account_data.lamports(), BALANCE - 10000); + } else if *key == inserted_accounts.sender { + assert_eq!(account_data.lamports(), BALANCE - AMOUNT); + } else if *key == inserted_accounts.recipient { + assert_eq!(account_data.lamports(), BALANCE + AMOUNT); + } + } + } + }) + }) + .collect(); + + for th in ths { + th.join().unwrap(); + } +} + +#[test] +fn test_svm_with_probabilistic_scheduler() { + shuttle::check_pct( + move || { + svm_concurrent(); + }, + 300, + 5, + ); +} diff --git a/svm/tests/example-programs/transfer-from-account/Cargo.toml b/svm/tests/example-programs/transfer-from-account/Cargo.toml new file mode 100644 index 00000000000000..3b05d81523837c --- /dev/null +++ b/svm/tests/example-programs/transfer-from-account/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "transfer-from-account" +version = "2.1.0" +edition = "2021" + +[dependencies] +solana-program = { path = "../../../../sdk/program", version = "=2.1.0" } + +[lib] +crate-type = ["cdylib", "rlib"] + +[workspace] \ No newline at end of file diff --git a/svm/tests/example-programs/transfer-from-account/src/lib.rs b/svm/tests/example-programs/transfer-from-account/src/lib.rs new file mode 100644 index 00000000000000..08494a13ce0395 --- /dev/null +++ b/svm/tests/example-programs/transfer-from-account/src/lib.rs @@ -0,0 +1,28 @@ +use solana_program::{ + account_info::{AccountInfo, next_account_info}, entrypoint, entrypoint::ProgramResult, pubkey::Pubkey, + program::invoke, system_instruction, +}; + +entrypoint!(process_instruction); + + +fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + _data: &[u8] +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + let payer = next_account_info(accounts_iter)?; + let recipient = next_account_info(accounts_iter)?; + let data_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + let amount = u64::from_le_bytes(data_account.data.borrow()[0..8].try_into().unwrap()); + + invoke( + &system_instruction::transfer(payer.key, recipient.key, amount), + &[payer.clone(), recipient.clone(), system_program.clone()], + )?; + + Ok(()) +} \ No newline at end of file diff --git a/svm/tests/example-programs/transfer-from-account/transfer_from_account_program.so b/svm/tests/example-programs/transfer-from-account/transfer_from_account_program.so new file mode 100755 index 0000000000000000000000000000000000000000..a3ef926d3747ef4a499aa4dd4127fcadbead6948 GIT binary patch literal 67888 zcmeHw3w%_^b@#pd(2Iw}V?h?iS{X1h_JYJi67q|Ov1P|Gvhi9N+d+%81`O!6!f?G! 
[... remainder of the 67,888-byte base85-encoded GIT binary patch for
transfer_from_account_program.so omitted: the blob is the compiled example
program and carries no human-readable content ...]

diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs
index 5070bca08e0907..f35f023e54ae2a 100644
--- a/svm/tests/integration_test.rs
+++ b/svm/tests/integration_test.rs
@@ -2,28 +2,15 @@ use {
     crate::{
- mock_bank::{deploy_program, MockBankCallback}, - transaction_builder::SanitizedTransactionBuilder, - }, - solana_bpf_loader_program::syscalls::{ - SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, - SyscallMemset, SyscallSetReturnData, - }, - solana_compute_budget::compute_budget::ComputeBudget, - solana_program_runtime::{ - invoke_context::InvokeContext, - loaded_programs::{ - BlockRelation, ForkGraph, ProgramCache, ProgramCacheEntry, ProgramRuntimeEnvironments, - }, - solana_rbpf::{ - program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, - vm::Config, + mock_bank::{ + create_executable_environment, deploy_program, register_builtins, MockBankCallback, + MockForkGraph, }, + transaction_builder::SanitizedTransactionBuilder, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, - bpf_loader_upgradeable::{self}, - clock::{Clock, Epoch, Slot, UnixTimestamp}, + clock::Clock, hash::Hash, instruction::AccountMeta, pubkey::Pubkey, @@ -40,165 +27,20 @@ use { TransactionProcessingEnvironment, }, }, - std::{ - cmp::Ordering, - collections::{HashMap, HashSet}, - sync::{Arc, RwLock}, - time::{SystemTime, UNIX_EPOCH}, - }, + solana_type_overrides::sync::{Arc, RwLock}, + std::collections::{HashMap, HashSet}, }; // This module contains the implementation of TransactionProcessingCallback mod mock_bank; mod transaction_builder; -const BPF_LOADER_NAME: &str = "solana_bpf_loader_upgradeable_program"; -const SYSTEM_PROGRAM_NAME: &str = "system_program"; const DEPLOYMENT_SLOT: u64 = 0; const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot -const DEPLOYMENT_EPOCH: u64 = 0; const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch -struct MockForkGraph {} - -impl ForkGraph for MockForkGraph { - fn relationship(&self, a: Slot, b: Slot) -> BlockRelation { - match a.cmp(&b) { - Ordering::Less => BlockRelation::Ancestor, - Ordering::Equal => BlockRelation::Equal, - Ordering::Greater => BlockRelation::Descendant, - } - } - - fn slot_epoch(&self, _slot: Slot) -> Option { - Some(0) - } -} - -fn create_custom_environment<'a>() -> BuiltinProgram> { - let compute_budget = ComputeBudget::default(); - let vm_config = Config { - max_call_depth: compute_budget.max_call_depth, - stack_frame_size: compute_budget.stack_frame_size, - enable_address_translation: true, - enable_stack_frame_gaps: true, - instruction_meter_checkpoint_distance: 10000, - enable_instruction_meter: true, - enable_instruction_tracing: true, - enable_symbol_and_section_labels: true, - reject_broken_elfs: true, - noop_instruction_rate: 256, - sanitize_user_provided_values: true, - external_internal_function_hash_collision: false, - reject_callx_r10: false, - enable_sbpf_v1: true, - enable_sbpf_v2: false, - optimize_rodata: false, - aligned_memory_mapping: true, - }; - - // These functions are system calls the compile contract calls during execution, so they - // need to be registered. 
- let mut function_registry = FunctionRegistry::>::default(); - function_registry - .register_function_hashed(*b"abort", SyscallAbort::vm) - .expect("Registration failed"); - function_registry - .register_function_hashed(*b"sol_log_", SyscallLog::vm) - .expect("Registration failed"); - function_registry - .register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm) - .expect("Registration failed"); - function_registry - .register_function_hashed(*b"sol_memset_", SyscallMemset::vm) - .expect("Registration failed"); - - function_registry - .register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm) - .expect("Registration failed"); - - function_registry - .register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::vm) - .expect("Registration failed"); - - function_registry - .register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm) - .expect("Registration failed"); - - BuiltinProgram::new_loader(vm_config, function_registry) -} - -fn create_executable_environment( - fork_graph: Arc>, - mock_bank: &mut MockBankCallback, - program_cache: &mut ProgramCache, -) { - program_cache.environments = ProgramRuntimeEnvironments { - program_runtime_v1: Arc::new(create_custom_environment()), - // We are not using program runtime v2 - program_runtime_v2: Arc::new(BuiltinProgram::new_loader( - Config::default(), - FunctionRegistry::default(), - )), - }; - - program_cache.fork_graph = Some(Arc::downgrade(&fork_graph)); - - // We must fill in the sysvar cache entries - let time_now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_secs() as i64; - let clock = Clock { - slot: DEPLOYMENT_SLOT, - epoch_start_timestamp: time_now.saturating_sub(10) as UnixTimestamp, - epoch: DEPLOYMENT_EPOCH, - leader_schedule_epoch: DEPLOYMENT_EPOCH, - unix_timestamp: time_now as UnixTimestamp, - }; - - let mut account_data = AccountSharedData::default(); - account_data.set_data(bincode::serialize(&clock).unwrap()); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(Clock::id(), account_data); -} - -fn register_builtins( - mock_bank: &MockBankCallback, - batch_processor: &TransactionBatchProcessor, -) { - // We must register the bpf loader account as a loadable account, otherwise programs - // won't execute. - batch_processor.add_builtin( - mock_bank, - bpf_loader_upgradeable::id(), - BPF_LOADER_NAME, - ProgramCacheEntry::new_builtin( - DEPLOYMENT_SLOT, - BPF_LOADER_NAME.len(), - solana_bpf_loader_program::Entrypoint::vm, - ), - ); - - // In order to perform a transference of native tokens using the system instruction, - // the system program builtin must be registered. 
- batch_processor.add_builtin( - mock_bank, - solana_system_program::id(), - SYSTEM_PROGRAM_NAME, - ProgramCacheEntry::new_builtin( - DEPLOYMENT_SLOT, - SYSTEM_PROGRAM_NAME.len(), - solana_system_program::system_processor::Entrypoint::vm, - ), - ); -} - fn prepare_transactions( - mock_bank: &mut MockBankCallback, + mock_bank: &MockBankCallback, ) -> (Vec, Vec) { let mut transaction_builder = SanitizedTransactionBuilder::default(); let mut all_transactions = Vec::new(); @@ -392,8 +234,8 @@ fn prepare_transactions( #[test] fn svm_integration() { - let mut mock_bank = MockBankCallback::default(); - let (transactions, check_results) = prepare_transactions(&mut mock_bank); + let mock_bank = MockBankCallback::default(); + let (transactions, check_results) = prepare_transactions(&mock_bank); let batch_processor = TransactionBatchProcessor::::new( EXECUTION_SLOT, EXECUTION_EPOCH, @@ -404,7 +246,7 @@ fn svm_integration() { create_executable_environment( fork_graph.clone(), - &mut mock_bank, + &mock_bank, &mut batch_processor.program_cache.write().unwrap(), ); diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 355f9f0ce8898a..169ac63cf8854b 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -1,15 +1,33 @@ use { - solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, + solana_bpf_loader_program::syscalls::{ + SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, + SyscallMemset, SyscallSetReturnData, + }, + solana_compute_budget::compute_budget::ComputeBudget, + solana_program_runtime::{ + invoke_context::InvokeContext, + loaded_programs::{ + BlockRelation, ForkGraph, ProgramCache, ProgramCacheEntry, ProgramRuntimeEnvironments, + }, + solana_rbpf::{ + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + vm::Config, + }, + }, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - clock::Epoch, + clock::{Clock, Epoch, UnixTimestamp}, feature_set::FeatureSet, native_loader, pubkey::Pubkey, slot_hashes::Slot, + sysvar::SysvarId, + }, + solana_svm::{ + transaction_processing_callback::TransactionProcessingCallback, + transaction_processor::TransactionBatchProcessor, }, - solana_svm::transaction_processing_callback::TransactionProcessingCallback, solana_type_overrides::sync::{Arc, RwLock}, std::{ cmp::Ordering, @@ -17,6 +35,7 @@ use { env, fs::{self, File}, io::Read, + time::{SystemTime, UNIX_EPOCH}, }, }; @@ -97,11 +116,7 @@ fn load_program(name: String) -> Vec { } #[allow(unused)] -pub fn deploy_program( - name: String, - deployment_slot: Slot, - mock_bank: &mut MockBankCallback, -) -> Pubkey { +pub fn deploy_program(name: String, deployment_slot: Slot, mock_bank: &MockBankCallback) -> Pubkey { let program_account = Pubkey::new_unique(); let program_data_account = Pubkey::new_unique(); let state = UpgradeableLoaderState::Program { @@ -144,3 +159,134 @@ pub fn deploy_program( program_account } + +#[allow(unused)] +pub fn create_executable_environment( + fork_graph: Arc>, + mock_bank: &MockBankCallback, + program_cache: &mut ProgramCache, +) { + const DEPLOYMENT_EPOCH: u64 = 0; + const DEPLOYMENT_SLOT: u64 = 0; + + program_cache.environments = ProgramRuntimeEnvironments { + program_runtime_v1: Arc::new(create_custom_environment()), + // We are not using program runtime v2 + program_runtime_v2: Arc::new(BuiltinProgram::new_loader( + Config::default(), + FunctionRegistry::default(), + )), + }; + + program_cache.fork_graph = 
Some(Arc::downgrade(&fork_graph)); + + // We must fill in the sysvar cache entries + let time_now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64; + let clock = Clock { + slot: DEPLOYMENT_SLOT, + epoch_start_timestamp: time_now.saturating_sub(10) as UnixTimestamp, + epoch: DEPLOYMENT_EPOCH, + leader_schedule_epoch: DEPLOYMENT_EPOCH, + unix_timestamp: time_now as UnixTimestamp, + }; + + let mut account_data = AccountSharedData::default(); + account_data.set_data(bincode::serialize(&clock).unwrap()); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(Clock::id(), account_data); +} + +#[allow(unused)] +pub fn register_builtins( + mock_bank: &MockBankCallback, + batch_processor: &TransactionBatchProcessor, +) { + const DEPLOYMENT_SLOT: u64 = 0; + // We must register the bpf loader account as a loadable account, otherwise programs + // won't execute. + let bpf_loader_name = "solana_bpf_loader_upgradeable_program"; + batch_processor.add_builtin( + mock_bank, + bpf_loader_upgradeable::id(), + bpf_loader_name, + ProgramCacheEntry::new_builtin( + DEPLOYMENT_SLOT, + bpf_loader_name.len(), + solana_bpf_loader_program::Entrypoint::vm, + ), + ); + + // In order to perform a transference of native tokens using the system instruction, + // the system program builtin must be registered. + let system_program_name = "system_program"; + batch_processor.add_builtin( + mock_bank, + solana_system_program::id(), + system_program_name, + ProgramCacheEntry::new_builtin( + DEPLOYMENT_SLOT, + system_program_name.len(), + solana_system_program::system_processor::Entrypoint::vm, + ), + ); +} + +#[allow(unused)] +fn create_custom_environment<'a>() -> BuiltinProgram> { + let compute_budget = ComputeBudget::default(); + let vm_config = Config { + max_call_depth: compute_budget.max_call_depth, + stack_frame_size: compute_budget.stack_frame_size, + enable_address_translation: true, + enable_stack_frame_gaps: true, + instruction_meter_checkpoint_distance: 10000, + enable_instruction_meter: true, + enable_instruction_tracing: true, + enable_symbol_and_section_labels: true, + reject_broken_elfs: true, + noop_instruction_rate: 256, + sanitize_user_provided_values: true, + external_internal_function_hash_collision: false, + reject_callx_r10: false, + enable_sbpf_v1: true, + enable_sbpf_v2: false, + optimize_rodata: false, + aligned_memory_mapping: true, + }; + + // These functions are system calls the compile contract calls during execution, so they + // need to be registered. 
+ let mut function_registry = FunctionRegistry::>::default(); + function_registry + .register_function_hashed(*b"abort", SyscallAbort::vm) + .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_log_", SyscallLog::vm) + .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm) + .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_memset_", SyscallMemset::vm) + .expect("Registration failed"); + + function_registry + .register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm) + .expect("Registration failed"); + + function_registry + .register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::vm) + .expect("Registration failed"); + + function_registry + .register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm) + .expect("Registration failed"); + + BuiltinProgram::new_loader(vm_config, function_registry) +} From 1ff046ce1fd035df0ca7e573990741fa480a4eb0 Mon Sep 17 00:00:00 2001 From: asolana <110843012+ksolana@users.noreply.github.com> Date: Fri, 16 Aug 2024 11:23:18 -0700 Subject: [PATCH 153/529] Adding bench_poh_recorder_record_transaction_index (#2546) Adapted from test_poh_recorder_record_transaction_index test bench_poh_recorder_record_transaction_index ... bench: 27,786,204 ns/iter (+/- 1,134,088) --- poh/benches/poh.rs | 93 ++++++++++++++++++++++++++++++++++++++++- poh/src/poh_recorder.rs | 7 +++- 2 files changed, 97 insertions(+), 3 deletions(-) diff --git a/poh/benches/poh.rs b/poh/benches/poh.rs index 5ea2d3a9babf35..d22b1150390708 100644 --- a/poh/benches/poh.rs +++ b/poh/benches/poh.rs @@ -5,8 +5,20 @@ extern crate test; use { solana_entry::poh::Poh, - solana_poh::poh_service::DEFAULT_HASHES_PER_BATCH, - solana_sdk::hash::Hash, + solana_ledger::{ + blockstore::Blockstore, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + get_tmp_ledger_path_auto_delete, + leader_schedule_cache::LeaderScheduleCache, + }, + solana_perf::test_tx::test_tx, + solana_poh::{poh_recorder::PohRecorder, poh_service::DEFAULT_HASHES_PER_BATCH}, + solana_runtime::bank::Bank, + solana_sdk::{ + hash::{hash, Hash}, + poh_config::PohConfig, + transaction::SanitizedTransaction, + }, std::sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, @@ -65,3 +77,80 @@ fn bench_poh_lock_time_per_batch(bencher: &mut Bencher) { poh.hash(DEFAULT_HASHES_PER_BATCH); }) } + +#[bench] +fn bench_poh_recorder_record_transaction_index(bencher: &mut Bencher) { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + Arc::new(blockstore), + &std::sync::Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + let h1 = hash(b"hello Agave, hello Anza!"); + + poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); + poh_recorder.tick(); + let txs: [SanitizedTransaction; 7] = [ + SanitizedTransaction::from_transaction_for_tests(test_tx()), + SanitizedTransaction::from_transaction_for_tests(test_tx()), + SanitizedTransaction::from_transaction_for_tests(test_tx()), + SanitizedTransaction::from_transaction_for_tests(test_tx()), + SanitizedTransaction::from_transaction_for_tests(test_tx()), + SanitizedTransaction::from_transaction_for_tests(test_tx()), + SanitizedTransaction::from_transaction_for_tests(test_tx()), + ]; + + bencher.iter(|| { + let _record_result = poh_recorder + .record( + bank.slot(), + test::black_box(h1), + test::black_box(&txs) + .iter() + .map(|tx| tx.to_versioned_transaction()) + .collect(), + ) + .unwrap() + .unwrap(); + }); + poh_recorder.tick(); +} + +#[bench] +fn bench_poh_recorder_set_bank(bencher: &mut Bencher) { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = + Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let prev_hash = bank.last_blockhash(); + + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( + 0, + prev_hash, + bank.clone(), + Some((4, 4)), + bank.ticks_per_slot(), + Arc::new(blockstore), + &std::sync::Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &PohConfig::default(), + Arc::new(AtomicBool::default()), + ); + bencher.iter(|| { + poh_recorder.set_bank_with_transaction_index_for_test(bank.clone()); + poh_recorder.tick(); + poh_recorder.clear_bank_for_test(); + }); +} diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 5c6f5d26c7f4f5..3fe76d5274bcf8 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -734,11 +734,16 @@ impl PohRecorder { self.set_bank(BankWithScheduler::new_without_scheduler(bank), false) } - #[cfg(test)] + #[cfg(feature = "dev-context-only-utils")] pub fn set_bank_with_transaction_index_for_test(&mut self, bank: Arc) { self.set_bank(BankWithScheduler::new_without_scheduler(bank), true) } + #[cfg(feature = "dev-context-only-utils")] + pub fn clear_bank_for_test(&mut self) { + self.clear_bank(); + } + // Flush cache will delay flushing the cache for a bank until it past the WorkingBank::min_tick_height // On a record flush will flush the cache at the WorkingBank::min_tick_height, since a record // occurs after the min_tick_height was generated From 28cbac179ade62a69659067ebb13f71e54672d72 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 16 Aug 2024 14:46:03 -0400 Subject: [PATCH 154/529] SVM: Hoist transaction error metrics reporting to banking stage (#2633) --- core/src/banking_stage/leader_slot_metrics.rs | 94 ++++++++++++++++++- svm/src/transaction_error_metrics.rs | 94 +------------------ 2 files changed, 94 insertions(+), 94 deletions(-) diff --git a/core/src/banking_stage/leader_slot_metrics.rs 
b/core/src/banking_stage/leader_slot_metrics.rs index e305ded2d468e3..ce232a370b2a5f 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -398,6 +398,98 @@ impl LeaderSlotPacketCountMetrics { } } +fn report_transaction_error_metrics(errors: &TransactionErrorMetrics, id: &str, slot: Slot) { + datapoint_info!( + "banking_stage-leader_slot_transaction_errors", + "id" => id, + ("slot", slot as i64, i64), + ("total", errors.total as i64, i64), + ("account_in_use", errors.account_in_use as i64, i64), + ( + "too_many_account_locks", + errors.too_many_account_locks as i64, + i64 + ), + ( + "account_loaded_twice", + errors.account_loaded_twice as i64, + i64 + ), + ("account_not_found", errors.account_not_found as i64, i64), + ("blockhash_not_found", errors.blockhash_not_found as i64, i64), + ("blockhash_too_old", errors.blockhash_too_old as i64, i64), + ("call_chain_too_deep", errors.call_chain_too_deep as i64, i64), + ("already_processed", errors.already_processed as i64, i64), + ("instruction_error", errors.instruction_error as i64, i64), + ("insufficient_funds", errors.insufficient_funds as i64, i64), + ( + "invalid_account_for_fee", + errors.invalid_account_for_fee as i64, + i64 + ), + ( + "invalid_account_index", + errors.invalid_account_index as i64, + i64 + ), + ( + "invalid_program_for_execution", + errors.invalid_program_for_execution as i64, + i64 + ), + ( + "invalid_compute_budget", + errors.invalid_compute_budget as i64, + i64 + ), + ( + "not_allowed_during_cluster_maintenance", + errors.not_allowed_during_cluster_maintenance as i64, + i64 + ), + ( + "invalid_writable_account", + errors.invalid_writable_account as i64, + i64 + ), + ( + "invalid_rent_paying_account", + errors.invalid_rent_paying_account as i64, + i64 + ), + ( + "would_exceed_max_block_cost_limit", + errors.would_exceed_max_block_cost_limit as i64, + i64 + ), + ( + "would_exceed_max_account_cost_limit", + errors.would_exceed_max_account_cost_limit as i64, + i64 + ), + ( + "would_exceed_max_vote_cost_limit", + errors.would_exceed_max_vote_cost_limit as i64, + i64 + ), + ( + "would_exceed_account_data_block_limit", + errors.would_exceed_account_data_block_limit as i64, + i64 + ), + ( + "max_loaded_accounts_data_size_exceeded", + errors.max_loaded_accounts_data_size_exceeded as i64, + i64 + ), + ( + "program_execution_temporarily_restricted", + errors.program_execution_temporarily_restricted as i64, + i64 + ), + ); +} + #[derive(Debug)] pub(crate) struct LeaderSlotMetrics { // banking_stage creates one QosService instance per working threads, that is uniquely @@ -447,7 +539,7 @@ impl LeaderSlotMetrics { self.is_reported = true; self.timing_metrics.report(&self.id, self.slot); - self.transaction_error_metrics.report(&self.id, self.slot); + report_transaction_error_metrics(&self.transaction_error_metrics, &self.id, self.slot); self.packet_count_metrics.report(&self.id, self.slot); self.vote_packet_count_metrics.report(&self.id, self.slot); self.prioritization_fees_metric.report(&self.id, self.slot); diff --git a/svm/src/transaction_error_metrics.rs b/svm/src/transaction_error_metrics.rs index d0f848205d2051..5b3ec2b7e53d1d 100644 --- a/svm/src/transaction_error_metrics.rs +++ b/svm/src/transaction_error_metrics.rs @@ -1,4 +1,4 @@ -use solana_sdk::{clock::Slot, saturating_add_assign}; +use solana_sdk::saturating_add_assign; #[derive(Debug, Default)] pub struct TransactionErrorMetrics { @@ -89,96 +89,4 @@ impl TransactionErrorMetrics { 
other.program_execution_temporarily_restricted ); } - - pub fn report(&self, id: &str, slot: Slot) { - datapoint_info!( - "banking_stage-leader_slot_transaction_errors", - "id" => id, - ("slot", slot as i64, i64), - ("total", self.total as i64, i64), - ("account_in_use", self.account_in_use as i64, i64), - ( - "too_many_account_locks", - self.too_many_account_locks as i64, - i64 - ), - ( - "account_loaded_twice", - self.account_loaded_twice as i64, - i64 - ), - ("account_not_found", self.account_not_found as i64, i64), - ("blockhash_not_found", self.blockhash_not_found as i64, i64), - ("blockhash_too_old", self.blockhash_too_old as i64, i64), - ("call_chain_too_deep", self.call_chain_too_deep as i64, i64), - ("already_processed", self.already_processed as i64, i64), - ("instruction_error", self.instruction_error as i64, i64), - ("insufficient_funds", self.insufficient_funds as i64, i64), - ( - "invalid_account_for_fee", - self.invalid_account_for_fee as i64, - i64 - ), - ( - "invalid_account_index", - self.invalid_account_index as i64, - i64 - ), - ( - "invalid_program_for_execution", - self.invalid_program_for_execution as i64, - i64 - ), - ( - "invalid_compute_budget", - self.invalid_compute_budget as i64, - i64 - ), - ( - "not_allowed_during_cluster_maintenance", - self.not_allowed_during_cluster_maintenance as i64, - i64 - ), - ( - "invalid_writable_account", - self.invalid_writable_account as i64, - i64 - ), - ( - "invalid_rent_paying_account", - self.invalid_rent_paying_account as i64, - i64 - ), - ( - "would_exceed_max_block_cost_limit", - self.would_exceed_max_block_cost_limit as i64, - i64 - ), - ( - "would_exceed_max_account_cost_limit", - self.would_exceed_max_account_cost_limit as i64, - i64 - ), - ( - "would_exceed_max_vote_cost_limit", - self.would_exceed_max_vote_cost_limit as i64, - i64 - ), - ( - "would_exceed_account_data_block_limit", - self.would_exceed_account_data_block_limit as i64, - i64 - ), - ( - "max_loaded_accounts_data_size_exceeded", - self.max_loaded_accounts_data_size_exceeded as i64, - i64 - ), - ( - "program_execution_temporarily_restricted", - self.program_execution_temporarily_restricted as i64, - i64 - ), - ); - } } From 1871e2c70932d9fb8e31a6fcd4bdda2e05c25432 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 16 Aug 2024 21:11:56 +0000 Subject: [PATCH 155/529] updates ContactInfo.outset when hot-swapping identity (#2613) When hot-swapping identity, ContactInfo.outset should be updated so that the new ContactInfo overrides older node with the same pubkey. 
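An illustrative aside (not part of the change): the override rule keys off
ContactInfo.outset, the microsecond timestamp captured when a node starts.
Refreshing it inside hot_swap_pubkey makes the post-swap contact-info strictly
newer than any record the node previously gossiped under that pubkey. A
minimal, self-contained Rust sketch of the comparison follows; the NodeInfo
type and u64 pubkey are simplified stand-ins for the real gossip types.

use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Simplified stand-in for gossip's ContactInfo: only the two fields
// that drive the override decision are modeled here.
struct NodeInfo {
    pubkey: u64, // placeholder for solana_sdk::pubkey::Pubkey
    outset: u64, // microseconds since UNIX_EPOCH, captured at startup
}

fn get_node_outset() -> u64 {
    let elapsed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    u64::try_from(elapsed.as_micros()).unwrap()
}

impl NodeInfo {
    fn new(pubkey: u64) -> Self {
        Self { pubkey, outset: get_node_outset() }
    }

    // Mirrors ContactInfo::hot_swap_pubkey: refreshing outset is what
    // lets the swapped-in identity beat the pre-swap record.
    fn hot_swap_pubkey(&mut self, pubkey: u64) {
        self.pubkey = pubkey;
        self.outset = get_node_outset();
    }

    // Mirrors ContactInfo::overrides: for the same pubkey the larger
    // (more recent) outset wins; distinct pubkeys never override.
    fn overrides(&self, other: &Self) -> Option<bool> {
        (self.pubkey == other.pubkey).then(|| self.outset > other.outset)
    }
}

fn main() {
    let mut node = NodeInfo::new(42);
    let other = NodeInfo::new(7);
    // Sleep so the refreshed timestamp is strictly larger, then swap.
    std::thread::sleep(Duration::from_millis(1));
    node.hot_swap_pubkey(other.pubkey);
    assert_eq!(node.overrides(&other), Some(true));
    assert_eq!(other.overrides(&node), Some(false));
}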
--- .../cluster_slots_service/cluster_slots.rs | 25 ++++++++------- gossip/src/cluster_info.rs | 2 +- gossip/src/contact_info.rs | 32 +++++++++++++++---- turbine/src/cluster_nodes.rs | 2 +- 4 files changed, 40 insertions(+), 21 deletions(-) diff --git a/core/src/cluster_slots_service/cluster_slots.rs b/core/src/cluster_slots_service/cluster_slots.rs index c0a8118398d85d..772723ec553e2b 100644 --- a/core/src/cluster_slots_service/cluster_slots.rs +++ b/core/src/cluster_slots_service/cluster_slots.rs @@ -278,8 +278,6 @@ mod tests { #[test] fn test_best_peer_2() { let cs = ClusterSlots::default(); - let mut c1 = ContactInfo::default(); - let mut c2 = ContactInfo::default(); let mut map = HashMap::new(); let k1 = solana_sdk::pubkey::new_rand(); let k2 = solana_sdk::pubkey::new_rand(); @@ -289,16 +287,14 @@ mod tests { .write() .unwrap() .insert(0, Arc::new(RwLock::new(map))); - c1.set_pubkey(k1); - c2.set_pubkey(k2); + let c1 = ContactInfo::new(k1, /*wallclock:*/ 0, /*shred_version:*/ 0); + let c2 = ContactInfo::new(k2, /*wallclock:*/ 0, /*shred_version:*/ 0); assert_eq!(cs.compute_weights(0, &[c1, c2]), vec![u64::MAX / 4, 1]); } #[test] fn test_best_peer_3() { let cs = ClusterSlots::default(); - let mut c1 = ContactInfo::default(); - let mut c2 = ContactInfo::default(); let mut map = HashMap::new(); let k1 = solana_sdk::pubkey::new_rand(); let k2 = solana_sdk::pubkey::new_rand(); @@ -318,18 +314,23 @@ mod tests { .into_iter() .collect(); *cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes); - c1.set_pubkey(k1); - c2.set_pubkey(k2); + let c1 = ContactInfo::new(k1, /*wallclock:*/ 0, /*shred_version:*/ 0); + let c2 = ContactInfo::new(k2, /*wallclock:*/ 0, /*shred_version:*/ 0); assert_eq!(cs.compute_weights(0, &[c1, c2]), vec![u64::MAX / 4 + 1, 1]); } #[test] fn test_best_completed_slot_peer() { let cs = ClusterSlots::default(); - let mut contact_infos = vec![ContactInfo::default(); 2]; - for ci in contact_infos.iter_mut() { - ci.set_pubkey(solana_sdk::pubkey::new_rand()); - } + let contact_infos: Vec<_> = std::iter::repeat_with(|| { + ContactInfo::new( + solana_sdk::pubkey::new_rand(), + 0, // wallclock + 0, // shred_version + ) + }) + .take(2) + .collect(); let slot = 9; // None of these validators have completed slot 9, so should diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index d3b0a9857481cb..07f7486d44260d 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -679,7 +679,7 @@ impl ClusterInfo { *instance = NodeInstance::new(&mut thread_rng(), id, timestamp()); } *self.keypair.write().unwrap() = new_keypair; - self.my_contact_info.write().unwrap().set_pubkey(id); + self.my_contact_info.write().unwrap().hot_swap_pubkey(id); self.refresh_my_gossip_contact_info(); self.push_message(CrdsValue::new_signed( diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index bf9a25241b25a4..09105909b7ab9b 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -182,11 +182,7 @@ impl ContactInfo { Self { pubkey, wallclock, - outset: { - let now = SystemTime::now(); - let elapsed = now.duration_since(UNIX_EPOCH).unwrap(); - u64::try_from(elapsed.as_micros()).unwrap() - }, + outset: get_node_outset(), shred_version, version: solana_version::Version::default(), addrs: Vec::::default(), @@ -216,8 +212,11 @@ impl ContactInfo { &self.version } - pub fn set_pubkey(&mut self, pubkey: Pubkey) { - self.pubkey = pubkey + pub(crate) fn hot_swap_pubkey(&mut self, pubkey: Pubkey) { + self.pubkey = pubkey; + // Need to update 
ContactInfo.outset so that this node's contact-info + // will override older node with the same pubkey. + self.outset = get_node_outset(); } pub fn set_wallclock(&mut self, wallclock: u64) { @@ -464,6 +463,12 @@ impl ContactInfo { } } +fn get_node_outset() -> u64 { + let now = SystemTime::now(); + let elapsed = now.duration_since(UNIX_EPOCH).unwrap(); + u64::try_from(elapsed.as_micros()).unwrap() +} + impl Default for ContactInfo { fn default() -> Self { Self::new( @@ -649,6 +654,7 @@ mod tests { iter::repeat_with, net::{Ipv4Addr, Ipv6Addr}, ops::Range, + time::Duration, }, }; @@ -1101,9 +1107,21 @@ mod tests { assert!(!other.check_duplicate(&node)); assert_eq!(node.overrides(&other), None); assert_eq!(other.overrides(&node), None); + + // Need to sleep here so that get_node_outset + // returns a larger value. + std::thread::sleep(Duration::from_millis(1)); + + node.hot_swap_pubkey(*other.pubkey()); + assert!(node.outset > other.outset); + assert!(!node.check_duplicate(&other)); + assert!(other.check_duplicate(&node)); + assert_eq!(node.overrides(&other), Some(true)); + assert_eq!(other.overrides(&node), Some(false)); } // Same pubkey, more recent outset timestamp is a duplicate instance. { + std::thread::sleep(Duration::from_millis(1)); let other = ContactInfo::new( node.pubkey, rng.gen(), // wallclock diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index be54df44a601fa..3ee66011e58a9c 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -536,7 +536,7 @@ pub fn make_test_cluster( .collect(); nodes.shuffle(rng); let keypair = Arc::new(Keypair::new()); - nodes[0].set_pubkey(keypair.pubkey()); + nodes[0] = ContactInfo::new_localhost(&keypair.pubkey(), /*wallclock:*/ timestamp()); let this_node = nodes[0].clone(); let mut stakes: HashMap = nodes .iter() From f9316f8115e8f1151b542aee33f54f44af0f5123 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 16 Aug 2024 15:03:00 -0700 Subject: [PATCH 156/529] Add Bank::set_epoch_stakes_for_test() so that we can adjust epoch_stakes in tests easily. (#2535) * Add Bank::set_epoch_stakes_for_test() so that we can adjust epoch_stakes in tests easily. * Do not use set_epoch_stakes_for_test to remove epoch_stakes. * Fix a bad merge. 
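For orientation (illustrative, not part of the patch): the new hook lets a
test overwrite the stakes recorded for an epoch directly instead of replaying
stake history. The sketch below is condensed from test_bank_epoch_stakes in
this patch; it assumes the dev-context-only-utils feature and the surrounding
test context (a mutable Bank plus ValidatorVoteKeypairs), and the wrapper
name override_epoch_stakes is ours.

// Condensed from test_bank_epoch_stakes; `bank` and `voting_keypairs`
// come from the usual bank/tests.rs setup.
fn override_epoch_stakes(bank: &mut Bank, voting_keypairs: &[ValidatorVoteKeypairs]) {
    let new_epoch_stakes = EpochStakes::new_for_tests(
        voting_keypairs
            .iter()
            .map(|keypair| {
                let node_id = keypair.node_keypair.pubkey();
                let authorized_voter = keypair.vote_keypair.pubkey();
                // A vote account whose node carries 100 lamports of stake.
                let vote_account = VoteAccount::try_from(create_account_with_authorized(
                    &node_id,
                    &authorized_voter,
                    &node_id,
                    0,
                    100,
                ))
                .unwrap();
                (authorized_voter, (100_u64, vote_account))
            })
            .collect(),
        1, // leader_schedule_epoch
    );
    // Epoch 1 now reports exactly the stakes installed above.
    bank.set_epoch_stakes_for_test(1, new_epoch_stakes);
    assert_eq!(
        bank.epoch_total_stake(1),
        Some(100 * voting_keypairs.len() as u64)
    );
}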
--- runtime/src/bank.rs | 5 +++++ runtime/src/bank/tests.rs | 34 ++++++++++++++++++++++++++++++++-- runtime/src/epoch_stakes.rs | 27 +++++++++++++++++---------- runtime/src/stakes.rs | 2 +- 4 files changed, 55 insertions(+), 13 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d514b172f26df4..0a0bacea0eef1c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1974,6 +1974,11 @@ impl Bank { } } + #[cfg(feature = "dev-context-only-utils")] + pub fn set_epoch_stakes_for_test(&mut self, epoch: Epoch, stakes: EpochStakes) { + self.epoch_stakes.insert(epoch, stakes); + } + fn update_rent(&self) { self.update_sysvar_account(&sysvar::rent::id(), |account| { create_account( diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 538e14084675fb..36d3ca8d1c7f8b 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -110,7 +110,8 @@ use { solana_vote_program::{ vote_instruction, vote_state::{ - self, BlockTimestamp, Vote, VoteInit, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY, + self, create_account_with_authorized, BlockTimestamp, Vote, VoteInit, VoteState, + VoteStateVersions, MAX_LOCKOUT_HISTORY, }, }, std::{ @@ -12996,7 +12997,7 @@ fn test_bank_epoch_stakes() { create_genesis_config_with_vote_accounts(1_000_000_000, &voting_keypairs, stakes.clone()); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank1 = Bank::new_from_parent( + let mut bank1 = Bank::new_from_parent( bank0.clone(), &Pubkey::default(), bank0.get_slots_in_epoch(0) + 1, @@ -13023,4 +13024,33 @@ fn test_bank_epoch_stakes() { Some(stakes[i]) ); } + + let new_epoch_stakes = EpochStakes::new_for_tests( + voting_keypairs + .iter() + .map(|keypair| { + let node_id = keypair.node_keypair.pubkey(); + let authorized_voter = keypair.vote_keypair.pubkey(); + let vote_account = VoteAccount::try_from(create_account_with_authorized( + &node_id, + &authorized_voter, + &node_id, + 0, + 100, + )) + .unwrap(); + (authorized_voter, (100_u64, vote_account)) + }) + .collect::>(), + 1, + ); + bank1.set_epoch_stakes_for_test(1, new_epoch_stakes); + assert_eq!(bank1.epoch_total_stake(1), Some(100 * num_of_nodes)); + assert_eq!(bank1.epoch_node_id_to_stake(1, &Pubkey::new_unique()), None); + for keypair in voting_keypairs.iter() { + assert_eq!( + bank1.epoch_node_id_to_stake(1, &keypair.node_keypair.pubkey()), + Some(100) + ); + } } diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 664c2044903d51..84b6bdc40a6345 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -40,6 +40,21 @@ impl EpochStakes { } } + #[cfg(feature = "dev-context-only-utils")] + pub fn new_for_tests( + vote_accounts_hash_map: VoteAccountsHashMap, + leader_schedule_epoch: Epoch, + ) -> Self { + Self::new( + Arc::new(StakesEnum::Accounts(crate::stakes::Stakes::new_for_tests( + 0, + solana_vote::vote_account::VoteAccounts::from(Arc::new(vote_accounts_hash_map)), + im::HashMap::default(), + ))), + leader_schedule_epoch, + ) + } + pub fn stakes(&self) -> &StakesEnum { &self.stakes } @@ -228,10 +243,9 @@ pub(crate) mod tests { stake_account::StakeAccount, stakes::{Stakes, StakesCache}, }, - im::HashMap as ImHashMap, solana_sdk::{account::AccountSharedData, rent::Rent}, solana_stake_program::stake_state::{self, Delegation, Stake}, - solana_vote::vote_account::{VoteAccount, VoteAccounts}, + solana_vote::vote_account::VoteAccount, solana_vote_program::vote_state::{self, create_account_with_authorized}, std::iter, }; @@ -544,14 +558,7 @@ pub(crate) mod 
tests { let epoch_vote_accounts = new_epoch_vote_accounts(&vote_accounts_map, |node_id| { *node_id_to_stake_map.get(node_id).unwrap() }); - let epoch_stakes = EpochStakes::new( - Arc::new(StakesEnum::Accounts(Stakes::new_for_tests( - 0, - VoteAccounts::from(Arc::new(epoch_vote_accounts)), - ImHashMap::default(), - ))), - 0, - ); + let epoch_stakes = EpochStakes::new_for_tests(epoch_vote_accounts, 0); assert_eq!(epoch_stakes.total_stake(), 11000); for (node_id, stake) in node_id_to_stake_map.iter() { diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index ff8bdddc3da563..d79d8e43492687 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -320,7 +320,7 @@ impl Stakes { }) } - #[cfg(test)] + #[cfg(feature = "dev-context-only-utils")] pub fn new_for_tests( epoch: Epoch, vote_accounts: VoteAccounts, From 2958cc33c18883e4c6613671be7e9b32b7928a82 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Sun, 18 Aug 2024 21:23:48 -0400 Subject: [PATCH 157/529] blockstore: store merkle root duplicate proofs immediately on detection (#2638) --- core/src/window_service.rs | 17 ++--------------- ledger/src/blockstore.rs | 11 +++++++++++ 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 511da236958af3..ff902e414ee017 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -166,21 +166,8 @@ fn run_check_duplicate( ); let (shred1, shred2) = match shred { PossibleDuplicateShred::LastIndexConflict(shred, conflict) - | PossibleDuplicateShred::ErasureConflict(shred, conflict) => (shred, conflict), - PossibleDuplicateShred::MerkleRootConflict(shred, conflict) => { - // Although this proof can be immediately stored on detection, we wait until - // here in order to check the feature flag, as storage in blockstore can - // preclude the detection of other duplicate proofs in this slot - if blockstore.has_duplicate_shreds_in_slot(shred_slot) { - return Ok(()); - } - blockstore.store_duplicate_slot( - shred_slot, - conflict.clone(), - shred.clone().into_payload(), - )?; - (shred, conflict) - } + | PossibleDuplicateShred::ErasureConflict(shred, conflict) + | PossibleDuplicateShred::MerkleRootConflict(shred, conflict) => (shred, conflict), PossibleDuplicateShred::ChainedMerkleRootConflict(shred, conflict) => { if chained_merkle_conflict_duplicate_proofs { // Although this proof can be immediately stored on detection, we wait until diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 55377cb31557eb..b5bca01a8b2078 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -1811,6 +1811,17 @@ impl Blockstore { ); return true; }; + if let Err(e) = self.store_duplicate_slot( + slot, + conflicting_shred.clone(), + shred.clone().into_payload(), + ) { + warn!( + "Unable to store conflicting merkle root duplicate proof for {slot} \ + {:?} {e}", + shred.erasure_set(), + ); + } duplicate_shreds.push(PossibleDuplicateShred::MerkleRootConflict( shred.clone(), conflicting_shred, From 9947d4bfac23e23a4a9a384adcac79a996d1c401 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 10:37:44 +0800 Subject: [PATCH 158/529] build(deps): bump serde_json from 1.0.124 to 1.0.125 (#2609) * build(deps): bump serde_json from 1.0.124 to 1.0.125 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.124 to 1.0.125. 
- [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.124...1.0.125) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd85ad9bc60a00..706eaec125e99c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5102,9 +5102,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 939b9aff789987..d0cca55cf3a48a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -332,7 +332,7 @@ seqlock = "0.2.0" serde = "1.0.207" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" serde_derive = "1.0.207" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.124" +serde_json = "1.0.125" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 512aff5dc21cb6..c4f3fa2a89478c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4255,9 +4255,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" dependencies = [ "itoa", "memchr", From c9030391c16931153546ebe5c0117832a6d7244a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 15:17:37 +0800 Subject: [PATCH 159/529] build(deps): bump libc from 0.2.155 to 0.2.157 (#2652) * build(deps): bump libc from 0.2.155 to 0.2.157 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.155 to 0.2.157. - [Release notes](https://github.com/rust-lang/libc/releases) - [Changelog](https://github.com/rust-lang/libc/blob/0.2.157/CHANGELOG.md) - [Commits](https://github.com/rust-lang/libc/compare/0.2.155...0.2.157) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 706eaec125e99c..05e485c1670fed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3217,9 +3217,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.157" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "374af5f94e54fa97cf75e945cce8a6b201e88a1a07e688b47dfd2a59c66dbd86" [[package]] name = "libloading" diff --git a/Cargo.toml b/Cargo.toml index d0cca55cf3a48a..33ff18714c59e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -273,7 +273,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" lazy_static = "1.5.0" -libc = "0.2.155" +libc = "0.2.157" libloading = "0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c4f3fa2a89478c..bfff11313cd310 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2541,9 +2541,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.157" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "374af5f94e54fa97cf75e945cce8a6b201e88a1a07e688b47dfd2a59c66dbd86" [[package]] name = "libloading" From 4beb1080664dcbe8fd04d9cb4f6ab2ea240f5c3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 18:41:58 +0800 Subject: [PATCH 160/529] build(deps): bump syn from 2.0.74 to 2.0.75 (#2655) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.74 to 2.0.75. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.74...2.0.75) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 68 +++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05e485c1670fed..3e9635482df3ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -723,7 +723,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -876,7 +876,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1033,7 +1033,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "syn_derive", ] @@ -1165,7 +1165,7 @@ checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1776,7 +1776,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1787,7 +1787,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1849,7 +1849,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1973,7 +1973,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2079,7 +2079,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2349,7 +2349,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3675,7 +3675,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3748,7 +3748,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -4373,7 +4373,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -5097,7 +5097,7 @@ checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -5152,7 +5152,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -5202,7 +5202,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -6425,7 +6425,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -6850,7 +6850,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "toml 0.8.12", ] @@ -7489,7 +7489,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -8336,7 +8336,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", 
"spl-discriminator-syn", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -8348,7 +8348,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.74", + "syn 2.0.75", "thiserror", ] @@ -8407,7 +8407,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -8595,9 +8595,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.74" +version = "2.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" +checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" dependencies = [ "proc-macro2", "quote", @@ -8613,7 +8613,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -8799,7 +8799,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -8811,7 +8811,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "test-case-core", ] @@ -8847,7 +8847,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -8984,7 +8984,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -9228,7 +9228,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -9538,7 +9538,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "wasm-bindgen-shared", ] @@ -9572,7 +9572,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9931,7 +9931,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -9951,7 +9951,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] From 8c96e9a2a57ce44bce02b38673d70245fc8194c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 19:59:14 +0800 Subject: [PATCH 161/529] build(deps): bump libc from 0.2.157 to 0.2.158 (#2654) * build(deps): bump libc from 0.2.157 to 0.2.158 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.157 to 0.2.158. - [Release notes](https://github.com/rust-lang/libc/releases) - [Changelog](https://github.com/rust-lang/libc/blob/0.2.158/CHANGELOG.md) - [Commits](https://github.com/rust-lang/libc/compare/0.2.157...0.2.158) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e9635482df3ee..53dcf174910935 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3217,9 +3217,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.157" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374af5f94e54fa97cf75e945cce8a6b201e88a1a07e688b47dfd2a59c66dbd86" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" diff --git a/Cargo.toml b/Cargo.toml index 33ff18714c59e6..5248d51ab8d91b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -273,7 +273,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" lazy_static = "1.5.0" -libc = "0.2.157" +libc = "0.2.158" libloading = "0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bfff11313cd310..1606b6b4643d47 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2541,9 +2541,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.157" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374af5f94e54fa97cf75e945cce8a6b201e88a1a07e688b47dfd2a59c66dbd86" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" From 76cf5772d5eed9d50ff6cbcd71c02ff780bf6faf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 19:59:32 +0800 Subject: [PATCH 162/529] build(deps): bump bytemuck from 1.16.3 to 1.17.0 (#2626) * build(deps): bump bytemuck from 1.16.3 to 1.17.0 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.16.3 to 1.17.0. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.16.3...v1.17.0) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53dcf174910935..27430a5e552747 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1150,9 +1150,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "bytemuck" -version = "1.16.3" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" +checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 5248d51ab8d91b..5d649eb9e269b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -199,7 +199,7 @@ bs58 = "0.5.1" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.16.3" +bytemuck = "1.17.0" bytemuck_derive = "1.7.0" byteorder = "1.5.0" bytes = "1.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1606b6b4643d47..28db5393995769 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -847,9 +847,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.3" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" +checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" dependencies = [ "bytemuck_derive", ] From 5b703d04b5889e98f726e352ce8cb9e47c05f86f Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 19 Aug 2024 08:09:51 -0500 Subject: [PATCH 163/529] AddressLoader: allow for non-owned MessageAddressTableLookup (#2592) --- accounts-db/src/accounts.rs | 24 +++++++++++++----------- runtime/src/bank/address_lookup_table.rs | 16 +++++++++++++++- transaction-view/src/lib.rs | 2 +- 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 9033ceea6e6da5..eb416c29e2f4e9 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -15,13 +15,15 @@ use { account::{AccountSharedData, ReadableAccount}, address_lookup_table::{self, error::AddressLookupError, state::AddressLookupTable}, clock::{BankId, Slot}, - message::v0::{LoadedAddresses, MessageAddressTableLookup}, + message::v0::LoadedAddresses, pubkey::Pubkey, slot_hashes::SlotHashes, transaction::{Result, SanitizedTransaction}, transaction_context::TransactionAccount, }, - solana_svm_transaction::svm_message::SVMMessage, + solana_svm_transaction::{ + message_address_table_lookup::SVMMessageAddressTableLookup, svm_message::SVMMessage, + }, std::{ cmp::Reverse, collections::{BinaryHeap, HashSet}, @@ -82,12 +84,12 @@ impl Accounts { pub fn load_lookup_table_addresses( &self, ancestors: &Ancestors, - address_table_lookup: &MessageAddressTableLookup, + address_table_lookup: SVMMessageAddressTableLookup, slot_hashes: &SlotHashes, ) -> std::result::Result { let table_account = self .accounts_db - .load_with_fixed_root(ancestors, &address_table_lookup.account_key) + .load_with_fixed_root(ancestors, address_table_lookup.account_key) .map(|(account, _rent)| account) .ok_or(AddressLookupError::LookupTableAccountNotFound)?; @@ -99,12 +101,12 @@ impl Accounts { Ok(LoadedAddresses { writable: 
lookup_table.lookup( current_slot, - &address_table_lookup.writable_indexes, + address_table_lookup.writable_indexes, slot_hashes, )?, readonly: lookup_table.lookup( current_slot, - &address_table_lookup.readonly_indexes, + address_table_lookup.readonly_indexes, slot_hashes, )?, }) @@ -611,7 +613,7 @@ mod tests { address_lookup_table::state::LookupTableMeta, hash::Hash, instruction::CompiledInstruction, - message::{Message, MessageHeader}, + message::{v0::MessageAddressTableLookup, Message, MessageHeader}, native_loader, signature::{signers::Signers, Keypair, Signer}, transaction::{Transaction, TransactionError, MAX_TX_ACCOUNT_LOCKS}, @@ -708,7 +710,7 @@ mod tests { assert_eq!( accounts.load_lookup_table_addresses( &ancestors, - &address_table_lookup, + SVMMessageAddressTableLookup::from(&address_table_lookup), &SlotHashes::default(), ), Err(AddressLookupError::LookupTableAccountNotFound), @@ -735,7 +737,7 @@ mod tests { assert_eq!( accounts.load_lookup_table_addresses( &ancestors, - &address_table_lookup, + SVMMessageAddressTableLookup::from(&address_table_lookup), &SlotHashes::default(), ), Err(AddressLookupError::InvalidAccountOwner), @@ -762,7 +764,7 @@ mod tests { assert_eq!( accounts.load_lookup_table_addresses( &ancestors, - &address_table_lookup, + SVMMessageAddressTableLookup::from(&address_table_lookup), &SlotHashes::default(), ), Err(AddressLookupError::InvalidAccountData), @@ -801,7 +803,7 @@ mod tests { assert_eq!( accounts.load_lookup_table_addresses( &ancestors, - &address_table_lookup, + SVMMessageAddressTableLookup::from(&address_table_lookup), &SlotHashes::default(), ), Ok(LoadedAddresses { diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 344f1e8bdf09aa..4fa4e2bc0f570a 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -8,6 +8,7 @@ use { }, transaction::AddressLoader, }, + solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup, }; fn into_address_loader_error(err: AddressLookupError) -> AddressLoaderError { @@ -25,6 +26,20 @@ impl AddressLoader for &Bank { fn load_addresses( self, address_table_lookups: &[MessageAddressTableLookup], + ) -> Result { + self.load_addresses_from_ref( + address_table_lookups + .iter() + .map(SVMMessageAddressTableLookup::from), + ) + } +} + +impl Bank { + /// Load addresses from an iterator of `SVMMessageAddressTableLookup`. + pub fn load_addresses_from_ref<'a>( + &self, + address_table_lookups: impl Iterator>, ) -> Result { let slot_hashes = self .transaction_processor @@ -33,7 +48,6 @@ impl AddressLoader for &Bank { .map_err(|_| AddressLoaderError::SlotHashesSysvarNotFound)?; address_table_lookups - .iter() .map(|address_table_lookup| { self.rc .accounts diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index 40cea3da25393c..baa3b91b84ef15 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -5,7 +5,7 @@ pub mod bytes; mod bytes; #[allow(dead_code)] -mod address_table_lookup_meta; +pub mod address_table_lookup_meta; #[allow(dead_code)] mod instructions_meta; #[allow(dead_code)] From 2a5a80313ca6d2a29cb67c38fde1010724e2adac Mon Sep 17 00:00:00 2001 From: ripatel-fd Date: Mon, 19 Aug 2024 18:23:46 +0200 Subject: [PATCH 164/529] Fix Turbine metric task names (#2657) Turbine server and client metric tasks incorrectly reported as repair due to a copy paste error. 
Co-authored-by: Richard Patel --- turbine/src/quic_endpoint.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index c137e1c0a6ef60..d16bb5c188b831 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -200,7 +200,7 @@ async fn run_server( ) { let stats = Arc::::default(); let report_metrics_task = - tokio::task::spawn(report_metrics_task("repair_quic_server", stats.clone())); + tokio::task::spawn(report_metrics_task("turbine_quic_server", stats.clone())); while let Some(connecting) = endpoint.accept().await { tokio::task::spawn(handle_connecting_task( endpoint.clone(), @@ -227,7 +227,7 @@ async fn run_client( ) { let stats = Arc::::default(); let report_metrics_task = - tokio::task::spawn(report_metrics_task("repair_quic_client", stats.clone())); + tokio::task::spawn(report_metrics_task("turbine_quic_client", stats.clone())); while let Some((remote_address, bytes)) = receiver.recv().await { let Some(bytes) = try_route_bytes(&remote_address, bytes, &*router.read().await, &stats) else { From b3261eba0cf8469686ebbd3162435c87e47e5bff Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Mon, 19 Aug 2024 13:14:22 -0400 Subject: [PATCH 165/529] refactor: test public function instead of private helper. (#2643) refactor: test public facing function --- .../src/compute_budget_instruction_details.rs | 311 ++++++------------ 1 file changed, 95 insertions(+), 216 deletions(-) diff --git a/runtime-transaction/src/compute_budget_instruction_details.rs b/runtime-transaction/src/compute_budget_instruction_details.rs index 993c3905f6c101..96c97ab0483ea3 100644 --- a/runtime-transaction/src/compute_budget_instruction_details.rs +++ b/runtime-transaction/src/compute_budget_instruction_details.rs @@ -148,256 +148,135 @@ impl ComputeBudgetInstructionDetails { mod test { use { super::*, - solana_sdk::instruction::{CompiledInstruction, Instruction}, + solana_sdk::{ + instruction::Instruction, + message::Message, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + transaction::{SanitizedTransaction, Transaction}, + }, + solana_svm_transaction::svm_message::SVMMessage, }; - fn setup_test_instruction( - index: u8, - instruction: Instruction, - ) -> (Pubkey, CompiledInstruction) { - ( - instruction.program_id, - CompiledInstruction { - program_id_index: index, - data: instruction.data.clone(), - accounts: vec![], - }, - ) + fn build_sanitized_transaction(instructions: &[Instruction]) -> SanitizedTransaction { + let payer_keypair = Keypair::new(); + SanitizedTransaction::from_transaction_for_tests(Transaction::new_unsigned(Message::new( + instructions, + Some(&payer_keypair.pubkey()), + ))) } #[test] - fn test_process_instruction_request_heap() { - let mut index = 0; - let mut expected_details = ComputeBudgetInstructionDetails::default(); - let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); - - // irrelevant instruction makes no change - index += 1; - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions = 1; - assert_eq!(compute_budget_instruction_details, expected_details); - - // valid instruction - index += 1; - let (program_id, ix) = setup_test_instruction( - index, 
+ fn test_try_from_request_heap() { + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), ComputeBudgetInstruction::request_heap_frame(40 * 1024), - ); - expected_details.requested_heap_size = Some((index, 40 * 1024)); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - assert_eq!(compute_budget_instruction_details, expected_details); - - // duplicate instruction results error - index += 1; - let expected_err = Err(TransactionError::DuplicateInstruction(index)); - let (program_id, ix) = setup_test_instruction( - index, - ComputeBudgetInstruction::request_heap_frame(50 * 1024), - ); + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ]); + let expected_details = ComputeBudgetInstructionDetails { + requested_heap_size: Some((1, 40 * 1024)), + num_non_compute_budget_instructions: 2, + ..ComputeBudgetInstructionDetails::default() + }; assert_eq!( - compute_budget_instruction_details.process_instruction( - index, - &program_id, - &SVMInstruction::from(&ix) - ), - expected_err + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Ok(expected_details) ); - assert_eq!(compute_budget_instruction_details, expected_details); - // irrelevant instruction makes no change - index += 1; - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ComputeBudgetInstruction::request_heap_frame(40 * 1024), + ComputeBudgetInstruction::request_heap_frame(41 * 1024), + ]); + assert_eq!( + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Err(TransactionError::DuplicateInstruction(2)) ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions += 1; - assert_eq!(compute_budget_instruction_details, expected_details); } #[test] - fn test_process_instruction_compute_unit_limit() { - let mut index = 0; - let mut expected_details = ComputeBudgetInstructionDetails::default(); - let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); - - // irrelevant instruction makes no change - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions = 1; - assert_eq!(compute_budget_instruction_details, expected_details); - - // valid instruction, - index += 1; - let (program_id, ix) = setup_test_instruction( - index, + fn test_try_from_compute_unit_limit() { + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX), - ); - expected_details.requested_compute_unit_limit = Some((index, u32::MAX)); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - assert_eq!(compute_budget_instruction_details, expected_details); - - // duplicate instruction results error - index += 1; - let expected_err = 
Err(TransactionError::DuplicateInstruction(index)); - let (program_id, ix) = setup_test_instruction( - index, - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ); + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ]); + let expected_details = ComputeBudgetInstructionDetails { + requested_compute_unit_limit: Some((1, u32::MAX)), + num_non_compute_budget_instructions: 2, + ..ComputeBudgetInstructionDetails::default() + }; assert_eq!( - compute_budget_instruction_details.process_instruction( - index, - &program_id, - &SVMInstruction::from(&ix) - ), - expected_err + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Ok(expected_details) ); - assert_eq!(compute_budget_instruction_details, expected_details); - // irrelevant instruction makes no change - index += 1; - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ComputeBudgetInstruction::set_compute_unit_limit(0), + ComputeBudgetInstruction::set_compute_unit_limit(u32::MAX), + ]); + assert_eq!( + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Err(TransactionError::DuplicateInstruction(2)) ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions += 1; - assert_eq!(compute_budget_instruction_details, expected_details); } #[test] - fn test_process_instruction_compute_unit_price() { - let mut index = 0; - let mut expected_details = ComputeBudgetInstructionDetails::default(); - let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); - - // irrelevant instruction makes no change - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions = 1; - assert_eq!(compute_budget_instruction_details, expected_details); - - // valid instruction, - index += 1; - let (program_id, ix) = setup_test_instruction( - index, + fn test_try_from_compute_unit_price() { + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ); - expected_details.requested_compute_unit_price = Some((index, u64::MAX)); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - assert_eq!(compute_budget_instruction_details, expected_details); - - // duplicate instruction results error - index += 1; - let expected_err = Err(TransactionError::DuplicateInstruction(index)); - let (program_id, ix) = - setup_test_instruction(index, ComputeBudgetInstruction::set_compute_unit_price(0)); + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ]); + let expected_details = ComputeBudgetInstructionDetails { + requested_compute_unit_price: Some((1, u64::MAX)), + num_non_compute_budget_instructions: 2, + ..ComputeBudgetInstructionDetails::default() + }; assert_eq!( - compute_budget_instruction_details.process_instruction( - index, - &program_id, - &SVMInstruction::from(&ix) - ), - 
expected_err + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Ok(expected_details) ); - assert_eq!(compute_budget_instruction_details, expected_details); - // irrelevant instruction makes no change - index += 1; - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ComputeBudgetInstruction::set_compute_unit_price(0), + ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), + ]); + assert_eq!( + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Err(TransactionError::DuplicateInstruction(2)) ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions += 1; - assert_eq!(compute_budget_instruction_details, expected_details); } #[test] - fn test_process_instruction_loaded_accounts_data_size_limit() { - let mut index = 0; - let mut expected_details = ComputeBudgetInstructionDetails::default(); - let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); - - // irrelevant instruction makes no change - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions = 1; - assert_eq!(compute_budget_instruction_details, expected_details); - - // valid instruction, - index += 1; - let (program_id, ix) = setup_test_instruction( - index, + fn test_try_from_loaded_accounts_data_size_limit() { + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(u32::MAX), - ); - expected_details.requested_loaded_accounts_data_size_limit = Some((index, u32::MAX)); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - assert_eq!(compute_budget_instruction_details, expected_details); - - // duplicate instruction results error - index += 1; - let expected_err = Err(TransactionError::DuplicateInstruction(index)); - let (program_id, ix) = setup_test_instruction( - index, - ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0), - ); + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + ]); + let expected_details = ComputeBudgetInstructionDetails { + requested_loaded_accounts_data_size_limit: Some((1, u32::MAX)), + num_non_compute_budget_instructions: 2, + ..ComputeBudgetInstructionDetails::default() + }; assert_eq!( - compute_budget_instruction_details.process_instruction( - index, - &program_id, - &SVMInstruction::from(&ix) - ), - expected_err + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Ok(expected_details) ); - assert_eq!(compute_budget_instruction_details, expected_details); - // irrelevant instruction makes no change - index += 1; - let (program_id, ix) = setup_test_instruction( - index, - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + 
ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0), + ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(u32::MAX), + ]); + assert_eq!( + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)), + Err(TransactionError::DuplicateInstruction(2)) ); - assert!(compute_budget_instruction_details - .process_instruction(index, &program_id, &SVMInstruction::from(&ix)) - .is_ok()); - expected_details.num_non_compute_budget_instructions += 1; - assert_eq!(compute_budget_instruction_details, expected_details); } #[test] From ba3c7895d9fea93dd48c247707dc4a186679c091 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2024 17:37:05 +0000 Subject: [PATCH 166/529] build(deps): bump serde from 1.0.207 to 1.0.208 (#2625) * build(deps): bump serde from 1.0.207 to 1.0.208 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.207 to 1.0.208. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.207...v1.0.208) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files * sync version --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: yihau --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27430a5e552747..cef70349fec932 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5073,9 +5073,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.207" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -5091,9 +5091,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.207" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 5d649eb9e269b8..f069a02e85e47e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -329,9 +329,9 @@ rustls = { version = "0.21.12", default-features = false, features = ["quic"] } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" -serde = "1.0.207" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.208" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" -serde_derive = "1.0.207" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_derive = "1.0.208" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.125" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 28db5393995769..133ce53e57b91c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4226,9 
+4226,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.207" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -4244,9 +4244,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.207" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", From 59ecab5cfad49d89f181b048190ebd4169011490 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 20 Aug 2024 01:44:14 +0800 Subject: [PATCH 167/529] ci: use actions/upload-artifact@v4 (#2622) --- .github/workflows/release-artifacts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index de32cee71dfc97..a49e56b2a39668 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -65,7 +65,7 @@ jobs: - name: Upload Artifacts if: ${{ steps.build.outputs.channel != '' || steps.build.outputs.tag != '' }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: windows-artifact path: windows-release/ From 094a63476c30a5c1f8eedfbe8d70b3bcef5703c4 Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 19 Aug 2024 14:35:12 -0500 Subject: [PATCH 168/529] Bound default value for thread pool args (#2599) The thread pool argument trait declares min/max/default functions. These functions are then called to provide a default as well as validation that any user set value on the CLI is within [min(), max()]. Some of the default values are fixed numbers. On a machine with few enough cores, the default could exceed the max. This would raise an error when the argument is parsed. This can be worked around by the user specifying a lower value; however, these flags are still very much experimental and intentionally hidden. So, make the default value that is passed to CLAP the min of default() and max(). This will adjust the default on low core count machines while leaving settings on sufficient machines untouched. 
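A minimal sketch of the clamping behavior described above, not part of the patch itself: `bounded_default` mirrors the helper added in the diff below, while `max_threads` is only an assumed stand-in for however each argument type actually derives `max()` (for example from the visible core count).

```rust
use std::{cmp, thread};

// Assumed stand-in for ThreadArg::max(); the real bound may be computed
// differently for each thread pool argument.
fn max_threads() -> usize {
    thread::available_parallelism().map_or(1, |n| n.get())
}

// Mirrors the bounded_default() added by this patch: clamp a fixed default
// so CLAP is never handed a default above the validated maximum.
fn bounded_default(fixed_default: usize) -> usize {
    cmp::min(fixed_default, max_threads())
}

fn main() {
    // On a 4-core machine a fixed default of 8 would fail the
    // [min(), max()] range check at parse time; the clamped value passes.
    println!("thread default = {}", bounded_default(8));
}
```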
--- validator/src/cli/thread_args.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs index 42115d25ee3b83..1841da54a1e028 100644 --- a/validator/src/cli/thread_args.rs +++ b/validator/src/cli/thread_args.rs @@ -18,10 +18,11 @@ pub struct DefaultThreadArgs { impl Default for DefaultThreadArgs { fn default() -> Self { Self { - ip_echo_server_threads: IpEchoServerThreadsArg::default().to_string(), - replay_forks_threads: ReplayForksThreadsArg::default().to_string(), - replay_transactions_threads: ReplayTransactionsThreadsArg::default().to_string(), - tvu_receive_threads: TvuReceiveThreadsArg::default().to_string(), + ip_echo_server_threads: IpEchoServerThreadsArg::bounded_default().to_string(), + replay_forks_threads: ReplayForksThreadsArg::bounded_default().to_string(), + replay_transactions_threads: ReplayTransactionsThreadsArg::bounded_default() + .to_string(), + tvu_receive_threads: TvuReceiveThreadsArg::bounded_default().to_string(), } } } @@ -85,6 +86,12 @@ trait ThreadArg { /// The default number of threads fn default() -> usize; + /// The default number of threads, bounded by Self::max() + /// This prevents potential CLAP issues on low core count machines where + /// a fixed value in Self::default() could be greater than Self::max() + fn bounded_default() -> usize { + std::cmp::min(Self::default(), Self::max()) + } /// The minimum allowed number of threads (inclusive) fn min() -> usize { 1 From 8f675ebcfecb36b79e38ffbf8982bc2bb315b665 Mon Sep 17 00:00:00 2001 From: Joe C Date: Mon, 19 Aug 2024 16:17:52 -0400 Subject: [PATCH 169/529] Runtime: Core BPF: Add test for CPI post-migration (#2531) --- runtime/src/bank/builtin_programs.rs | 64 +++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank/builtin_programs.rs b/runtime/src/bank/builtin_programs.rs index 1944c9ea0bac62..7c12bb23fbd6b0 100644 --- a/runtime/src/bank/builtin_programs.rs +++ b/runtime/src/bank/builtin_programs.rs @@ -75,13 +75,14 @@ mod tests_core_bpf_migration { tests::{create_genesis_config, new_bank_from_parent_with_bank_forks}, Bank, }, + solana_program_runtime::loaded_programs::ProgramCacheEntry, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable::{self, get_program_data_address, UpgradeableLoaderState}, epoch_schedule::EpochSchedule, feature::{self, Feature}, feature_set::FeatureSet, - instruction::Instruction, + instruction::{AccountMeta, Instruction}, message::Message, native_loader, native_token::LAMPORTS_PER_SOL, @@ -93,6 +94,27 @@ mod tests_core_bpf_migration { test_case::test_case, }; + // CPI mockup to test CPI to newly migrated programs. 
+ mod cpi_mockup { + use { + solana_program_runtime::declare_process_instruction, + solana_sdk::instruction::Instruction, + }; + + declare_process_instruction!(Entrypoint, 0, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + + let target_program_id = transaction_context.get_key_of_account_at_index( + instruction_context.get_index_of_instruction_account_in_transaction(0)?, + )?; + + let instruction = Instruction::new_with_bytes(*target_program_id, &[], Vec::new()); + + invoke_context.native_invoke(instruction.into(), &[]) + }); + } + fn test_elf() -> Vec { let mut elf = Vec::new(); File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so") @@ -144,6 +166,16 @@ mod tests_core_bpf_migration { let mut root_bank = Bank::new_for_tests(&genesis_config); + // Set up the CPI mockup to test CPI'ing to the migrated program. + let cpi_program_id = Pubkey::new_unique(); + let cpi_program_name = "mock_cpi_program"; + root_bank.transaction_processor.add_builtin( + &root_bank, + cpi_program_id, + cpi_program_name, + ProgramCacheEntry::new_builtin(0, cpi_program_name.len(), cpi_mockup::Entrypoint::vm), + ); + let (builtin_id, config) = prototype.deconstruct(); let feature_id = &config.feature_id; let source_buffer_address = &config.source_buffer_address; @@ -219,6 +251,21 @@ mod tests_core_bpf_migration { )) .unwrap(); + // Successfully invoke the new BPF builtin program via CPI. + bank.process_transaction(&Transaction::new( + &vec![&mint_keypair], + Message::new( + &[Instruction::new_with_bytes( + cpi_program_id, + &[], + vec![AccountMeta::new_readonly(*builtin_id, false)], + )], + Some(&mint_keypair.pubkey()), + ), + bank.last_blockhash(), + )) + .unwrap(); + // Simulate crossing another epoch boundary for a new bank. goto_end_of_slot(bank.clone()); first_slot_in_next_epoch += slots_per_epoch; @@ -243,6 +290,21 @@ mod tests_core_bpf_migration { bank.last_blockhash(), )) .unwrap(); + + // Again, successfully invoke the new BPF builtin program via CPI. + bank.process_transaction(&Transaction::new( + &vec![&mint_keypair], + Message::new( + &[Instruction::new_with_bytes( + cpi_program_id, + &[], + vec![AccountMeta::new_readonly(*builtin_id, false)], + )], + Some(&mint_keypair.pubkey()), + ), + bank.last_blockhash(), + )) + .unwrap(); } // Simulate a failure to migrate the program. 
From 7ae496a7777897ec55e8d7490383db2c13bb07dc Mon Sep 17 00:00:00 2001 From: steviez Date: Mon, 19 Aug 2024 17:00:07 -0500 Subject: [PATCH 170/529] Refactor Bank EAH fetch method (#2477) Shift the check for whether *this* bank should include the EAH inside the fetch function; will avoid code duplication later --- runtime/src/bank.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0a0bacea0eef1c..f54e5f46793c64 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5215,11 +5215,10 @@ impl Bank { self.last_blockhash().as_ref(), ]); - let epoch_accounts_hash = self.should_include_epoch_accounts_hash().then(|| { - let epoch_accounts_hash = self.wait_get_epoch_accounts_hash(); + let epoch_accounts_hash = self.wait_get_epoch_accounts_hash(); + if let Some(epoch_accounts_hash) = epoch_accounts_hash { hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]); - epoch_accounts_hash - }); + }; let buf = self .hard_forks @@ -5264,9 +5263,13 @@ impl Bank { self.parent_slot() < stop_slot && self.slot() >= stop_slot } - /// If the epoch accounts hash should be included in this Bank, then fetch it. If the EAH + /// If the epoch accounts hash should be included in this Bank, then fetch it. If the EAH /// calculation has not completed yet, this fn will block until it does complete. - fn wait_get_epoch_accounts_hash(&self) -> EpochAccountsHash { + fn wait_get_epoch_accounts_hash(&self) -> Option { + if !self.should_include_epoch_accounts_hash() { + return None; + } + let (epoch_accounts_hash, waiting_time_us) = measure_us!(self .rc .accounts @@ -5279,7 +5282,7 @@ impl Bank { ("slot", self.slot(), i64), ("waiting-time-us", waiting_time_us, i64), ); - epoch_accounts_hash + Some(epoch_accounts_hash) } /// Used by ledger tool to run a final hash calculation once all ledger replay has completed. 
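A minimal sketch of the call-site shape the refactor above enables, with the types reduced to stubs: the `include_eah` flag and the blocking wait are placeholders for the real `should_include_epoch_accounts_hash` criteria and background calculation.

```rust
struct EpochAccountsHash;

struct Bank {
    // Stand-in for the real inclusion criteria on Bank.
    include_eah: bool,
}

impl Bank {
    // After the patch the inclusion check lives inside the fetch: callers
    // get None when this bank should not mix in an epoch accounts hash.
    fn wait_get_epoch_accounts_hash(&self) -> Option<EpochAccountsHash> {
        if !self.include_eah {
            return None;
        }
        // ...block here until the background EAH calculation completes...
        Some(EpochAccountsHash)
    }
}

fn hash_internal_state(bank: &Bank) {
    // Each call site collapses to a single fetch plus an `if let`, instead
    // of repeating the should-include check before every fetch.
    if let Some(_eah) = bank.wait_get_epoch_accounts_hash() {
        // hash = hashv(&[hash.as_ref(), _eah.as_ref().as_ref()]);
    }
}

fn main() {
    hash_internal_state(&Bank { include_eah: false });
}
```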
From 9c6f4b7b492c32808a0a3633a3b1941ce9c82a9a Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 20 Aug 2024 09:28:26 +0900 Subject: [PATCH 171/529] Switch block verification to the unified scheduler (#2653) --- CHANGELOG.md | 1 + Cargo.lock | 1 + core/src/validator.rs | 8 +++++--- local-cluster/Cargo.toml | 1 + local-cluster/tests/local_cluster.rs | 18 +++++++++--------- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66aadb9b702c2c..8c809f2b78b115 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ Release channels have their own copy of this changelog: * `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447) * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480) * `solana-genesis`: the `--cluster-type` parameter now clones the feature set from the target cluster (#2587) + * `unified-scheduler` as default option for `--block-verification-method` (#2653) ## [2.0.0] * Breaking diff --git a/Cargo.lock b/Cargo.lock index cef70349fec932..496c5633b958a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6707,6 +6707,7 @@ dependencies = [ "solana-vote", "solana-vote-program", "static_assertions", + "strum", "tempfile", "trees", ] diff --git a/core/src/validator.rs b/core/src/validator.rs index 373eebf4ae1be2..016514dd817166 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -138,7 +138,7 @@ use { time::{Duration, Instant}, }, strum::VariantNames, - strum_macros::{Display, EnumString, EnumVariantNames, IntoStaticStr}, + strum_macros::{Display, EnumCount, EnumIter, EnumString, EnumVariantNames, IntoStaticStr}, thiserror::Error, tokio::runtime::Runtime as TokioRuntime, }; @@ -151,11 +151,13 @@ const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80; const WAIT_FOR_WEN_RESTART_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT; -#[derive(Clone, EnumString, EnumVariantNames, Default, IntoStaticStr, Display)] +#[derive( + Clone, EnumCount, EnumIter, EnumString, EnumVariantNames, Default, IntoStaticStr, Display, +)] #[strum(serialize_all = "kebab-case")] pub enum BlockVerificationMethod { - #[default] BlockstoreProcessor, + #[default] UnifiedScheduler, } diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 3e4cbc0e366531..cd8e2bf6523152 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -37,6 +37,7 @@ solana-turbine = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } static_assertions = { workspace = true } +strum = { workspace = true, features = ["derive"] } tempfile = { workspace = true } trees = { workspace = true } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 1e62835f91b1a2..a4c767e22ede52 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -5,7 +5,7 @@ use { gag::BufferRedirect, itertools::Itertools, log::*, - rand::seq::IteratorRandom, + rand::seq::SliceRandom, serial_test::serial, solana_accounts_db::{ hardened_unpack::open_genesis_config, utils::create_accounts_run_and_snapshot_dirs, @@ -95,6 +95,7 @@ use { thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, + strum::{EnumCount, IntoEnumIterator}, }; #[test] @@ -5710,20 +5711,19 @@ fn test_randomly_mixed_block_verification_methods_between_bootstrap_and_not() { info", ); - let num_nodes = 2; + let num_nodes = BlockVerificationMethod::COUNT; let mut config 
= ClusterConfig::new_with_equal_stakes( num_nodes, DEFAULT_CLUSTER_LAMPORTS, DEFAULT_NODE_STAKE, ); - // Randomly switch to use unified scheduler - config - .validator_configs - .iter_mut() - .choose(&mut rand::thread_rng()) - .unwrap() - .block_verification_method = BlockVerificationMethod::UnifiedScheduler; + // Overwrite block_verification_method with shuffled variants + let mut methods = BlockVerificationMethod::iter().collect::>(); + methods.shuffle(&mut rand::thread_rng()); + for (validator_config, method) in config.validator_configs.iter_mut().zip_eq(methods) { + validator_config.block_verification_method = method; + } let local = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); cluster_tests::spend_and_verify_all_nodes( From e4b3b79a6c4ea4c3d6289b6af7b3b7e168534bf6 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 20 Aug 2024 12:45:20 +0800 Subject: [PATCH 172/529] ci: use actions/download-artifact@v4 (#2667) --- .github/workflows/release-artifacts.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index a49e56b2a39668..4dca7118c11348 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -76,7 +76,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Download - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: windows-artifact path: ./windows-release @@ -96,7 +96,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Download - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: windows-artifact path: ./windows-release/ From 925a603f0a253d6dc98e613f2b44193ecddc762c Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 20 Aug 2024 08:48:28 -0500 Subject: [PATCH 173/529] transaction_status_service: remove get_account_locks_unchecked (#2555) --- ledger/src/blockstore.rs | 56 +++++++++++------------ ledger/src/blockstore/blockstore_purge.rs | 21 ++++++--- rpc/src/transaction_status_service.rs | 11 +++-- 3 files changed, 50 insertions(+), 38 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index b5bca01a8b2078..2101896d9a0558 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2879,12 +2879,11 @@ impl Blockstore { } } - pub fn write_transaction_status( + pub fn write_transaction_status<'a>( &self, slot: Slot, signature: Signature, - writable_keys: Vec<&Pubkey>, - readonly_keys: Vec<&Pubkey>, + keys_with_writable: impl Iterator, status: TransactionStatusMeta, transaction_index: usize, ) -> Result<()> { @@ -2893,18 +2892,14 @@ impl Blockstore { .map_err(|_| BlockstoreError::TransactionIndexOverflow)?; self.transaction_status_cf .put_protobuf((signature, slot), &status)?; - for address in writable_keys { - self.address_signatures_cf.put( - (*address, slot, transaction_index, signature), - &AddressSignatureMeta { writeable: true }, - )?; - } - for address in readonly_keys { + + for (address, writeable) in keys_with_writable { self.address_signatures_cf.put( (*address, slot, transaction_index, signature), - &AddressSignatureMeta { writeable: false }, + &AddressSignatureMeta { writeable }, )?; } + Ok(()) } @@ -8684,8 +8679,11 @@ pub mod tests { .write_transaction_status( slot, signature, - vec![&Pubkey::new_unique()], - vec![&Pubkey::new_unique()], + vec![ + (&Pubkey::new_unique(), true), + (&Pubkey::new_unique(), false), + ] + .into_iter(), TransactionStatusMeta { fee: slot * 1_000, ..TransactionStatusMeta::default() 
@@ -9072,8 +9070,7 @@ pub mod tests { .write_transaction_status( lowest_cleanup_slot, signature1, - vec![&address0], - vec![], + vec![(&address0, true)].into_iter(), TransactionStatusMeta::default(), 0, ) @@ -9082,8 +9079,7 @@ pub mod tests { .write_transaction_status( lowest_available_slot, signature2, - vec![&address1], - vec![], + vec![(&address1, true)].into_iter(), TransactionStatusMeta::default(), 0, ) @@ -9451,8 +9447,7 @@ pub mod tests { .write_transaction_status( slot1, signature, - vec![&address0], - vec![&address1], + vec![(&address0, true), (&address1, false)].into_iter(), TransactionStatusMeta::default(), x as usize, ) @@ -9465,8 +9460,7 @@ pub mod tests { .write_transaction_status( slot2, signature, - vec![&address0], - vec![&address1], + vec![(&address0, true), (&address1, false)].into_iter(), TransactionStatusMeta::default(), x as usize, ) @@ -9478,8 +9472,7 @@ pub mod tests { .write_transaction_status( slot2, signature, - vec![&address0], - vec![&address1], + vec![(&address0, true), (&address1, false)].into_iter(), TransactionStatusMeta::default(), x as usize, ) @@ -9492,8 +9485,7 @@ pub mod tests { .write_transaction_status( slot3, signature, - vec![&address0], - vec![&address1], + vec![(&address0, true), (&address1, false)].into_iter(), TransactionStatusMeta::default(), x as usize, ) @@ -9576,8 +9568,11 @@ pub mod tests { .write_transaction_status( slot, transaction.signatures[0], - transaction.message.static_account_keys().iter().collect(), - vec![], + transaction + .message + .static_account_keys() + .iter() + .map(|key| (key, true)), TransactionStatusMeta::default(), counter, ) @@ -9604,8 +9599,11 @@ pub mod tests { .write_transaction_status( slot, transaction.signatures[0], - transaction.message.static_account_keys().iter().collect(), - vec![], + transaction + .message + .static_account_keys() + .iter() + .map(|key| (key, true)), TransactionStatusMeta::default(), counter, ) diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index d442732303fa2a..b2d79c2bf59672 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -582,8 +582,11 @@ pub mod tests { .write_transaction_status( x, Signature::from(random_bytes), - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], + vec![ + (&Pubkey::try_from(&random_bytes[..32]).unwrap(), true), + (&Pubkey::try_from(&random_bytes[32..]).unwrap(), false), + ] + .into_iter(), TransactionStatusMeta::default(), 0, ) @@ -640,8 +643,11 @@ pub mod tests { .write_transaction_status( x, signature, - vec![&Pubkey::try_from(&random_bytes[..32]).unwrap()], - vec![&Pubkey::try_from(&random_bytes[32..]).unwrap()], + vec![ + (&Pubkey::try_from(&random_bytes[..32]).unwrap(), true), + (&Pubkey::try_from(&random_bytes[32..]).unwrap(), false), + ] + .into_iter(), TransactionStatusMeta::default(), 0, ) @@ -715,8 +721,11 @@ pub mod tests { .write_transaction_status( slot, transaction.signatures[0], - transaction.message.static_account_keys().iter().collect(), - vec![], + transaction + .message + .static_account_keys() + .iter() + .map(|key| (key, true)), TransactionStatusMeta::default(), 0, ) diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 43ce83c0966de6..314f8b9a4f5fda 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -104,7 +104,6 @@ impl TransactionStatusService { rent_debits, .. 
} = committed_tx; - let tx_account_locks = transaction.get_account_locks_unchecked(); let fee = fee_details.total_fee(); let inner_instructions = inner_instructions.map(|inner_instructions| { @@ -164,12 +163,18 @@ impl TransactionStatusService { .expect("Expect database write to succeed: TransactionMemos"); } + let message = transaction.message(); + let keys_with_writable = message + .account_keys() + .iter() + .enumerate() + .map(|(index, key)| (key, message.is_writable(index))); + blockstore .write_transaction_status( slot, *transaction.signature(), - tx_account_locks.writable, - tx_account_locks.readonly, + keys_with_writable, transaction_status_meta, transaction_index, ) From 871588cc3d72b97a90d2d12e72bb4458560f5890 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Aug 2024 22:05:50 +0800 Subject: [PATCH 174/529] build(deps): bump arrayvec from 0.7.4 to 0.7.6 (#2668) * build(deps): bump arrayvec from 0.7.4 to 0.7.6 Bumps [arrayvec](https://github.com/bluss/arrayvec) from 0.7.4 to 0.7.6. - [Release notes](https://github.com/bluss/arrayvec/releases) - [Changelog](https://github.com/bluss/arrayvec/blob/master/CHANGELOG.md) - [Commits](https://github.com/bluss/arrayvec/compare/0.7.4...0.7.6) --- updated-dependencies: - dependency-name: arrayvec dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 496c5633b958a6..4009b00baa1e90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -585,9 +585,9 @@ checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "ascii" diff --git a/Cargo.toml b/Cargo.toml index f069a02e85e47e..d9da432322fe1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -182,7 +182,7 @@ ark-ff = "0.4.0" ark-serialize = "0.4.0" array-bytes = "=1.4.1" arrayref = "0.3.8" -arrayvec = "0.7.4" +arrayvec = "0.7.6" assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 133ce53e57b91c..2d71ce49c47119 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -382,9 +382,9 @@ checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "ascii" From 3d2639e5f9d778c544e37710fe2417dfe19d56ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Aug 2024 22:06:01 +0800 Subject: [PATCH 175/529] build(deps): bump bytemuck_derive from 1.7.0 to 1.7.1 (#2669) * build(deps): bump bytemuck_derive from 1.7.0 to 1.7.1 Bumps [bytemuck_derive](https://github.com/Lokathor/bytemuck) from 
1.7.0 to 1.7.1. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/bytemuck_derive-v1.7.0...bytemuck_derive-v1.7.1) --- updated-dependencies: - dependency-name: bytemuck_derive dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4009b00baa1e90..6878c54452f15b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1159,9 +1159,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" +checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index d9da432322fe1d..2a772d3ee85d3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -200,7 +200,7 @@ bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" bytemuck = "1.17.0" -bytemuck_derive = "1.7.0" +bytemuck_derive = "1.7.1" byteorder = "1.5.0" bytes = "1.7" bzip2 = "0.4.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2d71ce49c47119..54472dd7e4dc20 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -856,9 +856,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" +checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", From 4f9679a752fae61e0a11b51d2e35f93e5c5c8a69 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 20 Aug 2024 10:01:49 -0500 Subject: [PATCH 176/529] Make BankHashDetails use a subtype for bank hash mixins (#2644) The option to only record bank hash, and not the items that factor into the bank hash, was added a while ago. So, all of the items that factor into bank hash are optional. 
Someone is likely to either care about all of the bank hash mixins or none of them, so move the items into a sub type and make it an Option<_> --- ledger-tool/src/main.rs | 19 ++-- runtime/src/bank/bank_hash_details.rs | 136 ++++++++++---------------- 2 files changed, 58 insertions(+), 97 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 06f17a55e03a2a..d9a3a60d2f4600 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -571,13 +571,13 @@ fn setup_slot_recording( exit(1); }); - let mut include_bank = false; + let mut include_bank_hash_components = false; let mut include_tx = false; if let Some(args) = arg_matches.values_of("record_slots_config") { for arg in args { match arg { "tx" => include_tx = true, - "accounts" => include_bank = true, + "accounts" => include_bank_hash_components = true, _ => unreachable!(), } } @@ -603,16 +603,11 @@ fn setup_slot_recording( let slot_callback = Arc::new({ let slots = Arc::clone(&slot_details); move |bank: &Bank| { - let mut details = if include_bank { - bank_hash_details::SlotDetails::try_from(bank).unwrap() - } else { - bank_hash_details::SlotDetails { - slot: bank.slot(), - bank_hash: bank.hash().to_string(), - ..Default::default() - } - }; - + let mut details = bank_hash_details::SlotDetails::new_from_bank( + bank, + include_bank_hash_components, + ) + .unwrap(); let mut slots = slots.lock().unwrap(); if let Some(recorded_slot) = slots.iter_mut().find(|f| f.slot == details.slot) { diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index 5ab13d85c4d89b..6af468a911916e 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -104,66 +104,28 @@ impl From for TransactionCommitDetails { } } -/// The components that go into a bank hash calculation for a single bank/slot. 
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Default)] pub struct SlotDetails { pub slot: Slot, pub bank_hash: String, - #[serde(skip_serializing_if = "String::is_empty")] - #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none", default, flatten)] + pub bank_hash_components: Option, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub transactions: Vec, +} + +/// The components that go into a bank hash calculation for a single bank +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Default)] +pub struct BankHashComponents { pub parent_bank_hash: String, - #[serde(skip_serializing_if = "String::is_empty")] - #[serde(default)] pub accounts_delta_hash: String, - #[serde(skip_serializing_if = "u64_is_zero")] - #[serde(default)] pub signature_count: u64, - #[serde(skip_serializing_if = "String::is_empty")] - #[serde(default)] pub last_blockhash: String, - #[serde(skip_serializing_if = "accounts_is_empty")] - #[serde(default)] pub accounts: AccountsDetails, - #[serde(skip_serializing_if = "Vec::is_empty")] - #[serde(default)] - pub transactions: Vec, -} - -fn u64_is_zero(val: &u64) -> bool { - *val == 0 -} - -fn accounts_is_empty(accounts: &AccountsDetails) -> bool { - accounts.accounts.is_empty() } impl SlotDetails { - pub fn new( - slot: Slot, - bank_hash: Hash, - parent_bank_hash: Hash, - accounts_delta_hash: Hash, - signature_count: u64, - last_blockhash: Hash, - accounts: AccountsDetails, - ) -> Self { - Self { - slot, - bank_hash: bank_hash.to_string(), - parent_bank_hash: parent_bank_hash.to_string(), - accounts_delta_hash: accounts_delta_hash.to_string(), - signature_count, - last_blockhash: last_blockhash.to_string(), - accounts, - transactions: Vec::new(), - } - } -} - -impl TryFrom<&Bank> for SlotDetails { - type Error = String; - - fn try_from(bank: &Bank) -> Result { + pub fn new_from_bank(bank: &Bank, include_bank_hash_components: bool) -> Result { let slot = bank.slot(); if !bank.is_frozen() { return Err(format!( @@ -171,26 +133,34 @@ impl TryFrom<&Bank> for SlotDetails { )); } - // This bank is frozen; as a result, we know that the state has been - // hashed which means the delta hash is Some(). So, .unwrap() is safe - let AccountsDeltaHash(accounts_delta_hash) = bank - .rc - .accounts - .accounts_db - .get_accounts_delta_hash(slot) - .unwrap(); - - let accounts = bank.get_accounts_for_bank_hash_details(); + let bank_hash_components = if include_bank_hash_components { + // This bank is frozen; as a result, we know that the state has been + // hashed which means the delta hash is Some(). 
So, .unwrap() is safe + let AccountsDeltaHash(accounts_delta_hash) = bank + .rc + .accounts + .accounts_db + .get_accounts_delta_hash(slot) + .unwrap(); + let accounts = bank.get_accounts_for_bank_hash_details(); + + Some(BankHashComponents { + parent_bank_hash: bank.parent_hash().to_string(), + accounts_delta_hash: accounts_delta_hash.to_string(), + signature_count: bank.signature_count(), + last_blockhash: bank.last_blockhash().to_string(), + accounts: AccountsDetails { accounts }, + }) + } else { + None + }; - Ok(Self::new( + Ok(Self { slot, - bank.hash(), - bank.parent_hash(), - accounts_delta_hash, - bank.signature_count(), - bank.last_blockhash(), - AccountsDetails { accounts }, - )) + bank_hash: bank.hash().to_string(), + bank_hash_components, + transactions: Vec::new(), + }) } } @@ -291,7 +261,7 @@ impl<'de> Deserialize<'de> for AccountsDetails { /// Output the components that comprise the overall bank hash for the supplied `Bank` pub fn write_bank_hash_details_file(bank: &Bank) -> std::result::Result<(), String> { - let slot_details = SlotDetails::try_from(bank)?; + let slot_details = SlotDetails::new_from_bank(bank, /*include_bank_hash_mixins:*/ true)?; let details = BankHashDetails::new(vec![slot_details]); let parent_dir = bank @@ -328,11 +298,9 @@ pub mod tests { use super::*; fn build_details(num_slots: usize) -> BankHashDetails { - use solana_sdk::hash::{hash, hashv}; - let slot_details: Vec<_> = (0..num_slots) .map(|slot| { - let signature_count = 314; + let slot = slot as u64; let account = AccountSharedData::from(Account { lamports: 123_456_789, @@ -342,7 +310,7 @@ pub mod tests { rent_epoch: 123, }); let account_pubkey = Pubkey::new_unique(); - let account_hash = AccountHash(hash("account".as_bytes())); + let account_hash = AccountHash(solana_sdk::hash::hash("account".as_bytes())); let accounts = AccountsDetails { accounts: vec![PubkeyHashAccount { pubkey: account_pubkey, @@ -351,20 +319,18 @@ pub mod tests { }], }; - let bank_hash = hashv(&["bank".as_bytes(), &slot.to_le_bytes()]); - let parent_bank_hash = hash("parent_bank".as_bytes()); - let accounts_delta_hash = hash("accounts_delta".as_bytes()); - let last_blockhash = hash("last_blockhash".as_bytes()); - - SlotDetails::new( - slot as Slot, - bank_hash, - parent_bank_hash, - accounts_delta_hash, - signature_count, - last_blockhash, - accounts, - ) + SlotDetails { + slot, + bank_hash: format!("bank{slot}"), + bank_hash_components: Some(BankHashComponents { + parent_bank_hash: "parent_bank_hash".into(), + accounts_delta_hash: "accounts_delta_hash".into(), + signature_count: slot + 10, + last_blockhash: "last_blockhash".into(), + accounts, + }), + transactions: vec![], + } }) .collect(); From 7707ebde339ef6edad39adb323a50dc255f3240b Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:40:36 -0400 Subject: [PATCH 177/529] perf: improve efficiency on processing instructions (#2645) * perf: improve efficiency on processing instructions: 1. add filter to avoid duplicated lookups for same program_id_index; 2. 
add static filter to filter out non-builtin programs early; * set array size to MAX_STATIC_ACCOUNTS_PER_PACKET * limit access of StaticAccountKeysMeta --- Cargo.lock | 1 + builtins-default-costs/src/lib.rs | 14 ++++ programs/sbf/Cargo.lock | 10 +++ runtime-transaction/Cargo.toml | 3 +- .../src/compute_budget_instruction_details.rs | 79 +++++++++---------- .../src/compute_budget_program_id_filter.rs | 37 +++++++++ runtime-transaction/src/lib.rs | 1 + transaction-view/src/lib.rs | 2 +- .../src/static_account_keys_meta.rs | 4 +- 9 files changed, 106 insertions(+), 45 deletions(-) create mode 100644 runtime-transaction/src/compute_budget_program_id_filter.rs diff --git a/Cargo.lock b/Cargo.lock index 6878c54452f15b..2b659e2f5ffa98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7403,6 +7403,7 @@ dependencies = [ name = "solana-runtime-transaction" version = "2.1.0" dependencies = [ + "agave-transaction-view", "bincode", "criterion", "log", diff --git a/builtins-default-costs/src/lib.rs b/builtins-default-costs/src/lib.rs index 48a210b2197338..43c5c3043fcfc0 100644 --- a/builtins-default-costs/src/lib.rs +++ b/builtins-default-costs/src/lib.rs @@ -40,3 +40,17 @@ lazy_static! { .cloned() .collect(); } + +lazy_static! { + /// A table of 256 booleans indicates whether the first `u8` of a Pubkey exists in + /// BUILTIN_INSTRUCTION_COSTS. If the value is true, the Pubkey might be a builtin key; + /// if false, it cannot be a builtin key. This table allows for quick filtering of + /// builtin program IDs without the need for hashing. + pub static ref MAYBE_BUILTIN_KEY: [bool; 256] = { + let mut temp_table: [bool; 256] = [false; 256]; + BUILTIN_INSTRUCTION_COSTS + .keys() + .for_each(|key| temp_table[key.as_ref()[0] as usize] = true); + temp_table + }; +} diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 54472dd7e4dc20..f78add5c5688cb 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -73,6 +73,14 @@ dependencies = [ "thiserror", ] +[[package]] +name = "agave-transaction-view" +version = "2.1.0" +dependencies = [ + "solana-sdk", + "solana-svm-transaction", +] + [[package]] name = "agave-validator" version = "2.1.0" @@ -5736,7 +5744,9 @@ dependencies = [ name = "solana-runtime-transaction" version = "2.1.0" dependencies = [ + "agave-transaction-view", "log", + "solana-builtins-default-costs", "solana-compute-budget", "solana-sdk", "solana-svm-transaction", diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index 32e42cefcf2f29..69d172ec112c7c 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -10,7 +10,9 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +agave-transaction-view = { workspace = true } log = { workspace = true } +solana-builtins-default-costs = { workspace = true } solana-compute-budget = { workspace = true } solana-sdk = { workspace = true } solana-svm-transaction = { workspace = true } @@ -24,7 +26,6 @@ name = "solana_runtime_transaction" bincode = { workspace = true } criterion = { workspace = true } rand = { workspace = true } -solana-builtins-default-costs = { workspace = true } solana-program = { workspace = true } [package.metadata.docs.rs] diff --git a/runtime-transaction/src/compute_budget_instruction_details.rs b/runtime-transaction/src/compute_budget_instruction_details.rs index 96c97ab0483ea3..ab148ef14ae152 100644 --- a/runtime-transaction/src/compute_budget_instruction_details.rs +++ b/runtime-transaction/src/compute_budget_instruction_details.rs @@ 
-1,8 +1,9 @@ use { + crate::compute_budget_program_id_filter::ComputeBudgetProgramIdFilter, solana_compute_budget::compute_budget_limits::*, solana_sdk::{ borsh1::try_from_slice_unchecked, - compute_budget::{self, ComputeBudgetInstruction}, + compute_budget::ComputeBudgetInstruction, instruction::InstructionError, pubkey::Pubkey, saturating_add_assign, @@ -28,13 +29,18 @@ impl ComputeBudgetInstructionDetails { pub fn try_from<'a>( instructions: impl Iterator)>, ) -> Result { + let mut filter = ComputeBudgetProgramIdFilter::new(); + let mut compute_budget_instruction_details = ComputeBudgetInstructionDetails::default(); for (i, (program_id, instruction)) in instructions.enumerate() { - compute_budget_instruction_details.process_instruction( - i as u8, - program_id, - &instruction, - )?; + if filter.is_compute_budget_program(instruction.program_id_index as usize, program_id) { + compute_budget_instruction_details.process_instruction(i as u8, &instruction)?; + } else { + saturating_add_assign!( + compute_budget_instruction_details.num_non_compute_budget_instructions, + 1 + ); + } } Ok(compute_budget_instruction_details) @@ -94,46 +100,37 @@ impl ComputeBudgetInstructionDetails { }) } - fn process_instruction( - &mut self, - index: u8, - program_id: &Pubkey, - instruction: &SVMInstruction, - ) -> Result<()> { - if compute_budget::check_id(program_id) { - let invalid_instruction_data_error = - TransactionError::InstructionError(index, InstructionError::InvalidInstructionData); - let duplicate_instruction_error = TransactionError::DuplicateInstruction(index); - - match try_from_slice_unchecked(instruction.data) { - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if self.requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - self.requested_heap_size = Some((index, bytes)); + fn process_instruction(&mut self, index: u8, instruction: &SVMInstruction) -> Result<()> { + let invalid_instruction_data_error = + TransactionError::InstructionError(index, InstructionError::InvalidInstructionData); + let duplicate_instruction_error = TransactionError::DuplicateInstruction(index); + + match try_from_slice_unchecked(instruction.data) { + Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { + if self.requested_heap_size.is_some() { + return Err(duplicate_instruction_error); } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if self.requested_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - self.requested_compute_unit_limit = Some((index, compute_unit_limit)); + self.requested_heap_size = Some((index, bytes)); + } + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { + if self.requested_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if self.requested_compute_unit_price.is_some() { - return Err(duplicate_instruction_error); - } - self.requested_compute_unit_price = Some((index, micro_lamports)); + self.requested_compute_unit_limit = Some((index, compute_unit_limit)); + } + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { + if self.requested_compute_unit_price.is_some() { + return Err(duplicate_instruction_error); } - Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) => { - if self.requested_loaded_accounts_data_size_limit.is_some() { - return Err(duplicate_instruction_error); - } - self.requested_loaded_accounts_data_size_limit = Some((index, 
bytes)); + self.requested_compute_unit_price = Some((index, micro_lamports)); + } + Ok(ComputeBudgetInstruction::SetLoadedAccountsDataSizeLimit(bytes)) => { + if self.requested_loaded_accounts_data_size_limit.is_some() { + return Err(duplicate_instruction_error); } - _ => return Err(invalid_instruction_data_error), + self.requested_loaded_accounts_data_size_limit = Some((index, bytes)); } - } else { - saturating_add_assign!(self.num_non_compute_budget_instructions, 1); + _ => return Err(invalid_instruction_data_error), } Ok(()) diff --git a/runtime-transaction/src/compute_budget_program_id_filter.rs b/runtime-transaction/src/compute_budget_program_id_filter.rs new file mode 100644 index 00000000000000..b89b67113de105 --- /dev/null +++ b/runtime-transaction/src/compute_budget_program_id_filter.rs @@ -0,0 +1,37 @@ +// static account keys has max +use { + agave_transaction_view::static_account_keys_meta::MAX_STATIC_ACCOUNTS_PER_PACKET as FILTER_SIZE, + solana_builtins_default_costs::MAYBE_BUILTIN_KEY, solana_sdk::pubkey::Pubkey, +}; + +pub(crate) struct ComputeBudgetProgramIdFilter { + // array of slots for all possible static and sanitized program_id_index, + // each slot indicates if a program_id_index has not been checked (eg, None), + // or already checked with result (eg, Some(result)) that can be reused. + flags: [Option; FILTER_SIZE as usize], +} + +impl ComputeBudgetProgramIdFilter { + pub(crate) fn new() -> Self { + ComputeBudgetProgramIdFilter { + flags: [None; FILTER_SIZE as usize], + } + } + + #[inline] + pub(crate) fn is_compute_budget_program(&mut self, index: usize, program_id: &Pubkey) -> bool { + *self + .flags + .get_mut(index) + .expect("program id index is sanitized") + .get_or_insert_with(|| Self::check_program_id(program_id)) + } + + #[inline] + fn check_program_id(program_id: &Pubkey) -> bool { + if !MAYBE_BUILTIN_KEY[program_id.as_ref()[0] as usize] { + return false; + } + solana_sdk::compute_budget::check_id(program_id) + } +} diff --git a/runtime-transaction/src/lib.rs b/runtime-transaction/src/lib.rs index 011df606d59cf3..28d54b4eb3b6b8 100644 --- a/runtime-transaction/src/lib.rs +++ b/runtime-transaction/src/lib.rs @@ -2,6 +2,7 @@ #![allow(clippy::arithmetic_side_effects)] mod compute_budget_instruction_details; +mod compute_budget_program_id_filter; pub mod instructions_processor; pub mod runtime_transaction; pub mod transaction_meta; diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index baa3b91b84ef15..13c5a43fd4016c 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -14,6 +14,6 @@ pub mod result; #[allow(dead_code)] mod signature_meta; #[allow(dead_code)] -mod static_account_keys_meta; +pub mod static_account_keys_meta; #[allow(dead_code)] pub mod transaction_meta; diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_meta.rs index bea6a3e7394442..bae934863cfa4e 100644 --- a/transaction-view/src/static_account_keys_meta.rs +++ b/transaction-view/src/static_account_keys_meta.rs @@ -10,12 +10,12 @@ use { // This means the maximum number of 32 byte keys is 38. // 38 as an min-sized encoded u16 is 1 byte. // We can simply read this byte, if it's >38 we can return None. -const MAX_STATIC_ACCOUNTS_PER_PACKET: u8 = +pub const MAX_STATIC_ACCOUNTS_PER_PACKET: u8 = (PACKET_DATA_SIZE / core::mem::size_of::()) as u8; /// Contains meta-data about the static account keys in a transaction packet. 
#[derive(Default)] -pub struct StaticAccountKeysMeta { +pub(crate) struct StaticAccountKeysMeta { /// The number of static accounts in the transaction. pub(crate) num_static_accounts: u8, /// The offset to the first static account in the transaction. From 4daf93c3d37d92a7803eb30feece13f8d36a5834 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 20 Aug 2024 11:13:21 -0500 Subject: [PATCH 178/529] RuntimeTransaction - transaction (#2661) --- .../src/runtime_transaction.rs | 79 ++++++++++--------- 1 file changed, 42 insertions(+), 37 deletions(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 9c2f75a0868755..2a8772ce168977 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -17,20 +17,18 @@ use { solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_sdk::{ hash::Hash, - message::{AddressLoader, SanitizedMessage, SanitizedVersionedMessage}, + message::AddressLoader, pubkey::Pubkey, - signature::Signature, simple_vote_transaction_checker::is_simple_vote_transaction, - transaction::{Result, SanitizedVersionedTransaction}, + transaction::{Result, SanitizedTransaction, SanitizedVersionedTransaction}, }, solana_svm_transaction::instruction::SVMInstruction, std::collections::HashSet, }; #[derive(Debug, Clone, Eq, PartialEq)] -pub struct RuntimeTransaction { - signatures: Vec, - message: M, +pub struct RuntimeTransaction { + transaction: T, // transaction meta is a collection of fields, it is updated // during message state transition meta: TransactionMeta, @@ -44,11 +42,11 @@ trait DynamicMetaAccess: StaticMetaAccess {} // Implement the gate traits for the message types that should // have access to the static and dynamic metadata. -impl StaticMetaAccess for SanitizedVersionedMessage {} -impl StaticMetaAccess for SanitizedMessage {} -impl DynamicMetaAccess for SanitizedMessage {} +impl StaticMetaAccess for SanitizedVersionedTransaction {} +impl StaticMetaAccess for SanitizedTransaction {} +impl DynamicMetaAccess for SanitizedTransaction {} -impl StaticMeta for RuntimeTransaction { +impl StaticMeta for RuntimeTransaction { fn message_hash(&self) -> &Hash { &self.meta.message_hash } @@ -68,7 +66,7 @@ impl StaticMeta for RuntimeTransaction { impl DynamicMeta for RuntimeTransaction {} -impl RuntimeTransaction { +impl RuntimeTransaction { pub fn try_from( sanitized_versioned_tx: SanitizedVersionedTransaction, message_hash: Option, @@ -80,8 +78,9 @@ impl RuntimeTransaction { .unwrap_or_else(|| is_simple_vote_transaction(&sanitized_versioned_tx)), ); - let (signatures, message) = sanitized_versioned_tx.destruct(); - meta.set_message_hash(message_hash.unwrap_or_else(|| message.message.hash())); + meta.set_message_hash( + message_hash.unwrap_or_else(|| sanitized_versioned_tx.get_message().message.hash()), + ); let ComputeBudgetLimits { compute_unit_limit, @@ -89,7 +88,8 @@ impl RuntimeTransaction { loaded_accounts_bytes, .. 
} = process_compute_budget_instructions( - message + sanitized_versioned_tx + .get_message() .program_instructions_iter() .map(|(program_id, ix)| (program_id, SVMInstruction::from(ix))), )?; @@ -98,26 +98,30 @@ impl RuntimeTransaction { meta.set_loaded_accounts_bytes(loaded_accounts_bytes.get()); Ok(Self { - signatures, - message, + transaction: sanitized_versioned_tx, meta, }) } } -impl RuntimeTransaction { +impl RuntimeTransaction { pub fn try_from( - statically_loaded_runtime_tx: RuntimeTransaction, + statically_loaded_runtime_tx: RuntimeTransaction, address_loader: impl AddressLoader, reserved_account_keys: &HashSet, ) -> Result { + let hash = *statically_loaded_runtime_tx.message_hash(); + let is_simple_vote_tx = statically_loaded_runtime_tx.is_simple_vote_tx(); + let sanitized_transaction = SanitizedTransaction::try_new( + statically_loaded_runtime_tx.transaction, + hash, + is_simple_vote_tx, + address_loader, + reserved_account_keys, + )?; + let mut tx = Self { - signatures: statically_loaded_runtime_tx.signatures, - message: SanitizedMessage::try_new( - statically_loaded_runtime_tx.message, - address_loader, - reserved_account_keys, - )?, + transaction: sanitized_transaction, meta: statically_loaded_runtime_tx.meta, }; tx.load_dynamic_metadata()?; @@ -222,7 +226,7 @@ mod tests { svt: SanitizedVersionedTransaction, is_simple_vote: Option, ) -> bool { - RuntimeTransaction::::try_from(svt, None, is_simple_vote) + RuntimeTransaction::::try_from(svt, None, is_simple_vote) .unwrap() .meta .is_simple_vote_tx @@ -254,7 +258,7 @@ mod tests { let hash = Hash::new_unique(); let statically_loaded_transaction = - RuntimeTransaction::::try_from( + RuntimeTransaction::::try_from( non_vote_sanitized_versioned_transaction(), Some(hash), None, @@ -264,7 +268,7 @@ mod tests { assert_eq!(hash, *statically_loaded_transaction.message_hash()); assert!(!statically_loaded_transaction.is_simple_vote_tx()); - let dynamically_loaded_transaction = RuntimeTransaction::::try_from( + let dynamically_loaded_transaction = RuntimeTransaction::::try_from( statically_loaded_transaction, SimpleAddressLoader::Disabled, &ReservedAccountKeys::empty_key_set(), @@ -284,16 +288,17 @@ mod tests { let loaded_accounts_bytes = 1_024; let mut test_transaction = TestTransaction::new(); - let runtime_transaction_static = RuntimeTransaction::::try_from( - test_transaction - .add_compute_unit_limit(compute_unit_limit) - .add_compute_unit_price(compute_unit_price) - .add_loaded_accounts_bytes(loaded_accounts_bytes) - .to_sanitized_versioned_transaction(), - Some(hash), - None, - ) - .unwrap(); + let runtime_transaction_static = + RuntimeTransaction::::try_from( + test_transaction + .add_compute_unit_limit(compute_unit_limit) + .add_compute_unit_price(compute_unit_price) + .add_loaded_accounts_bytes(loaded_accounts_bytes) + .to_sanitized_versioned_transaction(), + Some(hash), + None, + ) + .unwrap(); assert_eq!(&hash, runtime_transaction_static.message_hash()); assert!(!runtime_transaction_static.is_simple_vote_tx()); From 951bb04d8639c7a92be79e10c5eea6210d73ce81 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:50:19 -0500 Subject: [PATCH 179/529] generalize support for bin range of accounts hash tool (#2649) * generalize support for bin range of accounts hash tool * fix comments * clippy * sort deps * fmtwq * pr feedbacks * Update accounts-db/accounts-hash-cache-tool/src/main.rs Co-authored-by: Brooks * Update accounts-db/accounts-hash-cache-tool/src/main.rs Co-authored-by: 
Brooks * Update accounts-db/accounts-hash-cache-tool/src/main.rs Co-authored-by: Brooks * pr feedback: rename variable --------- Co-authored-by: HaoranYi Co-authored-by: Brooks --- Cargo.lock | 1 + .../accounts-hash-cache-tool/Cargo.toml | 1 + .../accounts-hash-cache-tool/src/main.rs | 71 +++++++++---------- 3 files changed, 37 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b659e2f5ffa98..90d37815cebba9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -73,6 +73,7 @@ dependencies = [ "memmap2", "rayon", "solana-accounts-db", + "solana-clap-utils", "solana-program", "solana-version", ] diff --git a/accounts-db/accounts-hash-cache-tool/Cargo.toml b/accounts-db/accounts-hash-cache-tool/Cargo.toml index 908875a4662ceb..241c849d2668e5 100644 --- a/accounts-db/accounts-hash-cache-tool/Cargo.toml +++ b/accounts-db/accounts-hash-cache-tool/Cargo.toml @@ -16,6 +16,7 @@ clap = { workspace = true } memmap2 = { workspace = true } rayon = { workspace = true } solana-accounts-db = { workspace = true } +solana-clap-utils = { workspace = true } solana-program = { workspace = true } solana-version = { workspace = true } diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 076568b4759498..b692ca9482fe25 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -12,6 +12,7 @@ use { pubkey_bins::PubkeyBinCalculator24, CacheHashDataFileEntry, CacheHashDataFileHeader, ParsedCacheHashDataFilename, }, + solana_clap_utils::input_parsers::values_of, solana_program::pubkey::Pubkey, std::{ cmp::{self, Ordering}, @@ -20,6 +21,7 @@ use { iter, mem::size_of, num::Saturating, + ops::Range, path::{Path, PathBuf}, str, sync::RwLock, @@ -146,17 +148,24 @@ fn main() { ), ) .arg( - Arg::with_name("bin_of_interest") - .long("bin-of-interest") + Arg::with_name("bins_of_interest") + .long("bins-of-interest") .takes_value(true) - .value_name("INDEX") - .help("Specifies a single bin to diff") + .value_name("BINS") + .min_values(1) + .max_values(2) + .value_delimiter("-") + .require_delimiter(true) + .multiple(false) + .help("Specifies bins to diff") .long_help( - "Specifies a single bin to diff. \ + "Specifies bins to diff. \ When diffing large state that does not fit in memory, \ - it may be neccessary to diff a subset at a time. \ - Use this arg to limit the state to a single bin. \ - The INDEX must be less than --bins." + it may be necessary to diff a subset at a time. \ + Use this arg to limit the state to bins of interest. \ + This arg takes either a single bin or a bin range. \ + A bin range is specified as \"start-end\", where \ + \"start\" is inclusive, and \"end\" is exclusive." 
), ), ), @@ -225,22 +234,20 @@ fn cmd_diff_state( let path1 = value_t_or_exit!(subcommand_matches, "path1", String); let path2 = value_t_or_exit!(subcommand_matches, "path2", String); let num_bins = value_t_or_exit!(subcommand_matches, "bins", usize); - let bin_of_interest = - if let Some(bin_of_interest) = subcommand_matches.value_of("bin_of_interest") { - let bin_of_interest = bin_of_interest - .parse() - .map_err(|err| format!("argument 'bin-of-interest' is not a valid value: {err}"))?; - if bin_of_interest >= num_bins { - return Err(format!( - "argument 'bin-of-interest' must be less than 'bins', \ - bins: {num_bins}, bin-of-interest: {bin_of_interest}", - )); + + let bins_of_interest = + if let Some(bins) = values_of::(subcommand_matches, "bins_of_interest") { + match bins.len() { + 1 => bins[0]..bins[0].saturating_add(1), + 2 => bins[0]..bins[1], + _ => { + unreachable!("invalid number of values given to bins_of_interest.") + } } - Some(bin_of_interest) } else { - None + 0..usize::MAX }; - do_diff_state(path1, path2, num_bins, bin_of_interest) + do_diff_state(path1, path2, num_bins, bins_of_interest) } fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { @@ -510,7 +517,7 @@ fn do_diff_state( dir1: impl AsRef, dir2: impl AsRef, num_bins: usize, - bin_of_interest: Option, + bins_of_interest: Range, ) -> Result<(), String> { let extract = |dir: &Path| -> Result<_, String> { let files = @@ -521,7 +528,7 @@ } = extract_binned_latest_entries_in( files.iter().map(|file| &file.path), num_bins, - bin_of_interest, + &bins_of_interest, ) .map_err(|err| format!("failed to extract entries: {err}"))?; let num_accounts: usize = latest_entries.iter().map(|bin| bin.len()).sum(); @@ -699,7 +706,7 @@ fn extract_latest_entries_in(file: impl AsRef) -> Result) -> Result>, num_bins: usize, - bin_of_interest: Option, + bins_of_interest: &Range, ) -> Result { - if let Some(bin_of_interest) = bin_of_interest { - assert!(bin_of_interest < num_bins); - } - let binner = PubkeyBinCalculator24::new(num_bins); let mut entries: Box<_> = iter::repeat_with(HashMap::default).take(num_bins).collect(); let mut capitalization = Saturating(0); @@ -744,11 +746,8 @@ let num_entries = scan_mmap(&mmap, |entry| { let bin = binner.bin_from_pubkey(&entry.pubkey); - if let Some(bin_of_interest) = bin_of_interest { - // Is this the bin of interest? If not, skip it. - if bin != bin_of_interest { - return; - } + if !bins_of_interest.contains(&bin) { + return; } capitalization += entry.lamports; From 6a34b3ecbac83f1d8d4bdbc44c1946192ae68921 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 20 Aug 2024 18:25:07 +0000 Subject: [PATCH 180/529] patches bug causing false duplicate nodes error (#2666) The bootstrap code during the validator start pushes a contact-info with a more recent timestamp to gossip. If the node is staked, the contact-info lingers in gossip causing false duplicate node instances when the fully initialized node joins gossip later on. The commit refreshes the timestamp on the contact-info so that it overrides the one pushed by bootstrap and avoids the false duplicates error. 
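As a reading aid, the override rule this fix leans on can be sketched in a few lines of standalone Rust. NodeInstanceInfo and overrides below are simplified stand-ins, not the real gossip CRDS types, which carry more state; the only point illustrated is that, for a given pubkey, the instance created with the later outset timestamp wins:

    // Simplified model of how a fresher contact-info wins; illustration only.
    struct NodeInstanceInfo {
        pubkey: u64, // stand-in for the node's Pubkey
        outset: u64, // wall-clock timestamp taken when the instance was created
    }

    // For the same pubkey, a strictly later outset overrides the older
    // instance; two live instances that cannot override each other are the
    // false-duplicate case this patch avoids.
    fn overrides(new: &NodeInstanceInfo, old: &NodeInstanceInfo) -> bool {
        new.pubkey == old.pubkey && new.outset > old.outset
    }

    fn main() {
        let from_bootstrap = NodeInstanceInfo { pubkey: 7, outset: 100 };
        // Calling hot_swap_pubkey() again refreshes outset, so the
        // contact-info pushed by the fully initialized node is strictly newer.
        let fully_initialized = NodeInstanceInfo { pubkey: 7, outset: 200 };
        assert!(overrides(&fully_initialized, &from_bootstrap));
    }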
--- gossip/src/contact_info.rs | 2 +- validator/src/main.rs | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 09105909b7ab9b..b745db31f43692 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -212,7 +212,7 @@ impl ContactInfo { &self.version } - pub(crate) fn hot_swap_pubkey(&mut self, pubkey: Pubkey) { + pub fn hot_swap_pubkey(&mut self, pubkey: Pubkey) { self.pubkey = pubkey; // Need to update ContactInfo.outset so that this node's contact-info // will override older node with the same pubkey. diff --git a/validator/src/main.rs b/validator/src/main.rs index 45c35a43ef5392..349f01ecace8d7 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -2038,6 +2038,13 @@ pub fn main() { return; } + // Bootstrap code above pushes a contact-info with more recent timestamp to + // gossip. If the node is staked the contact-info lingers in gossip causing + // false duplicate nodes error. + // Below line refreshes the timestamp on contact-info so that it overrides + // the one pushed by bootstrap. + node.info.hot_swap_pubkey(identity_keypair.pubkey()); + let validator = Validator::new( node, identity_keypair, From acaf5b7d48e1b630cbaa4abc859393d8f59f0786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 20 Aug 2024 20:26:21 +0200 Subject: [PATCH 181/529] Adjustments of loader-v4 (#2630) - Removes the empty program account check (InvalidAccountData). - Adjusts checks of program.state.status to conform to error messages. - Adds check to prevent authority transfer to itself. - Adjusts most execution errors to UnsupportedProgramId. - Removes the redundant owner check in execution. - Adds a tombstone in the program cache upon retraction. 
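The last item above is the behavioral subtlety; here is a minimal sketch of the tombstone idea in standalone Rust. CacheEntry and TxBatchCache are illustrative stand-ins, not the real ProgramCacheEntry types, which also carry the executable, slots, and ownership metadata:

    use std::collections::HashMap;

    // Illustrative cache entry states; the u64 key stands in for a Pubkey.
    #[derive(Debug, PartialEq)]
    enum CacheEntry {
        Deployed,
        Closed { slot: u64 }, // tombstone recorded at the retraction slot
    }

    struct TxBatchCache {
        entries: HashMap<u64, CacheEntry>,
    }

    impl TxBatchCache {
        // Mirrors the intent of store_modified_entry() on retract: overwrite
        // the cached executable with a Closed tombstone so later lookups in
        // the same batch see the program as retracted rather than running
        // stale code.
        fn retract(&mut self, key: u64, current_slot: u64) {
            self.entries.insert(key, CacheEntry::Closed { slot: current_slot });
        }
    }

    fn main() {
        let mut cache = TxBatchCache {
            entries: HashMap::from([(42, CacheEntry::Deployed)]),
        };
        cache.retract(42, 300);
        assert_eq!(cache.entries.get(&42), Some(&CacheEntry::Closed { slot: 300 }));
    }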
--- programs/loader-v4/src/lib.rs | 75 ++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 31 deletions(-) diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index f44031f6f15021..91b15e33b27755 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -5,7 +5,7 @@ use { solana_program_runtime::{ invoke_context::InvokeContext, loaded_programs::{ - LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryType, + LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType, DELAY_VISIBILITY_SLOT_OFFSET, }, stable_log, @@ -199,10 +199,6 @@ fn check_program_account( ic_logger_msg!(log_collector, "Program not owned by loader"); return Err(InstructionError::InvalidAccountOwner); } - if program.get_data().is_empty() { - ic_logger_msg!(log_collector, "Program is uninitialized"); - return Err(InstructionError::InvalidAccountData); - } let state = get_state(program.get_data())?; if !program.is_writable() { ic_logger_msg!(log_collector, "Program is not writeable"); @@ -488,12 +484,22 @@ pub fn process_instruction_retract( ); return Err(InstructionError::InvalidArgument); } - if matches!(state.status, LoaderV4Status::Retracted) { + if !matches!(state.status, LoaderV4Status::Deployed) { ic_logger_msg!(log_collector, "Program is not deployed"); return Err(InstructionError::InvalidArgument); } let state = get_state_mut(program.get_data_mut()?)?; state.status = LoaderV4Status::Retracted; + invoke_context + .program_cache_for_tx_batch + .store_modified_entry( + *program.get_key(), + Arc::new(ProgramCacheEntry::new_tombstone( + current_slot, + ProgramCacheEntryOwner::LoaderV4, + ProgramCacheEntryType::Closed, + )), + ); Ok(()) } @@ -518,12 +524,16 @@ pub fn process_instruction_transfer_authority( &program, authority_address, )?; - if new_authority_address.is_some() && !instruction_context.is_instruction_account_signer(2)? { - ic_logger_msg!(log_collector, "New authority did not sign"); - return Err(InstructionError::MissingRequiredSignature); - } let state = get_state_mut(program.get_data_mut()?)?; if let Some(new_authority_address) = new_authority_address { + if !instruction_context.is_instruction_account_signer(2)? 
{ + ic_logger_msg!(log_collector, "New authority did not sign"); + return Err(InstructionError::MissingRequiredSignature); + } + if state.authority_address == new_authority_address { + ic_logger_msg!(log_collector, "No change"); + return Err(InstructionError::InvalidArgument); + } state.authority_address = new_authority_address; } else if matches!(state.status, LoaderV4Status::Deployed) { state.status = LoaderV4Status::Finalized; @@ -575,18 +585,10 @@ pub fn process_instruction_inner( .map_err(|err| Box::new(err) as Box) } else { let program = instruction_context.try_borrow_last_program_account(transaction_context)?; - if !loader_v4::check_id(program.get_owner()) { - ic_logger_msg!(log_collector, "Program not owned by loader"); - return Err(Box::new(InstructionError::InvalidAccountOwner)); - } - if program.get_data().is_empty() { - ic_logger_msg!(log_collector, "Program is uninitialized"); - return Err(Box::new(InstructionError::InvalidAccountData)); - } let state = get_state(program.get_data())?; if matches!(state.status, LoaderV4Status::Retracted) { - ic_logger_msg!(log_collector, "Program is not deployed"); - return Err(Box::new(InstructionError::InvalidArgument)); + ic_logger_msg!(log_collector, "Program is retracted"); + return Err(Box::new(InstructionError::UnsupportedProgramId)); } let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let loaded_program = invoke_context @@ -594,7 +596,7 @@ pub fn process_instruction_inner( .find(program.get_key()) .ok_or_else(|| { ic_logger_msg!(log_collector, "Program is not cached"); - InstructionError::InvalidAccountData + InstructionError::UnsupportedProgramId })?; get_or_create_executor_time.stop(); saturating_add_assign!( @@ -610,10 +612,12 @@ pub fn process_instruction_inner( | ProgramCacheEntryType::Closed | ProgramCacheEntryType::DelayVisibility => { ic_logger_msg!(log_collector, "Program is not deployed"); - Err(Box::new(InstructionError::InvalidAccountData) as Box) + Err(Box::new(InstructionError::UnsupportedProgramId) as Box) } ProgramCacheEntryType::Loaded(executable) => execute(invoke_context, executable), - _ => Err(Box::new(InstructionError::IncorrectProgramId) as Box), + _ => { + Err(Box::new(InstructionError::UnsupportedProgramId) as Box) + } } } .map(|_| 0) @@ -1157,7 +1161,7 @@ mod tests { &bincode::serialize(&LoaderV4Instruction::Truncate { new_size: 0 }).unwrap(), transaction_accounts.clone(), &[(3, false, true), (1, true, false), (2, true, true)], - Err(InstructionError::InvalidAccountData), + Err(InstructionError::AccountDataTooSmall), ); // Error: Program is not retracted @@ -1331,7 +1335,7 @@ mod tests { &bincode::serialize(&LoaderV4Instruction::Deploy).unwrap(), transaction_accounts.clone(), &[(3, false, true), (1, true, false)], - Err(InstructionError::InvalidAccountData), + Err(InstructionError::AccountDataTooSmall), ); // Error: Program fails verification @@ -1410,7 +1414,7 @@ mod tests { &bincode::serialize(&LoaderV4Instruction::Retract).unwrap(), transaction_accounts.clone(), &[(2, false, true), (1, true, false)], - Err(InstructionError::InvalidAccountData), + Err(InstructionError::AccountDataTooSmall), ); // Error: Program is not deployed @@ -1520,18 +1524,27 @@ mod tests { &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts.clone(), &[(2, false, true), (3, true, false), (4, true, false)], - Err(InstructionError::InvalidAccountData), + Err(InstructionError::AccountDataTooSmall), ); // Error: New authority did not sign process_instruction( vec![], 
&bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), - transaction_accounts, + transaction_accounts.clone(), &[(0, false, true), (3, true, false), (4, false, false)], Err(InstructionError::MissingRequiredSignature), ); + // Error: Authority did not change + process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + transaction_accounts, + &[(0, false, true), (3, true, false), (3, true, false)], + Err(InstructionError::InvalidArgument), + ); + test_loader_instruction_general_errors(LoaderV4Instruction::TransferAuthority); } @@ -1598,7 +1611,7 @@ mod tests { &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], - Err(InstructionError::InvalidAccountData), + Err(InstructionError::AccountDataTooSmall), ); // Error: Program is not deployed @@ -1607,7 +1620,7 @@ mod tests { &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], - Err(InstructionError::InvalidArgument), + Err(InstructionError::UnsupportedProgramId), ); // Error: Program fails verification @@ -1616,7 +1629,7 @@ mod tests { &[0, 1, 2, 3], transaction_accounts, &[(1, false, true)], - Err(InstructionError::InvalidAccountData), + Err(InstructionError::UnsupportedProgramId), ); } } From 485216bd77752621be14553fccc27b4e38c7c41f Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Tue, 20 Aug 2024 13:09:52 -0700 Subject: [PATCH 182/529] filter out unstaked NodeInstance from sent PullRequests (#2637) * filter out unstaked NodeInstance from sent PullRequests * add descriptor, refactor retain_staked() --------- Co-authored-by: greg --- gossip/src/cluster_info.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 07f7486d44260d..850346c8e0e9d0 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -419,7 +419,11 @@ impl Sanitize for Protocol { // Retains only CRDS values associated with nodes with enough stake. // (some crds types are exempted) -fn retain_staked(values: &mut Vec, stakes: &HashMap) { +fn retain_staked( + values: &mut Vec, + stakes: &HashMap, + drop_unstaked_node_instance: bool, +) { values.retain(|value| { match value.data { CrdsData::ContactInfo(_) => true, @@ -434,6 +438,7 @@ fn retain_staked(values: &mut Vec, stakes: &HashMap) { // the various dashboards. 
CrdsData::Version(_) => true, CrdsData::AccountsHashes(_) => true, + CrdsData::NodeInstance(_) if !drop_unstaked_node_instance => true, CrdsData::LowestSlot(_, _) | CrdsData::LegacyVersion(_) | CrdsData::DuplicateShred(_, _) @@ -1646,7 +1651,7 @@ impl ClusterInfo { .add_relaxed(num_nodes as u64); if self.require_stake_for_gossip(stakes) { push_messages.retain(|_, data| { - retain_staked(data, stakes); + retain_staked(data, stakes, /* drop_unstaked_node_instance */ false); !data.is_empty() }) } @@ -2138,7 +2143,7 @@ impl ClusterInfo { }; if self.require_stake_for_gossip(stakes) { for resp in &mut pull_responses { - retain_staked(resp, stakes); + retain_staked(resp, stakes, /* drop_unstaked_node_instance */ true); } } let (responses, scores): (Vec<_>, Vec<_>) = addrs @@ -2544,9 +2549,13 @@ impl ClusterInfo { } } if self.require_stake_for_gossip(stakes) { - retain_staked(&mut pull_responses, stakes); + retain_staked( + &mut pull_responses, + stakes, + /* drop_unstaked_node_instance */ false, + ); for (_, data) in &mut push_messages { - retain_staked(data, stakes); + retain_staked(data, stakes, /* drop_unstaked_node_instance */ false); } push_messages.retain(|(_, data)| !data.is_empty()); } From 77b4d131502e095d18f3df7caab8b6a1cf7c1887 Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 20 Aug 2024 23:03:03 -0500 Subject: [PATCH 183/529] Update BankHashDetails to include EpochAccountsHash (#2466) The EpochAccountsHash gets mixed into a Bank hash once per epoch. It is just as important as the other items, and it was an oversight for the field not to be included in BankHashDetails. So, add EpochAccountsHash into BankHashDetails --- runtime/src/bank/bank_hash_details.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index 6af468a911916e..ba341a52044f8c 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -121,6 +121,8 @@ pub struct BankHashComponents { pub accounts_delta_hash: String, pub signature_count: u64, pub last_blockhash: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub epoch_accounts_hash: Option, pub accounts: AccountsDetails, } @@ -149,6 +151,10 @@ impl SlotDetails { accounts_delta_hash: accounts_delta_hash.to_string(), signature_count: bank.signature_count(), last_blockhash: bank.last_blockhash().to_string(), + // The bank is already frozen so this should not have to wait + epoch_accounts_hash: bank + .wait_get_epoch_accounts_hash() + .map(|hash| hash.as_ref().to_string()), accounts: AccountsDetails { accounts }, }) } else { @@ -327,6 +333,11 @@ pub mod tests { accounts_delta_hash: "accounts_delta_hash".into(), signature_count: slot + 10, last_blockhash: "last_blockhash".into(), + epoch_accounts_hash: if slot % 2 == 0 { + Some("epoch_accounts_hash".into()) + } else { + None + }, accounts, }), transactions: vec![], From d852dfc94829e8ec25d97aa8b923d2bf49a72c49 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 21 Aug 2024 20:39:35 +0700 Subject: [PATCH 184/529] localnet: fix flakiness (#2684) localnet is flaky: https://buildkite.com/anza/agave/builds/9606#019165a1-af5f-4bfa-b329-3c0adba9dc46 It seems that nodes start with overlapping ports and then solana-gossip can't find them. Avoid the issue by using larger port ranges... 
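The property the new numbers are meant to guarantee is easy to check mechanically. Below is a standalone Rust sketch of that check; treating each range as half-open [start, end) and checking three extra nodes are assumptions of the sketch, mirroring the shell arithmetic in the diff that follows:

    // Sanity check that the per-node dynamic port ranges do not overlap:
    // 8000-8200 and 8200-8400 for the first two nodes, then 8400 + i * 200
    // for each extra node i.
    fn extra_node_range(i: u32) -> (u32, u32) {
        let start = 8400 + i * 200;
        (start, start + 200)
    }

    fn main() {
        let mut ranges = vec![(8000, 8200), (8200, 8400)];
        ranges.extend((1..=3).map(extra_node_range));
        for pair in ranges.windows(2) {
            // Adjacent ranges may share an endpoint but must not overlap.
            assert!(pair[0].1 <= pair[1].0, "overlap: {:?} vs {:?}", pair[0], pair[1]);
        }
    }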
--- ci/localnet-sanity.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ci/localnet-sanity.sh b/ci/localnet-sanity.sh index b01eca31d50d81..3a301e4b31e89d 100755 --- a/ci/localnet-sanity.sh +++ b/ci/localnet-sanity.sh @@ -76,18 +76,18 @@ nodes=( "multinode-demo/bootstrap-validator.sh \ --no-restart \ --init-complete-file init-complete-node0.log \ - --dynamic-port-range 8000-8050" + --dynamic-port-range 8000-8200" "multinode-demo/validator.sh \ --no-restart \ - --dynamic-port-range 8050-8100 + --dynamic-port-range 8200-8400 --init-complete-file init-complete-node1.log \ --rpc-port 18899" ) if [[ extraNodes -gt 0 ]]; then for i in $(seq 1 $extraNodes); do - portStart=$((8100 + i * 50)) - portEnd=$((portStart + 49)) + portStart=$((8400 + i * 200)) + portEnd=$((portStart + 200)) nodes+=( "multinode-demo/validator.sh \ --no-restart \ From 0880cb65869cf3bf024869b2263bb773f7537c02 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 21 Aug 2024 12:15:26 -0400 Subject: [PATCH 185/529] Adds inspect_account() to TransactionProcessingCallback (#2678) --- svm/src/account_loader.rs | 34 ++++++++++++++++++---- svm/src/transaction_processing_callback.rs | 12 ++++++++ 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 5d14c35ec0677c..c7165eca8ff4a5 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -1,8 +1,11 @@ use { crate::{ - account_overrides::AccountOverrides, account_rent_state::RentState, nonce_info::NonceInfo, - rollback_accounts::RollbackAccounts, transaction_error_metrics::TransactionErrorMetrics, - transaction_processing_callback::TransactionProcessingCallback, + account_overrides::AccountOverrides, + account_rent_state::RentState, + nonce_info::NonceInfo, + rollback_accounts::RollbackAccounts, + transaction_error_metrics::TransactionErrorMetrics, + transaction_processing_callback::{AccountState, TransactionProcessingCallback}, }, itertools::Itertools, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, @@ -410,9 +413,11 @@ fn load_transaction_account( loaded_programs: &ProgramCacheForTxBatch, ) -> Result<(LoadedTransactionAccount, bool)> { let mut account_found = true; + let mut was_inspected = false; let is_instruction_account = u8::try_from(account_index) .map(|i| instruction_accounts.contains(&&i)) .unwrap_or(false); + let is_writable = message.is_writable(account_index); let loaded_account = if solana_sdk::sysvar::instructions::check_id(account_key) { // Since the instructions sysvar is constructed by the SVM and modified // for each transaction instruction, it cannot be overridden. @@ -429,7 +434,7 @@ fn load_transaction_account( account: account_override.clone(), rent_collected: 0, } - } else if let Some(program) = (!is_instruction_account && !message.is_writable(account_index)) + } else if let Some(program) = (!is_instruction_account && !is_writable) .then_some(()) .and_then(|_| loaded_programs.find(account_key)) { @@ -447,7 +452,17 @@ fn load_transaction_account( callbacks .get_account_shared_data(account_key) .map(|mut account| { - let rent_collected = if message.is_writable(account_index) { + let rent_collected = if is_writable { + // Inspect the account prior to collecting rent, since + // rent collection can modify the account. 
+ debug_assert!(!was_inspected); + callbacks.inspect_account( + account_key, + AccountState::Alive(&account), + is_writable, + ); + was_inspected = true; + collect_rent_from_account( feature_set, rent_collector, @@ -480,6 +495,15 @@ }) }; + if !was_inspected { + let account_state = if account_found { + AccountState::Alive(&loaded_account.account) + } else { + AccountState::Dead + }; + callbacks.inspect_account(account_key, account_state, is_writable); + } + Ok((loaded_account, account_found)) } diff --git a/svm/src/transaction_processing_callback.rs b/svm/src/transaction_processing_callback.rs index 760a6606568798..34741e9bd7ae3a 100644 --- a/svm/src/transaction_processing_callback.rs +++ b/svm/src/transaction_processing_callback.rs @@ -7,4 +7,16 @@ pub trait TransactionProcessingCallback { fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; fn add_builtin_account(&self, _name: &str, _program_id: &Pubkey) {} + + fn inspect_account(&self, _address: &Pubkey, _account_state: AccountState, _is_writable: bool) { + } +} + +/// The state the account is in initially, before transaction processing +#[derive(Debug)] +pub enum AccountState<'a> { + /// This account is dead, and will be created by this transaction + Dead, + /// This account is alive, and already existed prior to this transaction + Alive(&'a AccountSharedData), } From ee0667d36932a46ee2e2e36419567479f202a8f0 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 21 Aug 2024 15:16:00 -0500 Subject: [PATCH 186/529] ledger-tool: Make blockstore slot functional with no tx metadata (#2423) A previous commit unified the code to output a slot between the bigtable block and blockstore slot commands. In doing so, support for blockstore slot when tx metadata is absent was unintentionally broken. This re-adds support for using the blockstore slot command when the blockstore does not contain tx metadata. --- ledger-tool/src/blockstore.rs | 1 - ledger-tool/src/output.rs | 206 +++++++++++++++++++++++++--------- ledger-tool/tests/basic.rs | 1 - transaction-status/src/lib.rs | 68 ++++++++++- 4 files changed, 219 insertions(+), 57 deletions(-) diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs index 903c232a7e230b..4ee47742de0994 100644 --- a/ledger-tool/src/blockstore.rs +++ b/ledger-tool/src/blockstore.rs @@ -1031,7 +1031,6 @@ fn do_blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) - let blockstore = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); for slot in slots { - println!("Slot {slot}"); output_slot( &blockstore, slot, diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index dbe7ff3ef726cb..4739a0a22b86aa 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -24,11 +24,12 @@ use { hash::Hash, native_token::lamports_to_sol, pubkey::Pubkey, + transaction::VersionedTransaction, }, solana_transaction_status::{ - BlockEncodingOptions, ConfirmedBlock, EncodeError, EncodedConfirmedBlock, + BlockEncodingOptions, ConfirmedBlock, Encodable, EncodedConfirmedBlock, EncodedTransactionWithStatusMeta, EntrySummary, Rewards, TransactionDetails, - UiTransactionEncoding, VersionedConfirmedBlockWithEntries, + UiTransactionEncoding, VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries, VersionedTransactionWithStatusMeta, }, std::{ @@ -462,24 +463,82 @@ impl EncodedConfirmedBlockWithEntries { pub(crate) fn encode_confirmed_block( confirmed_block: ConfirmedBlock, ) -> Result { - let encoded_block = confirmed_block - 
.encode_with_options( - UiTransactionEncoding::Base64, - BlockEncodingOptions { - transaction_details: TransactionDetails::Full, - show_rewards: true, - max_supported_transaction_version: Some(0), - }, - ) - .map_err(|err| match err { - EncodeError::UnsupportedTransactionVersion(version) => LedgerToolError::Generic( - format!("Failed to process unsupported transaction version ({version}) in block"), - ), - })?; + let encoded_block = confirmed_block.encode_with_options( + UiTransactionEncoding::Base64, + BlockEncodingOptions { + transaction_details: TransactionDetails::Full, + show_rewards: true, + max_supported_transaction_version: Some(0), + }, + )?; + let encoded_block: EncodedConfirmedBlock = encoded_block.into(); Ok(encoded_block) } +fn encode_versioned_transactions(block: BlockWithoutMetadata) -> EncodedConfirmedBlock { + let transactions = block + .transactions + .into_iter() + .map(|transaction| EncodedTransactionWithStatusMeta { + transaction: transaction.encode(UiTransactionEncoding::Base64), + meta: None, + version: None, + }) + .collect(); + + EncodedConfirmedBlock { + previous_blockhash: Hash::default().to_string(), + blockhash: block.blockhash, + parent_slot: block.parent_slot, + transactions, + rewards: Rewards::default(), + num_partitions: None, + block_time: None, + block_height: None, + } +} + +pub enum BlockContents { + VersionedConfirmedBlock(VersionedConfirmedBlock), + BlockWithoutMetadata(BlockWithoutMetadata), +} + +// A VersionedConfirmedBlock analogue for use when the transaction metadata +// fields are unavailable. Also supports non-full blocks +pub struct BlockWithoutMetadata { + pub blockhash: String, + pub parent_slot: Slot, + pub transactions: Vec, +} + +impl BlockContents { + pub fn transactions(&self) -> Box + '_> { + match self { + BlockContents::VersionedConfirmedBlock(block) => Box::new( + block + .transactions + .iter() + .map(|VersionedTransactionWithStatusMeta { transaction, .. }| transaction), + ), + BlockContents::BlockWithoutMetadata(block) => Box::new(block.transactions.iter()), + } + } +} + +impl TryFrom for EncodedConfirmedBlock { + type Error = LedgerToolError; + + fn try_from(block_contents: BlockContents) -> Result { + match block_contents { + BlockContents::VersionedConfirmedBlock(block) => { + encode_confirmed_block(ConfirmedBlock::from(block)) + } + BlockContents::BlockWithoutMetadata(block) => Ok(encode_versioned_transactions(block)), + } + } +} + pub fn output_slot( blockstore: &Blockstore, slot: Slot, @@ -488,26 +547,77 @@ pub fn output_slot( verbose_level: u64, all_program_ids: &mut HashMap, ) -> Result<()> { - if blockstore.is_dead(slot) { - if allow_dead_slots { - if *output_format == OutputFormat::Display { - println!(" Slot is dead"); - } - } else { - return Err(LedgerToolError::from(BlockstoreError::DeadSlot)); + let is_root = blockstore.is_root(slot); + let is_dead = blockstore.is_dead(slot); + if *output_format == OutputFormat::Display && verbose_level <= 1 { + if is_root && is_dead { + eprintln!("Slot {slot} is marked as both a root and dead, this shouldn't be possible"); } + println!( + "Slot {slot}{}", + if is_root { + " (root)" + } else if is_dead { + " (dead)" + } else { + "" + } + ); + } + + if is_dead && !allow_dead_slots { + return Err(LedgerToolError::from(BlockstoreError::DeadSlot)); } let Some(meta) = blockstore.meta(slot)? 
else { return Ok(()); }; - let VersionedConfirmedBlockWithEntries { block, entries } = blockstore - .get_complete_block_with_entries( - slot, - /*require_previous_blockhash:*/ false, - /*populate_entries:*/ true, - allow_dead_slots, - )?; + let (block_contents, entries) = match blockstore.get_complete_block_with_entries( + slot, + /*require_previous_blockhash:*/ false, + /*populate_entries:*/ true, + allow_dead_slots, + ) { + Ok(VersionedConfirmedBlockWithEntries { block, entries }) => { + (BlockContents::VersionedConfirmedBlock(block), entries) + } + Err(_) => { + // Transaction metadata could be missing, try to fetch just the + // entries and leave the metadata fields empty + let entries = blockstore.get_slot_entries(slot, /*shred_start_index:*/ 0)?; + + let blockhash = entries + .last() + .filter(|_| meta.is_full()) + .map(|entry| entry.hash) + .unwrap_or(Hash::default()); + let parent_slot = meta.parent_slot.unwrap_or(0); + + let mut entry_summaries = Vec::with_capacity(entries.len()); + let mut starting_transaction_index = 0; + let transactions = entries + .into_iter() + .flat_map(|entry| { + entry_summaries.push(EntrySummary { + num_hashes: entry.num_hashes, + hash: entry.hash, + num_transactions: entry.transactions.len() as u64, + starting_transaction_index, + }); + starting_transaction_index += entry.transactions.len(); + + entry.transactions + }) + .collect(); + + let block = BlockWithoutMetadata { + blockhash: blockhash.to_string(), + parent_slot, + transactions, + }; + (BlockContents::BlockWithoutMetadata(block), entry_summaries) + } + }; if verbose_level == 0 { if *output_format == OutputFormat::Display { @@ -531,24 +641,23 @@ pub fn output_slot( for entry in entries.iter() { num_hashes += entry.num_hashes; } + let blockhash = entries + .last() + .filter(|_| meta.is_full()) + .map(|entry| entry.hash) + .unwrap_or(Hash::default()); - let blockhash = if let Some(entry) = entries.last() { - entry.hash - } else { - Hash::default() - }; - - let transactions = block.transactions.len(); + let mut num_transactions = 0; let mut program_ids = HashMap::new(); - for VersionedTransactionWithStatusMeta { transaction, .. 
} in block.transactions.iter() - { + + for transaction in block_contents.transactions() { + num_transactions += 1; for program_id in get_program_ids(transaction) { *program_ids.entry(*program_id).or_insert(0) += 1; } } - println!( - " Transactions: {transactions}, hashes: {num_hashes}, block_hash: {blockhash}", + " Transactions: {num_transactions}, hashes: {num_hashes}, block_hash: {blockhash}", ); for (pubkey, count) in program_ids.iter() { *all_program_ids.entry(*pubkey).or_insert(0) += count; @@ -557,7 +666,7 @@ pub fn output_slot( output_sorted_program_ids(program_ids); } } else { - let encoded_block = encode_confirmed_block(ConfirmedBlock::from(block))?; + let encoded_block = EncodedConfirmedBlock::try_from(block_contents)?; let cli_block = CliBlockWithEntries { encoded_confirmed_block: EncodedConfirmedBlockWithEntries::try_from( encoded_block, @@ -591,7 +700,7 @@ pub fn output_ledger( let num_slots = num_slots.unwrap_or(Slot::MAX); let mut num_printed = 0; let mut all_program_ids = HashMap::new(); - for (slot, slot_meta) in slot_iterator { + for (slot, _slot_meta) in slot_iterator { if only_rooted && !blockstore.is_root(slot) { continue; } @@ -599,17 +708,6 @@ pub fn output_ledger( break; } - match output_format { - OutputFormat::Display => { - println!("Slot {} root?: {}", slot, blockstore.is_root(slot)) - } - OutputFormat::Json => { - serde_json::to_writer(stdout(), &slot_meta)?; - stdout().write_all(b",\n")?; - } - _ => unreachable!(), - } - if let Err(err) = output_slot( &blockstore, slot, diff --git a/ledger-tool/tests/basic.rs b/ledger-tool/tests/basic.rs index a034df43305e7a..2459f1287497c8 100644 --- a/ledger-tool/tests/basic.rs +++ b/ledger-tool/tests/basic.rs @@ -101,7 +101,6 @@ fn ledger_tool_copy_test(src_shred_compaction: &str, dst_shred_compaction: &str) assert!(src_slot_output.status.success()); assert!(dst_slot_output.status.success()); assert!(!src_slot_output.stdout.is_empty()); - assert_eq!(src_slot_output.stdout, dst_slot_output.stdout); } } diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 6a5eb5fb8d6397..76f7e277c1571c 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -27,7 +27,7 @@ use { }, transaction_context::TransactionReturnData, }, - std::fmt, + std::{collections::HashSet, fmt}, thiserror::Error, }; @@ -1136,6 +1136,38 @@ impl EncodableWithMeta for VersionedTransaction { } } +impl Encodable for VersionedTransaction { + type Encoded = EncodedTransaction; + fn encode(&self, encoding: UiTransactionEncoding) -> Self::Encoded { + match encoding { + UiTransactionEncoding::Binary => EncodedTransaction::LegacyBinary( + bs58::encode(bincode::serialize(self).unwrap()).into_string(), + ), + UiTransactionEncoding::Base58 => EncodedTransaction::Binary( + bs58::encode(bincode::serialize(self).unwrap()).into_string(), + TransactionBinaryEncoding::Base58, + ), + UiTransactionEncoding::Base64 => EncodedTransaction::Binary( + BASE64_STANDARD.encode(bincode::serialize(self).unwrap()), + TransactionBinaryEncoding::Base64, + ), + UiTransactionEncoding::Json | UiTransactionEncoding::JsonParsed => { + EncodedTransaction::Json(UiTransaction { + signatures: self.signatures.iter().map(ToString::to_string).collect(), + message: match &self.message { + VersionedMessage::Legacy(message) => { + message.encode(UiTransactionEncoding::JsonParsed) + } + VersionedMessage::V0(message) => { + message.encode(UiTransactionEncoding::JsonParsed) + } + }, + }) + } + } + } +} + impl Encodable for Transaction { type Encoded = 
EncodedTransaction; fn encode(&self, encoding: UiTransactionEncoding) -> Self::Encoded { @@ -1240,6 +1272,40 @@ impl Encodable for Message { } } +impl Encodable for v0::Message { + type Encoded = UiMessage; + fn encode(&self, encoding: UiTransactionEncoding) -> Self::Encoded { + if encoding == UiTransactionEncoding::JsonParsed { + let account_keys = AccountKeys::new(&self.account_keys, None); + let loaded_addresses = LoadedAddresses::default(); + let loaded_message = + LoadedMessage::new_borrowed(self, &loaded_addresses, &HashSet::new()); + UiMessage::Parsed(UiParsedMessage { + account_keys: parse_v0_message_accounts(&loaded_message), + recent_blockhash: self.recent_blockhash.to_string(), + instructions: self + .instructions + .iter() + .map(|instruction| UiInstruction::parse(instruction, &account_keys, None)) + .collect(), + address_table_lookups: None, + }) + } else { + UiMessage::Raw(UiRawMessage { + header: self.header, + account_keys: self.account_keys.iter().map(ToString::to_string).collect(), + recent_blockhash: self.recent_blockhash.to_string(), + instructions: self + .instructions + .iter() + .map(|ix| UiCompiledInstruction::from(ix, None)) + .collect(), + address_table_lookups: None, + }) + } + } +} + impl EncodableWithMeta for v0::Message { type Encoded = UiMessage; fn encode_with_meta( From 053faa6a299156138d6462c78f6cac096e692dd7 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Wed, 21 Aug 2024 17:12:31 -0700 Subject: [PATCH 187/529] Check poh_recorder.start_slot() hasn't been dumped previously before checking it in ProgressMap. (#2676) * Check poh_recorder.start_slot() hasn't been dumped previously before checking it in progress_map. * Add more comments and put in checks for maybe_start_leader. * Update core/src/replay_stage.rs Co-authored-by: Ashwin Sekar * Use a slot which I am not leader to avoid dumping my own slot panic. * Address reviewer comments. * Address reviewer comments. --------- Co-authored-by: Ashwin Sekar --- core/src/replay_stage.rs | 151 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 146 insertions(+), 5 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 7f7d0f61157d9c..d0a8b3099f19ba 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -34,6 +34,7 @@ use { }, crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, rayon::{prelude::*, ThreadPool}, + solana_accounts_db::contains::Contains, solana_entry::entry::VerifyRecyclers, solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, solana_gossip::cluster_info::ClusterInfo, @@ -1306,6 +1307,13 @@ impl ReplayStage { ) { let start_slot = poh_recorder.read().unwrap().start_slot(); + // It is possible that bank corresponding to `start_slot` has been + // dumped, so we need to double check it exists before proceeding + if !progress.contains(&start_slot) { + warn!("Poh start slot {start_slot}, is missing from progress map. This indicates that we are in the middle of a dump and repair. Skipping retransmission of unpropagated leader slots"); + return; + } + if let (false, Some(latest_leader_slot)) = progress.get_leader_propagation_slot_must_exist(start_slot) { @@ -1962,6 +1970,9 @@ impl ReplayStage { // `poh_slot` and `parent_slot`, because they're in the same // `NUM_CONSECUTIVE_LEADER_SLOTS` block, we still skip the propagated // check because it's still within the propagation grace period. 
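// Editorial sketch, not part of this patch: the defensive pattern the change
// applies is to treat ProgressMap lookups for PoH-derived slots as fallible
// while dump-and-repair may be running. `leader_slot_if_tracked` is an
// illustrative name only; `Contains` is the trait imported above.
fn leader_slot_if_tracked(progress: &ProgressMap, slot: Slot) -> Option<Slot> {
    // A missing entry means the slot's bank was dumped; callers should skip
    // their work instead of reaching the panicking *_must_exist accessors.
    progress.contains(&slot).then_some(slot)
}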
+            //
+            // We've already checked in start_leader() that parent_slot hasn't been
+            // dumped, so we should get it in the progress map.
             if let Some(latest_leader_slot) =
                 progress_map.get_latest_leader_slot_must_exist(parent_slot)
             {
@@ -2034,11 +2045,12 @@ impl ReplayStage {
 
         trace!("{} reached_leader_slot", my_pubkey);
 
-        let parent = bank_forks
-            .read()
-            .unwrap()
-            .get(parent_slot)
-            .expect("parent_slot doesn't exist in bank forks");
+        let Some(parent) = bank_forks.read().unwrap().get(parent_slot) else {
+            warn!(
+                "Poh recorder parent slot {parent_slot} is missing from bank_forks. This indicates \
+                that we are in the middle of a dump and repair. Unable to start leader");
+            return false;
+        };
 
         assert!(parent.is_frozen());
 
@@ -3587,6 +3599,8 @@ impl ReplayStage {
         vote_tracker: &VoteTracker,
         cluster_slots: &ClusterSlots,
     ) {
+        // We would only reach here if the bank is in bank_forks, so it
+        // isn't dumped and should exist in progress map.
         // If propagation has already been confirmed, return
         if progress.get_leader_propagation_slot_must_exist(slot).0 {
             return;
         }
@@ -3902,6 +3916,8 @@ impl ReplayStage {
             )
         };
 
+        // If we reach here, the candidate_vote_bank exists in the bank_forks, so it isn't
+        // dumped and should exist in progress map.
         let propagation_confirmed = is_leader_slot
             || progress
                 .get_leader_propagation_slot_must_exist(candidate_vote_bank.slot())
@@ -3979,6 +3995,8 @@ impl ReplayStage {
         fork_tip: Slot,
         bank_forks: &BankForks,
     ) {
+        // We would only reach here if the bank is in bank_forks, so it
+        // isn't dumped and should exist in progress map.
         let mut current_leader_slot = progress.get_latest_leader_slot_must_exist(fork_tip);
         let mut did_newly_reach_threshold = false;
         let root = bank_forks.root();
@@ -4405,6 +4423,9 @@ impl ReplayStage {
         for failure in heaviest_fork_failures {
             match failure {
                 HeaviestForkFailures::NoPropagatedConfirmation(slot, ..) => {
+                    // If failure is NoPropagatedConfirmation, then inside select_vote_and_reset_forks
+                    // we already confirmed it's in progress map, we should see it in progress map
+                    // here because we don't have dump and repair in between.
                     if let Some(latest_leader_slot) =
                         progress.get_latest_leader_slot_must_exist(*slot)
                     {
@@ -8527,6 +8548,126 @@ pub(crate) mod tests {
         assert_eq!(received_slots, vec![8, 9, 11]);
     }
 
+    #[test]
+    fn test_dumped_slot_not_causing_panic() {
+        solana_logger::setup();
+        let ReplayBlockstoreComponents {
+            validator_node_to_vote_keys,
+            leader_schedule_cache,
+            poh_recorder,
+            vote_simulator,
+            rpc_subscriptions,
+            ref my_pubkey,
+            ref blockstore,
+            ..
+        } = replay_blockstore_components(None, 10, None::<GenerateVotes>);
+
+        let VoteSimulator {
+            mut progress,
+            ref bank_forks,
+            ..
+ } = vote_simulator; + + let poh_recorder = Arc::new(poh_recorder); + let (retransmit_slots_sender, _) = unbounded(); + + // Use a bank slot when I was not leader to avoid panic for dumping my own slot + let slot_to_dump = (1..100) + .find(|i| leader_schedule_cache.slot_leader_at(*i, None) != Some(*my_pubkey)) + .unwrap(); + let bank_to_dump = Bank::new_from_parent( + bank_forks.read().unwrap().get(0).unwrap(), + &leader_schedule_cache + .slot_leader_at(slot_to_dump, None) + .unwrap(), + slot_to_dump, + ); + progress.insert( + slot_to_dump, + ForkProgress::new_from_bank( + &bank_to_dump, + bank_to_dump.collector_id(), + validator_node_to_vote_keys + .get(bank_to_dump.collector_id()) + .unwrap(), + Some(0), + 0, + 0, + ), + ); + assert!(progress.get_propagated_stats(slot_to_dump).is_some()); + bank_to_dump.freeze(); + bank_forks.write().unwrap().insert(bank_to_dump); + let bank_to_dump = bank_forks + .read() + .unwrap() + .get(slot_to_dump) + .expect("Just inserted"); + + progress.get_retransmit_info_mut(0).unwrap().retry_time = Instant::now(); + poh_recorder + .write() + .unwrap() + .reset(bank_to_dump, Some((slot_to_dump + 1, slot_to_dump + 1))); + assert_eq!(poh_recorder.read().unwrap().start_slot(), slot_to_dump); + + // Now dump and repair slot_to_dump + let (mut ancestors, mut descendants) = { + let r_bank_forks = bank_forks.read().unwrap(); + (r_bank_forks.ancestors(), r_bank_forks.descendants()) + }; + let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); + let bank_to_dump_bad_hash = Hash::new_unique(); + duplicate_slots_to_repair.insert(slot_to_dump, bank_to_dump_bad_hash); + let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); + let (dumped_slots_sender, dumped_slots_receiver) = unbounded(); + + ReplayStage::dump_then_repair_correct_slots( + &mut duplicate_slots_to_repair, + &mut ancestors, + &mut descendants, + &mut progress, + bank_forks, + blockstore, + None, + &mut purge_repair_slot_counter, + &dumped_slots_sender, + my_pubkey, + &leader_schedule_cache, + ); + assert_eq!( + dumped_slots_receiver.recv_timeout(Duration::from_secs(1)), + Ok(vec![(slot_to_dump, bank_to_dump_bad_hash)]) + ); + + // Now check it doesn't cause panic in the following functions. 
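// Editorial sanity check, not part of this patch (assumption: the dump above
// removed the dumped bank from bank_forks, which is exactly the state that
// previously made the calls below panic):
assert!(bank_forks.read().unwrap().get(slot_to_dump).is_none());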
+ ReplayStage::retransmit_latest_unpropagated_leader_slot( + &poh_recorder, + &retransmit_slots_sender, + &mut progress, + ); + + let (banking_tracer, _) = BankingTracer::new(None).unwrap(); + // A vote has not technically been rooted, but it doesn't matter for + // this test to use true to avoid skipping the leader slot + let has_new_vote_been_rooted = true; + let track_transaction_indexes = false; + + assert!(!ReplayStage::maybe_start_leader( + my_pubkey, + bank_forks, + &poh_recorder, + &leader_schedule_cache, + &rpc_subscriptions, + &mut progress, + &retransmit_slots_sender, + &mut SkippedSlotsInfo::default(), + &banking_tracer, + has_new_vote_been_rooted, + track_transaction_indexes, + )); + } + #[test] #[should_panic(expected = "We are attempting to dump a block that we produced")] fn test_dump_own_slots_fails() { From 0da63a397251be7fd150ec95c22cb30048d65b85 Mon Sep 17 00:00:00 2001 From: Jon C Date: Thu, 22 Aug 2024 05:47:25 -0400 Subject: [PATCH 188/529] cli: Simulate for compute units consumed during transfer (#1923) * cli: Plumb down `ComputeUnitLimit` to resolve tx * Use simulated compute units in transaction * wallet: Use simulated compute units for non-nonce transactions * Modify transfer test --- clap-utils/src/compute_budget.rs | 1 + cli/src/cluster_query.rs | 4 ++- cli/src/feature.rs | 4 ++- cli/src/nonce.rs | 6 ++-- cli/src/spend_utils.rs | 51 +++++++++++++++++++++++++------- cli/src/stake.rs | 8 +++-- cli/src/validator_info.rs | 6 ++-- cli/src/vote.rs | 8 +++-- cli/src/wallet.rs | 10 +++++-- cli/tests/transfer.rs | 41 ++++++++++++++++++++----- 10 files changed, 109 insertions(+), 30 deletions(-) diff --git a/clap-utils/src/compute_budget.rs b/clap-utils/src/compute_budget.rs index be8142738368eb..24f64ec13b091f 100644 --- a/clap-utils/src/compute_budget.rs +++ b/clap-utils/src/compute_budget.rs @@ -33,6 +33,7 @@ pub fn compute_unit_limit_arg<'a, 'b>() -> Arg<'a, 'b> { .help(COMPUTE_UNIT_LIMIT_ARG.help) } +#[derive(Clone, Copy, Debug, PartialEq)] pub enum ComputeUnitLimit { /// Do not include a compute unit limit instruction, which will give the /// transaction a compute unit limit of: diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 3d9d2f2b90e0f4..82cb9f906f4777 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1471,6 +1471,7 @@ pub fn process_ping( let to = config.signers[0].pubkey(); lamports = lamports.saturating_add(1); + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let ixs = vec![system_instruction::transfer( &config.signers[0].pubkey(), @@ -1479,7 +1480,7 @@ pub fn process_ping( )] .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); Message::new(&ixs, Some(&config.signers[0].pubkey())) }; @@ -1489,6 +1490,7 @@ pub fn process_ping( SpendAmount::Some(lamports), &blockhash, &config.signers[0].pubkey(), + compute_unit_limit, build_message, config.commitment, )?; diff --git a/cli/src/feature.rs b/cli/src/feature.rs index c6e397d2e01088..66696b11c74190 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -10,7 +10,8 @@ use { console::style, serde::{Deserialize, Serialize}, solana_clap_utils::{ - fee_payer::*, hidden_unless_forced, input_parsers::*, input_validators::*, keypair::*, + compute_budget::ComputeUnitLimit, fee_payer::*, hidden_unless_forced, input_parsers::*, + input_validators::*, keypair::*, }, solana_cli_output::{cli_version::CliVersion, QuietDisplay, VerboseDisplay}, 
solana_remote_wallet::remote_wallet::RemoteWalletManager, @@ -972,6 +973,7 @@ fn process_activate( SpendAmount::Some(rent), &blockhash, &fee_payer.pubkey(), + ComputeUnitLimit::Default, |lamports| { Message::new( &feature::activate_with_lamports(&feature_id, &fee_payer.pubkey(), lamports), diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index dc5664607c5ae5..0f0bc15aa9295a 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -462,6 +462,7 @@ pub fn process_create_nonce_account( let nonce_authority = nonce_authority.unwrap_or_else(|| config.signers[0].pubkey()); + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let ixs = if let Some(seed) = seed.clone() { create_nonce_account_with_seed( @@ -475,7 +476,7 @@ pub fn process_create_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }) } else { create_nonce_account( @@ -487,7 +488,7 @@ pub fn process_create_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }) }; Message::new(&ixs, Some(&config.signers[0].pubkey())) @@ -501,6 +502,7 @@ pub fn process_create_nonce_account( amount, &latest_blockhash, &config.signers[0].pubkey(), + compute_unit_limit, build_message, config.commitment, )?; diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index 3868fb6160c5ed..a03e351de26862 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -2,9 +2,12 @@ use { crate::{ checks::{check_account_for_balance_with_commitment, get_fee_for_messages}, cli::CliError, + compute_budget::{simulate_and_update_compute_unit_limit, UpdateComputeUnitLimitResult}, }, clap::ArgMatches, - solana_clap_utils::{input_parsers::lamports_of_sol, offline::SIGN_ONLY_ARG}, + solana_clap_utils::{ + compute_budget::ComputeUnitLimit, input_parsers::lamports_of_sol, offline::SIGN_ONLY_ARG, + }, solana_rpc_client::rpc_client::RpcClient, solana_sdk::{ commitment_config::CommitmentConfig, hash::Hash, message::Message, @@ -52,6 +55,7 @@ pub fn resolve_spend_tx_and_check_account_balance( amount: SpendAmount, blockhash: &Hash, from_pubkey: &Pubkey, + compute_unit_limit: ComputeUnitLimit, build_message: F, commitment: CommitmentConfig, ) -> Result<(Message, u64), CliError> @@ -65,6 +69,7 @@ where blockhash, from_pubkey, from_pubkey, + compute_unit_limit, build_message, commitment, ) @@ -77,6 +82,7 @@ pub fn resolve_spend_tx_and_check_account_balances( blockhash: &Hash, from_pubkey: &Pubkey, fee_pubkey: &Pubkey, + compute_unit_limit: ComputeUnitLimit, build_message: F, commitment: CommitmentConfig, ) -> Result<(Message, u64), CliError> @@ -92,6 +98,7 @@ where from_pubkey, fee_pubkey, 0, + compute_unit_limit, build_message, )?; Ok((message, spend)) @@ -113,6 +120,7 @@ where from_pubkey, fee_pubkey, from_rent_exempt_minimum, + compute_unit_limit, build_message, )?; if from_pubkey == fee_pubkey { @@ -150,41 +158,57 @@ fn resolve_spend_message( from_pubkey: &Pubkey, fee_pubkey: &Pubkey, from_rent_exempt_minimum: u64, + compute_unit_limit: ComputeUnitLimit, build_message: F, ) -> Result<(Message, SpendAndFee), CliError> where F: Fn(u64) -> Message, { - let fee = match blockhash { + let (fee, compute_unit_info) = match blockhash { Some(blockhash) => { let mut dummy_message = build_message(0); dummy_message.recent_blockhash = *blockhash; - get_fee_for_messages(rpc_client, &[&dummy_message])? 
+ let compute_unit_info = if compute_unit_limit == ComputeUnitLimit::Simulated { + // Simulate for correct compute units + if let UpdateComputeUnitLimitResult::UpdatedInstructionIndex(ix_index) = + simulate_and_update_compute_unit_limit(rpc_client, &mut dummy_message)? + { + Some((ix_index, dummy_message.instructions[ix_index].data.clone())) + } else { + None + } + } else { + None + }; + ( + get_fee_for_messages(rpc_client, &[&dummy_message])?, + compute_unit_info, + ) } - None => 0, // Offline, cannot calculate fee + None => (0, None), // Offline, cannot calculate fee }; - match amount { - SpendAmount::Some(lamports) => Ok(( + let (mut message, spend_and_fee) = match amount { + SpendAmount::Some(lamports) => ( build_message(lamports), SpendAndFee { spend: lamports, fee, }, - )), + ), SpendAmount::All => { let lamports = if from_pubkey == fee_pubkey { from_balance.saturating_sub(fee) } else { from_balance }; - Ok(( + ( build_message(lamports), SpendAndFee { spend: lamports, fee, }, - )) + ) } SpendAmount::RentExempt => { let mut lamports = if from_pubkey == fee_pubkey { @@ -193,13 +217,18 @@ where from_balance }; lamports = lamports.saturating_sub(from_rent_exempt_minimum); - Ok(( + ( build_message(lamports), SpendAndFee { spend: lamports, fee, }, - )) + ) } + }; + // After build message, update with correct compute units + if let Some((ix_index, ix_data)) = compute_unit_info { + message.instructions[ix_index].data = ix_data; } + Ok((message, spend_and_fee)) } diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 0e32a6216935a0..e63f3b7e1ee50a 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -1394,6 +1394,7 @@ pub fn process_create_stake_account( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let authorized = Authorized { staker: staker.unwrap_or(from.pubkey()), @@ -1437,7 +1438,7 @@ pub fn process_create_stake_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( @@ -1460,6 +1461,7 @@ pub fn process_create_stake_account( &recent_blockhash, &from.pubkey(), &fee_payer.pubkey(), + compute_unit_limit, build_message, config.commitment, )?; @@ -1826,6 +1828,7 @@ pub fn process_withdraw_stake( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let ixs = vec![stake_instruction::withdraw( &stake_account_address, @@ -1837,7 +1840,7 @@ pub fn process_withdraw_stake( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); if let Some(nonce_account) = &nonce_account { @@ -1859,6 +1862,7 @@ pub fn process_withdraw_stake( &recent_blockhash, &stake_account_address, &fee_payer.pubkey(), + compute_unit_limit, build_message, config.commitment, )?; diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index f4a3af3b3af49d..e50d74b2d4359f 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -349,6 +349,7 @@ pub fn process_set_validator_info( vec![config.signers[0]] }; + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let keys = keys.clone(); if balance == 0 { @@ -364,7 +365,7 @@ pub 
fn process_set_validator_info( ) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); instructions.extend_from_slice(&[config_instruction::store( &info_pubkey, @@ -387,7 +388,7 @@ pub fn process_set_validator_info( )] .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); Message::new(&instructions, Some(&config.signers[0].pubkey())) } @@ -401,6 +402,7 @@ pub fn process_set_validator_info( SpendAmount::Some(lamports), &latest_blockhash, &config.signers[0].pubkey(), + compute_unit_limit, build_message, config.commitment, )?; diff --git a/cli/src/vote.rs b/cli/src/vote.rs index c5070e35c37cb0..2e451a3d7f2150 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -821,6 +821,7 @@ pub fn process_create_vote_account( let nonce_authority = config.signers[nonce_authority]; let space = VoteStateVersions::vote_state_size_of(true) as u64; + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let vote_init = VoteInit { node_pubkey: identity_pubkey, @@ -849,7 +850,7 @@ pub fn process_create_vote_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); if let Some(nonce_account) = &nonce_account { @@ -873,6 +874,7 @@ pub fn process_create_vote_account( &recent_blockhash, &config.signers[0].pubkey(), &fee_payer.pubkey(), + compute_unit_limit, build_message, config.commitment, )?; @@ -1316,6 +1318,7 @@ pub fn process_withdraw_from_vote_account( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; + let compute_unit_limit = ComputeUnitLimit::Default; let build_message = |lamports| { let ixs = vec![withdraw( vote_account_pubkey, @@ -1326,7 +1329,7 @@ pub fn process_withdraw_from_vote_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); if let Some(nonce_account) = &nonce_account { @@ -1348,6 +1351,7 @@ pub fn process_withdraw_from_vote_account( &recent_blockhash, vote_account_pubkey, &fee_payer.pubkey(), + compute_unit_limit, build_message, config.commitment, )?; diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs index 97dd8c6b567212..728a529af246c9 100644 --- a/cli/src/wallet.rs +++ b/cli/src/wallet.rs @@ -911,6 +911,11 @@ pub fn process_transfer( None }; + let compute_unit_limit = if nonce_account.is_some() { + ComputeUnitLimit::Default + } else { + ComputeUnitLimit::Simulated + }; let build_message = |lamports| { let ixs = if let Some((base_pubkey, seed, program_id, from_pubkey)) = with_seed.as_ref() { vec![system_instruction::transfer_with_seed( @@ -924,14 +929,14 @@ pub fn process_transfer( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }) } else { vec![system_instruction::transfer(&from_pubkey, to, lamports)] .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }) }; @@ -954,6 +959,7 @@ pub fn process_transfer( &recent_blockhash, &from_pubkey, &fee_payer.pubkey(), + compute_unit_limit, build_message, config.commitment, )?; diff --git a/cli/tests/transfer.rs b/cli/tests/transfer.rs index e5ebac60976814..b1a750972543e1 100644 
--- a/cli/tests/transfer.rs
+++ b/cli/tests/transfer.rs
@@ -12,15 +12,18 @@ use {
     solana_rpc_client_nonce_utils::blockhash_query::{self, BlockhashQuery},
     solana_sdk::{
         commitment_config::CommitmentConfig,
+        compute_budget::ComputeBudgetInstruction,
         fee::FeeStructure,
+        message::Message,
         native_token::sol_to_lamports,
         nonce::State as NonceState,
         pubkey::Pubkey,
         signature::{keypair_from_seed, Keypair, NullSigner, Signer},
-        stake,
+        stake, system_instruction,
     },
     solana_streamer::socket::SocketAddrSpace,
     solana_test_validator::TestValidator,
+    test_case::test_case,
 };
 
 #[test]
@@ -474,16 +477,17 @@ fn test_transfer_multisession_signing() {
     check_balance!(sol_to_lamports(42.0), &rpc_client, &to_pubkey);
 }
 
-#[test]
-fn test_transfer_all() {
+#[test_case(None; "default")]
+#[test_case(Some(100_000); "with_compute_unit_price")]
+fn test_transfer_all(compute_unit_price: Option<u64>) {
     solana_logger::setup();
-    let fee = FeeStructure::default().get_max_fee(1, 0);
+    let lamports_per_signature = FeeStructure::default().get_max_fee(1, 0);
     let mint_keypair = Keypair::new();
     let mint_pubkey = mint_keypair.pubkey();
     let faucet_addr = run_local_faucet(mint_keypair, None);
     let test_validator = TestValidator::with_custom_fees(
         mint_pubkey,
-        fee,
+        lamports_per_signature,
         Some(faucet_addr),
         SocketAddrSpace::Unspecified,
     );
@@ -492,13 +496,36 @@ fn test_transfer_all() {
         RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
 
     let default_signer = Keypair::new();
+    let recipient_pubkey = Pubkey::from([1u8; 32]);
+
+    let fee = {
+        let mut instructions = vec![system_instruction::transfer(
+            &default_signer.pubkey(),
+            &recipient_pubkey,
+            0,
+        )];
+        if let Some(compute_unit_price) = compute_unit_price {
+            // This is brittle and will need to be updated if the compute unit
+            // limit for the system program or compute budget program are changed,
+            // or if they're converted to BPF.
+ // See `solana_system_program::system_processor::DEFAULT_COMPUTE_UNITS` + // and `solana_compute_budget_program::DEFAULT_COMPUTE_UNITS` + instructions.push(ComputeBudgetInstruction::set_compute_unit_limit(450)); + instructions.push(ComputeBudgetInstruction::set_compute_unit_price( + compute_unit_price, + )); + } + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + let sample_message = + Message::new_with_blockhash(&instructions, Some(&default_signer.pubkey()), &blockhash); + rpc_client.get_fee_for_message(&sample_message).unwrap() + }; let mut config = CliConfig::recent_for_tests(); config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; let sender_pubkey = config.signers[0].pubkey(); - let recipient_pubkey = Pubkey::from([1u8; 32]); request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 500_000).unwrap(); check_balance!(500_000, &rpc_client, &sender_pubkey); @@ -522,7 +549,7 @@ fn test_transfer_all() { fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(0, &rpc_client, &sender_pubkey); From 8ae52fb63b825c39223a40cdd1e80d8e8bcfcdff Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Thu, 22 Aug 2024 08:28:33 -0500 Subject: [PATCH 189/529] accounts-db: reopen mmap as file-backed storage after shrink_progress drop (#1871) * reopen after shrink_progress drop * add test * allow deadcode * docu the whole fn --------- Co-authored-by: HaoranYi Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 5 + accounts-db/src/ancient_append_vecs.rs | 178 ++++++++++++++----------- 2 files changed, 107 insertions(+), 76 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ed2f6428c8b257..bae4526cf539ef 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3979,6 +3979,11 @@ impl AccountsDb { } } + #[cfg(feature = "dev-context-only-utils")] + pub fn set_storage_access(&mut self, storage_access: StorageAccess) { + self.storage_access = storage_access; + } + /// Sort `accounts` by pubkey and removes all but the *last* of consecutive /// accounts in the vector with the same pubkey. /// diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 095c7dd21881eb..6af2ae647bd25c 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -696,11 +696,20 @@ impl AccountsDb { let slot = shrink_collect.slot; let shrink_in_progress = write_ancient_accounts.shrinks_in_progress.remove(&slot); + + let mut reopen = false; if shrink_in_progress.is_none() { dropped_roots.push(slot); } else { - self.reopen_storage_as_readonly_shrinking_in_progress_ok(slot); + // Remember that we need to 'reopen' the storage for this + // 'slot'. Note that it is not *safe* to reopen the storage for + // the 'slot' here, because 'shrink_in_progress' is still alive. + // Storage map may still point to the old storage and will be + // updated to point to the new storage, after we drop + // 'shrink_in_progress'. + reopen = true; } + self.remove_old_stores_shrink( &shrink_collect, &self.shrink_ancient_stats.shrink_stats, @@ -710,6 +719,11 @@ impl AccountsDb { // If the slot is dead, remove the need to shrink the storage as the storage entries will be purged. self.shrink_candidate_slots.lock().unwrap().remove(&slot); + + if reopen { + // 'shrink_in_progress' is dead now. 
We can safely 'reopen' the new storage for 'slot'.
+                self.reopen_storage_as_readonly_shrinking_in_progress_ok(slot);
+            }
         }
         self.handle_dropped_roots_for_ancient(dropped_roots.into_iter());
         metrics.accumulate(&write_ancient_accounts.metrics);
@@ -1184,6 +1198,7 @@ pub mod tests {
             },
             ShrinkCollectRefs,
         },
+        accounts_file::StorageAccess,
         accounts_hash::AccountHash,
         accounts_index::UpsertReclaim,
         append_vec::{
@@ -1591,89 +1606,100 @@ pub mod tests {
         // or all slots shrunk so no roots or storages should be removed
         for in_shrink_candidate_slots in [false, true] {
             for all_slots_shrunk in [false, true] {
-                for num_slots in 0..3 {
-                    let (db, storages, slots, infos) = get_sample_storages(num_slots, None);
-                    let mut accounts_per_storage = infos
-                        .iter()
-                        .zip(
-                            storages
-                                .iter()
-                                .map(|store| db.get_unique_accounts_from_storage(store)),
-                        )
-                        .collect::<Vec<_>>();
-
-                    let alive_bytes = 1000;
-                    let accounts_to_combine = db.calc_accounts_to_combine(
-                        &mut accounts_per_storage,
-                        &default_tuning(),
-                        alive_bytes,
-                        IncludeManyRefSlots::Include,
-                    );
-                    let mut stats = ShrinkStatsSub::default();
-                    let mut write_ancient_accounts = WriteAncientAccounts::default();
+                for storage_access in [StorageAccess::Mmap, StorageAccess::File] {
+                    for num_slots in 0..3 {
+                        let (mut db, storages, slots, infos) = get_sample_storages(num_slots, None);
+                        db.set_storage_access(storage_access);
+                        let mut accounts_per_storage = infos
+                            .iter()
+                            .zip(
+                                storages
+                                    .iter()
+                                    .map(|store| db.get_unique_accounts_from_storage(store)),
+                            )
+                            .collect::<Vec<_>>();
 
-                    slots.clone().for_each(|slot| {
-                        db.add_root(slot);
-                        let storage = db.storage.get_slot_storage_entry(slot);
-                        assert!(storage.is_some());
-                        if in_shrink_candidate_slots {
-                            db.shrink_candidate_slots.lock().unwrap().insert(slot);
-                        }
-                    });
+                        let alive_bytes = 1000;
+                        let accounts_to_combine = db.calc_accounts_to_combine(
+                            &mut accounts_per_storage,
+                            &default_tuning(),
+                            alive_bytes,
+                            IncludeManyRefSlots::Include,
+                        );
+                        let mut stats = ShrinkStatsSub::default();
+                        let mut write_ancient_accounts = WriteAncientAccounts::default();
 
-                    let roots = db
-                        .accounts_index
-                        .roots_tracker
-                        .read()
-                        .unwrap()
-                        .alive_roots
-                        .get_all();
-                    assert_eq!(roots, slots.clone().collect::<Vec<_>>());
-
-                    if all_slots_shrunk {
-                        // make it look like each of the slots was shrunk
-                        slots.clone().for_each(|slot| {
-                            write_ancient_accounts
-                                .shrinks_in_progress
-                                .insert(slot, db.get_store_for_shrink(slot, 1));
-                        });
-                    }
-
-                    db.finish_combine_ancient_slots_packed_internal(
-                        accounts_to_combine,
-                        write_ancient_accounts,
-                        &mut stats,
-                    );
-
-                    slots.clone().for_each(|slot| {
-                        assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&slot));
-                    });
+                        slots.clone().for_each(|slot| {
+                            db.add_root(slot);
+                            let storage = db.storage.get_slot_storage_entry(slot);
+                            assert!(storage.is_some());
+                            if in_shrink_candidate_slots {
+                                db.shrink_candidate_slots.lock().unwrap().insert(slot);
+                            }
+                        });
 
-                    let roots_after = db
-                        .accounts_index
-                        .roots_tracker
-                        .read()
-                        .unwrap()
-                        .alive_roots
-                        .get_all();
+                        let roots = db
+                            .accounts_index
+                            .roots_tracker
+                            .read()
+                            .unwrap()
+                            .alive_roots
+                            .get_all();
+                        assert_eq!(roots, slots.clone().collect::<Vec<_>>());
 
-                    assert_eq!(
-                        roots_after,
-                        if all_slots_shrunk {
-                            slots.clone().collect::<Vec<_>>()
-                        } else {
-                            vec![]
-                        },
-                        "all_slots_shrunk: {all_slots_shrunk}"
-                    );
-                    slots.for_each(|slot| {
-                        let storage = db.storage.get_slot_storage_entry(slot);
                         if all_slots_shrunk {
-                            assert!(storage.is_some());
-                        } else {
-                            assert!(storage.is_none());
+                            // make it look like each of the slots was shrunk
+                            slots.clone().for_each(|slot| {
+                                write_ancient_accounts
+                                    .shrinks_in_progress
+                                    .insert(slot, db.get_store_for_shrink(slot, 1));
+                            });
                         }
-                    });
+
+                        db.finish_combine_ancient_slots_packed_internal(
+                            accounts_to_combine,
+                            write_ancient_accounts,
+                            &mut stats,
+                        );
+
+                        slots.clone().for_each(|slot| {
+                            assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&slot));
+                        });
+
+                        let roots_after = db
+                            .accounts_index
+                            .roots_tracker
+                            .read()
+                            .unwrap()
+                            .alive_roots
+                            .get_all();
+
+                        assert_eq!(
+                            roots_after,
+                            if all_slots_shrunk {
+                                slots.clone().collect::<Vec<_>>()
+                            } else {
+                                vec![]
+                            },
+                            "all_slots_shrunk: {all_slots_shrunk}"
+                        );
+                        slots.for_each(|slot| {
+                            let storage = db.storage.get_slot_storage_entry(slot);
+                            if all_slots_shrunk {
+                                assert!(storage.is_some());
+                                // Here we use can_append() as a proxy to assert the backup storage of the accounts after shrinking.
+                                // When storage_access is set to `File`, after shrinking an ancient slot, the backup storage should be
+                                // open as File, which means can_append() will return false.
+                                // When storage_access is set to `Mmap`, backup storage is still Mmap, and can_append() will return true.
+                                assert_eq!(
+                                    storage.unwrap().accounts.can_append(),
+                                    storage_access == StorageAccess::Mmap
+                                );
+                            } else {
+                                assert!(storage.is_none());
+                            }
+                        });
+                    }
                 }
             }
         }

From 530e9c3f07a61a087357e008d0ad18596331dd4d Mon Sep 17 00:00:00 2001
From: Justin Starry
Date: Thu, 22 Aug 2024 21:33:24 +0800
Subject: [PATCH 190/529] feat: support committing fee-only transactions
 (#2425)

* feat: support committing fee-only transactions

* update and add new tests

* fix sbf test

* feedback

* fix new test

* test cleanup

---
 Cargo.lock                               |   1 +
 ledger/src/blockstore_processor.rs       |  26 ++-
 programs/sbf/tests/programs.rs           |  32 +--
 runtime/src/bank.rs                      | 143 +++++++------
 runtime/src/bank/tests.rs                | 244 +++++++++++++++++++----
 sdk/src/feature_set.rs                   |   5 +
 svm/Cargo.toml                           |   1 +
 svm/src/account_saver.rs                 | 148 ++++++++++----
 svm/src/rollback_accounts.rs             |  16 ++
 svm/src/transaction_commit_result.rs     |   1 +
 svm/src/transaction_execution_result.rs  |  26 +--
 svm/src/transaction_processing_result.rs |  62 +++++-
 svm/src/transaction_processor.rs         |  23 ++-
 svm/tests/concurrent_tests.rs            |  14 +-
 svm/tests/conformance.rs                 |   3 +-
 svm/tests/integration_test.rs            |   8 +
 16 files changed, 559 insertions(+), 194 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 90d37815cebba9..629d897225c250 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7682,6 +7682,7 @@ dependencies = [
 name = "solana-svm"
 version = "2.1.0"
 dependencies = [
+ "assert_matches",
 "bincode",
 "itertools 0.12.1",
 "lazy_static",
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index d34543db73993c..304cc549e57f5b 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -3801,13 +3801,14 @@ pub mod tests {
 
     #[test]
     fn test_update_transaction_statuses() {
-        // Make sure instruction errors still update the signature cache
         let GenesisConfigInfo {
             genesis_config,
             mint_keypair,
             ..
} = create_genesis_config(11_000);
         let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
+
+        // Make sure instruction errors still update the signature cache
         let pubkey = solana_sdk::pubkey::new_rand();
         bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
         assert_eq!(bank.transaction_count(), 1);
@@ -3824,6 +3825,29 @@ pub mod tests {
             Err(TransactionError::AlreadyProcessed)
         );
 
+        // Make sure fees-only transactions still update the signature cache
+        let missing_program_id = Pubkey::new_unique();
+        let tx = Transaction::new_signed_with_payer(
+            &[Instruction::new_with_bincode(
+                missing_program_id,
+                &10,
+                Vec::new(),
+            )],
+            Some(&mint_keypair.pubkey()),
+            &[&mint_keypair],
+            bank.last_blockhash(),
+        );
+        // First process attempt will fail but still update status cache
+        assert_eq!(
+            bank.process_transaction(&tx),
+            Err(TransactionError::ProgramAccountNotFound)
+        );
+        // Second attempt will be rejected since tx was already in status cache
+        assert_eq!(
+            bank.process_transaction(&tx),
+            Err(TransactionError::AlreadyProcessed)
+        );
+
         // Make sure other errors don't update the signature cache
         let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default());
         let signature = tx.signatures[0];
diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs
index b36f4d88bda09b..1b44bacfb55ffa 100644
--- a/programs/sbf/tests/programs.rs
+++ b/programs/sbf/tests/programs.rs
@@ -62,7 +62,7 @@ use {
         transaction::{SanitizedTransaction, Transaction, TransactionError, VersionedTransaction},
     },
     solana_svm::{
-        transaction_commit_result::CommittedTransaction,
+        transaction_commit_result::{CommittedTransaction, TransactionCommitResult},
         transaction_execution_result::InnerInstruction,
         transaction_processor::ExecutionRecordingConfig,
     },
@@ -92,6 +92,20 @@ fn process_transaction_and_record_inner(
     Vec<Vec<InnerInstruction>>,
     Vec<String>,
 ) {
+    let commit_result = load_execute_and_commit_transaction(bank, tx);
+    let CommittedTransaction {
+        inner_instructions,
+        log_messages,
+        status,
+        ..
+    } = commit_result.unwrap();
+    let inner_instructions = inner_instructions.expect("cpi recording should be enabled");
+    let log_messages = log_messages.expect("log recording should be enabled");
+    (status, inner_instructions, log_messages)
+}
+
+#[cfg(feature = "sbf_rust")]
+fn load_execute_and_commit_transaction(bank: &Bank, tx: Transaction) -> TransactionCommitResult {
     let txs = vec![tx];
     let tx_batch = bank.prepare_batch_for_tests(txs);
     let mut commit_results = bank
@@ -108,15 +122,7 @@ fn process_transaction_and_record_inner(
         None,
     )
     .0;
-    let CommittedTransaction {
-        inner_instructions,
-        log_messages,
-        status,
-        ..
- } = commit_results.swap_remove(0).unwrap(); - let inner_instructions = inner_instructions.expect("cpi recording should be enabled"); - let log_messages = log_messages.expect("log recording should be enabled"); - (status, inner_instructions, log_messages) + commit_results.pop().unwrap() } #[cfg(feature = "sbf_rust")] @@ -1880,10 +1886,10 @@ fn test_program_sbf_invoke_in_same_tx_as_deployment() { bank.last_blockhash(), ); if index == 0 { - let results = execute_transactions(&bank, vec![tx]); + let result = load_execute_and_commit_transaction(&bank, tx); assert_eq!( - results[0].as_ref().unwrap_err(), - &TransactionError::ProgramAccountNotFound, + result.unwrap().status, + Err(TransactionError::ProgramAccountNotFound), ); } else { let (result, _, _) = process_transaction_and_record_inner(&bank, tx); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f54e5f46793c64..34a1cd9d70b89c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -160,7 +160,8 @@ use { }, transaction_processing_callback::TransactionProcessingCallback, transaction_processing_result::{ - TransactionProcessingResult, TransactionProcessingResultExtensions, + ProcessedTransaction, TransactionProcessingResult, + TransactionProcessingResultExtensions, }, transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, @@ -328,6 +329,7 @@ pub struct LoadAndExecuteTransactionsOutput { pub processed_counts: ProcessedTransactionCounts, } +#[derive(Debug, PartialEq)] pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -3098,14 +3100,13 @@ impl Bank { assert_eq!(sanitized_txs.len(), processing_results.len()); for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) { if let Ok(processed_tx) = &processing_result { - let details = &processed_tx.execution_details; // Add the message hash to the status cache to ensure that this message // won't be processed again with a different signature. status_cache.insert( tx.message().recent_blockhash(), tx.message_hash(), self.slot(), - details.status.clone(), + processed_tx.status(), ); // Add the transaction signature to the status cache so that transaction status // can be queried by transaction signature over RPC. 
In the future, this should
@@ -3114,7 +3115,7 @@ impl Bank {
                     tx.message().recent_blockhash(),
                     tx.signature(),
                     self.slot(),
-                    details.status.clone(),
+                    processed_tx.status(),
                 );
             }
         }
@@ -3364,30 +3365,35 @@ impl Bank {
         let processing_result = processing_results
             .pop()
             .unwrap_or(Err(TransactionError::InvalidProgramForExecution));
-        let flattened_result = processing_result.flattened_result();
-        let (post_simulation_accounts, logs, return_data, inner_instructions) =
+        let (post_simulation_accounts, result, logs, return_data, inner_instructions) =
             match processing_result {
-                Ok(processed_tx) => {
-                    let details = processed_tx.execution_details;
-                    let post_simulation_accounts = processed_tx
-                        .loaded_transaction
-                        .accounts
-                        .into_iter()
-                        .take(number_of_accounts)
-                        .collect::<Vec<_>>();
-                    (
-                        post_simulation_accounts,
-                        details.log_messages,
-                        details.return_data,
-                        details.inner_instructions,
-                    )
-                }
-                Err(_) => (vec![], None, None, None),
+                Ok(processed_tx) => match processed_tx {
+                    ProcessedTransaction::Executed(executed_tx) => {
+                        let details = executed_tx.execution_details;
+                        let post_simulation_accounts = executed_tx
+                            .loaded_transaction
+                            .accounts
+                            .into_iter()
+                            .take(number_of_accounts)
+                            .collect::<Vec<_>>();
+                        (
+                            post_simulation_accounts,
+                            details.status,
+                            details.log_messages,
+                            details.return_data,
+                            details.inner_instructions,
+                        )
+                    }
+                    ProcessedTransaction::FeesOnly(fees_only_tx) => {
+                        (vec![], Err(fees_only_tx.load_error), None, None, None)
+                    }
+                },
+                Err(error) => (vec![], Err(error), None, None, None),
             };
         let logs = logs.unwrap_or_default();
 
         TransactionSimulationResult {
-            result: flattened_result,
+            result,
             logs,
             post_simulation_accounts,
             units_consumed,
@@ -3567,7 +3573,8 @@ impl Bank {
             .filter_map(|(processing_result, transaction)| {
                 // Skip log collection for unprocessed transactions
                 let processed_tx = processing_result.processed_transaction()?;
-                let execution_details = &processed_tx.execution_details;
+                // Skip log collection for unexecuted transactions
+                let execution_details = processed_tx.execution_details()?;
                 Self::collect_transaction_logs(
                     &transaction_log_collector_config,
                     transaction,
@@ -3717,7 +3724,7 @@ impl Bank {
             .iter()
             .for_each(|processing_result| match processing_result {
                 Ok(processed_tx) => {
-                    fees += processed_tx.loaded_transaction.fee_details.total_fee();
+                    fees += processed_tx.fee_details().total_fee();
                 }
                 Err(_) => {}
             });
@@ -3736,8 +3743,7 @@ impl Bank {
            .iter()
            .for_each(|processing_result| match processing_result {
                Ok(processed_tx) => {
-                    accumulated_fee_details
-                        .accumulate(&processed_tx.loaded_transaction.fee_details);
+                    accumulated_fee_details.accumulate(&processed_tx.fee_details());
                }
                Err(_) => {}
            });
@@ -3810,7 +3816,9 @@ impl Bank {
         let ((), update_executors_us) = measure_us!({
             let mut cache = None;
             for processing_result in &processing_results {
-                if let Some(executed_tx) = processing_result.processed_transaction() {
+                if let Some(ProcessedTransaction::Executed(executed_tx)) =
+                    processing_result.processed_transaction()
+                {
                     let programs_modified_by_tx = &executed_tx.programs_modified_by_tx;
                     if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() {
                         cache
@@ -3826,7 +3834,7 @@ impl Bank {
         let accounts_data_len_delta = processing_results
             .iter()
             .filter_map(|processing_result| processing_result.processed_transaction())
-            .map(|processed_tx| &processed_tx.execution_details)
+            .filter_map(|processed_tx| processed_tx.execution_details())
             .filter_map(|details| {
                 details
                     .status
@@ -3864,37 +3872,52 @@ impl Bank {
     ) -> Vec<TransactionCommitResult> {
processing_results .into_iter() - .map(|processing_result| { - let processed_tx = processing_result?; - let execution_details = processed_tx.execution_details; - let LoadedTransaction { - rent_debits, - accounts: loaded_accounts, - loaded_accounts_data_size, - fee_details, - .. - } = processed_tx.loaded_transaction; - - // Rent is only collected for successfully executed transactions - let rent_debits = if execution_details.was_successful() { - rent_debits - } else { - RentDebits::default() - }; + .map(|processing_result| match processing_result? { + ProcessedTransaction::Executed(executed_tx) => { + let execution_details = executed_tx.execution_details; + let LoadedTransaction { + rent_debits, + accounts: loaded_accounts, + loaded_accounts_data_size, + fee_details, + .. + } = executed_tx.loaded_transaction; + + // Rent is only collected for successfully executed transactions + let rent_debits = if execution_details.was_successful() { + rent_debits + } else { + RentDebits::default() + }; - Ok(CommittedTransaction { - status: execution_details.status, - log_messages: execution_details.log_messages, - inner_instructions: execution_details.inner_instructions, - return_data: execution_details.return_data, - executed_units: execution_details.executed_units, - fee_details, - rent_debits, + Ok(CommittedTransaction { + status: execution_details.status, + log_messages: execution_details.log_messages, + inner_instructions: execution_details.inner_instructions, + return_data: execution_details.return_data, + executed_units: execution_details.executed_units, + fee_details, + rent_debits, + loaded_account_stats: TransactionLoadedAccountsStats { + loaded_accounts_count: loaded_accounts.len(), + loaded_accounts_data_size, + }, + }) + } + ProcessedTransaction::FeesOnly(fees_only_tx) => Ok(CommittedTransaction { + status: Err(fees_only_tx.load_error), + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: 0, + rent_debits: RentDebits::default(), + fee_details: fees_only_tx.fee_details, loaded_account_stats: TransactionLoadedAccountsStats { - loaded_accounts_count: loaded_accounts.len(), - loaded_accounts_data_size, + loaded_accounts_count: fees_only_tx.rollback_accounts.count(), + loaded_accounts_data_size: fees_only_tx.rollback_accounts.data_size() + as u32, }, - }) + }), }) .collect() } @@ -3903,6 +3926,7 @@ impl Bank { let collected_rent = processing_results .iter() .filter_map(|processing_result| processing_result.processed_transaction()) + .filter_map(|processed_tx| processed_tx.executed_transaction()) .filter(|executed_tx| executed_tx.was_successful()) .map(|executed_tx| executed_tx.loaded_transaction.rent) .sum(); @@ -5853,6 +5877,11 @@ impl Bank { .processed_transaction() .map(|processed_tx| (tx, processed_tx)) }) + .filter_map(|(tx, processed_tx)| { + processed_tx + .executed_transaction() + .map(|executed_tx| (tx, executed_tx)) + }) .filter(|(_, executed_tx)| executed_tx.was_successful()) .flat_map(|(tx, executed_tx)| { let num_account_keys = tx.message().account_keys().len(); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 36d3ca8d1c7f8b..dfbd449731179a 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -101,7 +101,8 @@ use { }, solana_stake_program::stake_state::{self, StakeStateV2}, solana_svm::{ - account_loader::LoadedTransaction, + account_loader::{FeesOnlyTransaction, LoadedTransaction}, + rollback_accounts::RollbackAccounts, transaction_commit_result::TransactionCommitResultExtensions, 
transaction_execution_result::ExecutedTransaction, }, @@ -234,25 +235,27 @@ fn test_race_register_tick_freeze() { } } -fn new_processing_result( +fn new_executed_processing_result( status: Result<()>, fee_details: FeeDetails, ) -> TransactionProcessingResult { - Ok(ExecutedTransaction { - loaded_transaction: LoadedTransaction { - fee_details, - ..LoadedTransaction::default() - }, - execution_details: TransactionExecutionDetails { - status, - log_messages: None, - inner_instructions: None, - return_data: None, - executed_units: 0, - accounts_data_len_delta: 0, + Ok(ProcessedTransaction::Executed(Box::new( + ExecutedTransaction { + loaded_transaction: LoadedTransaction { + fee_details, + ..LoadedTransaction::default() + }, + execution_details: TransactionExecutionDetails { + status, + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: 0, + accounts_data_len_delta: 0, + }, + programs_modified_by_tx: HashMap::new(), }, - programs_modified_by_tx: HashMap::new(), - }) + ))) } impl Bank { @@ -2880,14 +2883,22 @@ fn test_filter_program_errors_and_collect_fee() { let tx_fee = 42; let fee_details = FeeDetails::new(tx_fee, 0, false); let processing_results = vec![ - new_processing_result(Ok(()), fee_details), - new_processing_result( + Err(TransactionError::AccountNotFound), + new_executed_processing_result(Ok(()), fee_details), + new_executed_processing_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), )), fee_details, ), + Ok(ProcessedTransaction::FeesOnly(Box::new( + FeesOnlyTransaction { + load_error: TransactionError::InvalidProgramForExecution, + rollback_accounts: RollbackAccounts::default(), + fee_details, + }, + ))), ]; let initial_balance = bank.get_balance(&leader); @@ -2895,7 +2906,7 @@ fn test_filter_program_errors_and_collect_fee() { bank.freeze(); assert_eq!( bank.get_balance(&leader), - initial_balance + bank.fee_rate_governor.burn(tx_fee * 2).0 + initial_balance + bank.fee_rate_governor.burn(tx_fee * 3).0 ); } @@ -2911,14 +2922,22 @@ fn test_filter_program_errors_and_collect_priority_fee() { let priority_fee = 42; let fee_details: FeeDetails = FeeDetails::new(0, priority_fee, false); let processing_results = vec![ - new_processing_result(Ok(()), fee_details), - new_processing_result( + Err(TransactionError::AccountNotFound), + new_executed_processing_result(Ok(()), fee_details), + new_executed_processing_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), )), fee_details, ), + Ok(ProcessedTransaction::FeesOnly(Box::new( + FeesOnlyTransaction { + load_error: TransactionError::InvalidProgramForExecution, + rollback_accounts: RollbackAccounts::default(), + fee_details, + }, + ))), ]; let initial_balance = bank.get_balance(&leader); @@ -2926,7 +2945,7 @@ fn test_filter_program_errors_and_collect_priority_fee() { bank.freeze(); assert_eq!( bank.get_balance(&leader), - initial_balance + bank.fee_rate_governor.burn(priority_fee * 2).0 + initial_balance + bank.fee_rate_governor.burn(priority_fee * 3).0 ); } @@ -3081,6 +3100,103 @@ fn test_interleaving_locks() { .is_ok()); } +#[test_case(false; "disable fees-only transactions")] +#[test_case(true; "enable fees-only transactions")] +fn test_load_and_execute_commit_transactions_fees_only(enable_fees_only_txs: bool) { + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); + if !enable_fees_only_txs { + genesis_config + .accounts + .remove(&solana_sdk::feature_set::enable_transaction_loading_failure_fees::id()); + } + genesis_config.rent = Rent::default(); + genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = Bank::new_from_parent( + bank, + &Pubkey::new_unique(), + genesis_config.epoch_schedule.get_first_slot_in_epoch(1), + ); + + // Use rent-paying fee payer to show that rent is not collected for fees + // only transactions even when they use a rent-paying account. + let rent_paying_fee_payer = Pubkey::new_unique(); + bank.store_account( + &rent_paying_fee_payer, + &AccountSharedData::new( + genesis_config.rent.minimum_balance(0) - 1, + 0, + &system_program::id(), + ), + ); + + // Use nonce to show that loaded account stats also included loaded + // nonce account size + let nonce_size = nonce::State::size(); + let nonce_balance = genesis_config.rent.minimum_balance(nonce_size); + let nonce_pubkey = Pubkey::new_unique(); + let nonce_authority = rent_paying_fee_payer; + let nonce_initial_hash = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_data = nonce::state::Data::new(nonce_authority, nonce_initial_hash, 5000); + let nonce_account = AccountSharedData::new_data( + nonce_balance, + &nonce::state::Versions::new(nonce::State::Initialized(nonce_data.clone())), + &system_program::id(), + ) + .unwrap(); + bank.store_account(&nonce_pubkey, &nonce_account); + + // Invoke missing program to trigger load error in order to commit a + // fees-only transaction + let missing_program_id = Pubkey::new_unique(); + let transaction = Transaction::new_unsigned(Message::new_with_blockhash( + &[ + system_instruction::advance_nonce_account(&nonce_pubkey, &rent_paying_fee_payer), + Instruction::new_with_bincode(missing_program_id, &0, vec![]), + ], + Some(&rent_paying_fee_payer), + &nonce_data.blockhash(), + )); + + let batch = bank.prepare_batch_for_tests(vec![transaction]); + let commit_results = bank + .load_execute_and_commit_transactions( + &batch, + MAX_PROCESSING_AGE, + true, + ExecutionRecordingConfig::new_single_setting(true), + &mut ExecuteTimings::default(), + None, + ) + .0; + + if enable_fees_only_txs { + assert_eq!( + commit_results, + vec![Ok(CommittedTransaction { + status: Err(TransactionError::ProgramAccountNotFound), + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: 0, + fee_details: FeeDetails::new(5000, 0, true), + rent_debits: RentDebits::default(), + loaded_account_stats: TransactionLoadedAccountsStats { + loaded_accounts_count: 2, + loaded_accounts_data_size: nonce_size as u32, + }, + })] + ); + } else { + assert_eq!( + commit_results, + vec![Err(TransactionError::ProgramAccountNotFound)] + ); + } +} + #[test] fn test_readonly_relaxed_locks() { let (genesis_config, _) = create_genesis_config(3); @@ -6932,6 +7048,15 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { assert!(slot_versions.is_empty()); } + // Advance bank to get a new last blockhash so that when we retry invocation + // after creating the program, the new transaction created below with the + // same `invocation_message` as above doesn't return `AlreadyProcessed` when + // processed. 
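// Editorial sketch of the status-cache behavior motivating this step, shown
// as comments because it is not part of the test (hypothetical `payer` and
// `recipient` bindings): the cache keys on (recent_blockhash, message hash),
// so an identical message is rejected until a new blockhash is reached.
//
//     let tx = system_transaction::transfer(&payer, &recipient, 1, bank.last_blockhash());
//     assert!(bank.process_transaction(&tx).is_ok());
//     assert_eq!(
//         bank.process_transaction(&tx),
//         Err(TransactionError::AlreadyProcessed)
//     );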
+ goto_end_of_slot(bank.clone()); + let bank = bank_client + .advance_slot(1, bank_forks.as_ref(), &mint_keypair.pubkey()) + .unwrap(); + // Load program file let mut file = File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so") .expect("file open failed"); @@ -6997,8 +7122,8 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { let program_cache = bank.transaction_processor.program_cache.read().unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 1); - assert_eq!(slot_versions[0].deployment_slot, 0); - assert_eq!(slot_versions[0].effective_slot, 0); + assert_eq!(slot_versions[0].deployment_slot, bank.slot()); + assert_eq!(slot_versions[0].effective_slot, bank.slot()); assert!(matches!( slot_versions[0].program, ProgramCacheEntryType::Closed, @@ -7021,8 +7146,8 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { let program_cache = bank.transaction_processor.program_cache.read().unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&buffer_address); assert_eq!(slot_versions.len(), 1); - assert_eq!(slot_versions[0].deployment_slot, 0); - assert_eq!(slot_versions[0].effective_slot, 0); + assert_eq!(slot_versions[0].deployment_slot, bank.slot()); + assert_eq!(slot_versions[0].effective_slot, bank.slot()); assert!(matches!( slot_versions[0].program, ProgramCacheEntryType::Closed, @@ -7123,14 +7248,14 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { let program_cache = bank.transaction_processor.program_cache.read().unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 2); - assert_eq!(slot_versions[0].deployment_slot, 0); - assert_eq!(slot_versions[0].effective_slot, 0); + assert_eq!(slot_versions[0].deployment_slot, bank.slot() - 1); + assert_eq!(slot_versions[0].effective_slot, bank.slot() - 1); assert!(matches!( slot_versions[0].program, ProgramCacheEntryType::Closed, )); - assert_eq!(slot_versions[1].deployment_slot, 0); - assert_eq!(slot_versions[1].effective_slot, 1); + assert_eq!(slot_versions[1].deployment_slot, bank.slot() - 1); + assert_eq!(slot_versions[1].effective_slot, bank.slot()); assert!(matches!( slot_versions[1].program, ProgramCacheEntryType::Loaded(_), @@ -12772,22 +12897,56 @@ fn test_failed_simulation_compute_units() { assert_eq!(expected_consumed_units, simulation.units_consumed); } +/// Test that simulations report the load error of fees-only transactions +#[test] +fn test_failed_simulation_load_error() { + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let missing_program_id = Pubkey::new_unique(); + let message = Message::new( + &[Instruction::new_with_bincode( + missing_program_id, + &0, + vec![], + )], + Some(&mint_keypair.pubkey()), + ); + let transaction = Transaction::new(&[&mint_keypair], message, bank.last_blockhash()); + + bank.freeze(); + let sanitized = SanitizedTransaction::from_transaction_for_tests(transaction); + let simulation = bank.simulate_transaction(&sanitized, false); + assert_eq!( + simulation, + TransactionSimulationResult { + result: Err(TransactionError::ProgramAccountNotFound), + logs: vec![], + post_simulation_accounts: vec![], + units_consumed: 0, + return_data: None, + inner_instructions: None, + } + ); +} + #[test] fn test_filter_program_errors_and_collect_fee_details() { - // TX | 
EXECUTION RESULT | COLLECT | COLLECT + // TX | PROCESSING RESULT | COLLECT | COLLECT // | | (TX_FEE, PRIO_FEE) | RESULT // --------------------------------------------------------------------------------- - // tx1 | not executed | (0 , 0) | Original Err - // tx2 | executed and no error | (5_000, 1_000) | Ok + // tx1 | not processed | (0 , 0) | Original Err + // tx2 | processed but not executed | (5_000, 1_000) | Ok // tx3 | executed has error | (5_000, 1_000) | Ok + // tx4 | executed and no error | (5_000, 1_000) | Ok // let initial_payer_balance = 7_000; let tx_fee = 5000; let priority_fee = 1000; - let tx_fee_details = FeeDetails::new(tx_fee, priority_fee, false); + let fee_details = FeeDetails::new(tx_fee, priority_fee, false); let expected_collected_fee_details = CollectorFeeDetails { - transaction_fee: 2 * tx_fee, - priority_fee: 2 * priority_fee, + transaction_fee: 3 * tx_fee, + priority_fee: 3 * priority_fee, }; let GenesisConfigInfo { @@ -12799,14 +12958,21 @@ fn test_filter_program_errors_and_collect_fee_details() { let results = vec![ Err(TransactionError::AccountNotFound), - new_processing_result(Ok(()), tx_fee_details), - new_processing_result( + Ok(ProcessedTransaction::FeesOnly(Box::new( + FeesOnlyTransaction { + load_error: TransactionError::InvalidProgramForExecution, + rollback_accounts: RollbackAccounts::default(), + fee_details, + }, + ))), + new_executed_processing_result( Err(TransactionError::InstructionError( 0, SystemError::ResultWithNegativeLamports.into(), )), - tx_fee_details, + fee_details, ), + new_executed_processing_result(Ok(()), fee_details), ]; bank.filter_program_errors_and_collect_fee_details(&results); diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 7322fdbfde900c..0cb89f631eabef 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -841,6 +841,10 @@ pub mod vote_only_retransmitter_signed_fec_sets { solana_sdk::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); } +pub mod enable_transaction_loading_failure_fees { + solana_sdk::declare_id!("PaymEPK2oqwT9TXAVfadjztH2H6KfLEB9Hhd5Q5frvP"); +} + pub mod enable_turbine_extended_fanout_experiments { solana_sdk::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); } @@ -1054,6 +1058,7 @@ lazy_static! 
{ (move_stake_and_move_lamports_ixs::id(), "Enable MoveStake and MoveLamports stake program instructions #1610"), (ed25519_precompile_verify_strict::id(), "Use strict verification in ed25519 precompile SIMD-0152"), (vote_only_retransmitter_signed_fec_sets::id(), "vote only on retransmitter signed fec sets"), + (enable_transaction_loading_failure_fees::id(), "Enable fees for some additional transaction failures SIMD-0082"), (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), /*************** ADD NEW FEATURES HERE ***************/ diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 603f0c8ae8a1d5..2627f4fbd8bf5a 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -40,6 +40,7 @@ crate-type = ["lib"] name = "solana_svm" [dev-dependencies] +assert_matches = { workspace = true } bincode = { workspace = true } lazy_static = { workspace = true } libsecp256k1 = { workspace = true } diff --git a/svm/src/account_saver.rs b/svm/src/account_saver.rs index f0881050dea4bc..0ecbd181e6698f 100644 --- a/svm/src/account_saver.rs +++ b/svm/src/account_saver.rs @@ -2,7 +2,8 @@ use { crate::{ rollback_accounts::RollbackAccounts, transaction_processing_result::{ - TransactionProcessingResult, TransactionProcessingResultExtensions, + ProcessedTransaction, TransactionProcessingResult, + TransactionProcessingResultExtensions, }, }, solana_sdk::{ @@ -27,12 +28,15 @@ fn max_number_of_accounts_to_collect( .processed_transaction() .map(|processed_tx| (processed_tx, tx)) }) - .map( - |(processed_tx, tx)| match processed_tx.execution_details.status { - Ok(_) => tx.num_write_locks() as usize, - Err(_) => processed_tx.loaded_transaction.rollback_accounts.count(), - }, - ) + .map(|(processed_tx, tx)| match processed_tx { + ProcessedTransaction::Executed(executed_tx) => { + match executed_tx.execution_details.status { + Ok(_) => tx.num_write_locks() as usize, + Err(_) => executed_tx.loaded_transaction.rollback_accounts.count(), + } + } + ProcessedTransaction::FeesOnly(fees_only_tx) => fees_only_tx.rollback_accounts.count(), + }) .sum() } @@ -51,22 +55,36 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( continue; }; - if processed_tx.execution_details.status.is_ok() { - collect_accounts_for_successful_tx( - &mut accounts, - &mut transactions, - transaction, - &processed_tx.loaded_transaction.accounts, - ); - } else { - collect_accounts_for_failed_tx( - &mut accounts, - &mut transactions, - transaction, - &mut processed_tx.loaded_transaction.rollback_accounts, - durable_nonce, - lamports_per_signature, - ); + match processed_tx { + ProcessedTransaction::Executed(executed_tx) => { + if executed_tx.execution_details.status.is_ok() { + collect_accounts_for_successful_tx( + &mut accounts, + &mut transactions, + transaction, + &executed_tx.loaded_transaction.accounts, + ); + } else { + collect_accounts_for_failed_tx( + &mut accounts, + &mut transactions, + transaction, + &mut executed_tx.loaded_transaction.rollback_accounts, + durable_nonce, + lamports_per_signature, + ); + } + } + ProcessedTransaction::FeesOnly(fees_only_tx) => { + collect_accounts_for_failed_tx( + &mut accounts, + &mut transactions, + transaction, + &mut fees_only_tx.rollback_accounts, + durable_nonce, + lamports_per_signature, + ); + } } } (accounts, transactions) @@ -141,7 +159,7 @@ mod tests { use { super::*, crate::{ - account_loader::LoadedTransaction, + account_loader::{FeesOnlyTransaction, LoadedTransaction}, nonce_info::NonceInfo, 
transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, }, @@ -178,22 +196,24 @@ mod tests { )) } - fn new_processing_result( + fn new_executed_processing_result( status: Result<()>, loaded_transaction: LoadedTransaction, ) -> TransactionProcessingResult { - Ok(ExecutedTransaction { - execution_details: TransactionExecutionDetails { - status, - log_messages: None, - inner_instructions: None, - return_data: None, - executed_units: 0, - accounts_data_len_delta: 0, + Ok(ProcessedTransaction::Executed(Box::new( + ExecutedTransaction { + execution_details: TransactionExecutionDetails { + status, + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: 0, + accounts_data_len_delta: 0, + }, + loaded_transaction, + programs_modified_by_tx: HashMap::new(), }, - loaded_transaction, - programs_modified_by_tx: HashMap::new(), - }) + ))) } #[test] @@ -259,8 +279,8 @@ mod tests { let txs = vec![tx0.clone(), tx1.clone()]; let mut processing_results = vec![ - new_processing_result(Ok(()), loaded0), - new_processing_result(Ok(()), loaded1), + new_executed_processing_result(Ok(()), loaded0), + new_executed_processing_result(Ok(()), loaded1), ]; let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 2); @@ -280,7 +300,7 @@ mod tests { } #[test] - fn test_nonced_failure_accounts_rollback_fee_payer_only() { + fn test_collect_accounts_for_failed_tx_rollback_fee_payer_only() { let from = keypair_from_seed(&[1; 32]).unwrap(); let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); @@ -312,7 +332,7 @@ mod tests { }; let txs = vec![tx]; - let mut processing_results = vec![new_processing_result( + let mut processing_results = vec![new_executed_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, @@ -337,7 +357,7 @@ mod tests { } #[test] - fn test_nonced_failure_accounts_rollback_separate_nonce_and_fee_payer() { + fn test_collect_accounts_for_failed_tx_rollback_separate_nonce_and_fee_payer() { let nonce_address = Pubkey::new_unique(); let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let from = keypair_from_seed(&[1; 32]).unwrap(); @@ -398,7 +418,7 @@ mod tests { let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let txs = vec![tx]; - let mut processing_results = vec![new_processing_result( + let mut processing_results = vec![new_executed_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, @@ -437,7 +457,7 @@ mod tests { } #[test] - fn test_nonced_failure_accounts_rollback_same_nonce_and_fee_payer() { + fn test_collect_accounts_for_failed_tx_rollback_same_nonce_and_fee_payer() { let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let nonce_address = nonce_authority.pubkey(); let from = keypair_from_seed(&[1; 32]).unwrap(); @@ -496,7 +516,7 @@ mod tests { let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let txs = vec![tx]; - let mut processing_results = vec![new_processing_result( + let mut processing_results = vec![new_executed_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, @@ -524,4 +544,44 @@ mod tests { ) .is_some()); } + + #[test] + fn test_collect_accounts_for_failed_fees_only_tx() { + let from = keypair_from_seed(&[1; 32]).unwrap(); + let from_address = from.pubkey(); + let to_address = Pubkey::new_unique(); + + let instructions = 
vec![system_instruction::transfer(&from_address, &to_address, 42)]; + let message = Message::new(&instructions, Some(&from_address)); + let blockhash = Hash::new_unique(); + let tx = new_sanitized_tx(&[&from], message, blockhash); + + let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); + + let txs = vec![tx]; + let mut processing_results = vec![Ok(ProcessedTransaction::FeesOnly(Box::new( + FeesOnlyTransaction { + load_error: TransactionError::InvalidProgramForExecution, + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::FeePayerOnly { + fee_payer_account: from_account_pre.clone(), + }, + }, + )))]; + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); + assert_eq!(max_collected_accounts, 1); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let (collected_accounts, _) = + collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); + assert_eq!(collected_accounts.len(), 1); + assert_eq!( + collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &from_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(), + from_account_pre, + ); + } } diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs index 71b670d37c4f85..c2c02f2f80bd43 100644 --- a/svm/src/rollback_accounts.rs +++ b/svm/src/rollback_accounts.rs @@ -79,6 +79,22 @@ impl RollbackAccounts { Self::SeparateNonceAndFeePayer { .. } => 2, } } + + /// Size of accounts tracked for rollback, used when calculating the actual + /// cost of transaction processing in the cost model. + pub fn data_size(&self) -> usize { + match self { + Self::FeePayerOnly { fee_payer_account } => fee_payer_account.data().len(), + Self::SameNonceAndFeePayer { nonce } => nonce.account().data().len(), + Self::SeparateNonceAndFeePayer { + nonce, + fee_payer_account, + } => fee_payer_account + .data() + .len() + .saturating_add(nonce.account().data().len()), + } + } } #[cfg(test)] diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs index 8bbada73634ad9..6d838ea0786d53 100644 --- a/svm/src/transaction_commit_result.rs +++ b/svm/src/transaction_commit_result.rs @@ -9,6 +9,7 @@ use { pub type TransactionCommitResult = TransactionResult; #[derive(Clone, Debug)] +#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))] pub struct CommittedTransaction { pub status: TransactionResult<()>, pub log_messages: Option>, diff --git a/svm/src/transaction_execution_result.rs b/svm/src/transaction_execution_result.rs index 6a41294ddb975e..c226ae262a82ad 100644 --- a/svm/src/transaction_execution_result.rs +++ b/svm/src/transaction_execution_result.rs @@ -7,36 +7,16 @@ pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList} use { crate::account_loader::LoadedTransaction, solana_program_runtime::loaded_programs::ProgramCacheEntry, - solana_sdk::{ - pubkey::Pubkey, - transaction::{self, TransactionError}, - transaction_context::TransactionReturnData, - }, + solana_sdk::{pubkey::Pubkey, transaction, transaction_context::TransactionReturnData}, std::{collections::HashMap, sync::Arc}, }; -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct TransactionLoadedAccountsStats { pub loaded_accounts_data_size: u32, pub loaded_accounts_count: usize, } -/// Type safe representation of a transaction execution attempt which -/// differentiates between a transaction that was executed (will be -/// committed to the ledger) 
and a transaction which wasn't executed -/// and will be dropped. -/// -/// Note: `Result` is not -/// used because it's easy to forget that the inner `details.status` field -/// is what should be checked to detect a successful transaction. This -/// enum provides a convenience method `Self::was_executed_successfully` to -/// make such checks hard to do incorrectly. -#[derive(Debug, Clone)] -pub enum TransactionExecutionResult { - Executed(Box), - NotExecuted(TransactionError), -} - #[derive(Debug, Clone)] pub struct ExecutedTransaction { pub loaded_transaction: LoadedTransaction, @@ -46,7 +26,7 @@ pub struct ExecutedTransaction { impl ExecutedTransaction { pub fn was_successful(&self) -> bool { - self.execution_details.status.is_ok() + self.execution_details.was_successful() } } diff --git a/svm/src/transaction_processing_result.rs b/svm/src/transaction_processing_result.rs index 0ad68e0d18a803..7802b9ac213808 100644 --- a/svm/src/transaction_processing_result.rs +++ b/svm/src/transaction_processing_result.rs @@ -1,10 +1,15 @@ use { - crate::transaction_execution_result::ExecutedTransaction, - solana_sdk::transaction::Result as TransactionResult, + crate::{ + account_loader::FeesOnlyTransaction, + transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, + }, + solana_sdk::{ + fee::FeeDetails, + transaction::{Result as TransactionResult, TransactionError}, + }, }; pub type TransactionProcessingResult = TransactionResult; -pub type ProcessedTransaction = ExecutedTransaction; pub trait TransactionProcessingResultExtensions { fn was_processed(&self) -> bool; @@ -14,6 +19,16 @@ pub trait TransactionProcessingResultExtensions { fn flattened_result(&self) -> TransactionResult<()>; } +#[derive(Debug)] +pub enum ProcessedTransaction { + /// Transaction was executed, but if execution failed, all account state changes + /// will be rolled back except deducted fees and any advanced nonces + Executed(Box), + /// Transaction was not able to be executed but fees are able to be + /// collected and any nonces are advanceable + FeesOnly(Box), +} + impl TransactionProcessingResultExtensions for TransactionProcessingResult { fn was_processed(&self) -> bool { self.is_ok() @@ -21,7 +36,7 @@ impl TransactionProcessingResultExtensions for TransactionProcessingResult { fn was_processed_with_successful_result(&self) -> bool { match self { - Ok(processed_tx) => processed_tx.was_successful(), + Ok(processed_tx) => processed_tx.was_processed_with_successful_result(), Err(_) => false, } } @@ -43,6 +58,43 @@ impl TransactionProcessingResultExtensions for TransactionProcessingResult { fn flattened_result(&self) -> TransactionResult<()> { self.as_ref() .map_err(|err| err.clone()) - .and_then(|processed_tx| processed_tx.execution_details.status.clone()) + .and_then(|processed_tx| processed_tx.status()) + } +} + +impl ProcessedTransaction { + fn was_processed_with_successful_result(&self) -> bool { + match self { + Self::Executed(executed_tx) => executed_tx.execution_details.status.is_ok(), + Self::FeesOnly(_) => false, + } + } + + pub fn status(&self) -> TransactionResult<()> { + match self { + Self::Executed(executed_tx) => executed_tx.execution_details.status.clone(), + Self::FeesOnly(details) => Err(TransactionError::clone(&details.load_error)), + } + } + + pub fn fee_details(&self) -> FeeDetails { + match self { + Self::Executed(executed_tx) => executed_tx.loaded_transaction.fee_details, + Self::FeesOnly(details) => details.fee_details, + } + } + + pub fn executed_transaction(&self) -> 
Option<&ExecutedTransaction> { + match self { + Self::Executed(context) => Some(context), + Self::FeesOnly { .. } => None, + } + } + + pub fn execution_details(&self) -> Option<&TransactionExecutionDetails> { + match self { + Self::Executed(context) => Some(&context.execution_details), + Self::FeesOnly { .. } => None, + } } } diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 8b1a30f1ad913b..75ddbca4ca1f73 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -16,7 +16,7 @@ use { transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, transaction_processing_callback::TransactionProcessingCallback, - transaction_processing_result::TransactionProcessingResult, + transaction_processing_result::{ProcessedTransaction, TransactionProcessingResult}, }, log::debug, percentage::Percentage, @@ -37,7 +37,7 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS}, clock::{Epoch, Slot}, - feature_set::{remove_rounding_in_fee_calculation, FeatureSet}, + feature_set::{self, remove_rounding_in_fee_calculation, FeatureSet}, fee::{FeeBudgetLimits, FeeStructure}, hash::Hash, inner_instruction::{InnerInstruction, InnerInstructionsList}, @@ -269,12 +269,12 @@ impl TransactionBatchProcessor { ); if program_cache_for_tx_batch.hit_max_limit { - const ERROR: TransactionError = TransactionError::ProgramCacheHitMaxLimit; - let processing_results = vec![Err(ERROR); sanitized_txs.len()]; return LoadAndExecuteSanitizedTransactionsOutput { error_metrics, execute_timings, - processing_results, + processing_results: (0..sanitized_txs.len()) + .map(|_| Err(TransactionError::ProgramCacheHitMaxLimit)) + .collect(), }; } @@ -294,13 +294,22 @@ impl TransactionBatchProcessor { &program_cache_for_tx_batch, )); + let enable_transaction_loading_failure_fees = environment + .feature_set + .is_active(&feature_set::enable_transaction_loading_failure_fees::id()); let (processing_results, execution_us): (Vec, u64) = measure_us!(loaded_transactions .into_iter() .zip(sanitized_txs.iter()) .map(|(load_result, tx)| match load_result { TransactionLoadResult::NotLoaded(err) => Err(err), - TransactionLoadResult::FeesOnly(fees_only_tx) => Err(fees_only_tx.load_error), + TransactionLoadResult::FeesOnly(fees_only_tx) => { + if enable_transaction_loading_failure_fees { + Ok(ProcessedTransaction::FeesOnly(Box::new(fees_only_tx))) + } else { + Err(fees_only_tx.load_error) + } + } TransactionLoadResult::Loaded(loaded_transaction) => { let executed_tx = self.execute_loaded_transaction( tx, @@ -318,7 +327,7 @@ impl TransactionBatchProcessor { program_cache_for_tx_batch.merge(&executed_tx.programs_modified_by_tx); } - Ok(executed_tx) + Ok(ProcessedTransaction::Executed(Box::new(executed_tx))) } }) .collect()); diff --git a/svm/tests/concurrent_tests.rs b/svm/tests/concurrent_tests.rs index e11019be16a346..2e84fbba243663 100644 --- a/svm/tests/concurrent_tests.rs +++ b/svm/tests/concurrent_tests.rs @@ -7,6 +7,7 @@ use { }, transaction_builder::SanitizedTransactionBuilder, }, + assert_matches::assert_matches, mock_bank::MockBankCallback, shuttle::{ sync::{Arc, RwLock}, @@ -22,7 +23,9 @@ use { }, solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, - transaction_processing_result::TransactionProcessingResultExtensions, + transaction_processing_result::{ + ProcessedTransaction, TransactionProcessingResultExtensions, + }, transaction_processor::{ 
ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingConfig, TransactionProcessingEnvironment, @@ -251,10 +254,13 @@ fn svm_concurrent() { &processing_config, ); - for (idx, item) in result.processing_results.iter().enumerate() { - assert!(item.was_processed()); + for (idx, processing_result) in result.processing_results.iter().enumerate() { + assert!(processing_result.was_processed()); + let processed_tx = processing_result.processed_transaction().unwrap(); + assert_matches!(processed_tx, &ProcessedTransaction::Executed(_)); + let executed_tx = processed_tx.executed_transaction().unwrap(); let inserted_accounts = &check_tx_data[idx]; - for (key, account_data) in &item.as_ref().unwrap().loaded_transaction.accounts { + for (key, account_data) in &executed_tx.loaded_transaction.accounts { if *key == inserted_accounts.fee_payer { assert_eq!(account_data.lamports(), BALANCE - 10000); } else if *key == inserted_accounts.sender { diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs index 5b32c2e164c2d2..5015e1fadd9d0c 100644 --- a/svm/tests/conformance.rs +++ b/svm/tests/conformance.rs @@ -336,9 +336,10 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool return; } - let executed_tx = result.processing_results[0] + let processed_tx = result.processing_results[0] .processed_transaction() .unwrap(); + let executed_tx = processed_tx.executed_transaction().unwrap(); let execution_details = &executed_tx.execution_details; let loaded_accounts = &executed_tx.loaded_transaction.accounts; verify_accounts_and_data( diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index f35f023e54ae2a..317535f52cd361 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -275,6 +275,8 @@ fn svm_integration() { let executed_tx_0 = result.processing_results[0] .processed_transaction() + .unwrap() + .executed_transaction() .unwrap(); assert!(executed_tx_0.was_successful()); let logs = executed_tx_0 @@ -286,6 +288,8 @@ fn svm_integration() { let executed_tx_1 = result.processing_results[1] .processed_transaction() + .unwrap() + .executed_transaction() .unwrap(); assert!(executed_tx_1.was_successful()); @@ -301,6 +305,8 @@ fn svm_integration() { let executed_tx_2 = result.processing_results[2] .processed_transaction() + .unwrap() + .executed_transaction() .unwrap(); let return_data = executed_tx_2 .execution_details @@ -314,6 +320,8 @@ fn svm_integration() { let executed_tx_3 = result.processing_results[3] .processed_transaction() + .unwrap() + .executed_transaction() .unwrap(); assert!(executed_tx_3.execution_details.status.is_err()); assert!(executed_tx_3 From 4cf51ba5d15eada911157c81e8088de77fc5f805 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 22 Aug 2024 09:33:20 -0500 Subject: [PATCH 191/529] TransactionMeta - unsafe documentation (#2685) --- transaction-view/src/transaction_meta.rs | 52 ++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs index d467448c2ff500..38cf52468ac9ee 100644 --- a/transaction-view/src/transaction_meta.rs +++ b/transaction-view/src/transaction_meta.rs @@ -112,7 +112,27 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. 
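A caller-side sketch of the contract stated above (the `try_new` constructor name and the `packet_data` local are assumptions for illustration; the point is only that the slice used to parse the meta is the same one passed back to the accessor):

    // Hypothetical usage: `meta` is parsed from `packet_data`, and the very
    // same slice is handed back to the unsafe accessor, as the `# Safety`
    // sections require.
    let meta = TransactionMeta::try_new(&packet_data)?;
    // SAFETY: `meta` was created from `packet_data` above.
    let signatures = unsafe { meta.signatures(&packet_data) };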
- pub unsafe fn signatures(&self, bytes: &[u8]) -> &[Signature] { + pub unsafe fn signatures<'a>(&self, bytes: &'a [u8]) -> &'a [Signature] { + // Verify at compile time there are no alignment constraints. + const _: () = assert!( + core::mem::align_of::() == 1, + "Signature alignment" + ); + // The length of the slice is not greater than isize::MAX. + const _: () = + assert!(u8::MAX as usize * core::mem::size_of::() <= isize::MAX as usize); + + // SAFETY: + // - If this `TransactionMeta` was created from `bytes`: + // - the pointer is valid for the range and is properly aligned. + // - `num_signatures` has been verified against the bounds if + // `TransactionMeta` was created successfully. + // - `Signature` are just byte arrays; there is no possibility the + // `Signature` are not initialized properly. + // - The lifetime of the returned slice is the same as the input + // `bytes`. This means it will not be mutated or deallocated while + // holding the slice. + // - The length does not overflow `isize`. core::slice::from_raw_parts( bytes.as_ptr().add(usize::from(self.signature.offset)) as *const Signature, usize::from(self.signature.num_signatures), @@ -124,7 +144,24 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn static_account_keys(&self, bytes: &[u8]) -> &[Pubkey] { + pub unsafe fn static_account_keys<'a>(&self, bytes: &'a [u8]) -> &'a [Pubkey] { + // Verify at compile time there are no alignment constraints. + const _: () = assert!(core::mem::align_of::() == 1, "Pubkey alignment"); + // The length of the slice is not greater than isize::MAX. + const _: () = + assert!(u8::MAX as usize * core::mem::size_of::() <= isize::MAX as usize); + + // SAFETY: + // - If this `TransactionMeta` was created from `bytes`: + // - the pointer is valid for the range and is properly aligned. + // - `num_static_accounts` has been verified against the bounds if + // `TransactionMeta` was created successfully. + // - `Pubkey` are just byte arrays; there is no possibility the + // `Pubkey` are not initialized properly. + // - The lifetime of the returned slice is the same as the input + // `bytes`. This means it will not be mutated or deallocated while + // holding the slice. + // - The length does not overflow `isize`. core::slice::from_raw_parts( bytes .as_ptr() @@ -137,7 +174,16 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn recent_blockhash(&self, bytes: &[u8]) -> &Hash { + pub unsafe fn recent_blockhash<'a>(&self, bytes: &'a [u8]) -> &'a Hash { + // Verify at compile time there are no alignment constraints. + const _: () = assert!(core::mem::align_of::() == 1, "Hash alignment"); + + // SAFETY: + // - The pointer is correctly aligned (no alignment constraints). + // - `Hash` is just a byte array; there is no possibility the `Hash` + // is not initialized properly. + // - Aliasing rules are respected because the lifetime of the returned + // reference is the same as the input/source `bytes`. 
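+ // (The `const _: () = assert!(...)` lines above are a zero-cost idiom:
+ // the assertion is evaluated at compile time, so a violated alignment
+ // assumption fails the build instead of causing undefined behavior.)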
&*(bytes .as_ptr() .add(usize::from(self.recent_blockhash_offset)) as *const Hash) From 35051c756e148c7b61d26af1421ed133029bce12 Mon Sep 17 00:00:00 2001 From: asolana <110843012+ksolana@users.noreply.github.com> Date: Thu, 22 Aug 2024 08:24:08 -0700 Subject: [PATCH 192/529] Replace unbounded with bounded as single channel is used (#2646) We need the receiver to hold just one message so there is no need to have an unbounded channel. --- poh/src/poh_recorder.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 3fe76d5274bcf8..8b95ecec039d64 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -14,7 +14,9 @@ use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use { crate::{leader_bank_notifier::LeaderBankNotifier, poh_service::PohService}, - crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, SendError, Sender, TrySendError}, + crossbeam_channel::{ + bounded, unbounded, Receiver, RecvTimeoutError, SendError, Sender, TrySendError, + }, log::*, solana_entry::{ entry::{hash_transactions, Entry}, @@ -207,7 +209,7 @@ impl TransactionRecorder { transactions: Vec, ) -> Result> { // create a new channel so that there is only 1 sender and when it goes out of scope, the receiver fails - let (result_sender, result_receiver) = unbounded(); + let (result_sender, result_receiver) = bounded(1); let res = self.record_sender .send(Record::new(mixin, transactions, bank_slot, result_sender)); From ff87ed9187ab5e4967ad6cfa61bb951fe54fb68b Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 22 Aug 2024 12:21:39 -0400 Subject: [PATCH 193/529] gossip: ignore retransmitter signatures when comparing duplicate shreds (#2673) * gossip: ignore retransmitter signatures when comparing duplicate shreds * pr feedback: compare rest of payload instead of setting sig * pr feedback: remove dcou, pub(super) --- gossip/src/duplicate_shred.rs | 85 +++++++++++++++++++++++++++++++++-- ledger/src/shred.rs | 33 +++++++++++++- ledger/src/shred/merkle.rs | 2 +- 3 files changed, 115 insertions(+), 5 deletions(-) diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 3b4448c968414c..ad8c8cc6eb484c 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -95,7 +95,8 @@ pub enum Error { /// - Must match the expected shred version /// - Must both sigverify for the correct leader /// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type` -/// - If `shred1` and `shred2` share the same index they must be not equal +/// - If `shred1` and `shred2` share the same index they must be not have equal payloads excluding the +/// retransmitter signature /// - If `shred1` and `shred2` do not share the same index and are data shreds /// verify that they indicate an index conflict. One of them must be the /// LAST_SHRED_IN_SLOT, however the other shred must have a higher index. 
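The same-index rule above condenses to a small check; a sketch of it (mirroring the hunk that follows, where `is_shred_duplicate` performs the signature-insensitive payload comparison):

    // Same index: the proof is only valid when the payloads differ after
    // excluding any retransmitter signature.
    if shred1.index() == shred2.index() {
        if shred1.is_shred_duplicate(shred2) {
            return Ok(());
        }
        return Err(Error::InvalidDuplicateShreds);
    }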
@@ -144,7 +145,7 @@ where } if shred1.index() == shred2.index() { - if shred1.payload() != shred2.payload() { + if shred1.is_shred_duplicate(shred2) { return Ok(()); } return Err(Error::InvalidDuplicateShreds); @@ -311,7 +312,7 @@ pub(crate) mod tests { solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, solana_sdk::{ hash::Hash, - signature::{Keypair, Signer}, + signature::{Keypair, Signature, Signer}, system_transaction, }, std::sync::Arc, @@ -1252,4 +1253,82 @@ pub(crate) mod tests { ); } } + + #[test] + fn test_retransmitter_signature_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let leader_schedule = |s| { + if s == slot { + Some(leader.pubkey()) + } else { + None + } + }; + let data_shred = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let coding_shred = + new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader, true)[0] + .clone(); + let mut data_shred_different_retransmitter_payload = data_shred.clone().into_payload(); + shred::layout::set_retransmitter_signature( + &mut data_shred_different_retransmitter_payload, + &Signature::new_unique(), + ) + .unwrap(); + let data_shred_different_retransmitter = + Shred::new_from_serialized_shred(data_shred_different_retransmitter_payload).unwrap(); + let mut coding_shred_different_retransmitter_payload = coding_shred.clone().into_payload(); + shred::layout::set_retransmitter_signature( + &mut coding_shred_different_retransmitter_payload, + &Signature::new_unique(), + ) + .unwrap(); + let coding_shred_different_retransmitter = + Shred::new_from_serialized_shred(coding_shred_different_retransmitter_payload).unwrap(); + + let test_cases = vec![ + // Same data shred from different retransmitter + (data_shred, data_shred_different_retransmitter), + // Same coding shred from different retransmitter + (coding_shred, coding_shred_different_retransmitter), + ]; + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + assert_matches!( + from_shred( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.payload().clone(), + Some(leader_schedule), + rng.gen(), // wallclock + 512, // max_size + version, + ) + .err() + .unwrap(), + Error::InvalidDuplicateShreds + ); + + let chunks: Vec<_> = from_shred_bypass_checks( + shred1.clone(), + Pubkey::new_unique(), // self_pubkey + shred2.clone(), + rng.gen(), // wallclock + 512, // max_size + ) + .unwrap() + .collect(); + assert!(chunks.len() > 4); + + assert_matches!( + into_shreds(&leader.pubkey(), chunks, version) + .err() + .unwrap(), + Error::InvalidDuplicateShreds + ); + } + } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 7525fd5258e442..5ece826ce5369b 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -576,6 +576,37 @@ impl Shred { Self::ShredData(_) => Err(Error::InvalidShredType), } } + + /// Returns true if the other shred has the same ShredId, i.e. (slot, index, + /// shred-type), but different payload. + /// Retransmitter's signature is ignored when comparing payloads. 
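+ /// (Shred variants without a retransmitter signature, legacy shreds
+ /// included, fall back to comparing the full payload.)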
+ pub fn is_shred_duplicate(&self, other: &Shred) -> bool { + if self.id() != other.id() { + return false; + } + fn get_payload(shred: &Shred) -> &[u8] { + let Ok(offset) = shred.retransmitter_signature_offset() else { + return shred.payload(); + }; + // Assert that the retransmitter's signature is at the very end of + // the shred payload. + debug_assert_eq!(offset + SIZE_OF_SIGNATURE, shred.payload().len()); + shred + .payload() + .get(..offset) + .unwrap_or_else(|| shred.payload()) + } + get_payload(self) != get_payload(other) + } + + fn retransmitter_signature_offset(&self) -> Result { + match self { + Self::ShredCode(ShredCode::Merkle(shred)) => shred.retransmitter_signature_offset(), + Self::ShredData(ShredData::Merkle(shred)) => shred.retransmitter_signature_offset(), + Self::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant), + Self::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant), + } + } } // Helper methods to extract pieces of the shred from the payload @@ -802,7 +833,7 @@ pub mod layout { } } - pub(crate) fn set_retransmitter_signature( + pub fn set_retransmitter_signature( shred: &mut [u8], signature: &Signature, ) -> Result<(), Error> { diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index e569999ba3d82f..3b9e29934f3e5c 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -445,7 +445,7 @@ macro_rules! impl_merkle_shred { Ok(()) } - fn retransmitter_signature_offset(&self) -> Result { + pub(super) fn retransmitter_signature_offset(&self) -> Result { let ShredVariant::$variant { proof_size, chained, From e429384a443d7d3992c17cb07e56e0d48697ce8e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 22 Aug 2024 14:16:37 -0500 Subject: [PATCH 194/529] add index stat accounts_not_found_in_index (#2686) --- accounts-db/src/accounts_db.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index bae4526cf539ef..9095a29b19cc50 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2030,6 +2030,7 @@ pub struct ShrinkStats { alive_accounts: AtomicU64, accounts_loaded: AtomicU64, purged_zero_lamports: AtomicU64, + accounts_not_found_in_index: AtomicU64, } impl ShrinkStats { @@ -2133,6 +2134,11 @@ impl ShrinkStats { self.purged_zero_lamports.swap(0, Ordering::Relaxed), i64 ), + ( + "accounts_not_found_in_index", + self.accounts_not_found_in_index.swap(0, Ordering::Relaxed), + i64 + ), ); } } @@ -2340,6 +2346,13 @@ impl ShrinkAncientStats { .swap(0, Ordering::Relaxed), i64 ), + ( + "accounts_not_found_in_index", + self.shrink_stats + .accounts_not_found_in_index + .swap(0, Ordering::Relaxed), + i64 + ), ); } } @@ -3928,6 +3941,10 @@ impl AccountsDb { alive_accounts.add(ref_count, stored_account, slot_list); alive += 1; } + } else { + stats + .accounts_not_found_in_index + .fetch_add(1, Ordering::Relaxed); } index += 1; result From 5e9710a575970dab60dd32d6907cd48a33b9c8d4 Mon Sep 17 00:00:00 2001 From: Brennan Date: Thu, 22 Aug 2024 12:25:57 -0700 Subject: [PATCH 195/529] move to fork choice (#2698) --- core/src/consensus/fork_choice.rs | 350 ++++++++++++++++++++++- core/src/replay_stage.rs | 447 ++++-------------------------- core/src/vote_simulator.rs | 4 +- 3 files changed, 399 insertions(+), 402 deletions(-) diff --git a/core/src/consensus/fork_choice.rs b/core/src/consensus/fork_choice.rs index 982d520754124c..cddb9db01d2d1c 100644 --- a/core/src/consensus/fork_choice.rs +++ 
b/core/src/consensus/fork_choice.rs @@ -1,12 +1,15 @@ use { + super::heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, crate::{ consensus::{ latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, - progress_map::ProgressMap, SwitchForkDecision, Tower, + progress_map::ProgressMap, SwitchForkDecision, ThresholdDecision, Tower, + SWITCH_FORK_THRESHOLD, }, replay_stage::HeaviestForkFailures, }, solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_sdk::clock::Slot, std::{ collections::{HashMap, HashSet}, sync::{Arc, RwLock}, @@ -50,3 +53,348 @@ pub trait ForkChoice { valid_slot: &Self::ForkChoiceKey, ) -> Vec; } + +fn select_forks_failed_switch_threshold( + reset_bank: Option<&Bank>, + progress: &ProgressMap, + tower: &Tower, + heaviest_bank_slot: Slot, + failure_reasons: &mut Vec, + switch_proof_stake: u64, + total_stake: u64, + switch_fork_decision: SwitchForkDecision, +) -> SwitchForkDecision { + let last_vote_unable_to_land = match reset_bank { + Some(heaviest_bank_on_same_voted_fork) => { + match tower.last_voted_slot() { + Some(last_voted_slot) => { + match progress.my_latest_landed_vote(heaviest_bank_on_same_voted_fork.slot()) { + Some(my_latest_landed_vote) => + // Last vote did not land + { + my_latest_landed_vote < last_voted_slot + // If we are already voting at the tip, there is nothing we can do. + && last_voted_slot < heaviest_bank_on_same_voted_fork.slot() + // Last vote outside slot hashes of the tip of fork + && !heaviest_bank_on_same_voted_fork + .is_in_slot_hashes_history(&last_voted_slot) + } + None => false, + } + } + None => false, + } + } + None => false, + }; + + if last_vote_unable_to_land { + // If we reach here, these assumptions are true: + // 1. We can't switch because of threshold + // 2. Our last vote was on a non-duplicate/confirmed slot + // 3. Our last vote is now outside slot hashes history of the tip of fork + // So, there was no hope of this last vote ever landing again. + + // In this case, we do want to obey threshold, yet try to register our vote on + // the current fork, so we choose to vote at the tip of current fork instead. + // This will not cause longer lockout because lockout doesn't double after 512 + // slots, it might be enough to get majority vote. + SwitchForkDecision::SameFork + } else { + // If we can't switch and our last vote was on a non-duplicate/confirmed slot, then + // reset to the the next votable bank on the same fork as our last vote, + // but don't vote. + + // We don't just reset to the heaviest fork when switch threshold fails because + // a situation like this can occur: + + /* Figure 1: + slot 0 + | + slot 1 + / \ + slot 2 (last vote) | + | slot 8 (10%) + slot 4 (9%) + */ + + // Imagine 90% of validators voted on slot 4, but only 9% landed. 
If everybody that fails + // the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier), + // then there will be no blocks to include the votes for slot 4, and the network halts + // because 90% of validators can't vote + info!( + "Waiting to switch vote to {}, + resetting to slot {:?} for now, + switch proof stake: {}, + threshold stake: {}, + total stake: {}", + heaviest_bank_slot, + reset_bank.as_ref().map(|b| b.slot()), + switch_proof_stake, + total_stake as f64 * SWITCH_FORK_THRESHOLD, + total_stake + ); + failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( + heaviest_bank_slot, + switch_proof_stake, + total_stake, + )); + switch_fork_decision + } +} + +/// Given a `heaviest_bank` and a `heaviest_bank_on_same_voted_fork`, return +/// a bank to vote on, a bank to reset to, and a list of switch failure +/// reasons. +/// +/// If `heaviest_bank_on_same_voted_fork` is `None` due to that fork no +/// longer being valid to vote on, it's possible that a validator will not +/// be able to reset away from the invalid fork that they last voted on. To +/// resolve this scenario, validators need to wait until they can create a +/// switch proof for another fork or until the invalid fork is be marked +/// valid again if it was confirmed by the cluster. +/// Until this is resolved, leaders will build each of their +/// blocks from the last reset bank on the invalid fork. +pub fn select_vote_and_reset_forks( + heaviest_bank: &Arc, + // Should only be None if there was no previous vote + heaviest_bank_on_same_voted_fork: Option<&Arc>, + ancestors: &HashMap>, + descendants: &HashMap>, + progress: &ProgressMap, + tower: &mut Tower, + latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks, + fork_choice: &HeaviestSubtreeForkChoice, +) -> SelectVoteAndResetForkResult { + // Try to vote on the actual heaviest fork. 
If the heaviest bank is + // locked out or fails the threshold check, the validator will: + // 1) Not continue to vote on current fork, waiting for lockouts to expire/ + // threshold check to pass + // 2) Will reset PoH to heaviest fork in order to make sure the heaviest + // fork is propagated + // This above behavior should ensure correct voting and resetting PoH + // behavior under all cases: + // 1) The best "selected" bank is on same fork + // 2) The best "selected" bank is on a different fork, + // switch_threshold fails + // 3) The best "selected" bank is on a different fork, + // switch_threshold succeeds + let mut failure_reasons = vec![]; + struct CandidateVoteAndResetBanks<'a> { + // A bank that the validator will vote on given it passes all + // remaining vote checks + candidate_vote_bank: Option<&'a Arc>, + // A bank that the validator will reset its PoH to regardless + // of voting behavior + reset_bank: Option<&'a Arc>, + switch_fork_decision: SwitchForkDecision, + } + let candidate_vote_and_reset_banks = { + let switch_fork_decision: SwitchForkDecision = tower.check_switch_threshold( + heaviest_bank.slot(), + ancestors, + descendants, + progress, + heaviest_bank.total_epoch_stake(), + heaviest_bank + .epoch_vote_accounts(heaviest_bank.epoch()) + .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"), + latest_validator_votes_for_frozen_banks, + fork_choice, + ); + + match switch_fork_decision { + SwitchForkDecision::FailedSwitchThreshold(switch_proof_stake, total_stake) => { + let final_switch_fork_decision = select_forks_failed_switch_threshold( + heaviest_bank_on_same_voted_fork.map(|bank| bank.as_ref()), + progress, + tower, + heaviest_bank.slot(), + &mut failure_reasons, + switch_proof_stake, + total_stake, + switch_fork_decision, + ); + let candidate_vote_bank = if final_switch_fork_decision.can_vote() { + // The only time we would still vote despite `!switch_fork_decision.can_vote()` + // is if we switched the vote candidate to `heaviest_bank_on_same_voted_fork` + // because we needed to refresh the vote to the tip of our last voted fork. + heaviest_bank_on_same_voted_fork + } else { + // Otherwise, we should just return the original vote candidate, the heaviest bank + // for logging purposes, namely to check if there are any additional voting failures + // besides the switch threshold + Some(heaviest_bank) + }; + CandidateVoteAndResetBanks { + candidate_vote_bank, + reset_bank: heaviest_bank_on_same_voted_fork, + switch_fork_decision: final_switch_fork_decision, + } + } + SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => { + // If we can't switch and our last vote was on an unconfirmed, duplicate slot, + // then we need to reset to the heaviest bank, even if the heaviest bank is not + // a descendant of the last vote (usually for switch threshold failures we reset + // to the heaviest descendant of the last vote, but in this case, the last vote + // was on a duplicate branch). This is because in the case of *unconfirmed* duplicate + // slots, somebody needs to generate an alternative branch to escape a situation + // like a 50-50 split where both partitions have voted on different versions of the + // same duplicate slot. + + // Unlike the situation described in `Figure 1` above, this is safe. To see why, + // imagine the same situation described in Figure 1 above occurs, but slot 2 is + // a duplicate block. 
There are now a few cases: + // + // Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + DUPLICATE_LIVENESS_THRESHOLD = 1; + // + // 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. Because duplicate slots can be confirmed + // by gossip, unlike the situation described in `Figure 1`, we don't need those + // votes to land in a descendant to confirm slot 2. Once slot 2 is confirmed by + // gossip votes, that fork is added back to the fork choice set and falls back into + // normal fork choice, which is covered by the `FailedSwitchThreshold` case above + // (everyone will resume building on their last voted fork, slot 4, since slot 8 + // doesn't have for switch threshold) + // + // 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, > SWITCH_FORK_THRESHOLD of the network voted + // on slot 8. Then everybody abandons the duplicate fork from fork choice and both builds + // on slot 8's fork. They can also vote on slot 8's fork because it has sufficient weight + // to pass the switching threshold + // + // 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, <= SWITCH_FORK_THRESHOLD of the network voted + // on slot 8. This means more than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot + // guarantee progress anyways + + // Note the heaviest fork is never descended from a known unconfirmed duplicate slot + // because the fork choice rule ensures that (marks it as an invalid candidate), + // thus it's safe to use as the reset bank. + let reset_bank = Some(heaviest_bank); + info!( + "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", + heaviest_bank.slot(), + reset_bank.as_ref().map(|b| b.slot()), + latest_duplicate_ancestor, + ); + failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( + heaviest_bank.slot(), + 0, // In this case we never actually performed the switch check, 0 for now + 0, + )); + CandidateVoteAndResetBanks { + candidate_vote_bank: None, + reset_bank, + switch_fork_decision, + } + } + _ => CandidateVoteAndResetBanks { + candidate_vote_bank: Some(heaviest_bank), + reset_bank: Some(heaviest_bank), + switch_fork_decision, + }, + } + }; + + let CandidateVoteAndResetBanks { + candidate_vote_bank, + reset_bank, + switch_fork_decision, + } = candidate_vote_and_reset_banks; + + if let Some(candidate_vote_bank) = candidate_vote_bank { + // If there's a bank to potentially vote on, then make the remaining + // checks + let ( + is_locked_out, + vote_thresholds, + propagated_stake, + is_leader_slot, + fork_weight, + total_threshold_stake, + total_epoch_stake, + ) = { + let fork_stats = progress.get_fork_stats(candidate_vote_bank.slot()).unwrap(); + let propagated_stats = &progress + .get_propagated_stats(candidate_vote_bank.slot()) + .unwrap(); + ( + fork_stats.is_locked_out, + &fork_stats.vote_threshold, + propagated_stats.propagated_validators_stake, + propagated_stats.is_leader_slot, + fork_stats.fork_weight(), + fork_stats.total_stake, + propagated_stats.total_epoch_stake, + ) + }; + + // If we reach here, the candidate_vote_bank exists in the bank_forks, so it isn't + // dumped and should exist in progress map. 
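+ // Four gates must all pass before voting below: not locked out, the
+ // deep vote thresholds met, propagation confirmed, and a switch fork
+ // decision that allows voting.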
+ let propagation_confirmed = is_leader_slot + || progress + .get_leader_propagation_slot_must_exist(candidate_vote_bank.slot()) + .0; + + if is_locked_out { + failure_reasons.push(HeaviestForkFailures::LockedOut(candidate_vote_bank.slot())); + } + let mut threshold_passed = true; + for threshold_failure in vote_thresholds { + let &ThresholdDecision::FailedThreshold(vote_depth, fork_stake) = threshold_failure + else { + continue; + }; + failure_reasons.push(HeaviestForkFailures::FailedThreshold( + candidate_vote_bank.slot(), + vote_depth, + fork_stake, + total_threshold_stake, + )); + // Ignore shallow checks for voting purposes + if (vote_depth as usize) >= tower.threshold_depth { + threshold_passed = false; + } + } + if !propagation_confirmed { + failure_reasons.push(HeaviestForkFailures::NoPropagatedConfirmation( + candidate_vote_bank.slot(), + propagated_stake, + total_epoch_stake, + )); + } + + if !is_locked_out + && threshold_passed + && propagation_confirmed + && switch_fork_decision.can_vote() + { + info!( + "voting: {} {:.1}%", + candidate_vote_bank.slot(), + 100.0 * fork_weight + ); + SelectVoteAndResetForkResult { + vote_bank: Some((candidate_vote_bank.clone(), switch_fork_decision)), + reset_bank: Some(candidate_vote_bank.clone()), + heaviest_fork_failures: failure_reasons, + } + } else { + SelectVoteAndResetForkResult { + vote_bank: None, + reset_bank: reset_bank.cloned(), + heaviest_fork_failures: failure_reasons, + } + } + } else if reset_bank.is_some() { + SelectVoteAndResetForkResult { + vote_bank: None, + reset_bank: reset_bank.cloned(), + heaviest_fork_failures: failure_reasons, + } + } else { + SelectVoteAndResetForkResult { + vote_bank: None, + reset_bank: None, + heaviest_fork_failures: failure_reasons, + } + } +} diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index d0a8b3099f19ba..b368ac1de89ea8 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -10,13 +10,13 @@ use { cluster_slots_service::{cluster_slots::ClusterSlots, ClusterSlotsUpdateSender}, commitment_service::{AggregateCommitmentService, CommitmentAggregationData}, consensus::{ - fork_choice::{ForkChoice, SelectVoteAndResetForkResult}, + fork_choice::{select_vote_and_reset_forks, ForkChoice, SelectVoteAndResetForkResult}, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, progress_map::{ForkProgress, ProgressMap, PropagatedStats}, tower_storage::{SavedTower, SavedTowerVersions, TowerStorage}, - BlockhashStatus, ComputedBankState, Stake, SwitchForkDecision, ThresholdDecision, - Tower, TowerError, VotedStakes, SWITCH_FORK_THRESHOLD, + BlockhashStatus, ComputedBankState, Stake, SwitchForkDecision, Tower, TowerError, + VotedStakes, SWITCH_FORK_THRESHOLD, }, cost_update_service::CostUpdate, repair::{ @@ -882,7 +882,7 @@ impl ReplayStage { vote_bank, reset_bank, heaviest_fork_failures, - } = Self::select_vote_and_reset_forks( + } = select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_voted_fork.as_ref(), &ancestors, @@ -3641,353 +3641,6 @@ impl ReplayStage { ); } - fn select_forks_failed_switch_threshold( - reset_bank: Option<&Bank>, - progress: &ProgressMap, - tower: &Tower, - heaviest_bank_slot: Slot, - failure_reasons: &mut Vec, - switch_proof_stake: u64, - total_stake: u64, - switch_fork_decision: SwitchForkDecision, - ) -> SwitchForkDecision { - let last_vote_unable_to_land = match reset_bank { - Some(heaviest_bank_on_same_voted_fork) => { - match tower.last_voted_slot() { - 
Some(last_voted_slot) => { - match progress - .my_latest_landed_vote(heaviest_bank_on_same_voted_fork.slot()) - { - Some(my_latest_landed_vote) => - // Last vote did not land - { - my_latest_landed_vote < last_voted_slot - // If we are already voting at the tip, there is nothing we can do. - && last_voted_slot < heaviest_bank_on_same_voted_fork.slot() - // Last vote outside slot hashes of the tip of fork - && !heaviest_bank_on_same_voted_fork - .is_in_slot_hashes_history(&last_voted_slot) - } - None => false, - } - } - None => false, - } - } - None => false, - }; - - if last_vote_unable_to_land { - // If we reach here, these assumptions are true: - // 1. We can't switch because of threshold - // 2. Our last vote was on a non-duplicate/confirmed slot - // 3. Our last vote is now outside slot hashes history of the tip of fork - // So, there was no hope of this last vote ever landing again. - - // In this case, we do want to obey threshold, yet try to register our vote on - // the current fork, so we choose to vote at the tip of current fork instead. - // This will not cause longer lockout because lockout doesn't double after 512 - // slots, it might be enough to get majority vote. - SwitchForkDecision::SameFork - } else { - // If we can't switch and our last vote was on a non-duplicate/confirmed slot, then - // reset to the the next votable bank on the same fork as our last vote, - // but don't vote. - - // We don't just reset to the heaviest fork when switch threshold fails because - // a situation like this can occur: - - /* Figure 1: - slot 0 - | - slot 1 - / \ - slot 2 (last vote) | - | slot 8 (10%) - slot 4 (9%) - */ - - // Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails - // the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier), - // then there will be no blocks to include the votes for slot 4, and the network halts - // because 90% of validators can't vote - info!( - "Waiting to switch vote to {}, - resetting to slot {:?} for now, - switch proof stake: {}, - threshold stake: {}, - total stake: {}", - heaviest_bank_slot, - reset_bank.as_ref().map(|b| b.slot()), - switch_proof_stake, - total_stake as f64 * SWITCH_FORK_THRESHOLD, - total_stake - ); - failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( - heaviest_bank_slot, - switch_proof_stake, - total_stake, - )); - switch_fork_decision - } - } - - /// Given a `heaviest_bank` and a `heaviest_bank_on_same_voted_fork`, return - /// a bank to vote on, a bank to reset to, and a list of switch failure - /// reasons. - /// - /// If `heaviest_bank_on_same_voted_fork` is `None` due to that fork no - /// longer being valid to vote on, it's possible that a validator will not - /// be able to reset away from the invalid fork that they last voted on. To - /// resolve this scenario, validators need to wait until they can create a - /// switch proof for another fork or until the invalid fork is be marked - /// valid again if it was confirmed by the cluster. - /// Until this is resolved, leaders will build each of their - /// blocks from the last reset bank on the invalid fork. 
- pub fn select_vote_and_reset_forks( - heaviest_bank: &Arc, - // Should only be None if there was no previous vote - heaviest_bank_on_same_voted_fork: Option<&Arc>, - ancestors: &HashMap>, - descendants: &HashMap>, - progress: &ProgressMap, - tower: &mut Tower, - latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks, - fork_choice: &HeaviestSubtreeForkChoice, - ) -> SelectVoteAndResetForkResult { - // Try to vote on the actual heaviest fork. If the heaviest bank is - // locked out or fails the threshold check, the validator will: - // 1) Not continue to vote on current fork, waiting for lockouts to expire/ - // threshold check to pass - // 2) Will reset PoH to heaviest fork in order to make sure the heaviest - // fork is propagated - // This above behavior should ensure correct voting and resetting PoH - // behavior under all cases: - // 1) The best "selected" bank is on same fork - // 2) The best "selected" bank is on a different fork, - // switch_threshold fails - // 3) The best "selected" bank is on a different fork, - // switch_threshold succeeds - let mut failure_reasons = vec![]; - struct CandidateVoteAndResetBanks<'a> { - // A bank that the validator will vote on given it passes all - // remaining vote checks - candidate_vote_bank: Option<&'a Arc>, - // A bank that the validator will reset its PoH to regardless - // of voting behavior - reset_bank: Option<&'a Arc>, - switch_fork_decision: SwitchForkDecision, - } - let candidate_vote_and_reset_banks = { - let switch_fork_decision: SwitchForkDecision = tower.check_switch_threshold( - heaviest_bank.slot(), - ancestors, - descendants, - progress, - heaviest_bank.total_epoch_stake(), - heaviest_bank - .epoch_vote_accounts(heaviest_bank.epoch()) - .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"), - latest_validator_votes_for_frozen_banks, - fork_choice, - ); - - match switch_fork_decision { - SwitchForkDecision::FailedSwitchThreshold(switch_proof_stake, total_stake) => { - let final_switch_fork_decision = Self::select_forks_failed_switch_threshold( - heaviest_bank_on_same_voted_fork.map(|bank| bank.as_ref()), - progress, - tower, - heaviest_bank.slot(), - &mut failure_reasons, - switch_proof_stake, - total_stake, - switch_fork_decision, - ); - let candidate_vote_bank = if final_switch_fork_decision.can_vote() { - // The only time we would still vote despite `!switch_fork_decision.can_vote()` - // is if we switched the vote candidate to `heaviest_bank_on_same_voted_fork` - // because we needed to refresh the vote to the tip of our last voted fork. - heaviest_bank_on_same_voted_fork - } else { - // Otherwise, we should just return the original vote candidate, the heaviest bank - // for logging purposes, namely to check if there are any additional voting failures - // besides the switch threshold - Some(heaviest_bank) - }; - CandidateVoteAndResetBanks { - candidate_vote_bank, - reset_bank: heaviest_bank_on_same_voted_fork, - switch_fork_decision: final_switch_fork_decision, - } - } - SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => { - // If we can't switch and our last vote was on an unconfirmed, duplicate slot, - // then we need to reset to the heaviest bank, even if the heaviest bank is not - // a descendant of the last vote (usually for switch threshold failures we reset - // to the heaviest descendant of the last vote, but in this case, the last vote - // was on a duplicate branch). 
This is because in the case of *unconfirmed* duplicate
-            // slots, somebody needs to generate an alternative branch to escape a situation
-            // like a 50-50 split where both partitions have voted on different versions of the
-            // same duplicate slot.
-
-            // Unlike the situation described in `Figure 1` above, this is safe. To see why,
-            // imagine the same situation described in Figure 1 above occurs, but slot 2 is
-            // a duplicate block. There are now a few cases:
-            //
-            // Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + DUPLICATE_LIVENESS_THRESHOLD = 1;
-            //
-            // 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. Because duplicate slots can be confirmed
-            // by gossip, unlike the situation described in `Figure 1`, we don't need those
-            // votes to land in a descendant to confirm slot 2. Once slot 2 is confirmed by
-            // gossip votes, that fork is added back to the fork choice set and falls back into
-            // normal fork choice, which is covered by the `FailedSwitchThreshold` case above
-            // (everyone will resume building on their last voted fork, slot 4, since slot 8
-            // doesn't have enough stake to pass the switch threshold)
-            //
-            // 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, > SWITCH_FORK_THRESHOLD of the network voted
-            // on slot 8. Then everybody abandons the duplicate fork from fork choice and both partitions build
-            // on slot 8's fork. They can also vote on slot 8's fork because it has sufficient weight
-            // to pass the switching threshold
-            //
-            // 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, <= SWITCH_FORK_THRESHOLD of the network voted
-            // on slot 8. This means more than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot
-            // guarantee progress anyway.
-
-            // Note the heaviest fork never descends from a known unconfirmed duplicate slot
-            // because the fork choice rule ensures that (marks it as an invalid candidate),
-            // thus it's safe to use as the reset bank.
- let reset_bank = Some(heaviest_bank); - info!( - "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", - heaviest_bank.slot(), - reset_bank.as_ref().map(|b| b.slot()), - latest_duplicate_ancestor, - ); - failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( - heaviest_bank.slot(), - 0, // In this case we never actually performed the switch check, 0 for now - 0, - )); - CandidateVoteAndResetBanks { - candidate_vote_bank: None, - reset_bank, - switch_fork_decision, - } - } - _ => CandidateVoteAndResetBanks { - candidate_vote_bank: Some(heaviest_bank), - reset_bank: Some(heaviest_bank), - switch_fork_decision, - }, - } - }; - - let CandidateVoteAndResetBanks { - candidate_vote_bank, - reset_bank, - switch_fork_decision, - } = candidate_vote_and_reset_banks; - - if let Some(candidate_vote_bank) = candidate_vote_bank { - // If there's a bank to potentially vote on, then make the remaining - // checks - let ( - is_locked_out, - vote_thresholds, - propagated_stake, - is_leader_slot, - fork_weight, - total_threshold_stake, - total_epoch_stake, - ) = { - let fork_stats = progress.get_fork_stats(candidate_vote_bank.slot()).unwrap(); - let propagated_stats = &progress - .get_propagated_stats(candidate_vote_bank.slot()) - .unwrap(); - ( - fork_stats.is_locked_out, - &fork_stats.vote_threshold, - propagated_stats.propagated_validators_stake, - propagated_stats.is_leader_slot, - fork_stats.fork_weight(), - fork_stats.total_stake, - propagated_stats.total_epoch_stake, - ) - }; - - // If we reach here, the candidate_vote_bank exists in the bank_forks, so it isn't - // dumped and should exist in progress map. - let propagation_confirmed = is_leader_slot - || progress - .get_leader_propagation_slot_must_exist(candidate_vote_bank.slot()) - .0; - - if is_locked_out { - failure_reasons.push(HeaviestForkFailures::LockedOut(candidate_vote_bank.slot())); - } - let mut threshold_passed = true; - for threshold_failure in vote_thresholds { - let &ThresholdDecision::FailedThreshold(vote_depth, fork_stake) = threshold_failure - else { - continue; - }; - failure_reasons.push(HeaviestForkFailures::FailedThreshold( - candidate_vote_bank.slot(), - vote_depth, - fork_stake, - total_threshold_stake, - )); - // Ignore shallow checks for voting purposes - if (vote_depth as usize) >= tower.threshold_depth { - threshold_passed = false; - } - } - if !propagation_confirmed { - failure_reasons.push(HeaviestForkFailures::NoPropagatedConfirmation( - candidate_vote_bank.slot(), - propagated_stake, - total_epoch_stake, - )); - } - - if !is_locked_out - && threshold_passed - && propagation_confirmed - && switch_fork_decision.can_vote() - { - info!( - "voting: {} {:.1}%", - candidate_vote_bank.slot(), - 100.0 * fork_weight - ); - SelectVoteAndResetForkResult { - vote_bank: Some((candidate_vote_bank.clone(), switch_fork_decision)), - reset_bank: Some(candidate_vote_bank.clone()), - heaviest_fork_failures: failure_reasons, - } - } else { - SelectVoteAndResetForkResult { - vote_bank: None, - reset_bank: reset_bank.cloned(), - heaviest_fork_failures: failure_reasons, - } - } - } else if reset_bank.is_some() { - SelectVoteAndResetForkResult { - vote_bank: None, - reset_bank: reset_bank.cloned(), - heaviest_fork_failures: failure_reasons, - } - } else { - SelectVoteAndResetForkResult { - vote_bank: None, - reset_bank: None, - heaviest_fork_failures: failure_reasons, - } - } - } - fn update_fork_propagated_threshold_from_votes( progress: &mut ProgressMap, mut newly_voted_pubkeys: Vec, @@ 
-4481,7 +4134,7 @@ pub(crate) mod tests { progress_map::{ValidatorStakeInfo, RETRANSMIT_BASE_DELAY_MS}, tower_storage::{FileTowerStorage, NullTowerStorage}, tree_diff::TreeDiff, - Tower, VOTE_THRESHOLD_DEPTH, + ThresholdDecision, Tower, VOTE_THRESHOLD_DEPTH, }, replay_stage::ReplayStage, vote_simulator::{self, VoteSimulator}, @@ -7465,7 +7118,7 @@ pub(crate) mod tests { .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks); assert_eq!(heaviest_bank.slot(), 7); assert!(heaviest_bank_on_same_fork.is_none()); - ReplayStage::select_vote_and_reset_forks( + select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), &ancestors, @@ -7588,7 +7241,7 @@ pub(crate) mod tests { .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks); assert_eq!(heaviest_bank.slot(), 5); assert!(heaviest_bank_on_same_fork.is_none()); - ReplayStage::select_vote_and_reset_forks( + select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), &ancestors, @@ -8224,50 +7877,47 @@ pub(crate) mod tests { assert_eq!(tower.last_voted_slot(), Some(last_voted_slot)); assert_eq!(progress.my_latest_landed_vote(tip_of_voted_fork), Some(0)); let other_fork_bank = &bank_forks.read().unwrap().get(other_fork_slot).unwrap(); - let SelectVoteAndResetForkResult { vote_bank, .. } = - ReplayStage::select_vote_and_reset_forks( - other_fork_bank, - Some(&new_bank), - &bank_forks.read().unwrap().ancestors(), - &bank_forks.read().unwrap().descendants(), - &progress, - &mut tower, - &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, - ); + let SelectVoteAndResetForkResult { vote_bank, .. } = select_vote_and_reset_forks( + other_fork_bank, + Some(&new_bank), + &bank_forks.read().unwrap().ancestors(), + &bank_forks.read().unwrap().descendants(), + &progress, + &mut tower, + &latest_validator_votes_for_frozen_banks, + &heaviest_subtree_fork_choice, + ); assert!(vote_bank.is_some()); assert_eq!(vote_bank.unwrap().0.slot(), tip_of_voted_fork); // If last vote is already equal to heaviest_bank_on_same_voted_fork, // we should not vote. let last_voted_bank = &bank_forks.read().unwrap().get(last_voted_slot).unwrap(); - let SelectVoteAndResetForkResult { vote_bank, .. } = - ReplayStage::select_vote_and_reset_forks( - other_fork_bank, - Some(last_voted_bank), - &bank_forks.read().unwrap().ancestors(), - &bank_forks.read().unwrap().descendants(), - &progress, - &mut tower, - &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, - ); + let SelectVoteAndResetForkResult { vote_bank, .. } = select_vote_and_reset_forks( + other_fork_bank, + Some(last_voted_bank), + &bank_forks.read().unwrap().ancestors(), + &bank_forks.read().unwrap().descendants(), + &progress, + &mut tower, + &latest_validator_votes_for_frozen_banks, + &heaviest_subtree_fork_choice, + ); assert!(vote_bank.is_none()); // If last vote is still inside slot hashes history of heaviest_bank_on_same_voted_fork, // we should not vote. let last_voted_bank_plus_1 = &bank_forks.read().unwrap().get(last_voted_slot + 1).unwrap(); - let SelectVoteAndResetForkResult { vote_bank, .. } = - ReplayStage::select_vote_and_reset_forks( - other_fork_bank, - Some(last_voted_bank_plus_1), - &bank_forks.read().unwrap().ancestors(), - &bank_forks.read().unwrap().descendants(), - &progress, - &mut tower, - &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, - ); + let SelectVoteAndResetForkResult { vote_bank, .. 
} = select_vote_and_reset_forks( + other_fork_bank, + Some(last_voted_bank_plus_1), + &bank_forks.read().unwrap().ancestors(), + &bank_forks.read().unwrap().descendants(), + &progress, + &mut tower, + &latest_validator_votes_for_frozen_banks, + &heaviest_subtree_fork_choice, + ); assert!(vote_bank.is_none()); // create a new bank and make last_voted_slot land, we should not vote. @@ -8275,17 +7925,16 @@ pub(crate) mod tests { .entry(new_bank.slot()) .and_modify(|s| s.fork_stats.my_latest_landed_vote = Some(last_voted_slot)); assert!(!new_bank.is_in_slot_hashes_history(&last_voted_slot)); - let SelectVoteAndResetForkResult { vote_bank, .. } = - ReplayStage::select_vote_and_reset_forks( - other_fork_bank, - Some(&new_bank), - &bank_forks.read().unwrap().ancestors(), - &bank_forks.read().unwrap().descendants(), - &progress, - &mut tower, - &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, - ); + let SelectVoteAndResetForkResult { vote_bank, .. } = select_vote_and_reset_forks( + other_fork_bank, + Some(&new_bank), + &bank_forks.read().unwrap().ancestors(), + &bank_forks.read().unwrap().descendants(), + &progress, + &mut tower, + &latest_validator_votes_for_frozen_banks, + &heaviest_subtree_fork_choice, + ); assert!(vote_bank.is_none()); } @@ -8751,7 +8400,7 @@ pub(crate) mod tests { vote_bank, reset_bank, heaviest_fork_failures, - } = ReplayStage::select_vote_and_reset_forks( + } = select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), ancestors, diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index f886d2821af4b0..31395f65a42a6e 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -4,7 +4,7 @@ use { cluster_info_vote_listener::VoteTracker, cluster_slots_service::cluster_slots::ClusterSlots, consensus::{ - fork_choice::SelectVoteAndResetForkResult, + fork_choice::{select_vote_and_reset_forks, SelectVoteAndResetForkResult}, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, progress_map::{ForkProgress, ProgressMap}, @@ -212,7 +212,7 @@ impl VoteSimulator { let SelectVoteAndResetForkResult { heaviest_fork_failures, .. - } = ReplayStage::select_vote_and_reset_forks( + } = select_vote_and_reset_forks( &vote_bank, None, &ancestors, From 22eec1c87d1a4116e24b3f3285d189a5225979ab Mon Sep 17 00:00:00 2001 From: Andrei Silviu Dragnea Date: Thu, 22 Aug 2024 21:28:09 +0100 Subject: [PATCH 196/529] sdk: refactor: clean up entrypoint (#2635) refactor: remove redundant references in entrypoint! macro --- sdk/program/src/entrypoint.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/program/src/entrypoint.rs b/sdk/program/src/entrypoint.rs index d0d579411dcfe8..0be1ec34b48cb7 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program/src/entrypoint.rs @@ -129,7 +129,7 @@ macro_rules! 
entrypoint {
     pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 {
         let (program_id, accounts, instruction_data) =
             unsafe { $crate::entrypoint::deserialize(input) };
-        match $process_instruction(&program_id, &accounts, &instruction_data) {
+        match $process_instruction(program_id, &accounts, instruction_data) {
             Ok(()) => $crate::entrypoint::SUCCESS,
             Err(error) => error.into(),
         }
     }
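[Editor's note] To make the refactor above concrete, here is a minimal sketch of a program that consumes the macro; the `solana_program` import paths are the crate's usual public API, and the point is only that `deserialize` already yields a `&Pubkey` and a `&[u8]`, so the extra `&` the macro used to add was redundant (the old `&&Pubkey` only compiled via deref coercion):

use solana_program::{
    account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, pubkey::Pubkey,
};

entrypoint!(process_instruction);

// `program_id` and `instruction_data` now flow through the macro unchanged
// into this standard signature.
fn process_instruction(
    program_id: &Pubkey,
    accounts: &[AccountInfo],
    instruction_data: &[u8],
) -> ProgramResult {
    msg!("processing {} bytes for program {}", instruction_data.len(), program_id);
    let _ = accounts; // no accounts needed for this sketch
    Ok(())
}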
From 1444baa426fbe712da848c784176cb1bf440c21a Mon Sep 17 00:00:00 2001
From: Ashwin Sekar
Date: Thu, 22 Aug 2024 18:14:23 -0400
Subject: [PATCH 197/529] replay: do not early return when marking slots
 duplicate confirmed (#2700)

* replay: do not early return when marking slots duplicate confirmed

* pr feedback: catch panic explicitly, comments, add root test case

* pr feedback: add custom string to panic message

* pr feedback: add slot to log, use should_panic

* pr feedback: notification for {slot} -> notification for slot {slot}
---
 core/src/replay_stage.rs | 252 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 248 insertions(+), 4 deletions(-)

diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index b368ac1de89ea8..f2cedd7d731aac 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -1829,9 +1829,12 @@ impl ReplayStage {
             } else if let Some(prev_hash) =
                 duplicate_confirmed_slots.insert(confirmed_slot, duplicate_confirmed_hash)
             {
-                assert_eq!(prev_hash, duplicate_confirmed_hash);
+                assert_eq!(
+                    prev_hash, duplicate_confirmed_hash,
+                    "Additional duplicate confirmed notification for slot {confirmed_slot} with a different hash"
+                );
                 // Already processed this signal
-                return;
+                continue;
             }
             let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state(
@@ -3791,9 +3794,12 @@ impl ReplayStage {
             progress.set_duplicate_confirmed_hash(*slot, *frozen_hash);
             if let Some(prev_hash) = duplicate_confirmed_slots.insert(*slot, *frozen_hash)
             {
-                assert_eq!(prev_hash, *frozen_hash);
+                assert_eq!(
+                    prev_hash, *frozen_hash,
+                    "Additional duplicate confirmed notification for slot {slot} with a different hash"
+                );
                 // Already processed this signal
-                return;
+                continue;
             }
             let duplicate_confirmed_state = DuplicateConfirmedState::new_from_state(
@@ -4184,6 +4190,7 @@ pub(crate) mod tests {
             sync::{atomic::AtomicU64, Arc, RwLock},
         },
         tempfile::tempdir,
+        test_case::test_case,
         trees::{tr, Tree},
     };
 
@@ -9018,4 +9025,241 @@ pub(crate) mod tests {
         assert_eq!(working_bank.slot(), good_slot);
         assert_eq!(working_bank.parent_slot(), initial_slot);
     }
+
+    #[test]
+    #[should_panic(expected = "Additional duplicate confirmed notification for slot 6")]
+    fn test_mark_slots_duplicate_confirmed() {
+        let generate_votes = |pubkeys: Vec<Pubkey>| {
+            pubkeys
+                .into_iter()
+                .zip(iter::once(vec![0, 1, 2, 5, 6]).chain(iter::repeat(vec![0, 1, 3, 4]).take(2)))
+                .collect()
+        };
+        let tree = tr(0) / (tr(1) / (tr(3) / (tr(4))) / (tr(2) / (tr(5) / (tr(6)))));
+        let (vote_simulator, blockstore) =
+            setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes)));
+        let VoteSimulator {
+            bank_forks,
+            mut heaviest_subtree_fork_choice,
+            mut progress,
+            ..
+        } = vote_simulator;
+
+        let (ancestor_hashes_replay_update_sender, _) = unbounded();
+        let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default();
+        let bank_hash_0 = bank_forks.read().unwrap().bank_hash(0).unwrap();
+        bank_forks
+            .write()
+            .unwrap()
+            .set_root(1, &AbsRequestSender::default(), None)
+            .unwrap();
+
+        // Mark 0 as duplicate confirmed, should fail since 0 < root
+        let confirmed_slots = [(0, bank_hash_0)];
+        ReplayStage::mark_slots_duplicate_confirmed(
+            &confirmed_slots,
+            &blockstore,
+            &bank_forks,
+            &mut progress,
+            &mut DuplicateSlotsTracker::default(),
+            &mut heaviest_subtree_fork_choice,
+            &mut EpochSlotsFrozenSlots::default(),
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+            &mut duplicate_confirmed_slots,
+        );
+
+        assert!(!duplicate_confirmed_slots.contains_key(&0));
+
+        // Mark 5 as duplicate confirmed, should succeed
+        let bank_hash_5 = bank_forks.read().unwrap().bank_hash(5).unwrap();
+        let confirmed_slots = [(5, bank_hash_5)];
+
+        ReplayStage::mark_slots_duplicate_confirmed(
+            &confirmed_slots,
+            &blockstore,
+            &bank_forks,
+            &mut progress,
+            &mut DuplicateSlotsTracker::default(),
+            &mut heaviest_subtree_fork_choice,
+            &mut EpochSlotsFrozenSlots::default(),
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+            &mut duplicate_confirmed_slots,
+        );
+
+        assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5);
+        assert!(heaviest_subtree_fork_choice
+            .is_duplicate_confirmed(&(5, bank_hash_5))
+            .unwrap_or(false));
+
+        // Mark 5 and 6 as duplicate confirmed, should succeed
+        let bank_hash_6 = bank_forks.read().unwrap().bank_hash(6).unwrap();
+        let confirmed_slots = [(5, bank_hash_5), (6, bank_hash_6)];
+
+        ReplayStage::mark_slots_duplicate_confirmed(
+            &confirmed_slots,
+            &blockstore,
+            &bank_forks,
+            &mut progress,
+            &mut DuplicateSlotsTracker::default(),
+            &mut heaviest_subtree_fork_choice,
+            &mut EpochSlotsFrozenSlots::default(),
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+            &mut duplicate_confirmed_slots,
+        );
+
+        assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5);
+        assert!(heaviest_subtree_fork_choice
+            .is_duplicate_confirmed(&(5, bank_hash_5))
+            .unwrap_or(false));
+        assert_eq!(*duplicate_confirmed_slots.get(&6).unwrap(), bank_hash_6);
+        assert!(heaviest_subtree_fork_choice
+            .is_duplicate_confirmed(&(6, bank_hash_6))
+            .unwrap_or(false));
+
+        // Mark 6 as duplicate confirmed again with a different hash, should panic
+        let confirmed_slots = [(6, Hash::new_unique())];
+        ReplayStage::mark_slots_duplicate_confirmed(
+            &confirmed_slots,
+            &blockstore,
+            &bank_forks,
+            &mut progress,
+            &mut DuplicateSlotsTracker::default(),
+            &mut heaviest_subtree_fork_choice,
+            &mut EpochSlotsFrozenSlots::default(),
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+            &mut duplicate_confirmed_slots,
+        );
+    }
+
+    #[test_case(true ; "same_batch")]
+    #[test_case(false ; "separate_batches")]
+    #[should_panic(expected = "Additional duplicate confirmed notification for slot 6")]
+    fn test_process_duplicate_confirmed_slots(same_batch: bool) {
+        let generate_votes = |pubkeys: Vec<Pubkey>| {
+            pubkeys
+                .into_iter()
+                .zip(iter::once(vec![0, 1, 2, 5, 6]).chain(iter::repeat(vec![0, 1, 3, 4]).take(2)))
+                .collect()
+        };
+        let tree = tr(0) / (tr(1) / (tr(3) / (tr(4))) / (tr(2) / (tr(5) / (tr(6)))));
+        let (vote_simulator, blockstore) =
+            setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes)));
+        let VoteSimulator {
+            bank_forks,
+            mut heaviest_subtree_fork_choice,
+            progress,
+            ..
+        } = vote_simulator;
+
+        let (ancestor_hashes_replay_update_sender, _) = unbounded();
+        let (sender, receiver) = unbounded();
+        let mut duplicate_confirmed_slots = DuplicateConfirmedSlots::default();
+        let bank_hash_0 = bank_forks.read().unwrap().bank_hash(0).unwrap();
+        bank_forks
+            .write()
+            .unwrap()
+            .set_root(1, &AbsRequestSender::default(), None)
+            .unwrap();
+
+        // Mark 0 as duplicate confirmed, should fail since 0 < root
+        sender.send(vec![(0, bank_hash_0)]).unwrap();
+
+        ReplayStage::process_duplicate_confirmed_slots(
+            &receiver,
+            &blockstore,
+            &mut DuplicateSlotsTracker::default(),
+            &mut duplicate_confirmed_slots,
+            &mut EpochSlotsFrozenSlots::default(),
+            &bank_forks,
+            &progress,
+            &mut heaviest_subtree_fork_choice,
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+        );
+
+        assert!(!duplicate_confirmed_slots.contains_key(&0));
+
+        // Mark 5 as duplicate confirmed, should succeed
+        let bank_hash_5 = bank_forks.read().unwrap().bank_hash(5).unwrap();
+        sender.send(vec![(5, bank_hash_5)]).unwrap();
+
+        ReplayStage::process_duplicate_confirmed_slots(
+            &receiver,
+            &blockstore,
+            &mut DuplicateSlotsTracker::default(),
+            &mut duplicate_confirmed_slots,
+            &mut EpochSlotsFrozenSlots::default(),
+            &bank_forks,
+            &progress,
+            &mut heaviest_subtree_fork_choice,
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+        );
+
+        assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5);
+        assert!(heaviest_subtree_fork_choice
+            .is_duplicate_confirmed(&(5, bank_hash_5))
+            .unwrap_or(false));
+
+        // Mark 5 and 6 as duplicate confirmed, should succeed
+        let bank_hash_6 = bank_forks.read().unwrap().bank_hash(6).unwrap();
+        if same_batch {
+            sender
+                .send(vec![(5, bank_hash_5), (6, bank_hash_6)])
+                .unwrap();
+        } else {
+            sender.send(vec![(5, bank_hash_5)]).unwrap();
+            sender.send(vec![(6, bank_hash_6)]).unwrap();
+        }
+
+        ReplayStage::process_duplicate_confirmed_slots(
+            &receiver,
+            &blockstore,
+            &mut DuplicateSlotsTracker::default(),
+            &mut duplicate_confirmed_slots,
+            &mut EpochSlotsFrozenSlots::default(),
+            &bank_forks,
+            &progress,
+            &mut heaviest_subtree_fork_choice,
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+        );
+
+        assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5);
+        assert!(heaviest_subtree_fork_choice
+            .is_duplicate_confirmed(&(5, bank_hash_5))
+            .unwrap_or(false));
+        assert_eq!(*duplicate_confirmed_slots.get(&6).unwrap(), bank_hash_6);
+        assert!(heaviest_subtree_fork_choice
+            .is_duplicate_confirmed(&(6, bank_hash_6))
+            .unwrap_or(false));
+
+        // Mark 6 as duplicate confirmed again with a different hash, should panic
+        sender.send(vec![(6, Hash::new_unique())]).unwrap();
+
+        ReplayStage::process_duplicate_confirmed_slots(
+            &receiver,
+            &blockstore,
+            &mut DuplicateSlotsTracker::default(),
+            &mut duplicate_confirmed_slots,
+            &mut EpochSlotsFrozenSlots::default(),
+            &bank_forks,
+            &progress,
+            &mut heaviest_subtree_fork_choice,
+            &mut DuplicateSlotsToRepair::default(),
+            &ancestor_hashes_replay_update_sender,
+            &mut PurgeRepairSlotCounter::default(),
+        );
+    }
 }

From
2ff381489a91054c5a8a9908ac5c298adc712b2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:34:37 +0000 Subject: [PATCH 198/529] build(deps): bump quote from 1.0.36 to 1.0.37 (#2712) * build(deps): bump quote from 1.0.36 to 1.0.37 Bumps [quote](https://github.com/dtolnay/quote) from 1.0.36 to 1.0.37. - [Release notes](https://github.com/dtolnay/quote/releases) - [Commits](https://github.com/dtolnay/quote/compare/1.0.36...1.0.37) --- updated-dependencies: - dependency-name: quote dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 629d897225c250..cfe5f83e20e55d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4448,9 +4448,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f78add5c5688cb..db1cdcc0de8144 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3731,9 +3731,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] From 93341c6682a721403a3f59e7addf6f0d736da936 Mon Sep 17 00:00:00 2001 From: Jon C Date: Fri, 23 Aug 2024 14:19:25 +0200 Subject: [PATCH 199/529] cli: Use a better amount during transaction simulation (#2709) * cli: Use a better amount during transaction simulation #### Problem There are issues with the current strategy of simulating a test transaction in the `resolve_spend_message`. If the transaction is creating an account, the test transaction will fail because not enough lamports are sent to the destination. #### Summary of changes It's a tricky situation in which we can't always be correct, since there's a chicken-and-egg situation with calculating the fee for spend variants of `All` and `RentExempt` if the sender and the fee payer are the same account. To get the simulation correct in almost all situations, we simulate with the real transfer amount. But if the fee payer is the sender, we simulate with `0`. But we also add a new variant on `SpendAmount` to cover some minimum amount required that must be transferred. Currently, the only situations in which we have an issue are: * creating a nonce account * creating a stake account * transferring SOL Those first two have a minimum requirement, so use that. The third works fine with a 0 amount, since it's just a SOL transfer. 
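[Editor's note] To make that selection rule concrete, here is a self-contained sketch that mirrors the match added to cli/src/spend_utils.rs in the diff below (names shortened for illustration):

// Hypothetical standalone mirror of the simulation-amount rule described above.
enum SpendAmount {
    Some(u64),
    All,
    RentExempt,
    AllForAccountCreation { create_account_min_balance: u64 },
}

fn simulation_lamports(
    amount: &SpendAmount,
    from_is_fee_payer: bool,
    from_balance: u64,
    from_rent_exempt_minimum: u64,
) -> u64 {
    match (from_is_fee_payer, amount) {
        // The fee depends on the amount and vice versa, so simulate with a
        // safe stand-in when the sender also pays the fee.
        (true, SpendAmount::Some(lamports)) => *lamports,
        (true, SpendAmount::AllForAccountCreation { create_account_min_balance }) => {
            *create_account_min_balance
        }
        (true, SpendAmount::All | SpendAmount::RentExempt) => 0,
        // With a separate fee payer, the real amount is known up front.
        (false, SpendAmount::Some(lamports)) => *lamports,
        (false, SpendAmount::All | SpendAmount::AllForAccountCreation { .. }) => from_balance,
        (false, SpendAmount::RentExempt) => from_balance.saturating_sub(from_rent_exempt_minimum),
    }
}

fn main() {
    // Sender pays its own fee and wants to send everything: simulate with 0.
    assert_eq!(simulation_lamports(&SpendAmount::All, true, 5_000_000, 890_880), 0);
    // Separate fee payer: the real balance can be used directly.
    assert_eq!(simulation_lamports(&SpendAmount::All, false, 5_000_000, 890_880), 5_000_000);
}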
* Address feedback --- cli/src/nonce.rs | 10 ++++++++-- cli/src/spend_utils.rs | 33 +++++++++++++++++++++++++++++++-- cli/src/stake.rs | 11 +++++++++-- 3 files changed, 48 insertions(+), 6 deletions(-) diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 0f0bc15aa9295a..36ee8cc89e80fd 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -445,7 +445,7 @@ pub fn process_create_nonce_account( seed: Option, nonce_authority: Option, memo: Option<&String>, - amount: SpendAmount, + mut amount: SpendAmount, compute_unit_price: Option, ) -> ProcessResult { let nonce_account_pubkey = config.signers[nonce_account].pubkey(); @@ -460,6 +460,13 @@ pub fn process_create_nonce_account( (&nonce_account_address, "nonce_account".to_string()), )?; + let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(State::size())?; + if amount == SpendAmount::All { + amount = SpendAmount::AllForAccountCreation { + create_account_min_balance: minimum_balance, + }; + } + let nonce_authority = nonce_authority.unwrap_or_else(|| config.signers[0].pubkey()); let compute_unit_limit = ComputeUnitLimit::Default; @@ -516,7 +523,6 @@ pub fn process_create_nonce_account( return Err(CliError::BadParameter(err_msg).into()); } - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(State::size())?; if lamports < minimum_balance { return Err(CliError::BadParameter(format!( "need at least {minimum_balance} lamports for nonce account to be rent exempt, \ diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index a03e351de26862..f09887d831beaf 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -20,6 +20,7 @@ pub enum SpendAmount { All, Some(u64), RentExempt, + AllForAccountCreation { create_account_min_balance: u64 }, } impl Default for SpendAmount { @@ -166,7 +167,35 @@ where { let (fee, compute_unit_info) = match blockhash { Some(blockhash) => { - let mut dummy_message = build_message(0); + // If the from account is the same as the fee payer, it's impossible + // to give a correct amount for the simulation with `SpendAmount::All` + // or `SpendAmount::RentExempt`. + // To know how much to transfer, we need to know the transaction fee, + // but the transaction fee is dependent on the amount of compute + // units used, which requires simulation. + // To get around this limitation, we simulate against an amount of + // `0`, since there are few situations in which `SpendAmount` can + // be `All` or `RentExempt` *and also* the from account is the fee + // payer. + let lamports = if from_pubkey == fee_pubkey { + match amount { + SpendAmount::Some(lamports) => lamports, + SpendAmount::AllForAccountCreation { + create_account_min_balance, + } => create_account_min_balance, + SpendAmount::All | SpendAmount::RentExempt => 0, + } + } else { + match amount { + SpendAmount::Some(lamports) => lamports, + SpendAmount::AllForAccountCreation { .. } | SpendAmount::All => from_balance, + SpendAmount::RentExempt => { + from_balance.saturating_sub(from_rent_exempt_minimum) + } + } + }; + let mut dummy_message = build_message(lamports); + dummy_message.recent_blockhash = *blockhash; let compute_unit_info = if compute_unit_limit == ComputeUnitLimit::Simulated { // Simulate for correct compute units @@ -196,7 +225,7 @@ where fee, }, ), - SpendAmount::All => { + SpendAmount::All | SpendAmount::AllForAccountCreation { .. 
} => { let lamports = if from_pubkey == fee_pubkey { from_balance.saturating_sub(fee) } else { diff --git a/cli/src/stake.rs b/cli/src/stake.rs index e63f3b7e1ee50a..d70a41c0a57e6f 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -1368,7 +1368,7 @@ pub fn process_create_stake_account( withdrawer: &Option, withdrawer_signer: Option, lockup: &Lockup, - amount: SpendAmount, + mut amount: SpendAmount, sign_only: bool, dump_transaction_message: bool, blockhash_query: &BlockhashQuery, @@ -1454,6 +1454,14 @@ pub fn process_create_stake_account( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + if !sign_only && amount == SpendAmount::All { + let minimum_balance = + rpc_client.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of())?; + amount = SpendAmount::AllForAccountCreation { + create_account_min_balance: minimum_balance, + }; + } + let (message, lamports) = resolve_spend_tx_and_check_account_balances( rpc_client, sign_only, @@ -1478,7 +1486,6 @@ pub fn process_create_stake_account( let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of())?; - if lamports < minimum_balance { return Err(CliError::BadParameter(format!( "need at least {minimum_balance} lamports for stake account to be rent exempt, \ From 9483006fa1daffbe0e066edf5e021483a97d2676 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 23 Aug 2024 09:13:16 -0500 Subject: [PATCH 200/529] TransactionView: Address Table Lookup Iterator (#2639) --- .../src/address_table_lookup_meta.rs | 85 ++++++++- transaction-view/src/bytes.rs | 71 +++++++- transaction-view/src/instructions_meta.rs | 39 ++-- transaction-view/src/transaction_meta.rs | 170 +++++++++++++++++- 4 files changed, 331 insertions(+), 34 deletions(-) diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs index 28ecc67adc51ed..c065386641c193 100644 --- a/transaction-view/src/address_table_lookup_meta.rs +++ b/transaction-view/src/address_table_lookup_meta.rs @@ -2,11 +2,12 @@ use { crate::{ bytes::{ advance_offset_for_array, advance_offset_for_type, check_remaining, - optimized_read_compressed_u16, read_byte, + optimized_read_compressed_u16, read_byte, read_slice_data, read_type, }, result::Result, }, solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature}, + solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup, }; // Each ATL has at least a Pubkey, one byte for the number of write indexes, @@ -46,7 +47,7 @@ const MAX_ATLS_PER_PACKET: usize = (PACKET_DATA_SIZE - MIN_SIZED_PACKET_WITH_ATL /// Contains metadata about the address table lookups in a transaction packet. pub struct AddressTableLookupMeta { /// The number of address table lookups in the transaction. - pub(crate) num_address_table_lookup: u8, + pub(crate) num_address_table_lookups: u8, /// The offset to the first address table lookup in the transaction. 
pub(crate) offset: u16, } @@ -97,12 +98,84 @@ impl AddressTableLookupMeta { } Ok(Self { - num_address_table_lookup: num_address_table_lookups, + num_address_table_lookups, offset: address_table_lookups_offset, }) } } +pub struct AddressTableLookupIterator<'a> { + pub(crate) bytes: &'a [u8], + pub(crate) offset: usize, + pub(crate) num_address_table_lookups: u8, + pub(crate) index: u8, +} + +impl<'a> Iterator for AddressTableLookupIterator<'a> { + type Item = SVMMessageAddressTableLookup<'a>; + + fn next(&mut self) -> Option { + if self.index < self.num_address_table_lookups { + self.index = self.index.wrapping_add(1); + + // Each ATL has 3 pieces: + // 1. Address (Pubkey) + // 2. write indexes ([u8]) + // 3. read indexes ([u8]) + + // Advance offset for address of the lookup table. + const _: () = assert!(core::mem::align_of::() == 1, "Pubkey alignment"); + // SAFETY: + // - The offset is checked to be valid in the slice. + // - The alignment of Pubkey is 1. + // - `Pubkey` is a byte array, it cannot be improperly initialized. + let account_key = unsafe { read_type::(self.bytes, &mut self.offset) }.ok()?; + + // Read the number of write indexes, and then update the offset. + let num_write_accounts = + optimized_read_compressed_u16(self.bytes, &mut self.offset).ok()?; + + const _: () = assert!(core::mem::align_of::() == 1, "u8 alignment"); + // SAFETY: + // - The offset is checked to be valid in the byte slice. + // - The alignment of u8 is 1. + // - The slice length is checked to be valid. + // - `u8` cannot be improperly initialized. + let writable_indexes = + unsafe { read_slice_data::(self.bytes, &mut self.offset, num_write_accounts) } + .ok()?; + + // Read the number of read indexes, and then update the offset. + let num_read_accounts = + optimized_read_compressed_u16(self.bytes, &mut self.offset).ok()?; + + const _: () = assert!(core::mem::align_of::() == 1, "u8 alignment"); + // SAFETY: + // - The offset is checked to be valid in the byte slice. + // - The alignment of u8 is 1. + // - The slice length is checked to be valid. + // - `u8` cannot be improperly initialized. 
+ let readonly_indexes = + unsafe { read_slice_data::(self.bytes, &mut self.offset, num_read_accounts) } + .ok()?; + + Some(SVMMessageAddressTableLookup { + account_key, + writable_indexes, + readonly_indexes, + }) + } else { + None + } + } +} + +impl ExactSizeIterator for AddressTableLookupIterator<'_> { + fn len(&self) -> usize { + usize::from(self.num_address_table_lookups.wrapping_sub(self.index)) + } +} + #[cfg(test)] mod tests { use { @@ -115,7 +188,7 @@ mod tests { let bytes = bincode::serialize(&ShortVec::(vec![])).unwrap(); let mut offset = 0; let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap(); - assert_eq!(meta.num_address_table_lookup, 0); + assert_eq!(meta.num_address_table_lookups, 0); assert_eq!(meta.offset, 1); assert_eq!(offset, bytes.len()); } @@ -141,7 +214,7 @@ mod tests { .unwrap(); let mut offset = 0; let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap(); - assert_eq!(meta.num_address_table_lookup, 1); + assert_eq!(meta.num_address_table_lookups, 1); assert_eq!(meta.offset, 1); assert_eq!(offset, bytes.len()); } @@ -163,7 +236,7 @@ mod tests { .unwrap(); let mut offset = 0; let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap(); - assert_eq!(meta.num_address_table_lookup, 2); + assert_eq!(meta.num_address_table_lookups, 2); assert_eq!(meta.offset, 1); assert_eq!(offset, bytes.len()); } diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs index e66442efbb2a93..9e2724e3cac6de 100644 --- a/transaction-view/src/bytes.rs +++ b/transaction-view/src/bytes.rs @@ -3,11 +3,15 @@ use crate::result::{Result, TransactionParsingError}; /// Check that the buffer has at least `len` bytes remaining starting at /// `offset`. Returns Err if the buffer is too short. /// +/// * `bytes` - Slice of bytes to read from. +/// * `offset` - Current offset into `bytes`. +/// * `num_bytes` - Number of bytes that must be remaining. +/// /// Assumptions: /// - The current offset is not greater than `bytes.len()`. #[inline(always)] -pub fn check_remaining(bytes: &[u8], offset: usize, len: usize) -> Result<()> { - if len > bytes.len().wrapping_sub(offset) { +pub fn check_remaining(bytes: &[u8], offset: usize, num_bytes: usize) -> Result<()> { + if num_bytes > bytes.len().wrapping_sub(offset) { Err(TransactionParsingError) } else { Ok(()) @@ -29,6 +33,9 @@ pub fn read_byte(bytes: &[u8], offset: &mut usize) -> Result { /// If the buffer is too short or the encoding is invalid, return Err. /// `offset` is updated to point to the byte after the compressed u16. /// +/// * `bytes` - Slice of bytes to read from. +/// * `offset` - Current offset into `bytes`. +/// /// Assumptions: /// - The current offset is not greater than `bytes.len()`. #[allow(dead_code)] @@ -61,6 +68,7 @@ pub fn read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result { } /// Domain-specific optimization for reading a compressed u16. +/// /// The compressed u16's are only used for array-lengths in our transaction /// format. The transaction packet has a maximum size of 1232 bytes. /// This means that the maximum array length within a **valid** transaction is @@ -70,6 +78,9 @@ pub fn read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result { /// case, and reads a maximum of 2 bytes. /// If the buffer is too short or the encoding is invalid, return Err. /// `offset` is updated to point to the byte after the compressed u16. +/// +/// * `bytes` - Slice of bytes to read from. +/// * `offset` - Current offset into `bytes`. 
 #[inline(always)]
 pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result<u16> {
     let mut result = 0u16;
@@ -98,6 +109,10 @@ pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result<u16> {
 /// Update the `offset` to point to the byte after an array of length `len` and
 /// of type `T`. If the buffer is too short, return Err.
 ///
+/// * `bytes` - Slice of bytes to read from.
+/// * `offset` - Current offset into `bytes`.
+/// * `num_elements` - Number of `T` elements in the array.
+///
 /// Assumptions:
 /// 1. The current offset is not greater than `bytes.len()`.
 /// 2. The size of `T` is small enough such that a usize will not overflow if
 ///    given the maximum slice size (u16::MAX).
@@ -106,9 +121,9 @@ pub fn advance_offset_for_array<T: Sized>(
     bytes: &[u8],
     offset: &mut usize,
-    len: u16,
+    num_elements: u16,
 ) -> Result<()> {
-    let array_len_bytes = usize::from(len).wrapping_mul(core::mem::size_of::<T>());
+    let array_len_bytes = usize::from(num_elements).wrapping_mul(core::mem::size_of::<T>());
     check_remaining(bytes, *offset, array_len_bytes)?;
     *offset = offset.wrapping_add(array_len_bytes);
     Ok(())
@@ -117,6 +132,9 @@ pub fn advance_offset_for_array<T: Sized>(
 /// Update the `offset` to point to the byte after the `T`.
 /// If the buffer is too short, return Err.
 ///
+/// * `bytes` - Slice of bytes to read from.
+/// * `offset` - Current offset into `bytes`.
+///
 /// Assumptions:
 /// 1. The current offset is not greater than `bytes.len()`.
 /// 2. The size of `T` is small enough such that a usize will not overflow.
@@ -128,6 +146,51 @@ pub fn advance_offset_for_type<T: Sized>(bytes: &[u8], offset: &mut usize) -> Result<()> {
     Ok(())
 }
 
+/// Return a reference to the next slice of `T` in the buffer, checking bounds
+/// and advancing the offset.
+/// If the buffer is too short, return Err.
+///
+/// * `bytes` - Slice of bytes to read from.
+/// * `offset` - Current offset into `bytes`.
+/// * `num_elements` - Number of `T` elements in the slice.
+///
+/// # Safety
+/// 1. `bytes` must be a valid slice of bytes.
+/// 2. `offset` must be a valid offset into `bytes`.
+/// 3. `bytes + offset` must be properly aligned for `T`.
+/// 4. `T` slice must be validly initialized.
+/// 5. The size of `T` is small enough such that a usize will not overflow if
+///    given the maximum slice size (u16::MAX).
+#[inline(always)]
+pub unsafe fn read_slice_data<'a, T: Sized>(
+    bytes: &'a [u8],
+    offset: &mut usize,
+    num_elements: u16,
+) -> Result<&'a [T]> {
+    let current_ptr = bytes.as_ptr().wrapping_add(*offset);
+    advance_offset_for_array::<T>(bytes, offset, num_elements)?;
+    Ok(unsafe { core::slice::from_raw_parts(current_ptr as *const T, usize::from(num_elements)) })
+}
+
+/// Return a reference to the next `T` in the buffer, checking bounds and
+/// advancing the offset.
+/// If the buffer is too short, return Err.
+///
+/// * `bytes` - Slice of bytes to read from.
+/// * `offset` - Current offset into `bytes`.
+///
+/// # Safety
+/// 1. `bytes` must be a valid slice of bytes.
+/// 2. `offset` must be a valid offset into `bytes`.
+/// 3. `bytes + offset` must be properly aligned for `T`.
+/// 4. `T` must be validly initialized.
+#[inline(always)]
+pub unsafe fn read_type<'a, T: Sized>(bytes: &'a [u8], offset: &mut usize) -> Result<&'a T> {
+    let current_ptr = bytes.as_ptr().wrapping_add(*offset);
+    advance_offset_for_type::<T>(bytes, offset)?;
+    Ok(unsafe { &*(current_ptr as *const T) })
+}
+
 #[cfg(test)]
 mod tests {
     use {
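[Editor's note] For readers unfamiliar with the two-byte length encoding these helpers parse, here is an illustrative, standalone model (assumed names; it mimics the format, not the crate's exact error handling). Each byte carries 7 payload bits with the high bit as a continuation flag; since no length in a 1232-byte packet exceeds 16383, two bytes always suffice:

// Encode a u16 in the 7-bits-per-byte "compressed u16" format.
fn encode_compressed_u16(mut value: u16, out: &mut Vec<u8>) {
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte);
            break;
        }
        out.push(byte | 0x80); // set the continuation bit
    }
}

// Decode at most two bytes, returning the value and bytes consumed.
fn decode_compressed_u16(bytes: &[u8]) -> Option<(u16, usize)> {
    let first = *bytes.first()?;
    let mut result = u16::from(first & 0x7f);
    if first & 0x80 == 0 {
        return Some((result, 1));
    }
    let second = *bytes.get(1)?;
    // A continuation bit in the second byte would imply a length that can
    // never appear in a valid packet, so treat it as malformed.
    if second & 0x80 != 0 {
        return None;
    }
    result |= u16::from(second & 0x7f) << 7;
    Some((result, 2))
}

fn main() {
    let mut buf = Vec::new();
    encode_compressed_u16(300, &mut buf);
    assert_eq!(buf, vec![0xac, 0x02]); // 300 = 0b10_0101100
    assert_eq!(decode_compressed_u16(&buf), Some((300, 2)));
}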
diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_meta.rs
index 8d68019f932c66..45de23d47c60fd 100644
--- a/transaction-view/src/instructions_meta.rs
+++ b/transaction-view/src/instructions_meta.rs
@@ -2,6 +2,7 @@ use {
     crate::{
         bytes::{
             advance_offset_for_array, check_remaining, optimized_read_compressed_u16, read_byte,
+            read_slice_data,
         },
         result::Result,
     },
@@ -82,6 +83,8 @@ impl<'a> Iterator for InstructionsIterator<'a> {
 
     fn next(&mut self) -> Option<Self::Item> {
         if self.index < self.num_instructions {
+            self.index = self.index.wrapping_add(1);
+
             // Each instruction has 3 pieces:
             // 1. Program ID index (u8)
             // 2. Accounts indexes ([u8])
@@ -93,27 +96,29 @@ impl<'a> Iterator for InstructionsIterator<'a> {
             // Read the number of account indexes, and then update the offset
             // to skip over the account indexes.
             let num_accounts = optimized_read_compressed_u16(self.bytes, &mut self.offset).ok()?;
-            // SAFETY: Only returned after we check that there are enough bytes.
-            let accounts = unsafe {
-                core::slice::from_raw_parts(
-                    self.bytes.as_ptr().add(self.offset),
-                    usize::from(num_accounts),
-                )
-            };
-            advance_offset_for_array::<u8>(self.bytes, &mut self.offset, num_accounts).ok()?;
+
+            const _: () = assert!(core::mem::align_of::<u8>() == 1, "u8 alignment");
+            // SAFETY:
+            // - The offset is checked to be valid in the byte slice.
+            // - The alignment of u8 is 1.
+            // - The slice length is checked to be valid.
+            // - `u8` cannot be improperly initialized.
+            let accounts =
+                unsafe { read_slice_data::<u8>(self.bytes, &mut self.offset, num_accounts) }
                    .ok()?;
 
             // Read the length of the data, and then update the offset to skip
             // over the data.
             let data_len = optimized_read_compressed_u16(self.bytes, &mut self.offset).ok()?;
-            // SAFETY: Only returned after we check that there are enough bytes.
-            let data = unsafe {
-                core::slice::from_raw_parts(
-                    self.bytes.as_ptr().add(self.offset),
-                    usize::from(data_len),
-                )
-            };
-            advance_offset_for_array::<u8>(self.bytes, &mut self.offset, data_len).ok()?;
-            self.index = self.index.wrapping_add(1);
+
+            const _: () = assert!(core::mem::align_of::<u8>() == 1, "u8 alignment");
+            // SAFETY:
+            // - The offset is checked to be valid in the byte slice.
+            // - The alignment of u8 is 1.
+            // - The slice length is checked to be valid.
+            // - `u8` cannot be improperly initialized.
+ let data = + unsafe { read_slice_data::(self.bytes, &mut self.offset, data_len) }.ok()?; Some(SVMInstruction { program_id_index, diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs index 38cf52468ac9ee..17d92599a102ee 100644 --- a/transaction-view/src/transaction_meta.rs +++ b/transaction-view/src/transaction_meta.rs @@ -1,6 +1,6 @@ use { crate::{ - address_table_lookup_meta::AddressTableLookupMeta, + address_table_lookup_meta::{AddressTableLookupIterator, AddressTableLookupMeta}, bytes::advance_offset_for_type, instructions_meta::{InstructionsIterator, InstructionsMeta}, message_header_meta::{MessageHeaderMeta, TransactionVersion}, @@ -44,7 +44,7 @@ impl TransactionMeta { let instructions = InstructionsMeta::try_new(bytes, &mut offset)?; let address_table_lookup = match message_header.version { TransactionVersion::Legacy => AddressTableLookupMeta { - num_address_table_lookup: 0, + num_address_table_lookups: 0, offset: 0, }, TransactionVersion::V0 => AddressTableLookupMeta::try_new(bytes, &mut offset)?, @@ -102,7 +102,7 @@ impl TransactionMeta { /// Return the number of address table lookups in the transaction. pub fn num_address_table_lookups(&self) -> u8 { - self.address_table_lookup.num_address_table_lookup + self.address_table_lookup.num_address_table_lookups } } @@ -201,6 +201,22 @@ impl TransactionMeta { index: 0, } } + + /// Return an iterator over the address table lookups in the transaction. + /// # Safety + /// - This function must be called with the same `bytes` slice that was + /// used to create the `TransactionMeta` instance. + pub unsafe fn address_table_lookup_iter<'a>( + &self, + bytes: &'a [u8], + ) -> AddressTableLookupIterator<'a> { + AddressTableLookupIterator { + bytes, + offset: usize::from(self.address_table_lookup.offset), + num_address_table_lookups: self.address_table_lookup.num_address_table_lookups, + index: 0, + } + } } #[cfg(test)] @@ -246,7 +262,7 @@ mod tests { tx.message.instructions().len() as u16 ); assert_eq!( - meta.address_table_lookup.num_address_table_lookup, + meta.address_table_lookup.num_address_table_lookups, tx.message .address_table_lookups() .map(|x| x.len() as u8) @@ -305,7 +321,21 @@ mod tests { } } - fn v0_with_lookup() -> VersionedTransaction { + fn multiple_transfers() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. + message: VersionedMessage::Legacy(Message::new( + &[ + system_instruction::transfer(&payer, &Pubkey::new_unique(), 1), + system_instruction::transfer(&payer, &Pubkey::new_unique(), 1), + ], + Some(&payer), + )), + } + } + + fn v0_with_single_lookup() -> VersionedTransaction { let payer = Pubkey::new_unique(); let to = Pubkey::new_unique(); VersionedTransaction { @@ -325,6 +355,36 @@ mod tests { } } + fn v0_with_multiple_lookups() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + let to1 = Pubkey::new_unique(); + let to2 = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. 
+ message: VersionedMessage::V0( + v0::Message::try_compile( + &payer, + &[ + system_instruction::transfer(&payer, &to1, 1), + system_instruction::transfer(&payer, &to2, 1), + ], + &[ + AddressLookupTableAccount { + key: Pubkey::new_unique(), + addresses: vec![to1], + }, + AddressLookupTableAccount { + key: Pubkey::new_unique(), + addresses: vec![to2], + }, + ], + Hash::default(), + ) + .unwrap(), + ), + } + } + #[test] fn test_minimal_sized_transaction() { verify_transaction_view_meta(&minimally_sized_transaction()); @@ -342,7 +402,7 @@ mod tests { #[test] fn test_v0_with_lookup() { - verify_transaction_view_meta(&v0_with_lookup()); + verify_transaction_view_meta(&v0_with_single_lookup()); } #[test] @@ -452,7 +512,20 @@ mod tests { } #[test] - fn test_instructions_iter() { + fn test_instructions_iter_empty() { + let tx = minimally_sized_transaction(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + // SAFETY: `bytes` is the same slice used to create `meta`. + unsafe { + let mut iter = meta.instructions_iter(&bytes); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_instructions_iter_single() { let tx = simple_transfer(); let bytes = bincode::serialize(&tx).unwrap(); let meta = TransactionMeta::try_new(&bytes).unwrap(); @@ -470,4 +543,87 @@ mod tests { assert!(iter.next().is_none()); } } + + #[test] + fn test_instructions_iter_multiple() { + let tx = multiple_transfers(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + // SAFETY: `bytes` is the same slice used to create `meta`. + unsafe { + let mut iter = meta.instructions_iter(&bytes); + let ix = iter.next().unwrap(); + assert_eq!(ix.program_id_index, 3); + assert_eq!(ix.accounts, &[0, 1]); + assert_eq!( + ix.data, + &bincode::serialize(&SystemInstruction::Transfer { lamports: 1 }).unwrap() + ); + let ix = iter.next().unwrap(); + assert_eq!(ix.program_id_index, 3); + assert_eq!(ix.accounts, &[0, 2]); + assert_eq!( + ix.data, + &bincode::serialize(&SystemInstruction::Transfer { lamports: 1 }).unwrap() + ); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_address_table_lookup_iter_empty() { + let tx = simple_transfer(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + // SAFETY: `bytes` is the same slice used to create `meta`. + unsafe { + let mut iter = meta.address_table_lookup_iter(&bytes); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_address_table_lookup_iter_single() { + let tx = v0_with_single_lookup(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + let atls_actual = tx.message.address_table_lookups().unwrap(); + // SAFETY: `bytes` is the same slice used to create `meta`. + unsafe { + let mut iter = meta.address_table_lookup_iter(&bytes); + let lookup = iter.next().unwrap(); + assert_eq!(lookup.account_key, &atls_actual[0].account_key); + assert_eq!(lookup.writable_indexes, atls_actual[0].writable_indexes); + assert_eq!(lookup.readonly_indexes, atls_actual[0].readonly_indexes); + assert!(iter.next().is_none()); + } + } + + #[test] + fn test_address_table_lookup_iter_multiple() { + let tx = v0_with_multiple_lookups(); + let bytes = bincode::serialize(&tx).unwrap(); + let meta = TransactionMeta::try_new(&bytes).unwrap(); + + let atls_actual = tx.message.address_table_lookups().unwrap(); + // SAFETY: `bytes` is the same slice used to create `meta`. 
+ unsafe { + let mut iter = meta.address_table_lookup_iter(&bytes); + + let lookup = iter.next().unwrap(); + assert_eq!(lookup.account_key, &atls_actual[0].account_key); + assert_eq!(lookup.writable_indexes, atls_actual[0].writable_indexes); + assert_eq!(lookup.readonly_indexes, atls_actual[0].readonly_indexes); + + let lookup = iter.next().unwrap(); + assert_eq!(lookup.account_key, &atls_actual[1].account_key); + assert_eq!(lookup.writable_indexes, atls_actual[1].writable_indexes); + assert_eq!(lookup.readonly_indexes, atls_actual[1].readonly_indexes); + + assert!(iter.next().is_none()); + } + } } From c6703cb6fb1b28d441b3c86dddb8da6f8524e83f Mon Sep 17 00:00:00 2001 From: Andrei Silviu Dragnea Date: Fri, 23 Aug 2024 17:21:18 +0100 Subject: [PATCH 201/529] Program Test: clean up invoke_builtin_function (#2694) refactor: clean up invoke_builtin_function --- program-test/src/lib.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index be8e763ef60f62..eec8e215b833a0 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -112,7 +112,6 @@ pub fn invoke_builtin_function( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let instruction_data = instruction_context.get_instruction_data(); let instruction_account_indices = 0..instruction_context.get_number_of_instruction_accounts(); // mock builtin program must consume units @@ -131,19 +130,17 @@ pub fn invoke_builtin_function( // Serialize entrypoint parameters with SBF ABI let (mut parameter_bytes, _regions, _account_lengths) = serialize_parameters( - invoke_context.transaction_context, - invoke_context - .transaction_context - .get_current_instruction_context()?, + transaction_context, + instruction_context, true, // copy_account_data // There is no VM so direct mapping can not be implemented here )?; // Deserialize data back into instruction params - let (program_id, account_infos, _input) = + let (program_id, account_infos, input) = unsafe { deserialize(&mut parameter_bytes.as_slice_mut()[0] as *mut u8) }; // Execute the program - builtin_function(program_id, &account_infos, instruction_data).map_err(|err| { + builtin_function(program_id, &account_infos, input).map_err(|err| { let err = InstructionError::from(u64::from(err)); stable_log::program_failure(&log_collector, program_id, &err); let err: Box = Box::new(err); From 0ad9fe1927e601acb0c77dc7f51462f0831daba6 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sat, 24 Aug 2024 01:11:00 +0800 Subject: [PATCH 202/529] ci: only upload the integer part for the benchmark (#2713) --- ci/upload-benchmark.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/upload-benchmark.sh b/ci/upload-benchmark.sh index c775cc38b1189b..01d979d5e8b1b6 100755 --- a/ci/upload-benchmark.sh +++ b/ci/upload-benchmark.sh @@ -69,13 +69,13 @@ fi while IFS= read -r line; do - if [[ $line =~ ^test\ (.*)\ \.\.\.\ bench:\ *([0-9,]+)\ ns\/iter\ \(\+\/-\ *([0-9,]+)\) ]]; then + if [[ $line =~ ^test\ (.*)\ \.\.\.\ bench:\ *([0-9,\.]+)\ ns\/iter\ \(\+\/-\ *([0-9,\.]+)\) ]]; then test_name="${BASH_REMATCH[1]}" ns_iter="${BASH_REMATCH[2]}" plus_minus="${BASH_REMATCH[3]}" - ns_iter=$(echo "$ns_iter" | tr -d ',') - plus_minus=$(echo "$plus_minus" | tr -d ',') + ns_iter=$(echo "$ns_iter" | tr -d ',' | cut -d'.' -f1) + plus_minus=$(echo "$plus_minus" | tr -d ',' | cut -d'.' 
-f1) datapoint="${INFLUX_MEASUREMENT},commit=${COMMIT_HASH},test_suite=${TEST_SUITE},name=${test_name} median=${ns_iter}i,deviation=${plus_minus}i" echo "datapoint: $datapoint" From 09f5f5b0bb256a410ed0a3ef040408c79e81a641 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 23 Aug 2024 18:56:21 +0000 Subject: [PATCH 203/529] adds test coverage for Shred::is_shred_duplicate (#2723) --- ledger/src/shred.rs | 95 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 5ece826ce5369b..16f0fa9d36ec13 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -2122,4 +2122,99 @@ mod tests { assert_eq!((flags & ShredFlags::SHRED_TICK_REFERENCE_MASK).bits(), 61u8); assert_eq!(bincode::serialize(&flags).unwrap(), [0b1011_1101]); } + + #[test_case(false, false)] + #[test_case(false, true)] + #[test_case(true, false)] + #[test_case(true, true)] + fn test_is_shred_duplicate(chained: bool, is_last_in_slot: bool) { + fn fill_retransmitter_signature( + rng: &mut R, + shred: Shred, + chained: bool, + is_last_in_slot: bool, + ) -> Shred { + let mut shred = shred.into_payload(); + let mut signature = [0u8; SIGNATURE_BYTES]; + rng.fill(&mut signature[..]); + let out = layout::set_retransmitter_signature(&mut shred, &Signature::from(signature)); + if chained && is_last_in_slot { + assert_matches!(out, Ok(())); + } else { + assert_matches!(out, Err(Error::InvalidShredVariant)); + } + Shred::new_from_serialized_shred(shred).unwrap() + } + let mut rng = rand::thread_rng(); + let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); + let reed_solomon_cache = ReedSolomonCache::default(); + let keypair = Keypair::new(); + let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); + let slot = 285_376_049 + rng.gen_range(0..100_000); + let parent_slot = slot - rng.gen_range(1..=65535); + let shred_version = rng.gen(); + let reference_tick = rng.gen_range(1..64); + let next_shred_index = rng.gen_range(0..671); + let next_code_index = rng.gen_range(0..781); + let mut data = vec![0u8; 1200 * 5]; + rng.fill(&mut data[..]); + let shreds: Vec<_> = merkle::make_shreds_from_data( + &thread_pool, + &keypair, + chained_merkle_root, + &data[..], + slot, + parent_slot, + shred_version, + reference_tick, + is_last_in_slot, + next_shred_index, + next_code_index, + &reed_solomon_cache, + &mut ProcessShredsStats::default(), + ) + .unwrap() + .into_iter() + .flatten() + .map(Shred::from) + .map(|shred| fill_retransmitter_signature(&mut rng, shred, chained, is_last_in_slot)) + .collect(); + { + let num_data_shreds = shreds.iter().filter(|shred| shred.is_data()).count(); + let num_coding_shreds = shreds.iter().filter(|shred| shred.is_code()).count(); + assert!(num_data_shreds > if is_last_in_slot { 31 } else { 5 }); + assert!(num_coding_shreds > if is_last_in_slot { 31 } else { 20 }); + } + // Shreds of different (slot, index, shred-type) are not duplicate. + // A shred is not a duplicate of itself either. + for shred in &shreds { + for other in &shreds { + assert!(!shred.is_shred_duplicate(other)); + } + } + // Different retransmitter signature does not make shreds duplicate. 
+ for shred in &shreds {
+ let other =
+ fill_retransmitter_signature(&mut rng, shred.clone(), chained, is_last_in_slot);
+ if chained && is_last_in_slot {
+ assert_ne!(shred.payload(), other.payload());
+ }
+ assert!(!shred.is_shred_duplicate(&other));
+ assert!(!other.is_shred_duplicate(shred));
+ }
+ // Shreds of the same (slot, index, shred-type) with different payload
+ // (ignoring retransmitter signature) are duplicate.
+ for shred in &shreds {
+ let mut other = shred.payload().clone();
+ other[90] = other[90].wrapping_add(1);
+ let other = Shred::new_from_serialized_shred(other).unwrap();
+ assert_ne!(shred.payload(), other.payload());
+ assert_eq!(
+ layout::get_retransmitter_signature(shred.payload()).ok(),
+ layout::get_retransmitter_signature(other.payload()).ok()
+ );
+ assert!(shred.is_shred_duplicate(&other));
+ assert!(other.is_shred_duplicate(shred));
+ }
+ }
 }

From fc84a523592a3ea058f64a7083c1f342c7cee1b6 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Fri, 23 Aug 2024 14:09:31 -0500
Subject: [PATCH 204/529] add stats on clean (#2703)

---
 accounts-db/src/accounts_db.rs | 41 ++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 14 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 9095a29b19cc50..641c7d63f9f013 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -1959,6 +1959,7 @@ struct CleanAccountsStats {
 remove_dead_accounts_shrink_us: AtomicU64,
 clean_stored_dead_slots_us: AtomicU64,
 uncleaned_roots_slot_list_1: AtomicU64,
+ get_account_sizes_us: AtomicU64,
 }

 impl CleanAccountsStats {
@@ -3593,6 +3594,13 @@ impl AccountsDb {
 .swap(0, Ordering::Relaxed),
 i64
 ),
+ (
+ "get_account_sizes_us",
+ self.clean_accounts_stats
+ .get_account_sizes_us
+ .swap(0, Ordering::Relaxed),
+ i64
+ ),
 (
 "clean_old_root_us",
 self.clean_accounts_stats
@@ -8043,20 +8051,25 @@ impl AccountsDb {
 dead_slots.insert(*slot);
 } else {
- let mut offsets = offsets.iter().cloned().collect::<Vec<_>>();
- // sort so offsets are in order. This improves efficiency of loading the accounts.
- offsets.sort_unstable();
- let dead_bytes = store.accounts.get_account_sizes(&offsets).iter().sum();
- store.remove_accounts(dead_bytes, reset_accounts, offsets.len());
- if Self::is_shrinking_productive(*slot, &store)
- && self.is_candidate_for_shrink(&store)
- {
- // Checking that this single storage entry is ready for shrinking,
- // should be a sufficient indication that the slot is ready to be shrunk
- // because slots should only have one storage entry, namely the one that was
- // created by `flush_slot_cache()`.
- new_shrink_candidates.insert(*slot);
- }
+ let (_, us) = measure_us!({
+ let mut offsets = offsets.iter().cloned().collect::<Vec<_>>();
+ // sort so offsets are in order. This improves efficiency of loading the accounts.
+ offsets.sort_unstable();
+ let dead_bytes = store.accounts.get_account_sizes(&offsets).iter().sum();
+ store.remove_accounts(dead_bytes, reset_accounts, offsets.len());
+ if Self::is_shrinking_productive(*slot, &store)
+ && self.is_candidate_for_shrink(&store)
+ {
+ // Checking that this single storage entry is ready for shrinking,
+ // should be a sufficient indication that the slot is ready to be shrunk
+ // because slots should only have one storage entry, namely the one that was
+ // created by `flush_slot_cache()`.
+ new_shrink_candidates.insert(*slot); + } + }); + self.clean_accounts_stats + .get_account_sizes_us + .fetch_add(us, Ordering::Relaxed); } } }); From 2bdabfafa08e78914a22ef158827107e145f4277 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 23 Aug 2024 15:14:31 -0400 Subject: [PATCH 205/529] Checks ref count and slot list again before flushing index entry (#2722) --- .../src/accounts_index/in_mem_accounts_index.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index c19e15d87b35d5..051769ad9e55ad 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -1255,6 +1255,16 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex>(), - v.ref_count(), + ref_count, ), ) }; From acf5baf0f8e3313e6ccd074d28b9f944e76f3183 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Sat, 24 Aug 2024 04:30:14 +0900 Subject: [PATCH 206/529] [zk-sdk] Instantiate `From` for elgamal pod types (#2711) --- zk-sdk/src/encryption/pod/auth_encryption.rs | 7 +++- zk-sdk/src/encryption/pod/elgamal.rs | 10 +++++- zk-sdk/src/encryption/pod/grouped_elgamal.rs | 35 +++++++++++++++++++- zk-sdk/src/encryption/pod/mod.rs | 11 ++++++ zk-sdk/src/encryption/pod/pedersen.rs | 20 ++++++++++- 5 files changed, 79 insertions(+), 4 deletions(-) diff --git a/zk-sdk/src/encryption/pod/auth_encryption.rs b/zk-sdk/src/encryption/pod/auth_encryption.rs index abfd8d782105a8..d18e82f6b5e60c 100644 --- a/zk-sdk/src/encryption/pod/auth_encryption.rs +++ b/zk-sdk/src/encryption/pod/auth_encryption.rs @@ -3,7 +3,10 @@ #[cfg(not(target_os = "solana"))] use crate::{encryption::auth_encryption::AeCiphertext, errors::AuthenticatedEncryptionError}; use { - crate::encryption::{pod::impl_from_str, AE_CIPHERTEXT_LEN}, + crate::encryption::{ + pod::{impl_from_bytes, impl_from_str}, + AE_CIPHERTEXT_LEN, + }, base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::{Pod, Zeroable}, std::fmt, @@ -41,6 +44,8 @@ impl_from_str!( BASE64_LEN = AE_CIPHERTEXT_MAX_BASE64_LEN ); +impl_from_bytes!(TYPE = PodAeCiphertext, BYTES_LEN = AE_CIPHERTEXT_LEN); + impl Default for PodAeCiphertext { fn default() -> Self { Self::zeroed() diff --git a/zk-sdk/src/encryption/pod/elgamal.rs b/zk-sdk/src/encryption/pod/elgamal.rs index 9c70724307d43c..10874fe4cea118 100644 --- a/zk-sdk/src/encryption/pod/elgamal.rs +++ b/zk-sdk/src/encryption/pod/elgamal.rs @@ -2,7 +2,8 @@ use { crate::encryption::{ - pod::impl_from_str, DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, ELGAMAL_PUBKEY_LEN, + pod::{impl_from_bytes, impl_from_str}, + DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, ELGAMAL_PUBKEY_LEN, }, base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::Zeroable, @@ -52,6 +53,11 @@ impl_from_str!( BASE64_LEN = ELGAMAL_CIPHERTEXT_MAX_BASE64_LEN ); +impl_from_bytes!( + TYPE = PodElGamalCiphertext, + BYTES_LEN = ELGAMAL_CIPHERTEXT_LEN +); + #[cfg(not(target_os = "solana"))] impl From for PodElGamalCiphertext { fn from(decoded_ciphertext: ElGamalCiphertext) -> Self { @@ -91,6 +97,8 @@ impl_from_str!( BASE64_LEN = ELGAMAL_PUBKEY_MAX_BASE64_LEN ); +impl_from_bytes!(TYPE = PodElGamalPubkey, BYTES_LEN = ELGAMAL_PUBKEY_LEN); + #[cfg(not(target_os = "solana"))] impl From for PodElGamalPubkey { fn from(decoded_pubkey: ElGamalPubkey) -> Self { diff --git a/zk-sdk/src/encryption/pod/grouped_elgamal.rs b/zk-sdk/src/encryption/pod/grouped_elgamal.rs index 25825bbb474a6d..7d437bc2bc6933 
100644 --- a/zk-sdk/src/encryption/pod/grouped_elgamal.rs +++ b/zk-sdk/src/encryption/pod/grouped_elgamal.rs @@ -5,15 +5,25 @@ use crate::encryption::grouped_elgamal::GroupedElGamalCiphertext; use { crate::{ encryption::{ - pod::{elgamal::PodElGamalCiphertext, pedersen::PodPedersenCommitment}, + pod::{ + elgamal::PodElGamalCiphertext, impl_from_bytes, impl_from_str, + pedersen::PodPedersenCommitment, + }, DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, PEDERSEN_COMMITMENT_LEN, }, errors::ElGamalError, }, + base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::Zeroable, std::fmt, }; +/// Maximum length of a base64 encoded grouped ElGamal ciphertext with 2 handles +const GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES_MAX_BASE64_LEN: usize = 132; + +/// Maximum length of a base64 encoded grouped ElGamal ciphertext with 3 handles +const GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES_MAX_BASE64_LEN: usize = 176; + macro_rules! impl_extract { (TYPE = $type:ident) => { impl $type { @@ -78,6 +88,18 @@ impl Default for PodGroupedElGamalCiphertext2Handles { Self::zeroed() } } + +impl_from_str!( + TYPE = PodGroupedElGamalCiphertext2Handles, + BYTES_LEN = GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES, + BASE64_LEN = GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodGroupedElGamalCiphertext2Handles, + BYTES_LEN = GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES +); + #[cfg(not(target_os = "solana"))] impl From> for PodGroupedElGamalCiphertext2Handles { fn from(decoded_ciphertext: GroupedElGamalCiphertext<2>) -> Self { @@ -115,6 +137,17 @@ impl Default for PodGroupedElGamalCiphertext3Handles { } } +impl_from_str!( + TYPE = PodGroupedElGamalCiphertext3Handles, + BYTES_LEN = GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES, + BASE64_LEN = GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodGroupedElGamalCiphertext3Handles, + BYTES_LEN = GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES +); + #[cfg(not(target_os = "solana"))] impl From> for PodGroupedElGamalCiphertext3Handles { fn from(decoded_ciphertext: GroupedElGamalCiphertext<3>) -> Self { diff --git a/zk-sdk/src/encryption/pod/mod.rs b/zk-sdk/src/encryption/pod/mod.rs index 850a9a40aa42d9..928f657b939317 100644 --- a/zk-sdk/src/encryption/pod/mod.rs +++ b/zk-sdk/src/encryption/pod/mod.rs @@ -26,3 +26,14 @@ macro_rules! impl_from_str { }; } pub(crate) use impl_from_str; + +macro_rules! impl_from_bytes { + (TYPE = $type:ident, BYTES_LEN = $bytes_len:expr) => { + impl std::convert::From<[u8; $bytes_len]> for $type { + fn from(bytes: [u8; $bytes_len]) -> Self { + Self(bytes) + } + } + }; +} +pub(crate) use impl_from_bytes; diff --git a/zk-sdk/src/encryption/pod/pedersen.rs b/zk-sdk/src/encryption/pod/pedersen.rs index faf39ca949bc09..aecac312b78090 100644 --- a/zk-sdk/src/encryption/pod/pedersen.rs +++ b/zk-sdk/src/encryption/pod/pedersen.rs @@ -1,7 +1,11 @@ //! Plain Old Data type for the Pedersen commitment scheme. use { - crate::encryption::PEDERSEN_COMMITMENT_LEN, + crate::encryption::{ + pod::{impl_from_bytes, impl_from_str}, + PEDERSEN_COMMITMENT_LEN, + }, + base64::{prelude::BASE64_STANDARD, Engine}, bytemuck_derive::{Pod, Zeroable}, std::fmt, }; @@ -11,6 +15,9 @@ use { curve25519_dalek::ristretto::CompressedRistretto, }; +/// Maximum length of a base64 encoded ElGamal public key +const PEDERSEN_COMMITMENT_MAX_BASE64_LEN: usize = 44; + /// The `PedersenCommitment` type as a `Pod`. 
#[derive(Clone, Copy, Default, Pod, Zeroable, PartialEq, Eq)] #[repr(transparent)] @@ -29,6 +36,17 @@ impl From for PodPedersenCommitment { } } +impl_from_str!( + TYPE = PodPedersenCommitment, + BYTES_LEN = PEDERSEN_COMMITMENT_LEN, + BASE64_LEN = PEDERSEN_COMMITMENT_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodPedersenCommitment, + BYTES_LEN = PEDERSEN_COMMITMENT_LEN +); + // For proof verification, interpret pod::PedersenCommitment directly as CompressedRistretto #[cfg(not(target_os = "solana"))] impl From for CompressedRistretto { From 1588bca45f021ccc0687efd64e0f3683c6be6166 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:30:36 -0700 Subject: [PATCH 207/529] ff cleanup: remove enable_gossip_duplicate_proof_ingestion (#2707) ff cleanup: remove enable_gossip_duplicate_proof_ingestion, it's active everywhere. --- gossip/src/duplicate_shred_handler.rs | 82 +++------------------------ 1 file changed, 8 insertions(+), 74 deletions(-) diff --git a/gossip/src/duplicate_shred_handler.rs b/gossip/src/duplicate_shred_handler.rs index edf62aaf4276fc..84134d76f241bd 100644 --- a/gossip/src/duplicate_shred_handler.rs +++ b/gossip/src/duplicate_shred_handler.rs @@ -9,7 +9,6 @@ use { solana_runtime::bank_forks::BankForks, solana_sdk::{ clock::{Epoch, Slot}, - feature_set, pubkey::Pubkey, }, std::{ @@ -141,30 +140,16 @@ impl DuplicateShredHandler { shred1.into_payload(), shred2.into_payload(), )?; - if self.should_notify_state_machine(slot) { - // Notify duplicate consensus state machine - self.duplicate_slots_sender - .send(slot) - .map_err(|_| Error::DuplicateSlotSenderFailure)?; - } + // Notify duplicate consensus state machine + self.duplicate_slots_sender + .send(slot) + .map_err(|_| Error::DuplicateSlotSenderFailure)?; } self.consumed.insert(slot, true); } Ok(()) } - fn should_notify_state_machine(&self, slot: Slot) -> bool { - let root_bank = self.bank_forks.read().unwrap().root_bank(); - let Some(activated_slot) = root_bank - .feature_set - .activated_slot(&feature_set::enable_gossip_duplicate_proof_ingestion::id()) - else { - return false; - }; - root_bank.epoch_schedule().get_epoch(slot) - > root_bank.epoch_schedule().get_epoch(activated_slot) - } - fn should_consume_slot(&mut self, slot: Slot) -> bool { slot > self.last_root && slot < self.last_root.saturating_add(self.cached_slots_in_epoch) @@ -304,9 +289,7 @@ mod tests { let shred_version = 0; let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; - let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id()); - let slots_in_epoch = bank.get_epoch_info().slots_in_epoch; + let bank = Bank::new_for_tests(&genesis_config); let bank_forks_arc = BankForks::new_rw_arc(bank); { let mut bank_forks = bank_forks_arc.write().unwrap(); @@ -321,8 +304,7 @@ mod tests { &bank_forks_arc.read().unwrap().working_bank(), )); let (sender, receiver) = unbounded(); - // The feature will only be activated at Epoch 1. - let start_slot: Slot = slots_in_epoch + 1; + let start_slot: Slot = 10; let mut duplicate_shred_handler = DuplicateShredHandler::new( blockstore.clone(), @@ -400,9 +382,7 @@ mod tests { let shred_version = 0; let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); let GenesisConfigInfo { genesis_config, .. 
} = genesis_config_info; - let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id()); - let slots_in_epoch = bank.get_epoch_info().slots_in_epoch; + let bank = Bank::new_for_tests(&genesis_config); let bank_forks_arc = BankForks::new_rw_arc(bank); { let mut bank_forks = bank_forks_arc.write().unwrap(); @@ -424,8 +404,7 @@ mod tests { sender, shred_version, ); - // The feature will only be activated at Epoch 1. - let start_slot: Slot = slots_in_epoch + 1; + let start_slot: Slot = 10; // This proof will not be accepted because num_chunks is too large. let chunks = create_duplicate_proof( @@ -483,49 +462,4 @@ mod tests { assert!(blockstore.has_duplicate_shreds_in_slot(start_slot)); assert_eq!(receiver.try_iter().collect_vec(), vec![start_slot]); } - - #[test] - fn test_feature_disabled() { - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); - let my_keypair = Arc::new(Keypair::new()); - let my_pubkey = my_keypair.pubkey(); - let shred_version = 0; - let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000); - let GenesisConfigInfo { genesis_config, .. } = genesis_config_info; - let mut bank = Bank::new_for_tests(&genesis_config); - bank.deactivate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id()); - assert!(!bank - .feature_set - .is_active(&feature_set::enable_gossip_duplicate_proof_ingestion::id())); - let bank_forks_arc = BankForks::new_rw_arc(bank); - let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank( - &bank_forks_arc.read().unwrap().working_bank(), - )); - let (sender, receiver) = unbounded(); - - let mut duplicate_shred_handler = DuplicateShredHandler::new( - blockstore.clone(), - leader_schedule_cache, - bank_forks_arc, - sender, - shred_version, - ); - let chunks = create_duplicate_proof( - my_keypair.clone(), - None, - 1, - None, - DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, - shred_version, - ) - .unwrap(); - assert!(!blockstore.has_duplicate_shreds_in_slot(1)); - for chunk in chunks { - duplicate_shred_handler.handle(chunk); - } - // If feature disabled, blockstore gets signal but state machine doesn't see it. 
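// A small sketch, under stated assumptions, of the gate this test covered:
// a slot was only forwarded to the state machine once its epoch was strictly
// past the feature's activation epoch, and never when the feature was not
// activated at all. With the feature active everywhere, both the gate and
// this disabled-path test become dead code. Names here are illustrative.
fn past_activation(slot_epoch: u64, activation_epoch: Option<u64>) -> bool {
    // None means the feature never activated, so nothing is forwarded.
    activation_epoch.map_or(false, |e| slot_epoch > e)
}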
- assert!(blockstore.has_duplicate_shreds_in_slot(1));
- assert!(receiver.try_iter().collect_vec().is_empty());
- }
 }

From 52e73297d8edf52343906b4f2776eee66b106378 Mon Sep 17 00:00:00 2001
From: Andrei Silviu Dragnea
Date: Fri, 23 Aug 2024 21:47:15 +0100
Subject: [PATCH 208/529] Program Test: Fix invoke_builtin_function unwinding (#2632)

---
 program-test/src/lib.rs | 28 ++++++++++++++++++-------
 program-test/tests/panic.rs | 42 +++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+), 8 deletions(-)
 create mode 100644 program-test/tests/panic.rs

diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs
index eec8e215b833a0..c0f0a32d0ddc7e 100644
--- a/program-test/src/lib.rs
+++ b/program-test/src/lib.rs
@@ -56,6 +56,7 @@ use {
 fs::File,
 io::{self, Read},
 mem::transmute,
+ panic::AssertUnwindSafe,
 path::{Path, PathBuf},
 sync::{
 atomic::{AtomicBool, Ordering},
@@ -140,12 +141,25 @@ pub fn invoke_builtin_function(
 unsafe { deserialize(&mut parameter_bytes.as_slice_mut()[0] as *mut u8) };

 // Execute the program
- builtin_function(program_id, &account_infos, input).map_err(|err| {
- let err = InstructionError::from(u64::from(err));
- stable_log::program_failure(&log_collector, program_id, &err);
- let err: Box<dyn std::error::Error> = Box::new(err);
- err
- })?;
+ match std::panic::catch_unwind(AssertUnwindSafe(|| {
+ builtin_function(program_id, &account_infos, input)
+ })) {
+ Ok(program_result) => {
+ program_result.map_err(|program_error| {
+ let err = InstructionError::from(u64::from(program_error));
+ stable_log::program_failure(&log_collector, program_id, &err);
+ let err: Box<dyn std::error::Error> = Box::new(err);
+ err
+ })?;
+ }
+ Err(_panic_error) => {
+ let err = InstructionError::ProgramFailedToComplete;
+ stable_log::program_failure(&log_collector, program_id, &err);
+ let err: Box<dyn std::error::Error> = Box::new(err);
+ Err(err)?;
+ }
+ };
+
 stable_log::program_success(&log_collector, program_id);

 // Lookup table for AccountInfo
@@ -724,8 +738,6 @@ impl ProgramTest {
 // If SBF is not required (i.e., we were invoked with `test`), use the provided
 // processor function as is.
- //
- // TODO: figure out why tests hang if a processor panics when running native code.
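// A condensed sketch of the unwinding fix above, assuming a closure that
// returns the program result: catch_unwind turns a panic inside the builtin
// into an ordinary error instead of letting it unwind into the test harness.
use std::panic::{catch_unwind, AssertUnwindSafe};

fn run_guarded(f: impl FnOnce() -> Result<(), String>) -> Result<(), String> {
    match catch_unwind(AssertUnwindSafe(f)) {
        // The closure ran to completion; propagate its own result.
        Ok(result) => result,
        // The closure panicked; report a failure instead of aborting the host.
        Err(_panic_payload) => Err("program failed to complete".to_string()),
    }
}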
(false, _, Some(builtin_function)) => { self.add_builtin_program(program_name, program_id, builtin_function) } diff --git a/program-test/tests/panic.rs b/program-test/tests/panic.rs new file mode 100644 index 00000000000000..de8e74b3d40902 --- /dev/null +++ b/program-test/tests/panic.rs @@ -0,0 +1,42 @@ +use { + solana_program_test::{processor, ProgramTest}, + solana_sdk::{ + account_info::AccountInfo, + entrypoint::ProgramResult, + instruction::{Instruction, InstructionError}, + pubkey::Pubkey, + signature::Signer, + transaction::{Transaction, TransactionError}, + }, +}; + +fn panic(_program_id: &Pubkey, _accounts: &[AccountInfo], _input: &[u8]) -> ProgramResult { + panic!("I panicked"); +} + +#[tokio::test] +async fn panic_test() { + let program_id = Pubkey::new_unique(); + + let program_test = ProgramTest::new("panic", program_id, processor!(panic)); + + let context = program_test.start_with_context().await; + + let instruction = Instruction::new_with_bytes(program_id, &[], vec![]); + + let transaction = Transaction::new_signed_with_payer( + &[instruction], + Some(&context.payer.pubkey()), + &[&context.payer], + context.last_blockhash, + ); + assert_eq!( + context + .banks_client + .process_transaction(transaction) + .await + .unwrap_err() + .unwrap(), + TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) + ); +} From 106d4cf399523079f9b214a635142ade99358047 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:47:50 -0300 Subject: [PATCH 209/529] Remove `slot_epoch` from SVM (#2720) --- program-runtime/src/loaded_programs.rs | 5 ----- runtime/src/bank_forks.rs | 9 +-------- svm/tests/mock_bank.rs | 6 +----- 3 files changed, 2 insertions(+), 18 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 89f0ef0d5304a0..db5bb8224bff4f 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -55,11 +55,6 @@ pub enum BlockRelation { pub trait ForkGraph { /// Returns the BlockRelation of A to B fn relationship(&self, a: Slot, b: Slot) -> BlockRelation; - - /// Returns the epoch of the given slot - fn slot_epoch(&self, _slot: Slot) -> Option { - Some(0) - } } /// The owner of a programs accounts, thus the loader of a program diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 884fdddcfa616e..aa3d78ea128ecd 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -13,10 +13,7 @@ use { log::*, solana_measure::measure::Measure, solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, - solana_sdk::{ - clock::{Epoch, Slot}, - hash::Hash, - }, + solana_sdk::{clock::Slot, hash::Hash}, std::{ collections::{hash_map::Entry, HashMap, HashSet}, ops::Index, @@ -721,10 +718,6 @@ impl ForkGraph for BankForks { }) .unwrap_or(BlockRelation::Unknown) } - - fn slot_epoch(&self, slot: Slot) -> Option { - self.banks.get(&slot).map(|bank| bank.epoch()) - } } #[cfg(test)] diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 169ac63cf8854b..99e9b9162067f2 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -17,7 +17,7 @@ use { solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - clock::{Clock, Epoch, UnixTimestamp}, + clock::{Clock, UnixTimestamp}, feature_set::FeatureSet, native_loader, pubkey::Pubkey, @@ -49,10 +49,6 @@ impl ForkGraph for MockForkGraph { Ordering::Greater 
=> BlockRelation::Descendant, } } - - fn slot_epoch(&self, _slot: Slot) -> Option { - Some(0) - } } #[derive(Default, Clone)] From d109065545fb34970537334e82d8addee4d18959 Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 23 Aug 2024 14:14:20 -0700 Subject: [PATCH 210/529] SVM: add new `solana-svm-rent-collector` crate (#2688) --- Cargo.lock | 7 + Cargo.toml | 2 + svm-rent-collector/Cargo.toml | 13 + svm-rent-collector/src/lib.rs | 6 + svm-rent-collector/src/rent_state.rs | 15 ++ svm-rent-collector/src/svm_rent_collector.rs | 137 ++++++++++ .../src/svm_rent_collector/rent_collector.rs | 255 ++++++++++++++++++ 7 files changed, 435 insertions(+) create mode 100644 svm-rent-collector/Cargo.toml create mode 100644 svm-rent-collector/src/lib.rs create mode 100644 svm-rent-collector/src/rent_state.rs create mode 100644 svm-rent-collector/src/svm_rent_collector.rs create mode 100644 svm-rent-collector/src/svm_rent_collector/rent_collector.rs diff --git a/Cargo.lock b/Cargo.lock index cfe5f83e20e55d..5b12b805c98838 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7747,6 +7747,13 @@ dependencies = [ "termcolor", ] +[[package]] +name = "solana-svm-rent-collector" +version = "2.1.0" +dependencies = [ + "solana-sdk", +] + [[package]] name = "solana-svm-transaction" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 2a772d3ee85d3d..ae3e93fd024d12 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,6 +119,7 @@ members = [ "streamer", "svm", "svm-conformance", + "svm-rent-collector", "svm-transaction", "svm/examples/paytube", "test-validator", @@ -434,6 +435,7 @@ solana-streamer = { path = "streamer", version = "=2.1.0" } solana-svm = { path = "svm", version = "=2.1.0" } solana-svm-conformance = { path = "svm-conformance", version = "=2.1.0" } solana-svm-example-paytube = { path = "svm/examples/paytube", version = "=2.1.0" } +solana-svm-rent-collector = { path = "svm-rent-collector", version = "=2.1.0" } solana-svm-transaction = { path = "svm-transaction", version = "=2.1.0" } solana-system-program = { path = "programs/system", version = "=2.1.0" } solana-test-validator = { path = "test-validator", version = "=2.1.0" } diff --git a/svm-rent-collector/Cargo.toml b/svm-rent-collector/Cargo.toml new file mode 100644 index 00000000000000..426f06593a3eac --- /dev/null +++ b/svm-rent-collector/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "solana-svm-rent-collector" +description = "Solana SVM Rent Collector" +documentation = "https://docs.rs/solana-svm-rent-collector" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-sdk = { workspace = true } diff --git a/svm-rent-collector/src/lib.rs b/svm-rent-collector/src/lib.rs new file mode 100644 index 00000000000000..5038410e9e9f6f --- /dev/null +++ b/svm-rent-collector/src/lib.rs @@ -0,0 +1,6 @@ +//! Solana SVM Rent Collector. +//! +//! Rent management for SVM. + +pub mod rent_state; +pub mod svm_rent_collector; diff --git a/svm-rent-collector/src/rent_state.rs b/svm-rent-collector/src/rent_state.rs new file mode 100644 index 00000000000000..f4ec54a0532a24 --- /dev/null +++ b/svm-rent-collector/src/rent_state.rs @@ -0,0 +1,15 @@ +//! Account rent state. + +/// Rent state of a Solana account. 
+#[derive(Debug, PartialEq, Eq)] +pub enum RentState { + /// account.lamports == 0 + Uninitialized, + /// 0 < account.lamports < rent-exempt-minimum + RentPaying { + lamports: u64, // account.lamports() + data_size: usize, // account.data().len() + }, + /// account.lamports >= rent-exempt-minimum + RentExempt, +} diff --git a/svm-rent-collector/src/svm_rent_collector.rs b/svm-rent-collector/src/svm_rent_collector.rs new file mode 100644 index 00000000000000..6decf8fbe6a36c --- /dev/null +++ b/svm-rent-collector/src/svm_rent_collector.rs @@ -0,0 +1,137 @@ +//! Plugin trait for rent collection within the Solana SVM. + +use { + crate::rent_state::RentState, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + clock::Epoch, + pubkey::Pubkey, + rent::{Rent, RentDue}, + rent_collector::CollectedInfo, + transaction::{Result, TransactionError}, + transaction_context::{IndexOfAccount, TransactionContext}, + }, +}; + +mod rent_collector; + +/// Rent collector trait. Represents an entity that can evaluate the rent state +/// of an account, determine rent due, and collect rent. +/// +/// Implementors are responsible for evaluating rent due and collecting rent +/// from accounts, if required. Methods for evaluating account rent state have +/// default implementations, which can be overridden for customized rent +/// management. +pub trait SVMRentCollector { + /// Check rent state transition for an account in a transaction. + /// + /// This method has a default implementation that calls into + /// `check_rent_state_with_account`. + fn check_rent_state( + &self, + pre_rent_state: Option<&RentState>, + post_rent_state: Option<&RentState>, + transaction_context: &TransactionContext, + index: IndexOfAccount, + ) -> Result<()> { + if let Some((pre_rent_state, post_rent_state)) = pre_rent_state.zip(post_rent_state) { + let expect_msg = + "account must exist at TransactionContext index if rent-states are Some"; + self.check_rent_state_with_account( + pre_rent_state, + post_rent_state, + transaction_context + .get_key_of_account_at_index(index) + .expect(expect_msg), + &transaction_context + .get_account_at_index(index) + .expect(expect_msg) + .borrow(), + index, + )?; + } + Ok(()) + } + + /// Check rent state transition for an account directly. + /// + /// This method has a default implementation that checks whether the + /// transition is allowed and returns an error if it is not. It also + /// verifies that the account is not the incinerator. + fn check_rent_state_with_account( + &self, + pre_rent_state: &RentState, + post_rent_state: &RentState, + address: &Pubkey, + _account_state: &AccountSharedData, + account_index: IndexOfAccount, + ) -> Result<()> { + if !solana_sdk::incinerator::check_id(address) + && !self.transition_allowed(pre_rent_state, post_rent_state) + { + let account_index = account_index as u8; + Err(TransactionError::InsufficientFundsForRent { account_index }) + } else { + Ok(()) + } + } + + /// Collect rent from an account. + fn collect_rent(&self, address: &Pubkey, account: &mut AccountSharedData) -> CollectedInfo; + + /// Determine the rent state of an account. + /// + /// This method has a default implementation that treats accounts with zero + /// lamports as uninitialized and uses the implemented `get_rent` to + /// determine whether an account is rent-exempt. 
+ fn get_account_rent_state(&self, account: &AccountSharedData) -> RentState { + if account.lamports() == 0 { + RentState::Uninitialized + } else if self + .get_rent() + .is_exempt(account.lamports(), account.data().len()) + { + RentState::RentExempt + } else { + RentState::RentPaying { + data_size: account.data().len(), + lamports: account.lamports(), + } + } + } + + /// Get the rent collector's rent instance. + fn get_rent(&self) -> &Rent; + + /// Get the rent due for an account. + fn get_rent_due(&self, lamports: u64, data_len: usize, account_rent_epoch: Epoch) -> RentDue; + + /// Check whether a transition from the pre_rent_state to the + /// post_rent_state is valid. + /// + /// This method has a default implementation that allows transitions from + /// any state to `RentState::Uninitialized` or `RentState::RentExempt`. + /// Pre-state `RentState::RentPaying` can only transition to + /// `RentState::RentPaying` if the data size remains the same and the + /// account is not credited. + fn transition_allowed(&self, pre_rent_state: &RentState, post_rent_state: &RentState) -> bool { + match post_rent_state { + RentState::Uninitialized | RentState::RentExempt => true, + RentState::RentPaying { + data_size: post_data_size, + lamports: post_lamports, + } => { + match pre_rent_state { + RentState::Uninitialized | RentState::RentExempt => false, + RentState::RentPaying { + data_size: pre_data_size, + lamports: pre_lamports, + } => { + // Cannot remain RentPaying if resized or credited. + post_data_size == pre_data_size && post_lamports <= pre_lamports + } + } + } + } + } +} diff --git a/svm-rent-collector/src/svm_rent_collector/rent_collector.rs b/svm-rent-collector/src/svm_rent_collector/rent_collector.rs new file mode 100644 index 00000000000000..610bb4c63f1ca5 --- /dev/null +++ b/svm-rent-collector/src/svm_rent_collector/rent_collector.rs @@ -0,0 +1,255 @@ +//! Implementation of `SVMRentCollector` for `RentCollector` from the Solana +//! SDK. 
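// A minimal sketch, in plain stand-in types rather than the SVM API, of the
// transition rule that the trait's default transition_allowed encodes: any
// state may become Uninitialized or RentExempt, but RentPaying may only
// persist when the data size is unchanged and the lamports did not grow.
enum State {
    Uninitialized,
    RentExempt,
    RentPaying { data_size: usize, lamports: u64 },
}

fn allowed(pre: &State, post: &State) -> bool {
    match (pre, post) {
        (_, State::Uninitialized) | (_, State::RentExempt) => true,
        (
            State::RentPaying { data_size: d0, lamports: l0 },
            State::RentPaying { data_size: d1, lamports: l1 },
        ) => d1 == d0 && l1 <= l0,
        // Uninitialized or RentExempt accounts may not become RentPaying.
        _ => false,
    }
}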
+ +use { + crate::svm_rent_collector::SVMRentCollector, + solana_sdk::{ + account::AccountSharedData, + clock::Epoch, + pubkey::Pubkey, + rent::{Rent, RentDue}, + rent_collector::{CollectedInfo, RentCollector}, + }, +}; + +impl SVMRentCollector for RentCollector { + fn collect_rent(&self, address: &Pubkey, account: &mut AccountSharedData) -> CollectedInfo { + self.collect_from_existing_account(address, account) + } + + fn get_rent(&self) -> &Rent { + &self.rent + } + + fn get_rent_due(&self, lamports: u64, data_len: usize, account_rent_epoch: Epoch) -> RentDue { + self.get_rent_due(lamports, data_len, account_rent_epoch) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::rent_state::RentState, + solana_sdk::{ + account::ReadableAccount, + clock::Epoch, + epoch_schedule::EpochSchedule, + pubkey::Pubkey, + transaction::TransactionError, + transaction_context::{IndexOfAccount, TransactionContext}, + }, + }; + + #[test] + fn test_get_account_rent_state() { + let program_id = Pubkey::new_unique(); + let uninitialized_account = AccountSharedData::new(0, 0, &Pubkey::default()); + + let account_data_size = 100; + + let rent_collector = RentCollector::new( + Epoch::default(), + EpochSchedule::default(), + 0.0, + Rent::free(), + ); + + let rent_exempt_account = AccountSharedData::new(1, account_data_size, &program_id); // if rent is free, all accounts with non-zero lamports and non-empty data are rent-exempt + + assert_eq!( + rent_collector.get_account_rent_state(&uninitialized_account), + RentState::Uninitialized + ); + assert_eq!( + rent_collector.get_account_rent_state(&rent_exempt_account), + RentState::RentExempt + ); + + let rent = Rent::default(); + let rent_minimum_balance = rent.minimum_balance(account_data_size); + let rent_paying_account = AccountSharedData::new( + rent_minimum_balance.saturating_sub(1), + account_data_size, + &program_id, + ); + let rent_exempt_account = AccountSharedData::new( + rent.minimum_balance(account_data_size), + account_data_size, + &program_id, + ); + let rent_collector = + RentCollector::new(Epoch::default(), EpochSchedule::default(), 0.0, rent); + + assert_eq!( + rent_collector.get_account_rent_state(&uninitialized_account), + RentState::Uninitialized + ); + assert_eq!( + rent_collector.get_account_rent_state(&rent_paying_account), + RentState::RentPaying { + data_size: account_data_size, + lamports: rent_paying_account.lamports(), + } + ); + assert_eq!( + rent_collector.get_account_rent_state(&rent_exempt_account), + RentState::RentExempt + ); + } + + #[test] + fn test_transition_allowed() { + let rent_collector = RentCollector::default(); + + let post_rent_state = RentState::Uninitialized; + assert!(rent_collector.transition_allowed(&RentState::Uninitialized, &post_rent_state)); + assert!(rent_collector.transition_allowed(&RentState::RentExempt, &post_rent_state)); + assert!(rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 0, + lamports: 1, + }, + &post_rent_state + )); + + let post_rent_state = RentState::RentExempt; + assert!(rent_collector.transition_allowed(&RentState::Uninitialized, &post_rent_state)); + assert!(rent_collector.transition_allowed(&RentState::RentExempt, &post_rent_state)); + assert!(rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 0, + lamports: 1, + }, + &post_rent_state + )); + + let post_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 5, + }; + + // These transitions are not allowed. 
+ assert!(!rent_collector.transition_allowed(&RentState::Uninitialized, &post_rent_state)); + assert!(!rent_collector.transition_allowed(&RentState::RentExempt, &post_rent_state)); + + // Transition is not allowed if data size changes. + assert!(!rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 3, + lamports: 5, + }, + &post_rent_state + )); + assert!(!rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 1, + lamports: 5, + }, + &post_rent_state + )); + + // Transition is always allowed if there is no account data resize or + // change in account's lamports. + assert!(rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 2, + lamports: 5, + }, + &post_rent_state + )); + // Transition is always allowed if there is no account data resize and + // account's lamports is reduced. + assert!(rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 2, + lamports: 7, + }, + &post_rent_state + )); + // Transition is not allowed if the account is credited with more + // lamports and remains rent-paying. + assert!(!rent_collector.transition_allowed( + &RentState::RentPaying { + data_size: 2, + lamports: 3, + }, + &post_rent_state + )); + } + + #[test] + fn test_check_rent_state_with_account() { + let rent_collector = RentCollector::default(); + + let pre_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 3, + }; + + let post_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 5, + }; + let account_index = 2 as IndexOfAccount; + let key = Pubkey::new_unique(); + let result = rent_collector.check_rent_state_with_account( + &pre_rent_state, + &post_rent_state, + &key, + &AccountSharedData::default(), + account_index, + ); + assert_eq!( + result.err(), + Some(TransactionError::InsufficientFundsForRent { + account_index: account_index as u8 + }) + ); + + let result = rent_collector.check_rent_state_with_account( + &pre_rent_state, + &post_rent_state, + &solana_sdk::incinerator::id(), + &AccountSharedData::default(), + account_index, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_check_rent_state() { + let rent_collector = RentCollector::default(); + + let context = TransactionContext::new( + vec![(Pubkey::new_unique(), AccountSharedData::default())], + Rent::default(), + 20, + 20, + ); + + let pre_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 3, + }; + + let post_rent_state = RentState::RentPaying { + data_size: 2, + lamports: 5, + }; + + let result = rent_collector.check_rent_state( + Some(&pre_rent_state), + Some(&post_rent_state), + &context, + 0, + ); + assert_eq!( + result.err(), + Some(TransactionError::InsufficientFundsForRent { account_index: 0 }) + ); + + let result = rent_collector.check_rent_state(None, Some(&post_rent_state), &context, 0); + assert!(result.is_ok()); + } +} From 82aeef812c484933947694f21dbe45b4f0505d81 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 23 Aug 2024 14:20:32 -0700 Subject: [PATCH 211/529] wen_restart: Fix the epoch_stakes used in calculation. (#2376) * wen_restart: Fix the epoch_stakes used in calculation. * Fix a bad merge. * Remove EpochStakesCache, it only caches epoch stakes from root_bank, better to just keep root_bank around. * Split aggregate into smaller functions. * Switch to node_id_to_stake which is simpler. * Rename update_slots_stake_map and switch to epoch_total_stake(). * Remove unnecessary utility functions. 
* Do not modify epoch_info_vec, just init it with two epochs we will consider.
* Switch to epoch_node_id_to_stake()
* Add test for divergence at Epoch boundary.
* Make linter happy.
* Wait for the new Epoch if > 1/3 of the validators voted for some slot in the new Epoch.
* Switch to voted_percent and voted_for_this_epoch_percent.
* Fix a bad merge.
* Fix a bad merge.
* Change constant format.
* Do not loop through the whole table.
* Address reviewer feedback.
* Address reviewer comments.

---
 Cargo.lock | 1 +
 wen-restart/Cargo.toml | 1 +
 wen-restart/proto/wen_restart.proto | 9 +-
 wen-restart/src/heaviest_fork_aggregate.rs | 4 +-
 .../src/last_voted_fork_slots_aggregate.rs | 524 +++++++++++++-----
 wen-restart/src/wen_restart.rs | 418 ++++++++++++--
 6 files changed, 780 insertions(+), 177 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5b12b805c98838..e42722486c837d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8152,6 +8152,7 @@ dependencies = [
 "solana-sdk",
 "solana-streamer",
 "solana-timings",
+ "solana-vote",
 "solana-vote-program",
 "tempfile",
 ]
diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml
index f5fbf99a1e20c8..ff755c75331124 100644
--- a/wen-restart/Cargo.toml
+++ b/wen-restart/Cargo.toml
@@ -34,6 +34,7 @@ solana-entry = { workspace = true }
 solana-logger = { workspace = true }
 solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
 solana-streamer = { workspace = true }
+solana-vote = { workspace = true }
 tempfile = { workspace = true }

 [build-dependencies]
diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto
index e3fc0743ef5dc8..856e7df9ef114a 100644
--- a/wen-restart/proto/wen_restart.proto
+++ b/wen-restart/proto/wen_restart.proto
@@ -21,9 +21,16 @@ message LastVotedForkSlotsAggregateRecord {
 optional LastVotedForkSlotsAggregateFinal final_result = 2;
 }

+message LastVotedForkSlotsEpochInfoRecord {
+ uint64 epoch = 1;
+ uint64 total_stake = 2;
+ uint64 actively_voting_stake = 3;
+ uint64 actively_voting_for_this_epoch_stake = 4;
+}
+
 message LastVotedForkSlotsAggregateFinal {
 map<uint64, uint64> slots_stake_map = 1;
- uint64 total_active_stake = 2;
+ repeated LastVotedForkSlotsEpochInfoRecord epoch_infos = 2;
 }

 message HeaviestForkRecord {
diff --git a/wen-restart/src/heaviest_fork_aggregate.rs b/wen-restart/src/heaviest_fork_aggregate.rs
index dac13bd8274568..d5e454b6eeebe1 100644
--- a/wen-restart/src/heaviest_fork_aggregate.rs
+++ b/wen-restart/src/heaviest_fork_aggregate.rs
@@ -15,7 +15,8 @@ pub(crate) struct HeaviestForkAggregate {
 supermajority_threshold: f64,
 my_shred_version: u16,
 my_pubkey: Pubkey,
- // TODO(wen): using local root's EpochStakes, need to fix if crossing Epoch boundary.
+ // We use the epoch_stakes of the Epoch our heaviest bank is in. Proceed and exit only if
+ // enough validators agree with me.
 epoch_stakes: EpochStakes,
 heaviest_forks: HashMap<Pubkey, RestartHeaviestFork>,
 block_stake_map: HashMap<(Slot, Hash), u64>,
@@ -171,7 +172,6 @@ impl HeaviestForkAggregate {
 Some(record)
 }

- // TODO(wen): use better epoch stake and add a test later.
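// An editorial sketch, with String keys standing in for Pubkeys, of the
// tally performed by total_active_stake below: saturating-sum each active
// peer's stake from the chosen epoch's stake map, counting unknown peers as
// zero stake.
use std::collections::{HashMap, HashSet};

fn tally_active_stake(active: &HashSet<String>, stakes: &HashMap<String, u64>) -> u64 {
    active.iter().fold(0u64, |sum, peer| {
        sum.saturating_add(stakes.get(peer).copied().unwrap_or(0))
    })
}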
pub(crate) fn total_active_stake(&self) -> u64 { self.active_peers.iter().fold(0, |sum: u64, pubkey| { sum.saturating_add(self.epoch_stakes.node_id_to_stake(pubkey).unwrap_or(0)) diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs index 67cd7c1c77ea87..f680dc73238156 100644 --- a/wen-restart/src/last_voted_fork_slots_aggregate.rs +++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs @@ -3,69 +3,121 @@ use { anyhow::Result, log::*, solana_gossip::restart_crds_values::RestartLastVotedForkSlots, - solana_runtime::epoch_stakes::EpochStakes, - solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + solana_runtime::bank::Bank, + solana_sdk::{ + clock::{Epoch, Slot}, + hash::Hash, + pubkey::Pubkey, + }, std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap, HashSet}, str::FromStr, + sync::Arc, }, }; +// If at least 1/3 of the stake has voted for a slot in next Epoch, we think +// the cluster's clock is in sync and everyone will enter the new Epoch soon. +// So we require that we have >80% stake in the new Epoch to exit. +const EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD: f64 = 1f64 / 3f64; + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct LastVotedForkSlotsEpochInfo { + pub epoch: Epoch, + pub total_stake: u64, + // Total stake of active peers in this epoch, no matter they voted for a slot + // in this epoch or not. + pub actively_voting_stake: u64, + // Total stake of active peers which has voted for a slot in this epoch. + pub actively_voting_for_this_epoch_stake: u64, +} + pub(crate) struct LastVotedForkSlotsAggregate { - root_slot: Slot, - repair_threshold: f64, - // TODO(wen): using local root's EpochStakes, need to fix if crossing Epoch boundary. - epoch_stakes: EpochStakes, + // Map each peer pubkey to the epoch of its last vote. 
+ node_to_last_vote_epoch_map: HashMap<Pubkey, Epoch>,
+ epoch_info_vec: Vec<LastVotedForkSlotsEpochInfo>,
 last_voted_fork_slots: HashMap<Pubkey, RestartLastVotedForkSlots>,
- slots_stake_map: HashMap<Slot, u64>,
- active_peers: HashSet<Pubkey>,
- slots_to_repair: HashSet<Slot>,
 my_pubkey: Pubkey,
+ repair_threshold: f64,
+ root_bank: Arc<Bank>,
+ slots_stake_map: HashMap<Slot, u64>,
+ slots_to_repair: BTreeSet<Slot>,
 }

 #[derive(Clone, Debug, PartialEq)]
 pub struct LastVotedForkSlotsFinalResult {
 pub slots_stake_map: HashMap<Slot, u64>,
- pub total_active_stake: u64,
+ pub epoch_info_vec: Vec<LastVotedForkSlotsEpochInfo>,
 }

 impl LastVotedForkSlotsAggregate {
 pub(crate) fn new(
- root_slot: Slot,
+ root_bank: Arc<Bank>,
 repair_threshold: f64,
- epoch_stakes: &EpochStakes,
- last_voted_fork_slots: &Vec<Slot>,
+ my_last_voted_fork_slots: &Vec<Slot>,
 my_pubkey: &Pubkey,
 ) -> Self {
- let mut active_peers = HashSet::new();
- let sender_stake = Self::validator_stake(epoch_stakes, my_pubkey);
- active_peers.insert(*my_pubkey);
 let mut slots_stake_map = HashMap::new();
- for slot in last_voted_fork_slots {
+ let root_slot = root_bank.slot();
+ let root_epoch = root_bank.epoch();
+ for slot in my_last_voted_fork_slots {
 if slot >= &root_slot {
- slots_stake_map.insert(*slot, sender_stake);
+ let epoch = root_bank.epoch_schedule().get_epoch(*slot);
+ if let Some(sender_stake) = root_bank.epoch_node_id_to_stake(epoch, my_pubkey) {
+ slots_stake_map.insert(*slot, sender_stake);
+ } else {
+ warn!("The root bank {root_slot} does not have the stake for slot {slot}");
+ }
 }
 }
+
+ let my_last_vote_epoch = root_bank
+ .get_epoch_and_slot_index(
+ *my_last_voted_fork_slots
+ .iter()
+ .max()
+ .expect("my voted slots should not be empty"),
+ )
+ .0;
+ let mut node_to_last_vote_epoch_map = HashMap::new();
+ node_to_last_vote_epoch_map.insert(*my_pubkey, my_last_vote_epoch);
+ // We would only consider slots in root_epoch and the next epoch.
+ let epoch_info_vec: Vec<LastVotedForkSlotsEpochInfo> = (root_epoch
+ ..root_epoch
+ .checked_add(2)
+ .expect("root_epoch should not be so big"))
+ .map(|epoch| {
+ let total_stake = root_bank
+ .epoch_total_stake(epoch)
+ .expect("epoch stake not found");
+ let my_stake = root_bank
+ .epoch_node_id_to_stake(epoch, my_pubkey)
+ .unwrap_or(0);
+ let actively_voting_for_this_epoch_stake = if epoch <= my_last_vote_epoch {
+ my_stake
+ } else {
+ 0
+ };
+ LastVotedForkSlotsEpochInfo {
+ epoch,
+ total_stake,
+ actively_voting_stake: my_stake,
+ actively_voting_for_this_epoch_stake,
+ }
+ })
+ .collect();
 Self {
- root_slot,
- repair_threshold,
- epoch_stakes: epoch_stakes.clone(),
+ node_to_last_vote_epoch_map,
+ epoch_info_vec,
 last_voted_fork_slots: HashMap::new(),
- slots_stake_map,
- active_peers,
- slots_to_repair: HashSet::new(),
 my_pubkey: *my_pubkey,
+ repair_threshold,
+ root_bank,
+ slots_stake_map,
+ slots_to_repair: BTreeSet::new(),
 }
 }

- fn validator_stake(epoch_stakes: &EpochStakes, pubkey: &Pubkey) -> u64 {
- epoch_stakes
- .node_id_to_vote_accounts()
- .get(pubkey)
- .map(|x| x.total_stake)
- .unwrap_or_default()
- }
-
 pub(crate) fn aggregate_from_record(
 &mut self,
 key_string: &str,
@@ -90,80 +142,137 @@ impl LastVotedForkSlotsAggregate {
 &mut self,
 new_slots: RestartLastVotedForkSlots,
 ) -> Option<LastVotedForkSlotsRecord> {
- let total_stake = self.epoch_stakes.total_stake();
- let threshold_stake = (total_stake as f64 * self.repair_threshold) as u64;
 let from = &new_slots.from;
 if from == &self.my_pubkey {
 return None;
 }
- let sender_stake = Self::validator_stake(&self.epoch_stakes, from);
- if sender_stake == 0 {
- warn!(
- "Gossip should not accept zero-stake RestartLastVotedFork from {:?}",
- from
- );
+ let root_slot = self.root_bank.slot();
+ let new_slots_vec = new_slots.to_slots(root_slot);
+ if new_slots_vec.is_empty() {
 return None;
 }
- self.active_peers.insert(*from);
- let new_slots_vec = new_slots.to_slots(self.root_slot);
- let record = LastVotedForkSlotsRecord {
- last_voted_fork_slots: new_slots_vec.clone(),
+ let last_vote_epoch = self
+ .root_bank
+ .get_epoch_and_slot_index(*new_slots_vec.last().unwrap())
+ .0;
+ let old_last_vote_epoch = self
+ .node_to_last_vote_epoch_map
+ .insert(*from, last_vote_epoch);
+ if old_last_vote_epoch != Some(last_vote_epoch) {
+ self.update_epoch_info(from, last_vote_epoch, old_last_vote_epoch);
+ }
+ if self.update_and_check_if_message_already_saved(new_slots.clone(), new_slots_vec.clone())
+ {
+ return None;
+ }
+ Some(LastVotedForkSlotsRecord {
+ last_voted_fork_slots: new_slots_vec,
 last_vote_bankhash: new_slots.last_voted_hash.to_string(),
 shred_version: new_slots.shred_version as u32,
 wallclock: new_slots.wallclock,
- };
+ })
+ }
+
+ // Return true if the message has already been saved, so we can skip the rest of the processing.
+ fn update_and_check_if_message_already_saved(
+ &mut self,
+ new_slots: RestartLastVotedForkSlots,
+ new_slots_vec: Vec<Slot>,
+ ) -> bool {
+ let from = &new_slots.from;
 let new_slots_set: HashSet<Slot> = HashSet::from_iter(new_slots_vec);
 let old_slots_set = match self.last_voted_fork_slots.insert(*from, new_slots.clone()) {
 Some(old_slots) => {
 if old_slots == new_slots {
- return None;
+ return true;
 } else {
- HashSet::from_iter(old_slots.to_slots(self.root_slot))
+ HashSet::from_iter(old_slots.to_slots(self.root_bank.slot()))
 }
 }
 None => HashSet::new(),
 };
 for slot in old_slots_set.difference(&new_slots_set) {
+ let epoch = self.root_bank.epoch_schedule().get_epoch(*slot);
 let entry = self.slots_stake_map.get_mut(slot).unwrap();
- *entry = entry.saturating_sub(sender_stake);
- if *entry < threshold_stake {
- self.slots_to_repair.remove(slot);
+ if let Some(sender_stake) = self.root_bank.epoch_node_id_to_stake(epoch, from) {
+ *entry = entry.saturating_sub(sender_stake);
+ let repair_threshold_stake = (self.root_bank.epoch_total_stake(epoch).unwrap()
+ as f64
+ * self.repair_threshold) as u64;
+ if *entry < repair_threshold_stake {
+ self.slots_to_repair.remove(slot);
+ }
 }
 }
 for slot in new_slots_set.difference(&old_slots_set) {
+ let epoch = self.root_bank.epoch_schedule().get_epoch(*slot);
 let entry = self.slots_stake_map.entry(*slot).or_insert(0);
- *entry = entry.saturating_add(sender_stake);
- if *entry >= threshold_stake {
- self.slots_to_repair.insert(*slot);
+ if let Some(sender_stake) = self.root_bank.epoch_node_id_to_stake(epoch, from) {
+ *entry = entry.saturating_add(sender_stake);
+ let repair_threshold_stake = (self.root_bank.epoch_total_stake(epoch).unwrap()
+ as f64
+ * self.repair_threshold) as u64;
+ if *entry >= repair_threshold_stake {
+ self.slots_to_repair.insert(*slot);
+ }
 }
 }
- Some(record)
+ false
 }

- pub(crate) fn active_percent(&self) -> f64 {
- let total_stake = self.epoch_stakes.total_stake();
- let total_active_stake = self.active_peers.iter().fold(0, |sum: u64, pubkey| {
- sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey))
- });
- total_active_stake as f64 / total_stake as f64 * 100.0
+ fn update_epoch_info(
+ &mut self,
+ from: &Pubkey,
+ last_vote_epoch: Epoch,
+ old_last_vote_epoch: Option<Epoch>,
+ ) {
+ if Some(last_vote_epoch) < old_last_vote_epoch {
+ // We only have two entries so old epoch must be the second one.
+ let entry = self.epoch_info_vec.last_mut().unwrap();
+ if let Some(stake) = self.root_bank.epoch_node_id_to_stake(entry.epoch, from) {
+ entry.actively_voting_for_this_epoch_stake = entry
+ .actively_voting_for_this_epoch_stake
+ .checked_sub(stake)
+ .unwrap();
+ }
+ } else {
+ for entry in self.epoch_info_vec.iter_mut() {
+ if let Some(stake) = self.root_bank.epoch_node_id_to_stake(entry.epoch, from) {
+ if old_last_vote_epoch.is_none() {
+ entry.actively_voting_stake =
+ entry.actively_voting_stake.checked_add(stake).unwrap();
+ }
+ if Some(entry.epoch) > old_last_vote_epoch && entry.epoch <= last_vote_epoch {
+ entry.actively_voting_for_this_epoch_stake = entry
+ .actively_voting_for_this_epoch_stake
+ .checked_add(stake)
+ .unwrap();
+ }
+ }
+ }
+ }
 }

- pub(crate) fn slots_to_repair_iter(&self) -> impl Iterator<Item = &Slot> {
- self.slots_to_repair.iter()
+ pub(crate) fn min_active_percent(&self) -> f64 {
+ self.epoch_info_vec
+ .iter()
+ .filter(|info| {
+ info.actively_voting_for_this_epoch_stake as f64 / info.total_stake as f64
+ > EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD
+ })
+ .map(|info| info.actively_voting_stake as f64 / info.total_stake as f64 * 100.0)
+ .min_by(|a, b| a.partial_cmp(b).unwrap())
+ .unwrap_or(0.0)
 }

- // TODO(wen): use better epoch stake and add a test later.
- fn total_active_stake(&self) -> u64 {
- self.active_peers.iter().fold(0, |sum: u64, pubkey| {
- sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey))
- })
+ pub(crate) fn slots_to_repair_iter(&self) -> impl Iterator<Item = &Slot> {
+ self.slots_to_repair.iter()
 }

 pub(crate) fn get_final_result(self) -> LastVotedForkSlotsFinalResult {
- let total_active_stake = self.total_active_stake();
 LastVotedForkSlotsFinalResult {
 slots_stake_map: self.slots_stake_map,
- total_active_stake,
+ epoch_info_vec: self.epoch_info_vec,
 }
 }
 }
@@ -172,18 +281,20 @@ impl LastVotedForkSlotsAggregate {
 mod tests {
 use {
 crate::{
- last_voted_fork_slots_aggregate::LastVotedForkSlotsAggregate,
- solana::wen_restart_proto::LastVotedForkSlotsRecord,
+ last_voted_fork_slots_aggregate::*, solana::wen_restart_proto::LastVotedForkSlotsRecord,
 },
 solana_gossip::restart_crds_values::RestartLastVotedForkSlots,
- solana_program::{clock::Slot, pubkey::Pubkey},
+ solana_program::clock::Slot,
 solana_runtime::{
 bank::Bank,
+ epoch_stakes::EpochStakes,
 genesis_utils::{
 create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs,
 },
 },
 solana_sdk::{hash::Hash, signature::Signer, timing::timestamp},
+ solana_vote::vote_account::VoteAccount,
+ solana_vote_program::vote_state::create_account_with_authorized,
 };

 const TOTAL_VALIDATOR_COUNT: u16 = 10;
@@ -218,9 +329,8 @@ mod tests {
 ];
 TestAggregateInitResult {
 slots_aggregate: LastVotedForkSlotsAggregate::new(
- root_slot,
+ root_bank,
 REPAIR_THRESHOLD,
- root_bank.epoch_stakes(root_bank.epoch()).unwrap(),
 &last_voted_fork_slots,
 &validator_voting_keypairs[MY_INDEX].node_keypair.pubkey(),
 ),
@@ -234,7 +344,9 @@ mod tests {
 fn test_aggregate() {
 let mut test_state = test_aggregate_init();
 let root_slot = test_state.root_slot;
- let initial_num_active_validators = 3;
+ // Until 33% stake vote, the percentage should be 0.
+ assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + let initial_num_active_validators = 2; for validator_voting_keypair in test_state .validator_voting_keypairs .iter() @@ -261,9 +373,44 @@ mod tests { }), ); } + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + assert!(test_state + .slots_aggregate + .slots_to_repair_iter() + .next() + .is_none()); + + // Now add one more validator, min_active_percent should be 40% but repair + // is still empty (< 42%). + let new_active_validator = test_state.validator_voting_keypairs + [initial_num_active_validators] + .node_keypair + .pubkey(); + let now = timestamp(); + let new_active_validator_last_voted_slots = RestartLastVotedForkSlots::new( + new_active_validator, + now, + &test_state.last_voted_fork_slots, + Hash::default(), + SHRED_VERSION, + ) + .unwrap(); + assert_eq!( + test_state + .slots_aggregate + .aggregate(new_active_validator_last_voted_slots), + Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: Hash::default().to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }), + ); + let expected_active_percent = + (initial_num_active_validators + 2) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; assert_eq!( - test_state.slots_aggregate.active_percent(), - (initial_num_active_validators + 1) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0 + test_state.slots_aggregate.min_active_percent(), + expected_active_percent ); assert!(test_state .slots_aggregate @@ -271,6 +418,7 @@ mod tests { .next() .is_none()); + // Add one more validator, then repair is > 42% and no longer empty. let new_active_validator = test_state.validator_voting_keypairs [initial_num_active_validators + 1] .node_keypair @@ -296,9 +444,9 @@ mod tests { }), ); let expected_active_percent = - (initial_num_active_validators + 2) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; + (initial_num_active_validators + 3) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; assert_eq!( - test_state.slots_aggregate.active_percent(), + test_state.slots_aggregate.min_active_percent(), expected_active_percent ); let mut actual_slots = @@ -306,7 +454,8 @@ mod tests { actual_slots.sort(); assert_eq!(actual_slots, test_state.last_voted_fork_slots); - let replace_message_validator = test_state.validator_voting_keypairs[2] + let replace_message_validator = test_state.validator_voting_keypairs + [initial_num_active_validators] .node_keypair .pubkey(); // Allow specific validator to replace message. @@ -331,31 +480,7 @@ mod tests { }), ); assert_eq!( - test_state.slots_aggregate.active_percent(), - expected_active_percent - ); - let mut actual_slots = - Vec::from_iter(test_state.slots_aggregate.slots_to_repair_iter().cloned()); - actual_slots.sort(); - assert_eq!(actual_slots, vec![root_slot + 1]); - - // test that zero stake validator is ignored. 
- let random_pubkey = Pubkey::new_unique(); - assert_eq!( - test_state.slots_aggregate.aggregate( - RestartLastVotedForkSlots::new( - random_pubkey, - timestamp(), - &[root_slot + 1, root_slot + 4, root_slot + 5], - Hash::default(), - SHRED_VERSION, - ) - .unwrap(), - ), - None, - ); - assert_eq!( - test_state.slots_aggregate.active_percent(), + test_state.slots_aggregate.min_active_percent(), expected_active_percent ); let mut actual_slots = @@ -379,6 +504,35 @@ mod tests { ), None, ); + + assert_eq!( + test_state.slots_aggregate.get_final_result(), + LastVotedForkSlotsFinalResult { + slots_stake_map: vec![ + (root_slot + 1, 500), + (root_slot + 2, 400), + (root_slot + 3, 400), + (root_slot + 4, 100), + (root_slot + 5, 100), + ] + .into_iter() + .collect(), + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 500, + actively_voting_for_this_epoch_stake: 500, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 500, + actively_voting_for_this_epoch_stake: 0, + } + ], + }, + ); } #[test] @@ -393,7 +547,7 @@ mod tests { last_vote_bankhash: last_vote_bankhash.to_string(), shred_version: SHRED_VERSION as u32, }; - assert_eq!(test_state.slots_aggregate.active_percent(), 10.0); + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); assert_eq!( test_state .slots_aggregate @@ -407,7 +561,33 @@ mod tests { .unwrap(), Some(record.clone()), ); - assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); + // Before 33% voted for slot in this epoch, the percentage should be 0. + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + for i in 1..3 { + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + let pubkey = test_state.validator_voting_keypairs[i] + .node_keypair + .pubkey(); + let now = timestamp(); + let last_voted_fork_slots = RestartLastVotedForkSlots::new( + pubkey, + now, + &test_state.last_voted_fork_slots, + last_vote_bankhash, + SHRED_VERSION, + ) + .unwrap(); + assert_eq!( + test_state.slots_aggregate.aggregate(last_voted_fork_slots), + Some(LastVotedForkSlotsRecord { + wallclock: now, + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + }), + ); + } + assert_eq!(test_state.slots_aggregate.min_active_percent(), 40.0); // Now if you get the same result from Gossip again, it should be ignored. assert_eq!( test_state.slots_aggregate.aggregate( @@ -451,26 +631,7 @@ mod tests { }), ); // percentage doesn't change since it's a replace. - assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); - - // Record from validator with zero stake should be ignored. - assert_eq!( - test_state - .slots_aggregate - .aggregate_from_record( - &Pubkey::new_unique().to_string(), - &LastVotedForkSlotsRecord { - wallclock: timestamp(), - last_voted_fork_slots: vec![root_slot + 10, root_slot + 300], - last_vote_bankhash: Hash::new_unique().to_string(), - shred_version: SHRED_VERSION as u32, - } - ) - .unwrap(), - None, - ); - // percentage doesn't change since the previous aggregate is ignored. - assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); + assert_eq!(test_state.slots_aggregate.min_active_percent(), 40.0); // Record from my pubkey should be ignored. 
        assert_eq!(
@@ -491,6 +652,33 @@ mod tests {
             .unwrap(),
             None,
         );
+        assert_eq!(
+            test_state.slots_aggregate.get_final_result(),
+            LastVotedForkSlotsFinalResult {
+                slots_stake_map: vec![
+                    (root_slot + 1, 400),
+                    (root_slot + 2, 400),
+                    (root_slot + 3, 400),
+                    (root_slot + 4, 100),
+                ]
+                .into_iter()
+                .collect(),
+                epoch_info_vec: vec![
+                    LastVotedForkSlotsEpochInfo {
+                        epoch: 0,
+                        total_stake: 1000,
+                        actively_voting_stake: 400,
+                        actively_voting_for_this_epoch_stake: 400,
+                    },
+                    LastVotedForkSlotsEpochInfo {
+                        epoch: 1,
+                        total_stake: 1000,
+                        actively_voting_stake: 400,
+                        actively_voting_for_this_epoch_stake: 0,
+                    }
+                ],
+            },
+        );
     }
 
     #[test]
@@ -554,4 +742,84 @@ mod tests {
         )
         .is_err());
     }
+
+    #[test]
+    fn test_aggregate_init_across_epoch() {
+        let validator_voting_keypairs: Vec<_> = (0..TOTAL_VALIDATOR_COUNT)
+            .map(|_| ValidatorVoteKeypairs::new_rand())
+            .collect();
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts(
+            10_000,
+            &validator_voting_keypairs,
+            vec![100; validator_voting_keypairs.len()],
+        );
+        let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
+        let root_bank = bank_forks.read().unwrap().root_bank();
+        // Add bank 1 linking directly to 0, tweak its epoch_stakes, and then add it to bank_forks.
+        let mut new_root_bank = Bank::new_from_parent(root_bank.clone(), &Pubkey::default(), 1);
+
+        // For epoch 1, let our validator have 90% of the stake.
+        let vote_accounts_hash_map = validator_voting_keypairs
+            .iter()
+            .enumerate()
+            .map(|(i, keypairs)| {
+                let stake = if i == MY_INDEX {
+                    900 * (TOTAL_VALIDATOR_COUNT - 1) as u64
+                } else {
+                    100
+                };
+                let authorized_voter = keypairs.vote_keypair.pubkey();
+                let node_id = keypairs.node_keypair.pubkey();
+                (
+                    authorized_voter,
+                    (
+                        stake,
+                        VoteAccount::try_from(create_account_with_authorized(
+                            &node_id,
+                            &authorized_voter,
+                            &node_id,
+                            0,
+                            100,
+                        ))
+                        .unwrap(),
+                    ),
+                )
+            })
+            .collect();
+        let epoch1_epoch_stakes = EpochStakes::new_for_tests(vote_accounts_hash_map, 1);
+        new_root_bank.set_epoch_stakes_for_test(1, epoch1_epoch_stakes);
+
+        let last_voted_fork_slots = vec![root_bank.slot() + 1, root_bank.get_slots_in_epoch(0) + 1];
+        let slots_aggregate = LastVotedForkSlotsAggregate::new(
+            Arc::new(new_root_bank),
+            REPAIR_THRESHOLD,
+            &last_voted_fork_slots,
+            &validator_voting_keypairs[MY_INDEX].node_keypair.pubkey(),
+        );
+        assert_eq!(
+            slots_aggregate.get_final_result(),
+            LastVotedForkSlotsFinalResult {
+                slots_stake_map: vec![
+                    (root_bank.slot() + 1, 100),
+                    (root_bank.get_slots_in_epoch(0) + 1, 8100),
+                ]
+                .into_iter()
+                .collect(),
+                epoch_info_vec: vec![
+                    LastVotedForkSlotsEpochInfo {
+                        epoch: 0,
+                        total_stake: 1000,
+                        actively_voting_stake: 100,
+                        actively_voting_for_this_epoch_stake: 100,
+                    },
+                    LastVotedForkSlotsEpochInfo {
+                        epoch: 1,
+                        total_stake: 9000,
+                        actively_voting_stake: 8100,
+                        actively_voting_for_this_epoch_stake: 8100,
+                    }
+                ],
+            }
+        );
+    }
 }
diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs
index 67b148b17149b5..0fa7ec1cb65f3e 100644
--- a/wen-restart/src/wen_restart.rs
+++ b/wen-restart/src/wen_restart.rs
@@ -4,13 +4,13 @@ use {
     crate::{
         heaviest_fork_aggregate::HeaviestForkAggregate,
         last_voted_fork_slots_aggregate::{
-            LastVotedForkSlotsAggregate, LastVotedForkSlotsFinalResult,
+            LastVotedForkSlotsAggregate, LastVotedForkSlotsEpochInfo, LastVotedForkSlotsFinalResult,
         },
         solana::wen_restart_proto::{
             self, GenerateSnapshotRecord, HeaviestForkAggregateFinal, HeaviestForkAggregateRecord,
            HeaviestForkRecord, LastVotedForkSlotsAggregateFinal,
-            LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsRecord, State as RestartState,
-            WenRestartProgress,
+            LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsEpochInfoRecord,
+            LastVotedForkSlotsRecord, State as RestartState, WenRestartProgress,
         },
     },
     anyhow::Result,
@@ -27,7 +27,10 @@ use {
         blockstore_processor::{process_single_slot, ConfirmationProgress, ProcessOptions},
         leader_schedule_cache::LeaderScheduleCache,
     },
-    solana_program::{clock::Slot, hash::Hash},
+    solana_program::{
+        clock::{Epoch, Slot},
+        hash::Hash,
+    },
     solana_runtime::{
         accounts_background_service::AbsRequestSender,
         bank::Bank,
@@ -216,9 +219,8 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots(
     let root_bank = bank_forks.read().unwrap().root_bank();
     let root_slot = root_bank.slot();
     let mut last_voted_fork_slots_aggregate = LastVotedForkSlotsAggregate::new(
-        root_slot,
+        root_bank.clone(),
         REPAIR_THRESHOLD,
-        root_bank.epoch_stakes(root_bank.epoch()).unwrap(),
         last_voted_fork_slots,
         &cluster_info.id(),
     );
@@ -260,7 +262,7 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots(
         // Because all operations on the aggregate are called from this single thread, we can
         // fetch all results separately without worrying about them being out of sync. We can
         // also use returned iterator without the vector changing underneath us.
-        let active_percent = last_voted_fork_slots_aggregate.active_percent();
+        let active_percent = last_voted_fork_slots_aggregate.min_active_percent();
         let mut filtered_slots: Vec<Slot>;
         {
             filtered_slots = last_voted_fork_slots_aggregate
@@ -303,6 +305,23 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots(
     Ok(last_voted_fork_slots_aggregate.get_final_result())
 }
 
+fn is_over_stake_threshold(
+    epoch_info_vec: &[LastVotedForkSlotsEpochInfo],
+    epoch: Epoch,
+    stake: &u64,
+) -> bool {
+    epoch_info_vec
+        .iter()
+        .find(|info| info.epoch == epoch)
+        .map_or(false, |info| {
+            let threshold = info
+                .actively_voting_stake
+                .checked_sub((info.total_stake as f64 * HEAVIEST_FORK_THRESHOLD_DELTA) as u64)
+                .unwrap();
+            stake >= &threshold
+        })
+}
+
 // Verify that all blocks with at least (active_stake_percent - 38%) of the stake form a
 // single chain from the root, and use the highest slot in the blocks as the heaviest fork.
 // Please see SIMD 46 "gossip current heaviest fork" for correctness proof.
@@ -314,16 +333,17 @@ pub(crate) fn find_heaviest_fork(
 ) -> Result<(Slot, Hash)> {
     let root_bank = bank_forks.read().unwrap().root_bank();
     let root_slot = root_bank.slot();
-    // TODO: Should use better epoch_stakes later.
-    let epoch_stake = root_bank.epoch_stakes(root_bank.epoch()).unwrap();
-    let total_stake = epoch_stake.total_stake();
-    let stake_threshold = aggregate_final_result
-        .total_active_stake
-        .saturating_sub((HEAVIEST_FORK_THRESHOLD_DELTA * total_stake as f64) as u64);
     let mut slots = aggregate_final_result
         .slots_stake_map
         .iter()
-        .filter(|(slot, stake)| **slot > root_slot && **stake > stake_threshold)
+        .filter(|(slot, stake)| {
+            **slot > root_slot
+                && is_over_stake_threshold(
+                    &aggregate_final_result.epoch_info_vec,
+                    root_bank.epoch_schedule().get_epoch(**slot),
+                    stake,
+                )
+        })
         .map(|(slot, _)| *slot)
         .collect::<Vec<Slot>>();
     slots.sort();
@@ -604,8 +624,6 @@ pub(crate) fn aggregate_restart_heaviest_fork(
     progress: &mut WenRestartProgress,
 ) -> Result<()> {
     let root_bank = bank_forks.read().unwrap().root_bank();
-    let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch()).unwrap();
-    let total_stake = epoch_stakes.total_stake();
     if progress.my_heaviest_fork.is_none() {
         return Err(WenRestartError::MalformedProgress(
             RestartState::HeaviestFork,
@@ -616,6 +634,13 @@ pub(crate) fn aggregate_restart_heaviest_fork(
     let my_heaviest_fork = progress.my_heaviest_fork.clone().unwrap();
     let heaviest_fork_slot = my_heaviest_fork.slot;
     let heaviest_fork_hash = Hash::from_str(&my_heaviest_fork.bankhash)?;
+    // When checking whether to exit aggregate_restart_heaviest_fork, use the epoch_stakes
+    // associated with the heaviest fork slot we picked. This ensures that everyone agreeing
+    // with me uses the same EpochStakes to calculate the supermajority threshold.
+    let epoch_stakes = root_bank
+        .epoch_stakes(root_bank.epoch_schedule().get_epoch(heaviest_fork_slot))
+        .unwrap();
+    let total_stake = epoch_stakes.total_stake();
     let adjusted_threshold_percent = wait_for_supermajority_threshold_percent
         .saturating_sub(HEAVIEST_FORK_DISAGREE_THRESHOLD_PERCENT.round() as u64);
     // The threshold for supermajority should definitely be higher than 67%.
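As an aside on the hunks above: the old code compared every slot against a single stake_threshold derived from one epoch's stakes, while the new is_over_stake_threshold keys the check on the epoch the slot belongs to. A minimal, self-contained sketch of that rule follows; it is illustrative only, with EpochInfo standing in for LastVotedForkSlotsEpochInfo, the constant mirroring HEAVIEST_FORK_THRESHOLD_DELTA, and saturating_sub in place of the checked_sub().unwrap() used in the patch:

// Hypothetical stand-in for LastVotedForkSlotsEpochInfo (illustration only).
const HEAVIEST_FORK_THRESHOLD_DELTA: f64 = 0.38;

struct EpochInfo {
    epoch: u64,
    total_stake: u64,
    actively_voting_stake: u64,
}

// A slot is kept if its stake reaches the epoch's actively voting stake
// minus 38% of that epoch's total stake; slots from epochs we have no
// info for are dropped.
fn is_over_stake_threshold(infos: &[EpochInfo], epoch: u64, stake: u64) -> bool {
    infos
        .iter()
        .find(|info| info.epoch == epoch)
        .map_or(false, |info| {
            let delta = (info.total_stake as f64 * HEAVIEST_FORK_THRESHOLD_DELTA) as u64;
            stake >= info.actively_voting_stake.saturating_sub(delta)
        })
}

fn main() {
    let infos = [EpochInfo {
        epoch: 0,
        total_stake: 1000,
        actively_voting_stake: 900,
    }];
    // Threshold is 900 - 380 = 520: a slot with 600 stake passes, 500 fails.
    assert!(is_over_stake_threshold(&infos, 0, 600));
    assert!(!is_over_stake_threshold(&infos, 0, 500));
}

Computing the cutoff per epoch rather than from one global total_active_stake is what lets find_heaviest_fork keep working across an epoch boundary, where total and actively voting stake can differ between the two epochs.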
@@ -963,7 +988,17 @@ pub(crate) fn increment_and_write_wen_restart_records( if let Some(aggregate_record) = progress.last_voted_fork_slots_aggregate.as_mut() { aggregate_record.final_result = Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: aggregate_final_result.slots_stake_map.clone(), - total_active_stake: aggregate_final_result.total_active_stake, + epoch_infos: aggregate_final_result + .epoch_info_vec + .iter() + .map(|info| LastVotedForkSlotsEpochInfoRecord { + epoch: info.epoch, + total_stake: info.total_stake, + actively_voting_stake: info.actively_voting_stake, + actively_voting_for_this_epoch_stake: info + .actively_voting_for_this_epoch_stake, + }) + .collect(), }); } WenRestartProgressInternalState::FindHeaviestFork { @@ -1091,7 +1126,17 @@ pub(crate) fn initialize( r.final_result.as_ref().map(|result| { LastVotedForkSlotsFinalResult { slots_stake_map: result.slots_stake_map.clone(), - total_active_stake: result.total_active_stake, + epoch_info_vec: result + .epoch_infos + .iter() + .map(|info| LastVotedForkSlotsEpochInfo { + epoch: info.epoch, + total_stake: info.total_stake, + actively_voting_stake: info.actively_voting_stake, + actively_voting_for_this_epoch_stake: info + .actively_voting_for_this_epoch_stake, + }) + .collect(), } }) }), @@ -1112,7 +1157,17 @@ pub(crate) fn initialize( .as_ref() .map(|result| LastVotedForkSlotsFinalResult { slots_stake_map: result.slots_stake_map.clone(), - total_active_stake: result.total_active_stake, + epoch_info_vec: result + .epoch_infos + .iter() + .map(|info| LastVotedForkSlotsEpochInfo { + epoch: info.epoch, + total_stake: info.total_stake, + actively_voting_stake: info.actively_voting_stake, + actively_voting_for_this_epoch_stake: info + .actively_voting_for_this_epoch_stake, + }) + .collect(), }) }) .ok_or(WenRestartError::MalformedProgress( @@ -1184,6 +1239,7 @@ mod tests { vote::state::{TowerSync, Vote}, }, solana_runtime::{ + epoch_stakes::EpochStakes, genesis_utils::{ create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, }, @@ -1192,16 +1248,19 @@ mod tests { snapshot_utils::build_incremental_snapshot_archive_path, }, solana_sdk::{ + pubkey::Pubkey, signature::{Keypair, Signer}, timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_vote::vote_account::VoteAccount, + solana_vote_program::vote_state::create_account_with_authorized, std::{fs::remove_file, sync::Arc, thread::Builder}, tempfile::TempDir, }; const SHRED_VERSION: u16 = 2; - const EXPECTED_SLOTS: Slot = 90; + const EXPECTED_SLOTS: Slot = 40; const TICKS_PER_SLOT: u64 = 2; const TOTAL_VALIDATOR_COUNT: u16 = 20; const MY_INDEX: usize = TOTAL_VALIDATOR_COUNT as usize - 1; @@ -1636,6 +1695,8 @@ mod tests { .iter() .map(|slot| (*slot, total_active_stake_during_heaviest_fork)), ); + // We are simulating 5% joined LastVotedForkSlots but not HeaviestFork. + let voted_stake = total_active_stake_during_heaviest_fork + 100; assert_eq!( progress, WenRestartProgress { @@ -1650,8 +1711,20 @@ mod tests { received: expected_received_last_voted_fork_slots, final_result: Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: expected_slots_stake_map, - // We are simulating 5% joined LastVotedForkSlots but not HeaviestFork. 
-                        total_active_stake: total_active_stake_during_heaviest_fork + 100,
+                        epoch_infos: vec![
+                            LastVotedForkSlotsEpochInfoRecord {
+                                epoch: 0,
+                                total_stake: 2000,
+                                actively_voting_stake: voted_stake,
+                                actively_voting_for_this_epoch_stake: voted_stake,
+                            },
+                            LastVotedForkSlotsEpochInfoRecord {
+                                epoch: 1,
+                                total_stake: 2000,
+                                actively_voting_stake: voted_stake,
+                                actively_voting_for_this_epoch_stake: voted_stake,
+                            },
+                        ],
                     }),
                 }),
                 my_heaviest_fork: Some(HeaviestForkRecord {
@@ -1693,6 +1766,180 @@ mod tests {
         std::fs::set_permissions(wen_restart_proto_path, perms).unwrap();
     }
 
+    #[test]
+    fn test_wen_restart_divergence_across_epoch_boundary() {
+        solana_logger::setup();
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let test_state = wen_restart_test_init(&ledger_path);
+        let last_vote_slot = test_state.last_voted_fork_slots[0];
+
+        let old_root_bank = test_state.bank_forks.read().unwrap().root_bank();
+
+        // Add bank last_vote + 1 linking directly to 0, tweak its epoch_stakes, and then add it to bank_forks.
+        let new_root_slot = last_vote_slot + 1;
+        let mut new_root_bank =
+            Bank::new_from_parent(old_root_bank.clone(), &Pubkey::default(), new_root_slot);
+        assert_eq!(new_root_bank.epoch(), 1);
+
+        // For epoch 2, make validator 0 have 90% of the stake.
+        let vote_accounts_hash_map = test_state
+            .validator_voting_keypairs
+            .iter()
+            .enumerate()
+            .map(|(i, keypairs)| {
+                let stake = if i == 0 {
+                    900 * (TOTAL_VALIDATOR_COUNT - 1) as u64
+                } else {
+                    100
+                };
+                let authorized_voter = keypairs.vote_keypair.pubkey();
+                let node_id = keypairs.node_keypair.pubkey();
+                (
+                    authorized_voter,
+                    (
+                        stake,
+                        VoteAccount::try_from(create_account_with_authorized(
+                            &node_id,
+                            &authorized_voter,
+                            &node_id,
+                            0,
+                            100,
+                        ))
+                        .unwrap(),
+                    ),
+                )
+            })
+            .collect();
+        let epoch2_epoch_stakes = EpochStakes::new_for_tests(vote_accounts_hash_map, 2);
+        new_root_bank.set_epoch_stakes_for_test(2, epoch2_epoch_stakes);
+        let _ = insert_slots_into_blockstore(
+            test_state.blockstore.clone(),
+            0,
+            &[new_root_slot],
+            TICKS_PER_SLOT,
+            old_root_bank.last_blockhash(),
+        );
+        let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new()
+            .thread_name(|i| format!("solReplayTx{i:02}"))
+            .build()
+            .expect("new rayon threadpool");
+        let recyclers = VerifyRecyclers::default();
+        let mut timing = ExecuteTimings::default();
+        let opts = ProcessOptions::default();
+        let mut progress = ConfirmationProgress::new(old_root_bank.last_blockhash());
+        let last_vote_bankhash = new_root_bank.hash();
+        let bank_with_scheduler = test_state
+            .bank_forks
+            .write()
+            .unwrap()
+            .insert_from_ledger(new_root_bank);
+        if let Err(e) = process_single_slot(
+            &test_state.blockstore,
+            &bank_with_scheduler,
+            &replay_tx_thread_pool,
+            &opts,
+            &recyclers,
+            &mut progress,
+            None,
+            None,
+            None,
+            None,
+            &mut timing,
+        ) {
+            panic!("process_single_slot failed: {:?}", e);
+        }
+
+        {
+            let mut bank_forks = test_state.bank_forks.write().unwrap();
+            let _ = bank_forks.set_root(
+                last_vote_slot + 1,
+                &AbsRequestSender::default(),
+                Some(last_vote_slot + 1),
+            );
+        }
+        let new_root_bank = test_state
+            .bank_forks
+            .read()
+            .unwrap()
+            .get(last_vote_slot + 1)
+            .unwrap();
+
+        // Add two more banks: old_epoch_bank (slot = last_vote_slot + 2) and
+        // new_epoch_bank (slot = first slot in epoch 2). They both link to last_vote_slot + 1.
+        // old_epoch_bank has everyone's votes except 0, so it has > 66% stake in the old epoch.
+        // new_epoch_bank has 0's vote, so it has > 66% stake in the new epoch.
+        let old_epoch_slot = new_root_slot + 1;
+        let _ = insert_slots_into_blockstore(
+            test_state.blockstore.clone(),
+            new_root_bank.slot(),
+            &[old_epoch_slot],
+            TICKS_PER_SLOT,
+            new_root_bank.last_blockhash(),
+        );
+        let new_epoch_slot = new_root_bank.epoch_schedule().get_first_slot_in_epoch(2);
+        let _ = insert_slots_into_blockstore(
+            test_state.blockstore.clone(),
+            new_root_slot,
+            &[new_epoch_slot],
+            TICKS_PER_SLOT,
+            new_root_bank.last_blockhash(),
+        );
+        let mut rng = rand::thread_rng();
+        // Everyone except 0 votes for old_epoch_bank.
+        for (index, keypairs) in test_state
+            .validator_voting_keypairs
+            .iter()
+            .take(TOTAL_VALIDATOR_COUNT as usize - 1)
+            .enumerate()
+        {
+            let node_pubkey = keypairs.node_keypair.pubkey();
+            let node = ContactInfo::new_rand(&mut rng, Some(node_pubkey));
+            let last_vote_hash = Hash::new_unique();
+            let now = timestamp();
+            // Validator 0 votes for the new_epoch_bank while everyone else votes for old_epoch_bank.
+            let last_voted_fork_slots = if index == 0 {
+                vec![new_epoch_slot, new_root_slot, 0]
+            } else {
+                vec![old_epoch_slot, new_root_slot, 0]
+            };
+            push_restart_last_voted_fork_slots(
+                test_state.cluster_info.clone(),
+                &node,
+                &last_voted_fork_slots,
+                &last_vote_hash,
+                &keypairs.node_keypair,
+                now,
+            );
+        }
+
+        assert_eq!(
+            wait_for_wen_restart(WenRestartConfig {
+                wen_restart_path: test_state.wen_restart_proto_path,
+                last_vote: VoteTransaction::from(Vote::new(
+                    vec![new_root_slot],
+                    last_vote_bankhash
+                )),
+                blockstore: test_state.blockstore,
+                cluster_info: test_state.cluster_info,
+                bank_forks: test_state.bank_forks,
+                wen_restart_repair_slots: Some(Arc::new(RwLock::new(Vec::new()))),
+                wait_for_supermajority_threshold_percent: 80,
+                snapshot_config: SnapshotConfig::default(),
+                accounts_background_request_sender: AbsRequestSender::default(),
+                genesis_config_hash: test_state.genesis_config_hash,
+                exit: Arc::new(AtomicBool::new(false)),
+            })
+            .unwrap_err()
+            .downcast::<WenRestartError>()
+            .unwrap(),
+            WenRestartError::BlockNotLinkedToExpectedParent(
+                new_epoch_slot,
+                Some(new_root_slot),
+                old_epoch_slot
+            )
+        );
+    }
+
     #[test]
     fn test_wen_restart_initialize() {
         solana_logger::setup();
@@ -1889,7 +2136,20 @@ mod tests {
                     received: HashMap::new(),
                     final_result: Some(LastVotedForkSlotsAggregateFinal {
                         slots_stake_map: HashMap::new(),
-                        total_active_stake: 1000,
+                        epoch_infos: vec![
+                            LastVotedForkSlotsEpochInfoRecord {
+                                epoch: 1,
+                                total_stake: 1000,
+                                actively_voting_stake: 800,
+                                actively_voting_for_this_epoch_stake: 800,
+                            },
+                            LastVotedForkSlotsEpochInfoRecord {
+                                epoch: 2,
+                                total_stake: 1000,
+                                actively_voting_stake: 900,
+                                actively_voting_for_this_epoch_stake: 900,
+                            },
+                        ],
                     }),
                 }),
                 ..Default::default()
             };
@@ -1906,7 +2166,20 @@ mod tests {
             WenRestartProgressInternalState::FindHeaviestFork {
                 aggregate_final_result: LastVotedForkSlotsFinalResult {
                     slots_stake_map: HashMap::new(),
-                    total_active_stake: 1000,
+                    epoch_info_vec: vec![
+                        LastVotedForkSlotsEpochInfo {
+                            epoch: 1,
+                            total_stake: 1000,
+                            actively_voting_stake: 800,
+                            actively_voting_for_this_epoch_stake: 800,
+                        },
+                        LastVotedForkSlotsEpochInfo {
+                            epoch: 2,
+                            total_stake: 1000,
+                            actively_voting_stake: 900,
+                            actively_voting_for_this_epoch_stake: 900,
+                        }
+                    ],
                 },
                 my_heaviest_fork: progress.my_heaviest_fork.clone(),
             },
@@ -2212,7 +2485,12 @@ mod tests {
             received: HashMap::new(),
             final_result: Some(LastVotedForkSlotsAggregateFinal {
                 slots_stake_map: vec![(0, 900), (1, 800)].into_iter().collect(),
-                total_active_stake: 900,
+                epoch_infos: vec![LastVotedForkSlotsEpochInfoRecord {
+                    epoch: 0,
+                    total_stake: 2000,
+                    actively_voting_stake: 900,
+                    actively_voting_for_this_epoch_stake: 900,
+                }],
             }),
         });
         let my_heaviest_fork = Some(HeaviestForkRecord {
@@ -2264,13 +2542,23 @@ mod tests {
                     last_voted_fork_slots: vec![0, 1],
                     aggregate_final_result: Some(LastVotedForkSlotsFinalResult {
                         slots_stake_map: expected_slots_stake_map.clone(),
-                        total_active_stake: 900,
+                        epoch_info_vec: vec![LastVotedForkSlotsEpochInfo {
+                            epoch: 0,
+                            total_stake: 2000,
+                            actively_voting_stake: 900,
+                            actively_voting_for_this_epoch_stake: 900,
+                        }],
                     }),
                 },
                 WenRestartProgressInternalState::FindHeaviestFork {
                     aggregate_final_result: LastVotedForkSlotsFinalResult {
                         slots_stake_map: expected_slots_stake_map.clone(),
-                        total_active_stake: 900,
+                        epoch_info_vec: vec![LastVotedForkSlotsEpochInfo {
+                            epoch: 0,
+                            total_stake: 2000,
+                            actively_voting_stake: 900,
+                            actively_voting_for_this_epoch_stake: 900,
+                        }],
                     },
                     my_heaviest_fork: None,
                 },
@@ -2291,7 +2579,12 @@ mod tests {
                 WenRestartProgressInternalState::FindHeaviestFork {
                     aggregate_final_result: LastVotedForkSlotsFinalResult {
                         slots_stake_map: expected_slots_stake_map,
-                        total_active_stake: 900,
+                        epoch_info_vec: vec![LastVotedForkSlotsEpochInfo {
+                            epoch: 0,
+                            total_stake: 2000,
+                            actively_voting_stake: 900,
+                            actively_voting_for_this_epoch_stake: 900,
+                        }],
                     },
                     my_heaviest_fork: Some(HeaviestForkRecord {
                         slot: 1,
@@ -2399,7 +2692,7 @@ mod tests {
         let exit = Arc::new(AtomicBool::new(false));
         let test_state = wen_restart_test_init(&ledger_path);
         let last_vote_slot = test_state.last_voted_fork_slots[0];
-        let slot_with_no_block = last_vote_slot + 5;
+        let slot_with_no_block = 1;
         // This fails because the corresponding block is not found, which is wrong; we should have
         // repaired all eligible blocks when we exit the LastVotedForkSlots state.
         assert_eq!(
             find_heaviest_fork(
                 LastVotedForkSlotsFinalResult {
                     slots_stake_map: vec![(0, 900), (slot_with_no_block, 800)]
                         .into_iter()
                         .collect(),
-                    total_active_stake: 900,
+                    epoch_info_vec: vec![LastVotedForkSlotsEpochInfo {
+                        epoch: 0,
+                        total_stake: 1000,
+                        actively_voting_stake: 900,
+                        actively_voting_for_this_epoch_stake: 900,
+                    }],
                 },
                 test_state.bank_forks.clone(),
                 test_state.blockstore.clone(),
@@ -2423,8 +2721,13 @@ mod tests {
         assert_eq!(
             find_heaviest_fork(
                 LastVotedForkSlotsFinalResult {
-                    slots_stake_map: vec![(last_vote_slot, 900)].into_iter().collect(),
-                    total_active_stake: 900,
+                    slots_stake_map: vec![(3, 900)].into_iter().collect(),
+                    epoch_info_vec: vec![LastVotedForkSlotsEpochInfo {
+                        epoch: 0,
+                        total_stake: 1000,
+                        actively_voting_stake: 900,
+                        actively_voting_for_this_epoch_stake: 900,
+                    }],
                 },
                 test_state.bank_forks.clone(),
                 test_state.blockstore.clone(),
             )
             .unwrap_err()
             .downcast::<WenRestartError>()
             .unwrap(),
-            WenRestartError::BlockNotLinkedToExpectedParent(
-                last_vote_slot,
-                Some(last_vote_slot - 1),
-                0
-            ),
+            WenRestartError::BlockNotLinkedToExpectedParent(3, Some(2), 0),
         );
         // The following fails because we expect that some slot in slots_stake_map doesn't chain to
         // the one before it.
assert_eq!( find_heaviest_fork( LastVotedForkSlotsFinalResult { - slots_stake_map: vec![(2, 900), (last_vote_slot, 900)].into_iter().collect(), - total_active_stake: 900, + slots_stake_map: vec![(2, 900), (5, 900)].into_iter().collect(), + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), @@ -2454,11 +2758,7 @@ mod tests { .unwrap_err() .downcast::() .unwrap(), - WenRestartError::BlockNotLinkedToExpectedParent( - last_vote_slot, - Some(last_vote_slot - 1), - 2 - ), + WenRestartError::BlockNotLinkedToExpectedParent(5, Some(4), 2), ); // The following fails because the new slot is not full. let not_full_slot = last_vote_slot + 5; @@ -2489,7 +2789,20 @@ mod tests { find_heaviest_fork( LastVotedForkSlotsFinalResult { slots_stake_map, - total_active_stake: 900, + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + ], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), @@ -2529,7 +2842,20 @@ mod tests { find_heaviest_fork( LastVotedForkSlotsFinalResult { slots_stake_map, - total_active_stake: 900, + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + ], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), From 42e72bf1b31f5335d3f7ee56ce1f607ceb899c3f Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Sat, 24 Aug 2024 22:08:04 +0700 Subject: [PATCH 212/529] VoteAccount: remove OnceLock around VoteState (#2659) * VoteAccount: remove OnceLock around VoteState The lazy-init OnceLock logic wasn't really being used. Execution/perf wise this code doesn't change anything since we were already forcing early deserialization in StakesCache::check_and_store: - // Called to eagerly deserialize vote state - let _res = vote_account.vote_state(); For more context see https://discord.com/channels/428295358100013066/439194979856809985/1268759289531596872 https://discord.com/channels/428295358100013066/439194979856809985/1272661616399224892 * VoteAccounts: discard invalid accounts on deserialization Before switching to eager vote account deserialization we were (accidentally) able to load snapshots with invalid vote accounts (because account parsing was deferred). This change ensures that we're still able to parse such snapshots. 
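To make the snapshot-compatibility point above concrete: tolerating invalid vote accounts during deserialization comes down to validating each map entry inside a serde visitor and skipping the ones that fail, instead of propagating the error. The sketch below shows the pattern under simplified assumptions; Snapshot, lenient_map, and the String/u64 entry types are invented for the example, and serde_json is used only for brevity where the real code deserializes bincode and validates with VoteAccount::try_from:

use serde::de::{Deserializer, MapAccess, Visitor};
use serde::Deserialize;
use std::{collections::HashMap, fmt};

#[derive(Deserialize)]
struct Snapshot {
    #[serde(deserialize_with = "lenient_map")]
    accounts: HashMap<String, u64>,
}

// Decode each entry in a permissive raw form, then keep only entries that
// pass conversion; a bad entry is logged and skipped, not a hard error.
fn lenient_map<'de, D>(deserializer: D) -> Result<HashMap<String, u64>, D::Error>
where
    D: Deserializer<'de>,
{
    struct LenientVisitor;

    impl<'de> Visitor<'de> for LenientVisitor {
        type Value = HashMap<String, u64>;

        fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.write_str("a map of accounts")
        }

        fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
            let mut out = HashMap::new();
            while let Some((key, raw)) = access.next_entry::<String, i64>()? {
                match u64::try_from(raw) {
                    Ok(value) => {
                        out.insert(key, value);
                    }
                    Err(err) => eprintln!("dropping invalid entry {key}: {err}"),
                }
            }
            Ok(out)
        }
    }

    deserializer.deserialize_map(LenientVisitor)
}

fn main() {
    let snapshot: Snapshot =
        serde_json::from_str(r#"{"accounts":{"good":1,"bad":-5}}"#).unwrap();
    assert_eq!(snapshot.accounts.len(), 1); // "bad" was dropped, not fatal
}

The deserializer added in the diff below follows the same shape: decode the raw AccountSharedData, attempt the conversion, and log and drop entries that fail.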
* VoteAccount: remove Deserialize impl VoteAccount is never deserialized individually, but only as part of VoteAccounts, which has a custom deser that doesn't require VoteAccount to implement Deserialize --- core/Cargo.toml | 1 + core/src/commitment_service.rs | 20 +- core/src/consensus.rs | 24 +- core/src/consensus/progress_map.rs | 18 +- core/src/replay_stage.rs | 20 +- core/src/validator.rs | 2 +- core/src/vote_simulator.rs | 9 +- ledger-tool/src/main.rs | 3 - ledger/src/blockstore_processor.rs | 28 +- programs/bpf_loader/Cargo.toml | 2 +- programs/bpf_loader/src/syscalls/mod.rs | 10 +- rpc/src/rpc.rs | 5 +- runtime/src/bank.rs | 9 +- runtime/src/bank/fee_distribution.rs | 2 +- .../partitioned_epoch_rewards/calculation.rs | 14 +- runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/bank/tests.rs | 8 +- runtime/src/epoch_stakes.rs | 14 - runtime/src/snapshot_minimizer.rs | 5 +- runtime/src/stakes.rs | 12 +- vote/Cargo.toml | 3 +- vote/src/vote_account.rs | 249 +++++++++++------- 22 files changed, 208 insertions(+), 252 deletions(-) diff --git a/core/Cargo.toml b/core/Cargo.toml index 4d3c59a8ada4f8..d107296bba0e6e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -110,6 +110,7 @@ solana-stake-program = { workspace = true } solana-unified-scheduler-pool = { workspace = true, features = [ "dev-context-only-utils", ] } +solana-vote = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } systemstat = { workspace = true } test-case = { workspace = true } diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 03c04ad9ab4c96..5cdcfa94ec593b 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -202,19 +202,17 @@ impl AggregateCommitmentService { } let vote_state = if pubkey == node_vote_pubkey { // Override old vote_state in bank with latest one for my own vote pubkey - Ok(node_vote_state) + node_vote_state } else { account.vote_state() }; - if let Ok(vote_state) = vote_state { - Self::aggregate_commitment_for_vote_account( - &mut commitment, - &mut rooted_stake, - vote_state, - ancestors, - *lamports, - ); - } + Self::aggregate_commitment_for_vote_account( + &mut commitment, + &mut rooted_stake, + vote_state, + ancestors, + *lamports, + ); } (commitment, rooted_stake) @@ -546,7 +544,7 @@ mod tests { fn test_highest_super_majority_root_advance() { fn get_vote_state(vote_pubkey: Pubkey, bank: &Bank) -> VoteState { let vote_account = bank.get_vote_account(&vote_pubkey).unwrap(); - vote_account.vote_state().cloned().unwrap() + vote_account.vote_state().clone() } let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests()); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 93c80c554c6ab1..96fbbc6b68d0bd 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -379,20 +379,7 @@ impl Tower { continue; } trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake); - let mut vote_state = match account.vote_state().cloned() { - Err(_) => { - datapoint_warn!( - "tower_warn", - ( - "warn", - format!("Unable to get vote_state from account {key}"), - String - ), - ); - continue; - } - Ok(vote_state) => vote_state, - }; + let mut vote_state = account.vote_state().clone(); for vote in &vote_state.votes { lockout_intervals .entry(vote.lockout.last_locked_out_slot()) @@ -591,7 +578,7 @@ impl Tower { pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option { let vote_account = bank.get_vote_account(vote_account_pubkey)?; let 
vote_state = vote_account.vote_state(); - vote_state.as_ref().ok()?.last_voted_slot() + vote_state.last_voted_slot() } pub fn record_bank_vote(&mut self, bank: &Bank) -> Option { @@ -1576,10 +1563,7 @@ impl Tower { bank: &Bank, ) { if let Some(vote_account) = bank.get_vote_account(vote_account_pubkey) { - self.vote_state = vote_account - .vote_state() - .cloned() - .expect("vote_account isn't a VoteState?"); + self.vote_state = vote_account.vote_state().clone(); self.initialize_root(root); self.initialize_lockouts(|v| v.slot() > root); trace!( @@ -2428,7 +2412,7 @@ pub mod test { .get_vote_account(&vote_pubkey) .unwrap(); let state = observed.vote_state(); - info!("observed tower: {:#?}", state.as_ref().unwrap().votes); + info!("observed tower: {:#?}", state.votes); let num_slots_to_try = 200; cluster_votes diff --git a/core/src/consensus/progress_map.rs b/core/src/consensus/progress_map.rs index a06f51b2001534..447ebff0f8361e 100644 --- a/core/src/consensus/progress_map.rs +++ b/core/src/consensus/progress_map.rs @@ -420,19 +420,7 @@ impl ProgressMap { #[cfg(test)] mod test { - use { - super::*, - solana_sdk::account::{Account, AccountSharedData}, - solana_vote::vote_account::VoteAccount, - }; - - fn new_test_vote_account() -> VoteAccount { - let account = AccountSharedData::from(Account { - owner: solana_vote_program::id(), - ..Account::default() - }); - VoteAccount::try_from(account).unwrap() - } + use {super::*, solana_vote::vote_account::VoteAccount}; #[test] fn test_add_vote_pubkey() { @@ -467,7 +455,7 @@ mod test { let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys .iter() .skip(num_vote_accounts - staked_vote_accounts) - .map(|pubkey| (*pubkey, (1, new_test_vote_account()))) + .map(|pubkey| (*pubkey, (1, VoteAccount::new_random()))) .collect(); let mut stats = PropagatedStats::default(); @@ -509,7 +497,7 @@ mod test { let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys .iter() .skip(num_vote_accounts - staked_vote_accounts) - .map(|pubkey| (*pubkey, (1, new_test_vote_account()))) + .map(|pubkey| (*pubkey, (1, VoteAccount::new_random()))) .collect(); stats.add_node_pubkey_internal(&node_pubkey, &vote_account_pubkeys, &epoch_vote_accounts); assert!(stats.propagated_node_ids.contains(&node_pubkey)); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index f2cedd7d731aac..398a15601ba3ee 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -2485,17 +2485,6 @@ impl ReplayStage { Some(vote_account) => vote_account, }; let vote_state = vote_account.vote_state(); - let vote_state = match vote_state.as_ref() { - Err(_) => { - warn!( - "Vote account {} is unreadable. Unable to vote", - vote_account_pubkey, - ); - return GenerateVoteTxResult::Failed; - } - Ok(vote_state) => vote_state, - }; - if vote_state.node_pubkey != node_keypair.pubkey() { info!( "Vote account node_pubkey mismatch: {} (expected: {}). 
Unable to vote", @@ -3473,9 +3462,7 @@ impl ReplayStage { let Some(vote_account) = bank.get_vote_account(my_vote_pubkey) else { return; }; - let Ok(mut bank_vote_state) = vote_account.vote_state().cloned() else { - return; - }; + let mut bank_vote_state = vote_account.vote_state().clone(); if bank_vote_state.last_voted_slot() <= tower.vote_state.last_voted_slot() { return; } @@ -7612,10 +7599,7 @@ pub(crate) mod tests { let vote_account = expired_bank_child .get_vote_account(&my_vote_pubkey) .unwrap(); - assert_eq!( - vote_account.vote_state().as_ref().unwrap().tower(), - vec![0, 1] - ); + assert_eq!(vote_account.vote_state().tower(), vec![0, 1]); expired_bank_child.fill_bank_with_ticks_for_tests(); expired_bank_child.freeze(); diff --git a/core/src/validator.rs b/core/src/validator.rs index 016514dd817166..0f99e4c4768497 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -2488,7 +2488,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo if activated_stake == 0 { continue; } - let vote_state_node_pubkey = vote_account.node_pubkey().copied().unwrap_or_default(); + let vote_state_node_pubkey = *vote_account.node_pubkey(); if let Some(peer) = peers.get(&vote_state_node_pubkey) { if peer.shred_version() == my_shred_version { diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 31395f65a42a6e..60333dae94c12d 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -104,7 +104,7 @@ impl VoteSimulator { let tower_sync = if let Some(vote_account) = parent_bank.get_vote_account(&keypairs.vote_keypair.pubkey()) { - let mut vote_state = vote_account.vote_state().unwrap().clone(); + let mut vote_state = vote_account.vote_state().clone(); process_vote_unchecked( &mut vote_state, solana_vote_program::vote_state::Vote::new( @@ -143,12 +143,7 @@ impl VoteSimulator { .get_vote_account(&keypairs.vote_keypair.pubkey()) .unwrap(); let state = vote_account.vote_state(); - assert!(state - .as_ref() - .unwrap() - .votes - .iter() - .any(|lockout| lockout.slot() == parent)); + assert!(state.votes.iter().any(|lockout| lockout.slot() == parent)); } } while new_bank.tick_height() < new_bank.max_tick_height() { diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index d9a3a60d2f4600..ac9e3fb9f929c2 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -206,7 +206,6 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { // Search all forks and collect the last vote made by each validator let mut last_votes = HashMap::new(); - let default_vote_state = VoteState::default(); for fork_slot in &fork_slots { let bank = &bank_forks[*fork_slot]; @@ -217,7 +216,6 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { .sum(); for (stake, vote_account) in bank.vote_accounts().values() { let vote_state = vote_account.vote_state(); - let vote_state = vote_state.unwrap_or(&default_vote_state); if let Some(last_vote) = vote_state.votes.iter().last() { let entry = last_votes.entry(vote_state.node_pubkey).or_insert(( last_vote.slot(), @@ -258,7 +256,6 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { loop { for (_, vote_account) in bank.vote_accounts().values() { let vote_state = vote_account.vote_state(); - let vote_state = vote_state.unwrap_or(&default_vote_state); if let Some(last_vote) = vote_state.votes.iter().last() { let validator_votes = all_votes.entry(vote_state.node_pubkey).or_default(); validator_votes diff --git 
a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 304cc549e57f5b..98701cf3468209 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1870,7 +1870,6 @@ fn load_frozen_forks( let new_root_bank = { if bank_forks.read().unwrap().root() >= max_root { supermajority_root_from_vote_accounts( - bank.slot(), bank.total_epoch_stake(), &bank.vote_accounts(), ).and_then(|supermajority_root| { @@ -2005,27 +2004,17 @@ fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option Option { let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts - .iter() - .filter_map(|(key, (stake, account))| { + .values() + .filter_map(|(stake, account)| { if *stake == 0 { return None; } - match account.vote_state().as_ref() { - Err(_) => { - warn!( - "Unable to get vote_state from account {} in bank: {}", - key, bank_slot - ); - None - } - Ok(vote_state) => Some((vote_state.root_slot?, *stake)), - } + Some((account.vote_state().root_slot?, *stake)) }) .collect(); @@ -4603,23 +4592,20 @@ pub mod tests { }; let total_stake = 10; - let slot = 100; // Supermajority root should be None - assert!( - supermajority_root_from_vote_accounts(slot, total_stake, &HashMap::default()).is_none() - ); + assert!(supermajority_root_from_vote_accounts(total_stake, &HashMap::default()).is_none()); // Supermajority root should be None let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)]; let accounts = convert_to_vote_accounts(roots_stakes); - assert!(supermajority_root_from_vote_accounts(slot, total_stake, &accounts).is_none()); + assert!(supermajority_root_from_vote_accounts(total_stake, &accounts).is_none()); // Supermajority root should be 4, has 7/10 of the stake let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)]; let accounts = convert_to_vote_accounts(roots_stakes); assert_eq!( - supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(), + supermajority_root_from_vote_accounts(total_stake, &accounts).unwrap(), 4 ); @@ -4627,7 +4613,7 @@ pub mod tests { let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)]; let accounts = convert_to_vote_accounts(roots_stakes); assert_eq!( - supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(), + supermajority_root_from_vote_accounts(total_stake, &accounts).unwrap(), 8 ); } diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index ace6f2ed9c0b83..4c085663513300 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -34,7 +34,7 @@ assert_matches = { workspace = true } memoffset = { workspace = true } rand = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } -solana-vote = { workspace = true } +solana-vote = { workspace = true, features = ["dev-context-only-utils"] } test-case = { workspace = true } [lib] diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index e70a266f340917..876b734af19156 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -4820,15 +4820,7 @@ mod tests { let mut vote_accounts_map = HashMap::new(); vote_accounts_map.insert( vote_address, - ( - expected_epoch_stake, - VoteAccount::try_from(AccountSharedData::new( - 0, - 0, - &solana_sdk::vote::program::id(), - )) - .unwrap(), - ), + (expected_epoch_stake, VoteAccount::new_random()), ); with_mock_invoke_context!(invoke_context, transaction_context, vec![]); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 
db4d7bf9e69b53..0cd32ab3ff84b1 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -92,7 +92,7 @@ use { TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, }, - solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY}, + solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY, spl_token_2022::{ extension::{ interest_bearing_mint::InterestBearingConfig, BaseStateWithExtensions, @@ -1004,7 +1004,6 @@ impl JsonRpcRequestProcessor { let epoch_vote_accounts = bank .epoch_vote_accounts(bank.get_epoch_and_slot_index(bank.slot()).0) .ok_or_else(Error::invalid_request)?; - let default_vote_state = VoteState::default(); let delinquent_validator_slot_distance = config .delinquent_slot_distance .unwrap_or(DELINQUENT_VALIDATOR_SLOT_DISTANCE); @@ -1021,7 +1020,6 @@ impl JsonRpcRequestProcessor { } let vote_state = account.vote_state(); - let vote_state = vote_state.unwrap_or(&default_vote_state); let last_vote = if let Some(vote) = vote_state.votes.iter().last() { vote.slot() } else { @@ -4360,6 +4358,7 @@ pub mod tests { transaction::{ self, SimpleAddressLoader, Transaction, TransactionError, TransactionVersion, }, + vote::state::VoteState, }, solana_transaction_status::{ EncodedConfirmedBlock, EncodedTransaction, EncodedTransactionWithStatusMeta, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 34a1cd9d70b89c..4b6377c0c53e3c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2282,12 +2282,8 @@ impl Bank { invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::WrongOwner); return None; } - let Ok(vote_state) = vote_account.vote_state().cloned() else { - invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::BadState); - return None; - }; let vote_with_stake_delegations = VoteWithStakeDelegations { - vote_state: Arc::new(vote_state), + vote_state: Arc::new(vote_account.vote_state().clone()), vote_account: AccountSharedData::from(vote_account), delegations: Vec::default(), }; @@ -2705,7 +2701,6 @@ impl Bank { let vote_accounts = self.vote_accounts(); let recent_timestamps = vote_accounts.iter().filter_map(|(pubkey, (_, account))| { let vote_state = account.vote_state(); - let vote_state = vote_state.as_ref().ok()?; let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?; (slot_delta <= slots_per_epoch).then_some({ ( @@ -2881,7 +2876,7 @@ impl Bank { // up and can be used to set the collector id to the highest staked // node. If no staked nodes exist, allow fallback to an unstaked test // collector id during tests. 
- let collector_id = self.stakes_cache.stakes().highest_staked_node(); + let collector_id = self.stakes_cache.stakes().highest_staked_node().copied(); #[cfg(feature = "dev-context-only-utils")] let collector_id = collector_id.or(collector_id_for_tests); self.collector_id = diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 4dc511a5eee95c..383521c016179f 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -203,7 +203,7 @@ impl Bank { None } else { total_staked += *staked; - Some((*account.node_pubkey()?, *staked)) + Some((*account.node_pubkey(), *staked)) } }) .collect::>(); diff --git a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs index 9d929accb5cdb1..257a531f3e04d4 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs @@ -385,7 +385,7 @@ impl Bank { if vote_account.owner() != &solana_vote_program { return None; } - let vote_state = vote_account.vote_state().cloned().ok()?; + let vote_state = vote_account.vote_state(); let pre_lamport = stake_account.lamports(); @@ -393,7 +393,7 @@ impl Bank { rewarded_epoch, stake_state, &mut stake_account, - &vote_state, + vote_state, &point_value, stake_history, reward_calc_tracer.as_ref(), @@ -407,13 +407,14 @@ impl Bank { "calculated reward: {} {} {} {}", stake_pubkey, pre_lamport, post_lamport, stakers_reward ); + let commission = vote_state.commission; // track voter rewards let mut voters_reward_entry = vote_account_rewards .entry(vote_pubkey) .or_insert(VoteReward { + commission, vote_account: vote_account.into(), - commission: vote_state.commission, vote_rewards: 0, vote_needs_store: false, }); @@ -438,7 +439,7 @@ impl Bank { reward_type: RewardType::Staking, lamports: i64::try_from(stakers_reward).unwrap(), post_balance, - commission: Some(vote_state.commission), + commission: Some(commission), }, stake, }); @@ -508,13 +509,10 @@ impl Bank { if vote_account.owner() != &solana_vote_program { return 0; } - let Ok(vote_state) = vote_account.vote_state() else { - return 0; - }; solana_stake_program::points::calculate_points( stake_account.stake_state(), - vote_state, + vote_account.vote_state(), stake_history, new_warmup_cooldown_rate_epoch, ) diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 258c49593f0e12..bdcf7ae6215f72 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -535,7 +535,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "J7MnnLU99fYk2hfZPjdqyTYxgHstwRUDk2Yr8fFnXxFp") + frozen_abi(digest = "HQYDRuCaM5V1ggSuMPTKT5Mu2vE5HX4y4ZM1Xuorx6My") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index dfbd449731179a..dc8175cebd9550 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -4012,12 +4012,8 @@ fn test_bank_epoch_vote_accounts() { accounts .iter() .filter_map(|(pubkey, (stake, account))| { - if let Ok(vote_state) = account.vote_state().as_ref() { - if vote_state.node_pubkey == leader_pubkey { - Some((*pubkey, *stake)) - } else { - None - } + if account.node_pubkey() == &leader_pubkey { + Some((*pubkey, *stake)) } else { None } diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 84b6bdc40a6345..fa4de74d8e23cb 100644 --- a/runtime/src/epoch_stakes.rs +++ 
b/runtime/src/epoch_stakes.rs @@ -101,20 +101,6 @@ impl EpochStakes { .iter() .filter_map(|(key, (stake, account))| { let vote_state = account.vote_state(); - let vote_state = match vote_state.as_ref() { - Err(_) => { - datapoint_warn!( - "parse_epoch_vote_accounts", - ( - "warn", - format!("Unable to get vote_state from account {key}"), - String - ), - ); - return None; - } - Ok(vote_state) => vote_state, - }; if *stake > 0 { if let Some(authorized_voter) = vote_state diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index b48f1832fc0256..7aeba40adea04d 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -158,9 +158,8 @@ impl<'a> SnapshotMinimizer<'a> { .par_iter() .for_each(|(pubkey, (_stake, vote_account))| { self.minimized_account_set.insert(*pubkey); - if let Ok(vote_state) = vote_account.vote_state().as_ref() { - self.minimized_account_set.insert(vote_state.node_pubkey); - } + self.minimized_account_set + .insert(*vote_account.node_pubkey()); }); } diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index d79d8e43492687..f878510402e929 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -98,11 +98,6 @@ impl StakesCache { if VoteStateVersions::is_correct_size_and_initialized(account.data()) { match VoteAccount::try_from(account.to_account_shared_data()) { Ok(vote_account) => { - { - // Called to eagerly deserialize vote state - let _res = vote_account.vote_state(); - } - // drop the old account after releasing the lock let _old_vote_account = { let mut stakes = self.0.write().unwrap(); @@ -428,7 +423,6 @@ impl Stakes { new_rate_activation_epoch: Option, ) -> Option { debug_assert_ne!(vote_account.lamports(), 0u64); - debug_assert!(vote_account.is_deserialized()); let stake_delegations = &self.stake_delegations; self.vote_accounts.insert(*vote_pubkey, vote_account, || { @@ -507,9 +501,9 @@ impl Stakes { &self.stake_delegations } - pub(crate) fn highest_staked_node(&self) -> Option { + pub(crate) fn highest_staked_node(&self) -> Option<&Pubkey> { let vote_account = self.vote_accounts.find_max_by_delegated_stake()?; - vote_account.node_pubkey().copied() + Some(vote_account.node_pubkey()) } } @@ -835,7 +829,7 @@ pub(crate) mod tests { let vote11_node_pubkey = vote_state::from(&vote11_account).unwrap().node_pubkey; - let highest_staked_node = stakes_cache.stakes().highest_staked_node(); + let highest_staked_node = stakes_cache.stakes().highest_staked_node().copied(); assert_eq!(highest_staked_node, Some(vote11_node_pubkey)); } diff --git a/vote/Cargo.toml b/vote/Cargo.toml index 2eb821eac407a4..89f3b5e433f49f 100644 --- a/vote/Cargo.toml +++ b/vote/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } [dependencies] itertools = { workspace = true } log = { workspace = true } +rand = { workspace = true, optional = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } @@ -34,7 +35,7 @@ targets = ["x86_64-unknown-linux-gnu"] rustc_version = { workspace = true, optional = true } [features] -dev-context-only-utils = [] +dev-context-only-utils = ["dep:rand"] frozen-abi = [ "dep:rustc_version", "dep:solana-frozen-abi", diff --git a/vote/src/vote_account.rs b/vote/src/vote_account.rs index 14cc788cca13d9..8155d1540f04e7 100644 --- a/vote/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -1,6 +1,9 @@ use { itertools::Itertools, - serde::ser::{Serialize, Serializer}, + serde::{ + de::{MapAccess, Visitor}, 
+ ser::{Serialize, Serializer}, + }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, instruction::InstructionError, @@ -10,6 +13,7 @@ use { std::{ cmp::Ordering, collections::{hash_map::Entry, HashMap}, + fmt, iter::FromIterator, mem, sync::{Arc, OnceLock}, @@ -18,8 +22,7 @@ use { }; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Debug, PartialEq, Deserialize)] -#[serde(try_from = "AccountSharedData")] +#[derive(Clone, Debug, PartialEq)] pub struct VoteAccount(Arc); #[derive(Debug, Error)] @@ -34,17 +37,17 @@ pub enum Error { #[derive(Debug)] struct VoteAccountInner { account: AccountSharedData, - vote_state: OnceLock>, + vote_state: VoteState, } pub type VoteAccountsHashMap = HashMap; - #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Debug, Deserialize)] -#[serde(from = "Arc")] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct VoteAccounts { + #[serde(deserialize_with = "deserialize_accounts_hash_map")] vote_accounts: Arc, // Inner Arc is meant to implement copy-on-write semantics. + #[serde(skip)] staked_nodes: OnceLock< Arc< HashMap< @@ -68,22 +71,49 @@ impl VoteAccount { self.0.account.owner() } - pub fn vote_state(&self) -> Result<&VoteState, &Error> { - // VoteState::deserialize deserializes a VoteStateVersions and then - // calls VoteStateVersions::convert_to_current. - self.0 - .vote_state - .get_or_init(|| VoteState::deserialize(self.0.account.data()).map_err(Error::from)) - .as_ref() - } - - pub fn is_deserialized(&self) -> bool { - self.0.vote_state.get().is_some() + pub fn vote_state(&self) -> &VoteState { + &self.0.vote_state } /// VoteState.node_pubkey of this vote-account. - pub fn node_pubkey(&self) -> Option<&Pubkey> { - self.vote_state().ok().map(|s| &s.node_pubkey) + pub fn node_pubkey(&self) -> &Pubkey { + &self.0.vote_state.node_pubkey + } + + #[cfg(feature = "dev-context-only-utils")] + pub fn new_random() -> VoteAccount { + use { + rand::Rng as _, + solana_sdk::{ + clock::Clock, + vote::state::{VoteInit, VoteStateVersions}, + }, + }; + + let mut rng = rand::thread_rng(); + + let vote_init = VoteInit { + node_pubkey: Pubkey::new_unique(), + authorized_voter: Pubkey::new_unique(), + authorized_withdrawer: Pubkey::new_unique(), + commission: rng.gen(), + }; + let clock = Clock { + slot: rng.gen(), + epoch_start_timestamp: rng.gen(), + epoch: rng.gen(), + leader_schedule_epoch: rng.gen(), + unix_timestamp: rng.gen(), + }; + let vote_state = VoteState::new(&vote_init, &clock); + let account = AccountSharedData::new_data( + rng.gen(), // lamports + &VoteStateVersions::new_current(vote_state.clone()), + &solana_sdk::vote::program::id(), // owner + ) + .unwrap(); + + VoteAccount::try_from(account).unwrap() } } @@ -103,9 +133,7 @@ impl VoteAccounts { self.vote_accounts .values() .filter(|(stake, _)| *stake != 0u64) - .filter_map(|(stake, vote_account)| { - Some((*vote_account.node_pubkey()?, stake)) - }) + .map(|(stake, vote_account)| (*vote_account.node_pubkey(), stake)) .into_grouping_map() .aggregate(|acc, _node_pubkey, stake| { Some(acc.unwrap_or_default() + stake) @@ -164,7 +192,7 @@ impl VoteAccounts { // The node keys have changed, we move the stake from the old node to the // new one Self::do_sub_node_stake(staked_nodes, *stake, old_node_pubkey); - Self::do_add_node_stake(staked_nodes, *stake, new_node_pubkey.copied()); + Self::do_add_node_stake(staked_nodes, *stake, *new_node_pubkey); } } @@ -175,11 +203,7 @@ impl VoteAccounts { // This is a new vote account. 
We don't know the stake yet, so we need to compute it.
                let (stake, vote_account) = entry.insert((calculate_stake(), new_vote_account));
                if let Some(staked_nodes) = self.staked_nodes.get_mut() {
-                    Self::do_add_node_stake(
-                        staked_nodes,
-                        *stake,
-                        vote_account.node_pubkey().copied(),
-                    );
+                    Self::do_add_node_stake(staked_nodes, *stake, *vote_account.node_pubkey());
                }
                None
            }
@@ -220,24 +244,22 @@ impl VoteAccounts {
             return;
         };
 
-        VoteAccounts::do_add_node_stake(staked_nodes, stake, vote_account.node_pubkey().copied());
+        VoteAccounts::do_add_node_stake(staked_nodes, stake, *vote_account.node_pubkey());
     }
 
     fn do_add_node_stake(
         staked_nodes: &mut Arc<HashMap<Pubkey, u64>>,
         stake: u64,
-        node_pubkey: Option<Pubkey>,
+        node_pubkey: Pubkey,
     ) {
         if stake == 0u64 {
             return;
         }
 
-        node_pubkey.map(|node_pubkey| {
-            Arc::make_mut(staked_nodes)
-                .entry(node_pubkey)
-                .and_modify(|s| *s += stake)
-                .or_insert(stake)
-        });
+        Arc::make_mut(staked_nodes)
+            .entry(node_pubkey)
+            .and_modify(|s| *s += stake)
+            .or_insert(stake);
     }
 
     fn sub_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
@@ -251,24 +273,22 @@ impl VoteAccounts {
     fn do_sub_node_stake(
         staked_nodes: &mut Arc<HashMap<Pubkey, u64>>,
         stake: u64,
-        node_pubkey: Option<&Pubkey>,
+        node_pubkey: &Pubkey,
     ) {
         if stake == 0u64 {
             return;
         }
 
-        if let Some(node_pubkey) = node_pubkey {
-            let staked_nodes = Arc::make_mut(staked_nodes);
-            let current_stake = staked_nodes
-                .get_mut(node_pubkey)
-                .expect("this should not happen");
-            match (*current_stake).cmp(&stake) {
-                Ordering::Less => panic!("subtraction value exceeds node's stake"),
-                Ordering::Equal => {
-                    staked_nodes.remove(node_pubkey);
-                }
-                Ordering::Greater => *current_stake -= stake,
+        let staked_nodes = Arc::make_mut(staked_nodes);
+        let current_stake = staked_nodes
+            .get_mut(node_pubkey)
+            .expect("this should not happen");
+        match (*current_stake).cmp(&stake) {
+            Ordering::Less => panic!("subtraction value exceeds node's stake"),
+            Ordering::Equal => {
+                staked_nodes.remove(node_pubkey);
             }
+            Ordering::Greater => *current_stake -= stake,
         }
     }
 }
@@ -303,8 +323,8 @@ impl TryFrom<AccountSharedData> for VoteAccountInner {
             return Err(Error::InvalidOwner(*account.owner()));
         }
         Ok(Self {
+            vote_state: VoteState::deserialize(account.data()).map_err(Error::InstructionError)?,
             account,
-            vote_state: OnceLock::new(),
         })
     }
 }
@@ -368,13 +388,51 @@ impl FromIterator<(Pubkey, (/*stake:*/ u64, VoteAccount))> for VoteAccounts {
     }
 }
 
-impl Serialize for VoteAccounts {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        self.vote_accounts.serialize(serializer)
+// This custom deserializer is needed to ensure compatibility at snapshot loading with versions
+// before https://github.com/anza-xyz/agave/pull/2659 which would theoretically allow invalid vote
+// accounts in VoteAccounts.
+//
+// In the (near) future we should remove this custom deserializer and make it a hard error when we
+// find invalid vote accounts in snapshots.
+fn deserialize_accounts_hash_map<'de, D>(
+    deserializer: D,
+) -> Result<Arc<VoteAccountsHashMap>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    struct VoteAccountsVisitor;
+
+    impl<'de> Visitor<'de> for VoteAccountsVisitor {
+        type Value = Arc<VoteAccountsHashMap>;
+
+        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+            formatter.write_str("a map of vote accounts")
+        }
+
+        fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
+        where
+            M: MapAccess<'de>,
+        {
+            let mut accounts = HashMap::new();
+
+            while let Some((pubkey, (stake, account))) =
+                access.next_entry::<Pubkey, (u64, AccountSharedData)>()?
+ { + match VoteAccount::try_from(account) { + Ok(vote_account) => { + accounts.insert(pubkey, (stake, vote_account)); + } + Err(e) => { + log::warn!("failed to deserialize vote account: {e}"); + } + } + } + + Ok(Arc::new(accounts)) + } } + + deserializer.deserialize_map(VoteAccountsVisitor) } #[cfg(test)] @@ -441,12 +499,10 @@ mod tests { .into_iter() .filter(|(_, (stake, _))| *stake != 0) { - if let Some(node_pubkey) = vote_account.node_pubkey() { - staked_nodes - .entry(*node_pubkey) - .and_modify(|s| *s += *stake) - .or_insert(*stake); - } + staked_nodes + .entry(*vote_account.node_pubkey()) + .and_modify(|s| *s += *stake) + .or_insert(*stake); } staked_nodes } @@ -458,9 +514,7 @@ mod tests { let lamports = account.lamports(); let vote_account = VoteAccount::try_from(account).unwrap(); assert_eq!(lamports, vote_account.lamports()); - assert_eq!(vote_state, *vote_account.vote_state().unwrap()); - // 2nd call to .vote_state() should return the cached value. - assert_eq!(vote_state, *vote_account.vote_state().unwrap()); + assert_eq!(vote_state, *vote_account.vote_state()); } #[test] @@ -468,39 +522,14 @@ mod tests { let mut rng = rand::thread_rng(); let (account, vote_state) = new_rand_vote_account(&mut rng, None); let vote_account = VoteAccount::try_from(account.clone()).unwrap(); - assert_eq!(vote_state, *vote_account.vote_state().unwrap()); - // Assert than VoteAccount has the same wire format as Account. + assert_eq!(vote_state, *vote_account.vote_state()); + // Assert that VoteAccount has the same wire format as Account. assert_eq!( bincode::serialize(&account).unwrap(), bincode::serialize(&vote_account).unwrap() ); } - #[test] - fn test_vote_account_deserialize() { - let mut rng = rand::thread_rng(); - let (account, vote_state) = new_rand_vote_account(&mut rng, None); - let data = bincode::serialize(&account).unwrap(); - let vote_account = VoteAccount::try_from(account).unwrap(); - assert_eq!(vote_state, *vote_account.vote_state().unwrap()); - let other_vote_account: VoteAccount = bincode::deserialize(&data).unwrap(); - assert_eq!(vote_account, other_vote_account); - assert_eq!(vote_state, *other_vote_account.vote_state().unwrap()); - } - - #[test] - fn test_vote_account_round_trip() { - let mut rng = rand::thread_rng(); - let (account, vote_state) = new_rand_vote_account(&mut rng, None); - let vote_account = VoteAccount::try_from(account).unwrap(); - assert_eq!(vote_state, *vote_account.vote_state().unwrap()); - let data = bincode::serialize(&vote_account).unwrap(); - let other_vote_account: VoteAccount = bincode::deserialize(&data).unwrap(); - // Assert that serialize->deserialized returns the same VoteAccount. 
- assert_eq!(vote_account, other_vote_account); - assert_eq!(vote_state, *other_vote_account.vote_state().unwrap()); - } - #[test] fn test_vote_accounts_serialize() { let mut rng = rand::thread_rng(); @@ -536,6 +565,40 @@ assert_eq!(*vote_accounts.vote_accounts, vote_accounts_hash_map); } + #[test] + fn test_vote_accounts_deserialize_invalid_account() { + let mut rng = rand::thread_rng(); + // we'll populate the map with 1 valid and 2 invalid accounts, then ensure that we only get + // the valid one after deserialization + let mut vote_accounts_hash_map = HashMap::<Pubkey, (u64, AccountSharedData)>::new(); + + let (valid_account, _) = new_rand_vote_account(&mut rng, None); + vote_accounts_hash_map.insert(Pubkey::new_unique(), (0xAA, valid_account.clone())); + + // bad data + let invalid_account_data = + AccountSharedData::new_data(42, &vec![0xFF; 42], &solana_sdk::vote::program::id()) + .unwrap(); + vote_accounts_hash_map.insert(Pubkey::new_unique(), (0xBB, invalid_account_data)); + + // wrong owner + let invalid_account_key = + AccountSharedData::new_data(42, &valid_account.data().to_vec(), &Pubkey::new_unique()) + .unwrap(); + vote_accounts_hash_map.insert(Pubkey::new_unique(), (0xCC, invalid_account_key)); + + let data = bincode::serialize(&vote_accounts_hash_map).unwrap(); + let options = bincode::options() + .with_fixint_encoding() + .allow_trailing_bytes(); + let mut deserializer = bincode::de::Deserializer::from_slice(&data, options); + let vote_accounts = deserialize_accounts_hash_map(&mut deserializer).unwrap(); + + assert_eq!(vote_accounts.len(), 1); + let (stake, _account) = vote_accounts.values().next().unwrap(); + assert_eq!(*stake, 0xAA); + } + #[test] fn test_staked_nodes() { let mut rng = rand::thread_rng(); @@ -684,7 +747,7 @@ mod tests { let staked_nodes = vote_accounts.staked_nodes(); let (pubkey, (more_stake, vote_account)) = accounts.find(|(_, (stake, _))| *stake != 0).unwrap(); - let node_pubkey = *vote_account.node_pubkey().unwrap(); + let node_pubkey = *vote_account.node_pubkey(); vote_accounts.insert(pubkey, vote_account, || more_stake); assert_ne!(staked_nodes, vote_accounts.staked_nodes()); assert_eq!( From c842b0d88a7c220039cfabe1fe871dcdd597aa5a Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Sat, 24 Aug 2024 17:38:34 -0700 Subject: [PATCH 213/529] =?UTF-8?q?wen=5Frestart:=20Make=20validator=20res?= =?UTF-8?q?tart=20and=20wait=20for=20supermajority=20after=20=E2=80=A6=20(?= =?UTF-8?q?#1335)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * wen_restart: Make validator restart and wait for supermajority after Phase One completes. * Fix cargo sort error. * Adding two unittests. * Change function name and return type. * Don't change wait_for_supermajority, try to re-initialize validator after wen_restart phase one. * Fix a bad merge and fix test. * Make validator exit if wen_restart is finished. * Remove unnecessary dependencies from Cargo.toml * Remove unused function. * Fix tests to check OK() is returned when in DONE state. * Add wen_restart_proto_path if specified in command line. * Fix command line name for wen_restart. * Log how much stake is required to exit. * Don't double count my own RestartLastVotedForkSlots. * Ignore HeaviestFork from myself. * Also send out HeaviestForkAggregate if I saw supermajority for the first time. * Forbid replay if wen_restart is in progress. * We still need the new bank part. * Try not grabbing a big write lock on bankforks.
* Should read GenerateSnapshot in initialize as well. * Skip all replay stages while in wen_restart. * Root banks if necessary to send EAH request. * Root banks every 100 slots. * Do not start replay thread if in wen_restart. * Do not need to check in_wen_restart inside replay_stage any more. * Fix failed merge. * Make linter happy. * Fix the bad merge after switching to anyhow. * Remove unused code. * Fix bad merge. * Remove unnecessary clone. * Remove unused map_error. * No need to specify snapshot_path in restart commandline. * Small fixes. * Split the enabling of wen_restart into another PR. --- validator/src/main.rs | 22 +++-- wen-restart/src/wen_restart.rs | 154 ++++++++++++++++++++++++++++++--- 2 files changed, 156 insertions(+), 20 deletions(-) diff --git a/validator/src/main.rs b/validator/src/main.rs index 349f01ecace8d7..87b6a44cd9666e 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -34,7 +34,7 @@ use { tpu::DEFAULT_TPU_COALESCE, validator::{ is_snapshot_config_valid, BlockProductionMethod, BlockVerificationMethod, Validator, - ValidatorConfig, ValidatorStartProgress, + ValidatorConfig, ValidatorError, ValidatorStartProgress, }, }, solana_gossip::{ @@ -2045,7 +2045,7 @@ pub fn main() { // the one pushed by bootstrap. node.info.hot_swap_pubkey(identity_keypair.pubkey()); - let validator = Validator::new( + let validator = match Validator::new( node, identity_keypair, &ledger_path, @@ -2062,11 +2062,19 @@ pub fn main() { tpu_enable_udp, tpu_max_connections_per_ipaddr_per_minute, admin_service_post_init, - ) - .unwrap_or_else(|e| { - error!("Failed to start validator: {:?}", e); - exit(1); - }); + ) { + Ok(validator) => validator, + Err(err) => match err.downcast_ref() { + Some(ValidatorError::WenRestartFinished) => { + error!("Please remove --wen_restart and use --wait_for_supermajority as instructed above"); + exit(200); + } + _ => { + error!("Failed to start validator: {:?}", err); + exit(1); + } + }, + }; if let Some(filename) = init_complete_file { File::create(filename).unwrap_or_else(|_| { diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 0fa7ec1cb65f3e..85f54779a854f2 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -88,6 +88,7 @@ pub enum WenRestartError { MalformedProgress(RestartState, String), MissingLastVotedForkSlots, MissingFullSnapshot(String), + MissingSnapshotInProtobuf, NotEnoughStakeAgreeingWithUs(Slot, Hash, HashMap<(Slot, Hash), u64>), UnexpectedState(wen_restart_proto::State), } @@ -148,6 +149,9 @@ impl std::fmt::Display for WenRestartError { WenRestartError::MissingFullSnapshot(directory) => { write!(f, "Missing full snapshot, please check whether correct directory is supplied {directory}") } + WenRestartError::MissingSnapshotInProtobuf => { + write!(f, "Missing snapshot in protobuf") + } WenRestartError::NotEnoughStakeAgreeingWithUs(slot, hash, block_stake_map) => { write!( f, @@ -188,7 +192,11 @@ pub(crate) enum WenRestartProgressInternalState { new_root_slot: Slot, my_snapshot: Option<GenerateSnapshotRecord>, }, - Done, + Done { + slot: Slot, + hash: Hash, + shred_version: u16, + }, } pub(crate) fn send_restart_last_voted_fork_slots( @@ -719,9 +727,10 @@ pub(crate) fn aggregate_restart_heaviest_fork( let total_active_stake_seen_supermajority = heaviest_fork_aggregate.total_active_stake_seen_supermajority(); info!( - "Total active stake seeing supermajority: {} Total active stake: {} Total stake {}", + "Total active stake seeing supermajority: {} Total active stake: {} Required to exit {} Total
stake {}", total_active_stake_seen_supermajority, heaviest_fork_aggregate.total_active_stake(), + majority_stake_required, total_stake ); let can_exit = total_active_stake_seen_supermajority >= majority_stake_required; @@ -833,6 +842,7 @@ } } +#[derive(Clone)] pub struct WenRestartConfig { pub wen_restart_path: PathBuf, pub last_vote: VoteTransaction, @@ -953,7 +963,20 @@ pub fn wait_for_wen_restart(config: WenRestartConfig) -> Result<()> { my_snapshot: Some(snapshot_record), } } - WenRestartProgressInternalState::Done => return Ok(()), + // Proceed to restart if we are ready to wait for supermajority. + WenRestartProgressInternalState::Done { + slot, + hash, + shred_version, + } => { + error!( + "Wen restart finished, please remove --wen_restart and restart with \ + --wait-for-supermajority {} --expected-bank-hash {} --shred-version {} \ + --hard-fork {} --no-snapshot-fetch", + slot, hash, shred_version, slot + ); + return Ok(()); + } }; state = increment_and_write_wen_restart_records( &config.wen_restart_path, @@ -1038,12 +1061,16 @@ pub(crate) fn increment_and_write_wen_restart_records( if let Some(my_snapshot) = my_snapshot { progress.set_state(RestartState::Done); progress.my_snapshot = Some(my_snapshot.clone()); - WenRestartProgressInternalState::Done + WenRestartProgressInternalState::Done { + slot: my_snapshot.slot, + hash: Hash::from_str(&my_snapshot.bankhash).unwrap(), + shred_version: my_snapshot.shred_version as u16, + } } else { - return Err(WenRestartError::UnexpectedState(RestartState::Done).into()); + return Err(WenRestartError::MissingSnapshotInProtobuf.into()); } } - WenRestartProgressInternalState::Done => { + WenRestartProgressInternalState::Done { .. } => { return Err(WenRestartError::UnexpectedState(RestartState::Done).into()) } }; @@ -1077,7 +1104,20 @@ pub(crate) fn initialize( } }; match progress.state() { - RestartState::Done => Ok((WenRestartProgressInternalState::Done, progress)), + RestartState::Done => { + if let Some(my_snapshot) = progress.my_snapshot.as_ref() { + Ok(( + WenRestartProgressInternalState::Done { + slot: my_snapshot.slot, + hash: Hash::from_str(&my_snapshot.bankhash).unwrap(), + shred_version: my_snapshot.shred_version as u16, + }, + progress, + )) + } else { + Err(WenRestartError::MissingSnapshotInProtobuf.into()) + } + } RestartState::Init => { let last_voted_fork_slots; let last_vote_bankhash; @@ -1515,7 +1555,6 @@ mod tests { #[test] fn test_wen_restart_normal_flow() { - solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let wen_restart_repair_slots = Some(Arc::new(RwLock::new(Vec::new()))); let test_state = wen_restart_test_init(&ledger_path); @@ -2219,8 +2258,29 @@ progress, ) ); + let last_vote_slot = test_state.last_voted_fork_slots[0]; + let snapshot_slot_hash = Hash::new_unique(); let progress = WenRestartProgress { state: RestartState::Done.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + my_heaviest_fork: Some(HeaviestForkRecord { + slot: last_vote_slot, + bankhash: snapshot_slot_hash.to_string(), + total_active_stake: 0, + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + my_snapshot: Some(GenerateSnapshotRecord { + slot: last_vote_slot, + bankhash: snapshot_slot_hash.to_string(), + shred_version: SHRED_VERSION as u32, + path:
"/path/to/snapshot".to_string(), + }), ..Default::default() }; assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress,).is_ok()); @@ -2231,7 +2291,14 @@ mod tests { test_state.blockstore.clone() ) .unwrap(), - (WenRestartProgressInternalState::Done, progress) + ( + WenRestartProgressInternalState::Done { + slot: last_vote_slot, + hash: snapshot_slot_hash, + shred_version: SHRED_VERSION, + }, + progress + ) ); } @@ -2500,11 +2567,13 @@ mod tests { shred_version: SHRED_VERSION as u32, wallclock: 0, }); + let my_bankhash = Hash::new_unique(); + let new_shred_version = SHRED_VERSION + 57; let my_snapshot = Some(GenerateSnapshotRecord { slot: 1, - bankhash: Hash::new_unique().to_string(), + bankhash: my_bankhash.to_string(), path: "snapshot_1".to_string(), - shred_version: SHRED_VERSION as u32, + shred_version: new_shred_version as u32, }); let heaviest_fork_aggregate = Some(HeaviestForkAggregateRecord { received: HashMap::new(), @@ -2637,7 +2706,11 @@ mod tests { new_root_slot: 1, my_snapshot: my_snapshot.clone(), }, - WenRestartProgressInternalState::Done, + WenRestartProgressInternalState::Done { + slot: 1, + hash: my_bankhash, + shred_version: new_shred_version, + }, WenRestartProgress { state: RestartState::HeaviestFork.into(), my_last_voted_fork_slots: my_last_voted_fork_slots.clone(), @@ -2675,7 +2748,11 @@ mod tests { assert_eq!( increment_and_write_wen_restart_records( &wen_restart_proto_path, - WenRestartProgressInternalState::Done, + WenRestartProgressInternalState::Done { + slot: 1, + hash: my_bankhash, + shred_version: new_shred_version, + }, &mut progress ) .unwrap_err() @@ -3242,4 +3319,55 @@ mod tests { WenRestartError::BlockNotFound(empty_slot), ); } + + #[test] + fn test_return_ok_after_wait_is_done() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let last_vote_slot = test_state.last_voted_fork_slots[0]; + let last_vote_bankhash = Hash::new_unique(); + let config = WenRestartConfig { + wen_restart_path: test_state.wen_restart_proto_path.clone(), + last_vote: VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), + blockstore: test_state.blockstore.clone(), + cluster_info: test_state.cluster_info.clone(), + bank_forks: test_state.bank_forks.clone(), + wen_restart_repair_slots: Some(Arc::new(RwLock::new(Vec::new()))), + wait_for_supermajority_threshold_percent: 80, + snapshot_config: SnapshotConfig::default(), + accounts_background_request_sender: AbsRequestSender::default(), + genesis_config_hash: test_state.genesis_config_hash, + exit: Arc::new(AtomicBool::new(false)), + }; + assert!(write_wen_restart_records( + &test_state.wen_restart_proto_path, + &WenRestartProgress { + state: RestartState::Done.into(), + ..Default::default() + } + ) + .is_ok()); + assert_eq!( + wait_for_wen_restart(config.clone()) + .unwrap_err() + .downcast::() + .unwrap(), + WenRestartError::MissingSnapshotInProtobuf + ); + assert!(write_wen_restart_records( + &test_state.wen_restart_proto_path, + &WenRestartProgress { + state: RestartState::Done.into(), + my_snapshot: Some(GenerateSnapshotRecord { + slot: 0, + bankhash: Hash::new_unique().to_string(), + shred_version: SHRED_VERSION as u32, + path: "snapshot".to_string(), + }), + ..Default::default() + } + ) + .is_ok()); + assert!(wait_for_wen_restart(config).is_ok()); + } } From c8a839aeffa76b0b3325d642ee38bba2595170b4 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Sat, 24 Aug 2024 19:32:11 -0700 
Subject: [PATCH 214/529] Enable wen restart from command line and update comments. (#2640) Enable wen_restart from command line and update comments. --- validator/src/cli.rs | 19 ++++++++++--------- validator/src/main.rs | 6 ++++++ 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 9f4276d8d67ee0..38410aaeabd7ab 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1580,7 +1580,9 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .required(false) .conflicts_with("wait_for_supermajority") .help( - "When specified, the validator will enter Wen Restart mode which \ + "Only used during coordinated cluster restarts.\ + \n\n\ + When specified, the validator will enter Wen Restart mode which \ pauses normal activity. Validators in this mode will gossip their last \ vote to reach consensus on a safe restart slot and repair all blocks \ on the selected fork. The safe slot will be a descendant of the latest \ @@ -1588,16 +1590,15 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { optimistically confirmed slots. \ \n\n\ The progress in this mode will be saved in the file location provided. \ - If consensus is reached, the validator will automatically exit and then \ - execute wait_for_supermajority logic so the cluster will resume execution. \ - The progress file will be kept around for future debugging. \ - \n\n\ - After the cluster resumes normal operation, the validator arguments can \ - be adjusted to remove --wen_restart and update expected_shred_version to \ - the new shred_version agreed on in the consensus. \ + If consensus is reached, the validator will automatically exit with 200 \ + status code. Then the operators are expected to restart the validator \ + with --wait_for_supermajority and other arguments (including new shred_version, \ + supermajority slot, and bankhash) given in the error log before the exit so \ + the cluster will resume execution. The progress file will be kept around \ + for future debugging. \ \n\n\ If wen_restart fails, refer to the progress file (in proto3 format) for \ - further debugging.", + further debugging and watch the discord channel for instructions.", ), ) .args(&thread_args(&default_args.thread_args)) diff --git a/validator/src/main.rs b/validator/src/main.rs index 87b6a44cd9666e..c61b0f6d3ec87e 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1523,6 +1523,7 @@ pub fn main() { replay_transactions_threads, delay_leader_block_for_pending_fork: matches .is_present("delay_leader_block_for_pending_fork"), + wen_restart_proto_path: value_t!(matches, "wen_restart", PathBuf).ok(), ..ValidatorConfig::default() }; @@ -1965,6 +1966,11 @@ pub fn main() { let mut node = Node::new_with_external_ip(&identity_keypair.pubkey(), node_config); if restricted_repair_only_mode { + if validator_config.wen_restart_proto_path.is_some() { + error!("--restricted-repair-only-mode is not compatible with --wen_restart"); + exit(1); + } + // When in --restricted_repair_only_mode is enabled only the gossip and repair ports // need to be reachable by the entrypoint to respond to gossip pull requests and repair // requests initiated by the node. All other ports are unused. 
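For illustration only: going by the restart hint that `wait_for_wen_restart` logs in its Done state (patch 213) and the help text above, the follow-up invocation the operators are expected to run might look roughly like the sketch below. The slot, bank hash, and shred version are placeholders to be copied from the validator's error log, and the trailing flags are assumptions about an operator's existing setup rather than values taken from these patches:

    agave-validator \
        --wait-for-supermajority <SLOT> \
        --expected-bank-hash <BANKHASH> \
        --shred-version <SHRED_VERSION> \
        --hard-fork <SLOT> \
        --no-snapshot-fetch \
        ...                # remaining flags unchanged, with --wen_restart removed

Note that the same slot is passed to both --wait-for-supermajority and --hard-fork, matching the argument order in the hint string, and the flag spellings here follow that hint.
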
From 45768bf380d921a5e69e2d53134e00b28b671327 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 17:41:53 +0800 Subject: [PATCH 215/529] build(deps): bump serde_json from 1.0.125 to 1.0.127 (#2737) * build(deps): bump serde_json from 1.0.125 to 1.0.127 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.125 to 1.0.127. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/1.0.125...1.0.127) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e42722486c837d..17dee6dbc1dd69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5103,9 +5103,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index ae3e93fd024d12..ccf6d769f2e23b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -333,7 +333,7 @@ seqlock = "0.2.0" serde = "1.0.208" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" serde_derive = "1.0.208" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.125" +serde_json = "1.0.127" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index db1cdcc0de8144..00d5545e1c9056 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4263,9 +4263,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", "memchr", From a1f1e9c9e0ca6da0a16cdc7e253f8f1efa3c0ce1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 17:42:06 +0800 Subject: [PATCH 216/529] build(deps): bump syn from 2.0.75 to 2.0.76 (#2739) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.75 to 2.0.76. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.75...2.0.76) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 68 +++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17dee6dbc1dd69..b5dddc63359e17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -724,7 +724,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -877,7 +877,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1034,7 +1034,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "syn_derive", ] @@ -1166,7 +1166,7 @@ checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1777,7 +1777,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1788,7 +1788,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1850,7 +1850,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1974,7 +1974,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2080,7 +2080,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2350,7 +2350,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3676,7 +3676,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3749,7 +3749,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -4374,7 +4374,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -5098,7 +5098,7 @@ checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -5153,7 +5153,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -5203,7 +5203,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -6426,7 +6426,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -6852,7 +6852,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "toml 0.8.12", ] @@ -7492,7 +7492,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -8348,7 +8348,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", 
"spl-discriminator-syn", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -8360,7 +8360,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.75", + "syn 2.0.76", "thiserror", ] @@ -8419,7 +8419,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -8607,9 +8607,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.75" +version = "2.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" dependencies = [ "proc-macro2", "quote", @@ -8625,7 +8625,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -8811,7 +8811,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -8823,7 +8823,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "test-case-core", ] @@ -8859,7 +8859,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -8996,7 +8996,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -9240,7 +9240,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -9550,7 +9550,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "wasm-bindgen-shared", ] @@ -9584,7 +9584,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9943,7 +9943,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -9963,7 +9963,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] From ae6285ffbfbd737c29ddf2464796b0d90adf3119 Mon Sep 17 00:00:00 2001 From: Jon C Date: Mon, 26 Aug 2024 13:19:30 +0200 Subject: [PATCH 217/529] cli: Use simulated compute units in nonce interactions (#2695) * cli: Use simulated compute unit limit for nonces #### Problem The CLI can simulate to get the compute budget used by a transaction, but nonce interactions are still using the default compute unit limit. #### Summary of changes Refactor the tests into `test_case`s, add tests for setting a compute unit price, and then change compute unit limit to `Simulated`. 
* Add compute unit price test case * Change to using simulated compute units everywhere * Run simulations where it isn't done normally * Fix clippy issues --- cli/src/nonce.rs | 26 ++++++++++++++--------- cli/tests/nonce.rs | 52 ++++++++++++---------------------------------- 2 files changed, 29 insertions(+), 49 deletions(-) diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 36ee8cc89e80fd..708ce2b677afdc 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -5,7 +5,9 @@ use { log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, - compute_budget::{ComputeUnitConfig, WithComputeUnitConfig}, + compute_budget::{ + simulate_and_update_compute_unit_limit, ComputeUnitConfig, WithComputeUnitConfig, + }, memo::WithMemo, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }, @@ -421,9 +423,10 @@ pub fn process_authorize_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit: ComputeUnitLimit::Simulated, }); - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; @@ -469,7 +472,7 @@ pub fn process_create_nonce_account( let nonce_authority = nonce_authority.unwrap_or_else(|| config.signers[0].pubkey()); - let compute_unit_limit = ComputeUnitLimit::Default; + let compute_unit_limit = ComputeUnitLimit::Simulated; let build_message = |lamports| { let ixs = if let Some(seed) = seed.clone() { create_nonce_account_with_seed( @@ -579,10 +582,11 @@ pub fn process_new_nonce( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit: ComputeUnitLimit::Simulated, }); let latest_blockhash = rpc_client.get_latest_blockhash()?; - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( @@ -648,9 +652,10 @@ pub fn process_withdraw_from_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit: ComputeUnitLimit::Simulated, }); - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( @@ -676,9 +681,10 @@ pub(crate) fn process_upgrade_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit: ComputeUnitLimit::Simulated, }); - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); 
tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( diff --git a/cli/tests/nonce.rs b/cli/tests/nonce.rs index ff305e3954e46f..a9edd06656efd6 100644 --- a/cli/tests/nonce.rs +++ b/cli/tests/nonce.rs @@ -20,46 +20,20 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, + test_case::test_case, }; -#[test] -fn test_nonce() { - let mint_keypair = Keypair::new(); - let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = - TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); - - full_battery_tests(test_validator, None, false); -} - -#[test] -fn test_nonce_with_seed() { - let mint_keypair = Keypair::new(); - let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); - let test_validator = - TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); - - full_battery_tests(test_validator, Some(String::from("seed")), false); -} - -#[test] -fn test_nonce_with_authority() { +#[test_case(None, false, None; "base")] +#[test_case(Some(String::from("seed")), false, None; "with_seed")] +#[test_case(None, true, None; "with_authority")] +#[test_case(None, false, Some(1_000_000); "with_compute_unit_price")] +fn test_nonce(seed: Option<String>, use_nonce_authority: bool, compute_unit_price: Option<u64>) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); - full_battery_tests(test_validator, None, true); -} - -fn full_battery_tests( - test_validator: TestValidator, - seed: Option<String>, - use_nonce_authority: bool, -) { let rpc_client = RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); let json_rpc_url = test_validator.rpc_url(); @@ -113,7 +87,7 @@ fn full_battery_tests( nonce_authority: optional_authority, memo: None, amount: SpendAmount::Some(sol_to_lamports(1000.0)), - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); @@ -151,7 +125,7 @@ fn full_battery_tests( nonce_account, nonce_authority: index, memo: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); @@ -172,7 +146,7 @@ fn full_battery_tests( memo: None, destination_account_pubkey: payee_pubkey, lamports: sol_to_lamports(100.0), - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); check_balance!( @@ -197,7 +171,7 @@ fn full_battery_tests( nonce_authority: index, memo: None, new_authority: new_authority.pubkey(), - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); @@ -206,7 +180,7 @@ fn full_battery_tests( nonce_account, nonce_authority: index, memo: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap_err(); @@ -216,7 +190,7 @@ fn full_battery_tests( nonce_account, nonce_authority: 1, memo: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); @@ -227,7 +201,7 @@ fn full_battery_tests( memo: None, destination_account_pubkey: payee_pubkey, lamports: sol_to_lamports(100.0), - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); check_balance!( From df892c42418047ade3365c1b3ddcf6c45f95d1f1 Mon Sep 17
00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 26 Aug 2024 07:53:27 -0500 Subject: [PATCH 218/529] remove disable_block_production_forwarding cli flag (#2687) --- validator/src/cli.rs | 10 ---------- validator/src/main.rs | 5 +---- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 38410aaeabd7ab..9ebef5a0527db7 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1553,16 +1553,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .possible_values(BlockProductionMethod::cli_names()) .help(BlockProductionMethod::cli_message()), ) - .arg( - Arg::with_name("disable_block_production_forwarding") - .long("disable-block-production-forwarding") - .requires("staked_nodes_overrides") - .takes_value(false) - .help("Disable forwarding of non-vote transactions in block production. \ - By default, forwarding is already disabled, it is enabled by setting \ - \"staked-nodes-overrides\". This flag can be used to disable forwarding \ - even when \"staked-nodes-overrides\" is set."), - ) .arg( Arg::with_name("unified_scheduler_handler_threads") .long("unified-scheduler-handler-threads") diff --git a/validator/src/main.rs b/validator/src/main.rs index c61b0f6d3ec87e..fd98392fc1766d 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1784,10 +1784,7 @@ pub fn main() { BlockProductionMethod ) .unwrap_or_default(); - validator_config.enable_block_production_forwarding = staked_nodes_overrides_path - .as_ref() - .map(|_| !matches.is_present("disable_block_production_forwarding")) - .unwrap_or_default(); + validator_config.enable_block_production_forwarding = staked_nodes_overrides_path.is_some(); validator_config.unified_scheduler_handler_threads = value_t!(matches, "unified_scheduler_handler_threads", usize).ok(); From 71eb1d86c6f62612bcd9e6f2769b19ca8bd4fe4c Mon Sep 17 00:00:00 2001 From: Jon C Date: Mon, 26 Aug 2024 19:47:08 +0200 Subject: [PATCH 219/529] sdk: Add `deserialize_into` to entrypoint to avoid heap allocations (#2618) * sdk: Add `deserialize_into` to entrypoint #### Problem The main entrypoint for Solana programs allocates a vector on the heap and pushes AccountInfos to it. Allocation is expensive. #### Summary of changes Add a new version of `deserialize` called `deserialize_into`, which expects a slice of `MaybeUninit`. The entrypoint can allocate a maximum array of AccountInfos on the stack and then pass it in. This new version of the entrypoint saves roughly 30 CUs per unique account passed to the program. In an earlier version, I had the new function return the array itself, but this used slightly more CUs, and didn't work for an array with 64 elements. Let me know how it looks! * Call instruction processor in non-inlined function * Add test for max supported accounts in a transaction * Refactor extracting account infos and instruction data * Changes required from rebase * Add clippy allow * Add panic message if too many accounts provided * Add `entrypoint_no_alloc!` and revert behavior in entrypoint! * Use entrypoint_no_alloc! 
everywhere except noop * Comment why noop program works the way it does * Add limit in doc-string * CHANGELOG: Add entry for entrypoint --- CHANGELOG.md | 4 +- program-test/tests/bpf.rs | 53 +++- programs/sbf/rust/call_args/src/lib.rs | 2 +- programs/sbf/rust/caller_access/src/lib.rs | 2 +- programs/sbf/rust/custom_heap/src/lib.rs | 2 +- programs/sbf/rust/dup_accounts/src/lib.rs | 2 +- programs/sbf/rust/error_handling/src/lib.rs | 2 +- programs/sbf/rust/external_spend/src/lib.rs | 2 +- programs/sbf/rust/finalize/src/lib.rs | 2 +- .../rust/get_minimum_delegation/src/lib.rs | 2 +- .../rust/instruction_introspection/src/lib.rs | 2 +- programs/sbf/rust/invoke/src/lib.rs | 2 +- programs/sbf/rust/invoke_and_error/src/lib.rs | 2 +- programs/sbf/rust/invoke_and_ok/src/lib.rs | 2 +- .../sbf/rust/invoke_and_return/src/lib.rs | 2 +- programs/sbf/rust/invoked/src/lib.rs | 2 +- programs/sbf/rust/log_data/src/lib.rs | 2 +- programs/sbf/rust/mem/src/lib.rs | 2 +- programs/sbf/rust/noop/src/lib.rs | 2 + programs/sbf/rust/panic/src/lib.rs | 2 +- programs/sbf/rust/rand/src/lib.rs | 2 +- programs/sbf/rust/realloc/src/lib.rs | 2 +- programs/sbf/rust/realloc_invoke/src/lib.rs | 2 +- .../rust/remaining_compute_units/src/lib.rs | 2 +- .../sbf/rust/ro_account_modify/src/lib.rs | 2 +- programs/sbf/rust/ro_modify/src/lib.rs | 2 +- programs/sbf/rust/sanity/src/lib.rs | 2 +- .../sibling_inner_instructions/src/lib.rs | 2 +- .../sbf/rust/sibling_instructions/src/lib.rs | 2 +- programs/sbf/rust/simulation/src/lib.rs | 2 +- programs/sbf/rust/spoof1/src/lib.rs | 2 +- programs/sbf/rust/spoof1_system/src/lib.rs | 2 +- programs/sbf/rust/sysvar/src/lib.rs | 2 +- programs/sbf/rust/upgradeable/src/lib.rs | 2 +- programs/sbf/rust/upgraded/src/lib.rs | 2 +- sdk/program/src/entrypoint.rs | 262 +++++++++++++----- 36 files changed, 289 insertions(+), 96 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c809f2b78b115..0e07ac3c42c17c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,7 +23,9 @@ Release channels have their own copy of this changelog: * Banks-client: * relax functions to use `&self` instead of `&mut self` (#2591) * Changes - * SDK: removed the `respan` macro. This was marked as "internal use only" and was no longer used internally. + * SDK: + * removed the `respan` macro. This was marked as "internal use only" and was no longer used internally. 
+ * add `entrypoint_no_alloc!`, a more performant program entrypoint that avoids allocations, saving 20-30 CUs per unique account * `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447) * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480) * `solana-genesis`: the `--cluster-type` parameter now clones the feature set from the target cluster (#2587) diff --git a/program-test/tests/bpf.rs b/program-test/tests/bpf.rs index d601c6c040ef9f..a211c02df879d8 100644 --- a/program-test/tests/bpf.rs +++ b/program-test/tests/bpf.rs @@ -1,9 +1,13 @@ use { solana_program_test::ProgramTest, solana_sdk::{ - bpf_loader, instruction::Instruction, pubkey::Pubkey, signature::Signer, + bpf_loader, feature_set, + instruction::{AccountMeta, Instruction}, + pubkey::Pubkey, + signature::Signer, transaction::Transaction, }, + test_case::test_case, }; #[tokio::test] @@ -39,3 +43,50 @@ async fn test_add_bpf_program() { .await .unwrap(); } + +#[test_case(64, true, true; "success with 64 accounts and without feature")] +#[test_case(65, true, false; "failure with 65 accounts and without feature")] +#[test_case(128, false, true; "success with 128 accounts and with feature")] +#[test_case(129, false, false; "failure with 129 accounts and with feature")] +#[tokio::test] +async fn test_max_accounts(num_accounts: u8, deactivate_feature: bool, expect_success: bool) { + let program_id = Pubkey::new_unique(); + + let mut program_test = ProgramTest::default(); + + program_test.prefer_bpf(true); + program_test.add_program("noop_program", program_id, None); + if deactivate_feature { + program_test.deactivate_feature(feature_set::increase_tx_account_lock_limit::id()); + } + + let context = program_test.start_with_context().await; + + // Subtract 2 to account for the program and fee payer + let num_extra_accounts = num_accounts.checked_sub(2).unwrap(); + let account_metas = (0..num_extra_accounts) + .map(|_| AccountMeta::new_readonly(Pubkey::new_unique(), false)) + .collect::<Vec<_>>(); + let instruction = Instruction::new_with_bytes(program_id, &[], account_metas); + let transaction = Transaction::new_signed_with_payer( + &[instruction], + Some(&context.payer.pubkey()), + &[&context.payer], + context.last_blockhash, + ); + + // Invoke the program. + if expect_success { + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); + } else { + context + .banks_client + .process_transaction(transaction) + .await + .unwrap_err(); + } +} diff --git a/programs/sbf/rust/call_args/src/lib.rs b/programs/sbf/rust/call_args/src/lib.rs index 1925eff36125bb..43d61187110bce 100644 --- a/programs/sbf/rust/call_args/src/lib.rs +++ b/programs/sbf/rust/call_args/src/lib.rs @@ -33,7 +33,7 @@ struct OutputData { many_args_2: i64, } -solana_program::entrypoint!(entry); +solana_program::entrypoint_no_alloc!(entry); pub fn entry(_program_id: &Pubkey, _accounts: &[AccountInfo], data: &[u8]) -> ProgramResult { // This code is supposed to occupy stack space.
The purpose of this test is to make sure diff --git a/programs/sbf/rust/caller_access/src/lib.rs b/programs/sbf/rust/caller_access/src/lib.rs index 5b8168c2ac55ec..b1c41ad6e1a9bb 100644 --- a/programs/sbf/rust/caller_access/src/lib.rs +++ b/programs/sbf/rust/caller_access/src/lib.rs @@ -10,7 +10,7 @@ use { std::convert::TryInto, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/custom_heap/src/lib.rs b/programs/sbf/rust/custom_heap/src/lib.rs index 27327da106bf90..9738db15226335 100644 --- a/programs/sbf/rust/custom_heap/src/lib.rs +++ b/programs/sbf/rust/custom_heap/src/lib.rs @@ -55,7 +55,7 @@ unsafe impl std::alloc::GlobalAlloc for BumpAllocator { #[global_allocator] static A: BumpAllocator = BumpAllocator; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); pub fn process_instruction( _program_id: &Pubkey, _accounts: &[AccountInfo], diff --git a/programs/sbf/rust/dup_accounts/src/lib.rs b/programs/sbf/rust/dup_accounts/src/lib.rs index 2dc96fab5a6eab..fb49994a79abf2 100644 --- a/programs/sbf/rust/dup_accounts/src/lib.rs +++ b/programs/sbf/rust/dup_accounts/src/lib.rs @@ -13,7 +13,7 @@ use solana_program::{ pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/error_handling/src/lib.rs b/programs/sbf/rust/error_handling/src/lib.rs index 75aa233d867d12..b2c1a4d577e1dc 100644 --- a/programs/sbf/rust/error_handling/src/lib.rs +++ b/programs/sbf/rust/error_handling/src/lib.rs @@ -45,7 +45,7 @@ impl PrintProgramError for MyError { } } -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/external_spend/src/lib.rs b/programs/sbf/rust/external_spend/src/lib.rs index aeb88fefc529e7..ed5abce617f93c 100644 --- a/programs/sbf/rust/external_spend/src/lib.rs +++ b/programs/sbf/rust/external_spend/src/lib.rs @@ -5,7 +5,7 @@ extern crate solana_program; use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/finalize/src/lib.rs b/programs/sbf/rust/finalize/src/lib.rs index fce71415165293..6c6b6bb8b61062 100644 --- a/programs/sbf/rust/finalize/src/lib.rs +++ b/programs/sbf/rust/finalize/src/lib.rs @@ -8,7 +8,7 @@ use solana_program::{ program::invoke, pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/get_minimum_delegation/src/lib.rs b/programs/sbf/rust/get_minimum_delegation/src/lib.rs index 03f4de6f641363..d5df8448a37a3d 100644 --- a/programs/sbf/rust/get_minimum_delegation/src/lib.rs +++ b/programs/sbf/rust/get_minimum_delegation/src/lib.rs @@ -7,7 +7,7 @@ use solana_program::{ account_info::AccountInfo, entrypoint::ProgramResult, msg, pubkey::Pubkey, stake, }; -solana_program::entrypoint!(process_instruction); 
+solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, _accounts: &[AccountInfo], diff --git a/programs/sbf/rust/instruction_introspection/src/lib.rs b/programs/sbf/rust/instruction_introspection/src/lib.rs index 502521ea29fc78..ef0763c663aeff 100644 --- a/programs/sbf/rust/instruction_introspection/src/lib.rs +++ b/programs/sbf/rust/instruction_introspection/src/lib.rs @@ -12,7 +12,7 @@ use solana_program::{ sysvar::instructions, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index d663f960a01842..182b5dbe7cb945 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ b/programs/sbf/rust/invoke/src/lib.rs @@ -65,7 +65,7 @@ fn do_nested_invokes(num_nested_invokes: u64, accounts: &[AccountInfo]) -> Progr Ok(()) } -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction<'a>( program_id: &Pubkey, accounts: &[AccountInfo<'a>], diff --git a/programs/sbf/rust/invoke_and_error/src/lib.rs b/programs/sbf/rust/invoke_and_error/src/lib.rs index ffa82c52ed89f8..86d6114827567d 100644 --- a/programs/sbf/rust/invoke_and_error/src/lib.rs +++ b/programs/sbf/rust/invoke_and_error/src/lib.rs @@ -9,7 +9,7 @@ use solana_program::{ pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/invoke_and_ok/src/lib.rs b/programs/sbf/rust/invoke_and_ok/src/lib.rs index 39a19cd58a765e..fe624f7f98c3f7 100644 --- a/programs/sbf/rust/invoke_and_ok/src/lib.rs +++ b/programs/sbf/rust/invoke_and_ok/src/lib.rs @@ -9,7 +9,7 @@ use solana_program::{ pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/invoke_and_return/src/lib.rs b/programs/sbf/rust/invoke_and_return/src/lib.rs index 89d2ad8d12f53d..cdec1102e7c0e3 100644 --- a/programs/sbf/rust/invoke_and_return/src/lib.rs +++ b/programs/sbf/rust/invoke_and_return/src/lib.rs @@ -9,7 +9,7 @@ use solana_program::{ pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/invoked/src/lib.rs b/programs/sbf/rust/invoked/src/lib.rs index 2260aa29021ac2..1429cd71a7e51b 100644 --- a/programs/sbf/rust/invoked/src/lib.rs +++ b/programs/sbf/rust/invoked/src/lib.rs @@ -17,7 +17,7 @@ use { solana_sbf_rust_invoked_dep::*, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); #[allow(clippy::cognitive_complexity)] fn process_instruction( program_id: &Pubkey, diff --git a/programs/sbf/rust/log_data/src/lib.rs b/programs/sbf/rust/log_data/src/lib.rs index 514fc345f50492..b4c67634e22c87 100644 --- a/programs/sbf/rust/log_data/src/lib.rs +++ b/programs/sbf/rust/log_data/src/lib.rs @@ -5,7 +5,7 @@ use solana_program::{ program::set_return_data, pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); 
#[allow(clippy::cognitive_complexity)] fn process_instruction( _program_id: &Pubkey, diff --git a/programs/sbf/rust/mem/src/lib.rs b/programs/sbf/rust/mem/src/lib.rs index 231816e8e2138a..52da6609c88723 100644 --- a/programs/sbf/rust/mem/src/lib.rs +++ b/programs/sbf/rust/mem/src/lib.rs @@ -10,7 +10,7 @@ use { solana_sbf_rust_mem_dep::{run_mem_tests, MemOps}, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); pub fn process_instruction( _program_id: &Pubkey, _accounts: &[AccountInfo], diff --git a/programs/sbf/rust/noop/src/lib.rs b/programs/sbf/rust/noop/src/lib.rs index 250715b981b42f..c3b8f7962bc862 100644 --- a/programs/sbf/rust/noop/src/lib.rs +++ b/programs/sbf/rust/noop/src/lib.rs @@ -3,6 +3,8 @@ extern crate solana_program; use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; +// This program intentionally uses `entrypoint!` instead of `entrypoint_no_alloc!` +// to handle any number of accounts. solana_program::entrypoint!(process_instruction); fn process_instruction( _program_id: &Pubkey, diff --git a/programs/sbf/rust/panic/src/lib.rs b/programs/sbf/rust/panic/src/lib.rs index 2f859f5d618748..d7b5f99d14ff68 100644 --- a/programs/sbf/rust/panic/src/lib.rs +++ b/programs/sbf/rust/panic/src/lib.rs @@ -11,7 +11,7 @@ fn custom_panic(info: &core::panic::PanicInfo<'_>) { extern crate solana_program; use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, _accounts: &[AccountInfo], diff --git a/programs/sbf/rust/rand/src/lib.rs b/programs/sbf/rust/rand/src/lib.rs index 515b85b29c3685..e4241b67ecaa81 100644 --- a/programs/sbf/rust/rand/src/lib.rs +++ b/programs/sbf/rust/rand/src/lib.rs @@ -5,7 +5,7 @@ extern crate solana_program; use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, msg, pubkey::Pubkey}; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, _accounts: &[AccountInfo], diff --git a/programs/sbf/rust/realloc/src/lib.rs b/programs/sbf/rust/realloc/src/lib.rs index dd7bbafbcec4ff..8e2bf3f211d6da 100644 --- a/programs/sbf/rust/realloc/src/lib.rs +++ b/programs/sbf/rust/realloc/src/lib.rs @@ -16,7 +16,7 @@ use { std::{convert::TryInto, mem}, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/realloc_invoke/src/lib.rs b/programs/sbf/rust/realloc_invoke/src/lib.rs index 530c62a826125a..7ccda1b2395c05 100644 --- a/programs/sbf/rust/realloc_invoke/src/lib.rs +++ b/programs/sbf/rust/realloc_invoke/src/lib.rs @@ -16,7 +16,7 @@ use { std::convert::TryInto, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/remaining_compute_units/src/lib.rs b/programs/sbf/rust/remaining_compute_units/src/lib.rs index ecf0376397b519..4259799e0add1a 100644 --- a/programs/sbf/rust/remaining_compute_units/src/lib.rs +++ b/programs/sbf/rust/remaining_compute_units/src/lib.rs @@ -5,7 +5,7 @@ use solana_program::{ account_info::AccountInfo, 
compute_units::sol_remaining_compute_units, entrypoint::ProgramResult, msg, pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); pub fn process_instruction( _program_id: &Pubkey, _accounts: &[AccountInfo], diff --git a/programs/sbf/rust/ro_account_modify/src/lib.rs b/programs/sbf/rust/ro_account_modify/src/lib.rs index 32a3af00779018..3fca230c183915 100644 --- a/programs/sbf/rust/ro_account_modify/src/lib.rs +++ b/programs/sbf/rust/ro_account_modify/src/lib.rs @@ -14,7 +14,7 @@ const INSTRUCTION_INVOKE_MODIFY: u8 = 1; const INSTRUCTION_MODIFY_INVOKE: u8 = 2; const INSTRUCTION_VERIFY_MODIFIED: u8 = 3; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/ro_modify/src/lib.rs b/programs/sbf/rust/ro_modify/src/lib.rs index daa529f5370f19..7d50b70f646704 100644 --- a/programs/sbf/rust/ro_modify/src/lib.rs +++ b/programs/sbf/rust/ro_modify/src/lib.rs @@ -108,7 +108,7 @@ fn check_preconditions( Ok(()) } -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/sanity/src/lib.rs b/programs/sbf/rust/sanity/src/lib.rs index c0628ca9cd4d8f..cb4cbc6353624c 100644 --- a/programs/sbf/rust/sanity/src/lib.rs +++ b/programs/sbf/rust/sanity/src/lib.rs @@ -36,7 +36,7 @@ fn return_sstruct() -> SStruct { SStruct { x: 1, y: 2, z: 3 } } -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); pub fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/sibling_inner_instructions/src/lib.rs b/programs/sbf/rust/sibling_inner_instructions/src/lib.rs index da70004d505e2c..1aa6e85e80fe90 100644 --- a/programs/sbf/rust/sibling_inner_instructions/src/lib.rs +++ b/programs/sbf/rust/sibling_inner_instructions/src/lib.rs @@ -13,7 +13,7 @@ use solana_program::{ pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/sibling_instructions/src/lib.rs b/programs/sbf/rust/sibling_instructions/src/lib.rs index 7fd705007bb81e..a1406a9eb65209 100644 --- a/programs/sbf/rust/sibling_instructions/src/lib.rs +++ b/programs/sbf/rust/sibling_instructions/src/lib.rs @@ -12,7 +12,7 @@ use solana_program::{ pubkey::Pubkey, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/simulation/src/lib.rs b/programs/sbf/rust/simulation/src/lib.rs index 843a842ec4081d..8decae36b58811 100644 --- a/programs/sbf/rust/simulation/src/lib.rs +++ b/programs/sbf/rust/simulation/src/lib.rs @@ -15,7 +15,7 @@ use { declare_id!("Sim1jD5C35odT8mzctm8BWnjic8xW5xgeb5MbcbErTo"); -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); pub fn process_instruction( _program_id: &Pubkey, diff --git a/programs/sbf/rust/spoof1/src/lib.rs b/programs/sbf/rust/spoof1/src/lib.rs index 66adae1437437d..c6efd612ef50fc 100644 --- a/programs/sbf/rust/spoof1/src/lib.rs +++ b/programs/sbf/rust/spoof1/src/lib.rs 
@@ -9,7 +9,7 @@ use solana_program::{ system_program, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/spoof1_system/src/lib.rs b/programs/sbf/rust/spoof1_system/src/lib.rs index b5dc9cb572b0be..b4956414077b56 100644 --- a/programs/sbf/rust/spoof1_system/src/lib.rs +++ b/programs/sbf/rust/spoof1_system/src/lib.rs @@ -2,7 +2,7 @@ use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/sysvar/src/lib.rs b/programs/sbf/rust/sysvar/src/lib.rs index 50f6891d85e3ed..0827c53c6c102d 100644 --- a/programs/sbf/rust/sysvar/src/lib.rs +++ b/programs/sbf/rust/sysvar/src/lib.rs @@ -17,7 +17,7 @@ use solana_program::{ }, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); pub fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/upgradeable/src/lib.rs b/programs/sbf/rust/upgradeable/src/lib.rs index 293655e14bdae5..cc1160f0e277da 100644 --- a/programs/sbf/rust/upgradeable/src/lib.rs +++ b/programs/sbf/rust/upgradeable/src/lib.rs @@ -5,7 +5,7 @@ use solana_program::{ account_info::AccountInfo, entrypoint::ProgramResult, msg, pubkey::Pubkey, sysvar::clock, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/programs/sbf/rust/upgraded/src/lib.rs b/programs/sbf/rust/upgraded/src/lib.rs index fd261d8a8da66f..085f532f28923a 100644 --- a/programs/sbf/rust/upgraded/src/lib.rs +++ b/programs/sbf/rust/upgraded/src/lib.rs @@ -5,7 +5,7 @@ use solana_program::{ account_info::AccountInfo, entrypoint::ProgramResult, msg, pubkey::Pubkey, sysvar::clock, }; -solana_program::entrypoint!(process_instruction); +solana_program::entrypoint_no_alloc!(process_instruction); fn process_instruction( _program_id: &Pubkey, accounts: &[AccountInfo], diff --git a/sdk/program/src/entrypoint.rs b/sdk/program/src/entrypoint.rs index 0be1ec34b48cb7..dbb6d9ab753651 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program/src/entrypoint.rs @@ -11,7 +11,7 @@ use { std::{ alloc::Layout, cell::RefCell, - mem::size_of, + mem::{size_of, MaybeUninit}, ptr::null_mut, rc::Rc, result::Result as ResultGeneric, @@ -139,6 +139,59 @@ macro_rules! entrypoint { }; } +/// Declare the program entrypoint and set up global handlers. +/// +/// This is similar to the `entrypoint!` macro, except that it does not perform +/// any dynamic allocations, and instead writes the input accounts into a pre- +/// allocated array. +/// +/// This version reduces compute unit usage by 20-30 compute units per unique +/// account in the instruction. It may become the default option in a future +/// release. +/// +/// For more information about how the program entrypoint behaves and what it +/// does, please see the documentation for [`entrypoint!`]. +/// +/// NOTE: This entrypoint has a hard-coded limit of 64 input accounts. +#[macro_export] +macro_rules! 
entrypoint_no_alloc {
+    ($process_instruction:ident) => {
+        /// # Safety
+        #[no_mangle]
+        pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 {
+            use std::mem::MaybeUninit;
+            // Clippy complains about this because a `const` with interior
+            // mutability `RefCell` should use `static` instead to make it
+            // clear that it can change.
+            // In our case, however, we want to create an array of `AccountInfo`s,
+            // and the only way to do it is through a `const` expression, and
+            // we don't expect to mutate the internals of this `const` type.
+            #[allow(clippy::declare_interior_mutable_const)]
+            const UNINIT_ACCOUNT_INFO: MaybeUninit<AccountInfo> =
+                MaybeUninit::<AccountInfo>::uninit();
+            const MAX_ACCOUNT_INFOS: usize = 64;
+            let mut accounts = [UNINIT_ACCOUNT_INFO; MAX_ACCOUNT_INFOS];
+            let (program_id, num_accounts, instruction_data) =
+                unsafe { $crate::entrypoint::deserialize_into(input, &mut accounts) };
+            // Use `slice_assume_init_ref` once it's stabilized
+            let accounts = &*(&accounts[..num_accounts] as *const [MaybeUninit<AccountInfo<'_>>]
+                as *const [AccountInfo<'_>]);
+
+            #[inline(never)]
+            fn call_program(program_id: &Pubkey, accounts: &[AccountInfo], data: &[u8]) -> u64 {
+                match $process_instruction(program_id, accounts, data) {
+                    Ok(()) => $crate::entrypoint::SUCCESS,
+                    Err(error) => error.into(),
+                }
+            }
+
+            call_program(&program_id, accounts, &instruction_data)
+        }
+        $crate::custom_heap_default!();
+        $crate::custom_panic_default!();
+    };
+}
+
 /// Define the default global allocator.
 ///
 /// The default global allocator is enabled only if the calling crate has not
@@ -265,6 +318,86 @@
 pub const MAX_PERMITTED_DATA_INCREASE: usize = 1_024 * 10;
 
 /// `assert_eq(std::mem::align_of::<u128>(), 8)` is true for BPF but not for some host machines
 pub const BPF_ALIGN_OF_U128: usize = 8;
 
+#[allow(clippy::arithmetic_side_effects)]
+#[inline(always)] // this reduces CU usage
+unsafe fn deserialize_instruction_data<'a>(input: *mut u8, mut offset: usize) -> (&'a [u8], usize) {
+    #[allow(clippy::cast_ptr_alignment)]
+    let instruction_data_len = *(input.add(offset) as *const u64) as usize;
+    offset += size_of::<u64>();
+
+    let instruction_data = { from_raw_parts(input.add(offset), instruction_data_len) };
+    offset += instruction_data_len;
+
+    (instruction_data, offset)
+}
+
+#[allow(clippy::arithmetic_side_effects)]
+#[inline(always)] // this reduces CU usage by half!
+unsafe fn deserialize_account_info<'a>(
+    input: *mut u8,
+    mut offset: usize,
+) -> (AccountInfo<'a>, usize) {
+    #[allow(clippy::cast_ptr_alignment)]
+    let is_signer = *(input.add(offset) as *const u8) != 0;
+    offset += size_of::<u8>();
+
+    #[allow(clippy::cast_ptr_alignment)]
+    let is_writable = *(input.add(offset) as *const u8) != 0;
+    offset += size_of::<u8>();
+
+    #[allow(clippy::cast_ptr_alignment)]
+    let executable = *(input.add(offset) as *const u8) != 0;
+    offset += size_of::<u8>();
+
+    // The original data length is stored here because these 4 bytes were
+    // originally only used for padding and served as a good location to
+    // track the original size of the account data in a compatible way.
+    let original_data_len_offset = offset;
+    offset += size_of::<u32>();
+
+    let key: &Pubkey = &*(input.add(offset) as *const Pubkey);
+    offset += size_of::<Pubkey>();
+
+    let owner: &Pubkey = &*(input.add(offset) as *const Pubkey);
+    offset += size_of::<Pubkey>();
+
+    #[allow(clippy::cast_ptr_alignment)]
+    let lamports = Rc::new(RefCell::new(&mut *(input.add(offset) as *mut u64)));
+    offset += size_of::<u64>();
+
+    #[allow(clippy::cast_ptr_alignment)]
+    let data_len = *(input.add(offset) as *const u64) as usize;
+    offset += size_of::<u64>();
+
+    // Store the original data length for detecting invalid reallocations and
+    // requires that MAX_PERMITTED_DATA_LENGTH fits in a u32
+    *(input.add(original_data_len_offset) as *mut u32) = data_len as u32;
+
+    let data = Rc::new(RefCell::new({
+        from_raw_parts_mut(input.add(offset), data_len)
+    }));
+    offset += data_len + MAX_PERMITTED_DATA_INCREASE;
+    offset += (offset as *const u8).align_offset(BPF_ALIGN_OF_U128); // padding
+
+    #[allow(clippy::cast_ptr_alignment)]
+    let rent_epoch = *(input.add(offset) as *const u64);
+    offset += size_of::<u64>();
+
+    (
+        AccountInfo {
+            key,
+            is_signer,
+            is_writable,
+            lamports,
+            data,
+            owner,
+            executable,
+            rent_epoch,
+        },
+        offset,
+    )
+}
+
 /// Deserialize the input arguments
 ///
 /// The integer arithmetic in this method is safe when called on a buffer that was
@@ -273,7 +406,6 @@
 ///
 /// # Safety
 #[allow(clippy::arithmetic_side_effects)]
-#[allow(clippy::type_complexity)]
 pub unsafe fn deserialize<'a>(input: *mut u8) -> (&'a Pubkey, Vec<AccountInfo<'a>>, &'a [u8]) {
     let mut offset: usize = 0;
 
@@ -290,62 +422,9 @@ pub unsafe fn deserialize<'a>(input: *mut u8) -> (&'a Pubkey, Vec<AccountInfo<'a
         let dup_info = *(input.add(offset) as *const u8);
         offset += size_of::<u8>();
         if dup_info == NON_DUP_MARKER {
-            #[allow(clippy::cast_ptr_alignment)]
-            let is_signer = *(input.add(offset) as *const u8) != 0;
-            offset += size_of::<u8>();
-
-            #[allow(clippy::cast_ptr_alignment)]
-            let is_writable = *(input.add(offset) as *const u8) != 0;
-            offset += size_of::<u8>();
-
-            #[allow(clippy::cast_ptr_alignment)]
-            let executable = *(input.add(offset) as *const u8) != 0;
-            offset += size_of::<u8>();
-
-            // The original data length is stored here because these 4 bytes were
-            // originally only used for padding and served as a good location to
-            // track the original size of the account data in a compatible way.
-            let original_data_len_offset = offset;
-            offset += size_of::<u32>();
-
-            let key: &Pubkey = &*(input.add(offset) as *const Pubkey);
-            offset += size_of::<Pubkey>();
-
-            let owner: &Pubkey = &*(input.add(offset) as *const Pubkey);
-            offset += size_of::<Pubkey>();
-
-            #[allow(clippy::cast_ptr_alignment)]
-            let lamports = Rc::new(RefCell::new(&mut *(input.add(offset) as *mut u64)));
-            offset += size_of::<u64>();
-
-            #[allow(clippy::cast_ptr_alignment)]
-            let data_len = *(input.add(offset) as *const u64) as usize;
-            offset += size_of::<u64>();
-
-            // Store the original data length for detecting invalid reallocations and
-            // requires that MAX_PERMITTED_DATA_LENGTH fits in a u32
-            *(input.add(original_data_len_offset) as *mut u32) = data_len as u32;
-
-            let data = Rc::new(RefCell::new({
-                from_raw_parts_mut(input.add(offset), data_len)
-            }));
-            offset += data_len + MAX_PERMITTED_DATA_INCREASE;
-            offset += (offset as *const u8).align_offset(BPF_ALIGN_OF_U128); // padding
-
-            #[allow(clippy::cast_ptr_alignment)]
-            let rent_epoch = *(input.add(offset) as *const u64);
-            offset += size_of::<u64>();
-
-            accounts.push(AccountInfo {
-                key,
-                is_signer,
-                is_writable,
-                lamports,
-                data,
-                owner,
-                executable,
-                rent_epoch,
-            });
+            let (account_info, new_offset) = deserialize_account_info(input, offset);
+            offset = new_offset;
+            accounts.push(account_info);
         } else {
             offset += 7; // padding
 
@@ -356,18 +435,77 @@ pub unsafe fn deserialize<'a>(input: *mut u8) -> (&'a Pubkey, Vec<AccountInfo<'a
 
     // Instruction data
 
+    let (instruction_data, new_offset) = deserialize_instruction_data(input, offset);
+    offset = new_offset;
+
+    // Program Id
+
+    let program_id: &Pubkey = &*(input.add(offset) as *const Pubkey);
+
+    (program_id, accounts, instruction_data)
+}
+
+/// Deserialize the input arguments
+///
+/// Differs from `deserialize` by writing the account infos into a pre-allocated
+/// slice.
+///
+/// # Safety
+#[allow(clippy::arithmetic_side_effects)]
+pub unsafe fn deserialize_into<'a>(
+    input: *mut u8,
+    accounts: &mut [MaybeUninit<AccountInfo<'a>>],
+) -> (&'a Pubkey, usize, &'a [u8]) {
+    let mut offset: usize = 0;
+
+    // Number of accounts present
+
     #[allow(clippy::cast_ptr_alignment)]
-    let instruction_data_len = *(input.add(offset) as *const u64) as usize;
+    let num_accounts = *(input.add(offset) as *const u64) as usize;
     offset += size_of::<u64>();
 
-    let instruction_data = { from_raw_parts(input.add(offset), instruction_data_len) };
-    offset += instruction_data_len;
+    if num_accounts > accounts.len() {
+        panic!(
+            "{} accounts provided, but only {} are supported",
+            num_accounts,
+            accounts.len()
+        );
+    }
+
+    // Account Infos
+
+    for i in 0..num_accounts {
+        let dup_info = *(input.add(offset) as *const u8);
+        offset += size_of::<u8>();
+        if dup_info == NON_DUP_MARKER {
+            let (account_info, new_offset) = deserialize_account_info(input, offset);
+            offset = new_offset;
+            accounts[i].write(account_info);
+        } else {
+            offset += 7; // padding
+
+            // Duplicate account, clone the original
+            accounts[i].write(accounts[dup_info as usize].assume_init_ref().clone());
+        }
+    }
+
+    // Instruction data
+
+    let (instruction_data, new_offset) = deserialize_instruction_data(input, offset);
+    offset = new_offset;
 
     // Program Id
 
     let program_id: &Pubkey = &*(input.add(offset) as *const Pubkey);
 
-    (program_id, accounts, instruction_data)
+    (program_id, num_accounts, instruction_data)
 }
 
 #[cfg(test)]
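As a usage reference, here is a minimal sketch of a program adopting the new macro, mirroring the program conversions in the diffs above (the empty handler is illustrative):

use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey};

// Installs the entrypoint plus the default heap and panic handlers, and
// deserializes accounts into a fixed 64-slot array instead of a Vec.
solana_program::entrypoint_no_alloc!(process_instruction);

fn process_instruction(
    _program_id: &Pubkey,
    _accounts: &[AccountInfo],
    _instruction_data: &[u8],
) -> ProgramResult {
    Ok(())
}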
From 4a777d1288ec7d935db8d8d45c3eff16a4ade0a3 Mon Sep 17 00:00:00 2001
From: Liam Vovk <63673978+vovkman@users.noreply.github.com>
Date: Mon, 26 Aug 2024 11:17:57 -0700
Subject: [PATCH 220/529] feat: Allow skip preflight health check (#2278)

add config to allow skipping health check in preflight call
---
 rpc/src/rpc.rs        | 29 ++++++++++++++++-------------
 validator/src/cli.rs  |  8 ++++++++
 validator/src/main.rs |  1 +
 3 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 0cd32ab3ff84b1..05bd4caa8d6088 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -146,6 +146,7 @@ pub struct JsonRpcConfig {
     pub enable_extended_tx_metadata_storage: bool,
     pub faucet_addr: Option<SocketAddr>,
     pub health_check_slot_distance: u64,
+    pub skip_preflight_health_check: bool,
     pub rpc_bigtable_config: Option<RpcBigtableConfig>,
     pub max_multiple_accounts: Option<usize>,
     pub account_indexes: AccountSecondaryIndexes,
@@ -3695,21 +3696,23 @@ pub mod rpc_full {
             if !skip_preflight {
                 verify_transaction(&transaction, &preflight_bank.feature_set)?;
 
-                match meta.health.check() {
-                    RpcHealthStatus::Ok => (),
-                    RpcHealthStatus::Unknown => {
-                        inc_new_counter_info!("rpc-send-tx_health-unknown", 1);
-                        return Err(RpcCustomError::NodeUnhealthy {
-                            num_slots_behind: None,
+                if !meta.config.skip_preflight_health_check {
+                    match meta.health.check() {
+                        RpcHealthStatus::Ok => (),
+                        RpcHealthStatus::Unknown => {
+                            inc_new_counter_info!("rpc-send-tx_health-unknown", 1);
+                            return Err(RpcCustomError::NodeUnhealthy {
+                                num_slots_behind: None,
+                            }
+                            .into());
                         }
-                        .into());
-                    }
-                    RpcHealthStatus::Behind { num_slots } => {
-                        inc_new_counter_info!("rpc-send-tx_health-behind", 1);
-                        return Err(RpcCustomError::NodeUnhealthy {
-                            num_slots_behind: Some(num_slots),
+                        RpcHealthStatus::Behind { num_slots } => {
+                            inc_new_counter_info!("rpc-send-tx_health-behind", 1);
+                            return Err(RpcCustomError::NodeUnhealthy {
+                                num_slots_behind: Some(num_slots),
+                            }
+                            .into());
                         }
-                        .into());
                     }
                 }
 
diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index 9ebef5a0527db7..60cdc3044748f6 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -286,6 +286,14 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                     latest optimistically confirmed slot",
                 ),
         )
+        .arg(
+            Arg::with_name("skip_preflight_health_check")
+                .long("skip-preflight-health-check")
+                .takes_value(false)
+                .help(
+                    "Skip health check when running a preflight check",
+                ),
+        )
        .arg(
            Arg::with_name("rpc_faucet_addr")
                .long("rpc-faucet-address")
diff --git a/validator/src/main.rs b/validator/src/main.rs
index fd98392fc1766d..b2fb6a7ac275b9 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1422,6 +1422,7 @@ pub fn main() {
                     "rpc_max_request_body_size",
                     usize
                 )),
+                skip_preflight_health_check: matches.is_present("skip_preflight_health_check"),
             },
             on_start_geyser_plugin_config_files,
             rpc_addrs: value_t!(matches, "rpc_port", u16).ok().map(|rpc_port| {
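A minimal sketch of enabling the new option when embedding the RPC service in Rust, assuming the struct's `Default` implementation; the validator CLI equivalent is the `--skip-preflight-health-check` flag added above:

use solana_rpc::rpc::JsonRpcConfig;

fn test_rpc_config() -> JsonRpcConfig {
    JsonRpcConfig {
        // Preflight simulation still runs; only the node health gate is skipped.
        skip_preflight_health_check: true,
        ..JsonRpcConfig::default()
    }
}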
From a10cd5548d2e21d10b3e43a52af2684333425f26 Mon Sep 17 00:00:00 2001
From: mjain-jump <150074777+mjain-jump@users.noreply.github.com>
Date: Mon, 26 Aug 2024 18:56:50 -0500
Subject: [PATCH 221/529] Tweaks for transaction-level fuzzing (#2744)

Tweak register blockhash for tests + add optional feature set parameter
into bank for fuzzing
---
 ledger/src/blockstore_processor.rs                | 1 +
 program-test/src/lib.rs                           | 1 +
 runtime/src/bank.rs                               | 9 ++++++++-
 runtime/src/bank/partitioned_epoch_rewards/mod.rs | 2 ++
 runtime/src/bank/tests.rs                         | 5 +++++
 5 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index 98701cf3468209..5273b4601bfe33 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -865,6 +865,7 @@ pub(crate) fn process_blockstore_for_bank_0(
         None,
         exit,
         None,
+        None,
     );
     let bank0_slot = bank0.slot();
     let bank_forks = BankForks::new_rw_arc(bank0);
diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs
index c0f0a32d0ddc7e..e6744366ec4f5d 100644
--- a/program-test/src/lib.rs
+++ b/program-test/src/lib.rs
@@ -867,6 +867,7 @@ impl ProgramTest {
             None,
             Arc::default(),
             None,
+            None,
         );

         // Add commonly-used SPL programs as a convenience to the user
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 4b6377c0c53e3c..1a2887e3f9c3c9 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -987,6 +987,7 @@ impl Bank {
         #[allow(unused)] collector_id_for_tests: Option<Pubkey>,
         exit: Arc<AtomicBool>,
         #[allow(unused)] genesis_hash: Option<Hash>,
+        #[allow(unused)] feature_set: Option<FeatureSet>,
     ) -> Self {
         let accounts_db = AccountsDb::new_with_config(
             paths,
@@ -1005,6 +1006,11 @@ impl Bank {
         bank.transaction_debug_keys = debug_keys;
         bank.cluster_type = Some(genesis_config.cluster_type);

+        #[cfg(feature = "dev-context-only-utils")]
+        {
+            bank.feature_set = Arc::new(feature_set.unwrap_or_default());
+        }
+        #[cfg(not(feature = "dev-context-only-utils"))]
         bank.process_genesis_config(genesis_config);
         #[cfg(feature = "dev-context-only-utils")]
@@ -3157,7 +3163,6 @@ impl Bank {
             w_blockhash_queue
                 .register_hash(blockhash, self.fee_rate_governor.lamports_per_signature);
         }
-        self.update_recent_blockhashes_locked(&w_blockhash_queue);
     }

     /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls
@@ -6631,6 +6636,7 @@ impl Bank {
             Some(Pubkey::new_unique()),
             Arc::default(),
             None,
+            None,
         )
     }

@@ -6655,6 +6661,7 @@ impl Bank {
             Some(Pubkey::new_unique()),
             Arc::default(),
             None,
+            None,
         )
     }

diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs
index f8ad09e06f66ec..f4d439d7a9ea43 100644
--- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs
+++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs
@@ -380,6 +380,7 @@ mod tests {
             Some(Pubkey::new_unique()),
             Arc::default(),
             None,
+            None,
         );

         // Fill bank_forks with banks with votes landing in the next slot
@@ -488,6 +489,7 @@ mod tests {
             Some(Pubkey::new_unique()),
             Arc::default(),
             None,
+            None,
         );

         let stake_account_stores_per_block =
diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs
index dc8175cebd9550..34be9d6aab66a8 100644
--- a/runtime/src/bank/tests.rs
+++ b/runtime/src/bank/tests.rs
@@ -9010,6 +9010,7 @@ fn test_epoch_schedule_from_genesis_config() {
         None,
         Arc::default(),
         None,
+        None,
     ));

     assert_eq!(bank.epoch_schedule(), &genesis_config.epoch_schedule);
@@ -9041,6 +9042,7 @@ where
         None,
         Arc::default(),
         None,
+        None,
     ));
     let vote_and_stake_accounts =
         load_vote_and_stake_accounts(&bank).vote_with_stake_delegations_map;
@@ -12645,6 +12647,7 @@ fn test_rehash_with_skipped_rewrites() {
         Some(Pubkey::new_unique()),
         Arc::new(AtomicBool::new(false)),
         None,
+        None,
     ));
     // This test is only meaningful while the bank hash contains rewrites.
     // Once this feature is enabled, it may be possible to remove this test entirely.
@@ -12707,6 +12710,7 @@ fn test_rebuild_skipped_rewrites() {
         Some(Pubkey::new_unique()),
         Arc::new(AtomicBool::new(false)),
         None,
+        None,
     ));
     // This test is only meaningful while the bank hash contains rewrites.
     // Once this feature is enabled, it may be possible to remove this test entirely.
@@ -12818,6 +12822,7 @@ fn test_get_accounts_for_bank_hash_details(skip_rewrites: bool) {
         Some(Pubkey::new_unique()),
         Arc::new(AtomicBool::new(false)),
         None,
+        None,
     ));
     // This test is only meaningful while the bank hash contains rewrites.
     // Once this feature is enabled, it may be possible to remove this test entirely.
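A sketch of how a fuzz harness might build the optional feature set for the new trailing `Bank` parameter; `FeatureSet::all_enabled` and `FeatureSet::default` are the two obvious presets, and passing `None` falls back to the default, as the `unwrap_or_default` above shows:

use solana_sdk::feature_set::FeatureSet;

fn fuzzing_feature_set(enable_all: bool) -> Option<FeatureSet> {
    // Under dev-context-only-utils the Bank installs this set directly in
    // place of genesis-config feature processing.
    Some(if enable_all {
        FeatureSet::all_enabled()
    } else {
        FeatureSet::default()
    })
}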
From 552771ff0305e1deeb3506bdd9a01b5872a2c8a1 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 27 Aug 2024 11:00:38 +0800 Subject: [PATCH 222/529] metrics: remove dropped-vote-slot (#2715) --- programs/vote/src/vote_state/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index d9485c47d10384..3b5f1a5210519a 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -499,7 +499,6 @@ fn check_slots_are_valid( "{} dropped vote slots {:?} failed to match slot hashes: {:?}", vote_state.node_pubkey, vote_slots, slot_hashes, ); - inc_new_counter_info!("dropped-vote-slot", 1); return Err(VoteError::SlotsMismatch); } if &slot_hashes[j].1 != vote_hash { From 991d0b764503fc3304662becda3e2d270f49dd43 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 27 Aug 2024 11:00:46 +0800 Subject: [PATCH 223/529] metrics: remove dropped-vote-hash (#2714) --- programs/vote/src/vote_state/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 3b5f1a5210519a..9c709ab0885fd2 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -509,7 +509,6 @@ fn check_slots_are_valid( "{} dropped vote slots {:?} failed to match hash {} {}", vote_state.node_pubkey, vote_slots, vote_hash, slot_hashes[j].1 ); - inc_new_counter_info!("dropped-vote-hash", 1); return Err(VoteError::SlotHashMismatch); } Ok(()) From eaca00155696d1e9f81fdefbaec8f5679eb20c66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 03:40:59 +0000 Subject: [PATCH 224/529] build(deps): bump serde from 1.0.208 to 1.0.209 (#2738) * build(deps): bump serde from 1.0.208 to 1.0.209 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.208 to 1.0.209. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.208...v1.0.209) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files * sync --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: yihau --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5dddc63359e17..79484babb78665 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5074,9 +5074,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] @@ -5092,9 +5092,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index ccf6d769f2e23b..37c2dbf9b6e504 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -330,9 +330,9 @@ rustls = { version = "0.21.12", default-features = false, features = ["quic"] } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" -serde = "1.0.208" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.209" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" -serde_derive = "1.0.208" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_derive = "1.0.209" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.127" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 00d5545e1c9056..9fc1e187c9a7ef 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4234,9 +4234,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] @@ -4252,9 +4252,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", From 164d37c526ba46cffed765416344013e2c1807c7 Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 27 Aug 2024 11:28:55 +0200 Subject: [PATCH 225/529] cli: Use simulated compute units in ping (#2693) * cli: Use simulated compute units in ping #### Problem The CLI has the ability to simulate transactions before sending to use the correct number of compute units, but `solana ping` is still using the default compute unit limit. 
#### Summary of changes Simulate once to get the compute unit limit and then re-use the simulated number for every ping. * Refactor per review * Only get compute unit limit if simulation needed, add test --- cli/src/cluster_query.rs | 33 +++++++++++++--- cli/src/compute_budget.rs | 77 +++++++++++++++++++++++++------------- cli/tests/cluster_query.rs | 61 ++++++++++++++++++++++++++++++ 3 files changed, 140 insertions(+), 31 deletions(-) create mode 100644 cli/tests/cluster_query.rs diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 82cb9f906f4777..0e66d95a5d3bd1 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1,7 +1,9 @@ use { crate::{ cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, - compute_budget::{ComputeUnitConfig, WithComputeUnitConfig}, + compute_budget::{ + simulate_for_compute_unit_limit, ComputeUnitConfig, WithComputeUnitConfig, + }, feature::get_feature_activation_epoch, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }, @@ -1435,10 +1437,14 @@ pub fn process_ping( rpc_client: &RpcClient, ) -> ProcessResult { let (signal_sender, signal_receiver) = unbounded(); - ctrlc::set_handler(move || { + match ctrlc::try_set_handler(move || { let _ = signal_sender.send(()); - }) - .expect("Error setting Ctrl-C handler"); + }) { + // It's possible to set the ctrl-c handler more than once in testing + // situations, so let that case through + Err(ctrlc::Error::MultipleHandlers) => {} + result => result.expect("Error setting Ctrl-C handler"), + } let mut cli_pings = vec![]; @@ -1458,6 +1464,23 @@ pub fn process_ping( } } + let to = config.signers[0].pubkey(); + let compute_unit_limit = if compute_unit_price.is_some() { + let ixs = vec![system_instruction::transfer( + &config.signers[0].pubkey(), + &to, + lamports, + )] + .with_compute_unit_config(&ComputeUnitConfig { + compute_unit_price, + compute_unit_limit: ComputeUnitLimit::Simulated, + }); + let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + ComputeUnitLimit::Static(simulate_for_compute_unit_limit(rpc_client, &message)?) 
+    } else {
+        ComputeUnitLimit::Default
+    };
+
     'mainloop: for seq in 0..count.unwrap_or(u64::MAX) {
         let now = Instant::now();
         if fixed_blockhash.is_none() && now.duration_since(blockhash_acquired).as_secs() > 60 {
@@ -1468,10 +1491,8 @@ pub fn process_ping(
             blockhash_acquired = Instant::now();
         }

-        let to = config.signers[0].pubkey();
         lamports = lamports.saturating_add(1);

-        let compute_unit_limit = ComputeUnitLimit::Default;
         let build_message = |lamports| {
             let ixs = vec![system_instruction::transfer(
                 &config.signers[0].pubkey(),
diff --git a/cli/src/compute_budget.rs b/cli/src/compute_budget.rs
index 71373dca462992..31237181ed771a 100644
--- a/cli/src/compute_budget.rs
+++ b/cli/src/compute_budget.rs
@@ -20,32 +20,31 @@ pub(crate) enum UpdateComputeUnitLimitResult {
     NoInstructionFound,
 }

-// Returns the index of the compute unit limit instruction
-pub(crate) fn simulate_and_update_compute_unit_limit(
-    rpc_client: &RpcClient,
-    message: &mut Message,
-) -> Result<UpdateComputeUnitLimitResult, Box<dyn std::error::Error>> {
-    let Some(compute_unit_limit_ix_index) =
-        message
-            .instructions
-            .iter()
-            .enumerate()
-            .find_map(|(ix_index, instruction)| {
-                let ix_program_id = message.program_id(ix_index)?;
-                if ix_program_id != &compute_budget::id() {
-                    return None;
-                }
+fn get_compute_unit_limit_instruction_index(message: &Message) -> Option<usize> {
+    message
+        .instructions
+        .iter()
+        .enumerate()
+        .find_map(|(ix_index, instruction)| {
+            let ix_program_id = message.program_id(ix_index)?;
+            if ix_program_id != &compute_budget::id() {
+                return None;
+            }

-                matches!(
-                    try_from_slice_unchecked(&instruction.data),
-                    Ok(ComputeBudgetInstruction::SetComputeUnitLimit(_))
-                )
-                .then_some(ix_index)
-            })
-    else {
-        return Ok(UpdateComputeUnitLimitResult::NoInstructionFound);
-    };
+            matches!(
+                try_from_slice_unchecked(&instruction.data),
+                Ok(ComputeBudgetInstruction::SetComputeUnitLimit(_))
+            )
+            .then_some(ix_index)
+        })
+}

+/// Like `simulate_for_compute_unit_limit`, but does not check that the message
+/// contains a compute unit limit instruction.
+fn simulate_for_compute_unit_limit_unchecked(
+    rpc_client: &RpcClient,
+    message: &Message,
+) -> Result<u32, Box<dyn std::error::Error>> {
     let transaction = Transaction::new_unsigned(message.clone());
     let simulate_result = rpc_client
         .simulate_transaction_with_config(
@@ -67,8 +66,36 @@
         .units_consumed
         .expect("compute units unavailable");

+    u32::try_from(units_consumed).map_err(Into::into)
+}
+
+/// Returns the compute unit limit used during simulation
+///
+/// Returns an error if the message does not contain a compute unit limit
+/// instruction or if the simulation fails.
+pub(crate) fn simulate_for_compute_unit_limit(
+    rpc_client: &RpcClient,
+    message: &Message,
+) -> Result<u32, Box<dyn std::error::Error>> {
+    if get_compute_unit_limit_instruction_index(message).is_none() {
+        return Err("No compute unit limit instruction found".into());
+    }
+    simulate_for_compute_unit_limit_unchecked(rpc_client, message)
+}
+
+// Returns the index of the compute unit limit instruction
+pub(crate) fn simulate_and_update_compute_unit_limit(
+    rpc_client: &RpcClient,
+    message: &mut Message,
+) -> Result<UpdateComputeUnitLimitResult, Box<dyn std::error::Error>> {
+    let Some(compute_unit_limit_ix_index) = get_compute_unit_limit_instruction_index(message)
+    else {
+        return Ok(UpdateComputeUnitLimitResult::NoInstructionFound);
+    };
+
+    let compute_unit_limit = simulate_for_compute_unit_limit_unchecked(rpc_client, message)?;
+
     // Overwrite the compute unit limit instruction with the actual units consumed
-    let compute_unit_limit = u32::try_from(units_consumed)?;
     message.instructions[compute_unit_limit_ix_index].data =
         ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit).data;

diff --git a/cli/tests/cluster_query.rs b/cli/tests/cluster_query.rs
new file mode 100644
index 00000000000000..09f0912eeed3c3
--- /dev/null
+++ b/cli/tests/cluster_query.rs
@@ -0,0 +1,61 @@
+use {
+    solana_cli::{
+        check_balance,
+        cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
+        test_utils::check_ready,
+    },
+    solana_faucet::faucet::run_local_faucet,
+    solana_rpc_client::rpc_client::RpcClient,
+    solana_sdk::{
+        commitment_config::CommitmentConfig,
+        fee::FeeStructure,
+        native_token::sol_to_lamports,
+        signature::{Keypair, Signer},
+    },
+    solana_streamer::socket::SocketAddrSpace,
+    solana_test_validator::TestValidator,
+    std::time::Duration,
+    test_case::test_case,
+};
+
+#[test_case(None; "base")]
+#[test_case(Some(1_000_000); "with_compute_unit_price")]
+fn test_ping(compute_unit_price: Option<u64>) {
+    solana_logger::setup();
+    let fee = FeeStructure::default().get_max_fee(1, 0);
+    let mint_keypair = Keypair::new();
+    let mint_pubkey = mint_keypair.pubkey();
+    let faucet_addr = run_local_faucet(mint_keypair, None);
+    let test_validator = TestValidator::with_custom_fees(
+        mint_pubkey,
+        fee,
+        Some(faucet_addr),
+        SocketAddrSpace::Unspecified,
+    );
+
+    let rpc_client =
+        RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
+
+    let default_signer = Keypair::new();
+    let signer_pubkey = default_signer.pubkey();
+
+    let mut config = CliConfig::recent_for_tests();
+    config.json_rpc_url = test_validator.rpc_url();
+    config.signers = vec![&default_signer];
+
+    request_and_confirm_airdrop(&rpc_client, &config, &signer_pubkey, sol_to_lamports(1.0))
+        .unwrap();
+    check_balance!(sol_to_lamports(1.0), &rpc_client, &signer_pubkey);
+    check_ready(&rpc_client);
+
+    let count = 5;
+    config.command = CliCommand::Ping {
+        interval: Duration::from_secs(0),
+        count: Some(count),
+        timeout: Duration::from_secs(15),
+        blockhash: None,
+        print_timestamp: false,
+        compute_unit_price,
+    };
+    process_command(&config).unwrap();
+}
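A sketch of the simulate-then-pin pattern this change establishes, written as crate-internal code since the helpers are `pub(crate)`; the transfer instruction stands in for any message that already carries a `SetComputeUnitLimit` instruction, and `ComputeUnitConfig`, `ComputeUnitLimit`, and `WithComputeUnitConfig` come from the cli crate's own compute_budget module as imported in cluster_query.rs above:

use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::{message::Message, pubkey::Pubkey, system_instruction};

fn pinned_compute_unit_limit(
    rpc_client: &RpcClient,
    from: &Pubkey,
    to: &Pubkey,
    lamports: u64,
    compute_unit_price: Option<u64>,
) -> Result<ComputeUnitLimit, Box<dyn std::error::Error>> {
    // Build a message that requests a simulated compute unit limit...
    let ixs = vec![system_instruction::transfer(from, to, lamports)]
        .with_compute_unit_config(&ComputeUnitConfig {
            compute_unit_price,
            compute_unit_limit: ComputeUnitLimit::Simulated,
        });
    let message = Message::new(&ixs, Some(from));
    // ...simulate once, then reuse the consumed units for every later send.
    Ok(ComputeUnitLimit::Static(simulate_for_compute_unit_limit(
        rpc_client, &message,
    )?))
}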
From 669d1bc4baa371076b313155caee071cd7a0d2cd Mon Sep 17 00:00:00 2001
From: Joe C
Date: Tue, 27 Aug 2024 06:03:30 -0700
Subject: [PATCH 226/529] Runtime: add `RentCollectorWithMetrics` wrapper type
 for SVM (#2726)

---
 Cargo.lock                    |  1 +
 programs/sbf/Cargo.lock       |  8 ++++
 runtime/Cargo.toml            |  1 +
 runtime/src/bank.rs           |  5 ++
 runtime/src/lib.rs            |  1 +
 runtime/src/rent_collector.rs | 90 +++++++++++++++++++++++++++++++++++
 6 files changed, 106 insertions(+)
 create mode 100644 runtime/src/rent_collector.rs

diff --git a/Cargo.lock b/Cargo.lock
index 79484babb78665..8a32f9e599712e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7378,6 +7378,7 @@ dependencies = [
 "solana-sdk",
 "solana-stake-program",
 "solana-svm",
+ "solana-svm-rent-collector",
 "solana-svm-transaction",
 "solana-system-program",
 "solana-timings",
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 9fc1e187c9a7ef..b3c0ee479b921d 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -5719,6 +5719,7 @@ dependencies = [
 "solana-sdk",
 "solana-stake-program",
 "solana-svm",
+ "solana-svm-rent-collector",
 "solana-svm-transaction",
 "solana-system-program",
 "solana-timings",
@@ -6455,6 +6456,13 @@ dependencies = [
 "thiserror",
 ]

+[[package]]
+name = "solana-svm-rent-collector"
+version = "2.1.0"
+dependencies = [
+ "solana-sdk",
+]
+
 [[package]]
 name = "solana-svm-transaction"
 version = "2.1.0"
diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml
index 44d291725b7419..3e3573f0eeb9d8 100644
--- a/runtime/Cargo.toml
+++ b/runtime/Cargo.toml
@@ -68,6 +68,7 @@ solana-runtime-transaction = { workspace = true }
 solana-sdk = { workspace = true }
 solana-stake-program = { workspace = true }
 solana-svm = { workspace = true }
+solana-svm-rent-collector = { workspace = true }
 solana-svm-transaction = { workspace = true }
 solana-system-program = { workspace = true }
 solana-timings = { workspace = true }
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 1a2887e3f9c3c9..de2cb370abfe8e 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -43,6 +43,7 @@ use {
     bank_forks::BankForks,
     epoch_stakes::{split_epoch_stakes, EpochStakes, NodeVoteAccounts, VersionedEpochStakes},
     installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock},
+    rent_collector::RentCollectorWithMetrics,
     runtime_config::RuntimeConfig,
     serde_snapshot::BankIncrementalSnapshotPersistence,
     snapshot_hash::SnapshotHash,
@@ -3476,6 +3477,10 @@ impl Bank {
         timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_us);

         let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature();
+        // TODO: Pass into `TransactionProcessingEnvironment` in place of
+        // `rent_collector` when SVM supports the new `SVMRentCollector` trait.
+        let _rent_collector_with_metrics =
+            RentCollectorWithMetrics::new(self.rent_collector.clone());
         let processing_environment = TransactionProcessingEnvironment {
             blockhash,
             epoch_total_stake: self.epoch_total_stake(self.epoch()),
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index 8e9f4b4c82d931..c3772735b9ef42 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -17,6 +17,7 @@ pub mod loader_utils;
 pub mod non_circulating_supply;
 pub mod prioritization_fee;
 pub mod prioritization_fee_cache;
+pub mod rent_collector;
 pub mod root_bank_cache;
 pub mod serde_snapshot;
 pub mod snapshot_archive_info;
diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs
new file mode 100644
index 00000000000000..683bd91cd6cb30
--- /dev/null
+++ b/runtime/src/rent_collector.rs
@@ -0,0 +1,90 @@
+//! Bank's wrapper around `RentCollector` to allow for overriding of some
+//! `SVMRentCollector` trait methods, which are otherwise implemented on
+//! `RentCollector` directly.
+//!
+//! Agave requires submission of logs and metrics during account rent state
+//! assessment, which is not included in the `RentCollector` implementation
+//! of the `SVMRentCollector` trait. This wrapper allows all `SVMRentCollector`
+//! methods to be passed through to the underlying `RentCollector`, except for
+//! those which require additional logging and metrics.
+
+use {
+    log::*,
+    solana_sdk::{
+        account::AccountSharedData,
+        clock::Epoch,
+        pubkey::Pubkey,
+        rent::{Rent, RentDue},
+        rent_collector::{CollectedInfo, RentCollector},
+        transaction::{Result, TransactionError},
+        transaction_context::IndexOfAccount,
+    },
+    solana_svm_rent_collector::{rent_state::RentState, svm_rent_collector::SVMRentCollector},
+};
+
+/// Wrapper around `RentCollector` to allow for overriding of some
+/// `SVMRentCollector` trait methods, which are otherwise implemented on
+/// `RentCollector` directly.
+///
+/// Overrides inject logging and metrics submission into the rent state
+/// assessment process.
+pub struct RentCollectorWithMetrics(RentCollector);
+
+impl RentCollectorWithMetrics {
+    pub fn new(rent_collector: RentCollector) -> Self {
+        Self(rent_collector)
+    }
+}
+
+impl SVMRentCollector for RentCollectorWithMetrics {
+    fn collect_rent(&self, address: &Pubkey, account: &mut AccountSharedData) -> CollectedInfo {
+        self.0.collect_rent(address, account)
+    }
+
+    fn get_rent(&self) -> &Rent {
+        self.0.get_rent()
+    }
+
+    fn get_rent_due(&self, lamports: u64, data_len: usize, account_rent_epoch: Epoch) -> RentDue {
+        self.0.get_rent_due(lamports, data_len, account_rent_epoch)
+    }
+
+    // Overridden to inject logging and metrics.
+    fn check_rent_state_with_account(
+        &self,
+        pre_rent_state: &RentState,
+        post_rent_state: &RentState,
+        address: &Pubkey,
+        account_state: &AccountSharedData,
+        account_index: IndexOfAccount,
+    ) -> Result<()> {
+        submit_rent_state_metrics(pre_rent_state, post_rent_state);
+        if !solana_sdk::incinerator::check_id(address)
+            && !self.transition_allowed(pre_rent_state, post_rent_state)
+        {
+            debug!(
+                "Account {} not rent exempt, state {:?}",
+                address, account_state,
+            );
+            let account_index = account_index as u8;
+            Err(TransactionError::InsufficientFundsForRent { account_index })
+        } else {
+            Ok(())
+        }
+    }
+}
+
+fn submit_rent_state_metrics(pre_rent_state: &RentState, post_rent_state: &RentState) {
+    match (pre_rent_state, post_rent_state) {
+        (&RentState::Uninitialized, &RentState::RentPaying { .. }) => {
+            inc_new_counter_info!("rent_paying_err-new_account", 1);
+        }
+        (&RentState::RentPaying { .. }, &RentState::RentPaying { .. }) => {
+            inc_new_counter_info!("rent_paying_ok-legacy", 1);
+        }
+        (_, &RentState::RentPaying { .. }) => {
+            inc_new_counter_info!("rent_paying_err-other", 1);
+        }
+        _ => {}
+    }
+}
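A sketch of the intended hand-off once SVM accepts an `SVMRentCollector`, mirroring the TODO left in bank.rs above:

use solana_runtime::rent_collector::RentCollectorWithMetrics;
use solana_sdk::rent_collector::RentCollector;
use solana_svm_rent_collector::svm_rent_collector::SVMRentCollector;

fn wrap_for_svm(rent_collector: &RentCollector) -> impl SVMRentCollector {
    // Every trait method passes through to the inner collector except
    // check_rent_state_with_account, which also logs and submits metrics.
    RentCollectorWithMetrics::new(rent_collector.clone())
}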
From 3bf82c9c9e7dec45871484cfd163c09c9218f5f6 Mon Sep 17 00:00:00 2001
From: Justin Starry
Date: Tue, 27 Aug 2024 22:11:25 +0800
Subject: [PATCH 227/529] feat: move precompile verification to SVM (#2441)

---
 Cargo.lock                                |  2 +
 banks-server/src/banks_server.rs          | 10 +-
 core/src/banking_stage/consumer.rs        | 21 +++-
 program-runtime/src/invoke_context.rs     | 31 +++++-
 .../tests/process_transaction.rs          | 38 +++++++-
 programs/sbf/Cargo.lock                   |  2 +
 rpc/src/rpc.rs                            |  8 +-
 runtime/src/bank.rs                       | 10 +-
 sdk/Cargo.toml                            |  2 +
 sdk/src/feature_set.rs                    |  5 +
 sdk/src/precompiles.rs                    | 17 +++-
 svm/src/message_processor.rs              | 95 +++++++++----------
 12 files changed, 175 insertions(+), 66 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 8a32f9e599712e..f543e85f1cb23f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7452,6 +7452,8 @@ dependencies = [
 "libsecp256k1",
 "log",
 "memmap2",
+ "num-derive",
+ "num-traits",
 "num_enum",
 "pbkdf2 0.11.0",
 "qstring",
diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs
index 002e77b0549061..10cc43a5878619 100644
--- a/banks-server/src/banks_server.rs
+++ b/banks-server/src/banks_server.rs
@@ -17,7 +17,7 @@ use {
     account::Account,
     clock::Slot,
     commitment_config::CommitmentLevel,
-    feature_set::FeatureSet,
+    feature_set::{self, FeatureSet},
     hash::Hash,
     message::{Message, SanitizedMessage},
     pubkey::Pubkey,
@@ -163,7 +163,13 @@ fn verify_transaction(
     feature_set: &Arc<FeatureSet>,
 ) -> transaction::Result<()> {
     transaction.verify()?;
-    transaction.verify_precompiles(feature_set)?;
+
+    let move_precompile_verification_to_svm =
+        feature_set.is_active(&feature_set::move_precompile_verification_to_svm::id());
+    if !move_precompile_verification_to_svm {
+        transaction.verify_precompiles(feature_set)?;
+    }
+
     Ok(())
 }

diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs
index b773296fc2eb5f..39e3dc1fd95d5c 100644
--- a/core/src/banking_stage/consumer.rs
+++ b/core/src/banking_stage/consumer.rs
@@ -392,11 +392,20 @@ impl Consumer {
         let check_results =
             bank.check_transactions(txs, &pre_results, MAX_PROCESSING_AGE, &mut error_counters);

         // If checks passed, verify pre-compiles and continue processing on success.
+        let move_precompile_verification_to_svm = bank
+            .feature_set
+            .is_active(&feature_set::move_precompile_verification_to_svm::id());
         let check_results: Vec<_> = txs
             .iter()
             .zip(check_results)
             .map(|(tx, result)| match result {
-                Ok(_) => tx.verify_precompiles(&bank.feature_set),
+                Ok(_) => {
+                    if !move_precompile_verification_to_svm {
+                        tx.verify_precompiles(&bank.feature_set)
+                    } else {
+                        Ok(())
+                    }
+                }
                 Err(err) => Err(err),
             })
             .collect();
@@ -421,7 +430,10 @@ impl Consumer {
         txs: &[SanitizedTransaction],
         max_slot_ages: &[Slot],
     ) -> ProcessTransactionBatchOutput {
-        // Verify pre-compiles.
+        let move_precompile_verification_to_svm = bank
+            .feature_set
+            .is_active(&feature_set::move_precompile_verification_to_svm::id());
+
         // Need to filter out transactions since they were sanitized earlier.
         // This means that the transaction may cross an epoch boundary (not allowed),
         // or account lookup tables may have been closed.
@@ -439,7 +451,10 @@ impl Consumer {
             }
         } else {
             // Verify pre-compiles.
-            tx.verify_precompiles(&bank.feature_set)?;
+            if !move_precompile_verification_to_svm {
+                tx.verify_precompiles(&bank.feature_set)?;
+            }
+
             // Any transaction executed between sanitization time and now may have closed the lookup table(s).
             // Above re-sanitization already loads addresses, so don't need to re-check in that case.
             let lookup_tables = tx.message().message_address_table_lookups();
diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs
index 96079b9a87072d..97d8200053a1a5 100644
--- a/program-runtime/src/invoke_context.rs
+++ b/program-runtime/src/invoke_context.rs
@@ -22,10 +22,11 @@ use {
     bpf_loader_deprecated,
     clock::Slot,
     epoch_schedule::EpochSchedule,
-    feature_set::FeatureSet,
+    feature_set::{self, FeatureSet},
     hash::Hash,
     instruction::{AccountMeta, InstructionError},
     native_loader,
+    precompiles::Precompile,
     pubkey::Pubkey,
     saturating_add_assign,
     stable_layout::stable_instruction::StableInstruction,
@@ -465,6 +466,34 @@ impl<'a> InvokeContext<'a> {
             .and(self.pop())
     }

+    /// Processes a precompile instruction
+    pub fn process_precompile<'ix_data>(
+        &mut self,
+        precompile: &Precompile,
+        instruction_data: &[u8],
+        instruction_accounts: &[InstructionAccount],
+        program_indices: &[IndexOfAccount],
+        message_instruction_datas_iter: impl Iterator<Item = &'ix_data [u8]>,
+    ) -> Result<(), InstructionError> {
+        self.transaction_context
+            .get_next_instruction_context()?
+            .configure(program_indices, instruction_accounts, instruction_data);
+        self.push()?;
+
+        let feature_set = self.get_feature_set();
+        let move_precompile_verification_to_svm =
+            feature_set.is_active(&feature_set::move_precompile_verification_to_svm::id());
+        if move_precompile_verification_to_svm {
+            let instruction_datas: Vec<_> = message_instruction_datas_iter.collect();
+            precompile
+                .verify(instruction_data, &instruction_datas, feature_set)
+                .map_err(InstructionError::from)
+                .and(self.pop())
+        } else {
+            self.pop()
+        }
+    }
+
     /// Calls the instruction's program entrypoint method
     fn process_executable_chain(
         &mut self,
diff --git a/programs/ed25519-tests/tests/process_transaction.rs b/programs/ed25519-tests/tests/process_transaction.rs
index 25897f8fd2075a..7ffb64ef0d6499 100644
--- a/programs/ed25519-tests/tests/process_transaction.rs
+++ b/programs/ed25519-tests/tests/process_transaction.rs
@@ -3,6 +3,9 @@ use {
     solana_program_test::*,
     solana_sdk::{
         ed25519_instruction::new_ed25519_instruction,
+        feature_set,
+        instruction::InstructionError,
+        precompiles::PrecompileError,
         signature::Signer,
         transaction::{Transaction, TransactionError},
     },
@@ -44,6 +47,37 @@ async fn test_success() {
     assert_matches!(client.process_transaction(transaction).await, Ok(()));
 }

+#[tokio::test]
+async fn test_failure_without_move_precompiles_feature() {
+    let mut program_test = ProgramTest::default();
+    program_test.deactivate_feature(feature_set::move_precompile_verification_to_svm::id());
+    let mut context = program_test.start_with_context().await;
+
+    let client = &mut context.banks_client;
+    let payer = &context.payer;
+    let recent_blockhash = context.last_blockhash;
+
+    let privkey = generate_keypair();
+    let message_arr = b"hello";
+    let mut instruction = new_ed25519_instruction(&privkey, message_arr);
+
+    instruction.data[0] += 1;
+
+    let transaction = Transaction::new_signed_with_payer(
+        &[instruction],
+        Some(&payer.pubkey()),
+        &[payer],
+        recent_blockhash,
+    );
+
+    assert_matches!(
+        client.process_transaction(transaction).await,
+        Err(BanksClientError::TransactionError(
+            TransactionError::InvalidAccountIndex
+        ))
+    );
+}
+
 #[tokio::test]
 async fn test_failure() {
     let mut context = ProgramTest::default().start_with_context().await;
@@ -68,7 +102,9 @@ async fn test_failure() {
     assert_matches!(
         client.process_transaction(transaction).await,
         Err(BanksClientError::TransactionError(
-            TransactionError::InvalidAccountIndex
+            TransactionError::InstructionError(0, InstructionError::Custom(3))
         ))
     );
+    // this assert is for documenting the matched error code above
+    assert_eq!(3, PrecompileError::InvalidDataOffsets as u32);
 }
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index b3c0ee479b921d..e70ba341f05492 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -6257,6 +6257,8 @@ dependencies = [
 "libsecp256k1 0.6.0",
 "log",
 "memmap2",
+ "num-derive",
+ "num-traits",
 "num_enum",
 "pbkdf2 0.11.0",
 "qstring",
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 05bd4caa8d6088..848159cf9688f9 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -2251,8 +2251,12 @@ fn verify_transaction(
         return Err(RpcCustomError::TransactionSignatureVerificationFailure.into());
     }

-    if let Err(e) = transaction.verify_precompiles(feature_set) {
-        return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into());
+    let move_precompile_verification_to_svm =
+        feature_set.is_active(&feature_set::move_precompile_verification_to_svm::id());
+    if !move_precompile_verification_to_svm {
+        if let Err(e) = transaction.verify_precompiles(feature_set) {
+            return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into());
+        }
     }

     Ok(())
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index de2cb370abfe8e..ec042861873d71 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -5518,9 +5518,13 @@ impl Bank {
             )
         }?;

-        if verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles
-            || verification_mode == TransactionVerificationMode::FullVerification
-        {
+        let move_precompile_verification_to_svm = self
+            .feature_set
+            .is_active(&feature_set::move_precompile_verification_to_svm::id());
+        if !move_precompile_verification_to_svm && {
+            verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles
+                || verification_mode == TransactionVerificationMode::FullVerification
+        } {
             sanitized_tx.verify_precompiles(&self.feature_set)?;
         }

diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml
index faf38677af31c8..2e817063aae792 100644
--- a/sdk/Cargo.toml
+++ b/sdk/Cargo.toml
@@ -68,6 +68,8 @@ lazy_static = { workspace = true }
 libsecp256k1 = { workspace = true, optional = true, features = ["hmac"] }
 log = { workspace = true }
 memmap2 = { workspace = true, optional = true }
+num-derive = { workspace = true }
+num-traits = { workspace = true }
 num_enum = { workspace = true }
 pbkdf2 = { workspace = true }
 qstring = { workspace = true }
diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs
index 0cb89f631eabef..7f8ad417bb10f4 100644
--- a/sdk/src/feature_set.rs
+++ b/sdk/src/feature_set.rs
@@ -841,6 +841,10 @@ pub mod vote_only_retransmitter_signed_fec_sets {
     solana_sdk::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph");
 }

+pub mod move_precompile_verification_to_svm {
+    solana_sdk::declare_id!("9ypxGLzkMxi89eDerRKXWDXe44UY2z4hBig4mDhNq5Dp");
+}
+
 pub mod enable_transaction_loading_failure_fees {
     solana_sdk::declare_id!("PaymEPK2oqwT9TXAVfadjztH2H6KfLEB9Hhd5Q5frvP");
 }
@@ -1058,6 +1062,7 @@ lazy_static! {
     (move_stake_and_move_lamports_ixs::id(), "Enable MoveStake and MoveLamports stake program instructions #1610"),
     (ed25519_precompile_verify_strict::id(), "Use strict verification in ed25519 precompile SIMD-0152"),
     (vote_only_retransmitter_signed_fec_sets::id(), "vote only on retransmitter signed fec sets"),
+    (move_precompile_verification_to_svm::id(), "SIMD-0159: Move precompile verification into SVM"),
     (enable_transaction_loading_failure_fees::id(), "Enable fees for some additional transaction failures SIMD-0082"),
     (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"),
     (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"),
diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs
index 04e1e2ea2ec389..cb16e5ecd86276 100644
--- a/sdk/src/precompiles.rs
+++ b/sdk/src/precompiles.rs
@@ -3,14 +3,16 @@
 #![cfg(feature = "full")]

 use {
-    crate::{feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey},
+    crate::{feature_set::FeatureSet, pubkey::Pubkey},
     lazy_static::lazy_static,
+    num_derive::{FromPrimitive, ToPrimitive},
     solana_decode_error::DecodeError,
+    solana_program::instruction::CompiledInstruction,
     thiserror::Error,
 };

 /// Precompile errors
-#[derive(Error, Debug, Clone, PartialEq, Eq)]
+#[derive(Error, Debug, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)]
 pub enum PrecompileError {
     #[error("public key is not valid")]
     InvalidPublicKey,
@@ -23,6 +25,7 @@ pub enum PrecompileError {
     #[error("instruction is incorrect size")]
     InvalidInstructionDataSize,
 }
+
 impl<T> DecodeError<T> for PrecompileError {
     fn type_of() -> &'static str {
         "PrecompileError"
@@ -96,6 +99,16 @@ where
         .any(|precompile| precompile.check_id(program_id, |feature_id| is_enabled(feature_id)))
 }

+/// Find an enabled precompiled program
+pub fn get_precompile<F>(program_id: &Pubkey, is_enabled: F) -> Option<&Precompile>
+where
+    F: Fn(&Pubkey) -> bool,
+{
+    PRECOMPILES
+        .iter()
+        .find(|precompile| precompile.check_id(program_id, |feature_id| is_enabled(feature_id)))
+}
+
 pub fn get_precompiles<'a>() -> &'a [Precompile] {
     &PRECOMPILES
 }
diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs
index 4bcc7d37492ee8..39a1b0ad1b9ee0 100644
--- a/svm/src/message_processor.rs
+++ b/svm/src/message_processor.rs
@@ -1,9 +1,9 @@
 use {
-    solana_measure::measure::Measure,
+    solana_measure::measure_us,
     solana_program_runtime::invoke_context::InvokeContext,
     solana_sdk::{
         account::WritableAccount,
-        precompiles::is_precompile,
+        precompiles::get_precompile,
         saturating_add_assign,
         sysvar::instructions,
         transaction::TransactionError,
@@ -44,10 +44,6 @@ impl MessageProcessor {
             .zip(program_indices.iter())
             .enumerate()
         {
-            let is_precompile = is_precompile(program_id, |id| {
-                invoke_context.get_feature_set().is_active(id)
-            });
-
             // Fixup the special instructions key if present
             // before the account pre-values are taken care of
             if let Some(account_index) = invoke_context
@@ -87,53 +83,48 @@ impl MessageProcessor {
                 });
             }

-            let result = if is_precompile {
-                invoke_context
-                    .transaction_context
-                    .get_next_instruction_context()
-                    .map(|instruction_context| {
-                        instruction_context.configure(
-                            program_indices,
-                            &instruction_accounts,
-                            instruction.data,
-                        );
-                    })
-                    .and_then(|_| {
-                        invoke_context.transaction_context.push()?;
-                        invoke_context.transaction_context.pop()
-                    })
-            } else {
-                let time = Measure::start("execute_instruction");
-                let mut compute_units_consumed = 0;
-                let result = invoke_context.process_instruction(
-                    instruction.data,
-                    &instruction_accounts,
-                    program_indices,
-                    &mut compute_units_consumed,
-                    execute_timings,
-                );
-                let time = time.end_as_us();
-                *accumulated_consumed_units =
-                    accumulated_consumed_units.saturating_add(compute_units_consumed);
-                execute_timings.details.accumulate_program(
-                    program_id,
-                    time,
-                    compute_units_consumed,
-                    result.is_err(),
-                );
-                invoke_context.timings = {
-                    execute_timings.details.accumulate(&invoke_context.timings);
-                    ExecuteDetailsTimings::default()
-                };
-                saturating_add_assign!(
-                    execute_timings
-                        .execute_accessories
-                        .process_instructions
-                        .total_us,
-                    time
-                );
-                result
+            let mut compute_units_consumed = 0;
+            let (result, process_instruction_us) = measure_us!({
+                if let Some(precompile) = get_precompile(program_id, |feature_id| {
+                    invoke_context.get_feature_set().is_active(feature_id)
+                }) {
+                    invoke_context.process_precompile(
+                        precompile,
+                        instruction.data,
+                        &instruction_accounts,
+                        program_indices,
+                        message.instructions_iter().map(|ix| ix.data),
+                    )
+                } else {
+                    invoke_context.process_instruction(
+                        instruction.data,
+                        &instruction_accounts,
+                        program_indices,
+                        &mut compute_units_consumed,
+                        execute_timings,
+                    )
+                }
+            });
+
+            *accumulated_consumed_units =
+                accumulated_consumed_units.saturating_add(compute_units_consumed);
+            execute_timings.details.accumulate_program(
+                program_id,
+                process_instruction_us,
+                compute_units_consumed,
+                result.is_err(),
+            );
+            invoke_context.timings = {
+                execute_timings.details.accumulate(&invoke_context.timings);
+                ExecuteDetailsTimings::default()
             };
+            saturating_add_assign!(
+                execute_timings
+                    .execute_accessories
+                    .process_instructions
+                    .total_us,
+                process_instruction_us
+            );

             result
                 .map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?;
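A sketch of the lookup this dispatch relies on; the helper name below is illustrative, while `get_precompile` and the feature predicate are exactly as added in sdk/src/precompiles.rs above:

use solana_sdk::{feature_set::FeatureSet, precompiles::get_precompile, pubkey::Pubkey};

fn is_enabled_precompile(program_id: &Pubkey, feature_set: &FeatureSet) -> bool {
    // Same predicate message_processor.rs passes: a precompile matches only
    // if its gating feature, when it has one, is active.
    get_precompile(program_id, |feature_id| feature_set.is_active(feature_id)).is_some()
}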
From 900e96fd44ba76042eaf7319794fd4b5ab3aadb3 Mon Sep 17 00:00:00 2001
From: Andrew Fitzgerald
Date: Tue, 27 Aug 2024 09:43:48 -0500
Subject: [PATCH 228/529] TransactionView (#2719)

---
 transaction-view/Cargo.toml                   |   2 +-
 ...ransaction_meta.rs => transaction_view.rs} |   6 +-
 .../src/address_table_lookup_meta.rs          |  12 +-
 transaction-view/src/instructions_meta.rs     |   4 +-
 transaction-view/src/lib.rs                   |  12 +-
 transaction-view/src/message_header_meta.rs   |   2 +-
 .../src/static_account_keys_meta.rs           |   2 +-
 transaction-view/src/transaction_data.rs      |  12 ++
 transaction-view/src/transaction_meta.rs      |  47 +++--
 transaction-view/src/transaction_view.rs      | 185 ++++++++++++++++++
 10 files changed, 243 insertions(+), 41 deletions(-)
 rename transaction-view/benches/{transaction_meta.rs => transaction_view.rs} (97%)
 create mode 100644 transaction-view/src/transaction_data.rs
 create mode 100644 transaction-view/src/transaction_view.rs

diff --git a/transaction-view/Cargo.toml b/transaction-view/Cargo.toml
index cbe16529521cb6..44347099927442 100644
--- a/transaction-view/Cargo.toml
+++ b/transaction-view/Cargo.toml
@@ -27,5 +27,5 @@ name = "bytes"
 harness = false

 [[bench]]
-name = "transaction_meta"
+name = "transaction_view"
 harness = false
diff --git a/transaction-view/benches/transaction_meta.rs b/transaction-view/benches/transaction_view.rs
similarity index 97%
rename from transaction-view/benches/transaction_meta.rs
rename to transaction-view/benches/transaction_view.rs
index 89dd07600ba4b1..79a4393be5207e 100644
--- a/transaction-view/benches/transaction_meta.rs
+++ b/transaction-view/benches/transaction_view.rs
@@ -1,5 +1,5 @@
 use {
-    agave_transaction_view::transaction_meta::TransactionMeta,
+    agave_transaction_view::transaction_view::TransactionView,
     criterion::{
         black_box, criterion_group, criterion_main, measurement::Measurement, BenchmarkGroup,
         Criterion, Throughput,
@@ -42,10 +42,10 @@ fn bench_transactions_parsing(
     });

     // New Transaction Parsing
-    group.bench_function("TransactionMeta", |c| {
+    group.bench_function("TransactionView", |c| {
         c.iter(|| {
             for bytes in serialized_transactions.iter() {
-                let _ = TransactionMeta::try_new(black_box(bytes)).unwrap();
+                let _ = TransactionView::try_new(black_box(bytes.as_ref())).unwrap();
             }
         });
     });
diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs
index c065386641c193..bbce7817cda5fc 100644
--- a/transaction-view/src/address_table_lookup_meta.rs
+++ b/transaction-view/src/address_table_lookup_meta.rs
@@ -4,7 +4,7 @@ use {
         advance_offset_for_array, advance_offset_for_type, check_remaining,
         optimized_read_compressed_u16, read_byte, read_slice_data, read_type,
     },
-    result::Result,
+    result::{Result, TransactionParsingError},
 },
 solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature},
 solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup,
@@ -42,10 +42,11 @@ const MIN_SIZED_PACKET_WITH_ATLS: usize = {
 };

 /// The maximum number of ATLS that can fit in a valid packet.
-const MAX_ATLS_PER_PACKET: usize = (PACKET_DATA_SIZE - MIN_SIZED_PACKET_WITH_ATLS) / MIN_SIZED_ATL;
+const MAX_ATLS_PER_PACKET: u8 =
+    ((PACKET_DATA_SIZE - MIN_SIZED_PACKET_WITH_ATLS) / MIN_SIZED_ATL) as u8;

 /// Contains metadata about the address table lookups in a transaction packet.
-pub struct AddressTableLookupMeta {
+pub(crate) struct AddressTableLookupMeta {
     /// The number of address table lookups in the transaction.
     pub(crate) num_address_table_lookups: u8,
     /// The offset to the first address table lookup in the transaction.
@@ -59,11 +60,14 @@ impl AddressTableLookupMeta {
     /// This function will parse each ATL to ensure the data is well-formed,
     /// but will not cache data related to these ATLs.
     #[inline(always)]
-    pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
+    pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
         // Maximum number of ATLs should be represented by a single byte,
         // thus the MSB should not be set.
         const _: () = assert!(MAX_ATLS_PER_PACKET & 0b1000_0000 == 0);
         let num_address_table_lookups = read_byte(bytes, offset)?;
+        if num_address_table_lookups > MAX_ATLS_PER_PACKET {
+            return Err(TransactionParsingError);
+        }

         // Check that the remaining bytes are enough to hold the ATLs.
         check_remaining(
diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_meta.rs
index 45de23d47c60fd..5b0e6153ad45d3 100644
--- a/transaction-view/src/instructions_meta.rs
+++ b/transaction-view/src/instructions_meta.rs
@@ -11,7 +11,7 @@ use {

 /// Contains metadata about the instructions in a transaction packet.
 #[derive(Default)]
-pub struct InstructionsMeta {
+pub(crate) struct InstructionsMeta {
     /// The number of instructions in the transaction.
     pub(crate) num_instructions: u16,
     /// The offset to the first instruction in the transaction.
@@ -26,7 +26,7 @@ impl InstructionsMeta {
     /// instruction data is well-formed, but will not cache data related to
     /// these instructions.
     #[inline(always)]
-    pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
+    pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
         // Read the number of instructions at the current offset.
// Each instruction needs at least 3 bytes, so do a sanity check here to // ensure we have enough bytes to read the number of instructions. diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index 13c5a43fd4016c..def04240b2aab7 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -4,16 +4,12 @@ pub mod bytes; #[cfg(not(feature = "dev-context-only-utils"))] mod bytes; -#[allow(dead_code)] -pub mod address_table_lookup_meta; -#[allow(dead_code)] +mod address_table_lookup_meta; mod instructions_meta; -#[allow(dead_code)] mod message_header_meta; pub mod result; -#[allow(dead_code)] mod signature_meta; -#[allow(dead_code)] pub mod static_account_keys_meta; -#[allow(dead_code)] -pub mod transaction_meta; +pub mod transaction_data; +mod transaction_meta; +pub mod transaction_view; diff --git a/transaction-view/src/message_header_meta.rs b/transaction-view/src/message_header_meta.rs index 2b83571d154494..dfc04766958a28 100644 --- a/transaction-view/src/message_header_meta.rs +++ b/transaction-view/src/message_header_meta.rs @@ -34,7 +34,7 @@ pub(crate) struct MessageHeaderMeta { impl MessageHeaderMeta { #[inline(always)] - pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { + pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Get the message offset. // We know the offset does not exceed packet length, and our packet // length is less than u16::MAX, so we can safely cast to u16. diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_meta.rs index bae934863cfa4e..f1f3b64f42bf83 100644 --- a/transaction-view/src/static_account_keys_meta.rs +++ b/transaction-view/src/static_account_keys_meta.rs @@ -24,7 +24,7 @@ pub(crate) struct StaticAccountKeysMeta { impl StaticAccountKeysMeta { #[inline(always)] - pub fn try_new(bytes: &[u8], offset: &mut usize) -> Result { + pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result { // Max size must not have the MSB set so that it is size 1. const _: () = assert!(MAX_STATIC_ACCOUNTS_PER_PACKET & 0b1000_0000 == 0); diff --git a/transaction-view/src/transaction_data.rs b/transaction-view/src/transaction_data.rs new file mode 100644 index 00000000000000..2bfe0c85ce0e55 --- /dev/null +++ b/transaction-view/src/transaction_data.rs @@ -0,0 +1,12 @@ +/// Trait for accessing transaction data from an abstract byte container. +pub trait TransactionData { + /// Returns a reference to the serialized transaction data. + fn data(&self) -> &[u8]; +} + +impl TransactionData for &[u8] { + #[inline] + fn data(&self) -> &[u8] { + self + } +} diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs index 17d92599a102ee..39fe9d5700fc5a 100644 --- a/transaction-view/src/transaction_meta.rs +++ b/transaction-view/src/transaction_meta.rs @@ -11,25 +11,25 @@ use { solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature}, }; -pub struct TransactionMeta { +pub(crate) struct TransactionMeta { /// Signature metadata. - pub(crate) signature: SignatureMeta, + signature: SignatureMeta, /// Message header metadata. - pub(crate) message_header: MessageHeaderMeta, + message_header: MessageHeaderMeta, /// Static account keys metadata. - pub(crate) static_account_keys: StaticAccountKeysMeta, + static_account_keys: StaticAccountKeysMeta, /// Recent blockhash offset. - pub(crate) recent_blockhash_offset: u16, + recent_blockhash_offset: u16, /// Instructions metadata. 
- pub(crate) instructions: InstructionsMeta, + instructions: InstructionsMeta, /// Address table lookup metadata. - pub(crate) address_table_lookup: AddressTableLookupMeta, + address_table_lookup: AddressTableLookupMeta, } impl TransactionMeta { /// Parse a serialized transaction and verify basic structure. /// The `bytes` parameter must have no trailing data. - pub fn try_new(bytes: &[u8]) -> Result { + pub(crate) fn try_new(bytes: &[u8]) -> Result { let mut offset = 0; let signature = SignatureMeta::try_new(bytes, &mut offset)?; let message_header = MessageHeaderMeta::try_new(bytes, &mut offset)?; @@ -66,44 +66,49 @@ impl TransactionMeta { } /// Return the number of signatures in the transaction. - pub fn num_signatures(&self) -> u8 { + pub(crate) fn num_signatures(&self) -> u8 { self.signature.num_signatures } /// Return the version of the transaction. - pub fn version(&self) -> TransactionVersion { + pub(crate) fn version(&self) -> TransactionVersion { self.message_header.version } /// Return the number of required signatures in the transaction. - pub fn num_required_signatures(&self) -> u8 { + pub(crate) fn num_required_signatures(&self) -> u8 { self.message_header.num_required_signatures } /// Return the number of readonly signed accounts in the transaction. - pub fn num_readonly_signed_accounts(&self) -> u8 { + pub(crate) fn num_readonly_signed_accounts(&self) -> u8 { self.message_header.num_readonly_signed_accounts } /// Return the number of readonly unsigned accounts in the transaction. - pub fn num_readonly_unsigned_accounts(&self) -> u8 { + pub(crate) fn num_readonly_unsigned_accounts(&self) -> u8 { self.message_header.num_readonly_unsigned_accounts } /// Return the number of static account keys in the transaction. - pub fn num_static_account_keys(&self) -> u8 { + pub(crate) fn num_static_account_keys(&self) -> u8 { self.static_account_keys.num_static_accounts } /// Return the number of instructions in the transaction. - pub fn num_instructions(&self) -> u16 { + pub(crate) fn num_instructions(&self) -> u16 { self.instructions.num_instructions } /// Return the number of address table lookups in the transaction. - pub fn num_address_table_lookups(&self) -> u8 { + pub(crate) fn num_address_table_lookups(&self) -> u8 { self.address_table_lookup.num_address_table_lookups } + + /// Return the offset to the message. + pub(crate) fn message_offset(&self) -> u16 { + self.message_header.offset + } } // Separate implementation for `unsafe` accessor methods. @@ -112,7 +117,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn signatures<'a>(&self, bytes: &'a [u8]) -> &'a [Signature] { + pub(crate) unsafe fn signatures<'a>(&self, bytes: &'a [u8]) -> &'a [Signature] { // Verify at compile time there are no alignment constraints. const _: () = assert!( core::mem::align_of::() == 1, @@ -144,7 +149,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn static_account_keys<'a>(&self, bytes: &'a [u8]) -> &'a [Pubkey] { + pub(crate) unsafe fn static_account_keys<'a>(&self, bytes: &'a [u8]) -> &'a [Pubkey] { // Verify at compile time there are no alignment constraints. const _: () = assert!(core::mem::align_of::() == 1, "Pubkey alignment"); // The length of the slice is not greater than isize::MAX. 
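The unsafe accessors being narrowed to pub(crate) here all share one zero-copy recipe: a compile-time assert that the element type has alignment 1, bounds checks done once at parse time in `TransactionMeta::try_new`, then a raw-pointer reinterpretation of the byte region. A minimal sketch of that recipe in isolation, with an illustrative `read_array` helper that is not part of this patch:

use core::{mem::size_of, slice};

/// Reinterpret `len` elements of type `T` starting at `offset` in `bytes`.
/// Only sound for types that are plain byte arrays (alignment 1, valid for
/// any bit pattern); that property is assumed here, as it is for `Signature`
/// and `Pubkey` in the accessors above.
fn read_array<T>(bytes: &[u8], offset: usize, len: usize) -> Option<&[T]> {
    // Mirror of the compile-time alignment asserts used by the accessors.
    assert_eq!(core::mem::align_of::<T>(), 1);
    let end = offset.checked_add(len.checked_mul(size_of::<T>())?)?;
    let region = bytes.get(offset..end)?;
    // SAFETY: `region` is in bounds and exactly `len * size_of::<T>()` bytes
    // long, `T` has alignment 1, and `T` is assumed valid for any bit
    // pattern, so the reinterpretation cannot produce invalid values.
    Some(unsafe { slice::from_raw_parts(region.as_ptr().cast::<T>(), len) })
}

Because those checks run once at construction, each accessor afterwards is a constant-time pointer cast rather than a deserialization.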
@@ -174,7 +179,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn recent_blockhash<'a>(&self, bytes: &'a [u8]) -> &'a Hash { + pub(crate) unsafe fn recent_blockhash<'a>(&self, bytes: &'a [u8]) -> &'a Hash { // Verify at compile time there are no alignment constraints. const _: () = assert!(core::mem::align_of::() == 1, "Hash alignment"); @@ -193,7 +198,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn instructions_iter<'a>(&self, bytes: &'a [u8]) -> InstructionsIterator<'a> { + pub(crate) unsafe fn instructions_iter<'a>(&self, bytes: &'a [u8]) -> InstructionsIterator<'a> { InstructionsIterator { bytes, offset: usize::from(self.instructions.offset), @@ -206,7 +211,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. - pub unsafe fn address_table_lookup_iter<'a>( + pub(crate) unsafe fn address_table_lookup_iter<'a>( &self, bytes: &'a [u8], ) -> AddressTableLookupIterator<'a> { diff --git a/transaction-view/src/transaction_view.rs b/transaction-view/src/transaction_view.rs new file mode 100644 index 00000000000000..6ee705e6c5560e --- /dev/null +++ b/transaction-view/src/transaction_view.rs @@ -0,0 +1,185 @@ +use { + crate::{ + address_table_lookup_meta::AddressTableLookupIterator, + instructions_meta::InstructionsIterator, message_header_meta::TransactionVersion, + result::Result, transaction_data::TransactionData, transaction_meta::TransactionMeta, + }, + solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature}, +}; + +/// A view into a serialized transaction. +/// +/// This struct provides access to the transaction data without +/// deserializing it. This is done by parsing and caching metadata +/// about the layout of the serialized transaction. +/// The owned `data` is abstracted through the `TransactionData` trait, +/// so that different containers for the serialized transaction can be used. +pub struct TransactionView { + data: D, + meta: TransactionMeta, +} + +impl TransactionView { + /// Creates a new `TransactionView` from the given serialized transaction data. + /// Returns an error if the data does not meet the expected format. + pub fn try_new(data: D) -> Result { + let meta = TransactionMeta::try_new(data.data())?; + Ok(Self { data, meta }) + } + + /// Return the number of signatures in the transaction. + pub fn num_signatures(&self) -> u8 { + self.meta.num_signatures() + } + + /// Return the version of the transaction. + pub fn version(&self) -> TransactionVersion { + self.meta.version() + } + + /// Return the number of required signatures in the transaction. + pub fn num_required_signatures(&self) -> u8 { + self.meta.num_required_signatures() + } + + /// Return the number of readonly signed accounts in the transaction. + pub fn num_readonly_signed_accounts(&self) -> u8 { + self.meta.num_readonly_signed_accounts() + } + + /// Return the number of readonly unsigned accounts in the transaction. + pub fn num_readonly_unsigned_accounts(&self) -> u8 { + self.meta.num_readonly_unsigned_accounts() + } + + /// Return the number of static account keys in the transaction. 
+ pub fn num_static_account_keys(&self) -> u8 { + self.meta.num_static_account_keys() + } + + /// Return the number of instructions in the transaction. + pub fn num_instructions(&self) -> u16 { + self.meta.num_instructions() + } + + /// Return the number of address table lookups in the transaction. + pub fn num_address_table_lookups(&self) -> u8 { + self.meta.num_address_table_lookups() + } + + /// Return the slice of signatures in the transaction. + pub fn signatures(&self) -> &[Signature] { + let data = self.data(); + // SAFETY: `meta` was created from `data`. + unsafe { self.meta.signatures(data) } + } + + /// Return the slice of static account keys in the transaction. + pub fn static_account_keys(&self) -> &[Pubkey] { + let data = self.data(); + // SAFETY: `meta` was created from `data`. + unsafe { self.meta.static_account_keys(data) } + } + + /// Return the recent blockhash in the transaction. + pub fn recent_blockhash(&self) -> &Hash { + let data = self.data(); + // SAFETY: `meta` was created from `data`. + unsafe { self.meta.recent_blockhash(data) } + } + + /// Return an iterator over the instructions in the transaction. + pub fn instructions_iter(&self) -> InstructionsIterator { + let data = self.data(); + // SAFETY: `meta` was created from `data`. + unsafe { self.meta.instructions_iter(data) } + } + + /// Return an iterator over the address table lookups in the transaction. + pub fn address_table_lookup_iter(&self) -> AddressTableLookupIterator { + let data = self.data(); + // SAFETY: `meta` was created from `data`. + unsafe { self.meta.address_table_lookup_iter(data) } + } + + /// Return the full serialized transaction data. + #[inline] + pub fn data(&self) -> &[u8] { + self.data.data() + } + + /// Return the serialized **message** data. + /// This does not include the signatures. + pub fn message_data(&self) -> &[u8] { + &self.data()[usize::from(self.meta.message_offset())..] + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + message::{Message, VersionedMessage}, + pubkey::Pubkey, + signature::Signature, + system_instruction::{self}, + transaction::VersionedTransaction, + }, + }; + + fn verify_transaction_view_meta(tx: &VersionedTransaction) { + let bytes = bincode::serialize(tx).unwrap(); + let view = TransactionView::try_new(bytes.as_ref()).unwrap(); + + assert_eq!(view.num_signatures(), tx.signatures.len() as u8); + + assert_eq!( + view.num_required_signatures(), + tx.message.header().num_required_signatures + ); + assert_eq!( + view.num_readonly_signed_accounts(), + tx.message.header().num_readonly_signed_accounts + ); + assert_eq!( + view.num_readonly_unsigned_accounts(), + tx.message.header().num_readonly_unsigned_accounts + ); + + assert_eq!( + view.num_static_account_keys(), + tx.message.static_account_keys().len() as u8 + ); + assert_eq!( + view.num_instructions(), + tx.message.instructions().len() as u16 + ); + assert_eq!( + view.num_address_table_lookups(), + tx.message + .address_table_lookups() + .map(|x| x.len() as u8) + .unwrap_or(0) + ); + } + + fn multiple_transfers() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. 
+ message: VersionedMessage::Legacy(Message::new( + &[ + system_instruction::transfer(&payer, &Pubkey::new_unique(), 1), + system_instruction::transfer(&payer, &Pubkey::new_unique(), 1), + ], + Some(&payer), + )), + } + } + + #[test] + fn test_multiple_transfers() { + verify_transaction_view_meta(&multiple_transfers()); + } +} From cffbe444ebc9ab26c4d8890c5ff572cd65b6f015 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Tue, 27 Aug 2024 07:51:58 -0700 Subject: [PATCH 229/529] svm: better testing for transaction batches (#2623) implement a rudimentary test framework for integration testing of transaction loading and execution --- Cargo.lock | 1 + svm/Cargo.toml | 1 + svm/tests/integration_test.rs | 861 +++++++++++++++++++++++----------- svm/tests/mock_bank.rs | 22 +- 4 files changed, 605 insertions(+), 280 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f543e85f1cb23f..ae84f0371cd5c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7720,6 +7720,7 @@ dependencies = [ "solana-timings", "solana-type-overrides", "solana-vote", + "test-case", "thiserror", ] diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 2627f4fbd8bf5a..994fc9d59d4a04 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -54,6 +54,7 @@ solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-svm = { path = ".", features = ["dev-context-only-utils"] } solana-svm-conformance = { workspace = true } +test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 317535f52cd361..37a4870812c2d8 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -1,27 +1,27 @@ #![cfg(test)] use { - crate::{ - mock_bank::{ - create_executable_environment, deploy_program, register_builtins, MockBankCallback, - MockForkGraph, - }, - transaction_builder::SanitizedTransactionBuilder, + crate::mock_bank::{ + create_executable_environment, deploy_program, program_address, register_builtins, + MockBankCallback, MockForkGraph, WALLCLOCK_TIME, }, solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - clock::Clock, + account::{AccountSharedData, WritableAccount}, + clock::Slot, hash::Hash, - instruction::AccountMeta, + instruction::{AccountMeta, Instruction}, + native_token::LAMPORTS_PER_SOL, pubkey::Pubkey, - signature::Signature, - sysvar::SysvarId, - transaction::{SanitizedTransaction, TransactionError}, + signature::Signer, + signer::keypair::Keypair, + system_instruction, system_program, system_transaction, + transaction::{SanitizedTransaction, Transaction, TransactionError}, + transaction_context::TransactionReturnData, }, solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, - transaction_processing_callback::TransactionProcessingCallback, - transaction_processing_result::TransactionProcessingResultExtensions, + transaction_execution_result::TransactionExecutionDetails, + transaction_processing_result::ProcessedTransaction, transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingConfig, TransactionProcessingEnvironment, @@ -29,213 +29,546 @@ use { }, solana_type_overrides::sync::{Arc, RwLock}, std::collections::{HashMap, HashSet}, + test_case::test_case, }; // This module contains the implementation of TransactionProcessingCallback mod mock_bank; -mod transaction_builder; const 
DEPLOYMENT_SLOT: u64 = 0; const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch +const LAMPORTS_PER_SIGNATURE: u64 = 5000; -fn prepare_transactions( - mock_bank: &MockBankCallback, -) -> (Vec, Vec) { - let mut transaction_builder = SanitizedTransactionBuilder::default(); - let mut all_transactions = Vec::new(); - let mut transaction_checks = Vec::new(); - - // A transaction that works without any account - let hello_program = deploy_program("hello-solana".to_string(), DEPLOYMENT_SLOT, mock_bank); - let fee_payer = Pubkey::new_unique(); - transaction_builder.create_instruction(hello_program, Vec::new(), HashMap::new(), Vec::new()); - - let sanitized_transaction = - transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique()), false); - - all_transactions.push(sanitized_transaction.unwrap()); - transaction_checks.push(Ok(CheckedTransactionDetails { - nonce: None, - lamports_per_signature: 20, - })); - - // The transaction fee payer must have enough funds - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(80000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(fee_payer, account_data); - - // A simple funds transfer between accounts - let transfer_program_account = - deploy_program("simple-transfer".to_string(), DEPLOYMENT_SLOT, mock_bank); - let sender = Pubkey::new_unique(); - let recipient = Pubkey::new_unique(); - let fee_payer = Pubkey::new_unique(); - let system_account = Pubkey::from([0u8; 32]); - - transaction_builder.create_instruction( - transfer_program_account, - vec![ - AccountMeta { - pubkey: sender, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: recipient, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: system_account, - is_signer: false, - is_writable: false, - }, - ], - HashMap::from([(sender, Signature::new_unique())]), - vec![0, 0, 0, 0, 0, 0, 0, 10], - ); +pub type AccountMap = HashMap; - let sanitized_transaction = - transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique()), true); - all_transactions.push(sanitized_transaction.unwrap()); - transaction_checks.push(Ok(CheckedTransactionDetails { - nonce: None, - lamports_per_signature: 20, - })); - - // Setting up the accounts for the transfer - - // fee payer - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(80000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(fee_payer, account_data); - - // sender - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(900000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(sender, account_data); - - // recipient - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(900000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(recipient, account_data); - - // The system account is set in `create_executable_environment` - - // A program that utilizes a Sysvar - let program_account = deploy_program("clock-sysvar".to_string(), DEPLOYMENT_SLOT, mock_bank); - let fee_payer = Pubkey::new_unique(); - transaction_builder.create_instruction(program_account, Vec::new(), HashMap::new(), Vec::new()); - - let sanitized_transaction = - transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique()), false); - - all_transactions.push(sanitized_transaction.unwrap()); - 
transaction_checks.push(Ok(CheckedTransactionDetails { - nonce: None, - lamports_per_signature: 20, - })); - - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(80000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(fee_payer, account_data); - - // A transaction that fails - let sender = Pubkey::new_unique(); - let recipient = Pubkey::new_unique(); - let fee_payer = Pubkey::new_unique(); - let system_account = Pubkey::new_from_array([0; 32]); - let data = 900050u64.to_be_bytes().to_vec(); - transaction_builder.create_instruction( - transfer_program_account, - vec![ - AccountMeta { - pubkey: sender, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: recipient, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: system_account, - is_signer: false, - is_writable: false, - }, - ], - HashMap::from([(sender, Signature::new_unique())]), - data, - ); +// container for a transaction batch and all data needed to run and verify it against svm +#[derive(Debug, Default)] +pub struct SvmTestEntry { + // programs to deploy to the new svm before transaction execution + pub initial_programs: Vec<(String, Slot)>, + + // accounts to deploy to the new svm before transaction execution + pub initial_accounts: AccountMap, + + // transactions to execute and transaction-specific checks to perform on the results from svm + pub transaction_batch: Vec, + + // expected final account states, checked after transaction execution + pub final_accounts: AccountMap, +} + +impl SvmTestEntry { + // add a new a rent-exempt account that exists before the batch + // inserts it into both account maps, assuming it lives unchanged (except for svm fixing rent epoch) + // rent-paying accounts must be added by hand because svm will not set rent epoch to u64::MAX + pub fn add_initial_account(&mut self, pubkey: Pubkey, account: &AccountSharedData) { + assert!(self + .initial_accounts + .insert(pubkey, account.clone()) + .is_none()); + + self.create_expected_account(pubkey, account); + } + + // add a new rent-exempt account that is created by the transaction + // inserts it only into the post account map + pub fn create_expected_account(&mut self, pubkey: Pubkey, account: &AccountSharedData) { + let mut account = account.clone(); + account.set_rent_epoch(u64::MAX); + + assert!(self.final_accounts.insert(pubkey, account).is_none()); + } + + // edit an existing account to reflect changes you expect the transaction to make to it + pub fn update_expected_account_data(&mut self, pubkey: Pubkey, account: &AccountSharedData) { + let mut account = account.clone(); + account.set_rent_epoch(u64::MAX); + + assert!(self.final_accounts.insert(pubkey, account).is_some()); + } + + // add lamports to an existing expected final account state + pub fn increase_expected_lamports(&mut self, pubkey: &Pubkey, lamports: u64) { + self.final_accounts + .get_mut(pubkey) + .unwrap() + .checked_add_lamports(lamports) + .unwrap(); + } + + // subtract lamports from an existing expected final account state + pub fn decrease_expected_lamports(&mut self, pubkey: &Pubkey, lamports: u64) { + self.final_accounts + .get_mut(pubkey) + .unwrap() + .checked_sub_lamports(lamports) + .unwrap(); + } + + // convenience function that adds a transaction that is expected to succeed + pub fn push_transaction(&mut self, transaction: Transaction) { + self.transaction_batch.push(TransactionBatchItem { + transaction, + ..TransactionBatchItem::default() + }); + } + + // convenience function that adds 
a transaction that is expected to execute but fail + pub fn push_failed_transaction(&mut self, transaction: Transaction) { + self.transaction_batch.push(TransactionBatchItem { + transaction, + asserts: TransactionBatchItemAsserts::failed(), + ..TransactionBatchItem::default() + }); + } + + // internal helper to gather SanitizedTransaction objects for execution + fn prepare_transactions(&self) -> (Vec<SanitizedTransaction>, Vec<TransactionCheckResult>) { + self.transaction_batch + .iter() + .cloned() + .map(|item| { + ( + SanitizedTransaction::from_transaction_for_tests(item.transaction), + item.check_result, + ) + }) + .unzip() + } + + // internal helper to gather test items for post-execution checks + fn asserts(&self) -> Vec<TransactionBatchItemAsserts> { + self.transaction_batch + .iter() + .cloned() + .map(|item| item.asserts) + .collect() + } +} + +// one transaction in a batch plus check results for svm and asserts for tests +#[derive(Clone, Debug)] +pub struct TransactionBatchItem { + pub transaction: Transaction, + pub check_result: TransactionCheckResult, + pub asserts: TransactionBatchItemAsserts, +} + +impl Default for TransactionBatchItem { + fn default() -> Self { + Self { + transaction: Transaction::default(), + check_result: Ok(CheckedTransactionDetails { + nonce: None, + lamports_per_signature: LAMPORTS_PER_SIGNATURE, + }), + asserts: TransactionBatchItemAsserts::succeeded(), + } + } +} + +// asserts for a given transaction in a batch +// we can automatically check whether it executed, whether it succeeded +// log items we expect to see (exact match only), and rodata +#[derive(Clone, Debug)] +pub struct TransactionBatchItemAsserts { + pub executed: bool, + pub succeeded: bool, + pub logs: Vec<String>, + pub return_data: ReturnDataAssert, +} - let sanitized_transaction = - transaction_builder.build(Hash::default(), (fee_payer, Signature::new_unique()), true); - all_transactions.push(sanitized_transaction.clone().unwrap()); - transaction_checks.push(Ok(CheckedTransactionDetails { - nonce: None, - lamports_per_signature: 20, - })); - - // fee payer - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(80000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(fee_payer, account_data); - - // Sender without enough funds - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(900000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(sender, account_data); - - // recipient - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(900000); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(recipient, account_data); - - // A transaction whose verification has already failed - all_transactions.push(sanitized_transaction.unwrap()); - transaction_checks.push(Err(TransactionError::BlockhashNotFound)); - - (all_transactions, transaction_checks) +impl TransactionBatchItemAsserts { + pub fn succeeded() -> Self { + Self { + executed: true, + succeeded: true, + logs: vec![], + return_data: ReturnDataAssert::Skip, + } + } + + pub fn failed() -> Self { + Self { + executed: true, + succeeded: false, + logs: vec![], + return_data: ReturnDataAssert::Skip, + } + } + + pub fn not_executed() -> Self { + Self { + executed: false, + succeeded: false, + logs: vec![], + return_data: ReturnDataAssert::Skip, + } + } + + pub fn check_executed_transaction(&self, execution_details: &TransactionExecutionDetails) { + assert!(self.executed); + assert_eq!(self.succeeded, execution_details.status.is_ok()); + + if !self.logs.is_empty() { + let actual_logs =
execution_details.log_messages.as_ref().unwrap(); + for expected_log in &self.logs { + assert!(actual_logs.contains(expected_log)); + } + } + + if self.return_data != ReturnDataAssert::Skip { + assert_eq!( + self.return_data, + execution_details.return_data.clone().into() + ); + } + } } -#[test] -fn svm_integration() { +#[derive(Clone, Debug, Default, PartialEq)] +pub enum ReturnDataAssert { + Some(TransactionReturnData), + None, + #[default] + Skip, +} + +impl From> for ReturnDataAssert { + fn from(option_ro_data: Option) -> Self { + match option_ro_data { + Some(ro_data) => Self::Some(ro_data), + None => Self::None, + } + } +} + +fn program_medley() -> Vec { + let mut test_entry = SvmTestEntry::default(); + + // 0: A transaction that works without any account + { + let program_name = "hello-solana".to_string(); + let program_id = program_address(&program_name); + test_entry + .initial_programs + .push((program_name, DEPLOYMENT_SLOT)); + + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + let instruction = Instruction::new_with_bytes(program_id, &[], vec![]); + test_entry.push_transaction(Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + )); + + test_entry.transaction_batch[0] + .asserts + .logs + .push("Program log: Hello, Solana!".to_string()); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + } + + // 1: A simple funds transfer between accounts + { + let program_name = "simple-transfer".to_string(); + let program_id = program_address(&program_name); + test_entry + .initial_programs + .push((program_name, DEPLOYMENT_SLOT)); + + let fee_payer_keypair = Keypair::new(); + let sender_keypair = Keypair::new(); + + let fee_payer = fee_payer_keypair.pubkey(); + let sender = sender_keypair.pubkey(); + let recipient = Pubkey::new_unique(); + + let transfer_amount = 10; + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + let mut sender_data = AccountSharedData::default(); + sender_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(sender, &sender_data); + + let mut recipient_data = AccountSharedData::default(); + recipient_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(recipient, &recipient_data); + + let instruction = Instruction::new_with_bytes( + program_id, + &u64::to_be_bytes(transfer_amount), + vec![ + AccountMeta::new(sender, true), + AccountMeta::new(recipient, false), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + test_entry.push_transaction(Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair, &sender_keypair], + Hash::default(), + )); + + test_entry.increase_expected_lamports(&recipient, transfer_amount); + test_entry.decrease_expected_lamports(&sender, transfer_amount); + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE * 2); + } + + // 2: A program that utilizes a Sysvar + { + let program_name = "clock-sysvar".to_string(); + let program_id = program_address(&program_name); + test_entry + .initial_programs + .push((program_name, DEPLOYMENT_SLOT)); + + let fee_payer_keypair = Keypair::new(); + let fee_payer = 
fee_payer_keypair.pubkey(); + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + let instruction = Instruction::new_with_bytes(program_id, &[], vec![]); + test_entry.push_transaction(Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + )); + + let ro_data = TransactionReturnData { + program_id, + data: i64::to_be_bytes(WALLCLOCK_TIME).to_vec(), + }; + test_entry.transaction_batch[2].asserts.return_data = ReturnDataAssert::Some(ro_data); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + } + + // 3: A transaction that fails + { + let program_id = program_address("simple-transfer"); + + let fee_payer_keypair = Keypair::new(); + let sender_keypair = Keypair::new(); + + let fee_payer = fee_payer_keypair.pubkey(); + let sender = sender_keypair.pubkey(); + let recipient = Pubkey::new_unique(); + + let base_amount = 900_000; + let transfer_amount = base_amount + 50; + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + let mut sender_data = AccountSharedData::default(); + sender_data.set_lamports(base_amount); + test_entry.add_initial_account(sender, &sender_data); + + let mut recipient_data = AccountSharedData::default(); + recipient_data.set_lamports(base_amount); + test_entry.add_initial_account(recipient, &recipient_data); + + let instruction = Instruction::new_with_bytes( + program_id, + &u64::to_be_bytes(transfer_amount), + vec![ + AccountMeta::new(sender, true), + AccountMeta::new(recipient, false), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + test_entry.push_failed_transaction(Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair, &sender_keypair], + Hash::default(), + )); + + test_entry.transaction_batch[3] + .asserts + .logs + .push("Transfer: insufficient lamports 900000, need 900050".to_string()); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE * 2); + } + + // 4: A transaction whose verification has already failed + { + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + + test_entry.transaction_batch.push(TransactionBatchItem { + transaction: Transaction::new_signed_with_payer( + &[], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + ), + check_result: Err(TransactionError::BlockhashNotFound), + asserts: TransactionBatchItemAsserts::not_executed(), + }); + } + + vec![test_entry] +} + +fn simple_transfer() -> Vec { + let mut test_entry = SvmTestEntry::default(); + let transfer_amount = LAMPORTS_PER_SOL; + + // 0: a transfer that succeeds + { + let source_keypair = Keypair::new(); + let source = source_keypair.pubkey(); + let destination = Pubkey::new_unique(); + + let mut source_data = AccountSharedData::default(); + let mut destination_data = AccountSharedData::default(); + + source_data.set_lamports(LAMPORTS_PER_SOL * 10); + test_entry.add_initial_account(source, &source_data); + + test_entry.push_transaction(system_transaction::transfer( + &source_keypair, + &destination, + transfer_amount, + Hash::default(), + )); + + destination_data + .checked_add_lamports(transfer_amount) + .unwrap(); + test_entry.create_expected_account(destination, &destination_data); + + 
test_entry.decrease_expected_lamports(&source, transfer_amount + LAMPORTS_PER_SIGNATURE); + } + + // 1: an executable transfer that fails + { + let source_keypair = Keypair::new(); + let source = source_keypair.pubkey(); + + let mut source_data = AccountSharedData::default(); + + source_data.set_lamports(transfer_amount - 1); + test_entry.add_initial_account(source, &source_data); + + test_entry.push_failed_transaction(system_transaction::transfer( + &source_keypair, + &Pubkey::new_unique(), + transfer_amount, + Hash::default(), + )); + + test_entry.decrease_expected_lamports(&source, LAMPORTS_PER_SIGNATURE); + } + + // 2: a non-executable transfer that fails before loading + { + test_entry.transaction_batch.push(TransactionBatchItem { + transaction: system_transaction::transfer( + &Keypair::new(), + &Pubkey::new_unique(), + transfer_amount, + Hash::default(), + ), + check_result: Err(TransactionError::BlockhashNotFound), + asserts: TransactionBatchItemAsserts::not_executed(), + }); + } + + // 3: a non-executable transfer that fails loading the fee-payer + // NOTE when we support the processed/executed distinction, this is NOT processed + { + test_entry.transaction_batch.push(TransactionBatchItem { + transaction: system_transaction::transfer( + &Keypair::new(), + &Pubkey::new_unique(), + transfer_amount, + Hash::default(), + ), + asserts: TransactionBatchItemAsserts::not_executed(), + ..TransactionBatchItem::default() + }); + } + + // 4: a non-executable transfer that fails loading the program + // NOTE when we support the processed/executed distinction, this IS processed + // thus this test case will fail with the feature enabled + { + let source_keypair = Keypair::new(); + let source = source_keypair.pubkey(); + + let mut source_data = AccountSharedData::default(); + + source_data.set_lamports(transfer_amount * 10); + test_entry + .initial_accounts + .insert(source, source_data.clone()); + test_entry.final_accounts.insert(source, source_data); + + let mut instruction = + system_instruction::transfer(&source, &Pubkey::new_unique(), transfer_amount); + instruction.program_id = Pubkey::new_unique(); + + test_entry.transaction_batch.push(TransactionBatchItem { + transaction: Transaction::new_signed_with_payer( + &[instruction], + Some(&source), + &[&source_keypair], + Hash::default(), + ), + asserts: TransactionBatchItemAsserts::not_executed(), + ..TransactionBatchItem::default() + }); + } + + vec![test_entry] +} + +#[test_case(program_medley())] +#[test_case(simple_transfer())] +fn svm_integration(test_entries: Vec) { + for test_entry in test_entries { + execute_test_entry(test_entry); + } +} + +fn execute_test_entry(test_entry: SvmTestEntry) { let mock_bank = MockBankCallback::default(); - let (transactions, check_results) = prepare_transactions(&mock_bank); + + for (name, slot) in &test_entry.initial_programs { + deploy_program(name.to_string(), *slot, &mock_bank); + } + + for (pubkey, account) in &test_entry.initial_accounts { + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(*pubkey, account.clone()); + } + let batch_processor = TransactionBatchProcessor::::new( EXECUTION_SLOT, EXECUTION_EPOCH, @@ -263,7 +596,9 @@ fn svm_integration() { ..Default::default() }; - let result = batch_processor.load_and_execute_sanitized_transactions( + // execute transaction batch + let (transactions, check_results) = test_entry.prepare_transactions(); + let batch_output = batch_processor.load_and_execute_sanitized_transactions( &mock_bank, &transactions, check_results, @@ -271,68 +606,52 
@@ fn svm_integration() { &processing_config, ); - assert_eq!(result.processing_results.len(), 5); - - let executed_tx_0 = result.processing_results[0] - .processed_transaction() - .unwrap() - .executed_transaction() - .unwrap(); - assert!(executed_tx_0.was_successful()); - let logs = executed_tx_0 - .execution_details - .log_messages - .as_ref() - .unwrap(); - assert!(logs.contains(&"Program log: Hello, Solana!".to_string())); - - let executed_tx_1 = result.processing_results[1] - .processed_transaction() - .unwrap() - .executed_transaction() - .unwrap(); - assert!(executed_tx_1.was_successful()); - - // The SVM does not commit the account changes in MockBank - let recipient_key = transactions[1].message().account_keys()[2]; - let recipient_data = executed_tx_1 - .loaded_transaction - .accounts + // build a hashmap of final account states incrementally, starting with all initial states, updating to all final states + // NOTE with SIMD-83 an account may appear multiple times in the same batch + let mut final_accounts_actual = test_entry.initial_accounts.clone(); + for processed_transaction in batch_output + .processing_results + .iter() + .filter_map(|r| r.as_ref().ok()) + { + match processed_transaction { + ProcessedTransaction::Executed(executed_transaction) => { + for (pubkey, account_data) in + executed_transaction.loaded_transaction.accounts.clone() + { + final_accounts_actual.insert(pubkey, account_data); + } + } + // NOTE this is a possible state with `feature_set::enable_transaction_loading_failure_fees` enabled + // by using `TransactionProcessingEnvironment::default()` we have all features disabled + // in other words, this will be unreachable until we are ready to test fee-only transactions + // (or the feature is activated on mainnet and removed... but we should do it before then!) 
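+            // for context: a fee-only transaction would charge the fee payer and advance its
+            // nonce without touching any other account, so handling it here would just mean
+            // applying the fee debit to `final_accounts_actual` (a sketch of the expected
+            // behavior, not of code in this patch)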
+ ProcessedTransaction::FeesOnly(_) => unreachable!(), + } + } + + // check that all the account states we care about are present and correct + for (pubkey, expected_account_data) in test_entry.final_accounts.iter() { + let actual_account_data = final_accounts_actual.get(pubkey); + assert_eq!( + Some(expected_account_data), + actual_account_data, + "mismatch on account {}", + pubkey + ); + } + + // now run our transaction-by-transaction checks + for (processing_result, test_item_asserts) in batch_output + .processing_results .iter() - .find(|key| key.0 == recipient_key) - .unwrap(); - assert_eq!(recipient_data.1.lamports(), 900010); - - let executed_tx_2 = result.processing_results[2] - .processed_transaction() - .unwrap() - .executed_transaction() - .unwrap(); - let return_data = executed_tx_2 - .execution_details - .return_data - .as_ref() - .unwrap(); - let time = i64::from_be_bytes(return_data.data[0..8].try_into().unwrap()); - let clock_data = mock_bank.get_account_shared_data(&Clock::id()).unwrap(); - let clock_info: Clock = bincode::deserialize(clock_data.data()).unwrap(); - assert_eq!(clock_info.unix_timestamp, time); - - let executed_tx_3 = result.processing_results[3] - .processed_transaction() - .unwrap() - .executed_transaction() - .unwrap(); - assert!(executed_tx_3.execution_details.status.is_err()); - assert!(executed_tx_3 - .execution_details - .log_messages - .as_ref() - .unwrap() - .contains(&"Transfer: insufficient lamports 900000, need 900050".to_string())); - - assert!(matches!( - result.processing_results[4], - Err(TransactionError::BlockhashNotFound) - )); + .zip(test_entry.asserts()) + { + match processing_result { + Ok(ProcessedTransaction::Executed(executed_transaction)) => test_item_asserts + .check_executed_transaction(&executed_transaction.execution_details), + Ok(ProcessedTransaction::FeesOnly(_)) => unreachable!(), + Err(_) => assert!(!test_item_asserts.executed), + } + } } diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 99e9b9162067f2..ee6a6692a7b0fe 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -35,10 +35,11 @@ use { env, fs::{self, File}, io::Read, - time::{SystemTime, UNIX_EPOCH}, }, }; +pub const WALLCLOCK_TIME: i64 = 1704067200; // Arbitrarily Jan 1, 2024 + pub struct MockForkGraph {} impl ForkGraph for MockForkGraph { @@ -111,10 +112,16 @@ fn load_program(name: String) -> Vec { buffer } +#[allow(unused)] +pub fn program_address(program_name: &str) -> Pubkey { + Pubkey::create_with_seed(&Pubkey::default(), program_name, &Pubkey::default()).unwrap() +} + #[allow(unused)] pub fn deploy_program(name: String, deployment_slot: Slot, mock_bank: &MockBankCallback) -> Pubkey { - let program_account = Pubkey::new_unique(); - let program_data_account = Pubkey::new_unique(); + let program_account = program_address(&name); + let program_data_account = bpf_loader_upgradeable::get_program_data_address(&program_account); + let state = UpgradeableLoaderState::Program { programdata_address: program_data_account, }; @@ -124,6 +131,7 @@ pub fn deploy_program(name: String, deployment_slot: Slot, mock_bank: &MockBankC account_data.set_data(bincode::serialize(&state).unwrap()); account_data.set_lamports(25); account_data.set_owner(bpf_loader_upgradeable::id()); + account_data.set_executable(true); mock_bank .account_shared_data .write() @@ -177,16 +185,12 @@ pub fn create_executable_environment( program_cache.fork_graph = Some(Arc::downgrade(&fork_graph)); // We must fill in the sysvar cache entries - let time_now = SystemTime::now() - 
.duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_secs() as i64; let clock = Clock { slot: DEPLOYMENT_SLOT, - epoch_start_timestamp: time_now.saturating_sub(10) as UnixTimestamp, + epoch_start_timestamp: WALLCLOCK_TIME.saturating_sub(10) as UnixTimestamp, epoch: DEPLOYMENT_EPOCH, leader_schedule_epoch: DEPLOYMENT_EPOCH, - unix_timestamp: time_now as UnixTimestamp, + unix_timestamp: WALLCLOCK_TIME as UnixTimestamp, }; let mut account_data = AccountSharedData::default(); From eff8961dcf7880aa7e438463203611a8a0088554 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 27 Aug 2024 23:07:26 +0800 Subject: [PATCH 230/529] Fix flaky fuzzer test (#2749) --- Cargo.lock | 1 + runtime/Cargo.toml | 1 + runtime/src/bank/tests.rs | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index ae84f0371cd5c8..a85583c01b9111 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7312,6 +7312,7 @@ dependencies = [ name = "solana-runtime" version = "2.1.0" dependencies = [ + "agave-transaction-view", "aquamarine", "arrayref", "assert_matches", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 3e3573f0eeb9d8..9afeea8c469f02 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -94,6 +94,7 @@ crate-type = ["lib"] name = "solana_runtime" [dev-dependencies] +agave-transaction-view = { workspace = true } assert_matches = { workspace = true } ed25519-dalek = { workspace = true } libsecp256k1 = { workspace = true } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 34be9d6aab66a8..184c3ec9a55033 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -16,6 +16,7 @@ use { snapshot_bank_utils, snapshot_utils, status_cache::MAX_CACHE_ENTRIES, }, + agave_transaction_view::static_account_keys_meta::MAX_STATIC_ACCOUNTS_PER_PACKET, assert_matches::assert_matches, crossbeam_channel::{bounded, unbounded}, itertools::Itertools, @@ -6192,7 +6193,7 @@ fn test_fuzz_instructions() { }) .collect(); let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); - let max_keys = 100; + let max_keys = MAX_STATIC_ACCOUNTS_PER_PACKET; let keys: Vec<_> = (0..max_keys) .enumerate() .map(|_| { From b9623a084693e01c22a14aa16ca9b872af28c985 Mon Sep 17 00:00:00 2001 From: asolana <110843012+ksolana@users.noreply.github.com> Date: Tue, 27 Aug 2024 11:03:13 -0700 Subject: [PATCH 231/529] Upgrade zstd to 0.13.2 as current version is quite old (#2730) Upgrade zstd to 0.13 as current version is quite old It also uses the newer version of zstd library (1.5.6 vs 1.5.2) --- Cargo.lock | 15 +++++++-------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 15 +++++++-------- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a85583c01b9111..73acd4996778b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9973,29 +9973,28 @@ dependencies = [ [[package]] name = "zstd" -version = "0.11.2+zstd.1.5.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "5.0.1+zstd.1.5.2" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c12659121420dd6365c5c3de4901f97145b79651fb1d25814020ed2ed0585ae" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ - "libc", "zstd-sys", ] [[package]] 
name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", - "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 37c2dbf9b6e504..4b7f35a6ce2a4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -500,7 +500,7 @@ winreg = "0.50" x509-parser = "0.14.0" # See "zeroize versioning issues" below if you are updating this version. zeroize = { version = "1.3", default-features = false } -zstd = "0.11.2" +zstd = "0.13.2" [patch.crates-io] # for details, see https://github.com/anza-xyz/crossbeam/commit/fd279d707025f0e60951e429bf778b4813d1b6bf diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e70ba341f05492..1246b51366c09f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -8332,29 +8332,28 @@ dependencies = [ [[package]] name = "zstd" -version = "0.11.2+zstd.1.5.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "5.0.1+zstd.1.5.2" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c12659121420dd6365c5c3de4901f97145b79651fb1d25814020ed2ed0585ae" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ - "libc", "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", - "libc", + "pkg-config", ] From 7b6e6c179fd035b10e8702a0ed80ce9bf9bdb35e Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 27 Aug 2024 14:55:47 -0400 Subject: [PATCH 232/529] gossip: demote invalid duplicate proof errors to info (#2754) * gossip: demote invalid duplicate proof errors to info * pr feedback: explicitly list every enum --- gossip/src/duplicate_shred.rs | 27 +++++++++++++++++++++++++++ gossip/src/duplicate_shred_handler.rs | 8 +++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index ad8c8cc6eb484c..a6d49f104fadea 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -90,6 +90,33 @@ pub enum Error { UnknownSlotLeader(Slot), } +impl Error { + /// Errors indicating that the initial node submitted an invalid duplicate proof case + pub(crate) fn is_non_critical(&self) -> bool { + match self { + Self::SlotMismatch + | Self::InvalidShredVersion(_) + | Self::InvalidSignature + | Self::ShredTypeMismatch + | Self::InvalidDuplicateShreds + | Self::InvalidLastIndexConflict + | Self::InvalidErasureMetaConflict => true, + Self::BlockstoreInsertFailed(_) + | Self::DataChunkMismatch + | Self::DuplicateSlotSenderFailure + | Self::InvalidChunkIndex { .. 
} + | Self::InvalidDuplicateSlotProof + | Self::InvalidSizeLimit + | Self::InvalidShred(_) + | Self::NumChunksMismatch + | Self::MissingDataChunk + | Self::SerializationError(_) + | Self::TryFromIntError(_) + | Self::UnknownSlotLeader(_) => false, + } + } +} + /// Check that `shred1` and `shred2` indicate a valid duplicate proof /// - Must be for the same slot /// - Must match the expected shred version diff --git a/gossip/src/duplicate_shred_handler.rs b/gossip/src/duplicate_shred_handler.rs index 84134d76f241bd..9095c6f1fc75e0 100644 --- a/gossip/src/duplicate_shred_handler.rs +++ b/gossip/src/duplicate_shred_handler.rs @@ -56,8 +56,14 @@ impl DuplicateShredHandlerTrait for DuplicateShredHandler { fn handle(&mut self, shred_data: DuplicateShred) { self.cache_root_info(); self.maybe_prune_buffer(); + let slot = shred_data.slot; + let pubkey = shred_data.from; if let Err(error) = self.handle_shred_data(shred_data) { - error!("handle packet: {error:?}") + if error.is_non_critical() { + info!("Received invalid duplicate shred proof from {pubkey} for slot {slot}: {error:?}"); + } else { + error!("Unable to process duplicate shred proof from {pubkey} for slot {slot}: {error:?}"); + } } } } From 1bf1916c6672c46a5b820f1e91f20d3baf1ae498 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 27 Aug 2024 15:56:46 -0500 Subject: [PATCH 233/529] ancient packing iterates serially instead of parallel (#2748) --- accounts-db/src/ancient_append_vecs.rs | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 6af2ae647bd25c..5016f1cf9df1fd 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -739,7 +739,7 @@ impl AccountsDb { /// 'accounts_per_storage' should be sorted by slot fn calc_accounts_to_combine<'a>( &self, - accounts_per_storage: &'a mut Vec<(&'a SlotInfo, GetUniqueAccountsResult)>, + accounts_per_storage: &'a mut [(&'a SlotInfo, GetUniqueAccountsResult)], tuning: &PackedAncientStorageTuning, alive_bytes: u64, mut many_ref_slots: IncludeManyRefSlots, @@ -752,18 +752,17 @@ impl AccountsDb { // `shrink_collect` all accounts in the append vecs we want to combine. // This also unrefs all dead accounts in those append vecs. - let mut accounts_to_combine = self.thread_pool_clean.install(|| { - accounts_per_storage - .par_iter() - .map(|(info, unique_accounts)| { - self.shrink_collect::>( - &info.storage, - unique_accounts, - &self.shrink_ancient_stats.shrink_stats, - ) - }) - .collect::>() - }); + // This needs to serially iterate largest to smallest slot so that we unref older dead slots after we have visited the newer alive slots. 
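+        // e.g. an account rewritten in a newer slot leaves a dead copy behind in an older
+        // slot; visiting the newer slot first means the live copy has been counted before
+        // the older dead copy is unreffed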
+ let mut accounts_to_combine = accounts_per_storage + .iter() + .map(|(info, unique_accounts)| { + self.shrink_collect::>( + &info.storage, + unique_accounts, + &self.shrink_ancient_stats.shrink_stats, + ) + }) + .collect::>(); let mut many_refs_old_alive_count = 0; From fff6257b0d0ed2c929fb31c20413314b0a2f0b96 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 27 Aug 2024 15:58:52 -0500 Subject: [PATCH 234/529] use correct alive bytes in ancient pack (#2747) * use correct alive bytes in ancient pack * remove log --- accounts-db/src/ancient_append_vecs.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 5016f1cf9df1fd..67cdeb223d4e3a 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -410,7 +410,6 @@ impl AccountsDb { let mut accounts_to_combine = self.calc_accounts_to_combine( &mut accounts_per_storage, &tuning, - ancient_slot_infos.total_alive_bytes_shrink.0, IncludeManyRefSlots::Skip, ); metrics.unpackable_slots_count += accounts_to_combine.unpackable_slots_count; @@ -741,9 +740,13 @@ impl AccountsDb { &self, accounts_per_storage: &'a mut [(&'a SlotInfo, GetUniqueAccountsResult)], tuning: &PackedAncientStorageTuning, - alive_bytes: u64, mut many_ref_slots: IncludeManyRefSlots, ) -> AccountsToCombine<'a> { + let alive_bytes = accounts_per_storage + .iter() + .map(|a| a.0.alive_bytes) + .sum::(); + // reverse sort by slot # accounts_per_storage.sort_unstable_by(|a, b| b.0.slot.cmp(&a.0.slot)); let mut accounts_keep_slots = HashMap::default(); @@ -1618,11 +1621,9 @@ pub mod tests { ) .collect::>(); - let alive_bytes = 1000; let accounts_to_combine = db.calc_accounts_to_combine( &mut accounts_per_storage, &default_tuning(), - alive_bytes, IncludeManyRefSlots::Include, ); let mut stats = ShrinkStatsSub::default(); @@ -1724,6 +1725,11 @@ pub mod tests { for two_refs in [false, true] { let (db, mut storages, _slots, mut infos) = get_sample_storages(num_slots, None); + + infos.iter_mut().for_each(|a| { + a.alive_bytes += alive_bytes_per_slot; + }); + if unsorted_slots { storages = storages.into_iter().rev().collect(); infos = infos.into_iter().rev().collect(); @@ -1753,11 +1759,9 @@ pub mod tests { .zip(original_results.into_iter()) .collect::>(); - let alive_bytes = num_slots as u64 * alive_bytes_per_slot; let accounts_to_combine = db.calc_accounts_to_combine( &mut accounts_per_storage, &tuning, - alive_bytes, many_ref_slots, ); let mut expected_accounts_to_combine = num_slots; @@ -1803,6 +1807,10 @@ pub mod tests { for two_refs in [false, true] { let (db, mut storages, slots, mut infos) = get_sample_storages(num_slots, None); + infos.iter_mut().for_each(|a| { + a.alive_bytes += 1; + }); + let slots_vec; if unsorted_slots { slots_vec = slots.rev().collect::>(); @@ -1857,11 +1865,9 @@ pub mod tests { .zip(original_results.into_iter()) .collect::>(); - let alive_bytes = num_slots; let accounts_to_combine = db.calc_accounts_to_combine( &mut accounts_per_storage, &default_tuning(), - alive_bytes as u64, many_ref_slots, ); assert_eq!( @@ -2024,11 +2030,9 @@ pub mod tests { .zip(original_results.into_iter()) .collect::>(); - let alive_bytes = 1000; // just something let accounts_to_combine = db.calc_accounts_to_combine( &mut accounts_per_storage, &default_tuning(), - alive_bytes, IncludeManyRefSlots::Include, ); let slots_vec = slots.collect::>(); @@ -2214,11 +2218,9 @@ pub mod tests { 
.zip(original_results.into_iter()) .collect::>(); - let alive_bytes = 0; // just something let accounts_to_combine = db.calc_accounts_to_combine( &mut accounts_per_storage, &default_tuning(), - alive_bytes, IncludeManyRefSlots::Include, ); let slots_vec = slots.collect::>(); From d53152835046f544310f0ef53fa6719bda8387e6 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 28 Aug 2024 19:18:01 +0700 Subject: [PATCH 235/529] vote: make VoteAccount::try_from deserialize in place (#2734) * vote: add VoteAccount::try_from bench * vote: make VoteAccount::try_from deserialize in place There were two issues with the previous implementation: - VoteState::deserialize is slow because serde is slow - Arc::new(VoteAccountInner::try_from(account)) is not able to allocate inner in place but is making a copy: mov edx, 1816 mov rdi, rax call qword ptr [rip + memcpy@GOTPCREL] The new implementation fixes both issues by using VoteState::deserialize_into_uninit() and by manually building VoteAccountInner in place inside the Arc allocation. before: test bench_vote_account_try_from ... bench: 5,320.65 ns/iter (+/- 1,077.39) after: test bench_vote_account_try_from ... bench: 383.44 ns/iter (+/- 156.15) * vote_account: add try_from failure tests --- vote/benches/vote_account.rs | 52 +++++++++++++++++++++++ vote/src/vote_account.rs | 80 +++++++++++++++++++++++++++++------- 2 files changed, 117 insertions(+), 15 deletions(-) create mode 100644 vote/benches/vote_account.rs diff --git a/vote/benches/vote_account.rs b/vote/benches/vote_account.rs new file mode 100644 index 00000000000000..34d3de33f5dc0d --- /dev/null +++ b/vote/benches/vote_account.rs @@ -0,0 +1,52 @@ +#![feature(test)] +extern crate test; + +use { + rand::Rng, + solana_sdk::{ + account::AccountSharedData, + pubkey::Pubkey, + vote::state::{VoteInit, VoteState, VoteStateVersions}, + }, + solana_vote::vote_account::VoteAccount, + test::Bencher, +}; + +fn new_rand_vote_account( + rng: &mut R, + node_pubkey: Option, +) -> (AccountSharedData, VoteState) { + let vote_init = VoteInit { + node_pubkey: node_pubkey.unwrap_or_else(Pubkey::new_unique), + authorized_voter: Pubkey::new_unique(), + authorized_withdrawer: Pubkey::new_unique(), + commission: rng.gen(), + }; + let clock = solana_sdk::sysvar::clock::Clock { + slot: rng.gen(), + epoch_start_timestamp: rng.gen(), + epoch: rng.gen(), + leader_schedule_epoch: rng.gen(), + unix_timestamp: rng.gen(), + }; + let vote_state = VoteState::new(&vote_init, &clock); + let account = AccountSharedData::new_data( + rng.gen(), // lamports + &VoteStateVersions::new_current(vote_state.clone()), + &solana_sdk::vote::program::id(), // owner + ) + .unwrap(); + (account, vote_state) +} + +#[bench] +fn bench_vote_account_try_from(b: &mut Bencher) { + let mut rng = rand::thread_rng(); + let (account, vote_state) = new_rand_vote_account(&mut rng, None); + + b.iter(|| { + let vote_account = VoteAccount::try_from(account.clone()).unwrap(); + let state = vote_account.vote_state(); + assert_eq!(state, &vote_state); + }); +} diff --git a/vote/src/vote_account.rs b/vote/src/vote_account.rs index 8155d1540f04e7..7b01606499fe95 100644 --- a/vote/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -15,7 +15,8 @@ use { collections::{hash_map::Entry, HashMap}, fmt, iter::FromIterator, - mem, + mem::{self, MaybeUninit}, + ptr::addr_of_mut, sync::{Arc, OnceLock}, }, thiserror::Error, @@ -309,23 +310,53 @@ impl From for AccountSharedData { } impl TryFrom for VoteAccount { - type Error = Error; - fn try_from(account: 
AccountSharedData) -> Result { - let vote_account = VoteAccountInner::try_from(account)?; - Ok(Self(Arc::new(vote_account))) - } -} - -impl TryFrom for VoteAccountInner { type Error = Error; fn try_from(account: AccountSharedData) -> Result { if !solana_sdk::vote::program::check_id(account.owner()) { return Err(Error::InvalidOwner(*account.owner())); } - Ok(Self { - vote_state: VoteState::deserialize(account.data()).map_err(Error::InstructionError)?, - account, - }) + + // Allocate as Arc> so we can initialize in place. + let mut inner = Arc::new(MaybeUninit::::uninit()); + let inner_ptr = Arc::get_mut(&mut inner) + .expect("we're the only ref") + .as_mut_ptr(); + + // Safety: + // - All the addr_of_mut!(...).write(...) calls are valid since we just allocated and so + // the field pointers are valid. + // - We use write() so that the old values aren't dropped since they're still + // uninitialized. + unsafe { + let vote_state = addr_of_mut!((*inner_ptr).vote_state); + // Safety: + // - vote_state is non-null and MaybeUninit is guaranteed to have same layout + // and alignment as VoteState. + // - Here it is safe to create a reference to MaybeUninit since the value is + // aligned and MaybeUninit is valid for all possible bit values. + let vote_state = &mut *(vote_state as *mut MaybeUninit); + + // Try to deserialize in place + if let Err(e) = VoteState::deserialize_into_uninit(account.data(), vote_state) { + // Safety: + // - Deserialization failed so at this point vote_state is uninitialized and must + // not be dropped. We're ok since `vote_state` is a subfield of `inner` which is + // still MaybeUninit - which isn't dropped by definition - and so neither are its + // subfields. + return Err(e.into()); + } + + // Write the account field which completes the initialization of VoteAccountInner. + addr_of_mut!((*inner_ptr).account).write(account); + + // Safety: + // - At this point both `inner.vote_state` and `inner.account`` are initialized, so it's safe to + // transmute the MaybeUninit to VoteAccountInner. 
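+            // (`MaybeUninit<T>` is `#[repr(transparent)]`, so
+            // `Arc<MaybeUninit<VoteAccountInner>>` and `Arc<VoteAccountInner>`
+            // share a single layout; that is what makes transmuting the `Arc`
+            // itself sound once every field has been written.)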
+ Ok(VoteAccount(mem::transmute::< + Arc>, + Arc, + >(inner))) + } } } @@ -442,6 +473,7 @@ mod tests { bincode::Options, rand::Rng, solana_sdk::{ + account::WritableAccount, pubkey::Pubkey, sysvar::clock::Clock, vote::state::{VoteInit, VoteStateVersions}, @@ -508,13 +540,31 @@ mod tests { } #[test] - fn test_vote_account() { + fn test_vote_account_try_from() { let mut rng = rand::thread_rng(); let (account, vote_state) = new_rand_vote_account(&mut rng, None); let lamports = account.lamports(); - let vote_account = VoteAccount::try_from(account).unwrap(); + let vote_account = VoteAccount::try_from(account.clone()).unwrap(); assert_eq!(lamports, vote_account.lamports()); assert_eq!(vote_state, *vote_account.vote_state()); + assert_eq!(&account, vote_account.account()); + } + + #[test] + #[should_panic(expected = "InvalidOwner")] + fn test_vote_account_try_from_invalid_owner() { + let mut rng = rand::thread_rng(); + let (mut account, _) = new_rand_vote_account(&mut rng, None); + account.set_owner(Pubkey::new_unique()); + VoteAccount::try_from(account).unwrap(); + } + + #[test] + #[should_panic(expected = "InvalidAccountData")] + fn test_vote_account_try_from_invalid_account() { + let mut account = AccountSharedData::default(); + account.set_owner(solana_sdk::vote::program::id()); + VoteAccount::try_from(account).unwrap(); } #[test] From c8fbfe7af7320bd0581e94a96b8071b1eb3289a3 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 28 Aug 2024 19:18:33 +0700 Subject: [PATCH 236/529] bank: add current_epoch_staked_nodes() (#2650) * bank: add current_epoch_staked_nodes() Add current_epoch_staked_nodes() which returns the staked nodes for the current epoch. Remove Bank::staked_nodes() which used to return the bank's view of staked nodes updated to the last vote processed. The updated call sites don't really need super up to date stake info, and with this change we can stop updating staked node info for every vote on every bank. Instead we now compute it once per epoch. 
* bank: current_epoch_stakes: explain why self.epoch + 1 --- .../banking_stage/latest_unprocessed_votes.rs | 10 ++++++++-- core/src/repair/quic_endpoint.rs | 2 +- core/src/staked_nodes_updater_service.rs | 2 +- gossip/src/cluster_info.rs | 4 ++-- ledger/src/leader_schedule_utils.rs | 2 +- runtime/src/bank.rs | 18 ++++++++++++++---- turbine/src/quic_endpoint.rs | 2 +- 7 files changed, 28 insertions(+), 12 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 29ab17b7b3d11c..1ead68e564c9ce 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -132,7 +132,7 @@ pub(crate) fn weighted_random_order_by_stake<'a>( pubkeys: impl Iterator, ) -> impl Iterator + 'static { // Efraimidis and Spirakis algo for weighted random sample without replacement - let staked_nodes = bank.staked_nodes(); + let staked_nodes = bank.current_epoch_staked_nodes(); let mut pubkey_with_weight: Vec<(f64, Pubkey)> = pubkeys .filter_map(|&pubkey| { let stake = staked_nodes.get(&pubkey).copied().unwrap_or(0); @@ -420,6 +420,7 @@ mod tests { solana_perf::packet::{Packet, PacketBatch, PacketFlags}, solana_runtime::{ bank::Bank, + epoch_stakes::EpochStakes, genesis_utils::{self, ValidatorVoteKeypairs}, }, solana_sdk::{hash::Hash, signature::Signer, system_transaction::transfer}, @@ -845,7 +846,12 @@ mod tests { #[test] fn test_forwardable_packets() { let latest_unprocessed_votes = LatestUnprocessedVotes::new(); - let bank = Arc::new(Bank::default_for_tests()); + let mut bank = Bank::default_for_tests(); + bank.set_epoch_stakes_for_test( + bank.epoch().saturating_add(1), + EpochStakes::new_for_tests(HashMap::new(), bank.epoch().saturating_add(1)), + ); + let bank = Arc::new(bank); let mut forward_packet_batches_by_accounts = ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index d463e3d6d4be0d..20484ddb3ed1b6 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -724,7 +724,7 @@ async fn prune_connection_cache( debug_assert!(prune_cache_pending.load(Ordering::Relaxed)); let staked_nodes = { let root_bank = bank_forks.read().unwrap().root_bank(); - root_bank.staked_nodes() + root_bank.current_epoch_staked_nodes() }; { let mut cache = cache.lock().await; diff --git a/core/src/staked_nodes_updater_service.rs b/core/src/staked_nodes_updater_service.rs index e4bffd8666d857..98f7a0fa9e1cba 100644 --- a/core/src/staked_nodes_updater_service.rs +++ b/core/src/staked_nodes_updater_service.rs @@ -32,7 +32,7 @@ impl StakedNodesUpdaterService { while !exit.load(Ordering::Relaxed) { let stakes = { let root_bank = bank_forks.read().unwrap().root_bank(); - root_bank.staked_nodes() + root_bank.current_epoch_staked_nodes() }; let overrides = staked_nodes_overrides.read().unwrap().clone(); *staked_nodes.write().unwrap() = StakedNodes::new(stakes, overrides); diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 850346c8e0e9d0..686510023b476f 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1895,7 +1895,7 @@ impl ClusterInfo { Some(ref bank_forks) => { let root_bank = bank_forks.read().unwrap().root_bank(); ( - root_bank.staked_nodes(), + root_bank.current_epoch_staked_nodes(), Some(root_bank.feature_set.clone()), ) } @@ -2704,7 +2704,7 @@ impl ClusterInfo { Some(bank_forks) => { let bank = 
bank_forks.read().unwrap().root_bank(); let feature_set = bank.feature_set.clone(); - (Some(feature_set), bank.staked_nodes()) + (Some(feature_set), bank.current_epoch_staked_nodes()) } }; self.process_packets( diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs index 3ed5528af5ea90..661e337a0c3d44 100644 --- a/ledger/src/leader_schedule_utils.rs +++ b/ledger/src/leader_schedule_utils.rs @@ -99,7 +99,7 @@ mod tests { let bank = Bank::new_for_tests(&genesis_config); let pubkeys_and_stakes: Vec<_> = bank - .staked_nodes() + .current_epoch_staked_nodes() .iter() .map(|(pubkey, stake)| (*pubkey, *stake)) .collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ec042861873d71..95661ce5318c7a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5905,10 +5905,6 @@ impl Bank { }); } - pub fn staked_nodes(&self) -> Arc> { - self.stakes_cache.stakes().staked_nodes() - } - /// current vote accounts for this bank along with the stake /// attributed to each account pub fn vote_accounts(&self) -> Arc { @@ -5923,6 +5919,15 @@ impl Bank { Some(vote_account.clone()) } + /// Get the EpochStakes for the current Bank::epoch + pub fn current_epoch_stakes(&self) -> &EpochStakes { + // The stakes for a given epoch (E) in self.epoch_stakes are keyed by leader schedule epoch + // (E + 1) so the stakes for the current epoch are stored at self.epoch_stakes[E + 1] + self.epoch_stakes + .get(&self.epoch.saturating_add(1)) + .expect("Current epoch stakes must exist") + } + /// Get the EpochStakes for a given epoch pub fn epoch_stakes(&self, epoch: Epoch) -> Option<&EpochStakes> { self.epoch_stakes.get(&epoch) @@ -5932,6 +5937,11 @@ impl Bank { &self.epoch_stakes } + /// Get the staked nodes map for the current Bank::epoch + pub fn current_epoch_staked_nodes(&self) -> Arc> { + self.current_epoch_stakes().stakes().staked_nodes() + } + pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option>> { Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes()) } diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index d16bb5c188b831..ce1de6eb039cc6 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -575,7 +575,7 @@ async fn prune_connection_cache( debug_assert!(prune_cache_pending.load(Ordering::Relaxed)); let staked_nodes = { let root_bank = bank_forks.read().unwrap().root_bank(); - root_bank.staked_nodes() + root_bank.current_epoch_staked_nodes() }; { let mut cache = cache.lock().await; From b6c3d50d578be08486a03c9bf83cb97c4f1f8d2c Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Wed, 28 Aug 2024 08:26:21 -0500 Subject: [PATCH 237/529] accounts-index: remove in-memory account index limit (#2721) * panic when exceed in-mem index budget * remove in-memory account index limit * add index limit arg to deprecated arg list * fix a test * pr reviews * add copy for IndexLimitMb enum to allow unwrap_or_default --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_index.rs | 22 +++---- .../accounts_index/in_mem_accounts_index.rs | 62 +------------------ accounts-db/src/bucket_map_holder.rs | 50 ++------------- ledger-tool/src/args.rs | 25 ++------ validator/src/cli.rs | 23 +++---- validator/src/main.rs | 13 ++-- 6 files changed, 40 insertions(+), 155 deletions(-) diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 3b4ddbe3927e5d..6c0a1ec62c8f27 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -50,7 
+50,7 @@ pub const ACCOUNTS_INDEX_CONFIG_FOR_TESTING: AccountsIndexConfig = AccountsIndex bins: Some(BINS_FOR_TESTING), flush_threads: Some(FLUSH_THREADS_TESTING), drives: None, - index_limit_mb: IndexLimitMb::Unspecified, + index_limit_mb: IndexLimitMb::Unlimited, ages_to_stay_in_cache: None, scan_results_limit_bytes: None, started_from_validator: false, @@ -59,7 +59,7 @@ pub const ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS: AccountsIndexConfig = AccountsIn bins: Some(BINS_FOR_BENCHMARKS), flush_threads: Some(FLUSH_THREADS_TESTING), drives: None, - index_limit_mb: IndexLimitMb::Unspecified, + index_limit_mb: IndexLimitMb::Unlimited, ages_to_stay_in_cache: None, scan_results_limit_bytes: None, started_from_validator: false, @@ -189,22 +189,16 @@ pub struct AccountSecondaryIndexesIncludeExclude { } /// specification of how much memory in-mem portion of account index can use -#[derive(Debug, Clone)] +#[derive(Debug, Copy, Clone, Default)] pub enum IndexLimitMb { - /// nothing explicit specified, so default - Unspecified, - /// limit was specified, use disk index for rest - Limit(usize), + /// use disk index while allowing to use as much memory as available for + /// in-memory index. + #[default] + Unlimited, /// in-mem-only was specified, no disk index InMemOnly, } -impl Default for IndexLimitMb { - fn default() -> Self { - Self::Unspecified - } -} - #[derive(Debug, Default, Clone)] pub struct AccountsIndexConfig { pub bins: Option, @@ -2487,7 +2481,7 @@ pub mod tests { let mut config = ACCOUNTS_INDEX_CONFIG_FOR_TESTING; config.index_limit_mb = if use_disk { - IndexLimitMb::Limit(10_000) + IndexLimitMb::Unlimited } else { IndexLimitMb::InMemOnly // in-mem only }; diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 051769ad9e55ad..15610713a69652 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -980,13 +980,6 @@ impl + Into> InMemAccountsIndex usize { - std::mem::size_of::() - + std::mem::size_of::() - + std::mem::size_of::>() - } - fn should_evict_based_on_age( current_age: Age, entry: &AccountMapEntry, @@ -1003,16 +996,12 @@ impl + Into> InMemAccountsIndex, startup: bool, update_stats: bool, - exceeds_budget: bool, ages_flushing_now: Age, ) -> (bool, Option>>) { // this could be tunable dynamically based on memory pressure // we could look at more ages or we could throw out more items we are choosing to keep in the cache if Self::should_evict_based_on_age(current_age, entry, startup, ages_flushing_now) { - if exceeds_budget { - // if we are already holding too many items in-mem, then we need to be more aggressive at kicking things out - (true, None) - } else if entry.ref_count() != 1 { + if entry.ref_count() != 1 { Self::update_stat(&self.stats().held_in_mem.ref_count, 1); (false, None) } else { @@ -1199,7 +1188,6 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex bool { - let in_mem_count = self.stats().count_in_mem.load(Ordering::Relaxed); - let limit = self.storage.mem_budget_mb; - let estimate_mem = in_mem_count * Self::approx_size_of_one_entry(); - let exceeds_budget = limit - .map(|limit| estimate_mem >= limit * 1024 * 1024) - .unwrap_or_default(); - self.stats() - .estimate_mem - .store(estimate_mem as u64, Ordering::Relaxed); - exceeds_budget - } - /// for each key in 'keys', look up in map, set age to the future fn move_ages_to_future(&self, next_age: Age, current_age: Age, keys: 
&[Pubkey]) { let map = self.map_internal.read().unwrap(); @@ -1564,7 +1536,7 @@ impl Drop for EvictionsGuard<'_> { mod tests { use { super::*, - crate::accounts_index::{AccountsIndexConfig, IndexLimitMb, BINS_FOR_TESTING}, + crate::accounts_index::{AccountsIndexConfig, BINS_FOR_TESTING}, assert_matches::assert_matches, itertools::Itertools, }; @@ -1582,10 +1554,7 @@ mod tests { fn new_disk_buckets_for_test() -> InMemAccountsIndex { let holder = Arc::new(BucketMapHolder::new( BINS_FOR_TESTING, - &Some(AccountsIndexConfig { - index_limit_mb: IndexLimitMb::Limit(1), - ..AccountsIndexConfig::default() - }), + &Some(AccountsIndexConfig::default()), 1, )); let bin = 0; @@ -1615,7 +1584,6 @@ mod tests { &one_element_slot_list_entry, startup, false, - false, 1, ) .0, @@ -1695,23 +1663,6 @@ mod tests { AccountMapEntryMeta::default(), )); - // exceeded budget - assert!( - bucket - .should_evict_from_mem( - current_age, - &Arc::new(AccountMapEntryInner::new( - vec![], - ref_count, - AccountMapEntryMeta::default() - )), - startup, - false, - true, - 0, - ) - .0 - ); // empty slot list assert!( !bucket @@ -1724,7 +1675,6 @@ mod tests { )), startup, false, - false, 0, ) .0 @@ -1737,7 +1687,6 @@ mod tests { &one_element_slot_list_entry, startup, false, - false, 0, ) .0 @@ -1754,7 +1703,6 @@ mod tests { )), startup, false, - false, 0, ) .0 @@ -1774,7 +1722,6 @@ mod tests { )), startup, false, - false, 0, ) .0 @@ -1789,7 +1736,6 @@ mod tests { &one_element_slot_list_entry, startup, false, - false, 0, ) .0 @@ -1804,7 +1750,6 @@ mod tests { &one_element_slot_list_entry, startup, false, - false, 0, ) .0 @@ -1819,7 +1764,6 @@ mod tests { &one_element_slot_list_entry, startup, false, - false, 0, ) .0 diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index fb90b68d124702..8a6fb5028c1060 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -29,9 +29,6 @@ const _: () = assert!(std::mem::size_of::() == std::mem::size_of:: = Some(10_000); - pub struct BucketMapHolder + Into> { pub disk: Option>, @@ -59,10 +56,6 @@ pub struct BucketMapHolder + Into> pub threads: usize, - // how much mb are we allowed to keep in the in-mem index? - // Rest goes to disk. - pub mem_budget_mb: Option, - /// how many ages should elapse from the last time an item is used where the item will remain in the cache pub ages_to_stay_in_cache: Age, @@ -217,43 +210,15 @@ impl + Into> BucketMapHolder config.drives.clone() }); - let mem_budget_mb = match config + let disk = match config .as_ref() - .map(|config| &config.index_limit_mb) - .unwrap_or(&IndexLimitMb::Unspecified) + .map(|config| config.index_limit_mb) + .unwrap_or_default() { - // creator said to use disk idx with a specific limit - IndexLimitMb::Limit(mb) => Some(*mb), - // creator said InMemOnly, so no disk index IndexLimitMb::InMemOnly => None, - // whatever started us didn't specify whether to use the acct idx - IndexLimitMb::Unspecified => { - // check env var if we were not started from a validator - let mut use_default = true; - if !config - .as_ref() - .map(|config| config.started_from_validator) - .unwrap_or_default() - { - if let Ok(_limit) = std::env::var("SOLANA_TEST_ACCOUNTS_INDEX_MEMORY_LIMIT_MB") - { - // Note this env var means the opposite of the default. The default now is disk index is on. 
- // So, if this env var is set, DO NOT allocate with disk buckets if mem budget was not set, we were NOT started from validator, and env var was set - // we do not want the env var to have an effect when running the validator (only tests, benches, etc.) - use_default = false; - } - } - if use_default { - // if validator does not specify disk index limit or specify in mem only, then this is the default - DEFAULT_DISK_INDEX - } else { - None - } - } + IndexLimitMb::Unlimited => Some(BucketMap::new(bucket_config)), }; - // only allocate if mem_budget_mb is Some - let disk = mem_budget_mb.map(|_| BucketMap::new(bucket_config)); Self { disk, ages_to_stay_in_cache, @@ -270,7 +235,7 @@ impl + Into> BucketMapHolder age_timer: AtomicInterval::default(), bins, startup: AtomicBool::default(), - mem_budget_mb, + threads, _phantom: PhantomData, startup_stats: Arc::default(), @@ -512,10 +477,7 @@ pub mod tests { #[test] fn test_disk_index_enabled() { let bins = 1; - let config = AccountsIndexConfig { - index_limit_mb: IndexLimitMb::Limit(0), - ..AccountsIndexConfig::default() - }; + let config = AccountsIndexConfig::default(); let test = BucketMapHolder::::new(bins, &Some(config), 1); assert!(test.is_disk_index_enabled()); } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 131bc91e9a7914..510cbab05dba84 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -59,22 +59,12 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { .validator(is_pow2) .takes_value(true) .help("Number of bins to divide the accounts index into"), - Arg::with_name("accounts_index_memory_limit_mb") - .long("accounts-index-memory-limit-mb") - .value_name("MEGABYTES") - .validator(is_parsable::) - .takes_value(true) - .help( - "How much memory the accounts index can consume. If this is exceeded, some \ - account index entries will be stored on disk.", - ), Arg::with_name("disable_accounts_disk_index") .long("disable-accounts-disk-index") .help( "Disable the disk-based accounts index. It is enabled by default. 
The entire \ accounts index will be kept in memory.", - ) - .conflicts_with("accounts_index_memory_limit_mb"), + ), Arg::with_name("accounts_db_skip_shrink") .long("accounts-db-skip-shrink") .help( @@ -241,14 +231,11 @@ pub fn get_accounts_db_config( let ledger_tool_ledger_path = ledger_path.join(LEDGER_TOOL_DIRECTORY); let accounts_index_bins = value_t!(arg_matches, "accounts_index_bins", usize).ok(); - let accounts_index_index_limit_mb = - if let Ok(limit) = value_t!(arg_matches, "accounts_index_memory_limit_mb", usize) { - IndexLimitMb::Limit(limit) - } else if arg_matches.is_present("disable_accounts_disk_index") { - IndexLimitMb::InMemOnly - } else { - IndexLimitMb::Unspecified - }; + let accounts_index_index_limit_mb = if arg_matches.is_present("disable_accounts_disk_index") { + IndexLimitMb::InMemOnly + } else { + IndexLimitMb::Unlimited + }; let accounts_index_drives = values_t!(arg_matches, "accounts_index_path", String) .ok() .map(|drives| drives.into_iter().map(PathBuf::from).collect()) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 60cdc3044748f6..440cf155d8d892 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1400,17 +1400,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { this is exceeded, the scan aborts.", ), ) - .arg( - Arg::with_name("accounts_index_memory_limit_mb") - .long("accounts-index-memory-limit-mb") - .value_name("MEGABYTES") - .validator(is_parsable::) - .takes_value(true) - .help( - "How much memory the accounts index can consume. If this is exceeded, some \ - account index entries will be stored on disk.", - ), - ) .arg( Arg::with_name("accounts_index_bins") .long("accounts-index-bins") @@ -2009,6 +1998,18 @@ fn deprecated_arguments() -> Vec { Ok(()) } })); + // deprecated in v2.1 by PR #2721 + add_arg!(Arg::with_name("accounts_index_memory_limit_mb") + .long("accounts-index-memory-limit-mb") + .value_name("MEGABYTES") + .validator(is_parsable::) + .takes_value(true) + .help( + "How much memory the accounts index can consume. If this is exceeded, some \ + account index entries will be stored on disk.", + ), + usage_warning: "index memory limit has been deprecated. The limit arg has no effect now.", + ); add_arg!(Arg::with_name("accountsdb_repl_bind_address") .long("accountsdb-repl-bind-address") .value_name("HOST") diff --git a/validator/src/main.rs b/validator/src/main.rs index b2fb6a7ac275b9..3738eabced96de 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1187,14 +1187,11 @@ pub fn main() { TestPartitionedEpochRewards::None }; - accounts_index_config.index_limit_mb = - if let Ok(limit) = value_t!(matches, "accounts_index_memory_limit_mb", usize) { - IndexLimitMb::Limit(limit) - } else if matches.is_present("disable_accounts_disk_index") { - IndexLimitMb::InMemOnly - } else { - IndexLimitMb::Unspecified - }; + accounts_index_config.index_limit_mb = if matches.is_present("disable_accounts_disk_index") { + IndexLimitMb::InMemOnly + } else { + IndexLimitMb::Unlimited + }; { let mut accounts_index_paths: Vec = if matches.is_present("accounts_index_path") { From 1679a5fb11f9093605329c1bf0604d2769bb15eb Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 28 Aug 2024 21:52:59 +0700 Subject: [PATCH 238/529] VoteAccounts: reset ::staked_nodes on clone (#2732) This fixes a bug where we were copying-on-write ::staked_nodes for each bank and then updating it for each vote even if the field was never accessed. 
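To make the copy-on-write pitfall concrete, here is a minimal, self-contained
sketch of the clone-resets-the-cache pattern this patch adopts. It is not the
real `VoteAccounts` type: `CachedStakes`, the `String` keys, and the trivial
derivation below are placeholders.

use std::{
    collections::HashMap,
    sync::{Arc, OnceLock},
};

struct CachedStakes {
    vote_accounts: Arc<HashMap<String, u64>>,
    staked_nodes: OnceLock<Arc<HashMap<String, u64>>>,
}

impl Clone for CachedStakes {
    fn clone(&self) -> Self {
        Self {
            // The authoritative map is shared cheaply.
            vote_accounts: Arc::clone(&self.vote_accounts),
            // The derived map starts empty in the clone: if this copy never
            // calls `staked_nodes()`, it never pays to build or maintain it.
            staked_nodes: OnceLock::new(),
        }
    }
}

impl CachedStakes {
    fn staked_nodes(&self) -> Arc<HashMap<String, u64>> {
        Arc::clone(self.staked_nodes.get_or_init(|| {
            // Placeholder derivation; the real type aggregates stake per node.
            Arc::new(self.vote_accounts.as_ref().clone())
        }))
    }
}

fn main() {
    let base = CachedStakes {
        vote_accounts: Arc::new(HashMap::from([("node".to_string(), 42)])),
        staked_nodes: OnceLock::new(),
    };
    let _ = base.staked_nodes(); // populates the cache on `base` only
    let child = base.clone();
    assert!(child.staked_nodes.get().is_none()); // clone starts uncached
}

Deriving `Clone` would instead carry the populated cell into every child bank,
forcing `add_stake`/`sub_stake` to keep updating a map that may never be read.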
--- vote/src/vote_account.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vote/src/vote_account.rs b/vote/src/vote_account.rs index 7b01606499fe95..3edd9a7bf0288a 100644 --- a/vote/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -43,7 +43,7 @@ struct VoteAccountInner { pub type VoteAccountsHashMap = HashMap; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct VoteAccounts { #[serde(deserialize_with = "deserialize_accounts_hash_map")] vote_accounts: Arc, @@ -59,6 +59,19 @@ pub struct VoteAccounts { >, } +impl Clone for VoteAccounts { + fn clone(&self) -> Self { + Self { + vote_accounts: Arc::clone(&self.vote_accounts), + // Reset this so that if the previous bank did compute `staked_nodes`, the new bank + // won't copy-on-write and keep updating the map if the staked nodes on this bank are + // never accessed. See [`VoteAccounts::add_stake`] [`VoteAccounts::sub_stake`] and + // [`VoteAccounts::staked_nodes`]. + staked_nodes: OnceLock::new(), + } + } +} + impl VoteAccount { pub fn account(&self) -> &AccountSharedData { &self.0.account From 9753b22ba85debe89f3c16cedba035dfa234ffb4 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 28 Aug 2024 11:01:14 -0400 Subject: [PATCH 239/529] Adds `search` command to hash-cache-tool (#2763) --- .../accounts-hash-cache-tool/src/main.rs | 72 ++++++++++++++++++- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index b692ca9482fe25..8e3d0ff225e5cf 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -1,9 +1,9 @@ use { - ahash::{HashMap, RandomState}, + ahash::{HashMap, HashSet, RandomState}, bytemuck::Zeroable as _, clap::{ - crate_description, crate_name, value_t_or_exit, App, AppSettings, Arg, ArgMatches, - SubCommand, + crate_description, crate_name, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, + ArgMatches, SubCommand, }, memmap2::Mmap, rayon::prelude::*, @@ -30,6 +30,7 @@ use { }; const CMD_INSPECT: &str = "inspect"; +const CMD_SEARCH: &str = "search"; const CMD_DIFF: &str = "diff"; const CMD_DIFF_FILES: &str = "files"; const CMD_DIFF_DIRS: &str = "directories"; @@ -66,6 +67,25 @@ fn main() { .help("Accounts hash cache file to inspect"), ), ) + .subcommand( + SubCommand::with_name(CMD_SEARCH) + .about("Search for accounts hash cache entries") + .arg( + Arg::with_name("path") + .index(1) + .takes_value(true) + .value_name("PATH") + .help("Accounts hash cache directory to search"), + ) + .arg( + Arg::with_name("addresses") + .index(2) + .takes_value(true) + .value_name("PUBKEYS") + .value_delimiter(",") + .help("Search for the entries of one or more pubkeys, delimited by commas"), + ), + ) .subcommand( SubCommand::with_name(CMD_DIFF) .about("Compares cache files") @@ -176,6 +196,7 @@ fn main() { let subcommand_str = subcommand.0; match subcommand { (CMD_INSPECT, Some(subcommand_matches)) => cmd_inspect(&matches, subcommand_matches), + (CMD_SEARCH, Some(subcommand_matches)) => cmd_search(&matches, subcommand_matches), (CMD_DIFF, Some(subcommand_matches)) => { let diff_subcommand = subcommand_matches.subcommand(); match diff_subcommand { @@ -208,6 +229,16 @@ fn cmd_inspect( do_inspect(path, force) } +fn cmd_search( + _app_matches: &ArgMatches<'_>, + subcommand_matches: &ArgMatches<'_>, +) -> Result<(), String> { + let path 
= value_t_or_exit!(subcommand_matches, "path", String); + let addresses = values_t_or_exit!(subcommand_matches, "addresses", Pubkey); + let addresses = HashSet::from_iter(addresses); + do_search(path, addresses) +} + fn cmd_diff_files( _app_matches: &ArgMatches<'_>, subcommand_matches: &ArgMatches<'_>, @@ -274,6 +305,41 @@ fn do_inspect(file: impl AsRef, force: bool) -> Result<(), String> { Ok(()) } +fn do_search(dir: impl AsRef, addresses: HashSet) -> Result<(), String> { + let _timer = ElapsedOnDrop::new(format!("searching '{}' took ", dir.as_ref().display())); + let files = get_cache_files_in(&dir).map_err(|err| { + format!( + "failed to get cache files in dir '{}': {err}", + dir.as_ref().display(), + ) + })?; + + files.par_iter().for_each(|file| { + let Ok((mmap, _header)) = mmap_file(&file.path, false) + .inspect_err(|err| eprintln!("failed to mmap file '{}': {err}", file.path.display())) + else { + return; + }; + let file_name = Path::new(file.path.file_name().expect("path is a file")); + let mut count = Saturating(0); + scan_mmap(&mmap, |entry| { + if addresses.contains(&entry.pubkey) { + println!( + "pubkey: {:44}, hash: {:44}, lamports: {}, file: {}, index: {}", + entry.pubkey.to_string(), + entry.hash.0.to_string(), + entry.lamports, + file_name.display(), + count, + ); + } + count += 1; + }); + }); + + Ok(()) +} + fn do_diff_files(file1: impl AsRef, file2: impl AsRef) -> Result<(), String> { let LatestEntriesInfo { latest_entries: entries1, From 278713978c8e387b8dca74eabd5aa99d6a5f211a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Aug 2024 23:04:09 +0800 Subject: [PATCH 240/529] build(deps): bump bytemuck from 1.17.0 to 1.17.1 (#2762) * build(deps): bump bytemuck from 1.17.0 to 1.17.1 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.17.0 to 1.17.1. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.17.0...v1.17.1) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73acd4996778b5..328615b0d6efd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1151,9 +1151,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "bytemuck" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" +checksum = "773d90827bc3feecfb67fab12e24de0749aad83c74b9504ecde46237b5cd24e2" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 4b7f35a6ce2a4c..fe3592daaa5042 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -200,7 +200,7 @@ bs58 = "0.5.1" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.17.0" +bytemuck = "1.17.1" bytemuck_derive = "1.7.1" byteorder = "1.5.0" bytes = "1.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1246b51366c09f..d9eb6b6d7f87f0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -855,9 +855,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" +checksum = "773d90827bc3feecfb67fab12e24de0749aad83c74b9504ecde46237b5cd24e2" dependencies = [ "bytemuck_derive", ] From 61d6a22a472f0adb0ac68c3b287a5d68a66ee7e4 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:21:07 -0700 Subject: [PATCH 241/529] Remove explicit stream finish (#2760) * remove finish as it is redaundant to stream drop * remove finish as it is redaundant to stream drop --- quic-client/src/nonblocking/quic_client.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 5817a676f3b760..1195211189a63a 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -274,7 +274,6 @@ impl QuicClient { let mut send_stream = connection.open_uni().await?; send_stream.write_all(data).await?; - send_stream.finish().await?; Ok(()) } From 9a4b094ded997ccc5f97fe00646912a09314930e Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 28 Aug 2024 10:23:49 -0600 Subject: [PATCH 242/529] RPC: rewards, return error if epoch_boundary_block is a lie (#2758) * Return error if epoch_boundary_block is not actually the epoch boundary block * Update rpc-client-api/src/custom_error.rs Co-authored-by: Trent Nelson <490004+t-nelson@users.noreply.github.com> --------- Co-authored-by: Trent Nelson <490004+t-nelson@users.noreply.github.com> --- rpc-client-api/src/custom_error.rs | 11 +++++++++++ rpc/src/rpc.rs | 14 ++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index 62857b1ee55c16..2e54e8edd22e02 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -25,6 +25,7 @@ pub const JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET: i64 = -32014; pub const JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION: i64 = -32015; pub const 
JSON_RPC_SERVER_ERROR_MIN_CONTEXT_SLOT_NOT_REACHED: i64 = -32016; pub const JSON_RPC_SERVER_ERROR_EPOCH_REWARDS_PERIOD_ACTIVE: i64 = -32017; +pub const JSON_RPC_SERVER_ERROR_SLOT_NOT_EPOCH_BOUNDARY: i64 = -32018; #[derive(Error, Debug)] pub enum RpcCustomError { @@ -72,6 +73,8 @@ pub enum RpcCustomError { current_block_height: u64, rewards_complete_block_height: u64, }, + #[error("SlotNotEpochBoundary")] + SlotNotEpochBoundary { slot: Slot }, } #[derive(Debug, Serialize, Deserialize)] @@ -228,6 +231,14 @@ impl From for Error { rewards_complete_block_height, })), }, + RpcCustomError::SlotNotEpochBoundary { slot } => Self { + code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_SLOT_NOT_EPOCH_BOUNDARY), + message: format!( + "Rewards cannot be found because slot {slot} is not the epoch boundary. This \ + may be due to gap in the queried node's local ledger or long-term storage" + ), + data: None, + }, } } } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 848159cf9688f9..d9b057c9fd178b 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -634,6 +634,20 @@ impl JsonRpcRequestProcessor { .into()); }; + // If there is a gap in blockstore or long-term historical storage that + // includes the epoch boundary, the `get_blocks_with_limit()` call above + // will return the slot of the block at the end of that gap, not a + // legitimate epoch-boundary block. Therefore, verify that the parent of + // `epoch_boundary_block` occurred before the `first_slot_in_epoch`. If + // it didn't, return an error; it will be impossible to locate + // rewards properly. + if epoch_boundary_block.parent_slot >= first_slot_in_epoch { + return Err(RpcCustomError::SlotNotEpochBoundary { + slot: first_confirmed_block_in_epoch, + } + .into()); + } + // Collect rewards from first block in the epoch if partitioned epoch // rewards not enabled, or address is a vote account let mut reward_map: HashMap = { From ad6002f90ba8ed24ca2d7ca5e07c049498474e75 Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Wed, 28 Aug 2024 13:55:55 -0300 Subject: [PATCH 243/529] Bump platform tools version (#2745) * Bump platform tools version * Remove unused struct --- programs/sbf/rust/ro_modify/src/lib.rs | 16 ---------------- sdk/cargo-build-sbf/src/main.rs | 2 +- sdk/program/Cargo.toml | 2 +- sdk/sbf/scripts/install.sh | 2 +- 4 files changed, 3 insertions(+), 19 deletions(-) diff --git a/programs/sbf/rust/ro_modify/src/lib.rs b/programs/sbf/rust/ro_modify/src/lib.rs index 7d50b70f646704..de005569efafcd 100644 --- a/programs/sbf/rust/ro_modify/src/lib.rs +++ b/programs/sbf/rust/ro_modify/src/lib.rs @@ -39,22 +39,6 @@ struct SolAccountInfo { executable: bool, } -/// Rust representation of C's SolSignerSeed -#[derive(Debug)] -#[repr(C)] -struct SolSignerSeedC { - addr: u64, - len: u64, -} - -/// Rust representation of C's SolSignerSeeds -#[derive(Debug)] -#[repr(C)] -struct SolSignerSeedsC { - addr: u64, - len: u64, -} - const READONLY_ACCOUNTS: &[SolAccountInfo] = &[ SolAccountInfo { is_signer: false, diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 6d02499c8fbc6d..3c3c56a08d14bf 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -913,7 +913,7 @@ fn main() { // The following line is scanned by CI configuration script to // separate cargo caches according to the version of platform-tools. 
- let platform_tools_version = String::from("v1.42"); + let platform_tools_version = String::from("v1.43"); let rust_base_version = get_base_rust_version(platform_tools_version.as_str()); let version = format!( "{}\nplatform-tools {}\n{}", diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 7b96ccf7d1f0e1..a5b525aaf0dca0 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -9,7 +9,7 @@ repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -rust-version = "1.75.0" # solana platform-tools rust version +rust-version = "1.79.0" # solana platform-tools rust version [dependencies] bincode = { workspace = true } diff --git a/sdk/sbf/scripts/install.sh b/sdk/sbf/scripts/install.sh index e51f46d58c418a..2b98ff6595eddd 100755 --- a/sdk/sbf/scripts/install.sh +++ b/sdk/sbf/scripts/install.sh @@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install platform tools -version=v1.42 +version=v1.43 if [[ ! -e platform-tools-$version.md || ! -e platform-tools ]]; then ( set -e From 377118b2ccff9d0e5c931b4c297fa583b6a962f4 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 28 Aug 2024 13:10:56 -0500 Subject: [PATCH 244/529] when generate index re-used existing disk index, stats were incorrect (#2761) --- bucket_map/src/bucket.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index ebd77178450b37..c8f7fb699173dd 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -158,12 +158,13 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { Arc::clone(&stats.index), count, ); - stats.index.resize_grow(0, index.capacity_bytes()); let random = thread_rng().gen(); restartable_bucket.set_file(file_name, random); (index, random, false /* true = reused file */) }); + stats.index.resize_grow(0, index.capacity_bytes()); + Self { random, drives, From d651409c403d26b1840cf19f70b146247fe81739 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 28 Aug 2024 13:15:07 -0500 Subject: [PATCH 245/529] account_saver: Remove nested options (#2724) --- accounts-db/src/accounts.rs | 4 +- accounts-db/src/accounts_db.rs | 60 +++++++------------ .../src/accounts_db/geyser_plugin_utils.rs | 10 ++-- runtime/src/bank.rs | 7 ++- svm/src/account_saver.rs | 23 +++---- 5 files changed, 44 insertions(+), 60 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index eb416c29e2f4e9..8de5431318a3a0 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -588,10 +588,10 @@ impl Accounts { pub fn store_cached<'a>( &self, accounts: impl StorableAccounts<'a>, - transactions: &'a [Option<&'a SanitizedTransaction>], + transactions: Option<&'a [&'a SanitizedTransaction]>, ) { self.accounts_db - .store_cached_inline_update_index(accounts, Some(transactions)); + .store_cached_inline_update_index(accounts, transactions); } pub fn store_accounts_cached<'a>(&self, accounts: impl StorableAccounts<'a>) { diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 641c7d63f9f013..9f43360e4f1b71 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -90,6 +90,7 @@ use { hash::Hash, pubkey::Pubkey, rent_collector::RentCollector, + saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, }, @@ -6817,27 +6818,20 @@ impl AccountsDb { &self, slot: Slot, 
accounts_and_meta_to_store: &impl StorableAccounts<'b>, - txn_iter: Box> + 'a>, + txs: Option<&[&SanitizedTransaction]>, ) -> Vec { - let mut write_version_producer: Box> = - if self.accounts_update_notifier.is_some() { - let mut current_version = self - .write_version - .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel); - Box::new(std::iter::from_fn(move || { - let ret = current_version; - current_version += 1; - Some(ret) - })) - } else { - Box::new(std::iter::empty()) - }; + let mut current_write_version = if self.accounts_update_notifier.is_some() { + self.write_version + .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel) + } else { + 0 + }; - let (account_infos, cached_accounts) = txn_iter - .enumerate() - .map(|(i, txn)| { + let (account_infos, cached_accounts) = (0..accounts_and_meta_to_store.len()) + .map(|index| { + let txn = txs.map(|txs| *txs.get(index).expect("txs must be present if provided")); let mut account_info = AccountInfo::default(); - accounts_and_meta_to_store.account_default_if_zero_lamport(i, |account| { + accounts_and_meta_to_store.account_default_if_zero_lamport(index, |account| { let account_shared_data = account.to_account_shared_data(); let pubkey = account.pubkey(); account_info = AccountInfo::new(StorageLocation::Cached, account.lamports()); @@ -6845,10 +6839,11 @@ impl AccountsDb { self.notify_account_at_accounts_update( slot, &account_shared_data, - txn, + &txn, pubkey, - &mut write_version_producer, + current_write_version, ); + saturating_add_assign!(current_write_version, 1); let cached_account = self.accounts_cache.store(slot, pubkey, account_shared_data); @@ -6872,7 +6867,7 @@ impl AccountsDb { &self, accounts: &'c impl StorableAccounts<'b>, store_to: &StoreTo, - transactions: Option<&[Option<&'a SanitizedTransaction>]>, + transactions: Option<&'a [&'a SanitizedTransaction]>, ) -> Vec { let mut calc_stored_meta_time = Measure::start("calc_stored_meta"); let slot = accounts.target_slot(); @@ -6895,18 +6890,7 @@ impl AccountsDb { .fetch_add(calc_stored_meta_time.as_us(), Ordering::Relaxed); match store_to { - StoreTo::Cache => { - let txn_iter: Box>> = - match transactions { - Some(transactions) => { - assert_eq!(transactions.len(), accounts.len()); - Box::new(transactions.iter()) - } - None => Box::new(std::iter::repeat(&None).take(accounts.len())), - }; - - self.write_accounts_to_cache(slot, accounts, txn_iter) - } + StoreTo::Cache => self.write_accounts_to_cache(slot, accounts, transactions), StoreTo::Storage(storage) => self.write_accounts_to_storage(slot, storage, accounts), } } @@ -8292,7 +8276,7 @@ impl AccountsDb { pub fn store_cached<'a>( &self, accounts: impl StorableAccounts<'a>, - transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, + transactions: Option<&'a [&'a SanitizedTransaction]>, ) { self.store( accounts, @@ -8306,7 +8290,7 @@ impl AccountsDb { pub(crate) fn store_cached_inline_update_index<'a>( &self, accounts: impl StorableAccounts<'a>, - transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, + transactions: Option<&'a [&'a SanitizedTransaction]>, ) { self.store( accounts, @@ -8334,7 +8318,7 @@ impl AccountsDb { &self, accounts: impl StorableAccounts<'a>, store_to: &StoreTo, - transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, + transactions: Option<&'a [&'a SanitizedTransaction]>, reclaim: StoreReclaims, update_index_thread_selection: UpdateIndexThreadSelection, ) { @@ -8523,7 +8507,7 @@ impl AccountsDb { &self, accounts: impl StorableAccounts<'a>, store_to: 
&StoreTo, - transactions: Option<&'a [Option<&'a SanitizedTransaction>]>, + transactions: Option<&'a [&'a SanitizedTransaction]>, reclaim: StoreReclaims, update_index_thread_selection: UpdateIndexThreadSelection, ) { @@ -8569,7 +8553,7 @@ impl AccountsDb { accounts: impl StorableAccounts<'a>, store_to: &StoreTo, reset_accounts: bool, - transactions: Option<&[Option<&SanitizedTransaction>]>, + transactions: Option<&'a [&'a SanitizedTransaction]>, reclaim: StoreReclaims, update_index_thread_selection: UpdateIndexThreadSelection, ) -> StoreAccountsTiming { diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index 6f94f4300de675..cedebafa4f08e7 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -57,23 +57,21 @@ impl AccountsDb { notify_stats.report(); } - pub fn notify_account_at_accounts_update
z*D%kmbZ*jM!gJx>4G#~S5BQHcJrB$8Y}n7Y`1(BL4o&j?QjUK!dknsCe!$POWIn#~ zJB1vNXZ9@62GJf$a=y)XKvQ{V7jDTO*82_()4u3-I~3=nAX2<9$aC3TzsY%CZi*IH zKAa~sDDoZa<$6E$R}~Aa;z&2}3+O`ov({$k6Up!8W_})s^BLxE@vUw6 zxAL2vME%Zt7ZLaQO-6rl(dkZ|?SO{*WjRarvfJ zzclRfMm<3JqWy_;wa*pKY5&s&Eq%m&bIyy$%Kv9$<-b$G(RX=XDAfOrIKS_k$oDH< zFAb?rKFM8Q0dLwL*IV-a%a9LA#4GhzxQ}jXNch?m#*(d+gIlz|>Q>i(!=@K$C$_#v z<$80f;kSi6Y<*Z_H=;Of3GXQ>)-|gc-+R~SRhu`Pr{4;Iy zbO8$Kv7VOTzeZZ&Noi+bX z1oxfOHd8b_|3tlldpMDsT%7}`P4B>u;o2u~ab1SV+K8|Vf1Al%S* z;|F@JpS5ybPx=q^$^fA0*CpqRKhSIXs#doB;DKJ#<+bvARsYo5*Xeuu^hFr=+gcC%yG-w$wEBB#@|>o#ze|!ok#Md3lB6O&`&;+^HvGF`ucW`| za#?Q^jO-UkUrub_tkthKCBG)|EjrFmenG>e;~Z^iV3zkw&Vk=7 z`D^VzrSt^;f!X>>`6I4zr4eM%^%3<%?(hF`A1e2KO73&!JtjC@$H{wa9i>HIOD7#` z zV)N-TAZk3Q)VhBb=6n3!EYIQcd$V0Wp7DFLJU2aH_3&LMp2N)_a6SFN3#P9hFnP@% zIHuuvTsbP^NqnyQn1nHIR0KYZH;-R7+I~p&D;;(&9S~##z*4Kf*?<((uo)uRfaw?Z zpWh4McXDZm1Af%kxX1Djg6QJL!+YKBsUIKjuIB>YUC#x)yPgYpH~u)4c;EgKcn1Xk zf8qV4`D~w(PtQGqufqCe#9!Bp*-w*`AGd$xr%`-l>>2pV#q1@|`>qq_gaAD)|G}r@ zoeQSjH}3j&q5bA54nzOJ0j@dXL#`(Z-YHjc`chACkJ8caPKoa?b?EmM`sJM~R0%x5 z@g4x)nFC%Bk96v_emSpvx`g}1UWH_*B|FKFdKC!PfL^a{Vc?zJ>EY`OP%uD@^@&wAv`U;URVB+A2W)w?*kx=esH}8VT`!6 zixI}S5r2mV@bLZ;(!s&p4?!61hw({%V@D(4Uw#9>Jb&-%Q`avLUzPbW9O^+hloKRB zmHo%-@*dN_ILGzH+3S`dZ}yL`xiIgsej@(N%gZo z5B|Mq2Xh|6^XvI`tB-P3e1A9|SMKw1WS-Owep@Rx3GYczZ-)C>T}O0W%Ibhb#t~mX zZq;w?<~Q77A4Hgt5aZs8@ST^ zv;*8G^U*C9&Ym37`Uji=Hw(HTzn!V2^PI@V(hSC>!n#z~c_xpF?~a*TAK?%9SgFy;z*_Tr+uo62Nw&Gr z!n}_&Fl9(11|84r-%Q_c;l0GCrBh)2YTfy1lhw0BBGF%-Yw3L0;`uI4^AQX4y@i1b zEj)OY`_o3|XZu)R(*6bd+57*X4;;^>)ym5?oIQD1`7Opb&c|?lj{6ai|91%fc+RFK z;~`u&?0U%+H_K7a<_~JNpttM8U)T0{PO|R%K#eZD7d+tWDrxCoj!Rc$J?uHnKj`); z&)?SleVRtsmo&XTY~>qWU)FHG)9n9j*yJ|c-+SzsrZ@K+T);Q_i*>BOc|ArtbKviD z?!2`UYx&f`Q!Sx z6!L%jCGsyS3hT9SLuk{~XeBbs94d<@5%1P_SqiSIkGcNkJ{e_2S>lIHtE=G0vJ|g#LqTo%7&%7K`Woj`;k@_p~09MV=es z{bJ&Q=Rnc_f7JZe5Z|{NYuyz&f}QH3-W)PGXy5UiKnre5FY+1mEb2o(_~V}&Ia@0r zeSVyCBR~0mmGS%5DJ^{k`ri?SsRvG;KJ#8R`h5CRpZXLk6R&-|b3J{+H?t%U_QBw? z>rJ1~e#)M7n+Gf%b^*@$15ZQH@2BwG&P5R& z(B8@@OuF!XZ|4ODALS?9JMw<56CXcorI(=3Dl5QyhP@N*+y?L4P=8~-_kTKl-g%1Y z(-zSK^qCTcUpal|C?UtyZ)4C091r@yr+s-)#xv>u@g3q{D)IQ9J6w2oK=>Gx=DD^Y z7k*#wmC6O}DB$Cbrx^dAmp0H2`U(H*QJ8iL?H=CWL%vPtw$?r3@8kZHEc9?(^3Tz> z!v5V})(5*1?E=#P$??qDJ-`_yr{zJ@Q8!+eJNy&)EJPg9CUU(f(C7evp4K zpFvK}2lM4tE_eK+eZWV!itxTv`?U#Uw+Fi#{PjxPqyEV%1~2ckVBH5XnXiM?N@ky9 z+|c&?T}!Mtp3FSAvGWp5nZv?$`}_d)z;RtK+}E^jCN*D|;kg;) z!_*se!_ZXqc|+qKtw!xZ{Ng)HTnD9niT0G<){Q{#OxBOJ+(H}A^92bb$k%;aHyh(s`xDqd#!ePk<9G^Be*9tz>vLJ$$BS7dZYw zWr5>*cA3TVo@TAX;3uAg-u8Rgz$Z|h`#xQBnU&`|6x4UzpUw8~(e`qG=PT^vx_pLx zUO(@E^)=MbcPu;abogdh^hZ8zp89OB)3(2oe1+u;@*sQ&yuywcVTW%*tl-yGW_6Px z5_mjA{u7^EN94OUI5!TGTeM9WF1GW#EDip1G%vkxS$L(%Y@Z|eJ zz7JZ!dyy{W1M*9L!(PRAIkELJ%0ZuWdx2ElbiU&GF`kPWoHkz@=DD@OX+96~b7_N) zcb==wr&T=vGe%z@C-R4-Gve|Fs3`uRdh&j!z5msic8*3244Y0KoF;>%INJZ=y{`Sw zX}!>opcwfg^LKT5pOf!;7x|Fuj(kThEuEjB!qz>!htO=&l(7UZ;K$e@+!xIJ+)mc8 ze1r%36L8`CgK%7z%=$E#_Y$+S4R3tki1+|qwH;p{40JZ~$aw&M zKgHHpi}eol7+rbZZJ@^ra^GU0$K;9knzNpY0v`4~e7~l3uFBWrTz`?zH%$F)rB^(! 
zIVK7p{q4m0Mwf?dukkhYLs@Poph3Q0$9wIau9pVt#;^h2x)F%*4%7|*gctIG9zZU@ zfoT7){|@(dtsK+K`Wmg|v*O`>V1yCH_m+9Cn(Ktnr>l{E`XPhc^?kN=jl_!nebf{< z>H%hBIR04R;5b=VrNHob5kiaP=8Kp*+fk zeAcYs+w$=~0QrpPZUdkB`8J*(1=#2p&o5*D0eI%Sg&aqz=Q@{3qB!7fz4-Z=HooK^ zs~|ezyZ?~y>!jj!;_LY|FGoF}+47&PZyNt)em`%+R8!QN=g)WB#!9i*(GX7YB_V)VKLGU`_DhdZnCPr}e<@A1c_p zXU}epOEEuPAzKvGC!iNv=6S)q2er5C)7E>Dj(@(3=}vQ!|^@d7_K3}Rkw7&^+_3;TJNMAUBh~|?_W?(kbf=Yd83u5-I4J< zQ@(S@_e?pTI>~dLe6NJ}tGSQD@0Fl^9MR%_6zno!6Lt-{JmBLGzxUVQVJoTq9j{kS zn0A=qgnd)wM|nG+FjYI$-(ly>(vH{YIf}I7bU#<{1Lf0x3Cna=-jPe~9D`KyP6^Y# zg`NPX@jOJ@F;(rSw4+1e2B%7F>sOUSL?+q+8Y3+u`P_xn))dGbdb^&95_;2&*2O0P)1Y!-5h{xWpwc?aSV z@oAgur9On@Th|gq`#Z$HRKm@98lCjFNhibszE2n6L=MVVe!A_C(eCaSJYgo=-?72z z`Ey!f+VP~()0$6uWj+MV%DeQc`8&61HXU~m(U5lGAh+P3eCLZwU!KeE9?;|@^Y0&1 z|5T@L(gdDo>F?;$aUktD>iqKe$}hCDd*vMgz!A$S);YuYCW&YtcsKxk?R3rd`o7>n zgDZw#SYv$n{hW69t&&L`;ji|*!@~Za3C}^~b&Xa$px&~c3<}p=|g#g-UD7hpKbb%yU|0=51_dqpS*83;PYj+yJ5iW zz7y4tictUcG5wwHd^aTRm{kHFr5)FnsPE7|>DxPL;z{$_I<#{F{(^jwK1F)4UzCsD z(o@GFawqB^@wa*e{(e*CE$w)>-jEp0PaBk*n zsC_g)7W+^9xAeoi749OR7UgVweM7#SsCwjN@H#W@kMj-YYvAKe&hK)p7$yCXKN(HG^WM^z#@3f}HQR`Hx;P$f2VOT>dP|O< z)rjKxztMW*?^WK%(;KnA*c#(G=!>tqy=itH?U_8%XR%(UzGDBE>;e{Huid9ViumMt zSe{#feeuOh#csb>i|M=vzo;+yJp`_c1pOgEpn+ojB3>6mhGQ0sd&(r1Kg4%y&WgdSNEfz{%#}ncV}kx=J%UV{SA3R3v7Sfap`5#?|rX2{ccwJ zjXf^Cdh~l-5k&l*h<<$za6`LVZIi( ze^lz>xS#pB*MevEm~`d-AK{AY``r`Pccs?H`#ACWL#*FppKiQ+ID4Y~JDh_%5`{4j zODnB?nQV^B<1&{=KR?HJ6S6rjkIP&hX94RXPd(<7-v)auVsK$a6M|fsIEMFXX!qtj z*I55{u6BQ$`&-=K=>B^32X~qr@IGU{^N6P(cK?w2H|~5!{dm7|wcsnp-+F`(82mW@ zcMM@0=JcN9A%u5XI`AbyImck{i{uY1vi5FR;O*Y0KK9duByv7$$m8#Df3N$y&3|Bz z`q>k!)#vxX(hfU^7t8Y<(k}7K{YJoZ3;5aZJ!S{3{o=ShR=Ia!d^U9~@31d*xEy}p zSIM4Oqw#5nG$byDXZJYpoUQO!KRNI3lT}=>N_NQlOMLQPAp6rNb~f@6Ui5Df;M|JV z1)q<3z56VTeZsNmnGG*8ygq6D;=RD;-Bylt%!apFzq?+v{KH#3{DOx!diZ${ulMjV z^-1S+;uNj8Ul<7df%@Mf6EC)tZ#Tv-^WA#4V-Jemi2OUmzf@{y(hlC#g7Y?E$fk*# zoU6nva_oFR9koaN68{``0MEfCSU)lNc^--5G|%_YUgUXz);mr|9S_dGeBWYtkK^T8 zPv7g|=hTno5Au{fvB2W%)=$hsFaiUcKuM`kOuJ^7n-0=l2zg?eTnHt?f-(0pIb09+0lcc_`=W zRzB-@E1&g?=lxsx3=ge*)^EORihVm> z-}3VxEjb<1cJsw|nBQgfx85V(rtw*Lk9dpa<2#8i#~6pC3E?Y;!{3F(ckAIeUc>&1 z?@47(7@So<7m0jue221Hmx{#!-`nK?0(=p_cB|vf@sr)A-AS^?oe!@)tng-! 
zyF6TbM8o;DN6q(r2!6LEzu`HHzxK;dlPeVU)&zv4MAzFW%k(I~$|^Beqp&!$i43Oe)rW<0)MC3w&L{dC#^ z*%;>nuaJDy2ZWR1y6cO6`JOoRM}M2W_tW3jW#_ossuC-Gk@1jDK$7!h*Qh>-YACVudR!-h7 zf06%;w7fAib*RuhO=2^;}k*&(6i*w4T)iIMTKS2{cNa&&HW^k!?i0uuxK;W_E7cevYQ| z{3XZjxE|oOoXpjHaXj$P_oU)@;MHDB8QCs05$%>ZKk&-)lW{!oUry{oPaF@(%SxS* z^~1#E_Y>p%kaUitaXk2l_m$#!j0AiyCXNRm^E_o754`i9XB-d6Yx0f*@Wk`RL7x0R zVJvsw_^Rl;jgna$<%svsUrE*w0Q(zVT zF3mAH^zWw;j$#Kc#80{H<{gl-s@LG7a zhWq6no62)(p2>6RY?Wt}i}?)wwD2-5hj-K@O78vksazv}%xB1NVfzkUDtJ}-F7>K> zBY(_i$Zz2bz5ZDy=cU0x zl5cnWX*;;b>OqT*i!?Y&E_uHM_UV>c-~@A=$He3P5s_P-Cy3*ZYWxd2o&zrV7v`N@ zk3{_=?+KWlf&6fM|Cx3X>^SrwUKhap6LPdj5Q}o!FC#~iJz@3%%deAkU@E*HQwQ~- z$JCQ~s@-e*c?m)l+~e(>=f1H^&_8@frbQ=f zAM|-PTKDC5b~Qd;58-!5{rq|EaK`)qxNx8ISX3YI@wkW@d`Xt~GQ8i@jbFq35tl5* z@jVTG2a5AVes5@C*zhqpy`uR8o}C|H_Ya{Sr_Z^L7qnkrgm|NGY-e634@L1_3D)i4 zV)`PC@`JJ-!uw%NNBnm#PLiAM5JZaOdK}Y%5A+*p$B%z^1@eDR_Ne&2+|wF={CL0U zPdL7x!+yzkAC3M{3gWOnFeE{`vd3asTF{f5zqs_pifxkH80q@_6kr zz(3H6|Ge~%^VFN}aX6vm#Ic^S`uW?^Pw*T1->=$1@;_+h&~MblccGU|wR#Z`$No33 zwlLrM$5&YRM(*rs0Y3{Q2bAN{*+vV_0Ha{cWmQCA-=-fEDBCQ)d z@bfO4hvr>dRDSZVjqa~kpY!Rw>p>5DyIuEt*xT*eauDvRg+6Mm;P&_VkCHpvp>f1rT>V)|QS z{6AR0|APhmKUl!O%P;TUw&(#a5dI4En(txOXAIe*H(uv+dMW*uhwv+*XTHfy1g_%E_SSLpofZ&QqW7q*71ki}#p5y7oTxc~8Gj?AlpUg8C)qv;Xns zmYyCf9kwd@FhBi>)$eej9yor_DA*Nud;HGb4!_X{_BFb|{l|6QU#z1dex3W=@8}bH zDBK{TIKFSjb?3ngEI!-+jKOt*At>W_UoJ2MCG&SG@(ZSDfo%WRJiifS@U9+>A3UR? zKK8p11Aak`>>u$1xM9ulJCWtY=Fj|2AM`s=!*Lk%8Hp19t;%2VF9xSb{nAdHkLNzX zhc6cF=M8UY(IIYUHtHLkqxE6_aZL0@b(+oPitVF4V5I)3D(+n}$^gq(A5Bn5`rHtc9l-K{?OlI_`$4zo+T(d;;st z&2?H_`S3gcD&gBLoE ztgZ-O^jJLmJC^?Zm$?z&l8?OHR@d(VZ^Kp}WHsf9lVVIzKv|xG$Tw zEz^8`f)~(>`}>f8Kn8t%zUcLG@>Z+Qnj7^#E$udl`I@g7O#uk%sdzo#6;#!9bxYd0 zPbwCdp#gDN9}u!=8{&PadZoq3=cgdQ$ak{u-)$@K|H?@3kuUSEJ_X;6 z2Or75B%k4M-&K*2@a;17bl)EQ+$-;+BOl+h0w1q0!0}5FA7>vne&KrrJcqHh$Li&~ z^5K4mun+3zxOuj%2=mYmz@h=r0_>A1pZ@Ipzon{G?$53w% zR$--o!&VA-0bM}G&kF*$-jbF~Pset#>fgGR17EC>pC~s=_#%AY!kU%$h+A|)6zP($ zH~g1%71Z<#(g*yULHy1yx=Os{bFDtsL%SF73;4%L*N;jWxXkrgc;92O=>eYa$isKX zCTadWe0NNP;d}_!BlsN*>KDE*$ay&5`{TWcyvOPZ@XWSw=L)ai=i{M&o9w_(8!t$BVEIL4LAk!n_^zJp1Lb<@&alQBMnA<9p(P{V(>L^U~%)0|@8rpA&jL zZQTs(MXVQm1xkajej0EMSUH42=URf1N#g4k5Bj63m`}C-sGk2OI*$hSu}G{QV}*x*9HF)p_7 z;^We=@p&HBQ(sV>%=f`t>#arr)DKEhuDJhBJW+q3oqGj85KAcM;IQh;K`hQtl3;sC%sVG0^QB3Fiukky$t7o_B zA)V=u`%aawvxAQ+f?vonI16yi`s>zwa;)@k%hUw@$!QL{LoB1uNUY0yFxy&68K^1 zEaW%F0=)81^q=Xqc%8IJ*X0FxN!Mpa;LX-sUJf2+7V3qG1$rEc>IK}tC~#9=HVe=d z$+yFOoLc}q{c>&r_`taZ@Ke8>H$eX|e#51uN8IN-kq>H^<9j*Tt>J#TA3IZi^_O~7 zfA`Bd!&Jg)>AikFprqk)a(^g~bh&r0;r`O=biUAET9|ZexvTYipxEa}xSZUn`OvO~ zcWD^s7NFnzOAB;AKP_EZ*zeCnIot2w>E#wAz49w9y-U{}($c&29Wk_r__BVL)9_&7 zK`(c%pO3g)@}PW4!)5(CspW@!l#`o6e#l{3x=ddfLw=LXEt-!0Sa@^DFQk+F%l*7V z$@lL!h5Se_Cv^>{rAt-b($Z4ZL#Q8gEbFUe29Jf`>-m@X`G%j?^9{(4@s{Vn0FQ-h zp8qXLMSi8Fwdb;=*~A%QYSKgI+ujmXdD>IN$zlBZTAq-8%@0BBrlN{y5*3%G%gqQW}Le`!P zYZ5+N{#tmBhD}c+KlrMwTxjWMX&9v}Z09>b4GXu4UskEGaJz=lD+@z&h3~`3?=v-w zS}naIKF@=pebgMj=OVu`^WytqNEh=@!c*k0m4_)+R&KR0dJ^Ve@*DKxc`%fpq+w9W z@`FCKmynJXS=z@4qb1r)2!k#*E4K2$B=sE6QH1jm{w_Yo)sp0cUYz|`ExVC+esP{K zUOd}@jO^Sv@h`J&X`aVjC!Ga7`X{w(`>DJy1~;$~R75{){+mwfuyM1l z(LqlG-qm6+5FY~@A^j)^scy|XsMnGIFQmbBk_JPYKF?*>uchFDbh@GJp$mSM_@Z=_I3Pb_&k({2Ay{M{f)t+HZjuJp`P8g!FBi z+>!$mPff?mje$RBTRxtfr2UP0{;H>yFN2a3mzkPNvhxj}+4*QaNq%1b3kWAw?<8q6 zv=3z=j^`!VFSOq)?T3~|=A5bsc8$kR2N#;9* zlm5x*lV244Gk=)Z@V<%bAJqSw5q?jU?{8+c;P{E%#&eoGm0+;*puc`%xBbL!`-$BK zeP;va(;^e1UL>5VoFM#vliddW_$7gvdKC7WL?!EF?1CNB-*&pqSKDUTe6`Kzt8F%4 z#l6L6zJ{5XxwP#Q`UCNAo#sZ3#GqcYCy!`)zr3>yKI(6)*t}KldjNxJ+j_MRQ2vjn zj4W?>J?!OQYx7pQcY=9o+E!EhALW)5%9)+}q?emv^H#Z!0t+y0TdVT~l>7gtw%RKv 
z4|x7Jo8D`?(&npeSLu8e`Tw@C9$@zVFMGMF%~z!y<`u|aF5tuL=|@9;*oA4^6*^x< z{(B4ku=&XDkRNnP+uo%!0OY?uf}iVI>_^)6PMfbv{TPXnf6=iP-O9<`T5sC+ZkwkH z+~9EJI~38koD6xsE0c=+N(CR_f8;x>0I%W0#t+~b^aGtRk3s&s3-H?fN{m{~ca_ag z+t%s)6#0)7@NM(Kn?3*4Hb0g7RiP4-JniL%F?$dJXPnA$>;yE?ZYv64K`v>Nh^WB&5$6!FLyh z_)iwv#Z0xVyCVko(}i@K-@iGOKev!>aEb9L4)l0&1RwW>_$LefF+9!=>5q(=4y|ML zelfx`Cxvud4+B;mu0s7*-X;Qo`zB=Yke-d%Z=2sD{R4$`tKW1T z(to;8Kc)(t_anWdkiJOL!#;qqMHoN$j!}5G#-=Sgr^C;9UfC@hbSIiuepALEj8B;F z4h})vNxMG3%r|OKWxgaoKWf2*?S}m&pKq*_OwfD%rCGKfAnRUmoR46B35@dI3HB$^ zZocz*41*)~$+6#-cg=eN@t8*s-PRg6@~#CMk2x&#e%@6H`R<71BJY|J;x~>cKUw4B zefej#J-e5WdW9rXuie|{eGks(u&;4z)DHL&S{`)#*@zE;*IZbb@`ZIeDVpq%u2jS) zUXbtoGVxv~aX`f2>2_W#-p2(#n4kM`nAhJGwTE{Y05|H}FYU(rwCEqpquk-a7Cw<5 zPkwQK8{sD;%yqTqA*oax?sqF3yDS{T@vayj0tD!Sdfp8Oac^3N9f_u;5K;F^H``q13mD22t5ze%6Q&OZe?l9&!L z9^?LB6T>5&0G-j!o6-LRs6}9u_&<&5Aen|@`bGVJp#cAl0zde&PwE-$`Uit6s#iXX zeLTj`$>Mb*@ZxyDnNy6%Xh(g4Pkw4Y9{tOadZd_Mm%Qk&fT8PVgFkco2O55l0P=ft zW$<>XDrq$O2l@Rd#NV>5|5n7;Z`*nc*RR4k=bgU4PW=$|SNX7xlEN?2_E`J?KA0TI zJ>Xkj;Db*^{pb0TwC(p5zkF{8d`Ud)5ImHV|6=%u5*NpHCcb+DyIzWj-95?lU>jC( zCErv#v&FF_n^w_y?$eQ>dQpST>(8}U1b$iFw@Ki*3NU&uevi>9x`KH;#{3;n)F>#=hX$PY(-pAFmj zO`fxEy@z7w^+xHdx|K&i{4Ez0yWNFf%Rgg$JuCrn zpywk>&!ZZT{6kY>J~}cgztR7t>A6GbNxj4OSJJjW()zMt!$&N4lT_~hP}9K=UljP@ z3wU}i=W|h*_YhDX_CCLN*kAH__Bi{(&?9>zeVmq>Qrh&(k;tw^z0$Psn`f7V8wv;;-jVtBk*FAr6U-&AU zUrYWk7uqrV{RuC}^~JTmFI=;Ewv>A;Y6tL{{rW3jj{Cyzkv0bcM~cCI2rK@>~7TjL7$H)rZ&*7f`kSvkUOpJojGD zzsC24*V=qr>R()_-{`T+^ZUHJx6eG}GYfoR{k$xspF3vz%R~CF7t*a?w*LOR|v4`2okBVwmytpO!j6f${QTJY662i@^D2$&d2i^KosK`yA)KAB9KF z!|PJm%YRJZ9c#aFkH$;G;!d>R_~&BVo@l@ES2W%5$9W~~SHS=8^IH9;eT?|sVhR%< zjL-P~z?sl)g7;zd)c$~-k8)>-y@2m6!tvemV!qgZ#CbK$bDzDGPx_&>60n&U;o(8-BGtoj=wsKgQVw@)Py+i7JTO ze+KXiKkSX*kL}z~$iT|>_zn~AiE~{h%cNp)e6OJUK@BEL>Sn_Qx;`KQ1C5iJ-$QyOC>%nb;w`o=O*hPzgwMO?(Ydqn`-h9EFL0-mfIy<9i^e=cMu8=Xmc!DLLcte=y!LlAKDsFOd3yZ{i*N z#c2#tfL2;B@tn(rs?QHss*7{zm zRw(qhEOLnp{OWo(=Q~AG80C5XH(RFJbsrJ&Tu(#&*N&(ksVIk07<}vUBhg6V{Rf`^ zV*9OrjMP7IKi)o~oqtii%=&Wq9t9lhYozF7AMzBVx*s-tYlO9KJ~U#c;S6wb1&L%`!jlu?pFr6P=^UgcVLE z-&-QPh4upVXI3}Nue_IyU*reUlXytwoklp0Go1!rpnE9i`ml;YSRBSvxOE<1 zb2;O<7|L1pB%K5+Q|3C8-Vo&WY1f;OzjGts3DE?? 
zIaa@+QL0k~Y2wxiT-p!uha^0>)7F)_{=7qgMt|0culdB+eDi;#{f+ZMgOASVco95d zo&>x}Q;N42#2@Tx;b($`IS!15YwY$`2|UM8kJAnGRd~f$c*P$=y3+@C959`25d_1p ztCw`|GJnZ{k0%~ay+OMVbUBRswxPV!#mmR@2Dx?@z6XPHA73bX?hb1wzsK4SdiOGq z_xrBleIS?j><)+5=^o&%18X909v|RsSbb>sF@cx*pwsR1{03Xc&3wEi-;uvMAGPxQ zeo6Pk9^TZd_xGhgEXVt{nZNHI+w&hqzqeSq{N9oM27JF;iS&!HrM{1boV@SLaWvLn z&>Qi-?zTuTa9@8`;%g-k^b>KP?`;TlIe{e<1^?jxZK-gI>r~cELHYS(y|S#^)uu^y*S>B z-z?=T2+u~W-1U^yiuio@<)({-SP091zybNM$nRb`w*~oau8=?CHhVeVpZ~1q+u`wP z`y9%5=ix8PTSa~H8}rKQh5u9a zP&qOCpxVoPGI)t2yuEU-Pup|;>YS~~$-w#M*V?BjeQDS5otv3gvp%!42d2GN0jSCS zLO9l+wgVdBb0A6FK#%EB=sVQF`5`zFVeUT>Kh<9Q4j=Dp^tS)tI9P`C<@+s4e}Mq= zw?h&Cf}b9gw&8Ew@XHG{65P%$#-0k@U#fiO!;43>3Cdb`2g=5B_91-v%Mv^=vSZEsqEhlF}JXS z!>fqEBg%CzwR-Rl@*hO-bzWxiTX#qU;xN7dj_y?!Pdkdjnb^q#rY<|RumF#4tw z-me2lv){^ft~7sOz4^^_WIXoiLGOW@#m~Ik<{?Y&My!m5(htxJ;}Tr{nCWG{N7cE& z;<+xvbtd2s6yv>j%0)WC02th5e44gf7;=PmdOMb?9H2Rbo7j98$Mur|r`IiKdwgbo z^JC^;ECqoH&gc51fB#eJ>3qb}t7Yp~Ir)ytCGpv5(aGR4%g6ZE_d5K0=0YBo>xT?5 zzN1L^RbJ3u4`}&+pH7n)*rxuN>J9%U72R!o?B((4X5af1lu0`{Q)2E--qf z<$u%u4Au>=<;2(dT;A(Fre_H+;b=_t_*o`jo#$CQ7*7zx_s=M&eD@}w?eZw^*Tcci zQ+j#-I8JjvvQ`p8h+{eC$GigN_`Q{nYA)%&*TZ%pZ_gFSuu+@(&2hdb(Zczsu@p`7ke%yvSE)9ZYn>_dH4 zU-bf$dmSevs)h-(ILiItB|c*Q(EIB_nqSWlL~VCK%9RJy|BAb|6Or@W0P=>lkXnUejq=q9V|G(_`Orcv~1}*X@*s1{0R+Uhd%)=JWgv-z&s;gnh}ftSrG1&%g`t$#(L~eI03~ zW|MszlT*UeI$w90X2>45`~yC|*4ifMei-WmpP>Gf z-Zq=3%#dMtNE|Z32OLPJT=DxJ*%bax>9XM|WoluXQR}|@xabEr+ z40tiWtzMNPTKD@zbdu^O(oY6kaoNS{B~7P4wEnZ7``qUcE8gcig^%D6f0;P^hR^Zf zip_`-J^3;7YcNQ~JuUxB0!wxBH+0-&KVc_S3i{*YZ*1us(kCt3M2qwQK2_c-s9o%D z`Hza5E}y+JOGe9q&xuFDGhC+)1BpMAnPjB`pYL-%pK9@hqe%Z+`P~X87(d%}b%LnyNTI$t|)K}#9C94h5oOdxD_*zuJ*RRL;5<0`pyvz2ZsSj!A^W0%) zMYAOX=E<3`HkBis#}{>~`~uFPUTm!=n?CHm(h8D4>uuJ*PVYD8t&NJ?8_thS*&BxI zElKUt7v8UjoICD+K%f zL5UQH@d6ygbtjG^7(agd+}6C9@y(T5Oz~iH{w9qWaQiCln4m9BP(L?;G;t79^rO~s zn!Y^2`q;lzz8_YVbe01@2`lUQYgtx1&U)B>I>F@!nZ((;M=9)sa9m)!)Jt5SqrFut z8{@AN-~e9=<67LkUT;}hGUg@3q;&DLIH1(MD345yj?0TZPTB}Poi?XT6 z&(FHg_YZFRFj|%JtJF(w`mnvb$@P(PV&}#=U#Ty2e(JIOjcFDxC!bPz1Ks8eNy7X? z!f*_upWPDPEIv2}>uAT`;GL5&`7^6XKpe~Qokpzl(cQ4NJvrCw|ExrU)TrrF`5(%? 
zT>|35{g-(b#x@I+__t1cFjVgZ8}ItcE|=6_QZJgG>GD@jY#vgqKfrIz1^rZn zx19W}=4f7E8N2T?pZ(?i2+n^ouY|*UK4z!grr}_ZIo=NnkiaYFT}AjvPrwOORnIVk zK6^x~)9=B>b_(0)x-Zu6M%XLYTe&NwJ~*Vq4#RKw*Liwv!fHf?`#n1ZPPE@A|5g79 zKfpm>s9?))_?!=4=XM6__bQX6%R7^EJ+MRaqKfOHdZa3Klcf>rozA5e$8{F&*A(%D z`YQ4te2{$oU(20q#1{wr$v@o(|2M%i@z_K@an&=rySQbaT)Qul*LkL+VkyM)i!q ztH-lHMLZVqjQT24HR!H*l`z~ysr0*oPvCcoggI_g9@3K8VFQn7b{*$qx#f7%JE24A zQ-Y;*h2ibT~gr9aO;Y$6ztbsm* zt7spRzj}o*BjCB+^E;jZKk&x<)vIj1i+q{wdq&kL*Q3do{4Ocqg+lwNC!J^tVcent1HZ3TLjv0O@qqk%ugeL)dzO}_SU)e&?7}B@eu?`n z=!e29zK%x&maq4;e=$3L$ijW%={UCA!u(#8(Y=)1w95c*`nuUoyZ&D3)AaKEo;me6 z_b;;pww}|u%ZkKzc0=GeAdVBeBe9H zaHK=pZU{ubK;PCp(-2KNKE&S%ZI1SiDLjMzu2jC(0OtM6-0fqo6GC63E8qj@dcG!EYQE0~_yITZ!SN>!19FDBk4rln^9?|iw#}8lg1`D+_0hkx z?G}KP~spxc{@YnWTQc`LvTa3m%c5 z^Wo0rmQKFldWau8PtJ9Gb>40H8f(l4gdoWd!B<6m;8V}zte1eeH0%r6`2l{vHuZUT zZ|PlXJ2ci>e$W|sWWM~Oy_&#tH|r!3bVy5PCj~r8XB3ZN9`;4$&laAH-&pTv!S9Iv zpQp`)eKhVn*RQhpK6DaxBYbgBerx~bPer0IzG(lKT6$}rtkC}nR#9)MYW;Wo zpG^NDnW?N(!*P8R^WRj~pimwVqWs$m<(H0A{^N!6 z;0=_2YoYwmIOU%$lz+XKzd*`ky#CE-Ke_&q`WGa!dEh!}f6`2WLhP@_TYdmP^K+x=#2$^O9p8C7QcZs$p}IzsgFag) za3ZGGZsn?z`ZPn{wMOejKcH#z%QtGcx?;Yjb3EuAltgh@2V5#X?V7ZGKB5zjkFd{C z@QM{0Jt7|bF;+b2K2|(1Fjl;vY|Qvd6n{|q1H8hXs)>^%^_#9&pg#cTbt{n~YgDd} zk^g*gh=b4Z_PVztM)Z!~fMe_28OS2%l0;dH%e!_f{qn^d{~h7R=|_#e$Skfp!G7$J z-xc}KcK{mmKdt%V^`U2eAmgN$LwkE9Klt^BvfgKQiNNXUl4S7RX%XDN-623%B(2f) z0fXm(E$XAZ{7b5L;;+?*lAC2du|Rw|pZ6b#kKx(&?_#p&xs z-^AyH0T1|MuYiDZ=L)^^8*Cp+zN=>Z*~9HpQTnUapQ zJLP*<3SUjR7B=-8wShW_Dz1K+`P{$cJJDFbu1L}j_c6|(9oW6_H*CRQ3&Y3wj2ND~ z=6VbGup;q0+~07|-yD}<_&#!3zV3!_Z`;oHKJnR)V4od8s@avm8_&mpj%csp^QVcz zp9r&j?&rI+LnfEpC#)_)F3CrGPwF{a!qwSQ0pfeq6FmLA{>b*5{suf42R`e5zS!fX z7)tV7Am9+TJl?YVmTEm|n++&2Ux0skKDK^=ozIWcwZF5qcv8R6&+khRJ=n0)`oVb! z@FC3}ude*A;*)klSyqEk1N7Ja*iI5S!3ogM#|rEDvtQS;gHWznC@1Pd%e`1A_r^lG zFBi(Oy~#UVNTeRfVOl8nVxinc-rl@Ixz`uUy}7$pFUEn3z1-45xrv2x{e^N>FSou> z4hPHuFYpX}!=-Zm0WKRhy-E9B#sihRvafCz`6t_F{BzTJrhfph^5G`=A&&N~e3#C` z`_$9%;Pm@FA>vHm+jF0yND$|1)FT%Pg@0-oB z^vwb+FxR=l!hHV?(*-Rj?ZegAm=9f}>pL4Q-0=Q~`J6;oY?ThV&g z7=L^FfEO%-de@=e{btW(`#j%5NfejQzeDv3*U7SB3ulK+pY)b~_qf_A08;bs)%@8$ z8wch4rye$Z<@1L;Z2ipV->2cHg$|I%a`IUfNckST%@d?vF$u!@4EKYw16HoO!sJ)p z4byt6XIi+N?6>}&0jR`2b$LxEEYNUsuX?&h20hie)WWeGgI-+Eqg>W&HZImhN>O5F z+4g^yl;!Y>xU?k57DqXu{Sxa%<%7pHo1SYy`@BEX@Ok_dF8|aIXkY$?_1ES`D_>p6 z{+_83w66|Id5jxHd#JwN%JJS-^Ii*Y7W`M_Cx#&S*e^!S8}7CGxqrM);t<1gX3aY! 
zAa0cXCUIK+4nJq6xj^5)RlKDW{z`rJd!4sOz9K)=eEqnb992Nb7psIXfDg)bi!Y47 zT72Q*z!w{TY`%cLI2C-Mdgv#7@iO`1gCqC?;~!MfXudE#)Zz;d2fmp3WAlaD*FL_z zn)*V=qo44_%jAp2qxb?5#X-Bulq0j4hK2Zi+G>|X0@o}>Rz^7pzCt=uqX}D))h7I3Ci0v>PuciVT zv8QH@p2xxdT;eIDoh%Xu8v4RCHK}&ilFkQ5?6jCk(!E*FjcWyPYs@#ErLp zfpHLU(2m$}kJCX*iS2fprH6anTP&eC9&gz4b(J5~2UkwMrs3YUYf60*W`8)Ji|v(H zv<`B>^efL}S1&NQxeg-VXIJ`F`z*ekeD#NbYqRTbo)>}qK`xlTH4ikyE|v8`xS0N@ zW}U>;%gIw3X8+MIUC(jCX3~)1TDz?10Hjb(Pi6 z{rz&X%H|3CG~4KT`UJBFc@BVd9czAeD(C5hpKx%V#&B=@4@d6D20dx^e-(;STsirc z@<*%(N6gQ8j_qE<2hZ&^w|dy+o$~4^P#cupyho2@v5(1c>AHBr9U3oPa!ub&T|9# zJ z9Pbc0VJka6gJdpTFCw$56yLQFfI;jgSQZ8!A4^{rLKK#Mxd_Kx|NxrexvDV9i z8`d|p|AKGTzwT2{fAGUtUAFZBO&27?|KMG|uRW~EYTu}ybiAY91=29rIkI8bSGK(# z*7X73*-}O8y`kNs^w=k`9~be($7R5yeVEES)NsvS^^(rieD1gN{?tUl3&zR%+bq3a zGk@k)c2B6fQUDgWL@uhqh4rF`G^qGHvSXqJaig9iWMRkc zscpiK?MAVT3pj3PWC$lBv{VwZs{rv}AwYb5Z7o9x33u88Nx|jLz1Ol0cKXa6Xy;B% zr%mPln*KeP_IcVex0A+g=Si8~rgi`6Uv%Ght@VEA)2AcbB$@Vh+WpwtXYalC+H0@9 z_S$RzekD4nx^g;>x_z#3u=`d+pWlb+4msAiJa?O?Yuu}LUTN|m`%>}jSCo^N333R&setno(ucozEb=B)kUrL(>e|7M}6I{-~MS4KKOq& z_%9aWd!3%M!T(+nesVGWhf4JSb`gH;-1vX90H59Y^xrZ^w&kGtDJuDdBA%hJOEoWGJlnrrJjbPW@vK?9c-F98e7z8VEa;W9INBZ0Yt#sB$@jTzM;QUU&Gk(GOortIC zaQw(teZt#)=W+8nuCkwzJ~`4)Ii+hn?myH%q_d9rQ;{#<4-Y-lh0&-?z;h(z2NKk<1dW*IQ9`F z!qpyee+?SF?fI3Jo?a}6^R#@wKlrdWzX$D%D#CMp^hsBDi_e?f?f6-rfp?)kk9nB$ zD)RkQ_~~c7J^0;2%D>>AL^@p=zF0o#ZFeq&bg|s;!#JM46#GK6j*oh@ye>j`^8XM( znC}lDJK4Q%=jwb2+ZE=^f_d%jq&M_!K67-MvFn_>fy~^aF zs&``@JfCmt9ir~5Jz)Y~9skGp15WECIuCLK7rt>k#@jU!uI)2$n~%~SX?FVg zX7Yjeop|3E>Jj4^dl~H~-;dTiSz1ri`!S8nkrA$v{F1fM1cLm*{`X+v{HD%Fjjc9t zo&z=k(s^{sGx^atkNwX-#n{Sr0l9fUsCFO%PW?i3=scn9GRjXKaGEy_kyyC7x_Qag zLHD))yFS9TfFqyCx0c=H_cFvMjL+8ZWKYT->F<2Q_u;(T;GDolD=2&FkhQz^Z;E!N ztt%gOdT75;PPN{r`x4_H^K`X8;qb3D4+%Z2@3b^-^A0NeUE_!PU3%Q2Y~GE^83K?Y;;eXfsNvgwUFeoiBu{#_sU{V3w2t0cZo*5CQK z?IVl+3GkIo^cAm*ML*2vuSdLJ=JI)s3xe#VO7eqdXQ_O>3%wK_#d$6DOm~cPv~MX_ zS}z{^rkKCp82gtuy5Mv{f0J(&yR{8FWSPJ7nbVh_cYogPzr*AjYA zum|aUYOYV?y^3KdF9KFC_)(jg;{7mVc(vm*=9l$jc^{?BFU`NlZi;;t;3lHGt}}G` z{SJ=j=zjB!>*AdfDp5GBYZ~3zQ;(P&6~Cis=LBAIxGv}`wvXhn-MN(Cy}BhI*{V24 za=`OTjybNq3$|E#JnWQ$+??wi$!`FE_SE8YB;+^o{>vrsn*B5BbhM-Umkh7l0a}+J zy;LQ{y9{`xSBm`MI!Vz!`!?_g^)^1{>oUhAM{(X{)g7Kb>-E5Sf(U#0*{Y9NnEjvq zEqm1Uv*fOEwYMwY^R{&=$t(3MQ?f1lJU@wt(>qBMXm9cO2hPoi5usW92kM7d#r|h<^^zsqJ4&QmcH=nOk8bD=_t}3)Qf|Khui<;b(;IzG@NkTOYX9bPUl_r6 z+^l`Z1|6Q=hW&X0ejWd(!3!zaKdq_0)R-vbUF9SM7@P z&CXu%*UvX+TVfnTJy8_SaXFv!{kLN-*XePW1KBz0(NS-&SjS3_L^zkHKXAD($y2*? zRZGdyS=ZGeRpeLeT$Rg9br}xN!{zdT_9W+u@?iSwxPjSsXk8vyd(4mBONREe(%Z_z zhdf>OnZ8e@bBo8oXKQ!(b%0KXKlY^O>yGhkZmD(${DO8zm~J@g(>-dz{ND4Tb`Q9_ zx?(*YVLGj+x6W0)!{Q}RX{o()`OeQ9jF=!wZqmapFSkH`$fvUWwB*J0X-f}9IG2}~ zzG8WKd*|@ZUtVl{{bzZx{wBS7{_^66YFS=555T`A<)!g}iCO-=rPeq#=5iA2c@yXt z#8ZuPm3sG4?}Vg>e4Lk_^Gq`s0K?%t9NsJ>s&pdDN*dMX+N9#H1tm`4#@f!d! 
zb*X{b`Bd6xjhiVY7zehQ2R-;RzAicIVdmpFpmC#mfsg0Ka~Z^-{g|@-f5Oq=K8*iZ z134oHv>reHL64{XM?I4KEj~Tw;l|_-8s5h9|H1s8E4$1u^IPA;sKtCu z>v6C3ho9XdD1_TupE^qqYQ8x`W zIMLg4y)#1VY~6Qzy6BYMKZ%bpGNs!2SGI4cchV+9UMUacht3gd9#p#~@J0H--CjZB zCp__!&LaQ62mIH)I@*z3TBu(5FAS0P2i2~5e*qlZuXmoJUGUYt)heUkQh`)6Aoas9IFKUzm=?Hj-9;rLEe^}^43_>jZT zVZ5U2-sAoWhybeEdLJT0VmRWPT|e&mU-bNq5B{WuPXG_d!*@M?Zb7=v|L`3g>M522 zuh#g`YX*1Ed)(epy{QLJ1HfYm9rTQnDvWp0FZ3P??Z7(X8%mFQ%&j3b^dsm#LwsnH zmc!ew9}0W{r*U`!Kv4ay^j)4y?MV9GB0iK`a2@*oh2XT`F@bWSDw8}t=qT0?$t!ez zM|NsnpMEbz;{o+5?T9@6Zcop49yHz---STRpy~xS*gIN{IFF+EY}F2zqgiiHtwU@6 zHUYUskx8CU^0w+#%ZGPI%-6aB_4-!8AA=7z&3D%kfV=O7lMeTshxLvY`z{^%WcojR z>NC2%ck?w?pZ^jsYgV>G8g9Qe028@9rJ;bjro6psQ#f@?9_Lmmh_!NY4=8A;i}Xc)kgQt={3U zflra+xckZn@qRGy{%Z&j^Z#$P);f{iW#f2R2fWso2!AWWjZPO(g{gl|mh{h41-jD1 zAqPhmwx`;08hFY7`-9)w*Swz{29EWhTyo#_VB!99%T9N`-#=#H`8q!3tB$DY@YAa! z{T}xxE{gQ<_0HS+m={c=P|J_<>1V6F9DPSe?{a1r-(m8XJ$2lC$zOTjP~W4Rh8z;# z+;04VQ+e6WSl{06@n!nEH+j6yRh841q#tcjN7n4l;Qup@XFVGe4(rfHH~Cygcsl(2 z-L?k_g?11>!k_zok6(Y2`wPhbuKqb z&vx!Jc(n)4fmwL&A?5e>&hy{9+|z@f`VK_4&f#<(N%v8NSL=c%n{$9pXK0jgkINmaz`U`3f`z?bOpFKKb{{A%v zo}Yi!csUV#Qhm8Uz;|`AU*q}jTxot|H54Hn)A>HH#wE>n&V~39s)v zFIhg|*zV*Debu)g?|ehV^7msEPdN6_#%f=OW;qP&oqVR#ZYsj_JW-Kv#9QQBvHrwI z_;l9$S$f#zUF$g6D#g28%Kl1^51M?8JV6A|FXQfGo@(*h@9qAS$ICuQkGk9pgKh$f z?)0S%man$k;q`uTwq=FK`!g*0d!yM-hv&PX#>W@D+>uXue%*&=|6=+C{@3we?{rD- z<`@Zg1OAh*T(2;G_xXp8G5@#ndoF!9(d7Bd{ZQ}Lm-YMB)y#!_H@d&xyHQd@pP@@% zdd2YT9b)dQ(axC){i$`N@;D%UliL|Fo_611XtA#3{AfJ3$0E|h&iA`6{hXD@c4vje zAJsE`$@NHj%-dJvzusjV==FuD^ia_4Y9@cLSL59NXve%hgGNR&5_((Z&dqo{*+IVR zowzk?<>#PUCiZc%P-G$7akI|0NIWLoaDQ z)E(`Vt&4ntzj3#}OCZxNd(`bnS}p#gACJ zSbTq{!@cDB zwQeUlX|XCT=}TTO&2QCSdAl5QxB*UV;L@Yc?}3{#4~N`xJS9cV z&Pj`}n8gM8MStP=nZD?Jp1R5B&iOga;4kOh%&+K8ii zsP!q@SFfZYWXR8X$FG4En!ijSu8#lH;it3C_xq#2NKa~hb0X?Dpmp9{{(_H7$#2MQ zSLCN%VfIAW5d)9h3kooPthmSgY+YjY&R!TaAM4=mAO48>wO#{*{#a|_^)4p)eT95K z{*i#2y~E(TeHg^Ks$Hec|_$E2f_daE}>D=nTa!M)XalQFEFQfj_?c$GfM_x{2rORMWZ}3xkRQ<}CvU zg}-L_)qgbJq{lzu^$9%cuLH~Dd_;)6&PQneCjF*&(Hhr=p4@K5B(>Y!AGp>L>$}<1 zXO|->J-WlnZG6xb+Zyou4G41g!N_-q$IFh$mO5a^f2i+_BLug1hDu-(Ywl6SK%Jsg!SRDc@4> z-t8*o<93RIm{)Sg;Z1jgJ_5G~svb_d(PoFDtt)p_>JBDxS4v3`hIq4nG@;H6&3>(l4(!pnIIHAbEuba9`i;09$cala7PwdZvY zTJf@rd5#Jw^Lx^Y*SLEMc()?`7<{0a*Sv=M>JY-UF)x+%+<>2ir@sdNF8E#W%j>zn z`|iU04e4~uSBEeAigL*%!1GzpeI2ms7Zbok{w%c~t9l&+9P2X;pY^-{O8kLaYTlQs z9pb!$_FuH#q59g+e(sj>wjLAjqv-y=?xnFG)KSEA__S-- z?#$=wTVO21h-cSleSBTZ?J$OYU5i@N^X-P;xR#?C=zq+-#p_zpUs#T>XSK=!N-M|k ztiH++upD2P_eObcm7`yF@2iGa-}lpZ!Kgp!hQWUV zWEa2Yfj(Ld8eKAv~4 z@OX}M1evOPxaabk{ph*!z2v+$qsFrv(C#Kj`Tc+|SUuUUOWBcY9|cpw zQLkeuHP`Ri&iWZ9-GEL_yxE;AES#-d?*0pj`J!tY<<|jsKjjZD+p^Nqb z)a6F=CXSnI7qw6NlFJ|M0FHN*%dJRH4>?`RpB{Jl*ZoHEVPIoFpaFj*jtrw(xzmtRcp!aXtU$m14OY(UV443_(eoDAIN^n0`fTR2`H6J=% zvfmGv@;y|Vf2{_sEbpySzW=q9?>(h_y^(K4iOx?K^67iJ`FCUAvQ-)A^Bs`0i^Z z@%5E~iFX=;xv{|afctE(Pn7t63qU1D0~7yx$$a}t`34L5G=6pO_Hu08#ZRqs_FT`? 
zT?og#LF>Qlm(&kkpo@I@>YdyC1}jhfp7>b~^%V8Qx!Q~TxUCL)&(SUw>$R{)fR`q0 z^}?!^OL=2>D(nW8TfOiSZ~tcuEZ0km&U=>V%wBN4zeuijI3CjbzPi)F`ssXrnf~-M zjz@Auc_jQId&jGhoQm{;huuI(XT5(+pnlAED*U}w-&fT7dpo&1RFa>!Akvb%`|dym z;IfanVUR^Xsw7XD9@h8hIi65XD#@Q%d~=!WbJD+*o_V!YE@eXHPS#l}_y)Xeuf~Pn zV&Qa`>jlQM=8X$?MZD{YTtD1w(TmShfh^Lwcfu!lS>5YJyBAHH+_%s34FfstHOeVg77%tpV~pUB}g4S4L!`b&Dr`hHrF(h3^}61*do9vic^A&jweRU)ye8l5H5Y zAEXCeBN_PMIt$mXGY|ajvmeS&`K7zB?)W*+oiFJ1yncu`dg@M37VqcWSGVkWxpQ}W z_(=l8W%auGEqc5h?E~ie_eK}YTFfK0AEfW7lh`g^_RV&5#CIF!8oKw`w9}-pw{#wR44L6b&&n$bau@4w z7M~U4-CmO3*L$-1KCQ~ho(es7#PL%v>)hTO&^1-#GW6Gsr;A^-@5o85qey;OkIkru z_LnPIC1XxX&%mF}TPZ*B$j@IdJSmwBmtN)VlCEya^Lq>Zi}j#J`yTvfJDyMe_m}c> zMWLKOw_mPCB3#N13i=O*yo>LUhmjIKO04kd{h?-Zm*+nKM!;1&`1&5?F7_cPFEjDR zoW~zG{uIwu9>lnqtva+Q{*cAD>|)A)w#wU&cpoXY|Eg$z#Ctif8$QvQy%6?Msol%t z1l#j@$U!r?(eP+|O=S-q2!34}{EBgud?9_L>$O#HeTJE@{phhz-nGI9;OvEiE*D6D z^B);M9x{H^S*0XB5cLi_it}ZNKhC?I^!g#Z{9z0V;j(qFS$wwcRr6(kF&`x+TlY$y zZo>s(IGM=sD+AQ#@gMfuoq9`aE>zuQh8nyA0d*=b!w{gM5YA~zM^d&<_iycX#= z*oF?u$pXE$>J8`5tTT=_0{0CmkN5t*`v}_iXINgoZgVu?p(ME8pY3$H=Q{fW{5zw6 zP?^B#`{7y-%GQa0PJgj~o&;X4bC90jMwmXld3?{q+c)|z>$P@4eO)eSkFtO0ymi*5 zzNd+AmD8WkmsY*9h%ZIG!16W}<*g(K#d;h!{LY7B{40nr*6(K#Zq+a3ca_(p_+1!| z8@aq(V{El`$qo3){8L|x`rhmI8tHtcu+NmOa{G?)uP)$M%=->IKK2vif1#c29Qun< z9`e(1JS^g2`Ao{|v1OCDQ=E^@b`DxNz0J$Z*6r}{hhjc@o6|iEBuuDwpfcvy4KL}w zzc4=FyP=k!?Z#0V??gJjUJwG;+E1=ocbC-Lu_Q6$TGYXN_}{a6k~ zS^Z4?zu2x%1Fybo-I6DlFXFx9Z_%;C<#PF`x3kVa67G>gJ@wuj;~y@>7whkGUao(< zw_c)~cs6}!!Hy}6V@17ozV#}?4O;!Z|FE2TQNQXwN1V4U=G$QT4q5v#-vYfH<9ZQ( zm%%@6@WpuRA9p*0DTj9gE|w-04f?vSg z7M;{S?LOszlsC4qwiush&9n6k>Ur^jd_MpXouAQp)47KoUi+R+d{2}0tHeA+`a*oJ zUf}BsCqiZDJd@rVCLHrmIy>`sg88mFDHNVAyFLNUW3!VkNBhY zhlW1~o9I3J#?8LIRe{w2^r)}PO?TL*YR}0_n2Pf63I8MRi(fsxq>g;>cN*~>EyJgL z&3HFHjq{A@QEy+(_qE@tdA|7Av*U!}$@$jnjkL% zy#2>~ejqxV9X>x&zn;9?J6Ia^8GB}fA=0|>aFn0zjDCHm^S`;w&!K6*k97Y;Vf?9f zAOeo@KVFDueK?Nj{YZuhnTA}XpLTlFZ}R@DdW(M5D?JkZHQCN$TKS)i{4tN!K3t9m zg^_Q<*G#_gtocd*e=5;WdWA1N;`~tii@p|q-{(%>8}pQBtvJh1J_ug#C{^rLFYu@H z>kUwNrXHO$IZ7jc`YEqx@qM8==ox&Ez~Hhi$K4Nooj&V|Amzss;nGjn-T(fz=8rvn zt@)Ebu*v+Xn4gYc?EP+h)fvZ^pFeVao^A2|ovnM_(m9?TTrf_)X7NRR=;PT*kAKXH z!@iccC-vhupdQWG-!A7{sy&!*4f0i`FQc8Ou6FsJdN}ws>gB25yKf` z0rgh1!*yBOYsqcbnSQEGee&~`XUX@IrF4Q#BS&!=6Y%{)dsB@{3PsyY1SxGMQ^@ana zj^nYP^Vhi040|M1`42?-h4sXrDb!Q+=KV?aW98tQ$^UKbCVr}Yrd|j=_<+NuQ(n$_ z>%j-SUDK)1gAdjXQTotY^T+mu9-Iz6_~=_k$Ny+OSZ;^nJGm#J*VJxmkEX5u((d>& z^@OX%+-|8Pv))gnFXG)C^#`3tY<9T8IgD=247na)M~N_f^$+REW+x6C!Zl$R5OQ+N z4L^qI(%9$9-`$Sz*a7cP(kImpx3ANX!}PJma<-u)XDm;0CjB_|u**Ru@%`ha@QHr* z(^F@`TaE9ExA1!Z&^dSM9ra)7W8rUhaMcog+3gK;E_9r2edX()kwm{CG=vSgs^i^$i!Wz=`PM^k8wR8G-(9`SX>75M8tMG~cdS?pXL2|jf z(ewRNv3qiU9rk*+>;dP8`1Td|^O#DA|M-5f@jI83lHH^BrkuQX7I_!GX7X9XSGId3 z@3hPQ59+T`uczcidSmiZ@4wB?=*MnOBxkc1e&H;12|v~o|Ar-ITQ-@W-+NuP!^6)x zJrgj0s2^yz@_aU9aoRQ+2|dVoL`QsgGWWda8@bYbz5mX8?HoCV;m?ET)#V)Z9k2bc zKPwW=^M_21$t<^dC8t-7g&DMPkXky z>~brIVdAMIPDgdwAI>wKbg;aI=kQk~JvTdE(aH7XmK#JmbD`XEkL5W-=HPnBFQR^V zeuRIC-y40x;Z$Fpqt^N8`{KNk_DfW5w$APG?B3HR13Y(7+{Yka=%!ZvwWU{K50f(7 z_cl3ecDlY~zRsfDI{C@oXZ=SR?=-S)1s>fG)_LFa!LzJbPVhzcDD@2GoZq=Hej&^% z48vyy_H@3};;Wt4BZPV!_x_^x)V+1~i%Ov!yi0BMCmd^ZU%VH>Fv}_0GovWKlH5o1 zkh5zHp5`-0`cvUk5B(Y7n4fO%0sp~xS|ID6v}o|z`+0xYL&@CxTm5|+aFjRhlUL!Z z|3B%?;rF~|TJ_Vt3|nuUfxen`{4{GwZ;}4f3?GSfFmDv{rFA>Kcfx-B)L-HKWt3<2 z3O}74H9j?C|DssV$uF~6+9`iu0`)Goqxh`xS?jm#Cv^A4`Yp$e z1?OPaxm;*nL+>GIUoD;WbnSPDKUwT=-E-8}6?m@D_!$0P9*%_5`e=IOHREe~_@wzo zzOwzYI3G;@P%gCJmCnXEu`2etW(}>a+wNk!zUX1L=j{mNy?^W1_}-lRN4%el4&rae zw`Qj+eD<3QmS_(op*}vaoC{9_gFlC(o*>rb?m(-aUw};K 
z<7lkYpKm)aNmtG%|1K@-_l^P|h`%2sn4T}}uaWQgM09JPX6IUK-)u|W+xb0CceZMS zhoAHC+aA{sdcF}iWAkyIdc?+c_%^QdLI51;Z#j(XANSDN#`TrJE&2Ge$Mxk)jO(E9 zpX9jykn?@fxIXOh_r?7EoZ~w9w_sd%Hn$nqUGDO69r?~Wu9xuU<9f(xKCYh!o(1DN z=%Wap|G0i(jO#Ao?Z%1XcrHCu?fe?kXJvg_)H|n3Q3YXaD`re@?wQYW=gd z@9ygXZ+ASX*ezM_N3>g{Z`dE--Z;0;1VsH&_KMzTsA9j1j(UlFe*5DcZ}XykX3ewl z#Pn=I@1Bcqw|)_Pi}6h3o5sT;-qEv;Z=o;SjBlIRZiVsf5q=+TcF@{M?-k|p2_~Fv z+)_If<-+ND!|>(f(7irRXK}8f2v7c`v#!S}H`T&8Bs+V2i+_JX_f=^3*TLfH@F{mQ z2sf?`dx+{9Hf7`5ufY$rN1u85eW_wTwshm#ngI->-M8Y`E%0aHPp$Jgpw8t?Bc3W^ zG2EA|zC}2mQ(O$^->qW#oDmJfhYCn;fqw@6RMhhX!mNVsT~4j@`ZJz#SuyLjRWIT( zy5Y|lo+u~iWqnREzJLc&z(XqA(b>K}#B&<(!ZVHXj2}ysPkQlh!90NXtJ%(HU{~e# z#wwSOrOq`G&j#ROzW;)GaqiT34f2y!IUlZwdIF!-bDX)czPOC1@(Xa6)-61Cz{i8}Rlff}dBF2e#d+4T zICrjd0r~#P$GqOv3w)nOdX!<-Pxt8;@$>Gb__^KaQN4-J@i_ho1lW$cf2i->r8hZ$ z$ctjXxT^vLaKe*rvtS~`n8l@twX_y_N+%&M)EBdvRZV`kanz zi}w?qe^)vA`(C|KuXxXPY5DW@hNApYZn%6bIpnxny@*_A`@I@+AO0oeFzl;IrTe+G z3COecFZIJX*O$Mu72loI`M*W-u&7^gU`M;zU((^_G0b1mH*+jKK1i4z*Aq4u4cT4 zrhDt@2fRH8p7Z(HFyL5HwTsZ?`%UKSorK94$8?{G^s|TQUQg{F1Gjw<#%C{hzp5_( zR!gWZA9j8pbU5y#uq5g~`kXJ*SHArFEr_f$KHiJa{i;TX)30-Vx(B8AC|Uo%Mw@8A z<169gccc6kAAuAu{aBPgW^dWV|p{Y1s)LpPppb~v5gqmCeb(eb-#z;2-I2jn_XPd~v?9K6I*| z_5-s!PZ}J@bAq(^;Q7*D+*@Rr{E(hh{TeF|E-0^(Jm>UA|5N>!>VM7TX@`&ZHZ=ZBtqeJc_d>>3 zx}55MweGp<{I~X%)IX(nI3Cx*hw1SBoY^J$xl8dy>jb%8ps>JcT+{g&>49eGVd;tV z)6qT$JfF^OY2GWml_b8;T6MVe*1G*jZ%2c}@f;b8Zg#9NKZ|> z_)7Ba%#Qb#UBAukwjk_IA75pUu^-Xp@4QskT3F}LrN?uEcr*cYwAA!&z|bchJ`H`xaqJAD8p~UDjFq#2Sg<~V z|6B2YDCC^&Z0k2r{*BHi}j&1__ko6r( zEr>mtU!DIvepV7c$6l-#G3s1kCGqclO5QY{uw4vp)bUAfd$v2HCcS&*=a?4P=ew*v z)U&67qOmPAL=0OyZF52=!fZ0E`ALO#Gxi-&LZi+(VG-5uL9*cf5i9|TrY1F&F2ikN# zBG+5J4p+XPogQ{P(&H7}8YiUmgvzNd_x_{!T<<*Kc+b~f7#Q{W&EzwbHXpC(^W2&B zuaJAaTlD4H3rn3#%JtggER}rvVXLpc-!qKMgOpYmr3{U z`#fvGg5BZcTP5-Dh4P&Xt5=N6)sAPukCt5O+_>9MG9$Y&1~ z0^>v8&qb&BS`9iGHhnZ^XmUO@uKdmga(&48Thpd-$S>fTgKPHr|p?z0v5 zNi)r7ojvF;-hMns!>HV@lHLISHy~bib+*psZG2<&pChk3KQ}sEwFB-Gp7Yo&LsSS6Nv1cITe3U~N500gxPOLpQdmiT&(bxIQu}lK*=FgX zmtJx?DZhVGJoj`O^=7^;me1R77{5t}#(9oA3|H`)F1)1n82`eKf_#u3$;Y0Hd__m^ zvQ#gM^&0S(PIk8TBe@>M`c;y@F}zCGdV%J>(*R=m!mD{-x{|Gr@?+d;%>%u_^zpi- zYu=F_b~$B#Al=QlSE6}WnJ>y$9N(!rn#t$9eO#^P>oz^l1wH2|SM=xeBj7Rp8smWY z-ME6ah8){p)Kl#l>uOV1xc<=k@c0$pemWnlbL+DAweFQZ?sPVnePSX01MCg72go6m z_es}hYd{85bUszbw!C-~P|fAfBp9`yQBPizBRXSkEq>r(S>6<_-(ohV?qyFW8F+YfK|MFF4uTtL@@pT^{H0;_-++S=jUzW@PPdOj; z+aR(?U+O(Hp6A_B*e_*&ehhK+foA@mIG?KVbZV9N-CZ(QaBIr&08 z%KftqB|hAYxSS840wA2mi^fLg@-X$X#*4tauJ|g+_gH(C@uiPB9p}PR?bv7eHO{1$N4`tE zzou31m%1NAde|O0{1pLrrRQ5}|C91U{!k8n5j@NF`rVQ5arc>iHx4ui|JVxV^7R5F z*}K4O*>>smNH5Z-@k;NLrEz~#^-9N(2~O)rI)ATvQm*Kxu5^A+QNh4vu5NNV7OiJF z+wiVZ@JIdhfMw6`3FY`#`MT27#bK|;{3spw{f=~Zybrj?-*GAOnfscWpJ;wZK3)w$ zO7He}OtJoXh9#%F*Sf!FgZauw_&VSxV}3GuIM%rkPgm`9!Dx*Kms)%p?+lOiZm{q$ z_{H}5hq|vijIt@vEXRc@?&z=%dK{}Le~9M$xpSz``g*> z!JhSgV%WQy*Di%ebaLE0`@KZ}zS}VHntY)SCSMowFXYYsw)E1HdPjO-%rmgupD&)@ z20MsR?Wy)Vm;R9*nA?{(IQ~@UTw?ttUw4@Fbjrb1_^EwAmdh4lxuipK(M;aWpvBWq zkGtJhUH%S>*EpkdwB3&p3i;z5G0B_GFX|pV$7{Mt&_SnnFc#ZmeLkMLSlGTMvrGp6 z*)Cg|zmHfjUZ4^+_{Dt$ip09E3|(qHrhKJ z&;3>G6hGm9BzdR7Z!-C*cHEGB8^U_$M*52S;&yBS$d1&$M$<+Kw+Frlyjq+-3v7tL z%se~KY4y6s!_(j&!$W-@mR@E&Td*8YexI4I+4nWbzj8YBQL~ognGZdFKhsf_JpCBj z3F~lx$~)yCYrmI^rRH-S5F4IL`08;nu+KfaAQJ zI~k0p{2YUC^PwvU&31VdbYT7R#nY~9M_ zcZX%q_i?I~t=3+e2T(7Npgolyi`Tjs`S4wc*S?PUS*_d<<@))5t%I>#>(?RQ5;cg4k>qpZ`x`d;6H`{ob01qk6q{S+<#1PhC}*^{HY^mI{g05A4Kn2VfSr` zw~`#Ua=N!80H^g=t?%hOF6p6@4#!4-({~$tR&W#{o~osjU+jsM*wL`>HLJ9OUcLi#~e=k1zi6aEzSGyMun*VsV8N}q_Hkt#raiLZDf{%j)LKPGG&) zAZBShFYNQ=)0U6tLufaQxq40ZTom>K(6gUbFB&ko>P4H~Ucfr?0(${)ZZA-Z2!FlX 
z3m0uPI;t0KaC;%R3#bO=;31^g7ROcYZ!RUP7r+|*TX2x z>;&XvQN!@fPKb2P*QXIrdiDKR<}*71zS#-z%}#*76>!Jko1O5y(cjDo^;|r@`it$kz6X$vuQ9J_>$PeTxNoJeyKJ|U=A9;QM^iSVjec${?Uf*9U z)VD0Zlsk?yl2gjp^6!q$Z1E?(Un1V++1CY%@^rp(?DF{b<=Ex_+;aSCv7WKND?6V3 z?T;b%8n3wiiKy0j92yO9P3a|9Yq-Ph`DG^Dpg`y7^i+?A~U7R4zy7M@F9Xa+_`#V87W1f{_CeAMYHJej5*Oa=kuq?H63X^g3Kl|0Nz?Ot-gV ziw{1(*ZI6`Be7##i1nE)?te)?R+3LyI_30d*RqE0bT3Qi3iyr{o3z@o*6pYmmt>zc zJDza63w9Qr_E{+>_?HZ9JYewy8=Wy4_qwB9CZ{bqnE(maF5z?hWPX*ir|S_r&rnJ3 zu?*#Mitq#&*hs6N{v+mVoYQ<`(#f*(!9{tfcCEK{?`qf9s?Wisd%@{{Z4jZc#L)_ka3uRR?*jo(E&skfM)ZeZih;eU(y<#r`L!Y|prz(Wf5bPYti z`Z^uuW*_3&@7N*Abdj$c5Z}1+U*a$6cf*G8)SqSeYRB)*Q(smrS9^R+-+QHhAF^V% z`xe9Pf&70T;FjjcYNKah<2v(~#2Y!lz=2R~x@HP4!Q+!fnYoa4&k+$r@F z{v}!HE!mHNibl#;F<58pk<48sStvq4-ChBLk zJ1pAPl_A1O?@olux{zXVPh0>bb%jNc)1dQA^I#MVIDR-CU%AlbubRthe6z z)b|^z%bZU-Kge|#t54LUyPsHE^i*tALHpn1@e?4+^jr9B50$$%{Q0%L|__Bp?vAP3gvlaCAo#@lF674aNp3%5B~(xto#fA#LG8mmhbjV zb@^2mlRn`II!`c-{P>p~0~w@;>+mFfXph5dGKPRg91Se2$@?Z9te1rK>J`ulce=sOLzB z)+N}#Pa@pd=J%P{e;C%gHnI=%eTmQm)eC$aRsOjTAQPOv^G^MF5@D^Uay=b3L^|OK(p|?NIKA7Q-#cbK5#I$qvmQF1FTFl_xn(oHn4SRr z^tpe>e0whV3)d4tr@sF(8Se&EmtAQHCNFb2oZ8~|l9U`qOus)*?UFe15 zZx-L3D$d7V2EAHGNLQ09SV!$OZ^8RTTn8w=D~GD$+=$!bIycRA_tC<*rSr{;>3r4X z^UEoh{AyS3u0pvwXOs7f_+Cfx-1_UNkJiPg26x~ot#Z3!m7w@q0^!|gs6PK+T z%;_Lt`90n2;yaw)dv}@N=)21BHCB7Ww!Sy)b}i-Pl_LM*yI|Bq5LL*{LC4qTdu1jU zvA#ukx@^^q!58Ih7wW6~Gc2#&cfH!JdktE?*B9a2Wbr30p7<87bG;Vyx||j1*kF8j zyRy;e1Z&?e+p;gmH?ssk*hRm3{P21$=EoaBzt^Kkzw=|MdM~c;KI79XhOdZc2Jyx3 zqdkWO56V5F=Vix9p0%F4SdN#Q98-_c%~4q3i066W5x(j&M8Gw!asATMr+JP0lJlZm zQ?*mBzh)hKlezcc8D=e#~v~ zFiZS96*eD^)BWo|gJ`kSt&Ry$p`T3w5eLbsZwbxto z%bwMJ2AHqkg#OdH((=Chp3a8TajD}O#yGK+<&ArJoNv?^4*s(p$+@9A{=oHv&!2~1 zu~&0pUm!ozCs*JH+%WzV-;f9M{WPw(a2-JQ*3do#;5Z*R1HUm5=L&`I5)cAMyUOrg zYC*eC$)BXt&i!5E@xtrx$6o^ad>+I8ytDM3?yo}@ox@-|UtjERVVCGU$x`;%cN&LI%rd(h6g{ERw%)W_ub_{ATcXAH-~8TgG(mp!JFWj0QtYc@Jvz!=Y}`uK_S zMlmiio)Fni*OO}hY}F%%pY=Yogx+L-{_5$?GkJLn#}Sujorf*zyFu`$IR1?Oau|f!Y3J2)(tF*-j~?eKf5(qoF(JwCy6 z-bu^ugy{q7C6|rb2Y?KY>k)PMr^4U+soyt#A443=>$%$L(t3^7AtrYt5l-U;=an{| zg}?VQau?~b?oh!l1;clRu31X&aJ<=98gE`Y)6y55x9tZW&f6#_lVhY3`ZVS(U8o*Q zuEck=w0|J`f%xwz;eVmt234_=gMdk_Vl~p}w!9{ORz?cZM5%Yw*|QhZHlO z^=S01W^{hes9k=xM5E93wYOs@!d`E^yUh8_L*L-KlK8T9-XEzKe+r=4I`6Lx|BR(a z|9g?puwM}W;YKSz%i)O1cOieM5U+P62*2*@Tkys5)|bkwS$J`IKUT1pSl-$w?;T&) z!pm`D68$XiH)?0lRlEG}<|zkt^)uHPm9P|y{7-7^G|B|784@*9qmEW-bCpo45`%i!HxAX1f4J7A!uF>b? zN-^KA|Jpv^uFqXu$mj2IDc>;4JB@Z+RGyQG`(QskfBL1)ydMyr-$Pj%{)tw#+u!*Y$vk*C7yM%Tv;OJRhX{^W zoL8E!b)fFhXS7er=kDh$IhhNQr5$JS&wD(hQH^nZPA@99rIXXhC8 zPGEP~w~ZaB2wWxk4fDqyG%p#y*?r;5@k35nUUgZA`C6Z0zo;WiqwjkyBg1s#KHMe! zu$#6bTuGjaco%fSjUuMe_oT(Q@=^PG|CoW_Sp7JIz(*?<@yK@wexq*&e?3fpd^0*U z9Q)Nt_>I-y$spprV>91rgd2S+qw{fVe6zO$^RYuURzGCvtCH}CUwu?Cujc=YY-`daex&1o6`C}drxKYb@Wgg~_@y(y` zFw$l$pHIxjH~-rl?vUk+{+5^bbe`{|<@*;F9^d@wJl|={2ZIoPIa} zI{uycRzJq~q4D83|03sfzE|_VVdSqNK7Hm8{z8BHaw68nUH-Lxp!I-$#IFGy$9ZC& zbMlk!K?LB&pik+9|4GsZr*iZ@)c#vMUgs~Waqqgn!_SXvJZ3Mqa=f}p_j1oS?#-K@ z-_SZl{vO{8Vdo<=>o+&-`KXt{ie%u*%Pm6Zj3h|cH1_G(|IB7mubD2@D`w zADoTuGx&ao);lt-bFcg+!y`G+cgngyX3;p`^Mw77{X}~WRfZkE*4pWWgJGRw(88*R z>=N>mE?aenr=uQpv>$i*7;x;_FQ zjE}PhX7;D?vDd@n+uYZ8Yjl4Z-zl?t>brK?IuEDEoImMN=T{|h`!!WPWhN^<+pl8KbL-yp6=o%FcZM{+JX>hE};%ZZzN$@<{WSja>7r=0%oKKIA3b)Vx3 z%O{=W3-vR@(~y@s!;pWIlX%x4my_c`-vI!@slBSpTuvlMdIzY#W39^x3bFP(?D;64 z#?MPVJRbcno%Qxf4|{*^?)CKaaL7xnN2@N{0^lmTKT)D1eaZWs z?p+l19k&c6uh|uc4A0!CdHFh4&n>}&Uby>Uv7VW@(iEfDZXrf*l{TzF? 
z#lu>M(|OLb>D7n+u%%aZFGc5uqz{`U3{L#1B-<>k^M+NtAJISz-;viO7Szl{b}xmsUjJJ2!x4eYO|Jn>EEFi02qQc2u??1uiNn>%El zjlVN!U+phW?b>e%>SyJ9a9UsBxJVNED;L>$TIpTt#Tmqtz60bGnq<4e%QszK+Z=)3mOwa-Ssg;PgGvVt=su#rPxo zGJQAgMbB5%7eqxmzfgP!1@&Qjlg|Hu^NIhM`TdEv$?qFb?|(>s&$Qw9n2WRNWtZ=& zHRRisZ_W$r$SFDJ`kjSuH*{wI0pzP9=N~>VzW<)+yKEHkWVbyUPFj)Z>4S?`xO!`2U~z{3~B3pIx5+;rRSofzNsmYwQ}1>X5tD z=Gl51hF07&bK8J+WF28YZ^H2>;xRZ=LCSum+`Wmg)(II;NBQ+}f!4Xho1e$CG}suB zL%K!wP~D=F+#Uj6vrnQQ4kI7wkUcgHx_}zvgujR?dtz~a>NUjq_dT5qvuPICI~Vx( zv}x~n`}w~5SD)VK|4x?8A7(&dtN)+de*5mCTrOFkCG58-_niH{MBgI&E$lnV#o6q) zhu@Yw`aEEYyg@x!N4+DU z{`vPQcWd63+%2UCu@C^4zk6Jezgw(+H(9#$;NtO&s}wEyT$~>A_;&T87i;GqOb=dg zzIrg^@tk%;$YZV#Lmt0kI=^yx{4JcP|BCIOUTc=*?^7Ost+;N0Mq5L<06+PCghsV) z1aTaolfJkC{fOyWA6o-8XyIKBR~vO-`$EG=xA&{z?G!yo+(%?^vR`!GP37>u*aG}P|2gqD9q)ZHJ}pIu#_toLgLSP1efj#$i;hS00*>d! z?~_O_+3&uyAjh&Vb>Cg~0O5{+FW>KI^LJUtE{Sqn4cR_%9Z{MmodO>i7M?v7KNlnY zdJ6pBiu9L*e$|6~WIm41^jZF=&QiYc6YhK4z_ru&pDe(oy(k2({5>h2vzz&FmnY&c z*9TFh{5@asL-@rv((x1oN$I+GnZw0;X0tQKqcJZc|E|dDl@`JBKe1wA`CwUIe#K6G zLTPw_>Q}Ld8yYtfauJmDt<8uFL@RjoR zT4|l~nlJwIJJTn?CbCrbK^Uf^zGb}RMRFpa^7L#$o{I6mRgAxk*l6E`Tt)j!4}1^O zC_gGk^T)LG-C^w~vfTe#&=2MBqibEh_gxL?H??}WBqK*(As(7A1L7=y~J~^+JpS2Uxz;(KJ632>ANJ=3w_7Y>@UPOFMOA;SNL{fe$GMnboBn6-idDX zcOVz>`M@|0IvAt>{9ZZPW_eVLrCw~6bghRzdWCZX~&YD?%kH0jNRnE+kdr0 z_vMZOh3>ar^guom_d&(p8Hv(?XZzeMzu?GDz5PWbg*(*DkOCclod@_pF59Zv7< z>fHt1dzb)h9U#-;H!j5wIM$o(&=~jc$aIe~cJX+i`8MU6eCGK2Mya1a{`-Y_RIlS9 zJbx1YWC{Me3h>z$UtiLFB9&L>$J{NBm-11^&*|{BUM;!Q`;hBnJks~Z*SlGhxBE2e zPdw`<@CQ!cpY8snhpBh*5BmEpw@0AN8E!n!zmPW{=JIzAM6b>x=v}48l=EA9T=?>H zbCZV7zN=ox&k5kAKXed(;L`X``TA=;yuLsDQTMxN!VkIB_o%z;5g+{_>u`eeaqenQ z*L#v0CsdD9sODCH%-!Q~FZ;2>q!zx;2Mz;{9N|2#<<4x?cZ&4>k>G`Y8ky?IKsh0Y z8kc&5<>T}Nj*a8dFQKjH@Sje2ST6H%Tx+>PxshMzq=@IIi6`jP{U_lQJvyH;jX{m| z>hJjCJVZF*xxIqbA2QwQvzi#dzZ>0Wd>ucBBA#-? z_mG)NU;TDu4P$&907x>@XThZV2KSGl5=PH<4{LnYyB)P_Jzjc<>8!W%>wTD!wUHl* zR!-2>y(98J9_h*-=Xj}4`93pK>5oMGNYKY|rud%8`db{&`oPO~gP0||bI|l!cCVjr zYgU{ft^cGi`ng1{&r-jV??W0tcR60hlm2$`tarP3kkB^X+N}-#dIA0z(vAOx@=YF? 
z8Lp=MqZY(>7UTTh9Ujj44mjg`zsKu4qWUiAG}1}G;?ozMU*dO}pFF2D2zZW53I}eO(?W(!(x~{T;fm)Mtow zUO7GDeBQd*%adMTA9|@4`a<@W?5j%hn{1B!Jw4i!j5vWm^i^(p)bTgsodKrPH9q7& zn2)Y!kB2qy9Y&BbDzDk`%Lb6nx?I+R-%98ENd_PJR4?88RJqER&N^MHUnTiT!>@KI z^O5`;0X>z(`KNm6ePi0Yb)?Ha(mLbTF+{@YJmLE79cYC;YQ^Y^#zUndL=6HVZc}Q|{o5M*i zD#;IYFeba$rd|2ELG6IUmHEkbuOq2(nPmfC?=(QIm$O|55mWoPMcX@g`|-25zBj4DU z9sPePy*RM)a|XB6_@n+Hy8Ao++~!Bw&bpP8ZSnG`zXyS-_DMsR%!M8rh920Ds3Jd3 zgCDa0WskMf13So{_)eb2wK6}_!_If<&4J#PMnJK=L*&2gk?6PS5g&)7KlDyM+hsG$ zojqjr+v?~R(El|1|6^WH@n83u^lk+4EJOcid$PUK*{2O(-oK;W8@;X|g{P8y%n+xu z-o7e7oprg>Je2QTl==F4JGwsYbdsKF5K`pJ$u{unH|dMcALjdgynj^8_X_!W_@vb* zJ@T6Qk`uz;i*nKPf3<#(m!9Q%BXek7Nbd${9iHh#rTFxvH>}*+hQQP3{xtM2K$D?s7RQ@C+9n>X zXbWF2#B2SmXg`o%&S#4A!IABVgsZ8XwI1fYs5qaKozDKidW{5pcfd<-na=tsp7X*5 z^DE&;{DOH^#A`mKb-HF{pSO2`U&i;ZxOvv6L^`^Djd?#Fnk@uP8D z^bax-t|!*T^)A&g%RziI`cWm>X>j^3hu*KzeJ|Nr(hGVAOL~oV67e;BxXkbE=-r6) zSoHfxteAxLfcmia`z_y-L2uX>wW}Sj5$_bS9J<=g;m3HL&c-+w>o(H68vo}s4#oIa zNd^o7^(;%2-ch~E^BdVAvNIaD2c01wJvVu{Jl|<{xZ==wrSUp17xiJz#=8Pv)KBA6 zW6i>H(qpcNYcXHdyrVn#p?OlS7ec>QY*zq!Z?*GHa9h<+p7D6S6Q*@m*}bASr!(qZ zyD{>GeW7(j@k{SNr!RTCYaKOTUyl0c_@67)_oW>s*Ns?D%-cWoy7av4Ce8oSmqH$b zuB=Y206Xbvr#IW>19>(|Ez;VLJ>ki?$3JerG_T^gK+0qn3&+~e`< z(?|#YP54>+w1;(`XBhdmB1&?2f3y?jmUQL%_iBbbOkeX7^1Y65E!w+`_x@dk27VR} zcYn}dd(6WF*LcUzddXGvCoiUTo&ogHY5i3DnR}LBVtnjE`Nq$i9ggz>)<<;TzsKXr zKcZIt+LIoy@n{beDe;^@emaE(KNRtrm(PtFSkjn!)AUIf=w$xt1@E*3*`KN>^%0%+ z)%1N+PLC~L;stOY&gfig;ECqTdmsO6D`zg~*89Q3sK2$}0}j9cM)yZzopETw!%GAo-n(mZ|8`H8-JSJ%IE(M2=X74`@w8_OZe}FbBNFX~UZycDdC(-z@S858?k$@Iqm|zoLGu`GnqWeiE>B)W>vtZ&&G(g6Z=Ej?fAaFKaXPf_`=sildggYh&lj_hZ|TXrU9Jv1E5o00 zUwqTL-QJ1dU!>=J()yd)Vchep9df#qKhkBFP9q<)YF%q6_^b78rc*T9#j^(~p8cRW zPb%i;$RN04JVZ2)=X`d79TIjs%dZ#Uj)k5s;vxOoZ_)a{*40NofJC@yfp3XF05o)*ravy1!`0>F8tfb3;kZEk-*T_tQhVM* z3(qYiGb5ot)A;U&^y38j^>xUicOQ!T{2XC5K9%jMp>19c`9QetH6G6SANpY$_&M&3 z^m%$M^aSOR@!CHu+X0L8M~qjeBA@J>moE*x;cMK=;X)t09Q4$}FY}*#{&uWazWlJ` z(>SknXr0rL-r9RN2!T_4POrwdXfN7ztf%_u`I!et}5~;J@_P*X?24E!Dr=O8Rbr zj7@;g@a(+K!OzEFhp^t~$v=ll;R|zw`&uwi?NZb~e-4$C+oc6QJ^a3ussnQM`vc5>1@m_? 
zrhw-G4>SEe1wIzP8%+qw*Ie`)_6yv^d4DDOw}v2l!RIE9Cq7-|Byv zY4?8U5|h7QSt8$`mGXV+vp-?l;iEtK9melpSR&sSOZnc9fs5ren?Gyi{qz#~zQBA3 z2)U5{zowjvKT3br;aI=VGauxTZef2;1~uNLpK<;Z{tWR65AxD6{(HDj9DFLu?e7zQ z%;AcB{Xy0v;1~A0Ggi-+m@avWc9DE*KE&~-4%F4oo2&rI{T}F1&V#fMr}GV>LvWlg zb;52|J@@AO`5ax5zaZ}xVSh%KvA(a8Z=x^w*@d{D2TraV)d5R>Zo>Un6APdOz#)zqcv3 zC=VS4KIxq%>M+t}`VZ~3x7XJf+Skv^d8SZK5q~H6B>PnLC0@4U-e>TG<)FUSuEFmK z;4(YNlMMzEx>fXv4%Y9nLb+OR$j9w5&sydi^EFW~&37d~3*lT|cNOGS^I*LvcOocT zIRBsDFF78$Cdx-$Tkg!(MxWnXT)3WadV8afw;Lf}fNxd(%zo|j>pSI?bJ`!I7voHf zKlA%v?zQZ>{i5+)_RIv5Y1b{RXEHNXk9?Q9&;CdLvmd=5eyz{rhY@DE`!{%a5A-?9 zf6{~bygla&@U$H29p{1#(nfaru!A=Td!##v-$w9)hFsX zjIhNgG8 z9{eB1|M~f?@ioYk=>INYk~g*+;WWQzdx4#`{$*c>*kr|AogT(k_XhdCINXYgy@Cq- z!FPR!P~X4t_MF*YqW6}TT=p79hl8F39?8!E&>;EgU-oYA*XUDhXJ&$S$mMlp(9qaE zvH#-V70J+`hp2~{Pwl34lKWSC`iY=FUxz`Mj`N{9{ORx|zjFxFiBCg)9u~j*mzmtH zXz>Yz!)d&$;FdANqzjxJ6n`mib;O69&)}yy&G(0(f9ND{Y!76CzW4Pv@v(~cG3nHv zq@QqV?{@q4ta7_}Qr<3}CPKS-lGrZ3UWh-2!q^^^i{d<7{b3q~l3wj2YoBe=x=HX| z=K&}`tjEZW_yebVI9lIgI_sl&@_E6!N8mF(f`t#_;QGhfD6tOM8V~x!5k$BTLSQqrHpcA}rbM-6c6F>m98xYaF3murlOd%bl_HVa7LZ zw5sOsedpit4B(*+{zG!7ds8pR{B~jg#`?nb4$tz~&J0<9Ewz{4!yeh;@Wpi^@^zTw zJA_>KQ>O9T;^ABTgPu?OD*YEsB!7oA^#iS=w9>~dJ-gV~mGxaAeczPp8fe0j-8-1y zdsx^GEAoA!A+2W~aC+E}tN6)%o%jCf1rKSRj_qGZy!yG$wQ{|V@j92YzA&CL{z(!A zHvzv6pK?aO_#N09!sT?y;rfsl>L=!V694HY@V}1#ieJwF9K(hWem%nU=RhajdR7ij zbZdROT%Yv;Pd&ZE`kLh{8LYR*0kS$lw->1mS zfjm-Qq=#Hy^?t<(#Iql0eXqOAvD9vGpXcdm7js|mOZZ($yrV9Z-!uDl9@<~Qg{uLFblz4wD&uk=Ny`~I5@eX`%l$@TL3$2?x=;fRkEHJ)&~#Bb{TR=vCP zb$0G^lHWsDIv(BQ$=4|#_IT|V#`x`#xB)&SdYs&(ybl)YBjC^M@2NOlvR8S|pyl4$ zpB`~}=vn6PXfM*gZr&v?FXHPNYt!^_5{0(3~WuAG-AKvx-5-Fj)NpD#g?W!>R{jxVlKFCzSFVH8LKaLXv z=m`GkJX9HPW49BebE?u4)eHVV>fQsci6v?rO)nr)6j20;ii%2?CWv$t6hr~BAp{a2 z0tqIe*+4;T2o}Vy=*5QE3yNYFJJ_)IUcufFd1q#K11Mhaz2Eo$zxURkY^LtaX>;bB zGrI^&(7rRislHz>W^xF;KsGfVR3pA4VXr{#VX8F4j)Xj+-V^!rbK`XolnFW}>Z8AC zJt6MYIHDm>0RK;r_XTiA7gerH?~BtE-BJ3|2|zM}HwC9HLdCVp!y`d zDEb=YfxE|g0GP)x51|~&-%R;q5nm#@8bsj(rib(kqIL825d<37M*#i1`k0>@^^Y*{ zcl9x!k@eSs?tt+1bNpWj34&W8~)N?XQDsqJqBpKK=4Dkc;6F2l!px=?$rB0C_bb7 zbieFx2)3jlzag4W2;A|$XDZPTlH=$(UD#cSeGg$CqEUKNLwQGBj_4QL@z5k2u5?QO zYAP4Nej*|&{p=s5^rgmn@CkU2p5`>7zUD&W{7(87V*!2w{G%ydHqtv)^EV;t5zWZIjrGkg&6I;4$LvVtBj#?n(!;tE z3Ca#Y>9v&pZloZ}yD-~fJmP%`z&!-!ittMD0d*cSNN~X!uDKM{3Ke3@Vq>5b=CSptye#e(YPF-J;Dptvq}5_IT89C+!4LY&_m#&;`!M$KLS6{uPfy1GD!S% zMzvtR=y`Cb^g}BCrm=iP^J*?1y%#`Q6Nn#tGtO+SK0`6@jd#??g6Cd8lWy#`?f z`O%C9Lu0sp!A~n9pTdVq&nD|r@qCTr_Aq|PcE9M^jq;n|hs;NCMEEi4O*|Lr4`_%X zaL0HdO=s zK^Im>3>W7JW1K<36X}28=B=M-L;8uk5cMg&g?cXHBRmd!qMV9PpyJJ`_(#oDqI~RP z;;vXk+^KaZz8~o)f#+hp*dVHJScj1P7tMo9j!V-W7pvaCL!I|RctCJVH73f(dk}ZD zPN4Z9`oedpgvKqVf2&9J|0#S+;Q8qKg?<|4qjCy9nyx4X>2AcGde7M}^aJgi(9d9! 
zer#$8dKZ%I-H5x2e!PgZihf8wMQ}p%iua)C=gEKM8{$3A6n`N98N&HQD?*U|OxFry zVmuTc;#(2l8Kb_l6asia1NkEzlk^q>{DkTwxza>_kbFap3$3H*qV#o&PsfK4`6~W| zB!Y_>i&+0v-`}w%>LtVTFu#y|_$$8{5^{v7j^7)C@J7*vnH?!lNP1AoDH8vwDMZ6m z32_gdK~zNVd&Bq#;Nm-pKQphd7;hB)PqZNfCq=Imo+ncLLh7~Dxx3%d$=E33`G!LY zIx`wdq$#?kzL!SLE7F(zj%guL4?P#{qbWHgEFj9Gp;!R_Pmp&?E+PCNxHsK@{S*6# z9<&0y8si&{+LuxN{r}bO@$-JR>3#y>Ld}J8Ar^WhS6!jaZ}nk!D1@sy`!ICbY(9``v-yb6n(GJaTSK4_ zE{cDUTo6E-s=p46AFf~Y;cloE=!Y;j1b^!Ml#v`~<2djp`fwK_KP-{BQ})>~vOZ;J zru>hn9LN}Lg7)YNOCs)wzR^6w5{NYQURFweqH>HU zvK`{bCU|O;-vm!&K7td*6STv43WpzXp>+b|3GNtABwn_VdM>q2L<1htI@a{uK{{;^ zNoSP22xYXx{SH#%8;y$pV$)1<2DOef-OtkK)Vd$mnrNrX3dKLDbsg;+5FLcTIMJQ5 zw@~|aHkJ%9J~T+x1@;_UVmxLxs1?%7CgOY3=Y#!(d@gmKkWI_N^{DTR8X=;AtAu=h z7v4jNvI4qHN4X7tb|jy0K1shasE@7^_>V^L^ZU3_{JH@0k=+m7)26~7xTyCEQE+9` zs_IexFXPrlxoBPl;TVaCG?IJh(q+ZtJ8FmM7A3$I2q>yQ zUDdwfI`;2TcE?~Ap+|?2<73l^_ck}34=sI*HSr*%{~d-v7S(gThw@$Z~RgC8S&)-;v1w z`TR8ESBgGRjeoD#qaJ|njPXsS*Z99Nv4<=^AyFv~ zmP(Wgva+8r#Yd`8$`d1%5}7neE{qfp6icI(FlflJM@<+hlr(0uM}~7f|8n; zOsOO+VFkia8_ftMo1F#Oxw{O2o9r!=#Hr>7&-0SX5~E|tDg(r+REGdA0GI@z zJx)x)n?}pf_`dQ^*pod)6Ig{w&pXU4-`CZ5)0}VZPTlpnHmdK)(T$+$DDY8r;Fk!v zQAnb#Ik!LEuTIRp)7Cw7|D(8bH+JiP_siI;(d|dsSCeb2W_>3oNDjv>PA(RTQXBda zan@DowKE+xxMTLIfzG~f(}vxs<}F=S=lSsF^_c`m{QciJNrYS|Pqm1Y$;B25d8CCR zP7*1$h!@94#>ma(3R-?vbFbV4);;d5~)a>B9J92 z1+pkX1kj*J;V4kXhy`(CsX(F-pz*aGe<8~D#${G;-H}6wjmvX84ozHhv-C_D(htb_ z`AHNCi8OkMcx<9X4x}K7mI{@LaZ3rO&$&W1N2C6Ww10>Dod7Puwml0h?j`~XcDPXESC!7 z1Y)8dv=GCr1c_2WUu29hB2Fxbl!?Sbfu}@~5GPC(Na7RX#HeS?Hv+jBCNG5+LMhP) zG!>^L006|IxKxW6S-jXno+t%ow@8-BV=bT$Wt2=FZ>E&V;uJq80XtAzp|qfojhUCg zwo(pYM|fs(_!?TR^z;oo8h7hyZe{D>BozY1N0H=1te(U-wosOc%zsA8cl3=hbQNR~ zHC^3_^(H-k5m9jeBdkbponAr&ls!@p!YaeFr2cv+Uu(_fXXsua_iu|#g0ADV-lJEn zrk58^noT~0b`LFCaNRp7l?fC|nOqnx7AO)xS_O#Z@rZ_GQctl|0uoEAlqgdLQkhZ^ zg{ZQ-AUOso5>YoySPHa?^h6c`8$}JLvN<_Lq5FV0* z7#M^&MJW)f79Vmvjmlu?z+8y-#3Fzv%os^eL4sTc@&JH_J5T^wiv>brjZ&b(Bnhl3 zfeCRE<=7s`_m_~1#&usp_y<3s|bELJR4zzr}b#gG+A6bQj0 zH;_jpl8MnZ>Iu4t0;CF(g$<)4)&cJ+$QOwt<*5k(A2iV@m=Vw%Fuf9R zuZR-MsR?_fC}Btlo0#ti4o3qP$Y3o`2H*mj0ecD(6=Ezp6+nMTe!}wWhb95DCSivt zrO_&c1_u&?npUJNK0*R=6FmxG0;nUNs~}eoK@3b!DwCRx6U${y$E<>1lvwN!HJY7} zLO{W?VM;0ngHfnPDMDHRfo%cEM=`9|k_17dP>O0tfb5Gw+#v#%1a1>}1o#O0B?{%h zDhi+i^Kyku&r1oG^#W4EU4~c*!0?~|I5QSDg%me5d ziD)JLpNw8b!2haq#N>&JX0Qx|0dw;dC&(1Q?!tJar~+{!y%GtJ6iIwyydX{zFHr`8 z-k_9$Y!yfo<0D{VK*ix?!*GoKa8OM#bAF|DH4Q} zj|@uy=?stxiy%aOSR^db5@i@xq{Z@Z#0C^35KY3!uxNqKyJ0Fu0)`~MUlA>dCnUYN zfdQzYBQmEL35zsk43eVI8>v?KgYXi^%MeE;$i+z#8GyCXY@L9L2sH_}2DM0%fb<{4 z29OX-Fzr%9C;N137$QtSAd{js2sjZDDllG@0QLe6QWT_sfCLK~Es__eAUz0i3xycq zNXx@Q7O|yLo+=PV3nfy7Lrl*N9QZ3uctigQLSSMstTqZkBkW+gh@NMLR4WkMu!>@_ z3ow)6(NY?hYA$d`q61bV zil!sP&>2AuNOn+|CHwhV7b3I0;`#|;?H42@Mu1i&h!v*-4hU_DtU;{-2n7IHaRTf| zqd}@-NdP*JTm@umwoO_Pau`sF1bcHUtg6*NA?_wU{6yT;jHne8V1qYWz;>!FAMgwS z2GB#&$S-YvqBaCXz$eI!)C5e!Smz;I`HPbq)@P!|PtB#C&ELvAaz7DYfEcuywO#04_G=HboDSwoTCPjVi+;9d3Z^&UXRphLbjPTmTjsn zqiQsi)bnW6GbQjF5s0HBl^Fu71K#`uply=kkL|!pfHV-G*Tz*nu)gx=)?Qep{*R5x zNB{Td{+E_+Xq5^e)H8DNY1|D3#%5vr$BGE}BDpa6j}=M9^N;nsU=xx`QvM$+!_?5` z;-6N>{Y8Tz?4LFWPJqo9)^7edlm;szV!{8&H?V`1M5Ttoq-0URI{&Q~q$xGwz~;v8 zEfzOHI7ykL5g!8`fp5arQa8G(#HK20Q!5`x7%Oh{~}!|-a~ zDK{{WRz3HBZ#n1IkGmNHymyAPznQVA;))@@YkC|2`L1yV7PBXr%^Zq*MB%nv^@ z0Bt11@@4?>hf4n%4u21T|C=-a!j< zfcUZ*_zwXg{imA$Vpzyr(7Y}~c~pQPWYoH;XfhyDj{dpgKjc-|(+iOS@V87sRrtTf z5Psxedse}M(qG7_#&F|357r~(#6W-hYGRM1lpk*X9cs4-!#77|-1Yy$?H zjHJh~3g0L>+d!G-$!Dnwu25^6BJ3i$ zg)HgK?UIxfAY)`Q@Gmyn9ierNDixs8f488i%=Z$)u%jZ#zgg0#IrN98HHN<t8(K8cCN*J83k#z^wwwG?I7B60o3v@@ z9kuX425psWYEm&+;)Z<>K{Ddrq>w72Dm5u=c!UpS)*po2T1_hT6Dvs$MsWjyfPyB) 
z4Sk?tTK}eu?4eha;=ojCBmt8q*$q9Ct(taDYAgTTfvlR7#kN|WWn1e5OIpMWid0C}^-;Q|PKN&KDchu{zx0;yeqy%23hNbe;W z040r-l7H%TZs?$SvnoS$5Tfxqgi)B{jV5{hS} zHDn;`wV2j`5wj#Kb4znenlKSUsvutfkK-YMZ-yHvIydx0BdNs8-}86$Ltqf$3rR*Z zB689U>$+r+Gi)kBw{57GAQLqNyF&?hhvh;uA))S?A@i961$B!67&xH&e;j11P!bQ6 z$jBavz+TwM3y1*FWbj=>ya+HF1oq&dUsMLs$Z-%Q*--SqIS>?cu7Jt^e<%u_5<3wP z#ty<8=v)D|4{RAirzddazJS=`{8DT#KucGY1e}oA#!A>kLASI-TBTK^Z5f<0kqD7N z6`fshgwR8D1me%+gN`ZvtBXcwFj;I4mq)hZtEsopXsM}HudUOnwQhYIy|(T2+Zz~m zFcJ_II(F)8+@)(blkPow)|;A{TUc6I_p-6Ivv=r?%T*~icMnf5?|%J#2MqM{4;VZo zFerFv$gtrfgb|V8D2tAfjERkx$`Zx`BodR7Q&PtvS{6%@TmW=aJH;Vl1-R6H-YH52 zC}fS?DN;(s&q3>`KayF(;D~?>dLBT@aqW` zBL)sGHO+Dd68PZ{*k2&IChe&XKq7;r?iKAiJLZ&}h9At7CY;5G@PvU$Iaudl@8;+8| zwhJ_)D17K4vEd+=N>>67fu%>Dh=QzziGL7WFsIYa2}f z8dFDAU}%!Wc%mb~1m41+81_`qm?7L8KLGNvYSxf!z(=wy5zaV(S^%M^!~qwIMTyAJ z2szTA1PKy|Z6;?Wf^aR$g^c^9PP|Z=u+0Xo5fy14+N?txzZa51JNGj`T>m&L?r5gNK~xw#54-$ z6Dg`hb=`psK`LUJM88oU>Jbqn`b`kzU__6|Dvpjt0z1MfLrjN(#Bn6=#JIQ+bf#kj z5FzM#+X206{U;6Lc_ypbbn4j@V*?BE?llM9C9Hrqlr_LPXJMlva*wVqFbFw!)Ic zks#V5g>d2vo$f;CXHZnA5V=udbA!W+L6d`H27fwG_iO)A@k;0$OIu8gC^iR6Y(%lD z@u*Hq!G;a=!$|9>I3>EHpjvX(;n8_a9!HC>rJ<$MR+n;<9}4S)Z6j>P1dNFv zGiywd{FMB;Ty1>M@qXhIGUjKLWZcSNWSe9U%1+K(nRhbp-7*6<@sG`5(-~|IgUy5+ z9sS_);1`omgP(Lh&gOGDsDQ=eF;E$U#iFxtF@pi8?omTHX3r#AGihu(m*|5Iee<|H z+!L3>;4}Fw+#QogXL0yk2JRF-D9vTEcpN6~8lK1DvY2!Z?w`fw@R(dWo5#QpVK8|t zI*-kTZ=B(Q(&>B#jnClm;fs)X&}=@3#^SJ8TrN3&2A>C?^oHIb9fAg14H4_I0P8E3@#f6 z%%KDHm@EdHPh&C(h%z}$E}zTe0T%gmE}g+=)8UZ>aDguQJU$EX%3|}_92S=iPbJ{Y zr}0^Q7MDinav1=B26RG)Hyz;lumBD$E|X2;@i>4BI`qQkQiQ}{aG6}dFq_3<0z3fb z05ljJfe1RC$>AZ4a~Xgk4uBt^2Yr)d$L8=@Oqc?X1FtIM(da-ld=8J#Cy9~4V=>^X zseBf|2L8dTD*&1-7N1L!C>@GvJQy8-2>DHbu(&J+gT-T$lnd`_W3ym@04d~00ZQX> z>2wB*0plfjg3V@dVT=f1@MZ`gJwB5Sj6nxHkjUh6IZPNQP&)W#;AIB@0xq-#OaLe` z@$+~*Iv3~|#svyL8=$~sLSr_Q33N-Kn8jkzc!;!l@DLz61Po}+;BdGMz!<@UzJzTSQr5J9}^$e(aaOIKy0MB_7LN<|H?(Jd(Ez6 za0DH63|PjHw=WfJAU~Z#28|~Ubn(8<0?X$6k)(#fdMrc>DU6m5|It?f&{=VGP8^*P zN9V)Q*>HR=9GwY=^WY39N3UTz3IERn7YY0UdKyexdS(ZB!J-wNcI2fxEzcHS`EywI zE~_)tf^Xy26i*%L7H?J!d`Vq&qcI0XuN|bX7p}YU;j7DYCiT%Kssh~UeHWPX7vE*p zIp}IyJW_Uk^|A62?-uLWjjiYQw@d4_mf6+lxj!1vA1?X?^F<@4T{<9NdHvcn3tgS^ zfIG1XQr}9=YYVERW4CwR8dbi@*iP4Q#3lDWEraSK4=IAr>eYM|zcbk%{Q1PK^joi` zA1Z2Yg?$cWH?LcOUSSP-g#{dPL>s$Q3y4{dmZ6i`zv4w?4tp@Aps&EP*)oX~4iNsP zFd^>uWvEm0NFr`147LjOVPLdltVh=XCj!6M8=qHr}eejMIZ|?LY8+ zTXOxU+FLl?;!3Pr&D@~}w7);X=|11n=4>)LKil;CE1b?)5^QBMYValR?;mh_*QvFe za*M|<7k>Ya(`RN?cTVoK?8W%+Ed2hS_`np`xjtt%=YMa3(;*eRLsND7OxgCmHBQ?P zt@X3Z(>ikIy8%uQTGZb`_9B1IlkdhjeZp}{morn9S7~*oIQ?Pa!G-54vkKbR+2Hi! 
z;vRa7)NE>O>-yky&UV`#wb2gS1L{0++R3H-;@!(9Gh^!p;&kb`OZweBZy%pp7lhOK zr@HK`SQ$INpl%dSXRe8uXdG93v$Rfx)1PjHX$*_n_*Ye398PzAd-TryFAv_=)G2X# zPkMdP`c1obsr{IM)3bah#IeI7rgiy|iPM3Whi$^^_n&h4k&V;AT}$U?Pxa3q@gonX z%O-VCFi!q@SNUTZPUr4=ssJu9f-VWc;)HvlrKY zz-d!)y9dMcN-pfE|BllH`@1fysBaK3=oJ;u>WTL$(P$CH$6uywVq(cfqh<2*zNGYa zncRV9fSc~n<(?W}vG9-s%^0T>M!gtcVLd4~kbECrO#1VUg$@_1;_=(B;5{gvmgsfu zHfZH^S|41$TkWySR;wdlucmq8^!iOFvmL~DwwBQb;`B}NM*SS#iHvKsAe>%eIM)5E zVd$}Uv{5)c>}mV8rV_t-8gvm(znGn%FRG^B=thsjY2W0UC5!)xU+qp;;`G$mi95%q z@ZXK1Pr&KqwzpGPJqp^HOwYvWBgdDHoR>8+YaTrtr=3IdPaY4nJy}f8!|5Fb3%`xp z*<-;8`ZAn;7*?>jY^C>|`}9Jb?vma6rp=M1YrfNqaeCCsMSsbclz(c&*oo5>2c8E7 z9=ugz#n_M258OJv(|iAY`as4poL-YM>eA8*!PzLrS)9Hb*m3;(5p5PtW?aT;M@yrH zw{{ggSjxDC)A>6djtabRef@66Bb@Hzxvuc~gr&9T8Lx0!%d>FZz=28oUNS!5wEi;J z-8DHIXK|R{aoRL9WUW?>(Rl$Vs3@bm)!gTg3$>OyFlz(uF zxGpd$PGB0~^wi3A<3n9S^Qyn&NbBz2|6)Wzji@n8XVT-S%WJ zTdvpZ@(pGmT)v_G!=3YPwp;O$>50?Vlkb*F59YtrWDUgWX4l_e){JPmM3MM+OGZxt zA=6=ZYGNhm(??)u)g8~D`a`~C0U2sV666#H+d7WR`Bn6HL_!>v25itW=}y_1|3O63 zuYHj#TBA|@?Su(0&9B|Sh(=n5!rB)xs+;AgZogK5Bjid&!^&f0X>M-T(-Lgj{zCs> zYd2Y?;57oUVs$q+Ctf3k4IIQzzeM7UBiJ>;QVLE9>}Ws-=^Yc`ojT~)I10|}*3A@b zr|@nWv7#e5xqf|IxCvMyl>&iW5*?%L9u7e(DD5Z+4>wgKGSP4N?&eq=6!M3-ZrJa?XwOube8#`M& zJ3D(j2fN<(miAWm*7m*ZZR~CB?dTJQU+c=0I_*;(p4m&j8sGk3NXNL%R(k}AO(0`3z;VH zja%ZgJ(v){){YaT&6{XRHUqs!75son0t~1Mux4S;7uhLQ^_LlghG>C3W4Jc*eV`Xp z{=a!q!sEZ+8Gn1hp18>c`)k-?H(U~M#h@2z!HMuhWt5o%;SW@hZXY7_pX5o>NkCLn z)c}MN=QJAE9cgZE9*XRPBtIjDM(g`(SYiPA>p~=!>>gcrMx1sB?+S1!G^||fo1`MM%0%lL9nYuaMl4-@TrmtgcU=*=7 z^1d-@*>#K`%zFN&l+;6#(Dw2XiPJN;?>}&`>gt=f8B=E!u03$@ z_=(CJH~TLtJ6B#&>Ek~rWccW?89BMzc9$GHTz=xJb{oBsqdtB9QJ)?^_RigwMpBu9 zVc5h;o42Ix+uug7gOPVX|3Sk>j2b;DZP)S3Rkz-}{U}%DDiia&np>>fQgZM_<<+}O zTo*2~%r&}rxuV{G(1?*-o~Bkei|QJw%--3>-D}R=!05!2r_NuvcKzv(df4}OnsS#l z#go^b#nGNz+%kP5+lW88J+mFyNi0}4U^An0Ib3bE08JfkFc>oo)WAdn)-tdUFrT&OTd-0Um^fPR=woXhnjswq|*Mg%|@4y_v8qQD8XltO>hCh&%K9jStu!SDWY87kp zjjr4lY7=oRK$pA(SMLgJZg7Idjl>{O}(|AU2n(r8?IgF+s~Q1o~!26H%gK_Pg@ptu)1bM z#E0+ofk8`_np<=;30_{X>aW6e>o=9|KgLmSp=;>q;x%~fy0hmBxb5^ib?NK!^i@s$ z@p6`+bC<3ry&N3-`wk2U3?7PTFFaBl6{|>@m^QPpXv@|^=QnSW$_~sQ-Dy0V$ueU` zG3gfO=~E1uR+Rpa#Mb>k2#e zrBR9M>BnXcjMT`q)M}GkG^O9Fy;FK~d$2}vx~h4rnXp?;**a1@fYqC;?TScZ;TK-! 
zl^*=nPbc@%q#JTtvUroTXRu<~8caS{YhHLizOqmHCp85xLDzd6VucWXyYy+3`!T1w zYwBhO7;!l1RXy2#JJJ)(nEEWnWLG0?M>c)(`5sfAq<`){kfp|AWN3R1bWT6g2aJ!w z?DjT{$t_J;q83Bd(lM@AklI*{>>~R0jSxc){SWB_avL zk|{uMw`s=cL%9CIA#cAG^zz@|fPOCWGi(It7h{f%rb`D#>J#Jh${PYRmq=S7| zr`_LN>hsA=Z1+{HvS)JX|G2o2T!1;QyeV*;T zwaNKl`yR#zeD9{*o;o1nS>SbzEvs!d#T#CYE$St69(#FO%%;b#*&=xsX94X@_Uuy| zj6Ns?+o|Ky@^>+vTqTWhTiU%FD~Th!@K{>jPry6Rs(?=Yp`!acOQot=UgEZQoVF(q;&Wb=7O$Cu-KR_*iHsq^_!=PG*QtLJOXoW#LJTVt0`Bg;v`dP7d6lRZ#BA{_IQ!NIduA_C?u=LxqOq5(PQRu zSPuJj`o8X#n7;qrtB)*u9=kkt;p}H)=M}s(kLgwJ>6`WWuSvtKrj7KRZ`ko(?U?Ev z;|8wSTo)tP-6QILt(5&Dd^lxSE9;fheVXxJ*~VB)PQ>B!xkK-LNxi)L(wh9N{nJX{ z9@`PuHhIyE!v48^)^F_I-F)}GX?OB|Y&@smtBa3%>GfM{x9FmpW^*y>YAM6yzr0vx^(RN+M)BXxOM*TKB?__ z*JDeO%!F$y?RVk+=A9Z*TR!Ta_DapXS$KAe-|IOiSI)j3&}ZR7u}($WUj-(+O1W<} zAGerT_apys*x8fvhz&lk13OT5!*QDXS}l9_tjjjO-Dl^@qLszZgR+Hlw#Lu~a|bQ{E*u>V>UI|0t(Nqc`RlY^tng)5IRuH(Y&}+YQ|w zd2PxVu4~`R2j;~1cY2UGsPDA7t69(1dM1xxJSZ9w_)uem-5K`k^SZiYDLX8)@3hmN zBS&b=O|R;(__|K!k*vHQYN^AX?Y=iNsyn4T8%Eq$^2^8GeZPbSD$b{?~m(R)?8|FM^c>x~Tjrxknl^O$x@5V0$2dwsXR zI&HYB+phKV+V!Oow3YR52T*q34LhB~kGdqwI!(}8yhE?z*rkqs4m&l2&-7dxlvc2u z{;B_%aPL)tJto!O%Nvx*IJ`LC{reeD8T zbGlpUq>aJ0skLXmb3TqtyRl-%n1IB>-doJ*qsOH# z&YjN8cOP#X*-HCoGz^$&wG6G%6;p~b7A3CBDLvrmiTt4?RqY4)jg+C zQ`0!XOA4yrjglR@d+M6+jHgZsU1tY4 z{j!Db%POl)7qiz{$-D8&EHldjGctOl)Rza8`CIwE>ht)OiOAS?=)o_Ab{9r(Zso0+ zxA=hF{RPSGr)5t4w0MzTtGzDL%;DpMADBGusysC5cF5Dp8ILV)ywLAO`3X8E)r6kB z_{JsjzzB}|n*BRB1jZ(guj1wXJd<*r;XcnUl|A~j>`i#D*`u!S+j!M% ztmxHpn`5TE)3@%ZEXwywon01v-0fp>zk*u@8NuHRuD*E{-rnbHk?Erup7LGGk7c#_ z&^uyjNtJzQaaBd9LV_ALZ=9guDyRvwJ z>%Z4!Ff!6UUXLg z_eA+->VfM+hDHrecs4Ti+$^Jcaox=4M1Q(#?LGE-Z2IoNn}1z1D5bq8{UC)V7tgjV ze3t%Z;+DEY8S77;+s;YXIoC-T7uw!=Tj9lTlf!s|ko4HpNPVmF>a|CgZFOQR?JOUj z`5s=DTr}sy(Pc9^+MeHU9&p-QtXsA7?FUb@{<^`n^Qw=(pJm3@)?4gr;Af_LyM5yB zH`OWLRih4uMi!)uxEyiu5$SianQ*er*cr81EsMvsT$z==puo?0^Ap?9%Q_+LE_$!O zSIS>?c6fM(a>mu-m080sE_y!TluLp`;f0=@vz>#IGwSD7-tOl87bkK&@1SY-U{Ts` z_o{7r15Y~XpRP_;jBz%4G~aiHVOZ&DLEn!(rblGW&skPIMLvG#q@dZPpU0!9^PnZd z70<1Vj(+$$x%|%MyjKrs-`Ju_@4q_q(e5*gb^Fq6*ZmoHMkhG*o29G1+3RxuZ2=pN z^OLeU?W%9gz8=C~dfWKQCZ|(+g_W1rf0=gWak|>oH3J{-H=NB>yYy+pK%d z-BX;9d3KuevQe~zCOFzJiSjEc?zz!ETz}P_UzvPDPpx*%oMA&~W3^&FoIMh}clp`W z%AM6wuCrTD8GCSX)%*uW#gi?AUHe}f__o}FcE;gmH@(#p#srLLS+Hr3)1f-f#kb2& z8%%xlMw@qit*>R-bZ>Ldy2!PL@oju_=a~0R(tWkecg2rs)-3;=;Y%n#R$X_OsrTR3 zxSAZEe&2FI(B}ysa-IyY?s;Kv%E#=!xAT@%3~OikMY(Uv(<|1NIjtQJ)3+G)u^Tex z;i_zx{o58#e|vaIw=+kdc;0$kvQAes{A2#!LtjQaYHjR(XN~qvoju$4KYg?NQ~%4l z%AuN9=ITY=ec5r7$Y!KZd+i0J-^}LK0orwqZKrFUY9vRddGB#wSM54zaH#V)wOH1U z?!%tw4DO-**P5LzZaq2jKvVy7?nZg(i>XiF^qXIxxHaR%w9>wVC- z#QU??UHQ7uiu|JkW@MD)SZ9~ciXEi=aq@(nD>jP6)-Dzsq@2~k^3_X)Sy#$_{Af8o zQ0?T@#A#mix>i|sVPVfrm7KDtL!RfmeP6VWWna?SLA$?c!MeY`4|YnsaGbTqG)s5H z=VSe!IJcTwGo`lE)0+yvMN2-Ce!*M!+UX*T!$5#vTpXnK%{8}p5 znjC)4x+cqb^jvct!&}PM6>5~xG}{?`!2QTX=k9 z&Bxh?BR_Qb<}&;Aj*Pn9`LT*^+b3*W`z6=1tl-gp(FE`Fx9{8xJ^yLPgyKgnKON98 zT|F~)gQNSnH|JuP&)Z?HUE;f`Jn+=TMRA9-Jd?x6Rf(2gD_K9Dd+@~AXOy2+?z2gB z)u5lYS?JOe<)0ikR4%>dtnp}3RjSRD)RdhwE*(AMKU>SN-uF!S+W}{^>@rJx4<6P- zez)Ssp4T0IA&zu-)f35WyC~V z4sU%|n;z-)k*hv;t=xL+2>0|5(yzPg4deFw9W%a1Pk8I*;9~myL9xf>6hZO6L$B?+ zXRKSgTw`ES=778bRaP4^-$%}69M`^kSv)#*h1$eR;y3$kJw8>n@vYyb=j~@+=+Y*i z{$jD0f6l=R6$1i#&#qirJ6Uw-%KfnxSw}`cnx1fU%DB}N-wYh#zptAjDU|Xf%f4t; zec_IAe6hu3cTLv^`)1h%t9_oqn(_GN_P;i6z45|4^TkdwnSBrSI5)7qzoS><;d8)Bj}%GqxbP z_2X}w-*%2wpV#Th>tj==FBraJ?~T{5ymIU|hCN>zm{|9jv2%LsoiE!m&fL_Vy#C_% zUCjGM?N)VNd8BN}mDNMI6aMmCTovZ?#=BSi(*bMdXPP}Zoi_ER+0YiQg$c48(oa5Q zZ>(m(xq)Z4cQsna-L&xhx^>>I3Tc8#7AcC8#%D)QIcd@&JBoHW$7$%CZ)RzqR)4eB 
[GIT binary patch: base85-encoded blob data omitted]
zI<8mb)4j9MzcKF0eUkFGw%$zz`eK}y@xLzJ7YqFpHI{-a>(YI308Ef8#UMZtHKHCT$lxu<`k7x0D-uwBT~%OLaa2tL-bNusrk#EzC+I%hYz7%Yf>UerQQlx(q5U(| zjwuU%CkTx6)5pb+`mw+96rCRdKiCCG|As>Or^@?MuvY#GeWd-;6(n4$l7{qi3hjV= zm2@Qo=`K;b2Kb*O?~(qo0zS{v^ypnk{}P@LkaSB6={W9)&}zD+I)6cV+wyjl@Rj&p zqUrj>>ukRJMw{;rD*gM#Zbv)%!ynsxm-D>r9_MdL$;5Zvb8G3%@?WIoqy9fH$Q9^V z(%CEXzp~H{j!)>VjK6u@c#Ou3Kd(Sv)(`82@zp{+=J&22f#1&x<(D1^N(wP=WcU6v}7*scHP~0{)b*XQc6;D#Vk% zLJss>xBj7)~u^7#IY z*!E>-@oYzIdLRB@)RVMYz6O5xCbsXDXYtHe3jR&o2Y(VIwLd&lM`Zavj`@AwW`3Wa z1T7Meeubh-0l&lqzSQ`Cr2f?!|Cp6K-TXhlZ~mXRnE&U0DP2%rm!^kjn|!SC14(%x zNh$biD{qeZfnH{Qpl#*{`aboOln4DJdM+*R3#Zlgd&=D>t-Ln#2ffMsLElk-5b%1T zmUDSpPK{qk@aoa{2d$jN<`=rn{6hc0@j}Xh{w@VCP0NXA)#AhP>-|>FBJ&U3YW|^v z>aPGk)UWgdf$s0f&kqD2#t&IJJ?1CcY<{9ItDZ$U?6)sW@wvIB79WV5e=oK_yukcL zUsQh)^21YI3b^10JpMUvCkh_3HGYTTakHd_+&rq|4Dth(`}@=Szg*DEoG)#&{5P5Z z=wbCAA%DB(Z%y;({Ydchvi{AM|8?eX`HcEokRSe+QgCXTzsBz(`6-W^EdOiGA2p`q zHu6*N(hr38zrDGZez2rv{I~oA=9l_}+BeAmERC0oxa2|pIR*Ko{k*~Q-)R1+htxlX z{InC~hvolzdu{zo6Y)k>aRk5^m`mg^F6m9 z2QSxhn=RkF)DA~Jj{AR_=6kfz|B2^J%eRyEuH=K1$T@>F-`oP8)Ia=QOF6tp{aL7Q zuIBqdn(ur0`IyiJ7P|OHt-d?eFNA#O>if|wUx9xZSH(YS`QEN}FY*!p_oVrLR^Z=K z&Byxy%Hdnp{zpDoDy85ZX})U;{c@qk^Zb4}e6#w;kPqXg`+?A(Yy4!=KKjXi_$w4# z4&S7HGURL4eB1C^`(cg0O!95e`0r~zlzT>@zfe#AQyO36?~?q~Bi~NrYy4ai&wlld zG=8YiUh0{zrSUa>Ey+(k{pB=%`MC1%#WcRgpC$R}&w4bCUsRAo>d}YO_!|F}jUPzLr(fiuH2$RpIl55!^I#f3r9eO0ryow^Usvd# z{hI#+Y5aEz?WdgXN#h#}dbU;bkEHQ6ekZ|yw#L6NjgJfTp`LqJ8eijQlKdQJ?@i-> zkoS{HJo$fD8voCQ_OpM!J&mvNCrN(Bza@=-M?ro#4&RZ+j}`2lwc5U|Y5W5P`mz0Q zOyk2s`Sjl=X?%^JNbtK%%fC5|ukpV~JnhJVH2$v&@^QB2zafqP{Q`X`pVy`FvkLTm zspemw#`hP->28hhOXK$y=4a2=_^TK{NdSBBGsfU3&&(>${$7~|MEKrb z>-)s!HDf38?L8Lhp`U|uzZdkRKibZd2+frFS4}%b|Kjm|Ut_%r2F;5YrtOqL4=F3X zO#cuDL3w;%Llq7&dAyuEjLugzF>JgYjp=_80l=?T7PHp0Py?n~h&-`-QJ)VPz5i=J*VH%9HZ>6v8Q=(YfTQ zZ$AlG#M9gF?S&nW2krgfIPLX);4W{zpAO~L?my=|#P?PBK0Nn3=I8SVFmG3`y}~M=Of^0lFs3=Rj6I~ z1LFfBj4Xsk?|8uOX&T~th$H-~V~Yu+yjMut-l6?kUo|{e?ZD(d+UIfP-~C#u@$y~m z!^oL_@9Y7#JKl3Z<1;!kT}FQ|X99e76CdB#;`dBL|6J4F|5u28=`UW@1bm-g2Av#V zA%;B3_Jzc6?_&&yh*xC`H&#`4mQ;cGzUHY?1|I(|Y^uLRu7pGZ^c*sjbXF(jubHRi z_O3uK$ycDK@WjkO&u0`}Eb_Gex@keM9k((RH_# zXXo$6=4dgxf3HP;-Yi5z7(YPy!M#VcbJ%c~*R;Ko9xXw;KyQo_fGg8x$Y1e`384G# zjBe*jx;A<5dVQ{>2Ry5cS#y>j`U&qV{@ry+Ct1^d<-XrO@q1BDF?w9#^pM2z+$@<| zBy{@te(7cm{_;26{Oq-usEC>$P^A@8hco*W;r$h)I!<>)T$M`AOxwKp>;;hXlXo z3V)OzEAUqKcpColT{^WZ^8Ez+)n_9x`;lzj4?TikL|nmzOPu;nKa(N z!w9)xKAu}muI2et@2AOjp6@MBx>51+dr$nFY22!>gLu022fL>uwtAE97UDC5^3Z!K zS}XmV$;a*g7mJ{0{r{c%J)z{56Isq4>o@mjdh&pj2f0TLpz{}HV{TP@fxMUY;XUG! 
z4)K$vOCp4RNH)%sw?emdEgHr{Gqs-NL?Rdu5&z^w$skWWL_Crc*`B@~Y){{2jh1`R zH*0>>CsoUNxE+K=6p%8xWgFWu)Tv=PT&(Gwp70xYYQ8|@ov$bl@0(M(;JH+<2lmox z&Bt=b%x>^ewKg;!ftMN&DiBHNuwlCu!=@HLldwZv|z0TKOI6373-nxHw$Y5za z#z)ZknKk^pfZvC5ed_vkh;(s#OYS33yw*;eq6OyVe>3=I_vN_W$bY|So}lRKr11#P zuX}m%F7n6EgE$?MMZ`PlIzoDkkPb=LVS`OBx*k_J*>cF{8dx!TcTyF4j@8c+5{5p?lee!)=9f$Js zAt^eYH_1LwO&_f!o@qZ3I>T@AipWzBCDvOGM)9*o$EPAiw!E=7Ay7aQOq!2 zLm$K=+d1xx9#H;(u8_N=gZ;wg zQS3EMmqgSXNksfoIa^6F|vp$@o`Ql&n4igf3**@~92ht*Ze~Dz2Cz(P$F?EN6L40|O{)jI!{MYQ&_EeVt zo#Go`$aKjv_HXB}&#Tt9Q32=WALlXs{x#73h*a(8EVKLVOkdo|^4&jxa~<+87!%tV z&(w<;#-qeLF+5931?eXZRZZaYP1is1sqEKrdJdKPZSC}9Ainb2b~_;-DDm;qSXP58{)p{$=~wPpPKFF66x?V-)+~snBCIM~|ny&}J#)$6IexlhZBdzpV~7ek*n#G~_+KGBld8c`fq zQ#)l3?S#58rDyo>!WHhGOF6YsbriWBkb6^7p4srV#ob}(GFjbZm@ z4OeNlqem~Dr`dw_(mK4llxk4!!6rT9AuFfpCe0_`BV;*EuQAxlZF-f#EI(kMnBhULnxL0fh(n66G%0m+Br9e9LCX`Tm;tn+ljw4dHLHNe-FD0<;FJt`n?=^zpUH+UO(;}g1!emlGy6svykl?Fxc$IwKm^O@4cN5 z?AVTTfsdt6?LaBCQ{v+rHJR9tjashzQ{o}gdte$?Z6%$}&jzN^;p_9`fobTqh$p?1 zX)OlNB;BW(d|E$DrWxKguTN|rSaPdo(|u;z-g7P81oJxa52`%ODr40Y^74gWB8>Nt z|9cHT{!@Zs+&eeZ$cZ?|qc->S=sD#CsZ3eZT8r z=6m2X40n%d=;w9U3m+jdZuh!eK;F>v$F2ZY5=uHe!0Y4Uh)nM-@8^5J&$-X;lj$8I zzI{zwiI?3=7PYo&hG;gVDeKpX`JFJHgU)__Hl2@{p09=?G{}cU^7we4^1bPOtS|je z+`S4XQ{Jv&-+gakxaJO~bN_53?dSM@$_w@(qWvY^?IQ9naErjl%S-5&Y9n?*2)|4Y ze4KLslaEiJFUq-elO~sa<>nW#@iiXarTi{8%vF3~hhwC0xph8(?r##hmx5MJ7xhxV zRKp(}1u5dIQ48!R-Y4SYKk_3U`TtPz`*>LjzNh7Yj+`&Hs+?jy$DadY*ZO@mG9TCY zF&{r{T_%3uG1Afev+|uagAZuv{>iJAR2c8-WIikg1HOnMhE32r zVRzDg*eOvEN9996z3X%>+qq)sHKfba?@DQZ@(%C!#523EhvTK+pVM~}^=RLlh({^t zsBQnR*>3s1vbGQNqVeVnuE*TYmwmM=7ji#_hW`E2as%=5`3vL)Z}RP21U~O!zJXi3 zKXCq#G_gPQUCVxu^&{&aP1Y~!`hf_LJoWv6>ka;0*_*}90t(-3a_{#BJAX2H-=XC_ znY_=Fc6k0$uwBz9kx4wOonQayvCy>OV@;iCn8*Fu#w!N4b(8 z59)1^@Ma4^C%~7s$sc)q9pCXqJnD7-!g?uR>u=ZOb?JS6kM=7b-ky`0ewDn}_DHDZ z;*UJ*<-eBO14!yCrWgI8<>lX_T~ZF7lzw7E;7PhEZ>uDq)+ZsVLi@EIE+grzPvf0# zmudFE>2kBt2kmK*DCdusi~4OIl+s_y*Lozq)^j$?OXIzsB7c@JUDCn+QpYF3U+Xu# z2W&h_y4mljUyq_G&|m5J;rDD`W&U{dBjD%#M)VJ|=(s1S%e`i#TS=`q@q-ONrezLv zSbraWMB$`^^~S>o3?|;b-qzP)<6&AJ?fz>9y9oD8V+>- z5yS0oW!Plpb`Z~NhQ6xrF@I|j{G;Ko5f;963he8By?g&w-}`*e>Eh?Xiux))UO}%W z){Y{by_`Dw4iUe2m~@FpNUvy=bo6^>ZWeg}-UC-O%U_}YJLq>GxERtSa=ZZClF+^< z;p66YZz4l|+S7>^z+taCOj zWqORSe(#9ik1}5Rf4n4@o^F!B&ugUTNNk=^K4BWr6uGATg7E~DE9tIOdA~)Yb-f$) zCnRrB4bck3GsHZS@r0|~-Vivx4+$OSN&0;Iuax$qKFAyVCdgksK?VYO;u~2`xq%%w zqa*Rmms3?bV%M1D+n_-Zn?2@!g5#xr#jlm|)drrAndp2;wc(J;J@9UkEcyB_(fYVP zbq|Zn?gK?5<#B&{xsm-hUS#bFjoynXpVbD&myf4hrt`8}S&ojEQeRn5hh%(7Zesql z|C#^U@2ig&F~9F4@$W97yiP5TbS~-cez8;izM#nR4m}alyq0M^ed}Bc@x_O zJdncgU-Wsx*k1h5}@C)U6xv8IlXz2KZpVg2Ak9&!S-`fX!0`Zl( z%rG{W^|XtiV(0^Y8d^Q*N4cl5GQfP`KOR3Xitz>c*2{ZjsfNs$w#V)XbvnhbL~cnh z#UY>fKcol7>33`X4}0$dU)Ob&iJvRyT25%OW9K@litBqV%T`p>%FfF*b!#V0;<^vp zNmQFqw^kI@j?=Whxow=|Gf=yk<4#j-T#1W*n z`eBK6|K!hDc&c-+(az}pp6o92LH163#lA(B$kFXv)c8l&Z;jue7vEaewi~)f8sT${ zcW_aEq}S&zA2R;Akk5bl^rt@!>dl!olYEw&IF=a9xyl9V0r;X^z^7*;m-jqRa{1L6 zULjwph8LhfizYt_PIar|aEG&Su~KPm$BY=Sfa)DafNJr?(fv zpS_&AT>vxMxoPACJw`hOSMz+w%}!hK&tUxYY73v)j#&rTzI51~N%aF7k=w7h2mk5X z6}4}$^M7Xhy`GQu@3{g`^sHs^0-7l!te548&Sht?`@@cL6M zH`-6N=f(cqFGJssvtOxs9F6C-PKy4)f4Hshe&q7D!N2+Ge4Q!er~6&g*bgy5bx0Ps zNLTtncAR@`-NH~=Kj|$Vp?SBt0XO^r;X4hl_ZRk(uElo-o&(Sp8&_C=8Ck5k-vI3^ zms9$r{mPK*5tMV3{S1{?`xrF-D8ldFXzRWobM!{<_e`75bo~6-Ir6i(fBNb8SuTH1 zN2kv@&Q$KAeC-;=8Oxo8-hkiWtmLTvZ8{oymhbN%|H}QVG9A_+{@$6JGm1_Urx_pZ-eH7(c6B z9`1US@cZkLp7S`xay?=6m)GmS=UXitcn(;;&=1&Ygb)A2M?ByVf2gP3|4#Q` z68ZL$jsrZuklq*b9@=NCdn2^ZHr6Ax&sOV;nc>F=eZY77K;2#K8%Z1PO|p4Q7~Y`$ z$>#Nip9r#fE5k23R=fZ9fvXuFb9i_*@1}?so;SPy_JLa&9vhvr 
zc|R0>rMoNq!uyW!yJPgf&41iJa1X zZl`yxb$_;N72OL4m_F`%)w+x7S9@J1>`UCQ4E?AZg=*LI{s+P_6QXty^JyP=Ep0yv zl)ra63ZIP^Exx>N4ZVb%O|@LFI?be~ab895PUu{S-o;4sb)~?=f?jLh!`=_`dI~xR0`4c?Xzg3*56Xpb?ZYm@ zJy?Ra3GSf7&6@&u(BQIN53{_xPSD+Zm~bX{eA*wX_ZpT5eb|?Tk}n#% z$Klkz&b`IG_I_lz-lH%)BWN_yLF8wlem8+;5DuFCC|R zwyk7;0etrG*mrE}*i3g#?E5hK!NPsaxw z@7!+WX8tkelf^rvdXFpX4mx93QbT51s=|%VL-F3$Yd4mr+*QNI?b$@f(I_3GN z-eOYk0S`QIvHupI@Mfq)_6^$ZYQ&&?R@{7lismcl4mp_Z|9gV?*U>L}X2c)oGqPB3 zv3D&!J=V{WUp3+T&FF9V{LX&j&GVc8q{C<0my+*Ond9N*eJoS0rWO6?M{0>UHy(Dlt+4;{4e$&ke~HK-7Xf# zX)m`a<074)!|N;LJGRRCtM%!;zQ%qU*hBCc@OK!!dbiK^Jvf0?A6*!KpXS_NdN}%B zo1z_EYY6Sj2FT~(I_>{3gH8XrK0P$X`Nwpe>4&>HzDUPtpR#TWEFI%Iau)9br(>MA z&$z)Z|Do!8u9>_a;B^VC1e=WYH4 ztOq=0Z8GeGnH8%qv}f?oUfK@Vc?xv%09TvE`W?^z*gL-$LQTxAxxJ~`{R-AJTiL(M z!_iMdjq1Og>tD@o>Q!?Iplp4&LH%m20JqqVGd=fvWzm1n+xHlszz@8`4NqqI(EuSF z`>PbLbgc&MiRh}G$NJD*LV8+_cK!$vd>Nn4kzu_>-|t`@Jm`>p$od{-`=@?1i)sDM z(+uDBl)JzWOWM{C${{}=Wb;7ys>w^-)xJULm*l4Xn%UK~(|JE%`z#MX#(XD^)Sg+w;8dgSYtRlhBa^#S!x`Jp|+S37?&S!}NIM?HgvEdDTJEuZ(3K+&&r zc>QZ_&|{zN>m}b}U+W_lh7>v{U+Y{3uzXwwJw-h1!)yN@^BGG#9&~xSLci^Yy4&_c zbc;^W({uhBzwb?aPe<85+G5B+*fZf5SfX}qru=H>zufsXQ$FcKln34;^Zcig3nbUv zkJ7@fA9nspZo01l`&9v_a_QWx#)rlJ-AS7odA9VVqs(9Y5*^wHfboaZvEiiCDS2x@ zp6*r9dxM$>-L|mh@Wp+H4wH?PG*IfLY^g`0M7! zz7*7}!P#sFG(TGFVEcyl9xa0AB~b5x2l_wD^k4TeH22fq4OYIv{gLkll-19Uzh$}i zyLZz5$p86wNYD3u)Lv*^yEf}aPmqsidYFFEwUDOS17inDuJY*oTH3>Ww_O|a1{aa9 zX%F3P*OE@8$Ct+Y#%Z1M*tRL=4J?}_7T;;*{Sx?d-d}k+n)QEjSNY@o6X+Q9zeX3E z&!kzmO?Jib$gehwe7?%?0>9|rcq^dI{zdxELjEqFS&V2ds5_kO(BLfY2a{gdy_p^# z+n$n?<||n=+Ys6(`=)%;xQ`%>{bSo^2j4WGx-9rMJNV{UjBg7=f0qZ}c93s_wtE90 z&E<86#JnN+qkMHA6a4s^%NyVh22XK&`wR~0Jv6m9cMPHTVsMe4 zRm(GmAqXT_Tyf8zHFx)vfacFyiZsDFBZxV z`dbCONN@j=iCfzA?kmI(bzklg8aHe)y#ghJ=dm6`|LW#%pF=YM`;gD;tla8f!~e?H z!jIe-ztjBEKdpl-xX`xaN7T;a|*iG1@;`6kQx)^p(l zoo}h^^uqqbmKVM~i*XFXH@7bSd6e;mUFUtaoWA%T!{H5WJq7*XDCmFV+ID%h4^H(< zdWQZ~-Qf3x3wrmNzd!otqJOxL^EU1`)#;y)R3`73pPvp7IN3iR+yqVC?h1a31;5%t zt~Ym|xveg?BZ>z-FNpXjJf8XQv7*94p}*qk&E1Fiy?pNx`znkDmS3;^HM@L^F#>MX z|7|gzQGEoTPE3Mx@I>eDT3UZFzMv#o*#D~EYTPdW)bfxX(R;nob<)ze_68r#ZzzpU zPGqUvQ}M9fR6O|NZ&PS0?i<1Wr~+O*u6V*hu$)%Uq~6F$3#`Kx_Yz0o3Yjfwt2#o$f=(%Qwt$ln-`k?rrpEah`JuomVkFr!(e#pNsjx zfB((rc0Taqjt|-Zy+rLZdwiDt!9U{gs1<;RR$Be|zi@x>|0(I;6yE9k_7vG=x{?e!6KN@E6`FeMJf9bze{@at&&kO%AF7Ua?|DHm)_z!)r z8>{+P@S>S)*JEeExn_goQS^_%zvfbo2R6q1M7DwRl+6Ykur?eUiZ&eQF`EsWfU+-# zs{^Zzli5sKe*A%W{Aw<{(BpTQdB#v>W3)fb2ItXpJUtvQZmc=D;o-Vmj;D7K@mO|_ zC&oR^27lymOF7=&VVwtui*h!Xa~^ny)nCw+t>*kwv-`V<*E($uAN_~sawZgf^HLIq zc~OT;MJM}Dc{!G&S7YUj{s8(n=s|7#7~!{==->-Pd zZLtUB`FC?Zej~|BFdlcjBa!arM?){zAIa0jd{$fDoOf(4WjhRh)(u6oC+49nd9V35 zUQ4{qxQn~Fl^7;T@{S#Dp=ez(dAdg>ORjF)4iV&6_i&b5`1e#8zA^e^m{)^ds1K9&i>d3x`1bog(H3=*^%I00GGod3$$-I-bcz-^F8jYi}P;Tu0M17wN8R{ z{JOzclmCchNplofz|W|UqvW6NXKo(ldH&{TtXIc9qRqI6p*i+trtj-@w|V4`=|;Pn z^{om&`{}y(vKi~{TkCfC=W;cQ|@Mcr^5gD;g5QFjC^R0 zeKW$h2VAVn_w5cp?_J6IcG7>C^O?;t?r+KZ?hd%q?q+=l!q0i2tZyIvC&=&SLotu2 z_5Qw10sl3+<6Mu?J(TL0I1Yn<n-g-VXOIANfFY1-2TM_AW zkMTI^ZCzm%$G+ir8zTF1`_pi5a{INkAKJf>uNU3V_$`(%=tjM%ThI>6%tb9JCT=v zvfK`Nxb{hv<){0VWj9+_)*XLYZbf^BBBtYyGC$*^=hyAY%n&AqaL;%+~ehc!~!j@Lg1KG8iLe=_5b=54D9?I`fz zQ#*@vy~kLG8P0qG2heA7um2o9-r(sTbAA-dy~oO#-|K%Ix!IhKWqo&vAI_h!ci_wC z7q{oH;GbvmY?tc`o=-YTxeQj=?@Y%I=kh=~2Kg+QkN1$T=Ew5$sP`Zg`au1wCZF^4 zXVa^TjE*UCiFUl!`5T^I`!zKVEY92BZ|}L<`&5L>^{&V1&^?*D&t{_T!9E9u@-!Rt zx4NizwJ7&sh4=G`PQm5SXxhqoDBR-6UuF0 zt1t4kb1wJ>JLWt7M*?5}4eSrX@A<~PZ72`&72aAWE7sZg{O30DR;;%mq3W;bm@P8o zWxMb_ZWlOyDeJrBCA~)aRcF&8}nnq4Pl1B=+Ct`o?~GZs&SkoHp*6-p*lO1M+O$OZku3{Q>aT=5QQVo1=Zi z>mAV}mObLD-fB!QuRk+UO|aS;+?zdJZO(;mM}f~&)xuxp;YB&jWJf0q68Rz>=*6e~ 
z2JLeWw6j^9Yf$^Da}Tx322ZE;8{mW9olE|oKzCE5SeC_!!^YGk$ zMSrab_br37_a>SCS?B)}rZ-`MF4*_KcD!x71%P6Ewe_s^RdRj$db|70;2^*0-l2K6 z?Nzl@Z&yQZ7f>$gLv+jX%RXufW}oVDuCEIRJUshoFWqHzch$eow4OgoyzTqRC|})A zmPPxf`zSRZWcTX39%#H()NkK6Gv?Xl_x(dYH(F4t`w1U<($j0dO6~kBg1)DScbfNo zX2QE3cu$=6blQ(otFS|_enWQe;}I|Xk23$!pwpCSCj74~;vXa3wT|wfmwc#Ig3o=A zIo`9?$BRn!5r|YDYn{|B?Ngk#{Wm>N<9>{MeK*bn6zkcCP#^8RcYHeUq;;Mm+ySpQ z)T49B|GfqeJ;W!vwLd!7!*if71$~_ym+SmU8v7`-g=>lbo>g?0QJ?d3T?@%?&2ME3 z>kg-RjkbPM9@6i;-}X@lH@nL5cGk6u`Vr?gRL``Y-@k(Wg0#VOLox2_G(JFL5Sxkh zOtxXGKu^p4p#N#KsGlr3>s*1#lf^woHqHWi z+aJsNrFr4(f|KOuILkBfVbYl%WjS;YQ@%g)0mf&$Xn(ZdZh~xddVk@1qif$}JQKHu zKkQg~jOo+JSL>r{ud@Y2I5+6Yu3`Gx`9I)4=El71#K#!k{}C^q>B)5-ux&@byA!uD z9`h1_7@1vhenQURtI|z;KjGGVl=fnAq*HxdJQVTwah|wnN6}xD+;o0je1ZJFU>q{Y&!-?K${ywRQER*TTCv)@ZudY$W`}4>;8zQ*+P8j z(Xi_`up&SYfJydH^?BPu&eP6RkHPONQE#K%srWEPJ1PB7LvOS%aymPGozXT`ZhWs> z?O?H;{}I!RYH}_0a*96kz3wS=NB_OJk7SwEGvOb!^#gp`A2Bq_dliSfwtGH9qnr;J z?izM~Gv2iutZ=7gI?8r88#2b&m%jrOa@Tu%(#uxY0mq|x_H5VP^zYqG_raa+4vjMZ z;jRycpZO1W@!m+ai(1!EJE!r2-G}SxV7JWF*u2G4xxWrN`%b&Rybn+3SBmp=Ctxr7 zo^W`rtEgXE8R#r%Z!=WD)0>t0#cK`%#>zVqKTL0JhqsR6~TUGaYIjj_DJ{I_l1<7#SYf?>aFrV?$ zglNb6@^?7FU*ynS=h4YX}!(F_WWN7pkTu#GX54#J#fU@R^Q|{OJAuYWpTJ_i8L1(+Xe{AVA zZ`Lkvx1YOdu2Xp%{$5GDyo>#vxpsLMC66Nibt>J(2Frgn~ztar@a;gz20@5*}E~Om%dh$ zot~gp=}f+8zcAkX3wK|Ye9PRj9>QPLQ|Wb)&tJ84Rnt;)3VQW-XI8jheE!B6^g!Eu z{@XJ@=YHwc=`+9Pe)0M1XTIQm@%b;$JZAo4`K4Fy!+J-~=e0hrc{)C8UA=wJhU^^e zw8rDI8{h*pOXs|b=#^pMnSS-gei?1dVrE+}g%+ER@e5~O=zhuZku&I-x9iK#onie^IX-#D)N)!me!EbPnstak|I^74a)G5%x%2ZP zu})VmKk7xVje)Cn`<5j;C;Xs8?YR2EvfD#(4sLiMv;+8P$A%ZuezxwZd$`v5vX2@8 z_Gvsn6#FuU7dAY6Xq0wfc;Qm_qo0WST&(9_?}t3v=P?nFa{cACDA$j8xitRP{=Jmw zt-at3Z>_?9YkFth5uyKiiT$c|kiLAMY4ms0?&RZ{VNd4yUT8n73HL3fcS6g#&NX0e zt%Lfk^C|6ngsD%u2f5h3zTvd#v-(vRlOF9y%r1r&n>?cY*~RY7=Ml5jY&WvLlg@{9 z{(1+(CA6ult75 z2Kke9(H=?e>c457JI&J(e>>f2@-6g5_Zy|h*j}YWn@CsKn{5{x!S)sF88|LGgK}rq zze5M^(%7e- ze!zh2gT26)y~q&Qm&QFlYi^t&@%(%NPxfHcqc}IeXg_*CM&~i|bMZayO_m$|C`ZoE7m=TH zKgf9L+qM-P(5HtOuKA7W`1#4R@N>|mWch{Ot31M2EYFV@$|Jpn9KZ+Ni;>=6cY<^d zSNGltPqvzTfSm7lJ)}H~^&a(Fa>?uc{VXT&fImn-uTZYsUPe8t+58f|YVrrPU$Ne? 
z_)ZVb`I|LpCv<*#izP#XN9;c^XkGw*e7QY&6YnR|JD18Y-Nbu}G*6^@gZJ$5rCWG^ z(Ie~*B>e;EP1yOApiAS!ei#V*G*7yCw|kSUVhUnkHblSZnRqSzYUd>njpNZ?Z#NLo zQ#W@a^rrt~%m?+{!vk-zo%+gatnD*%cz0pk-TyHz=*|tjT`c_!J|n+<&oH0gn>L?s z8W!eH3utxD{OLIBO?s6n0^+S0u%GU+Km8Yf-G8bb(s`zw&Xt6e{TtSKFQXfx$@z1C zn?D{t#&js3bx-sA){Ff5)HLNlJwf{Z`#$doW}-{$i2W(^6&;Kqq zF`xI5-h95< z9!$@_wGJ?qfB%8$#lI4r>B<3*e?Iv4_G!w~=HI=fYsj5M^Tgi?|K2i9e*K4jkoi0v z|1PH;p2EM2-p%wg@{jGK?40c7@P?8e%HBy2&EB17n2Y^Mug@2gAJQM_;Wg8gtIbC; zaP9|~PWEMa^hc`kE{NVSgx!F|MX$z@8kecQAm7WU$u~X5ejWNdXeV|5v)W6&+p}2n z{iE#n_wRqb6Ih$Q(cNqp@l4U@=$~jDGesVkT;u80zf%8F{l?*P{g3r8jT3V_VDwMR z|6xb(?fXU3l)ue)8gQ*cN?zsqu6rnze>z5fWUC){I|V*>PLoeMO8)fYa$ozDehR;$ zy-T~pewutIwL{%JkCPr@e!1OLx+Suok2yWrYSK4sn`V$N+GXd{$KbQ}<50z2wBrbu zJ)WY+F&-_R*Zm6Kl%?@M)sBMsoI~#cYAa)aVg#Y-lI;R^sSnVyI=jNi5mz9 z{{Ex=S3d>v!+KpB_gN3mCtNk*cvkHj;OZ9Dzv6%cWV;SK{$_*Y>KztuXziR|D_ebnYzyPIveoAfPZeQsXL{hrx_yFEO8i2S?#!7qiMe82s{|Kk4a-v7$* z6O`K)%g@h2gkEU8wZ+uN!=qoa#q1>f={Whb@k%Er*5~ayLDGg8mDB&t2KuEfu@3)eI`utLlbV_XMI{D)0CCK~pW=3zHtB zy{NjCG#*R?zo!5%yVq%O;0x^5i2a8h!l(UVAEDRw4-EX1hu^%G?uPYrS8sQ>Ir^I{ zPuHsOb3eT8uMI!TZTm06&wcQ^55GD3N0FZQZ8t}$S9-U&Ir^E1e>l>Io^{ndygB;m zi08Yg&Cy>8|AB!2_Imj1?NF6rD3b+fR zGw=;CUhwr5^xI#zFF;~nvw>Zvw6ny!hW-+r&1I_?Zg#}s8n2+=@H@UC_xgP9Nvr?a zUfPG|^5q`UT(->JY}WzOx2zuif$%rNzc2jV;ivs;E?W}*-4W0ExNO(f@PBW_Zwmjb z!oNQJuMYp(@GlMjs_^%?U-uGb4?gPQ=>M;^n5^$%_sj04ah`7@lO_2%IX0g*Uv-G^ z#vgAV$o~9%Qr@1xc-fc6zWXhfAKMAaF+Yc9d#vQMYjPhv1^Ug{yPIDj)O_;b^zu>Rh1ef!d|K z|3$U6@=}h-2l|wb-tGJXKG&y)1tNTm`T#gDf839+cY?QAIh?;yKa1xW2P{3>J7})@ zX~S_oKJQ0ZJm5B>fwnK(5O(d}u=jguXPPHy-`jRQ^f0&UN!TUX^{rl>wq1Yf+rZzp z>!-snyZ%J@PkK77Yq#xs==G@VI`yMDD!aZt@{?T;z0d7>Z_sxt@UIGZ+4X_&%dXc0 zUUq#>__?pj+P(10uFngyL$BcAa|N9926@{b-J=9X=WG zYNsC#zuNH=;eRahdo=v)&se(@e)e;+uEXJH|Ht+XxWBkw54%@lr^gFJ{x>9Lh zo94&Wuc@8?Rp(E>Z@la7bH@LDz(2}zXx&if0J6T_j>q)H-8TNM=Zyd5z#scBwJv4t ztf#M4*sr#8qk;cX=cnci`{yr#T3bJra_zMGi+LHXQx4J0n%!eM*i$nVxjoJHZgRZY zy=&<{xSsCnRqhU+$NoI}b*TTv{T#?w>%rPDR`uOb)~|#Su}|j?YMsB}{I4btc)0b? 
z9e-_>8CLt!QSwpenkLwNm~_tQ&c8JIzt_R-{-#ejzI-01m*G`^WoY{T>tbsgw6DCH z+~o;Su0i{y`(UyS2b`|{*awAq1CJ+ovfRjpy;A=dasXeThpz#B@Ypvv2V7=<7(tp( z2Hf4o8@MJWYo|}Me%$+HE?1<>wD0hI_|T7E=q}c6jO0Z3qtKyd|Lo$(EDb0v%V8v zu5+o!zh&?2}FKgLn%apE7uf~9?G|MfnzdSMblyI_lX{%*`)rc_BHzCCHRJ!C%c|M~%y}lOH{}u8X3;WVx1luS3QiOZb;M(@2XRMllJk396H?y3QYd-!kJLdZS0ntl2*U{l2QxVO)58hj zEZE`V9PY+wk2PM(7Bb^B^gmm;!x5Q&I^JyI*6>HYU$`m!2P2-0wsEENN#`GB=du|0 zr(?8pS$-aJYryvsU)Z~BSv~x_86Wqf7`*`xt*~;BZHjo54*vDwcdz*$W%*6Mu16Cyl@}=M;>!JTi3lU;;EOz3%9#Jw`cElZ*u!bSwQLI?F(;Zx;ST-*Sn*;Ssvq$ zm-kwxPmgSm^6s2mUaN=raz5Tfe|q$B^3m`(9}g#aI>)<-aM_jr&VOWgz0=((_R{dz zEoP(Y|L7^ln~pz0{Kh}$d)V7_e68zQ>&l;Se70|le7f>I?$4He%-wSRHG4yR(#P?6 zq}T8}z1^P7^zk(FKQf2$MxW;&^yPeCN%$${F?&(BsNCK!+v9-MWSP^g``&TR2K^>t zv30j=7W>aM<_-I=T||Fy$-$()yE5TmL4m)9HPH{x@+wQ26H#J32rA06fSK{OUBk zST7$u@9!MXBbFaVqIzdY_ucFMa-4?>l6}7s`iH}|B=`}cPoB^B!#?sCw%!r@1~g96 zeZ7yEJn`vXgKENc!*m_PwGOU#L_qg3$Jglq`95w8yzJ9Di~5a#f4{>sBb|p+{yHBI zIsl>jr!cO>H?hy1_WiRAa_=LB7e+TLh;s-0sfhNBvT>TK(Kg9Ph z$6up^V16H}iQf0Uxf}W5+->2TceeM#06O!1wN?@AOK)|@*B1d_d^Zdf<@@oT!MkCf zG=0tYfyO-qgVF!jd4V!Np||9K)3a|;gZl^uD;8lNy^$O)@E_MoNnfRNX@IHo4IdN^Ec^c?uWGbGSjI% z)Q?`=3jZiwl=}&z6Y~3c`<=#pmP2t*CF&96Saqw4ei7xgsKIh+ysLdjn%BwaEsi|u z%*wuRezPr_|;&z_I>J{>QMCi^&YVH&pcv@QI39I3+(Uo00Z>T_29gR zDPJ~gmd{VSANC6MJw3ws;fq)=WXH0|PwmFgIO}5$fB8~mW8>qvzw`3%3IAU@{_Juh z$Sz{LH@uYi^K&1OZ$8f&?*=J<(Ub3g&*3S@?4mmJi+y6*MN~MRN`*eLUTPn--c1ty z(i7pyZesfEeWsB1W$$Y91ODPUaQN|^{Ju|IFML_>ubOBb;Sl*8;}qSmkUdB}liXBZ z*`I8~V@zlA!>9QP>E)Uhq-OHX<;Q%g$sHcv-{QWaw*6&%mcNG=_AY-9kNN36yzI`p z;{kqrI$vK+cDY~eF6s|B^i1p)G~wy&R<`S;mtXcP+Yo-&a;qO*9Pc>`!er*MZbONn^gU|0=H6X#R8Ci>NQfdBw-90?K~E z&i#X(qs{TeI)>z)K197J&g**pjQfe_YChu@mRIx7(x)`eBd@tQ?Dn06gFQM_vPZLu z_UK)N!<;-m&6C#b-5l7ZG~Q2BeacGpROc_X|9@`a$u_Y54ZX(E=KZxPcI#p)8tMbS zw%r0 z1}_bJc5B$P8SJInDV;}=ox%79@C3cA2aE2kksHJ{#vb|7#u5Pmg`2)0Jbu%2D;(C@tOS zX-}88tGZ7r?z2tUz?JN8N>Yu{@?_3`l zG4t1|i~Pk;yi=Q+U;Ub94{||#C24v%_j5FRkel`2t1tC*%^qkE{Ff#eA~btQ|6p~w z-wf02VZ9%$_9T~iJo_hu)ovs+c*=FK+VFZkXhxvd!u5Wi_5tgBT-senGP_Tb@=Uv< zeOyhwNS6eE<6aH$2VZ_)L!n$f9!>++F4w%|r!2f(uJ2AT!_h9+3zDbYuX0_$`wn_k zt~q{BaC1qN%g*P%!{G5gANUV?z}fU#`(t0#xdJ}VP7ayBR((-2YHnGsN+-Fi|5KDx z-SIu;dfqP2-<|oAg}2M|jWZ9sUvm1}Gk@fMmFM)CzjD9I^Yt?@#qIL^<(YfT4|*W^ zTD&`$ts~z>XEmWbiuB!P@YRIvS)0BupVxp zK9@C?xF7t%S50F7>J^u`Kif<Ky;v8Y#1ooqs`W}%kv3-R1@xhNMm2O&KsoN7kUOd7m;?e zeaKd`Uz^6e-+B)O`QVeClYPs=uIXM)^&f2h)b%Vq!hYzs%lV!_s&jltn9sJ$`Hnz3 z9`-WMO>DcI_faX|^a$;S#+%!&{fgtS&5C}z?5xho=6c0)w)HOB!JO}uzxXNpRZZA` zMK~bTezoJdPid&z*<$jfUI|Z8E-yK&T-aY~%O(0tab6|uX1}Q|7uv;VBNwFne?>0R zo2S#)G~|~ap}uarh68@-OL~Ou#I|cF7ul=y2=#T_HPkQVm+HObqtw@JH`yezeX>Kf zS)qU5slJ*pp_i?7j((bY|2JMgiu$h?;$~L^Ll^~LZ@^6+F5yyi4Uoo}3--$fNOY*qV zw!RnTF=T$pL;P(naPo6|sd+n;4pS?GxB`5$&4hVKfh$&)dX&!QyKi zP5%Bk>KWGQWFM=^OC3OX*1Q+FAYTf)xz@dQ|323fC+?wN^BqkL67Abz}%aF77m+8I-&BN;6m zA$KP>xzo;<-2~r<>2z#i{Nf#Sb$_hxS!mtGaBTAfJ^eR6?)mB7={5Dt0c3CIt|fdc z@|B!wwhy*$Q4{+ZFSztLgGYsbevxH0Ov_O(fV&?8`i zUeEt?tDobJ&d2{IcE1Pm>esdFBlH*=0Q=QjsHdp6>kEG1`{bp>3p{hz6K`8zUh4t1 zIh2R$M|$)C<@EsJ=XTR=nhn6$Q+a)YC7p?z+?!}#uDNcX2eiU|sh_*3SA6395$u2! 
zA?e>A`njKY_E6lc zD%u;=GtH~2UV@K5d>QI7^$Pc!;!ETGrEI?e*jK*aRCaM_l<%Ys$GuKNu^u(t73cqA z-l|sl<{9us=Z{-mr(F&@7oX|fx4owr{vhcYiuvB*F7{`JMu~U0i|Z#veM0$g?r$c2 z+FodPy&Wj{!H3%l`lNl(YdRjGzU+54czgeFXe7z3q?jWUaz_o$lb?nRF*@SNdMhCZDhWka>|#?MGU^*XxH_ zuhf2nkMFQ@iyr6)W?VowN`QEK0x^I0{#^KYYY{%?YlR>9J_mgrGUWClzdGDPes32i}$r(Zgf4D`(@TTKFU%3o3p*Iz1ZPRVeI>t zw_nDLfqlB4O#PxARybh^wENAjX9AtK>ah-(HAU0zH~W_^pVvK4@)%cmRSn%TZhBOkP%SRaS0bCt)fy|d!~v%XLN;^&F~tpcBm{NG;)&*#UcnHRfo zMWG#E0B&19#_S0CBj++7_C%rITbvKOp)enlt!~eUtu6G!v(;1Q!_c$S{Fmm%rke*_ zTfmQg4CW&-j(kHQ9QOMzn-WpKG+k%`_NAIn8FjDkbHn*I!2hkm*Jg8_A=~>D#{(s} z;<;YTL!n<^o@e@;jW0T*9ag_^aCYoFDB(lCFSRQmaym_R_%weuGV6ujUmclMce_=a z)#LGIj$F@Wzt1*5_JgH<97K(AQBi*O?uO$XvGonep*HIkZtsfy!N*E?(DSbPYCGT; zC7AN4&1!gm{h82LDaZ}-1Rl`0y`W#!#FW+Xzu5cRMY_gIbfJ9Jg!A~%gpRC)$4rOQ zadHwJbqh87(r_oieCC%+>CU$(trr&IzE*-mR~hm@-}Fx770_${@^<-bay?kWC%G2& zF+WH74D{vaOL#FfgFFrw_yD?ZDAunSe`)?5^#bE6OA7n@p8l2Wcgr4WUL#v!i0n%j z#Jcl(AB!I7Kf<+tBwfmKf&Y{8y$P+H z>0!4!ZnpucdBE&yrW1dWzuWcLpM!ir-!Q9HZhvOVfAGBN^TYVuKN6fjW{W**u1?xq|5j&qM2Ec+Knl zj`6YX!&iMTk1ql5`VsJ24;zkoaM54B7e6oO4W4bjPa-cL)z!!UfEbMXi={_%H%B_t z7ksE^;CD6QI=%YY`FZZ^2sb#J<<9BSyx%UatAc-Et>*tG)-j#d543KieJknsAOJCtXThJ?xGDBQ40>xzJ@InfN&JAjzL0Mg z{LC)kI(C`g8W*A- z>sAZu5zqY|S=jUJ0VCeNY#607x8;t#pTT{n!wX*O0nJgi6|DuRH7|6u)~_X=TII3?fc^805%yJ+kGiXKpE-Xh{zUtW)7USW z4m-HpU0{G6gj?Q^lrAS+b|qb%w~!vm)%|vPK9`bj>HSP6en76CPY3DF-yfKXmt6AtrS`_~**~-Mq0bc4q;=Qzh`-Fs-N1=062U|2Re2BK>UV&c)ax|@TKFA zlFsEU2+{*0hYNgJAK}t?k3@PnSk}Y1H%sS@ptqn~_o87ug!1Tp7qwqn&(QvNw2NCE zF7)kVb@SOe?;&^mJ&3m&;w^^E_XDhBxa@oSBKmdzNE-JRZJTcf+rHv@8T2x{nRW~O zeIxYqDE;D_^i%x-*$3%w*^ahul>H>%+V+$6NcJ;(pzicYj=9}zQ693#dKWeeyHibm zz~zR1?x|A$^}U7utM)Ub^F15uXM_7D_c*7JUO2fv>pV%eI`ks$vqQVMt1wQJ{g^mT zyy&kTG(Hx`$0y)#G9G;WoW0`-JUjm+Iw2ONeHGc>eO{itzIE+p`3(NCLizNLz4SYO z=d^35!x!bc4|0un9*cBs0$oFGy58Dv`e1sAFOB^XTHn_CsPrA>xDxeryMyQbW}PF{ zeu3syjMsiiy+b}?6#!Llq;sy1W6^KGlx3~cl-b?6thW}99f5k+Zp0csNUqlTT^wh3 zUf|>GYVs{F_lWr~@c0+_z98`D_2yT95cLrCsp`F0;(;Z_d)knO_8EYlrS50Pvu1eF z4@EfP&7qGFdThJ^#MR`pZZYTX26obkfMKHpALziy3@fXt-rq5-!&xPtI78uyF_}d_ptMKe&hVmwz#+IZ1{l)a-Lh1 zbI7-9r;CwKt#efcdRyy!X9ap8J(3>w%;`L5ed<6yXVD|nBgv;Mk8(ZesXTfHRqJf! 
zgZ`j=|HI!C6#xEHlxw)+b_+=CYwOL9DSAUYd$aR>s=l3jefl!!c$)e&ydBg$8#}Po z=`QPOxjsox%lu5^yjPED<+<|pv0`~bZ)VD0eAD^?WI*0=r6mVDk^>1wmT zRjFH4{_gb<;hOBWSiNOE+}4bJLZggF|FKx#v`?Yfj(^BP~@~A#M6T3QE z>TW`#wBPczZ-mfZ@}G3w{7?Svj;qV zzk}x{&(@SC@XdB5We-ab!hNA{DRa=S5wAMqaen*GQH{l_2lA7y-+e<<#M zt=gDu?gyBU?7QfLeQ8*f+MQ=_KbBVNhUfXPyT51S=b73o^_x|$XVY%|n&B5e&eo4Z zfAOEMKJ2vIQ6Jdf?x~DE>-z9<@N>rc@KMj_xvUTWvB=+2eQ3+;-22-f2EMcOFVLTt zoYb$VCXai$+WDP(|Ms3D{b$M7*R4?h?Nh$0S5xZql=1Eb)7Iy6?&q#r>-nZv4!GM> znLWLJOfwGKh4RhR&)x6nygbEzZMi&a_H!a>Ci_^lM{hn$e$&~nJFYyt{Q}Fy=W6nk z+jTA$v$tPz1ezz*{m*$liFu5PYnTt(fj1f5S|`$Z zCegY0F2dz@C(eua%;x--)`w@}r`kE>v(oq}`g44`pE~3^_ipBw#rQ$?680Y&Q*M8r zeLFCm)D7SB(GJv5&R9p2zG~c3ZU?jPSl%CZhCLH`U-G|6-runI2%f3D%knMP-?rU2 z_i^%9@GgSJ{WIB*A!lPgp9OteWkF?np1r=+o;Q8_Rq%NReVehLyPxGn`6~8P^?h*m z#ZIUAu66xd$L*dVJ#vcu!)sVUvsbKV_%*O@hUXP)-A%876^0*NH2V_vCVK^HvH7*W zn1_3?!6_ZxG{+nG(_RP6UR0-hDe2JufTyp|d|zuOdo|rW#J``Gzn^nHl<`bAkNZGT z|4Qw0HF?>$QIK=NbMIO4+;Gl#_5;rpI_{e`z4qxA?RZ+GBkw2iKK61sl)l)mHjzG0 z_iz4by7DVsG2dO&=KJ;O(^F0I@s`@V;&^Mj;hWOGjCQt7-@<9-taT01IUW9+&I$i| z$8YboF`sc)Yu~qk`7x}=&2*0LbYa~M`_JY*gqZ_=Xm9r|;W*FGoWXhu>bc5`_|w)A zgo}8%Uo80b-l*Dl^+&Z1nC-orbniVJ?xAoGhI_!>*4`-B3DSl6eqVRoPk*t#7`aZ* zLsuA`V*%e`@0=KY#q)&+;6D=a)`|9c#2B|vyr1DJ*I+VC`}W|@po{hk>t*0acJJd6 z&iVHI9hKN$oZUo9S-^XyC@Gm<$j$%U2gmZ->38w8;;iU zVfy9P9s*8yf$v>LS31i0xv`#~Js9}Mm>%nz)5*`?GjKZ6hjx>m4->vDzjXAl$7{cc z-Vf1!60QGgzezfJAmC59D>>KD(AlST#XFiPjplJLbl=yabQEoiB4YjrXwA_Y%Gx z&wYm)cf+ob?~ghzwQKEg)_>4BSq=|7e0DGGN;Tm-bY(hq4~EVUYJFeh8LVG<{_C9V zBz++GyOMO*I)0II8e=_Ie>&4SpBVR7ojT*Q&iTiEGi^IJxxQ$euKiN!IQ3Kga?~em zCDMDnY2@F|_X(#fzwa`qk9=&ykxx1&Aw0RA%gaGN>i%8W3o{Ri-WPAz>wV<=>bvR2 zeJ$;K$#;`q`Mu<9A2QYNsQ0qdSzqKE?|9WJ)F1Shis${(V+S2?{}Ag1_;b=WBFJuu z&neYAnb<*jE{}RB`=@)-Yfvcr)NTOJzx|T!W4eI!6!$F}DY=~i+^-b!RXZeoUUcz7 z$D585Prl#xeI9J@SyG?V_lKVCGlcf(UiI_?EI{*v4G(YLQ+KyD9PR@Q*Sl>xH<`^M zp6tfJ8~2QB-!=4niD5#q-8D9%{tkA0mGU}%h;A@2jdm$-cV`kM4GO>0eP_ zaQ+}2JK*uEKRQ>SevS4aBmECMJn7fHV0nKo_Jyk*0Q`-{-*k-SYVV(8d5dswG&t2; z>A&<8cy24`vC1L%q1eYb(s91;j~nTjYqrQ_+p(1UvpTMDf#4kzeWp@&ui22dSH*BN zLAv*5q~ir{*G4+(iEXbn_?2-V+U0)lK`|XPpz1HRBj4}hek_AWmbH${eZIK}k1lmJ z*=&CSe}TU{0{jci5BQ7BEyAOwR+HD+U%)TmzCMEoo+2FVpzf0$>6o8j<+0W=;5)^N zaHzS}WS#v*{L6hmPpxBx?*l8sf&Oap%77d2_6Kxg1W<$n-BrKo2>4dGJub#$#FF19 zYxE%hVm$P)ny|eshMOr!h8AGe!tl;|sk?gDu$o-z{vtf?qpZf)l(_iy&aC>sNsja~y)#PwMR^#25B7T%q@4OVlq0hPptr!kD z=zi{EINM{Snw&59H9`9)hhr!;X!odqe>$(1*VocLDyV;_zG!xHy8+v$dZ=+b+CzP? 
z+xa;nD2aW0tdiBu)xPN+1`og9-y5-AexP%tGSA8e_fpp5%B#%R?q}92Z}<9(`;Sl} z=^yM0{W~3gr1IUY$CYKQ$CXvA$CZs)GR9?V(T^V4#uB>oPKsxG6BTq;F4Uyl)tlyP>uit>D9`PI-@}d9r0l$#- z-1r1L0eI>cu##W&U+4as@xf3S+$yi{fTw<;)P$#ht@~@0C9L;WZs;4}sb8=@gr|SC z`)ie#c>M<)^^Nrxbkl!Dz`fY*K+VqaLH~5WINHckV}ts!3Zew7ZM*ctdINymturY4_W5q5VZ^NFVJi?E>Ht zQ+4TEyb%Zb(B8RgN^3v8e{qA=uR+%)#v>g47QmtYeZUA+I|#hA8_)`aGk?)80RKDQ zx#u0j^DF}K<{x~=?dEr{z2C%sT>kFUY-lm@z#a@=>E0>zJnLh>0r>D~J_PlD^8WmT z-oF4GKy^+qKQA!q$$ek)NwXK~_X5v>?a)Mix*xyT@4>vB#_Q_eq5p^y6zB5}8XWlb zPU9Qs@Nz-8_NB*I4yE5>@!-@y=Y3G$3;R0c~DMC z^hxJ?wm15bx?gQC`(xk(09swqKR!Y{IA(-#w&qJP5AomNZ$v&T!=*aqdRX(?K>iwP zlQa9VkYjIQo;d4b{_S~B(h0ap^Eh3kyC`qah4b~R3i!Z}*;YkWADW{qkNTZyoD-{K-GzFb?hkr&K5T!`73a*-{j8TdXO`|K9T~wqe>1ts?epp$kzH^%<^Le{J9}`S z`!#OKx*iIDZ26$HE`^blvCv)`P6i#rbj6uj2f;&%X{a9pv@s#jsrbQ0^bM z_pEciUe9ptqnol0U}buZffV`r(=+fDaKQf+D}wYPy|2XA`=Y$!>wP7@-bZ?7;%l4l zkfPa-TrPd#7k*Zhwp`ghLQkMDx6ixU;Uy=88>_6|guJ>auU5=AWP1-e|GyKy-%;fI zv(V@F%)obW0{s5jBHy2DeJ<&7k#CUTbIGUmMLtPCqaB=Jci!t8Jjt+uI76&f4Q?{@BXH@t>0=V z<~AH%avRk>pT9tRNIKE`GCFP{oX*4J?n`_&U+n9e_MVozgXc$o0*Mjd_mt~j_TXuE zRUdVaXEph%!?}I~KKRtSi1jP4?@_;&8(m8P7j|CwphqjG(KDUzNBoY0o{6690=73< zoYw+Ae4;z2EAq?s^1WAkXUxl6P40I1G|nHxVi}1xPa5yHng=x-d4t6p#pi3xW8V(z ze4?Zqhuxd#UWDZ)Y%k}{q_6xQUF*JvCrFRXqkUah4?o|7)jjW9Ob;-Nz43=V+V(T> z{n;Ic-{~@fovzT=^oTd_ww^G@@$Rs8-08cC>9<%q$M-J!fv0Xi_k_E{@`Hcl<@U$E zbo>(Hv2f&<9@|8}mD}@;`jL*;Jv=?yJ2~GQ9gWd5kcT^-E8KVpE^-+g!ty}E=VZFaGev!~iPdx5$nCER62G~JJO!ZawSwI~i=kyruQ+k~BHhqZow*P#}S@Un2 zx6SR|#fm3Gi}BrtyT5s-{WZHd)XdUk^X|(p<1O{0~Pw+lg%Tq41MG*=ow8x!`*Oo^om~;CM5Ob&ci%wjUnbk z$Di5ioq>;rO6&5?1s4ZC>SJ>OJF?m8-2q>V_ygg;DE#}v&xxFD_4@E*^pElngrEA6 ztzH{`XdmKNg`e{b+3HQ<$7l@kmxQ1D8|c)- z{8zZYI8MQS2(2rlz1sUtn+MlEwith;@&1zCW9I2m51?t%&*J#AZsGlRw&xw5F#P>7 zFN*%brc(dlua+132ULdy>!#4u{*>~3#L}Uk(cew}h4amOf`W;^`;vFWO?oq$( zGWxY&VJ^GBMmO=Vxu1HV{QxV--p_?nHyj!*kLnvIu){|U#_Y(#yG@ocjZ^)be=%|_J6 zc>kdp?`vtkqS=W0*mW}SMSYBZeX|kuG4|y&8&Myl|KDsxeeAkB(no#lIuZV;k1=j& z#(Q6yM`<>qKE^u|%~;peJqXQ4)W@#%fiLP~SJ;zA)W@!x%lnobqcntbw&J>zWsEQBS+-;kSm(2O3 zbw$0V88-aA4)=(qfj-n`(Qatpruv<-573t{|Dn~_aoUUGdKuEIUkzD5Lp8&9ZcAo(C?iKU{{;mPe7a$}(O#N=nGv&AMc3d`RpX$jT!vl^D&ue&iI?8hI zuy8}={Z4PssW#SMW%7|8%WECs)A0kuV{pv(#N>Q07u=!9_weL=RW3{K zc&{ORR;QvX|2;v6>~3{uq z2y49Q-AA|G^miT)eE4)P58yukpMU+W2m{^QHuHT9yg!Uj?WXqa!4EzT$G*O5LP4ch zA=L23y+W9Og&jkfIqzxTpY|#Q<>yw=uSWT@izEHJ>WEHse;?w}ujny)0H^!+(B2;^ zwfCo%73}BksQ(vG59bnna+_l}JV0ij?qkz@2kbXK?f=kx>>cx%0DgRT+z@^+&Av|@ zqKjOM<<`D8y{mIa2jy7o-#u&zv@aqZC%v%i|M}b24ut;JZ2b>kaUH_b4SBH7=Wnu~ zNcXcZZ|lo+@&o0PUlwxHK0Cl8zB%@&%TD{p5DwOeKbqGTp0r5@LtZGq_I)+S zxX(!YI-6sBr&8-(%`xsf%=&1Dn|klM@9v<7?_FknyTkwW$bV<}|2q8J!yo+U+Zuk> z=d5p2`2RNG*N30?_+@=-!_WPZS>LMg^Iqw!Zy@}%BUxWB{U`pOZk&f`9{UIS57Qns zk8+=8^T6^L+l5)V&7D2 zKl{Du80l(7zgO*eEBd|Z7|YdaupvyxSngJX4M{pizO?dpZppV+^n24W^0gKH-gJz7 zZ$-a19oy;UYBkvJO~-bJKkRqxt7=8RHyzs-@$C0%|5Ph~clTh#)9~p2uvUYHCmlN+ z@zw#eFC9A(e)fOUc)z~Ypgm8cUbo_%SnZ2!ML$^kGFuHA!Zg~;_PfBR0>5XMujA_e zH`wLmvV-2M`_SbVZr`@@LI>14Gl)+=^;r*`IFo#S+sfce?8ij>=iczL-$bI{`1>C} zwrwTLExVJqTL$;X$*Mng>z*3bPW2fMrIcc78acMS<3lpeYJSM9rkCV-NC2*naU62&t40Mz1(K@ zUGcJ?gT9(?;X|I>*S~?P*j_mP0-nEq8TdfGn39g-_x%2?od;EWnB7DC;%8Cr>c70` zd*8fyI=MG~^s}!=>NOkw<~`HNed`~7-An)PN5B1=v&sD{pZ|k|78?1$w0r}Og>Pewci4*ale z!_hy~zDdbN{R8BKbKT;9u^e?v4}Aefz%BTL$?L@kM?ByVzurVIy0h(VIxISgZd|*5 zcUuDc^nSJO(aRbhoWt#~a?~woPq^5B1AyH~4Cm4~MJ! 
zW$$yp>Qyy4=>8GAUkWoQBbA2t-$yDdV_(z{l=ee?Fi&5zU)AJ5q<^8$TaDQMsl^sj ztGvlq*08?_{%Z1GheN*f?|1)5$F( z`+@)ue>K_Z@xV|24)+5;BwDLn?fcUJ4}Uee#p3}_|B(A@l~;TJuU1(b`@Q&HP1SE< zr2T^hwtGVZeogEjdsXZwtGK+X$(uYL_)#;e$zZ^LpX*Jn^2(%PA)sf8$8Yj@z|+rh zU9Iv8-~Uystd9M66_-yn*^tAd1+FHqb$_jLmG9)lJ~d2H0DrH?zsBQ%pZ;syUo-uL z#jjPa^!fyN)a+`)@eAPTUlZ_``3vba+t-F33*e!rRX>GI{?LC_z^{n?Z!3NO8{kn- zs&0)4Pd^8W$PYah?Q4bqCGPLHzl6t3l-@l>e7E~AwZDvC>VA}-e(06%{{%dGRNB`H z|6KP&8iem~KWYX2)|41LN<}|Rm)?m-`itGqej5CB_al_>z?|=2v)>ru=-x!Yo4GYV z`w#Hj@Xq|8is?}jjf)Wvd1+rO{Bzt7DG?q$NS%L2JmjSI4}Rn%y9+lY8d%-)IygJ}BNg@w0EhYqxF549*)3RSFt_VZ1ff9AHg5Q0}k}u zQ=$v>X0fkD`-HOX7HA*v!>+1bLci_F()jLo4MzM@zpA|t*wOg7)xbReEk>W%7mF)2qFl*a-;U?+iU2nKR4Vqnh1s zim$nydM16D*x|u;o^lS?Rfg!_W5n0(C-_&let`6)x!nTn+hYIE*#A7erG}lK*lmC8 zQ+bg7=PbSM=hnIn`WL>B!^#Dg^v`F%qBe))u5>5IPw8#Ye(S&9?@qq%^C9yh9nSF? z`ZMYMEMG2%xIbU{fsSR+<0zl>1N~2=MEnwq&);k4W`2^d-dO1CuNy$sryB%l*=5I?lPWjeh7)$$bFck=#d#Z0H_JLrAwOWXAw{=3Ytd}eY*U(QdO zt4VtOkH6p6>vp*;1N(~n`lSND&IWg^Q2u}IZ}Gg|zxKEOwZHYR{Vmw9|2z9zu*ZK; zXdloX!GdnF#trQ&&b^_D8R@u$<4ilRht9!>o!7G_)Zgzr^&+;B=Cyg6UxMS|PZrMm z126l5k9z!w&HF&FaBdGR)rg%ptC?Ht*y{apyw~;CjcvS0&wlK~k^Ti7Z+5(l)7a;2KbMZ%E58O!5kiPs&%}< zd+vb$|4aBe&S5!f9k2KP3h-Ojluto!)#Uw-x7P7SjxUYwivw=$FRr&@<>ot>XuLyaM>omiWza;VlLQxSL&nYaQEMZh(Jz37+zKv%@2P zeJLLNtomyXEdLXw`hlKXHMuEIzr931^?y?y|2-vsuzau2<5v{obxsR(eOC!D`FL%f z?uVwOdtDy?a49|K1J>m6|4=Fy_52li{GmyDcV!-adnsRxFg0!m-R~;Jb6oziJpEIp zc+%CE$IqFh$IJ8ZJ*D!HkKK9vk4_uU@jB?bp}=Rxh4ADW3CK&<5(kKnXw7 zLn@44Q;KJ~IF17TpPshd$c5>DsT5EAtkX#U+a>&*4}g_m`oAy5ud;B>SL&X^k2tiA zzpmmBy?AOoku&A=TEUFDU7jw z8gFeeAe3mNvY6x1%H@;dF&;!dGtEQW{-o1LfO&z(f3j^4Z9kQVW84lquXk_(cV9v7 zSy#NLYvH>mrJv{NF^+^pvt7K`1NcldhL88x=$x0AW;hM&u6z3ts^ACCDo)WdhwTkkf|WO#U_rTI35zjODrc<2o$Kf&-29l&x{$UjT~PP%d_VCm?#<_mG=E*|SWbIkL?8wD`~}-TQ`~1$ zhx{#He2Cv;b_D!A`9{=4e%fbROt;C>X}>AtjxUS-wn)FWz(3uq2RMLcvCmfL@QQd> zIbN>+r=z{jFTFRGj@HA!iF_RMZ2errZj@t)ew5>P7sHlV#j#KGQJPnp7;eM8wb9n& zDfQWitsCfY*X5qy+4TIt)9Cr94A*m~=j+Lby=&=Su!{6q9{3~&@EiIhenT#&XOIh2 zU^;nF&Xfnz59sn{hu`V>7Ug%j%Wu1fyB=*U>e1FI@Raw{%k#$;x8;K~R`BZ_8R{!~D0;Vaq!Q!j8P;iGKZ@t6Yn2I0O*^`( z&|d{!%hL4m7d^hcUR&q*xw3-e=gQ4)7lC(lGwV7MAU*q?pN#Y|ey$8~{9M_>@w26S z!42(n)#T#=UtXuZiQ{I2zo?WC`{nFFU|qMzFw`nH#5(OD$Iq7TEronQAN!?05&3DI z_C{~dQJ$|9+c|^he8P|A@YbwaKASmyHuzsJ(Z%uC135hSS+n~^(C-5NH4ALM_aj(FYy0d2|wkrKj33LU5W9u!Cz70565X)z~9L6 zw5_v)Z@_;=2|xMiO`p*{80)lKIi5E7tc0KN?+pA~IG#3rf<6QP)@k+ewt)ZsSf_n6 z$I}M?tr9&P2M!1P4{$tPc?-wW2LGNCJm*ukIXv?JNGYD<;qTAmPZh?US`SCM^GoT- z_Z#wb>q~rK{2TN58>fxmkjMXaNe@`g>+<-M)8>D59)EQyp5@|tIOrcM;iq1%&eMOg z6i+>jakZs?Y!Y8C%hP?llrP8IFVEwshJG5#ARzL0*@BE~1B{8@gKg7IH2#WOvqVEosoji>E| z+}>J>r(VE%FnzNWk8!>BXQO@jlx;-NxK`~>*{|@KT(qADihMGvr$|Tw&d=0d9~Vj~3}PHbI`q=f?|t z$k&;dda`8h<#g3A?7!LyY@hm*Tg;1)!3y;*9fCD8K6Cmd*7!gnhufJzSJe=PnTbjzYNj z-x>H+o_v31WOB}YUmkp4ZkgEkFPZPK49^GOF*5@`m-!C;Qg}YE7V}!QIPZ|Iqr7Vs z+C|;lJJ^*r{; zNdCG9H(kf{=+6PK?jZ&KPOJBA{D=j9SXJwb++_gv37_`i%E;(5kZ#>ZSQx}9zLeSjLDBK}9MDOLTK9oM~qMYuyn`x^C7^*p^Z^sD1i z&nON3(0M1lU*Bo+24srKVf*H;r;Bh%7WM-AJ@Gz<4|c>p@jIzsHR&sd+xb3^M@-B} zFZuSrpMrxQUmEuwVxJ#A=pDX0-bVxf)|K>c2|XX|z^sv-OEhAULiIxOtj$3yZSyB{ zI-Fkc@l!^3&hPymozsKx`=D7ZW$RjvB&Sd&BCgRcI`w)s@#;(XLR<8vF%65FTqb*-qE|BJnMfwHT*u0&6j zbS1%ybA{j{S#h~;Ar&R$oJe2+vf>I7UqQcEGH_iov8*bUloAjHio%o*Cut zeEcEAJ)$ZJB*$?~zL0Jwr8ss(C!Jx^dC4F@NAbw(=kzC0+=&}Iw)N&*Yt3^{ovM<+ zcE(7L;SH*N_iydB*IsMwz1RL6jm!DPAS|5TVHCbI+vE5M-y49}ag-awM_pN-cv94K zespd)ZeLV7>0<4N|9a?mCcG$-q$B=Mh~FvrW&FTmNw&w^EBbXGKqOrMXp9p>9+o_o z<>6OhuWXn1e0o=;ypQp9(7#;nmsWxP`#gXCh}U1}TVMY1Jqx2_@m`v%r6k)I_>O`} z;R<|**qS!K7WfWZw8_cO0^j0&gr__{zr*iSWcPVkd?Ws-eC^7rr2 z9+lI(7uoX;w`}ixn(f}>=BJIg9`c+%7Rho+D3 
zdat64$G>L373IBLJ<88Ux1S*1Zmvl3k9xh-vvle=;(ZtTxf-wWvW8KioL}XXKWiAc zq)~gP{TR9c2VmiJzjg$|)MR)CW0T|rQk(E)65V^n<&3s*324|lqls`Y-gBlLs2+V+ zXe7#OesCh-k4JoCp6f@A=iFypL^u>C6Hq1TRxrtt*^IWGWw|d_5JSG62BLj_Nv@cOHg}^4!|M3Lg}>XfnG5m(mN)jan7|h z?|XqG(l54bb{>}{yWXSnj{oTS-vg)^_vTS6A^$bz;hhN*0({}aexmm-sE-Jt5$ikV zVp!*pd*^-QG$mF2Uw)^8bmczh$;vqQJD7SD<6I94!`V0$`CqQ&KVHhObH0svKVjtt z(9hJXYB%3CBYy1d!CSmd%*gS#9Io$TEZ%D0?7rq>1ITw8#d_!eWbzgebR0}VOGn+w zm2e)$`0nex-0eRK1mFgc&T`A}?~A2+>o<75?(5yxd{OIQ-P_hXYbb7f*>%0IJGwUc zx}blZ_g`Z^5C@&6KH%``fybzu|B@--MyGi0cak2XEsIXNKJ4p&{tq)2V+q|ZIvws5+C?Y3$LO_Q&s{4c{#N&=f<9NGeWaki7t;qL*H_Sh`i-71 z9rb<2#yNg}THiO*{rs_yy4=)~|6+QN{7QIP+`~(+ZWtiFyG8ntex|4JoAOKyWe?GL z5b1}peOBDQYeu@JSG#>}D%w%tKj8WL0Nn7u!NYowl>PY{%kOvn(pc#CtH*|fzsC

4_IfY|DzgWpC@E@)IXP(^2^@H{FHCzf3%W+C)S73 zTh^}tm0|CiZUE(|hctd|Kga88EP%YiX}o@5g$0wTC){t%bA_w0*7=R({VwOy_rQlv z`o0hRMEiF)9>#I7# zbb7C$svk_h0fY29;pYRh-u-Fo5r>l<3%D_UPwn>j2YTHf+vfg```i~j>!IgGuPPqt zjlQmJEbxxC@c?*{pNN^hJ2XQ-Y-4L7W5~;V)p}bC$mnQ)$WsBQf_J*k_5T2PoC4OU zxtxu4NI!&~DBv{a`S{bz`4j49{p2B?v(UR~@wqsA7nXQv9!9-ja^AyOFK==_Z+1DK zO>P%g}*KZYAfdD*0b3#{oPq%(TLFWMdaOS2*2>AV)@$?WYh9u8o9B!Bdt(;fFv5yAZTn3vR}KJtR$ zb+O0KfOoaWQ$HF!*tZC&w*xB(#Ff{Tq@U)WWBMN3#3`c>*Hf$q`vn&6TEr0L#osTP z(QdTQ({DEaY;-yseH!ySY`>_MY&LkzPYkA-7R3HX&?my8i|HL?X<)a4`590C8(oJV zaDvmA|4-JR!h?E&5}(drVfmH;{C+qF=+|s_8RJjD(RW*ES9<01pZ`4b!TWsuJ^Y37 z4Dr{)@i+bYt^9@ewsyI{<3sMZ=KDOx^f}s?Uf_f@enyz%;>L=6J%9@glE>_b^K<3-v@VsKnnoZ&cOyZ9VSxxHf%#94+Vy`WZimKDPNe;A(!{ zyWo4K^Wk$ygri&-A37P5vFnIs32(Hc+>ceh6h5uQ$6d|ex>p*QWH`nv=?Q+Xgs$LE zXV;{E+X9d8F|SA`{kK)JOMRkw zIQ2HmX+Nu$_)?P1C7u-@GcIh~66Q7`S%z+L@Hy@dZ^M-2NK#Rutl zAKUn&Q9}T3G{zy?6`Ke*Iu<9p;rE_%j@to{jU#Dc$C*F?j{Rc&81?f#QTC+XDU^OD z-*W9()k8RUXn0V6^HU_u*FwHxJ{tWbI!S*wI(x1D>bhohlSKo6*jVwi1^xFn_~bX3 zZ%$J_K=MT8rUHJW=KSaOi4I$T5|X~7RPC>+L8b!E2i(_wXsct1)qBvRQ63F~8@<6J z$S0H++80evFZ}9hcF9!ek79lk^PQ=f|8W1HiF_x**ZOboL+AjwsR8qny$|`iO6$oQ zPBAlODArFJ=Z52*lv?uJmR=oq)o`V|?F$?ciBWI(?T|axo5|Y>(o( zk5`AYyzqB~JTbllj`8%@MEp4D%PF(q4Isx=2;YV2Bc1Hxluw$bM(~gI@q1t|{5U!+ zTs`JF*-nor&znwxE-nv%_pql+?@~^Wt%kmSF6_X1Cnw&&B|fL|uhr2oKilc@Rfd0Z z7Wk5bay>u8dJa1uYd%>Y3Vc9hRFkCV^KYy};ZMc5!ta+crTnfq!%Elwy6lb$D}SrD z(D+00K>HT?1t823Plh!wqJH7dc<;QM5~>#RiQFAG&nw;wSkv^_6Y$pJU-uIp7M;gY z5Bsye(c`IqOiw)I;RmjXczDc5{5mnO+uI3Z!HJHXk5KP1j-J*FJ3{YGUCtEn&2{c~ z#eS#qNly--el#uN{LJWgndhhcvx(_0r@zWk&mqa)y?hJP@1345d-MQ+GDUV)>vt3+ z-AGp?!N00)7a_e-rVgJ@{USZoTl>D`o2a|oPgedKFCclI4*%~SkbDb&%_oX?OV}9J z2Qa_QceJ8k)0*24js8g2`d|R*?3cn05%glbwYNk+hB@D8x3{~nhqGOD<56xn&R^s` z9{*wYlG1v~XvT4$PUiuLA0<$F9^bjJeA+KcFLT8uy9(6^=fCjGH+nGu$AX+E@3Ii| z{MB{CKdMX?_d(u{I()6~vwJK|*;JLGTHBRin>J9-y=k;7r4 zu--jZKe8X_w69*wXBORM@tRL)-l}oE{!vR#;O^^HafOA&MT)3j8y0Hwt@by&A4Dd&?#=GYo_-X-jUOKKc;Z2Lk~`Uf_1*G1 z2VuBIa@2eqhww}KQ0HeF^IZWkp77Mp8vwtFaHI2DTQ}wL-DdJ1RnOkeXYm($@#CQv zkGS2lG5*Y}<|&J*vc4_maYTSSNd!T67OyzI=~gC1_oC#66ymB$*2PTynJI%jlu z#Mk~0lcz@QT$_Key{soY>~grbcEB=9KJgyA$=gBdK{y{jhmBaj{A(xQlpTH9(sh1M za>IW71K?ezBkM^A_!smgbHf$yE8pi~z31GR@9+95EV-^FF87>2u;PM`LJpIbcJ`_L{M&j%cSF-}@{b|iZ>|Lpa2Zx7;k#{JO|kFO>E4)j51gGBZ`y+5aQ zs^ma+cj*oFW4qjg)}Ps7=g0c6*K-hjNvCtFlAFf-KI?C-s}@3@tp68q76SPVI{W;! 
zEnnI{Qvc6Izh10YNVh^BG+wMQ@P&Ok-Q;qi`dXd;z~x%f9r)MBJiquj-{5*-w`mEV z2Q!DxgG6%z zi*s35vzE3#m3=CXH6@x4)IaF?hFx74Ehq}(kiRB;jvrV?o|fIIJesm5+mU*Ef}V4=r_49x+y?B7bejKgd|CM; z?S5pt**@~Y*Ra1`F5g7_Y~{bdQl9N%`7c+>Z=9q2J1gah4a@)6O8NbBl;2h<|GiQE zizrX~B?YlTg2R>D{Y0f4?IEo9&sgsubGPlgXOfp6KpB>!e$1LytT;zZyV%>_SE4V7 zOFIMQ?n@PYaDExiDoQ!$8(x-hZm7V4ncv_fpKQY?9_Nzyl{lK zs{&7WEvvxe!c*ksZR@&e_StuM{Ei#l9|v5vV@I^F9_cz) z#B{bt@#L#%_SwkK_-Xdph?jkqc*_w__9Oi?>=z6ZkJ~SH!)^OJVUK!$ZW#P0mLER% zxP>V%bm@HmF^=C&{L^~~!>d1Q`O0?Im%rAwSFyflke___Q!9WEk%6PWk~91s^a$DM z#}U~?hT-Ku=J-6m$9#>)>4Of4VU`@hyk;$Ye0S^X@TTp@-(9kwmGQ{~A3`EsA3wd-OB#S%v{ZaLV}H4t&aV{B$MX#{i#n z+{Ju5z25Gp-0wsESO4(hAErLozxDiU8GAndY5z`i!#fe*VA}BVmkclAov+^J{q4<8 z_7254wF%~i+#>!s`c?H3k6(o1D&xBcbS|fVvXZ_S?a?|?^HcSk=v#*O67rYf9R|GY zQ;98|OchMrON5M7+oshY&?N|JFs|>pOfsoJ|BjMgON4qQP+5x9aV@#$H5W zdAdfYPvr1jL6_4ZkLS58y7E02wx|3K4Dr(Xqh_xPf^Q!7a$BBopYu1W9O`xRLqARh z{50;&a2Z8RWrtTkYMz`;IDReTT8<-cu?mLRsc=KryRY-QI)^<1W#2>+&-2o!9h3FE zh=26A@8Mq$)8{;bF}l|<^$25Ke>CBbgHPyhf5gA{BfF^9|Gk|z&Fe$D+A)H5L-eM- zLqxji+!_0U^$o{3C%cf!%O0K`bGg)dFkSC>YyCwyHt%1-+E8DQd7#t(V-fg{n)NU7 zS&yU>;pZD79=VuN>j~n4?mpd~pZdOO1MgqbmHC2vT&xS$`M!QR-P*gtiX}sr$G$&; zY{B+8m&g8~V>?jo>GV4P{*dMgI`2dM_1Bg8%oQj{{d}x4pZPDCAW}XEkL5mDDR*U* zd!kbAFDvEtSITi8pY^S+lzX*Qt}S2Wdg!GI*GHPaL(f`&)ROmFdbMBhy&g;7dzs7i zZ1T^7jRmo888$G9_)T_#vb>t!@NCJ@b^fCd@@RGp_;kauo{`)Z>oByJ{4llE^R@XR z)?rb9u?|}>I(;1mxnY0HZh-In+IXE!`gq*tU*;ko9(B9a5ewQnkos11mELJ|M*Dj` z-w4`4KGr#>sY6j7o|O;1GX#9-`rupp@AL5Rp!>|mo|C+DSz+xT@$|NRJlY@iH{yN% z-p>DM7X9H}0S`!7`y*d|Tg*cy+v(J=jgDv!2WPl5^(XU7ZocFF8TA+a+2P-}m~&md zA;z7fjyIPdgM26J`3Qr~Hr|DOL*q94^ANI7PQTQHaqRKrn=GOADCa?>8`F%gyJvv=7Y6s(47lVI!u{ABa9=nJ-2ZHFw>qCj{lphdGPq9??qTtHyt2vdJm?Ljw}^cTLn>+@;k=XyzY37vPyj(9!kH3lB~Ey7DZjCmZ%JHtNIn7@|M z$P<{mG>kp?Szrl;^lJ0>6nA>5-vr zUQYK1rw)2}1pQ_C!haFvSkLx@{EKlh@EO2w#?+CIZfc+VGv+ynw(|SjzAQaaOML&L zG5=4EeroqX)F=KE|Fjl({k7!R?1%W`2J|lJGLtWyAKQEp;eszdV{oc=!#Bbg)GPlS zd|~?PyL|Bm`N9{%;tS3jDflz_!u3|0FCtv<#RK0qU)VhKI+q8^>vu|D*tqpwzIcOt zv38oiDAqMGugEvN;N#ZeC(Q49`ll^_HnGRT^%y5vjw8lckN0c#yw98P{YlHGbxyj| ziWlQtTJV)$Owv9n^*M9W9zZ@V);kE3U)uZnj%cI9jzCU3Cv4-@631ugqM7R*&NJ)L z&Y`!*dI$W-dbh9OU+8lxPx|=BaUxw2>z$2^C0>5NL3&&7V_1Hq=jXm;?ayy}#_2F& z#mo+KHhaN;XmVcS^efhvvKz!cht4my?FR3(d4tB){ElN*Zv*$T>9h_mulvqyFL1hQ ze!S&w12b)$C5hBuQ&_jtr8jdnf^|Cnk&;>`%U)0&zq02Y&pGV|n+Xl$se?`CTJLi_ zWPPMR{q2wNuZQUmgk3@RWaqL!Y~SPG4JJIgTJo1BSFp=;^dYSFXx?9*w|q->gCnkI zbv`AHdwQ}PWJjEj(mrobEqTrCeNcm9y_WIA)D~-;Olf%|5v-kY-*ahN# z1t-~X#d|Rk*1qoun8)lK;qTr2@yP!l%@;k`PRkem-c^S@J>+0!`o}!|h|@&oXc0Ew#?$EZZWUDo(p?VdA@6KVxBj&)$@}lY<)WD;bG`6I?YSR@w>?|+Ry&c zIs111`1(}%2oK3c!)9^dyTGq^-rxGVox&Gz$r)O&m-&%;uCZXL<| zw*LNB+ZX&-v=8U+=(KN|J?C<+b0BJ0Z|#q#w`J~f<`oPA)t36z%1J})S&d;0E*KJ&n()HKY+4{WEvDSRr zci6+_@x<2SM=X0GcUr#`<4DL&dA$bGn%&&sh5V#F;vbh!*^!STFUR8%_)Ywid=1@z z09>)|3VTs?eOHVR_c?rq=?0=bx|cV_3gBj}`y$`=kyy6{zFN15UhVbUn~YuqfW;b% zbz8E~;%WDMZ|Qyo%P&X1+1CFXapjlx5Nkj5iRs1g_pWcn{I7-5=$8reihUo#W&JFF z4=RwoBAakKg6OUJ8uMR*{OJ;JAj9Op+3@-)3^Bs1YzO@`_nQs}9>DWgAV8YeHSq2< zozCgi8=g=12Xs%7aznWry4KT&ym?92t$yzr4Gp$d`Zj7eYBTp@}+(o-t0affhzA8yaM;rCAirNZ+~mvLW4&=GuOG5S1WLSxeTX?jwe25355Iu&RpkJPF3LiTnWz9 z2MtX79u9&4m89GFw=_@3Z^pEehZ&uu4?S)^_2x$W(zyrivvHiG+%ahOHE(A;`ERy(jzhD>lajN=lQOf#H%swH13&V?wDTyv$OOmz8{(;R{oOZPwCUMt z=TZ7Re#e#Wj{`2-u_M}7k955Qz;w1p@jS0F?L124XZ*DDC=oxt(8m{iFTwfqrf1CO zcuF~Ed*p~e^#xt}kpEEs5RWF}Pei$5e>e198uw8-pJU14$NgMGep8%tSp8|MciMP{ z0YUX~KK~5LQBL0h{Z!7k3wFqIzK=z|b;y@L>-PEKX9rzg_aG6jd!PG#==537WB7Dr zT0LAx75mpghrMC%ko}^J7wwn_UELMuUPK?w)7hS%d^6hfkmJMkCh2}6e9<@ixcAF$ zocE?2DE?sJQC<(2o$RQ?EB1dzJ-i#|9IgB&4-cR`Z_^Qve}{2AI~@By6Hy+WZgz}& 
zJgoPB*&l}x#d4Ex1)cBr{ZWn&9P!KZfm@01Q(iueaYXw>y$jd4Jp+8fd5H2Uz4%@n z;Lf$*{LC+aiNg_}WhH$?JvP^V^ZrV?^GfBW*a>iTz@r<1{5IihzjDiN#sc5>xPK-4 z`FEfF!Qnv<_eHon@@an4>hSwj)Mx(-bm_(W$>*TR2*T`2;`{a6+U=?LIJ}`4$6NE4 zSOoJu@#F1$&&TkYd&0^2lsY-;WX($zNX9X_jOyN!xeA-_!Fj|WG^c7x5?WPFUR>E z(K93V`@@qXIJHGTlcFgg~jyj%m+SP9K`s!pdxG5@8xEcFY;8&84 z>m>TypK(GmOg8{lV@z=#=b%N;ST}7y80T?9?-l29I{uyQ!>b+CbL5}0-a4aQ4VANg zm6yF#=l^uiy5PS{2_5;1`P`)Of$DAGy+%u~^qbBDRrR>$-wT1aty9kP_4ClRv5wvp z`=6WGtLRsZD~4~61tBMIu^)|jp1;aJZMpD?wd6)}COdYSUgSIV&l^5>0OZ(?;v9)T zB>+6b&JlPQ=MFmG>*pxKK1KN|w@>F4B_GsBmi~~ppLC~6JrRC(#OK4b<5+z7d)N8H zh~3wCIn5`PU++FrA6vQb(|52o^mk1B8?}F95&3QVO>QN>cv$nR z@;LXoF6t4Nw|se=V~G4}FZJa^m^)<0VjO)w;6onFZZb4C{9gzB{P8%S5cvp~oJ_d; z=74MC^BLg&g29dROC`AI3WK{BaHaRk^x-8qY57Mt1XLUXj??hX9om&>4;e3UcCo$lp2PDtg^R5r8^3k>8 z(GKR99hC4Wp|UeH=6}`e^I;YHF^;Fs6Tn_)_2%bporZsUv4MrWtnl(HSPS$jh)NXm z$G2JjLeCsxD)a*WQEqjww2W_A|1AU@%BS>8an54C>lyV|zQN~(=|%_woW4^lyJ;Tt z`L-SU2P|JB-qDxdq1>`XWxZs4b!EoI~yPM&-eLFqw{SBxZnqkUx%YU^sDKw zO8tf1ccbUqG7|aW86Q;nVV|eB-??9E5ohgZXP>k8bMWzY^>gswK(wd+u-C79uXD|V zwr>u*Z`3QhZ)2X$>tL|}C;pki$DHuP)e>L$ia%gaGk(7T$)rz}Pb3#@esR6p=9dT; z{POg-%`bMo>z{#NY3;^<;mjU*qYT7wSCVLgcsc$di+yEBr^-pxfs)9`_IWg!}dA6FcX1 z$l*{-eSD$zgnekEdC42sCz#B@{qLerM7PHLKe2H{@5Wa3N~_Q(U$yql#vknYLZ7U# zY}o(tc}v$*e`FEa#4*!9vMXRe#KN2xafH$N9?jF*@87@2`DfDMGQX`qoZaw#eXLtz zy)}Y(@|W&4RmUyOPliH2_ra&#sD6XPAHF;4fk!<#j`Svcl7W1v?>V*ifnwbf^*6|{ zaJ}<>-}jyGv7pUQh_cp&TDM4!vPoYrWQX?|UDC@wVeRHToGv>W>9PLiJdE?%?70^_ z|1F-ey>IY`HZLg8tH}3if1~E(pWc;{To!WpoKn|bZ}N3Ox{0x%>n4vEU37k_mi&^@L42Y6 zW$Mqye6L^tcv27=^RKjeYSm-ZKjuJd(wF%OLIM=(ss z{^0yFy*%EtMVPLZ_&XihVej|;GwfgOsn`?$KddQaKkJI`Jrwfik>dyEsPkX=1LE|ERg?2{EbeWpYw(MR{q>i2m) zYDd?VAP$`7M|H_{;LZ6i@fp3y--v5;e%1D8>(RbZ)J3OtrSzTjNw?-f=>KxN`tS92 z5I*TcIx)T6Zx;hK(X~HgD*2~res(zeaRoXUE+6x8PVLQukHdUn{76;SSm^J6b6(D{ z`e7{ELB1v5G+@ABje;lqWET_OWqcO{UzHc$q6_E6gnJ_VY~qmh{|G9v`0)20aC=|> zNBunA*!3s`M|%qK>Cb$*D;b%^Tjx ze)7u|{J&g+n~_Lx<#!9_+D|@Nf%7+IIKk)Ted)ROlV7XAIaz`u`(OQXL(BM;d{yix zho0uR!T8qv0n3;k+-`pFIePD4r-kbenP=|+_%Hk0OxK}OIQGZ;clvLJ*^epX(eJB1 z)sAOB{9Xyq+b{BYr@p_=>BmXv36CWt$1nBc*opFK-$VOLMS8SX-v^@no+7*${{=DS zBd&1Dr+NDb;(%gOq;F*|Sick&ZB3xqS#O*7sgG zPvAVQXwP+)y{Mn_9Clri9(a+?CAU%SA?0zL?X>PKC|>oEe(0*CNRRsR<*VF2^Y}jV zweQRMEh)Df?FHE49kT90M8f5dzhwFETz%a9;=Ldu4c8s4Ks%A?!|-=6e2c*&AN@vY zACG+0g>r1)OO9zG4lR?6L5DL2=>+QpS}drRfGKDi3* z;kZQlJrLuo)-yXEB9%bL4gUUH{{hbKJwJVYzqGv1@-pC*&y~Hz@L}f0cmlfdJuuSg zu@WBnZFf)WC(PR|oNw^{(>y`vC-T*S&!y~rj?ZzvAHhR_pWk%M%6D(FWV@eW{fl(^ zeek!!=lJ@QoqX{R$1`Qe3%{^M1A z@dND9hR`xioH3t!3`2Is)59roJSnJ=q4{um+ z23@u>H|Vn2{kiJbx?1bIH7g_k)$Uj6qw|qCw`X|VUYxUY`H=k}-iO)mP_Pbd`Y)BQ z_j&mC%Uu5R)oTr&&Ra@e*IefLbPf^c51k&d-x{z`)5t0YaGoEJAbwN z(ZhNd0rn7sBfb?MDLvod_3E5YHtGGX{K60WTLwqvvPthR^>=>LGEcw8+okfjH|+Hz zx`}`Jxf?yKelGaF>E%Q24|X`50r*$p=ZoNDM%@*1n6G}+@>5STp7VI9v(vbTZ1LD9 za)0?T^B;)y#$58jbphYG&2L-mPUBo_vCf(Bewh<4a6{1~dw#RQ z)4JeAwEqz9^637{i^u~JvH7088+~l8*Wbs}wjR&?wdBeoybHhD_;g$RneZT;s(AF` zJ_Cf%j^_!6nm#>caN#|RgABX+T^6&V7=gW>Bvasv~ z*-__Hg{7A{PoW#YKNiT(^>W?&5rEVCr1>4rm)(0jp0sAs{M@xxPablx5bb2V?v2B4 z=y>&cIoWX+A`|mzJkYp1j<_cN>D}WiK^O9wzRyefW;pF!z~V{Qo9!8Z_W8JtjBU?1 z3V1Fa$q0zvMEZF6S9!M;-@Pl&OR;m|vO_O89WlS6yN`G#ikf-K!jKAL!4p z+T90wSo&pAZ_I0pasL{R*La$5*k^bY_gO-oA9jITjDQtGEAYFQ@rH_PpaGzLq^Q-{5#;ho7)==@lQfg5*PvznVXd z&2xKXvCkIpc}|c5p01nyo{{v%IP^mk`C$LEdi0%D=_&DJ`Y@rD`R`H7uYM3d-O>L# z&ntT!=MOe-i1|YQ70&SK!`=b;{fh!#mCu}R*`(7+>z3}KzXz?{8r3^sXcqfDdM_I1 zGcBLykD9Nw!yBii>-(FUr*-=<4Ey;q%hw(A_IiOw)5}L#>9|L2@!8?v|45hpRNwQ{ zxUG802Nd=0VQ-K4R`bMmIp+_RJIKKuF2BR$^{zJF0dxF9Z>hgD&h=lh+{%e>wNHe5 zC0_0ZOHR74c0Yai0{bDotaXLPN5M~r9U<|t*ZcVgjuX!3-v494 
z*V(artnR+L0Yy2z+d-@IQWU+K!A>($=A?pxhYuja(B zP1kJF`%U{-ANFiKe`oa3yq)6)IXJ~PlIYlNuY%rsueKjH7&?8oL~ygiLI2Bw{!6CQ zzXoe6%jLJd>h;Aqp>xIh{%h95T-;BiMhE>+-CXyyUaHWQHqdf^836y&OwVt_`^QQ@ ztz-v9{(uNB`nCO#jplqbq8|D{U#e=L>1eZ?}vM06$}p9Ed2@6x6hpaMAM zlYMUl@SBJ)($8luQaVo}}g;1wv zua5i8ikBUkc4L4{ihBrs4uyJ$Aa{d+)T`1%-M-8-y>tli-9w(A?OcnWC&KT8zZGE= zpJAWe{nqw+;oGrK{$`1f^2bk_{16Y)XD)lB*7-+~kL?}6{zc4_hLc<`^rXU(s)K3j8lz>9kHeZ)1P z&t%sWp1NmTov&$pzI~KC4|uXy`PACELI89+Me zsQKSrmw7qupFa@%n6G}t`iXKx`b)p%vA?lnm6vNRa{bH@5;P7IUK26n;lGIAW^ao9g0y*_i^ z=7a}6B0uK0xt?#^mmFcvyQ!LrbOcy`+UMt^neO(b?`O4U584GkE%11Y#h*d{!FaDV+JSnRpzp-wH+{+SEd)HuA?23+-}UDE4WQnc*M0%V zi6&B{&!%p-Y%}dUna59k)crLiAYAu-?q^3|vwG`GJzUs#B7fHfD=c9u>_=IA@2Ttj zw_E%|bQgO}`dRoDc!fRg%HZep9_A(=|FzZ2adNKtXz~IY8IJRtli&xrw)*di`F4KWGRvoXE!l*RU%J02`qS=9 zS4*xlKbr_VmOCEVg!38RvHzCpAAyiI0a^P;cg6T#f5fW8y3pG<7oIgc4b;~A>Ywa+ z=R?jv$X^5SuL8*Iu+t$sdfe!tdmQ~w`oNOKcgJ*IPWBv~zj)ws$BT2?wfH#}+!X1^ z?|Sh*YbVPye=WJ!!tH)^J`i8!k2}5R>espYLHv?Md&##vv8DCQZ0ovZ6@LFviQhHe za{j)ylJDP^^IeZh;AFp%-B{y#c60FQyS<(LOS(OO%+qE6(K|#nyFCm(xh3duYw&e^ zFFo7p@}+l;q#yN;nf3w7^LdDB@jcz{>#X|X+za(8)BCUU_sZIM`20rrb;rCXz0`F? z{|E>SH#KBlqI2$~*X9bnUVKwqkBHB5U-lIJ(?7ClPI&t&@IG0BC;eX!ewIB${oQqi zD;%v~WY491P%gRNVEbpBQ;c@ac}{Vsw_A3#g^)YK>EGF73509#5y^OtFO=8(T$eZU z=XnqITwxyUQb z_xOB0f5yOI9Qbw9A7d*$O#Mpw>Rx;KoZDRM@h2+v&MV3DXw%{LVn2=TAst4B+v6hX zPYEWUAFIIm8q$jKa;wWP;UKHQd87j8qJVR}0_X3i!EyP|S|Rs0pb#AABS!D=wU4v+ zJ{PRK>2zZ}z$JGMvK02+&~2P2X+2)EQ{~`?pkqF`%JT7i+p!Y9`C!xH+wfRB;d*_t z=jF}y7N2iiY`*Rf=W91wnB_?i*%xYdN)$AWajlk+Dv*i2E3?Wdg@@n&6`b27he6h_yj=sq`l-W@|P=Yeg8@0b``&TZM55` zwMo9QY2_REo+lmi6Jp-n$e_)mr_s68?rH7Wy4CWx>)Y&M(36sw4|#v*j&7k>_1>1g zE3NTq_>NDT9F_G2?IhK8IqO>i_~~V?M=1}7NUU)Aa-YwZ&yRf?aPsA@=gRNhA4h!o zUI6V<(hKa@Qzbc|9%oZHZ=g@Px)!`%&Y%1uBEm8KCjdk8*IGpCIlbxY{QT%s3_-u* z+}GR8hb;`g#?8i}d7)1@VS)VyWCfb1%kYR#E%Avy=kE+pjWD;%WfQ*Umr`>%O;LZ7|fnM1nK=My)8j7@|& zU;SCYtF8+e&-}E*Ald3dFiCH9#CR^Y954K2_o>3`zx-MjPX?mBtbDq=0ufo2x z#G;FLE5&bV&*u#X(X*EPzPEqK^K+atIN|qpEHnJokCLYe zKX#Aj)4s9zOZ?DS1ORXv?{FW$@XL8f9!_}Z91O#xr1Fi09hC8eR7-kTtSHC%hV@5< z?%Efd>pWurHLf3ozwoZoLHiT!_W1W-M`InY^V18#_oP#Lso%4|A^MMe`Lm>7r9T-@ zyj4EE?!OvcI8GzG{boCBiPxw0SLvdAC?95KxT%Z7_hpdD)A|cQFOZw`YvGI3bkvKh zO8Q9WYK!@9=)-)u+Z~vH{Vi?%l`oHV*WyO&l>du>ul5h8_A_^TT|~Wm+XuhW#kg&kB~#)f#{ceXfBZhiFaMk4U&(v;t6eVZ zX)>5(7X_Xx+vD?XyV~ud98o{~6#k)$igD>o<#$)gb0%CZFT1MI@x1bQ6yweW3c*p{ z*#DyY)X|6sigvE~NQ8mhmyl2NWBb>a`cwL$JRaSHYL&lC?@ypkjYDNTUt9dogXiY6 z;JJMcJU>##v(M=ueATY1p4yFfM7gfkJHY%tF4z6M|4I2P=97R;#dGI{jD@@~43}Ss zNH~Vy_v6g}P7Ckuc&nY)x%=6L}gPB|FgB=m}oYz0uc2Pc-b#u&YV0Qf|4= zM_lPNFW(X4@;GoH9n}6UJ(kVp%iMqT5Yoervct)5?@*#>vL*P1dY*1j00tYIgfFt~f7vl%ulajix$aw|zFV|NVe>VbfB;8Nfe_--jP?JlBjaO$Ojw z{0A)B_S@Kl15oRXUk2gYw;akU9LKYAxPFT#aGo?R{(}(@_|xKvBpkK%wD^sT^?BH| z_>~?{`#SNdhkcuL{Yiw!R(oG)UOl$@o7Oj@S9`g!)!*V_4_%1&`_E8^yC6-v{l3&-XxC4>cO&uY%8Ui}k;VayGwSvKO(G zyAkuhD?i_rA2Pz&>hH?W>ysa!CsUqmeFHsu>Q~x&mE-FPBoyzws5wq1I!Z z2j--^&KD#6KIrZ{LtouK={o1mi(S!XlRn<$7dt%FKXx&t8gOEMgm(!|R~7pP7dI_E z>s#f1=JV^a{bQmsPv$toc{1l?+E3+r9I6E8rZ@s&zJ9>r5Dz+?@29@pR+>lRojAjX z`Ork<+h<|jZz9+#sQ(u1{e{^Ro|yhGyn3|X|zH$GXRGf+AM zomX4I^80ptSW;$&oHM2Sc--C`V`!Fbq?|}HeZPk96-*nvIyy(q9zF5cKk zuUP&(=A(t3w1mfT_TOcW7xDP3z>E0k{90a>gHn0&o8YqDFVAAHE!M-OaF`o#aIP)7O7>*U`79MQQ-51l6<{;!noY3sa! zwX0NbzC89vCIe4YeyV~u*A3b~ei3l#IG=vG1c&%AslaQ)`slB@~u{$_;56m{>gf*Sn+-g-s7~0a=#3g z`k|~(dZ6M&!$vJD;-tQ`7jt`5NClH$2Ma)>^sH!zg$6^(8$V_ZRcHA4|B? 
z^z`y)X29EO@SgDaqk$h9{R-Pr;;$zdF7em0Um|7Pv7KNi<1hMJceDQAD$%uO+~<1l zw@dM5dXqnA()$l*;O+El(_uROoNjoBc1Ag;-yw_t=cLb)zZJc+t-=@V&$8b6CI}!t zD(jsq%k91rh2Zklj+aCui*xGv>Q@clEXGNl-^|VfVOcK57wHq}UEP3c9b=aOCE)b9U`#1O5DTM>@O^@y}uu1ny_Z#zFMoE9Vsit{}?vI2x z!&?4BHvi_FhV)2p_4R(f{AHty>{8TQr;IP0ztRidVgT7b&V1^F7KHo*VK~-KytxlR zU*{6i3mOO^ydd~=j|FW$bS@l2^grd~Pi(*Go8Ppz=HPv^zRmL=MZ1B{=^ar%3stCd zfS-!<8TI#8^55>^EswcR`PmQ05d9rdu5pg$dKRcc4)95U? z=`C(g(L3mh?^+u6RP|r>_uv+s1@3v4QRTExr*qk|FYDY!x|CW9{4!)- zlHSMF3-$WX89!9{RdS_w&+oe_BwmM>7zthu4 z(9R})j)z~=hkN?v`gBeydx*Jb>R;+L_PFk4XAisJNl(8~dNw-zUBIG!t5_@Q4SO@! zucXUpoR4@BUBz&#V~pAGzQ{91pV6JpciHV0oeVv>&VI*j{VRUpeD42+;aKmtSIf=SuPpZkD@Q9o=ra-hEBnHv zMVmeBIOwE)C*4W^mW|3R_a&!up(h)_tYRV!xeHK z@-4ZR{I=pfC(aXG&RxMVOjnEj`gXcc?6RGnKGfrKKY+s29P@rGc+3d zEatHTK$7jlcL{>uU5;CwJ`rSp%`f5CIESs8U5~N;2Hy9hEA-fC)56)ktK6^Xv24QY z)qVI@@($0Z`;hlou+SU&zNG3Y#t+qVPt+57kNDS0_zN%5w`Hfla9=9uGWK}rhkKfi z$C$$_uS?IU9|GUPj&-m1L-v8t4|lIMxTE8tAL2V*uU9`*mf<&& z|88L9&qmA#i*Z@w zXP;$DvUN^x%3XDyL(NgpE#!pqNIVPtY}XTX;zxVn)bF~7A^udkHvOFdvRmjqlr|s4 z{Qb$M!OdowPb@k^Kd8P!U&OrdU;2FFTh=$zecd`C*N`&$NhOy3Acok z9WftaJfb7Mp6DLnj-|{AzJA7ip8uf6;Q5c!phaU5M`MR;KkU!qMCoLk%E8ahoy^G_#;T!9Ehkfb+-J98QtH-oxkbxr{&gZ&rwIhKRCT5NKs06NN z&=|hJ!f}7ve!~K6=Sm^~Z*kJKfLNj3yUerqw}=nz7r*yUr%!kJvsxDZPY7#WNxopo zB0cnlK}B!IOo32!^Qa|=^5l_k!pFKL;Zc!d*|b?#XRVg`I?8yE|}jDcDvc^ii-Ct%?-P-(pD5)cJ?Qp6?G4vG zKH4i8BRJy_qpJ{}3Vs!SjgBAm_IF(G>&!{#cTNx2BL9i-sgM5w{44tIJ>c~*o*bfg zF`0h+htS^m&Pgrtj>vEF_KqVz`G)6GIg&`84!W4TePTcK{V|s>y}w}go&B)?PoNN7 zv7b2^VbJL$@GI~84x;~b4olyiqg;FdANCwUeb&D4vx!0L-&V(^2C!Vual-u;>iJa| zDz!hOe$SWRZS_zee6rM!`SLv$&wOY~a_4f#hhhAml zH`*U_!OM=gT<6PQ^?r$WEb=>E_HgWjm+>%pI$+JUeJFlfMB^>n^(&?JlV5sC7vy=h zbPtGhuH&F6eRLb_Gfl5=!z%CBOB{Z_e65FLe5GC^9a+z`aeA5OXFTCCgws8RzSKCb zahrO~#%cIAPIH3+j^*19y{x(j>x;fu4$2cAD z;EcV&ak|mr=dtu|RjaT^`oxR(A*YSU6mGb^Px%j~m&bLEW28f2Z$f}Wxa5oc^KFd_ zfKSJI%kzesy=w09XT!H#PV~2D-)grSVMTUK#En;Lbgsg}$6+JX=qEo^yQIPusAun@wyqov3&9 zbT5+pdZzJ8{ZMY7<5d|io{jP1j^nPcOY|UpvI*DY#OH@VdyN-UD;!VGBg*!3=3`Qk zZ)?yO9#Brwc^C-jUk(KV$8{uK-qEC&AZvtRA!6m;Dp2k1YV_E6k zh2|~HN2hZDvhOp0S>9y7EBE6oXg}Nca%q2n?d!q3gFcc^Yd5JlPSsXf5rMIzu)^QThCG`7vH&QE%NnYcB#{= z(Q&7xXOk~D|6J_!8v!yLF*QEcV|>hF+@^hyJy5e>&4`26cvnAecoomfv-OhT5Y0b1^n~#d~BypZo^vWh*GCJyMGhx03rI_W>vKWnYrH85nXJ`Y7IC%MQ3)WDhwV=Em=Y(MfP?$>%Km#_{{jX>h9e z)slbj{Soh#X6Jc3+v{$hw~T=p1f2Kk1@<_Tqh=1FVg3!an@iDhp?iG|ks}6xrFb!_E&=!CKU_ z&mkx6B8E@AqoH?Xvqz$zc*zn@_wojipDB%nioeRzyTe{H`WU4ZbUHWmZ=5^ceU9Jo zYn(G;={k2L`#{ZJb94GNS$>1dSK8s{rgW}K@Am4u?rg_zqfL5uBl{NU@G$CY)4}^O z`;aq)=$K8u>U@0I`9gdqJ3i?`id6SE$PaI0d*Xa|_Hi#yJ;waC#P{vW=WGyN?1%6? z5S`A0$u6q>Lw*09-9dP~x4>~$d|Q9QisRg#({l>-(rF*7mi(HvYt+@8**DpPC%|;{ zW#8UC|I4SR09STAy?Z-!U@gI+eLl{zy^I-N=JBHkf*<01uIN0r&*QTLP2@)V9(4ZI z`6itYoQ-ceg0+%AFucWg93QOx@Y(*x0&l3!O=agdSxrGl>OErE9$#G_))M~?apRot zwRJ;rjw{Yzwa+;^J?G+!R`MIpFB31BKejB|JK=N^eMk?wTJoahlU#5;qxR4rSNkHq zP503E2d2|6d$dWg1z(fjj4tusulPxHY2WW~cx}2IF}y^VcD`tT=#}hp`QdV_9gXxhv3!{KQCE~Z|oR2&13Wq+m>4`8v7z|w;#oQhOkpKYS&u4-hb4& zTfI-Icit&ibUGK;m>=gdU9IB%cklnH%=2}67lK5P;M7Vbfhcj&@ynk;RXW znK$iwgobD6&)xGo4Ns2iCy_PvX+!aDTK2rtqtWRTp0w%XuJmkIr`u6wPc8K2!!GaD z^L4dkuOX!Us`^UDPw;dOmUibdo}|Zd5OnW#&WPe8-IJPXIvl;nR>WTs@o|o|IB#^o z3rMeLajr$@zq5E3w9TjQF+ka4(SD#o2-)+Y--2(_As`MXxz%_`w*Cx~G%jl1*H{3? 
[... remainder of base85-encoded GIT binary patch data omitted ...]
literal 0
HcmV?d00001

From c234605ae3fa9a1d0957a14e1889a620a56573b6 Mon Sep 17 00:00:00 2001
From: Kevin Heavey
Date: Tue, 15 Oct 2024 15:56:19 +0400
Subject: [PATCH 504/529] move sanitize crate to sdk dir (#3073)

---
 Cargo.toml                            | 4 ++--
 {sanitize => sdk/sanitize}/Cargo.toml | 0
 {sanitize => sdk/sanitize}/src/lib.rs | 0
 3 files changed, 2 insertions(+), 2 deletions(-)
 rename {sanitize => sdk/sanitize}/Cargo.toml (100%)
 rename {sanitize => sdk/sanitize}/src/lib.rs (100%)

diff --git a/Cargo.toml b/Cargo.toml
index 5ec30da8b2c460..cdecc0af7dcfe4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -99,7 +99,6 @@ members = [
     "rpc-test",
     "runtime",
     "runtime-transaction",
-    "sanitize",
    "sdk",
     "sdk/account",
     "sdk/account-info",
@@ -126,6 +125,7 @@ members = [
     "sdk/program-pack",
     "sdk/pubkey",
     "sdk/rent",
+    "sdk/sanitize",
     "sdk/serde-varint",
     "sdk/serialize-utils",
     "sdk/sha256-hasher",
@@ -453,7 +453,7 @@ solana-quic-client = { path = "quic-client", version = "=2.1.0" }
 solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" }
 solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-features = false }
 solana-rent = { path = "sdk/rent", version = "=2.1.0", default-features = false }
-solana-sanitize = { path = "sanitize", version = "=2.1.0" }
+solana-sanitize = { path = "sdk/sanitize", version = "=2.1.0" }
 solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" }
 solana-serialize-utils = { path = "sdk/serialize-utils", version = "=2.1.0" }
 solana-sha256-hasher = { path = "sdk/sha256-hasher", version = "=2.1.0" }
diff --git a/sanitize/Cargo.toml b/sdk/sanitize/Cargo.toml
similarity index 100%
rename from sanitize/Cargo.toml
rename to sdk/sanitize/Cargo.toml
diff --git a/sanitize/src/lib.rs b/sdk/sanitize/src/lib.rs
similarity index 100%
rename from sanitize/src/lib.rs
rename to sdk/sanitize/src/lib.rs
From cc1873f60630569f000ff922754ec89c8afa62ff Mon Sep 17 00:00:00 2001
From: Kevin Heavey
Date: Tue, 15 Oct 2024 16:21:56 +0400
Subject: [PATCH 505/529] remove solana-program from solana-feature-set (#3140)

* remove solana-program from solana-feature-set

* fix log-analyzer deps which this PR somehow breaks

* add back Slot and Epoch aliases
---
 Cargo.lock                 |   7 +-
 log-analyzer/Cargo.toml    |   1 +
 log-analyzer/src/main.rs   |   2 +-
 programs/sbf/Cargo.lock    |   6 +-
 sdk/feature-set/Cargo.toml |   6 +-
 sdk/feature-set/src/lib.rs | 434 ++++++++++++++++++++++++++++++++++++++++++++------------------------
 6 files changed, 234 insertions(+), 222 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a610aaaca948f4..d9fc78c2b7ca74 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6658,9 +6658,13 @@ name = "solana-feature-set"
 version = "2.1.0"
 dependencies = [
  "lazy_static",
+ "solana-clock",
+ "solana-epoch-schedule",
  "solana-frozen-abi",
  "solana-frozen-abi-macro",
- "solana-program",
+ "solana-hash",
+ "solana-pubkey",
+ "solana-sha256-hasher",
 ]
 
 [[package]]
@@ -7037,6 +7041,7 @@ dependencies = [
  "byte-unit",
  "clap 3.2.23",
  "serde",
+ "serde_derive",
  "serde_json",
  "solana-logger",
  "solana-version",
 ]
diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml
index 21df3661ddd0ea..fff8c9c2d3c745 100644
--- a/log-analyzer/Cargo.toml
+++ b/log-analyzer/Cargo.toml
@@ -13,6 +13,7 @@ edition = { workspace = true }
 byte-unit = { workspace = true }
 clap = { version = "3.1.5", features = ["cargo"] }
 serde = { workspace = true }
+serde_derive = { workspace = true }
 serde_json = { workspace = true }
 solana-logger = { workspace = true }
 solana-version = { workspace = true }
diff --git a/log-analyzer/src/main.rs b/log-analyzer/src/main.rs
index fcedd7c0dc46cf..32ce3fbb5a27fc 100644
--- a/log-analyzer/src/main.rs
+++ b/log-analyzer/src/main.rs
@@ -4,7 +4,7 @@ extern crate byte_unit;
 use {
     byte_unit::Byte,
     clap::{crate_description, crate_name, Arg, ArgMatches, Command},
-    serde::{Deserialize, Serialize},
+    serde_derive::{Deserialize, Serialize},
     std::{collections::HashMap, fs, ops::Sub, path::PathBuf},
 };
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index 2b2a17b0978b2f..a6a832fbbd3621 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -5299,7 +5299,11 @@ name = "solana-feature-set"
 version = "2.1.0"
 dependencies = [
  "lazy_static",
- "solana-program",
+ "solana-clock",
+ "solana-epoch-schedule",
+ "solana-hash",
+ "solana-pubkey",
+ "solana-sha256-hasher",
 ]
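The log-analyzer changes above are needed because the derive macros previously arrived through `serde` itself, presumably with serde's `derive` feature enabled only transitively via the removed dependency chain; importing them from `serde_derive` directly makes the tool independent of that feature unification. A hedged sketch of the resulting pattern (the struct is illustrative, not the tool's real type; the expanded impls still require the plain `serde` crate as a dependency):

use serde_derive::{Deserialize, Serialize};

// The derives come from serde_derive directly, so this compiles even when
// serde's own "derive" feature is not enabled anywhere in the tree.
#[derive(Serialize, Deserialize)]
struct Mapping {
    private: String,
    public: String,
}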
diff --git a/sdk/feature-set/Cargo.toml b/sdk/feature-set/Cargo.toml
index d6f2824b08982e..7352ff37a9680f 100644
--- a/sdk/feature-set/Cargo.toml
+++ b/sdk/feature-set/Cargo.toml
@@ -11,13 +11,17 @@ edition = { workspace = true }
 
 [dependencies]
 lazy_static = { workspace = true }
+solana-clock = { workspace = true }
+solana-epoch-schedule = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true, features = [
   "frozen-abi",
 ] }
 solana-frozen-abi-macro = { workspace = true, optional = true, features = [
   "frozen-abi",
 ] }
-solana-program = { workspace = true }
+solana-hash = { workspace = true }
+solana-pubkey = { workspace = true }
+solana-sha256-hasher = { workspace = true }
 
 [features]
 frozen-abi = [
diff --git a/sdk/feature-set/src/lib.rs b/sdk/feature-set/src/lib.rs
index 6567404a947db5..83d59749758067 100644
--- a/sdk/feature-set/src/lib.rs
+++ b/sdk/feature-set/src/lib.rs
@@ -12,7 +12,7 @@
 //! through these steps, the PR process will facilitate a keypair holder being picked. That
 //! person will generate the keypair, provide pubkey for PR, and ultimately enable the feature.
 //! 2. Add a public module for the feature, specifying keypair pubkey as the id with
-//! `solana_program::declare_id!()` within the module.
+//! `solana_pubkey::declare_id!()` within the module.
 //! Additionally, add an entry to `FEATURE_NAMES` map.
 //! 3. Add desired logic to check for and switch on feature availability.
 //!
@@ -21,854 +21,852 @@
 use {
     lazy_static::lazy_static,
-    solana_program::{
-        clock::Slot,
-        epoch_schedule::EpochSchedule,
-        hash::{Hash, Hasher},
-        pubkey::Pubkey,
-        stake_history::Epoch,
-    },
+    solana_clock::{Epoch, Slot},
+    solana_epoch_schedule::EpochSchedule,
+    solana_hash::Hash,
+    solana_pubkey::Pubkey,
+    solana_sha256_hasher::Hasher,
     std::collections::{HashMap, HashSet},
 };
 pub mod deprecate_rewards_sysvar {
-    solana_program::declare_id!("GaBtBJvmS4Arjj5W1NmFcyvPjsHN38UGYDq2MDwbs9Qu");
+    solana_pubkey::declare_id!("GaBtBJvmS4Arjj5W1NmFcyvPjsHN38UGYDq2MDwbs9Qu");
 }
 pub mod pico_inflation {
-    solana_program::declare_id!("4RWNif6C2WCNiKVW7otP4G7dkmkHGyKQWRpuZ1pxKU5m");
+    solana_pubkey::declare_id!("4RWNif6C2WCNiKVW7otP4G7dkmkHGyKQWRpuZ1pxKU5m");
 }
 pub mod full_inflation {
     pub mod devnet_and_testnet {
-        solana_program::declare_id!("DT4n6ABDqs6w4bnfwrXT9rsprcPf6cdDga1egctaPkLC");
+        solana_pubkey::declare_id!("DT4n6ABDqs6w4bnfwrXT9rsprcPf6cdDga1egctaPkLC");
     }
     pub mod mainnet {
         pub mod certusone {
             pub mod vote {
-                solana_program::declare_id!("BzBBveUDymEYoYzcMWNQCx3cd4jQs7puaVFHLtsbB6fm");
+                solana_pubkey::declare_id!("BzBBveUDymEYoYzcMWNQCx3cd4jQs7puaVFHLtsbB6fm");
             }
             pub mod enable {
-                solana_program::declare_id!("7XRJcS5Ud5vxGB54JbK9N2vBZVwnwdBNeJW1ibRgD9gx");
+                solana_pubkey::declare_id!("7XRJcS5Ud5vxGB54JbK9N2vBZVwnwdBNeJW1ibRgD9gx");
             }
         }
     }
 }
 pub mod secp256k1_program_enabled {
-    solana_program::declare_id!("E3PHP7w8kB7np3CTQ1qQ2tW3KCtjRSXBQgW9vM2mWv2Y");
+    solana_pubkey::declare_id!("E3PHP7w8kB7np3CTQ1qQ2tW3KCtjRSXBQgW9vM2mWv2Y");
 }
 pub mod spl_token_v2_multisig_fix {
-    solana_program::declare_id!("E5JiFDQCwyC6QfT9REFyMpfK2mHcmv1GUDySU1Ue7TYv");
+    solana_pubkey::declare_id!("E5JiFDQCwyC6QfT9REFyMpfK2mHcmv1GUDySU1Ue7TYv");
 }
 pub mod no_overflow_rent_distribution {
-    solana_program::declare_id!("4kpdyrcj5jS47CZb2oJGfVxjYbsMm2Kx97gFyZrxxwXz");
+    solana_pubkey::declare_id!("4kpdyrcj5jS47CZb2oJGfVxjYbsMm2Kx97gFyZrxxwXz");
 }
 pub mod filter_stake_delegation_accounts {
-    solana_program::declare_id!("GE7fRxmW46K6EmCD9AMZSbnaJ2e3LfqCZzdHi9hmYAgi");
+    solana_pubkey::declare_id!("GE7fRxmW46K6EmCD9AMZSbnaJ2e3LfqCZzdHi9hmYAgi");
 }
 pub mod require_custodian_for_locked_stake_authorize {
solana_program::declare_id!("D4jsDcXaqdW8tDAWn8H4R25Cdns2YwLneujSL1zvjW6R"); + solana_pubkey::declare_id!("D4jsDcXaqdW8tDAWn8H4R25Cdns2YwLneujSL1zvjW6R"); } pub mod spl_token_v2_self_transfer_fix { - solana_program::declare_id!("BL99GYhdjjcv6ys22C9wPgn2aTVERDbPHHo4NbS3hgp7"); + solana_pubkey::declare_id!("BL99GYhdjjcv6ys22C9wPgn2aTVERDbPHHo4NbS3hgp7"); } pub mod warp_timestamp_again { - solana_program::declare_id!("GvDsGDkH5gyzwpDhxNixx8vtx1kwYHH13RiNAPw27zXb"); + solana_pubkey::declare_id!("GvDsGDkH5gyzwpDhxNixx8vtx1kwYHH13RiNAPw27zXb"); } pub mod check_init_vote_data { - solana_program::declare_id!("3ccR6QpxGYsAbWyfevEtBNGfWV4xBffxRj2tD6A9i39F"); + solana_pubkey::declare_id!("3ccR6QpxGYsAbWyfevEtBNGfWV4xBffxRj2tD6A9i39F"); } pub mod secp256k1_recover_syscall_enabled { - solana_program::declare_id!("6RvdSWHh8oh72Dp7wMTS2DBkf3fRPtChfNrAo3cZZoXJ"); + solana_pubkey::declare_id!("6RvdSWHh8oh72Dp7wMTS2DBkf3fRPtChfNrAo3cZZoXJ"); } pub mod system_transfer_zero_check { - solana_program::declare_id!("BrTR9hzw4WBGFP65AJMbpAo64DcA3U6jdPSga9fMV5cS"); + solana_pubkey::declare_id!("BrTR9hzw4WBGFP65AJMbpAo64DcA3U6jdPSga9fMV5cS"); } pub mod blake3_syscall_enabled { - solana_program::declare_id!("HTW2pSyErTj4BV6KBM9NZ9VBUJVxt7sacNWcf76wtzb3"); + solana_pubkey::declare_id!("HTW2pSyErTj4BV6KBM9NZ9VBUJVxt7sacNWcf76wtzb3"); } pub mod dedupe_config_program_signers { - solana_program::declare_id!("8kEuAshXLsgkUEdcFVLqrjCGGHVWFW99ZZpxvAzzMtBp"); + solana_pubkey::declare_id!("8kEuAshXLsgkUEdcFVLqrjCGGHVWFW99ZZpxvAzzMtBp"); } pub mod verify_tx_signatures_len { - solana_program::declare_id!("EVW9B5xD9FFK7vw1SBARwMA4s5eRo5eKJdKpsBikzKBz"); + solana_pubkey::declare_id!("EVW9B5xD9FFK7vw1SBARwMA4s5eRo5eKJdKpsBikzKBz"); } pub mod vote_stake_checked_instructions { - solana_program::declare_id!("BcWknVcgvonN8sL4HE4XFuEVgfcee5MwxWPAgP6ZV89X"); + solana_pubkey::declare_id!("BcWknVcgvonN8sL4HE4XFuEVgfcee5MwxWPAgP6ZV89X"); } pub mod rent_for_sysvars { - solana_program::declare_id!("BKCPBQQBZqggVnFso5nQ8rQ4RwwogYwjuUt9biBjxwNF"); + solana_pubkey::declare_id!("BKCPBQQBZqggVnFso5nQ8rQ4RwwogYwjuUt9biBjxwNF"); } pub mod libsecp256k1_0_5_upgrade_enabled { - solana_program::declare_id!("DhsYfRjxfnh2g7HKJYSzT79r74Afa1wbHkAgHndrA1oy"); + solana_pubkey::declare_id!("DhsYfRjxfnh2g7HKJYSzT79r74Afa1wbHkAgHndrA1oy"); } pub mod tx_wide_compute_cap { - solana_program::declare_id!("5ekBxc8itEnPv4NzGJtr8BVVQLNMQuLMNQQj7pHoLNZ9"); + solana_pubkey::declare_id!("5ekBxc8itEnPv4NzGJtr8BVVQLNMQuLMNQQj7pHoLNZ9"); } pub mod spl_token_v2_set_authority_fix { - solana_program::declare_id!("FToKNBYyiF4ky9s8WsmLBXHCht17Ek7RXaLZGHzzQhJ1"); + solana_pubkey::declare_id!("FToKNBYyiF4ky9s8WsmLBXHCht17Ek7RXaLZGHzzQhJ1"); } pub mod merge_nonce_error_into_system_error { - solana_program::declare_id!("21AWDosvp3pBamFW91KB35pNoaoZVTM7ess8nr2nt53B"); + solana_pubkey::declare_id!("21AWDosvp3pBamFW91KB35pNoaoZVTM7ess8nr2nt53B"); } pub mod disable_fees_sysvar { - solana_program::declare_id!("JAN1trEUEtZjgXYzNBYHU9DYd7GnThhXfFP7SzPXkPsG"); + solana_pubkey::declare_id!("JAN1trEUEtZjgXYzNBYHU9DYd7GnThhXfFP7SzPXkPsG"); } pub mod stake_merge_with_unmatched_credits_observed { - solana_program::declare_id!("meRgp4ArRPhD3KtCY9c5yAf2med7mBLsjKTPeVUHqBL"); + solana_pubkey::declare_id!("meRgp4ArRPhD3KtCY9c5yAf2med7mBLsjKTPeVUHqBL"); } pub mod zk_token_sdk_enabled { - solana_program::declare_id!("zk1snxsc6Fh3wsGNbbHAJNHiJoYgF29mMnTSusGx5EJ"); + solana_pubkey::declare_id!("zk1snxsc6Fh3wsGNbbHAJNHiJoYgF29mMnTSusGx5EJ"); } pub mod curve25519_syscall_enabled { - 
solana_program::declare_id!("7rcw5UtqgDTBBv2EcynNfYckgdAaH1MAsCjKgXMkN7Ri"); + solana_pubkey::declare_id!("7rcw5UtqgDTBBv2EcynNfYckgdAaH1MAsCjKgXMkN7Ri"); } pub mod curve25519_restrict_msm_length { - solana_program::declare_id!("eca6zf6JJRjQsYYPkBHF3N32MTzur4n2WL4QiiacPCL"); + solana_pubkey::declare_id!("eca6zf6JJRjQsYYPkBHF3N32MTzur4n2WL4QiiacPCL"); } pub mod versioned_tx_message_enabled { - solana_program::declare_id!("3KZZ6Ks1885aGBQ45fwRcPXVBCtzUvxhUTkwKMR41Tca"); + solana_pubkey::declare_id!("3KZZ6Ks1885aGBQ45fwRcPXVBCtzUvxhUTkwKMR41Tca"); } pub mod libsecp256k1_fail_on_bad_count { - solana_program::declare_id!("8aXvSuopd1PUj7UhehfXJRg6619RHp8ZvwTyyJHdUYsj"); + solana_pubkey::declare_id!("8aXvSuopd1PUj7UhehfXJRg6619RHp8ZvwTyyJHdUYsj"); } pub mod libsecp256k1_fail_on_bad_count2 { - solana_program::declare_id!("54KAoNiUERNoWWUhTWWwXgym94gzoXFVnHyQwPA18V9A"); + solana_pubkey::declare_id!("54KAoNiUERNoWWUhTWWwXgym94gzoXFVnHyQwPA18V9A"); } pub mod instructions_sysvar_owned_by_sysvar { - solana_program::declare_id!("H3kBSaKdeiUsyHmeHqjJYNc27jesXZ6zWj3zWkowQbkV"); + solana_pubkey::declare_id!("H3kBSaKdeiUsyHmeHqjJYNc27jesXZ6zWj3zWkowQbkV"); } pub mod stake_program_advance_activating_credits_observed { - solana_program::declare_id!("SAdVFw3RZvzbo6DvySbSdBnHN4gkzSTH9dSxesyKKPj"); + solana_pubkey::declare_id!("SAdVFw3RZvzbo6DvySbSdBnHN4gkzSTH9dSxesyKKPj"); } pub mod credits_auto_rewind { - solana_program::declare_id!("BUS12ciZ5gCoFafUHWW8qaFMMtwFQGVxjsDheWLdqBE2"); + solana_pubkey::declare_id!("BUS12ciZ5gCoFafUHWW8qaFMMtwFQGVxjsDheWLdqBE2"); } pub mod demote_program_write_locks { - solana_program::declare_id!("3E3jV7v9VcdJL8iYZUMax9DiDno8j7EWUVbhm9RtShj2"); + solana_pubkey::declare_id!("3E3jV7v9VcdJL8iYZUMax9DiDno8j7EWUVbhm9RtShj2"); } pub mod ed25519_program_enabled { - solana_program::declare_id!("6ppMXNYLhVd7GcsZ5uV11wQEW7spppiMVfqQv5SXhDpX"); + solana_pubkey::declare_id!("6ppMXNYLhVd7GcsZ5uV11wQEW7spppiMVfqQv5SXhDpX"); } pub mod return_data_syscall_enabled { - solana_program::declare_id!("DwScAzPUjuv65TMbDnFY7AgwmotzWy3xpEJMXM3hZFaB"); + solana_pubkey::declare_id!("DwScAzPUjuv65TMbDnFY7AgwmotzWy3xpEJMXM3hZFaB"); } pub mod reduce_required_deploy_balance { - solana_program::declare_id!("EBeznQDjcPG8491sFsKZYBi5S5jTVXMpAKNDJMQPS2kq"); + solana_pubkey::declare_id!("EBeznQDjcPG8491sFsKZYBi5S5jTVXMpAKNDJMQPS2kq"); } pub mod sol_log_data_syscall_enabled { - solana_program::declare_id!("6uaHcKPGUy4J7emLBgUTeufhJdiwhngW6a1R9B7c2ob9"); + solana_pubkey::declare_id!("6uaHcKPGUy4J7emLBgUTeufhJdiwhngW6a1R9B7c2ob9"); } pub mod stakes_remove_delegation_if_inactive { - solana_program::declare_id!("HFpdDDNQjvcXnXKec697HDDsyk6tFoWS2o8fkxuhQZpL"); + solana_pubkey::declare_id!("HFpdDDNQjvcXnXKec697HDDsyk6tFoWS2o8fkxuhQZpL"); } pub mod do_support_realloc { - solana_program::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); + solana_pubkey::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); } pub mod prevent_calling_precompiles_as_programs { - solana_program::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); + solana_pubkey::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); } pub mod optimize_epoch_boundary_updates { - solana_program::declare_id!("265hPS8k8xJ37ot82KEgjRunsUp5w4n4Q4VwwiN9i9ps"); + solana_pubkey::declare_id!("265hPS8k8xJ37ot82KEgjRunsUp5w4n4Q4VwwiN9i9ps"); } pub mod remove_native_loader { - solana_program::declare_id!("HTTgmruMYRZEntyL3EdCDdnS6e4D5wRq1FA7kQsb66qq"); + solana_pubkey::declare_id!("HTTgmruMYRZEntyL3EdCDdnS6e4D5wRq1FA7kQsb66qq"); } pub mod 
 pub mod send_to_tpu_vote_port {
-    solana_program::declare_id!("C5fh68nJ7uyKAuYZg2x9sEQ5YrVf3dkW6oojNBSc3Jvo");
+    solana_pubkey::declare_id!("C5fh68nJ7uyKAuYZg2x9sEQ5YrVf3dkW6oojNBSc3Jvo");
 }
 pub mod requestable_heap_size {
-    solana_program::declare_id!("CCu4boMmfLuqcmfTLPHQiUo22ZdUsXjgzPAURYaWt1Bw");
+    solana_pubkey::declare_id!("CCu4boMmfLuqcmfTLPHQiUo22ZdUsXjgzPAURYaWt1Bw");
 }
 pub mod disable_fee_calculator {
-    solana_program::declare_id!("2jXx2yDmGysmBKfKYNgLj2DQyAQv6mMk2BPh4eSbyB4H");
+    solana_pubkey::declare_id!("2jXx2yDmGysmBKfKYNgLj2DQyAQv6mMk2BPh4eSbyB4H");
 }
 pub mod add_compute_budget_program {
-    solana_program::declare_id!("4d5AKtxoh93Dwm1vHXUU3iRATuMndx1c431KgT2td52r");
+    solana_pubkey::declare_id!("4d5AKtxoh93Dwm1vHXUU3iRATuMndx1c431KgT2td52r");
 }
 pub mod nonce_must_be_writable {
-    solana_program::declare_id!("BiCU7M5w8ZCMykVSyhZ7Q3m2SWoR2qrEQ86ERcDX77ME");
+    solana_pubkey::declare_id!("BiCU7M5w8ZCMykVSyhZ7Q3m2SWoR2qrEQ86ERcDX77ME");
 }
 pub mod spl_token_v3_3_0_release {
-    solana_program::declare_id!("Ftok2jhqAqxUWEiCVRrfRs9DPppWP8cgTB7NQNKL88mS");
+    solana_pubkey::declare_id!("Ftok2jhqAqxUWEiCVRrfRs9DPppWP8cgTB7NQNKL88mS");
 }
 pub mod leave_nonce_on_success {
-    solana_program::declare_id!("E8MkiWZNNPGU6n55jkGzyj8ghUmjCHRmDFdYYFYHxWhQ");
+    solana_pubkey::declare_id!("E8MkiWZNNPGU6n55jkGzyj8ghUmjCHRmDFdYYFYHxWhQ");
 }
 pub mod reject_empty_instruction_without_program {
-    solana_program::declare_id!("9kdtFSrXHQg3hKkbXkQ6trJ3Ja1xpJ22CTFSNAciEwmL");
+    solana_pubkey::declare_id!("9kdtFSrXHQg3hKkbXkQ6trJ3Ja1xpJ22CTFSNAciEwmL");
 }
 pub mod fixed_memcpy_nonoverlapping_check {
-    solana_program::declare_id!("36PRUK2Dz6HWYdG9SpjeAsF5F3KxnFCakA2BZMbtMhSb");
+    solana_pubkey::declare_id!("36PRUK2Dz6HWYdG9SpjeAsF5F3KxnFCakA2BZMbtMhSb");
 }
 pub mod reject_non_rent_exempt_vote_withdraws {
-    solana_program::declare_id!("7txXZZD6Um59YoLMF7XUNimbMjsqsWhc7g2EniiTrmp1");
+    solana_pubkey::declare_id!("7txXZZD6Um59YoLMF7XUNimbMjsqsWhc7g2EniiTrmp1");
 }
 pub mod evict_invalid_stakes_cache_entries {
-    solana_program::declare_id!("EMX9Q7TVFAmQ9V1CggAkhMzhXSg8ECp7fHrWQX2G1chf");
+    solana_pubkey::declare_id!("EMX9Q7TVFAmQ9V1CggAkhMzhXSg8ECp7fHrWQX2G1chf");
 }
 pub mod allow_votes_to_directly_update_vote_state {
-    solana_program::declare_id!("Ff8b1fBeB86q8cjq47ZhsQLgv5EkHu3G1C99zjUfAzrq");
+    solana_pubkey::declare_id!("Ff8b1fBeB86q8cjq47ZhsQLgv5EkHu3G1C99zjUfAzrq");
 }
 pub mod max_tx_account_locks {
-    solana_program::declare_id!("CBkDroRDqm8HwHe6ak9cguPjUomrASEkfmxEaZ5CNNxz");
+    solana_pubkey::declare_id!("CBkDroRDqm8HwHe6ak9cguPjUomrASEkfmxEaZ5CNNxz");
 }
 pub mod require_rent_exempt_accounts {
-    solana_program::declare_id!("BkFDxiJQWZXGTZaJQxH7wVEHkAmwCgSEVkrvswFfRJPD");
+    solana_pubkey::declare_id!("BkFDxiJQWZXGTZaJQxH7wVEHkAmwCgSEVkrvswFfRJPD");
 }
 pub mod filter_votes_outside_slot_hashes {
-    solana_program::declare_id!("3gtZPqvPpsbXZVCx6hceMfWxtsmrjMzmg8C7PLKSxS2d");
+    solana_pubkey::declare_id!("3gtZPqvPpsbXZVCx6hceMfWxtsmrjMzmg8C7PLKSxS2d");
 }
 pub mod update_syscall_base_costs {
-    solana_program::declare_id!("2h63t332mGCCsWK2nqqqHhN4U9ayyqhLVFvczznHDoTZ");
+    solana_pubkey::declare_id!("2h63t332mGCCsWK2nqqqHhN4U9ayyqhLVFvczznHDoTZ");
 }
 pub mod stake_deactivate_delinquent_instruction {
-    solana_program::declare_id!("437r62HoAdUb63amq3D7ENnBLDhHT2xY8eFkLJYVKK4x");
+    solana_pubkey::declare_id!("437r62HoAdUb63amq3D7ENnBLDhHT2xY8eFkLJYVKK4x");
 }
 pub mod vote_withdraw_authority_may_change_authorized_voter {
-    solana_program::declare_id!("AVZS3ZsN4gi6Rkx2QUibYuSJG3S6QHib7xCYhG6vGJxU");
solana_pubkey::declare_id!("AVZS3ZsN4gi6Rkx2QUibYuSJG3S6QHib7xCYhG6vGJxU"); } pub mod spl_associated_token_account_v1_0_4 { - solana_program::declare_id!("FaTa4SpiaSNH44PGC4z8bnGVTkSRYaWvrBs3KTu8XQQq"); + solana_pubkey::declare_id!("FaTa4SpiaSNH44PGC4z8bnGVTkSRYaWvrBs3KTu8XQQq"); } pub mod reject_vote_account_close_unless_zero_credit_epoch { - solana_program::declare_id!("ALBk3EWdeAg2WAGf6GPDUf1nynyNqCdEVmgouG7rpuCj"); + solana_pubkey::declare_id!("ALBk3EWdeAg2WAGf6GPDUf1nynyNqCdEVmgouG7rpuCj"); } pub mod add_get_processed_sibling_instruction_syscall { - solana_program::declare_id!("CFK1hRCNy8JJuAAY8Pb2GjLFNdCThS2qwZNe3izzBMgn"); + solana_pubkey::declare_id!("CFK1hRCNy8JJuAAY8Pb2GjLFNdCThS2qwZNe3izzBMgn"); } pub mod bank_transaction_count_fix { - solana_program::declare_id!("Vo5siZ442SaZBKPXNocthiXysNviW4UYPwRFggmbgAp"); + solana_pubkey::declare_id!("Vo5siZ442SaZBKPXNocthiXysNviW4UYPwRFggmbgAp"); } pub mod disable_bpf_deprecated_load_instructions { - solana_program::declare_id!("3XgNukcZWf9o3HdA3fpJbm94XFc4qpvTXc8h1wxYwiPi"); + solana_pubkey::declare_id!("3XgNukcZWf9o3HdA3fpJbm94XFc4qpvTXc8h1wxYwiPi"); } pub mod disable_bpf_unresolved_symbols_at_runtime { - solana_program::declare_id!("4yuaYAj2jGMGTh1sSmi4G2eFscsDq8qjugJXZoBN6YEa"); + solana_pubkey::declare_id!("4yuaYAj2jGMGTh1sSmi4G2eFscsDq8qjugJXZoBN6YEa"); } pub mod record_instruction_in_transaction_context_push { - solana_program::declare_id!("3aJdcZqxoLpSBxgeYGjPwaYS1zzcByxUDqJkbzWAH1Zb"); + solana_pubkey::declare_id!("3aJdcZqxoLpSBxgeYGjPwaYS1zzcByxUDqJkbzWAH1Zb"); } pub mod syscall_saturated_math { - solana_program::declare_id!("HyrbKftCdJ5CrUfEti6x26Cj7rZLNe32weugk7tLcWb8"); + solana_pubkey::declare_id!("HyrbKftCdJ5CrUfEti6x26Cj7rZLNe32weugk7tLcWb8"); } pub mod check_physical_overlapping { - solana_program::declare_id!("nWBqjr3gpETbiaVj3CBJ3HFC5TMdnJDGt21hnvSTvVZ"); + solana_pubkey::declare_id!("nWBqjr3gpETbiaVj3CBJ3HFC5TMdnJDGt21hnvSTvVZ"); } pub mod limit_secp256k1_recovery_id { - solana_program::declare_id!("7g9EUwj4j7CS21Yx1wvgWLjSZeh5aPq8x9kpoPwXM8n8"); + solana_pubkey::declare_id!("7g9EUwj4j7CS21Yx1wvgWLjSZeh5aPq8x9kpoPwXM8n8"); } pub mod disable_deprecated_loader { - solana_program::declare_id!("GTUMCZ8LTNxVfxdrw7ZsDFTxXb7TutYkzJnFwinpE6dg"); + solana_pubkey::declare_id!("GTUMCZ8LTNxVfxdrw7ZsDFTxXb7TutYkzJnFwinpE6dg"); } pub mod check_slice_translation_size { - solana_program::declare_id!("GmC19j9qLn2RFk5NduX6QXaDhVpGncVVBzyM8e9WMz2F"); + solana_pubkey::declare_id!("GmC19j9qLn2RFk5NduX6QXaDhVpGncVVBzyM8e9WMz2F"); } pub mod stake_split_uses_rent_sysvar { - solana_program::declare_id!("FQnc7U4koHqWgRvFaBJjZnV8VPg6L6wWK33yJeDp4yvV"); + solana_pubkey::declare_id!("FQnc7U4koHqWgRvFaBJjZnV8VPg6L6wWK33yJeDp4yvV"); } pub mod add_get_minimum_delegation_instruction_to_stake_program { - solana_program::declare_id!("St8k9dVXP97xT6faW24YmRSYConLbhsMJA4TJTBLmMT"); + solana_pubkey::declare_id!("St8k9dVXP97xT6faW24YmRSYConLbhsMJA4TJTBLmMT"); } pub mod error_on_syscall_bpf_function_hash_collisions { - solana_program::declare_id!("8199Q2gMD2kwgfopK5qqVWuDbegLgpuFUFHCcUJQDN8b"); + solana_pubkey::declare_id!("8199Q2gMD2kwgfopK5qqVWuDbegLgpuFUFHCcUJQDN8b"); } pub mod reject_callx_r10 { - solana_program::declare_id!("3NKRSwpySNwD3TvP5pHnRmkAQRsdkXWRr1WaQh8p4PWX"); + solana_pubkey::declare_id!("3NKRSwpySNwD3TvP5pHnRmkAQRsdkXWRr1WaQh8p4PWX"); } pub mod drop_redundant_turbine_path { - solana_program::declare_id!("4Di3y24QFLt5QEUPZtbnjyfQKfm6ZMTfa6Dw1psfoMKU"); + solana_pubkey::declare_id!("4Di3y24QFLt5QEUPZtbnjyfQKfm6ZMTfa6Dw1psfoMKU"); } pub mod 
 pub mod executables_incur_cpi_data_cost {
-    solana_program::declare_id!("7GUcYgq4tVtaqNCKT3dho9r4665Qp5TxCZ27Qgjx3829");
+    solana_pubkey::declare_id!("7GUcYgq4tVtaqNCKT3dho9r4665Qp5TxCZ27Qgjx3829");
 }
 pub mod fix_recent_blockhashes {
-    solana_program::declare_id!("6iyggb5MTcsvdcugX7bEKbHV8c6jdLbpHwkncrgLMhfo");
+    solana_pubkey::declare_id!("6iyggb5MTcsvdcugX7bEKbHV8c6jdLbpHwkncrgLMhfo");
 }
 pub mod update_rewards_from_cached_accounts {
-    solana_program::declare_id!("28s7i3htzhahXQKqmS2ExzbEoUypg9krwvtK2M9UWXh9");
+    solana_pubkey::declare_id!("28s7i3htzhahXQKqmS2ExzbEoUypg9krwvtK2M9UWXh9");
 }
 pub mod enable_partitioned_epoch_reward {
-    solana_program::declare_id!("9bn2vTJUsUcnpiZWbu2woSKtTGW3ErZC9ERv88SDqQjK");
+    solana_pubkey::declare_id!("9bn2vTJUsUcnpiZWbu2woSKtTGW3ErZC9ERv88SDqQjK");
 }
 pub mod partitioned_epoch_rewards_superfeature {
-    solana_program::declare_id!("PERzQrt5gBD1XEe2c9XdFWqwgHY3mr7cYWbm5V772V8");
+    solana_pubkey::declare_id!("PERzQrt5gBD1XEe2c9XdFWqwgHY3mr7cYWbm5V772V8");
 }
 pub mod spl_token_v3_4_0 {
-    solana_program::declare_id!("Ftok4njE8b7tDffYkC5bAbCaQv5sL6jispYrprzatUwN");
+    solana_pubkey::declare_id!("Ftok4njE8b7tDffYkC5bAbCaQv5sL6jispYrprzatUwN");
 }
 pub mod spl_associated_token_account_v1_1_0 {
-    solana_program::declare_id!("FaTa17gVKoqbh38HcfiQonPsAaQViyDCCSg71AubYZw8");
+    solana_pubkey::declare_id!("FaTa17gVKoqbh38HcfiQonPsAaQViyDCCSg71AubYZw8");
 }
 pub mod default_units_per_instruction {
-    solana_program::declare_id!("J2QdYx8crLbTVK8nur1jeLsmc3krDbfjoxoea2V1Uy5Q");
+    solana_pubkey::declare_id!("J2QdYx8crLbTVK8nur1jeLsmc3krDbfjoxoea2V1Uy5Q");
 }
 pub mod stake_allow_zero_undelegated_amount {
-    solana_program::declare_id!("sTKz343FM8mqtyGvYWvbLpTThw3ixRM4Xk8QvZ985mw");
+    solana_pubkey::declare_id!("sTKz343FM8mqtyGvYWvbLpTThw3ixRM4Xk8QvZ985mw");
 }
 pub mod require_static_program_ids_in_transaction {
-    solana_program::declare_id!("8FdwgyHFEjhAdjWfV2vfqk7wA1g9X3fQpKH7SBpEv3kC");
+    solana_pubkey::declare_id!("8FdwgyHFEjhAdjWfV2vfqk7wA1g9X3fQpKH7SBpEv3kC");
 }
 pub mod stake_raise_minimum_delegation_to_1_sol {
     // This is a feature-proposal *feature id*. The feature keypair address is `GQXzC7YiSNkje6FFUk6sc2p53XRvKoaZ9VMktYzUMnpL`.
- solana_program::declare_id!("9onWzzvCzNC2jfhxxeqRgs5q7nFAAKpCUvkj6T6GJK9i"); + solana_pubkey::declare_id!("9onWzzvCzNC2jfhxxeqRgs5q7nFAAKpCUvkj6T6GJK9i"); } pub mod stake_minimum_delegation_for_rewards { - solana_program::declare_id!("G6ANXD6ptCSyNd9znZm7j4dEczAJCfx7Cy43oBx3rKHJ"); + solana_pubkey::declare_id!("G6ANXD6ptCSyNd9znZm7j4dEczAJCfx7Cy43oBx3rKHJ"); } pub mod add_set_compute_unit_price_ix { - solana_program::declare_id!("98std1NSHqXi9WYvFShfVepRdCoq1qvsp8fsR2XZtG8g"); + solana_pubkey::declare_id!("98std1NSHqXi9WYvFShfVepRdCoq1qvsp8fsR2XZtG8g"); } pub mod disable_deploy_of_alloc_free_syscall { - solana_program::declare_id!("79HWsX9rpnnJBPcdNURVqygpMAfxdrAirzAGAVmf92im"); + solana_pubkey::declare_id!("79HWsX9rpnnJBPcdNURVqygpMAfxdrAirzAGAVmf92im"); } pub mod include_account_index_in_rent_error { - solana_program::declare_id!("2R72wpcQ7qV7aTJWUumdn8u5wmmTyXbK7qzEy7YSAgyY"); + solana_pubkey::declare_id!("2R72wpcQ7qV7aTJWUumdn8u5wmmTyXbK7qzEy7YSAgyY"); } pub mod add_shred_type_to_shred_seed { - solana_program::declare_id!("Ds87KVeqhbv7Jw8W6avsS1mqz3Mw5J3pRTpPoDQ2QdiJ"); + solana_pubkey::declare_id!("Ds87KVeqhbv7Jw8W6avsS1mqz3Mw5J3pRTpPoDQ2QdiJ"); } pub mod warp_timestamp_with_a_vengeance { - solana_program::declare_id!("3BX6SBeEBibHaVQXywdkcgyUk6evfYZkHdztXiDtEpFS"); + solana_pubkey::declare_id!("3BX6SBeEBibHaVQXywdkcgyUk6evfYZkHdztXiDtEpFS"); } pub mod separate_nonce_from_blockhash { - solana_program::declare_id!("Gea3ZkK2N4pHuVZVxWcnAtS6UEDdyumdYt4pFcKjA3ar"); + solana_pubkey::declare_id!("Gea3ZkK2N4pHuVZVxWcnAtS6UEDdyumdYt4pFcKjA3ar"); } pub mod enable_durable_nonce { - solana_program::declare_id!("4EJQtF2pkRyawwcTVfQutzq4Sa5hRhibF6QAK1QXhtEX"); + solana_pubkey::declare_id!("4EJQtF2pkRyawwcTVfQutzq4Sa5hRhibF6QAK1QXhtEX"); } pub mod vote_state_update_credit_per_dequeue { - solana_program::declare_id!("CveezY6FDLVBToHDcvJRmtMouqzsmj4UXYh5ths5G5Uv"); + solana_pubkey::declare_id!("CveezY6FDLVBToHDcvJRmtMouqzsmj4UXYh5ths5G5Uv"); } pub mod quick_bail_on_panic { - solana_program::declare_id!("DpJREPyuMZ5nDfU6H3WTqSqUFSXAfw8u7xqmWtEwJDcP"); + solana_pubkey::declare_id!("DpJREPyuMZ5nDfU6H3WTqSqUFSXAfw8u7xqmWtEwJDcP"); } pub mod nonce_must_be_authorized { - solana_program::declare_id!("HxrEu1gXuH7iD3Puua1ohd5n4iUKJyFNtNxk9DVJkvgr"); + solana_pubkey::declare_id!("HxrEu1gXuH7iD3Puua1ohd5n4iUKJyFNtNxk9DVJkvgr"); } pub mod nonce_must_be_advanceable { - solana_program::declare_id!("3u3Er5Vc2jVcwz4xr2GJeSAXT3fAj6ADHZ4BJMZiScFd"); + solana_pubkey::declare_id!("3u3Er5Vc2jVcwz4xr2GJeSAXT3fAj6ADHZ4BJMZiScFd"); } pub mod vote_authorize_with_seed { - solana_program::declare_id!("6tRxEYKuy2L5nnv5bgn7iT28MxUbYxp5h7F3Ncf1exrT"); + solana_pubkey::declare_id!("6tRxEYKuy2L5nnv5bgn7iT28MxUbYxp5h7F3Ncf1exrT"); } pub mod preserve_rent_epoch_for_rent_exempt_accounts { - solana_program::declare_id!("HH3MUYReL2BvqqA3oEcAa7txju5GY6G4nxJ51zvsEjEZ"); + solana_pubkey::declare_id!("HH3MUYReL2BvqqA3oEcAa7txju5GY6G4nxJ51zvsEjEZ"); } pub mod enable_bpf_loader_extend_program_ix { - solana_program::declare_id!("8Zs9W7D9MpSEtUWSQdGniZk2cNmV22y6FLJwCx53asme"); + solana_pubkey::declare_id!("8Zs9W7D9MpSEtUWSQdGniZk2cNmV22y6FLJwCx53asme"); } pub mod enable_early_verification_of_account_modifications { - solana_program::declare_id!("7Vced912WrRnfjaiKRiNBcbuFw7RrnLv3E3z95Y4GTNc"); + solana_pubkey::declare_id!("7Vced912WrRnfjaiKRiNBcbuFw7RrnLv3E3z95Y4GTNc"); } pub mod skip_rent_rewrites { - solana_program::declare_id!("CGB2jM8pwZkeeiXQ66kBMyBR6Np61mggL7XUsmLjVcrw"); + 
solana_pubkey::declare_id!("CGB2jM8pwZkeeiXQ66kBMyBR6Np61mggL7XUsmLjVcrw"); } pub mod prevent_crediting_accounts_that_end_rent_paying { - solana_program::declare_id!("812kqX67odAp5NFwM8D2N24cku7WTm9CHUTFUXaDkWPn"); + solana_pubkey::declare_id!("812kqX67odAp5NFwM8D2N24cku7WTm9CHUTFUXaDkWPn"); } pub mod cap_bpf_program_instruction_accounts { - solana_program::declare_id!("9k5ijzTbYPtjzu8wj2ErH9v45xecHzQ1x4PMYMMxFgdM"); + solana_pubkey::declare_id!("9k5ijzTbYPtjzu8wj2ErH9v45xecHzQ1x4PMYMMxFgdM"); } pub mod loosen_cpi_size_restriction { - solana_program::declare_id!("GDH5TVdbTPUpRnXaRyQqiKUa7uZAbZ28Q2N9bhbKoMLm"); + solana_pubkey::declare_id!("GDH5TVdbTPUpRnXaRyQqiKUa7uZAbZ28Q2N9bhbKoMLm"); } pub mod use_default_units_in_fee_calculation { - solana_program::declare_id!("8sKQrMQoUHtQSUP83SPG4ta2JDjSAiWs7t5aJ9uEd6To"); + solana_pubkey::declare_id!("8sKQrMQoUHtQSUP83SPG4ta2JDjSAiWs7t5aJ9uEd6To"); } pub mod compact_vote_state_updates { - solana_program::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); + solana_pubkey::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); } pub mod incremental_snapshot_only_incremental_hash_calculation { - solana_program::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); + solana_pubkey::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); } pub mod disable_cpi_setting_executable_and_rent_epoch { - solana_program::declare_id!("B9cdB55u4jQsDNsdTK525yE9dmSc5Ga7YBaBrDFvEhM9"); + solana_pubkey::declare_id!("B9cdB55u4jQsDNsdTK525yE9dmSc5Ga7YBaBrDFvEhM9"); } pub mod on_load_preserve_rent_epoch_for_rent_exempt_accounts { - solana_program::declare_id!("CpkdQmspsaZZ8FVAouQTtTWZkc8eeQ7V3uj7dWz543rZ"); + solana_pubkey::declare_id!("CpkdQmspsaZZ8FVAouQTtTWZkc8eeQ7V3uj7dWz543rZ"); } pub mod account_hash_ignore_slot { - solana_program::declare_id!("SVn36yVApPLYsa8koK3qUcy14zXDnqkNYWyUh1f4oK1"); + solana_pubkey::declare_id!("SVn36yVApPLYsa8koK3qUcy14zXDnqkNYWyUh1f4oK1"); } pub mod set_exempt_rent_epoch_max { - solana_program::declare_id!("5wAGiy15X1Jb2hkHnPDCM8oB9V42VNA9ftNVFK84dEgv"); + solana_pubkey::declare_id!("5wAGiy15X1Jb2hkHnPDCM8oB9V42VNA9ftNVFK84dEgv"); } pub mod relax_authority_signer_check_for_lookup_table_creation { - solana_program::declare_id!("FKAcEvNgSY79RpqsPNUV5gDyumopH4cEHqUxyfm8b8Ap"); + solana_pubkey::declare_id!("FKAcEvNgSY79RpqsPNUV5gDyumopH4cEHqUxyfm8b8Ap"); } pub mod stop_sibling_instruction_search_at_parent { - solana_program::declare_id!("EYVpEP7uzH1CoXzbD6PubGhYmnxRXPeq3PPsm1ba3gpo"); + solana_pubkey::declare_id!("EYVpEP7uzH1CoXzbD6PubGhYmnxRXPeq3PPsm1ba3gpo"); } pub mod vote_state_update_root_fix { - solana_program::declare_id!("G74BkWBzmsByZ1kxHy44H3wjwp5hp7JbrGRuDpco22tY"); + solana_pubkey::declare_id!("G74BkWBzmsByZ1kxHy44H3wjwp5hp7JbrGRuDpco22tY"); } pub mod cap_accounts_data_allocations_per_transaction { - solana_program::declare_id!("9gxu85LYRAcZL38We8MYJ4A9AwgBBPtVBAqebMcT1241"); + solana_pubkey::declare_id!("9gxu85LYRAcZL38We8MYJ4A9AwgBBPtVBAqebMcT1241"); } pub mod epoch_accounts_hash { - solana_program::declare_id!("5GpmAKxaGsWWbPp4bNXFLJxZVvG92ctxf7jQnzTQjF3n"); + solana_pubkey::declare_id!("5GpmAKxaGsWWbPp4bNXFLJxZVvG92ctxf7jQnzTQjF3n"); } pub mod remove_deprecated_request_unit_ix { - solana_program::declare_id!("EfhYd3SafzGT472tYQDUc4dPd2xdEfKs5fwkowUgVt4W"); + solana_pubkey::declare_id!("EfhYd3SafzGT472tYQDUc4dPd2xdEfKs5fwkowUgVt4W"); } pub mod disable_rehash_for_rent_epoch { - solana_program::declare_id!("DTVTkmw3JSofd8CJVJte8PXEbxNQ2yZijvVr3pe2APPj"); + 
solana_pubkey::declare_id!("DTVTkmw3JSofd8CJVJte8PXEbxNQ2yZijvVr3pe2APPj"); } pub mod increase_tx_account_lock_limit { - solana_program::declare_id!("9LZdXeKGeBV6hRLdxS1rHbHoEUsKqesCC2ZAPTPKJAbK"); + solana_pubkey::declare_id!("9LZdXeKGeBV6hRLdxS1rHbHoEUsKqesCC2ZAPTPKJAbK"); } pub mod limit_max_instruction_trace_length { - solana_program::declare_id!("GQALDaC48fEhZGWRj9iL5Q889emJKcj3aCvHF7VCbbF4"); + solana_pubkey::declare_id!("GQALDaC48fEhZGWRj9iL5Q889emJKcj3aCvHF7VCbbF4"); } pub mod check_syscall_outputs_do_not_overlap { - solana_program::declare_id!("3uRVPBpyEJRo1emLCrq38eLRFGcu6uKSpUXqGvU8T7SZ"); + solana_pubkey::declare_id!("3uRVPBpyEJRo1emLCrq38eLRFGcu6uKSpUXqGvU8T7SZ"); } pub mod enable_bpf_loader_set_authority_checked_ix { - solana_program::declare_id!("5x3825XS7M2A3Ekbn5VGGkvFoAg5qrRWkTrY4bARP1GL"); + solana_pubkey::declare_id!("5x3825XS7M2A3Ekbn5VGGkvFoAg5qrRWkTrY4bARP1GL"); } pub mod enable_alt_bn128_syscall { - solana_program::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); + solana_pubkey::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); } pub mod simplify_alt_bn128_syscall_error_codes { - solana_program::declare_id!("JDn5q3GBeqzvUa7z67BbmVHVdE3EbUAjvFep3weR3jxX"); + solana_pubkey::declare_id!("JDn5q3GBeqzvUa7z67BbmVHVdE3EbUAjvFep3weR3jxX"); } pub mod enable_alt_bn128_compression_syscall { - solana_program::declare_id!("EJJewYSddEEtSZHiqugnvhQHiWyZKjkFDQASd7oKSagn"); + solana_pubkey::declare_id!("EJJewYSddEEtSZHiqugnvhQHiWyZKjkFDQASd7oKSagn"); } pub mod enable_program_redeployment_cooldown { - solana_program::declare_id!("J4HFT8usBxpcF63y46t1upYobJgChmKyZPm5uTBRg25Z"); + solana_pubkey::declare_id!("J4HFT8usBxpcF63y46t1upYobJgChmKyZPm5uTBRg25Z"); } pub mod commission_updates_only_allowed_in_first_half_of_epoch { - solana_program::declare_id!("noRuG2kzACwgaY7TVmLRnUNPLKNVQE1fb7X55YWBehp"); + solana_pubkey::declare_id!("noRuG2kzACwgaY7TVmLRnUNPLKNVQE1fb7X55YWBehp"); } pub mod enable_turbine_fanout_experiments { - solana_program::declare_id!("D31EFnLgdiysi84Woo3of4JMu7VmasUS3Z7j9HYXCeLY"); + solana_pubkey::declare_id!("D31EFnLgdiysi84Woo3of4JMu7VmasUS3Z7j9HYXCeLY"); } pub mod disable_turbine_fanout_experiments { - solana_program::declare_id!("Gz1aLrbeQ4Q6PTSafCZcGWZXz91yVRi7ASFzFEr1U4sa"); + solana_pubkey::declare_id!("Gz1aLrbeQ4Q6PTSafCZcGWZXz91yVRi7ASFzFEr1U4sa"); } pub mod move_serialized_len_ptr_in_cpi { - solana_program::declare_id!("74CoWuBmt3rUVUrCb2JiSTvh6nXyBWUsK4SaMj3CtE3T"); + solana_pubkey::declare_id!("74CoWuBmt3rUVUrCb2JiSTvh6nXyBWUsK4SaMj3CtE3T"); } pub mod update_hashes_per_tick { - solana_program::declare_id!("3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B"); + solana_pubkey::declare_id!("3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B"); } pub mod enable_big_mod_exp_syscall { - solana_program::declare_id!("EBq48m8irRKuE7ZnMTLvLg2UuGSqhe8s8oMqnmja1fJw"); + solana_pubkey::declare_id!("EBq48m8irRKuE7ZnMTLvLg2UuGSqhe8s8oMqnmja1fJw"); } pub mod disable_builtin_loader_ownership_chains { - solana_program::declare_id!("4UDcAfQ6EcA6bdcadkeHpkarkhZGJ7Bpq7wTAiRMjkoi"); + solana_pubkey::declare_id!("4UDcAfQ6EcA6bdcadkeHpkarkhZGJ7Bpq7wTAiRMjkoi"); } pub mod cap_transaction_accounts_data_size { - solana_program::declare_id!("DdLwVYuvDz26JohmgSbA7mjpJFgX5zP2dkp8qsF2C33V"); + solana_pubkey::declare_id!("DdLwVYuvDz26JohmgSbA7mjpJFgX5zP2dkp8qsF2C33V"); } pub mod remove_congestion_multiplier_from_fee_calculation { - solana_program::declare_id!("A8xyMHZovGXFkorFqEmVH2PKGLiBip5JD7jt4zsUWo4H"); + 
solana_pubkey::declare_id!("A8xyMHZovGXFkorFqEmVH2PKGLiBip5JD7jt4zsUWo4H"); } pub mod enable_request_heap_frame_ix { - solana_program::declare_id!("Hr1nUA9b7NJ6eChS26o7Vi8gYYDDwWD3YeBfzJkTbU86"); + solana_pubkey::declare_id!("Hr1nUA9b7NJ6eChS26o7Vi8gYYDDwWD3YeBfzJkTbU86"); } pub mod prevent_rent_paying_rent_recipients { - solana_program::declare_id!("Fab5oP3DmsLYCiQZXdjyqT3ukFFPrsmqhXU4WU1AWVVF"); + solana_pubkey::declare_id!("Fab5oP3DmsLYCiQZXdjyqT3ukFFPrsmqhXU4WU1AWVVF"); } pub mod delay_visibility_of_program_deployment { - solana_program::declare_id!("GmuBvtFb2aHfSfMXpuFeWZGHyDeCLPS79s48fmCWCfM5"); + solana_pubkey::declare_id!("GmuBvtFb2aHfSfMXpuFeWZGHyDeCLPS79s48fmCWCfM5"); } pub mod apply_cost_tracker_during_replay { - solana_program::declare_id!("2ry7ygxiYURULZCrypHhveanvP5tzZ4toRwVp89oCNSj"); + solana_pubkey::declare_id!("2ry7ygxiYURULZCrypHhveanvP5tzZ4toRwVp89oCNSj"); } pub mod bpf_account_data_direct_mapping { - solana_program::declare_id!("EenyoWx9UMXYKpR8mW5Jmfmy2fRjzUtM7NduYMY8bx33"); + solana_pubkey::declare_id!("EenyoWx9UMXYKpR8mW5Jmfmy2fRjzUtM7NduYMY8bx33"); } pub mod add_set_tx_loaded_accounts_data_size_instruction { - solana_program::declare_id!("G6vbf1UBok8MWb8m25ex86aoQHeKTzDKzuZADHkShqm6"); + solana_pubkey::declare_id!("G6vbf1UBok8MWb8m25ex86aoQHeKTzDKzuZADHkShqm6"); } pub mod switch_to_new_elf_parser { - solana_program::declare_id!("Cdkc8PPTeTNUPoZEfCY5AyetUrEdkZtNPMgz58nqyaHD"); + solana_pubkey::declare_id!("Cdkc8PPTeTNUPoZEfCY5AyetUrEdkZtNPMgz58nqyaHD"); } pub mod round_up_heap_size { - solana_program::declare_id!("CE2et8pqgyQMP2mQRg3CgvX8nJBKUArMu3wfiQiQKY1y"); + solana_pubkey::declare_id!("CE2et8pqgyQMP2mQRg3CgvX8nJBKUArMu3wfiQiQKY1y"); } pub mod remove_bpf_loader_incorrect_program_id { - solana_program::declare_id!("2HmTkCj9tXuPE4ueHzdD7jPeMf9JGCoZh5AsyoATiWEe"); + solana_pubkey::declare_id!("2HmTkCj9tXuPE4ueHzdD7jPeMf9JGCoZh5AsyoATiWEe"); } pub mod include_loaded_accounts_data_size_in_fee_calculation { - solana_program::declare_id!("EaQpmC6GtRssaZ3PCUM5YksGqUdMLeZ46BQXYtHYakDS"); + solana_pubkey::declare_id!("EaQpmC6GtRssaZ3PCUM5YksGqUdMLeZ46BQXYtHYakDS"); } pub mod native_programs_consume_cu { - solana_program::declare_id!("8pgXCMNXC8qyEFypuwpXyRxLXZdpM4Qo72gJ6k87A6wL"); + solana_pubkey::declare_id!("8pgXCMNXC8qyEFypuwpXyRxLXZdpM4Qo72gJ6k87A6wL"); } pub mod simplify_writable_program_account_check { - solana_program::declare_id!("5ZCcFAzJ1zsFKe1KSZa9K92jhx7gkcKj97ci2DBo1vwj"); + solana_pubkey::declare_id!("5ZCcFAzJ1zsFKe1KSZa9K92jhx7gkcKj97ci2DBo1vwj"); } pub mod stop_truncating_strings_in_syscalls { - solana_program::declare_id!("16FMCmgLzCNNz6eTwGanbyN2ZxvTBSLuQ6DZhgeMshg"); + solana_pubkey::declare_id!("16FMCmgLzCNNz6eTwGanbyN2ZxvTBSLuQ6DZhgeMshg"); } pub mod clean_up_delegation_errors { - solana_program::declare_id!("Bj2jmUsM2iRhfdLLDSTkhM5UQRQvQHm57HSmPibPtEyu"); + solana_pubkey::declare_id!("Bj2jmUsM2iRhfdLLDSTkhM5UQRQvQHm57HSmPibPtEyu"); } pub mod vote_state_add_vote_latency { - solana_program::declare_id!("7axKe5BTYBDD87ftzWbk5DfzWMGyRvqmWTduuo22Yaqy"); + solana_pubkey::declare_id!("7axKe5BTYBDD87ftzWbk5DfzWMGyRvqmWTduuo22Yaqy"); } pub mod checked_arithmetic_in_fee_validation { - solana_program::declare_id!("5Pecy6ie6XGm22pc9d4P9W5c31BugcFBuy6hsP2zkETv"); + solana_pubkey::declare_id!("5Pecy6ie6XGm22pc9d4P9W5c31BugcFBuy6hsP2zkETv"); } pub mod last_restart_slot_sysvar { - solana_program::declare_id!("HooKD5NC9QNxk25QuzCssB8ecrEzGt6eXEPBUxWp1LaR"); + solana_pubkey::declare_id!("HooKD5NC9QNxk25QuzCssB8ecrEzGt6eXEPBUxWp1LaR"); } pub mod 
reduce_stake_warmup_cooldown { - solana_program::declare_id!("GwtDQBghCTBgmX2cpEGNPxTEBUTQRaDMGTr5qychdGMj"); + solana_pubkey::declare_id!("GwtDQBghCTBgmX2cpEGNPxTEBUTQRaDMGTr5qychdGMj"); } mod revise_turbine_epoch_stakes { - solana_program::declare_id!("BTWmtJC8U5ZLMbBUUA1k6As62sYjPEjAiNAT55xYGdJU"); + solana_pubkey::declare_id!("BTWmtJC8U5ZLMbBUUA1k6As62sYjPEjAiNAT55xYGdJU"); } pub mod enable_poseidon_syscall { - solana_program::declare_id!("FL9RsQA6TVUoh5xJQ9d936RHSebA1NLQqe3Zv9sXZRpr"); + solana_pubkey::declare_id!("FL9RsQA6TVUoh5xJQ9d936RHSebA1NLQqe3Zv9sXZRpr"); } pub mod timely_vote_credits { - solana_program::declare_id!("tvcF6b1TRz353zKuhBjinZkKzjmihXmBAHJdjNYw1sQ"); + solana_pubkey::declare_id!("tvcF6b1TRz353zKuhBjinZkKzjmihXmBAHJdjNYw1sQ"); } pub mod remaining_compute_units_syscall_enabled { - solana_program::declare_id!("5TuppMutoyzhUSfuYdhgzD47F92GL1g89KpCZQKqedxP"); + solana_pubkey::declare_id!("5TuppMutoyzhUSfuYdhgzD47F92GL1g89KpCZQKqedxP"); } pub mod enable_program_runtime_v2_and_loader_v4 { - solana_program::declare_id!("8oBxsYqnCvUTGzgEpxPcnVf7MLbWWPYddE33PftFeBBd"); + solana_pubkey::declare_id!("8oBxsYqnCvUTGzgEpxPcnVf7MLbWWPYddE33PftFeBBd"); } pub mod require_rent_exempt_split_destination { - solana_program::declare_id!("D2aip4BBr8NPWtU9vLrwrBvbuaQ8w1zV38zFLxx4pfBV"); + solana_pubkey::declare_id!("D2aip4BBr8NPWtU9vLrwrBvbuaQ8w1zV38zFLxx4pfBV"); } pub mod better_error_codes_for_tx_lamport_check { - solana_program::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); + solana_pubkey::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); } pub mod update_hashes_per_tick2 { - solana_program::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); + solana_pubkey::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); } pub mod update_hashes_per_tick3 { - solana_program::declare_id!("8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5"); + solana_pubkey::declare_id!("8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5"); } pub mod update_hashes_per_tick4 { - solana_program::declare_id!("8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg"); + solana_pubkey::declare_id!("8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg"); } pub mod update_hashes_per_tick5 { - solana_program::declare_id!("BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4"); + solana_pubkey::declare_id!("BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4"); } pub mod update_hashes_per_tick6 { - solana_program::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); + solana_pubkey::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); } pub mod validate_fee_collector_account { - solana_program::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm"); + solana_pubkey::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm"); } pub mod disable_rent_fees_collection { - solana_program::declare_id!("CJzY83ggJHqPGDq8VisV3U91jDJLuEaALZooBrXtnnLU"); + solana_pubkey::declare_id!("CJzY83ggJHqPGDq8VisV3U91jDJLuEaALZooBrXtnnLU"); } pub mod enable_zk_transfer_with_fee { - solana_program::declare_id!("zkNLP7EQALfC1TYeB3biDU7akDckj8iPkvh9y2Mt2K3"); + solana_pubkey::declare_id!("zkNLP7EQALfC1TYeB3biDU7akDckj8iPkvh9y2Mt2K3"); } pub mod drop_legacy_shreds { - solana_program::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy"); + solana_pubkey::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy"); } pub mod allow_commission_decrease_at_any_time { - solana_program::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); + solana_pubkey::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); } pub mod 
add_new_reserved_account_keys { - solana_program::declare_id!("8U4skmMVnF6k2kMvrWbQuRUT3qQSiTYpSjqmhmgfthZu"); + solana_pubkey::declare_id!("8U4skmMVnF6k2kMvrWbQuRUT3qQSiTYpSjqmhmgfthZu"); } pub mod consume_blockstore_duplicate_proofs { - solana_program::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ"); + solana_pubkey::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ"); } pub mod index_erasure_conflict_duplicate_proofs { - solana_program::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa"); + solana_pubkey::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa"); } pub mod merkle_conflict_duplicate_proofs { - solana_program::declare_id!("mrkPjRg79B2oK2ZLgd7S3AfEJaX9B6gAF3H9aEykRUS"); + solana_pubkey::declare_id!("mrkPjRg79B2oK2ZLgd7S3AfEJaX9B6gAF3H9aEykRUS"); } pub mod disable_bpf_loader_instructions { - solana_program::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn"); + solana_pubkey::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn"); } pub mod enable_zk_proof_from_account { - solana_program::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); + solana_pubkey::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); } pub mod cost_model_requested_write_lock_cost { - solana_program::declare_id!("wLckV1a64ngtcKPRGU4S4grVTestXjmNjxBjaKZrAcn"); + solana_pubkey::declare_id!("wLckV1a64ngtcKPRGU4S4grVTestXjmNjxBjaKZrAcn"); } pub mod enable_gossip_duplicate_proof_ingestion { - solana_program::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); + solana_pubkey::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); } pub mod chained_merkle_conflict_duplicate_proofs { - solana_program::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8"); + solana_pubkey::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8"); } pub mod enable_chained_merkle_shreds { - solana_program::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); + solana_pubkey::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); } pub mod remove_rounding_in_fee_calculation { - solana_program::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); + solana_pubkey::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); } pub mod enable_tower_sync_ix { - solana_program::declare_id!("tSynMCspg4xFiCj1v3TDb4c7crMR5tSBhLz4sF7rrNA"); + solana_pubkey::declare_id!("tSynMCspg4xFiCj1v3TDb4c7crMR5tSBhLz4sF7rrNA"); } pub mod deprecate_unused_legacy_vote_plumbing { - solana_program::declare_id!("6Uf8S75PVh91MYgPQSHnjRAPQq6an5BDv9vomrCwDqLe"); + solana_pubkey::declare_id!("6Uf8S75PVh91MYgPQSHnjRAPQq6an5BDv9vomrCwDqLe"); } pub mod reward_full_priority_fee { - solana_program::declare_id!("3opE3EzAKnUftUDURkzMgwpNgimBAypW1mNDYH4x4Zg7"); + solana_pubkey::declare_id!("3opE3EzAKnUftUDURkzMgwpNgimBAypW1mNDYH4x4Zg7"); } pub mod get_sysvar_syscall_enabled { - solana_program::declare_id!("CLCoTADvV64PSrnR6QXty6Fwrt9Xc6EdxSJE4wLRePjq"); + solana_pubkey::declare_id!("CLCoTADvV64PSrnR6QXty6Fwrt9Xc6EdxSJE4wLRePjq"); } pub mod abort_on_invalid_curve { - solana_program::declare_id!("FuS3FPfJDKSNot99ECLXtp3rueq36hMNStJkPJwWodLh"); + solana_pubkey::declare_id!("FuS3FPfJDKSNot99ECLXtp3rueq36hMNStJkPJwWodLh"); } pub mod migrate_feature_gate_program_to_core_bpf { - solana_program::declare_id!("4eohviozzEeivk1y9UbrnekbAFMDQyJz5JjA9Y6gyvky"); + solana_pubkey::declare_id!("4eohviozzEeivk1y9UbrnekbAFMDQyJz5JjA9Y6gyvky"); } pub mod vote_only_full_fec_sets { - solana_program::declare_id!("ffecLRhhakKSGhMuc6Fz2Lnfq4uT9q3iu9ZsNaPLxPc"); + 
solana_pubkey::declare_id!("ffecLRhhakKSGhMuc6Fz2Lnfq4uT9q3iu9ZsNaPLxPc"); } pub mod migrate_config_program_to_core_bpf { - solana_program::declare_id!("2Fr57nzzkLYXW695UdDxDeR5fhnZWSttZeZYemrnpGFV"); + solana_pubkey::declare_id!("2Fr57nzzkLYXW695UdDxDeR5fhnZWSttZeZYemrnpGFV"); } pub mod enable_get_epoch_stake_syscall { - solana_program::declare_id!("7mScTYkJXsbdrcwTQRs7oeCSXoJm4WjzBsRyf8bCU3Np"); + solana_pubkey::declare_id!("7mScTYkJXsbdrcwTQRs7oeCSXoJm4WjzBsRyf8bCU3Np"); } pub mod migrate_address_lookup_table_program_to_core_bpf { - solana_program::declare_id!("C97eKZygrkU4JxJsZdjgbUY7iQR7rKTr4NyDWo2E5pRm"); + solana_pubkey::declare_id!("C97eKZygrkU4JxJsZdjgbUY7iQR7rKTr4NyDWo2E5pRm"); } pub mod zk_elgamal_proof_program_enabled { - solana_program::declare_id!("zkhiy5oLowR7HY4zogXjCjeMXyruLqBwSWH21qcFtnv"); + solana_pubkey::declare_id!("zkhiy5oLowR7HY4zogXjCjeMXyruLqBwSWH21qcFtnv"); } pub mod verify_retransmitter_signature { - solana_program::declare_id!("BZ5g4hRbu5hLQQBdPyo2z9icGyJ8Khiyj3QS6dhWijTb"); + solana_pubkey::declare_id!("BZ5g4hRbu5hLQQBdPyo2z9icGyJ8Khiyj3QS6dhWijTb"); } pub mod move_stake_and_move_lamports_ixs { - solana_program::declare_id!("7bTK6Jis8Xpfrs8ZoUfiMDPazTcdPcTWheZFJTA5Z6X4"); + solana_pubkey::declare_id!("7bTK6Jis8Xpfrs8ZoUfiMDPazTcdPcTWheZFJTA5Z6X4"); } pub mod ed25519_precompile_verify_strict { - solana_program::declare_id!("ed9tNscbWLYBooxWA7FE2B5KHWs8A6sxfY8EzezEcoo"); + solana_pubkey::declare_id!("ed9tNscbWLYBooxWA7FE2B5KHWs8A6sxfY8EzezEcoo"); } pub mod vote_only_retransmitter_signed_fec_sets { - solana_program::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); + solana_pubkey::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); } pub mod move_precompile_verification_to_svm { - solana_program::declare_id!("9ypxGLzkMxi89eDerRKXWDXe44UY2z4hBig4mDhNq5Dp"); + solana_pubkey::declare_id!("9ypxGLzkMxi89eDerRKXWDXe44UY2z4hBig4mDhNq5Dp"); } pub mod enable_transaction_loading_failure_fees { - solana_program::declare_id!("PaymEPK2oqwT9TXAVfadjztH2H6KfLEB9Hhd5Q5frvP"); + solana_pubkey::declare_id!("PaymEPK2oqwT9TXAVfadjztH2H6KfLEB9Hhd5Q5frvP"); } pub mod enable_turbine_extended_fanout_experiments { - solana_program::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); + solana_pubkey::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); } pub mod deprecate_legacy_vote_ixs { - solana_program::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); + solana_pubkey::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); } pub mod disable_sbpf_v1_execution { - solana_program::declare_id!("TestFeature11111111111111111111111111111111"); + solana_pubkey::declare_id!("TestFeature11111111111111111111111111111111"); } pub mod reenable_sbpf_v1_execution { - solana_program::declare_id!("TestFeature21111111111111111111111111111111"); + solana_pubkey::declare_id!("TestFeature21111111111111111111111111111111"); } lazy_static! 
{ @@ -1173,7 +1171,7 @@ impl FeatureSet { } /// Activate a feature - pub fn activate(&mut self, feature_id: &Pubkey, slot: u64) { + pub fn activate(&mut self, feature_id: &Pubkey, slot: Slot) { self.inactive.remove(feature_id); self.active.insert(*feature_id, slot); } From d08ef44fd8d04f7b293304f96317860e83b8e32a Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 15 Oct 2024 16:27:09 +0400 Subject: [PATCH 506/529] Move `solana_sdk::pubkey` functions to solana-pubkey (#2980) * move functions from solana_sdk::pubkey to solana_pubkey * fix test * remove serde_json from write_pubkey_file * remove serde_json * fix import * put new_rand behind a separate feature * lint * fix imports * fmt * deprecate sdk::pubkey::{read_pubkey_file, write_pubkey_file} and duplicate in solana-keygen * lint --- Cargo.lock | 1 + keygen/Cargo.toml | 1 + keygen/src/keygen.rs | 42 +++++++++++++++++++++++++++++++++++++---- programs/sbf/Cargo.lock | 1 + sdk/Cargo.toml | 3 ++- sdk/pubkey/Cargo.toml | 5 +++-- sdk/pubkey/src/lib.rs | 6 ++++++ sdk/src/pubkey.rs | 35 ++++++++++------------------------ 8 files changed, 62 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9fc78c2b7ca74..7492ea20dc02f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6876,6 +6876,7 @@ dependencies = [ "clap 3.2.23", "dirs-next", "num_cpus", + "serde_json", "solana-clap-v3-utils", "solana-cli-config", "solana-derivation-path", diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml index 4dd74305996883..93c5e4f2e4f064 100644 --- a/keygen/Cargo.toml +++ b/keygen/Cargo.toml @@ -14,6 +14,7 @@ bs58 = { workspace = true } clap = { version = "3.1.5", features = ["cargo"] } dirs-next = { workspace = true } num_cpus = { workspace = true } +serde_json = { workspace = true } solana-clap-v3-utils = { workspace = true } solana-cli-config = { workspace = true } solana-derivation-path = { workspace = true } diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index cbccb33e9a9095..6f0b9ff93adc18 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -30,7 +30,7 @@ use { solana_sdk::{ instruction::{AccountMeta, Instruction}, message::Message, - pubkey::{write_pubkey_file, Pubkey}, + pubkey::Pubkey, signature::{ keypair_from_seed, keypair_from_seed_and_derivation_path, write_keypair, write_keypair_file, Keypair, Signer, @@ -424,6 +424,21 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { ) } +fn write_pubkey_file(outfile: &str, pubkey: Pubkey) -> Result<(), Box> { + use std::io::Write; + + let printable = format!("{pubkey}"); + let serialized = serde_json::to_string(&printable)?; + + if let Some(outdir) = std::path::Path::new(&outfile).parent() { + std::fs::create_dir_all(outdir)?; + } + let mut f = std::fs::File::create(outfile)?; + f.write_all(&serialized.into_bytes())?; + + Ok(()) +} + fn main() -> Result<(), Box> { let default_num_threads = num_cpus::get().to_string(); let matches = app(&default_num_threads, solana_version::version!()) @@ -768,6 +783,14 @@ mod tests { tempfile::{tempdir, TempDir}, }; + fn read_pubkey_file(infile: &str) -> Result> { + let f = std::fs::File::open(infile)?; + let printable: String = serde_json::from_reader(f)?; + + use std::str::FromStr; + Ok(Pubkey::from_str(&printable)?) 
+ } + fn process_test_command(args: &[&str]) -> Result<(), Box> { let default_num_threads = num_cpus::get().to_string(); let solana_version = solana_version::version!(); @@ -919,7 +942,7 @@ mod tests { ]) .unwrap(); - let result_pubkey = solana_sdk::pubkey::read_pubkey_file(&outfile_path).unwrap(); + let result_pubkey = read_pubkey_file(&outfile_path).unwrap(); assert_eq!(result_pubkey, expected_pubkey); } @@ -938,7 +961,7 @@ mod tests { ]) .unwrap(); - let result_pubkey = solana_sdk::pubkey::read_pubkey_file(&outfile_path).unwrap(); + let result_pubkey = read_pubkey_file(&outfile_path).unwrap(); assert_eq!(result_pubkey, expected_pubkey); } @@ -962,7 +985,7 @@ mod tests { ]) .unwrap(); - let result_pubkey = solana_sdk::pubkey::read_pubkey_file(&outfile_path).unwrap(); + let result_pubkey = read_pubkey_file(&outfile_path).unwrap(); assert_eq!(result_pubkey, expected_pubkey); } @@ -1129,4 +1152,15 @@ mod tests { ]) .unwrap(); } + + #[test] + fn test_read_write_pubkey() -> Result<(), std::boxed::Box> { + let filename = "test_pubkey.json"; + let pubkey = solana_sdk::pubkey::new_rand(); + write_pubkey_file(filename, pubkey)?; + let read = read_pubkey_file(filename)?; + assert_eq!(read, pubkey); + std::fs::remove_file(filename)?; + Ok(()) + } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a6a832fbbd3621..e16c7d4a3a926c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5828,6 +5828,7 @@ dependencies = [ "getrandom 0.2.10", "js-sys", "num-traits", + "rand 0.8.5", "serde", "serde_derive", "solana-atomic-u64", diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 10eb2b0f8a6c8a..938e21a2aed8c0 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -33,6 +33,7 @@ full = [ "libsecp256k1", "sha3", "digest", + "solana-pubkey/rand", ] borsh = ["dep:borsh", "solana-program/borsh", "solana-secp256k1-recover/borsh"] dev-context-only-utils = ["qualifier_attr", "solana-account/dev-context-only-utils"] @@ -97,7 +98,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ ] } solana-program = { workspace = true } solana-program-memory = { workspace = true } -solana-pubkey = { workspace = true } +solana-pubkey = { workspace = true, default-features = false, features = ["std"] } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } diff --git a/sdk/pubkey/Cargo.toml b/sdk/pubkey/Cargo.toml index 50d7a5ed6bdef7..cdafef8a939f15 100644 --- a/sdk/pubkey/Cargo.toml +++ b/sdk/pubkey/Cargo.toml @@ -18,6 +18,7 @@ bytemuck = { workspace = true, optional = true } bytemuck_derive = { workspace = true, optional = true } five8_const = { workspace = true } num-traits = { workspace = true } +rand = { workspace = true, optional = true } serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } solana-atomic-u64 = { workspace = true } @@ -47,7 +48,6 @@ wasm-bindgen = { workspace = true } anyhow = { workspace = true } arbitrary = { workspace = true, features = ["derive"] } bs58 = { workspace = true, features = ["alloc"] } -rand = { workspace = true } # circular dev deps need to be path deps for `cargo publish` to be happy, # and for now the doc tests need solana-program solana-program = { path = "../program" } @@ -65,11 +65,12 @@ borsh = ["dep:borsh", "dep:borsh0-10", "std"] bytemuck = ["dep:bytemuck", "dep:bytemuck_derive"] curve25519 = ["dep:curve25519-dalek", "sha2"] default = ["std"] -dev-context-only-utils = ["dep:arbitrary", "std"] 
+dev-context-only-utils = ["dep:arbitrary", "rand"] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro" ] +rand = ["dep:rand", "std"] serde = ["dep:serde", "dep:serde_derive"] sha2 = ["dep:solana-sha256-hasher", "solana-sha256-hasher/sha2"] std = [] diff --git a/sdk/pubkey/src/lib.rs b/sdk/pubkey/src/lib.rs index e378c44caf3f06..d603d6a97b3b9b 100644 --- a/sdk/pubkey/src/lib.rs +++ b/sdk/pubkey/src/lib.rs @@ -1114,6 +1114,12 @@ macro_rules! pubkey { }; } +/// New random Pubkey for tests and benchmarks. +#[cfg(all(feature = "rand", not(target_os = "solana")))] +pub fn new_rand() -> Pubkey { + Pubkey::from(rand::random::<[u8; PUBKEY_BYTES]>()) +} + #[cfg(test)] mod tests { use {super::*, strum::IntoEnumIterator}; diff --git a/sdk/src/pubkey.rs b/sdk/src/pubkey.rs index 92d1365d03c5bf..344f0698444cb2 100644 --- a/sdk/src/pubkey.rs +++ b/sdk/src/pubkey.rs @@ -1,13 +1,13 @@ -//! Solana account addresses. - -pub use solana_program::pubkey::*; - -/// New random Pubkey for tests and benchmarks. #[cfg(feature = "full")] -pub fn new_rand() -> Pubkey { - Pubkey::from(rand::random::<[u8; PUBKEY_BYTES]>()) -} - +pub use solana_pubkey::new_rand; +#[cfg(target_os = "solana")] +pub use solana_pubkey::syscalls; +pub use solana_pubkey::{ + bytes_are_curve_point, ParsePubkeyError, Pubkey, PubkeyError, MAX_SEEDS, MAX_SEED_LEN, + PUBKEY_BYTES, +}; + +#[deprecated(since = "2.1.0")] #[cfg(feature = "full")] pub fn write_pubkey_file(outfile: &str, pubkey: Pubkey) -> Result<(), Box> { use std::io::Write; @@ -24,6 +24,7 @@ pub fn write_pubkey_file(outfile: &str, pubkey: Pubkey) -> Result<(), Box Result> { let f = std::fs::File::open(infile)?; @@ -32,19 +33,3 @@ pub fn read_pubkey_file(infile: &str) -> Result Result<(), Box> { - let filename = "test_pubkey.json"; - let pubkey = solana_sdk::pubkey::new_rand(); - write_pubkey_file(filename, pubkey)?; - let read = read_pubkey_file(filename)?; - assert_eq!(read, pubkey); - remove_file(filename)?; - Ok(()) - } -} From 19c1e1be709fdd96ff441dd46fcd02bda1eafe01 Mon Sep 17 00:00:00 2001 From: Joe C Date: Tue, 15 Oct 2024 20:38:16 +0700 Subject: [PATCH 507/529] SVM: paytube: update cache prep for token program (#3168) --- svm/examples/paytube/src/processor.rs | 48 +++++++++++---------------- 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/svm/examples/paytube/src/processor.rs b/svm/examples/paytube/src/processor.rs index 71eaccc956826b..03a8336209ca29 100644 --- a/svm/examples/paytube/src/processor.rs +++ b/svm/examples/paytube/src/processor.rs @@ -3,17 +3,18 @@ use { solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, solana_compute_budget::compute_budget::ComputeBudget, - solana_program_runtime::loaded_programs::{ - BlockRelation, ForkGraph, LoadProgramMetrics, ProgramCacheEntry, - }, - solana_sdk::{account::ReadableAccount, clock::Slot, feature_set::FeatureSet, transaction}, + solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph, ProgramCacheEntry}, + solana_sdk::{clock::Slot, feature_set::FeatureSet, transaction}, solana_svm::{ account_loader::CheckedTransactionDetails, transaction_processing_callback::TransactionProcessingCallback, transaction_processor::TransactionBatchProcessor, }, solana_system_program::system_processor, - std::sync::{Arc, RwLock}, + std::{ + collections::HashSet, + sync::{Arc, RwLock}, + }, }; /// In order to use the `TransactionBatchProcessor`, another trait - Solana @@ -40,13 +41,25 @@ pub(crate) fn create_transaction_batch_processor>, ) -> TransactionBatchProcessor { - let 
processor = TransactionBatchProcessor::<PayTubeForkGraph>::default(); + // Create a new transaction batch processor. + // + // We're going to use slot 1 specifically because any programs we add will + // be deployed in slot 0, and their visibility is delayed until the next + // slot (1). + // This includes programs owned by BPF Loader v2, which are automatically + // marked as "deployed" in slot 0. + // See `solana_svm::program_loader::load_program_with_pubkey` for more + // details. + let processor = TransactionBatchProcessor::<PayTubeForkGraph>::new( + /* slot */ 1, + /* epoch */ 1, + /* builtin_program_ids */ HashSet::new(), + ); { let mut cache = processor.program_cache.write().unwrap(); // Initialize the mocked fork graph. - // let fork_graph = Arc::new(RwLock::new(PayTubeForkGraph {})); cache.fork_graph = Some(Arc::downgrade(&fork_graph)); // Initialize a proper cache environment. @@ -55,27 +68,6 @@ pub(crate) fn create_transaction_batch_processor Date: Tue, 15 Oct 2024 07:22:53 -0700 Subject: [PATCH 508/529] svm: clear read-only non-signers for test txns (#3157) --- svm/tests/transaction_builder.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/svm/tests/transaction_builder.rs b/svm/tests/transaction_builder.rs index 803487773a63ec..664ea6237120cb 100644 --- a/svm/tests/transaction_builder.rs +++ b/svm/tests/transaction_builder.rs @@ -26,7 +26,7 @@ pub struct SanitizedTransactionBuilder { signed_readonly_accounts: Vec<(Pubkey, Signature)>, signed_mutable_accounts: Vec<(Pubkey, Signature)>, unsigned_readonly_accounts: Vec<Pubkey>, - unsigned_mutable_account: Vec<Pubkey>, + unsigned_mutable_accounts: Vec<Pubkey>, } #[derive(PartialEq, Eq, Hash, Clone)] @@ -91,7 +91,7 @@ impl SanitizedTransactionBuilder { AccountType::SignerReadonly } (false, true) => { - self.unsigned_mutable_account.push(item.pubkey); + self.unsigned_mutable_accounts.push(item.pubkey); AccountType::Writable } (false, false) => { @@ -117,7 +117,7 @@ impl SanitizedTransactionBuilder { self.signed_mutable_accounts .len() .saturating_add(self.signed_readonly_accounts.len()) - .saturating_add(self.unsigned_mutable_account.len()) + .saturating_add(self.unsigned_mutable_accounts.len()) .saturating_add(self.unsigned_readonly_accounts.len()) .saturating_add(1), ); @@ -159,7 +159,7 @@ impl SanitizedTransactionBuilder { positions_lambda(key, AccountType::SignerReadonly); signatures.push(*signature); }); - self.unsigned_mutable_account + self.unsigned_mutable_accounts .iter() .for_each(|key| positions_lambda(key, AccountType::Writable)); self.unsigned_readonly_accounts @@ -232,8 +232,8 @@ impl SanitizedTransactionBuilder { self.num_readonly_unsigned_accounts = 0; self.signed_mutable_accounts.clear(); self.signed_readonly_accounts.clear(); - self.unsigned_mutable_account.clear(); - self.unsigned_mutable_account.clear(); + self.unsigned_mutable_accounts.clear(); + self.unsigned_readonly_accounts.clear(); instructions } From a018567931bebd6b71b818eedb37c1b62c63c889 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 15 Oct 2024 10:58:03 -0400 Subject: [PATCH 509/529] Supports accounts lt hash in ledger-tool (#3173) --- ledger-tool/src/args.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index e9149f7b9cab4b..61d3208fe848aa 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -345,6 +345,8 @@ pub fn get_accounts_db_config( create_ancient_storage, storage_access, scan_filter_for_shrinking, + enable_experimental_accumulator_hash: arg_matches +
.is_present("accounts_db_experimental_accumulator_hash"), ..AccountsDbConfig::default() } } From 7b0a57316d0626312a704ee5b50603123d959f7c Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 15 Oct 2024 10:25:36 -0500 Subject: [PATCH 510/529] Scheduler: Improve TTL (#3161) Co-authored-by: Justin Starry --- accounts-db/src/accounts.rs | 42 ++-- core/src/banking_stage/consume_worker.rs | 223 ++++++++++++++++-- core/src/banking_stage/consumer.rs | 32 ++- .../immutable_deserialized_packet.rs | 42 +++- .../banking_stage/latest_unprocessed_votes.rs | 2 +- core/src/banking_stage/scheduler_messages.rs | 8 +- .../prio_graph_scheduler.rs | 46 ++-- .../scheduler_controller.rs | 127 ++++++++-- .../transaction_state.rs | 46 +++- .../transaction_state_container.rs | 6 +- .../unprocessed_packet_batches.rs | 16 +- .../unprocessed_transaction_storage.rs | 8 +- runtime/src/bank/address_lookup_table.rs | 18 +- sdk/program/src/address_lookup_table/state.rs | 14 ++ 14 files changed, 505 insertions(+), 125 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 2584f900edbc49..cce35988aff69d 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -81,12 +81,14 @@ impl Accounts { } } + /// Return loaded addresses and the deactivation slot. + /// If the table hasn't been deactivated, the deactivation slot is `u64::MAX`. pub fn load_lookup_table_addresses( &self, ancestors: &Ancestors, address_table_lookup: SVMMessageAddressTableLookup, slot_hashes: &SlotHashes, - ) -> std::result::Result { + ) -> std::result::Result<(LoadedAddresses, Slot), AddressLookupError> { let table_account = self .accounts_db .load_with_fixed_root(ancestors, address_table_lookup.account_key) @@ -98,18 +100,21 @@ impl Accounts { let lookup_table = AddressLookupTable::deserialize(table_account.data()) .map_err(|_ix_err| AddressLookupError::InvalidAccountData)?; - Ok(LoadedAddresses { - writable: lookup_table.lookup( - current_slot, - address_table_lookup.writable_indexes, - slot_hashes, - )?, - readonly: lookup_table.lookup( - current_slot, - address_table_lookup.readonly_indexes, - slot_hashes, - )?, - }) + Ok(( + LoadedAddresses { + writable: lookup_table.lookup( + current_slot, + address_table_lookup.writable_indexes, + slot_hashes, + )?, + readonly: lookup_table.lookup( + current_slot, + address_table_lookup.readonly_indexes, + slot_hashes, + )?, + }, + lookup_table.meta.deactivation_slot, + )) } else { Err(AddressLookupError::InvalidAccountOwner) } @@ -806,10 +811,13 @@ mod tests { SVMMessageAddressTableLookup::from(&address_table_lookup), &SlotHashes::default(), ), - Ok(LoadedAddresses { - writable: vec![table_addresses[0]], - readonly: vec![table_addresses[1]], - }), + Ok(( + LoadedAddresses { + writable: vec![table_addresses[0]], + readonly: vec![table_addresses[1]], + }, + u64::MAX + )), ); } diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index b676168bb04d4d..787901ffa521f8 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -107,7 +107,7 @@ impl ConsumeWorker { let output = self.consumer.process_and_record_aged_transactions( bank, &work.transactions, - &work.max_age_slots, + &work.max_ages, ); self.metrics.update_for_consume(&output); @@ -694,7 +694,7 @@ mod tests { crate::banking_stage::{ committer::Committer, qos_service::QosService, - scheduler_messages::{TransactionBatchId, TransactionId}, + scheduler_messages::{MaxAge, TransactionBatchId, TransactionId}, 
tests::{create_slow_genesis_config, sanitize_transactions, simulate_poh}, }, crossbeam_channel::unbounded, @@ -708,10 +708,25 @@ mod tests { vote_sender_types::ReplayVoteReceiver, }, solana_sdk::{ - genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, - signature::Keypair, system_transaction, + address_lookup_table::AddressLookupTableAccount, + clock::{Slot, MAX_PROCESSING_AGE}, + genesis_config::GenesisConfig, + message::{ + v0::{self, LoadedAddresses}, + SimpleAddressLoader, VersionedMessage, + }, + poh_config::PohConfig, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction, system_transaction, + transaction::{ + MessageHash, SanitizedTransaction, TransactionError, VersionedTransaction, + }, }, + solana_svm_transaction::svm_message::SVMMessage, std::{ + collections::HashSet, sync::{atomic::AtomicBool, RwLock}, thread::JoinHandle, }, @@ -742,6 +757,7 @@ mod tests { .. } = create_slow_genesis_config(10_000); let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); + let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), 1)); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) @@ -820,17 +836,21 @@ mod tests { )]); let bid = TransactionBatchId::new(0); let id = TransactionId::new(0); + let max_age = MaxAge { + epoch_invalidation_slot: bank.slot(), + alt_invalidation_slot: bank.slot(), + }; let work = ConsumeWork { batch_id: bid, ids: vec![id], transactions, - max_age_slots: vec![bank.slot()], + max_ages: vec![max_age], }; consume_sender.send(work).unwrap(); let consumed = consumed_receiver.recv().unwrap(); assert_eq!(consumed.work.batch_id, bid); assert_eq!(consumed.work.ids, vec![id]); - assert_eq!(consumed.work.max_age_slots, vec![bank.slot()]); + assert_eq!(consumed.work.max_ages, vec![max_age]); assert_eq!(consumed.retryable_indexes, vec![0]); drop(test_frame); @@ -865,17 +885,21 @@ mod tests { )]); let bid = TransactionBatchId::new(0); let id = TransactionId::new(0); + let max_age = MaxAge { + epoch_invalidation_slot: bank.slot(), + alt_invalidation_slot: bank.slot(), + }; let work = ConsumeWork { batch_id: bid, ids: vec![id], transactions, - max_age_slots: vec![bank.slot()], + max_ages: vec![max_age], }; consume_sender.send(work).unwrap(); let consumed = consumed_receiver.recv().unwrap(); assert_eq!(consumed.work.batch_id, bid); assert_eq!(consumed.work.ids, vec![id]); - assert_eq!(consumed.work.max_age_slots, vec![bank.slot()]); + assert_eq!(consumed.work.max_ages, vec![max_age]); assert_eq!(consumed.retryable_indexes, Vec::::new()); drop(test_frame); @@ -911,19 +935,23 @@ mod tests { let bid = TransactionBatchId::new(0); let id1 = TransactionId::new(1); let id2 = TransactionId::new(0); + let max_age = MaxAge { + epoch_invalidation_slot: bank.slot(), + alt_invalidation_slot: bank.slot(), + }; consume_sender .send(ConsumeWork { batch_id: bid, ids: vec![id1, id2], transactions: txs, - max_age_slots: vec![bank.slot(), bank.slot()], + max_ages: vec![max_age, max_age], }) .unwrap(); let consumed = consumed_receiver.recv().unwrap(); assert_eq!(consumed.work.batch_id, bid); assert_eq!(consumed.work.ids, vec![id1, id2]); - assert_eq!(consumed.work.max_age_slots, vec![bank.slot(), bank.slot()]); + assert_eq!(consumed.work.max_ages, vec![max_age, max_age]); assert_eq!(consumed.retryable_indexes, vec![1]); // id2 is retryable since lock conflict drop(test_frame); @@ -968,12 +996,16 @@ mod tests { let bid2 = TransactionBatchId::new(1); let id1 = 
TransactionId::new(1); let id2 = TransactionId::new(0); + let max_age = MaxAge { + epoch_invalidation_slot: bank.slot(), + alt_invalidation_slot: bank.slot(), + }; consume_sender .send(ConsumeWork { batch_id: bid1, ids: vec![id1], transactions: txs1, - max_age_slots: vec![bank.slot()], + max_ages: vec![max_age], }) .unwrap(); @@ -982,22 +1014,185 @@ mod tests { batch_id: bid2, ids: vec![id2], transactions: txs2, - max_age_slots: vec![bank.slot()], + max_ages: vec![max_age], }) .unwrap(); let consumed = consumed_receiver.recv().unwrap(); assert_eq!(consumed.work.batch_id, bid1); assert_eq!(consumed.work.ids, vec![id1]); - assert_eq!(consumed.work.max_age_slots, vec![bank.slot()]); + assert_eq!(consumed.work.max_ages, vec![max_age]); assert_eq!(consumed.retryable_indexes, Vec::::new()); let consumed = consumed_receiver.recv().unwrap(); assert_eq!(consumed.work.batch_id, bid2); assert_eq!(consumed.work.ids, vec![id2]); - assert_eq!(consumed.work.max_age_slots, vec![bank.slot()]); + assert_eq!(consumed.work.max_ages, vec![max_age]); assert_eq!(consumed.retryable_indexes, Vec::::new()); drop(test_frame); let _ = worker_thread.join().unwrap(); } + + #[test] + fn test_worker_ttl() { + let (test_frame, worker) = setup_test_frame(); + let TestFrame { + mint_keypair, + genesis_config, + bank, + poh_recorder, + consume_sender, + consumed_receiver, + .. + } = &test_frame; + let worker_thread = std::thread::spawn(move || worker.run()); + poh_recorder + .write() + .unwrap() + .set_bank_for_test(bank.clone()); + assert!(bank.slot() > 0); + + // No conflicts between transactions. Test 6 cases. + // 1. Epoch expiration, before slot => still succeeds due to resanitizing + // 2. Epoch expiration, on slot => succeeds normally + // 3. Epoch expiration, after slot => succeeds normally + // 4. ALT expiration, before slot => fails + // 5. ALT expiration, on slot => succeeds normally + // 6. ALT expiration, after slot => succeeds normally + let simple_transfer = || { + system_transaction::transfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + genesis_config.hash(), + ) + }; + let simple_v0_transfer = || { + let payer = Keypair::new(); + let to_pubkey = Pubkey::new_unique(); + let loaded_addresses = LoadedAddresses { + writable: vec![to_pubkey], + readonly: vec![], + }; + let loader = SimpleAddressLoader::Enabled(loaded_addresses); + SanitizedTransaction::try_create( + VersionedTransaction::try_new( + VersionedMessage::V0( + v0::Message::try_compile( + &payer.pubkey(), + &[system_instruction::transfer(&payer.pubkey(), &to_pubkey, 1)], + &[AddressLookupTableAccount { + key: Pubkey::new_unique(), // will fail if using **bank** to lookup + addresses: vec![to_pubkey], + }], + genesis_config.hash(), + ) + .unwrap(), + ), + &[&payer], + ) + .unwrap(), + MessageHash::Compute, + None, + loader, + &HashSet::default(), + ) + .unwrap() + }; + + let mut txs = sanitize_transactions(vec![ + simple_transfer(), + simple_transfer(), + simple_transfer(), + ]); + txs.push(simple_v0_transfer()); + txs.push(simple_v0_transfer()); + txs.push(simple_v0_transfer()); + let sanitized_txs = txs.clone(); + + // Fund the keypairs. 
+ for tx in &txs { + bank.process_transaction(&system_transaction::transfer( + mint_keypair, + &tx.account_keys()[0], + 2, + genesis_config.hash(), + )) + .unwrap(); + } + + consume_sender + .send(ConsumeWork { + batch_id: TransactionBatchId::new(1), + ids: vec![ + TransactionId::new(0), + TransactionId::new(1), + TransactionId::new(2), + TransactionId::new(3), + TransactionId::new(4), + TransactionId::new(5), + ], + transactions: txs, + max_ages: vec![ + MaxAge { + epoch_invalidation_slot: bank.slot() - 1, + alt_invalidation_slot: Slot::MAX, + }, + MaxAge { + epoch_invalidation_slot: bank.slot(), + alt_invalidation_slot: Slot::MAX, + }, + MaxAge { + epoch_invalidation_slot: bank.slot() + 1, + alt_invalidation_slot: Slot::MAX, + }, + MaxAge { + epoch_invalidation_slot: u64::MAX, + alt_invalidation_slot: bank.slot() - 1, + }, + MaxAge { + epoch_invalidation_slot: u64::MAX, + alt_invalidation_slot: bank.slot(), + }, + MaxAge { + epoch_invalidation_slot: u64::MAX, + alt_invalidation_slot: bank.slot() + 1, + }, + ], + }) + .unwrap(); + + let consumed = consumed_receiver.recv().unwrap(); + assert_eq!(consumed.retryable_indexes, Vec::<usize>::new()); + // all but one succeed. 6 for initial funding + assert_eq!(bank.transaction_count(), 6 + 5); + + let already_processed_results = bank + .check_transactions( + &sanitized_txs, + &vec![Ok(()); sanitized_txs.len()], + MAX_PROCESSING_AGE, + &mut TransactionErrorMetrics::default(), + ) + .into_iter() + .map(|r| match r { + Ok(_) => Ok(()), + Err(err) => Err(err), + }) + .collect::<Vec<_>>(); + assert_eq!( + already_processed_results, + vec![ + Err(TransactionError::AlreadyProcessed), + Err(TransactionError::AlreadyProcessed), + Err(TransactionError::AlreadyProcessed), + Ok(()), // <--- this transaction was not processed + Err(TransactionError::AlreadyProcessed), + Err(TransactionError::AlreadyProcessed) + ] + ); + + drop(test_frame); + let _ = worker_thread.join().unwrap(); + } } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 57f1b1958b152c..ae04cc30ff0167 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -7,6 +7,7 @@ use { }, leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, qos_service::QosService, + scheduler_messages::MaxAge, unprocessed_transaction_storage::{ConsumeScannerPayload, UnprocessedTransactionStorage}, BankingStageStats, }, @@ -25,12 +26,12 @@ use { }, solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ - clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, + clock::{FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, fee::FeeBudgetLimits, message::SanitizedMessage, saturating_add_assign, timing::timestamp, - transaction::{self, AddressLoader, SanitizedTransaction, TransactionError}, + transaction::{self, SanitizedTransaction, TransactionError}, }, solana_svm::{ account_loader::{validate_fee_payer, TransactionCheckResult}, @@ -429,7 +430,7 @@ impl Consumer { &self, bank: &Arc<Bank>, txs: &[SanitizedTransaction], - max_slot_ages: &[Slot], + max_ages: &[MaxAge], ) -> ProcessTransactionBatchOutput { let move_precompile_verification_to_svm = bank .feature_set .is_active(&feature_set::move_precompile_verification_to_svm::id()); // Need to filter out transactions since they were sanitized earlier. // This means that the transaction may cross an epoch boundary (not allowed), // or account lookup tables may have been closed.
- let pre_results = txs.iter().zip(max_slot_ages).map(|(tx, max_slot_age)| { - if *max_slot_age < bank.slot() { + let pre_results = txs.iter().zip(max_ages).map(|(tx, max_age)| { + if bank.slot() > max_age.epoch_invalidation_slot { + // Epoch has rolled over. Need to fully re-verify the transaction. // Pre-compiles are verified here. // Attempt re-sanitization after epoch-cross. // Re-sanitized transaction should be equal to the original transaction, @@ -451,18 +453,24 @@ impl Consumer { return Err(TransactionError::ResanitizationNeeded); } } else { + if bank.slot() > max_age.alt_invalidation_slot { + // The address table lookup **may** have expired, but the + // expiration is not guaranteed since there may have been + // skipped slots. + // If the addresses still resolve here, then the transaction is still + // valid, and we can continue with processing. + // If they do not, then the ATL has expired and the transaction + // can be dropped. + let (_addresses, _deactivation_slot) = + bank.load_addresses_from_ref(tx.message_address_table_lookups())?; + } + // Verify pre-compiles. if !move_precompile_verification_to_svm { verify_precompiles(tx, &bank.feature_set)?; } - - // Any transaction executed between sanitization time and now may have closed the lookup table(s). - // Above re-sanitization already loads addresses, so don't need to re-check in that case. - let lookup_tables = tx.message().message_address_table_lookups(); - if !lookup_tables.is_empty() { - bank.load_addresses(lookup_tables)?; - } } + Ok(()) }); self.process_and_record_transactions_with_pre_results(bank, txs, 0, pre_results) diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index b03f3d5d64d4e8..978e4f9b935c7e 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -2,20 +2,21 @@ use { super::packet_filter::PacketFilterFailure, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_perf::packet::Packet, + solana_runtime::bank::Bank, solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sanitize::SanitizeError, solana_sdk::{ + clock::Slot, hash::Hash, - message::Message, + message::{v0::LoadedAddresses, AddressLoaderError, Message, SimpleAddressLoader}, pubkey::Pubkey, signature::Signature, - transaction::{ - AddressLoader, SanitizedTransaction, SanitizedVersionedTransaction, - VersionedTransaction, - }, + transaction::{SanitizedTransaction, SanitizedVersionedTransaction, VersionedTransaction}, }, solana_short_vec::decode_shortu16_len, - solana_svm_transaction::instruction::SVMInstruction, + solana_svm_transaction::{ + instruction::SVMInstruction, message_address_table_lookup::SVMMessageAddressTableLookup, + }, std::{cmp::Ordering, collections::HashSet, mem::size_of}, thiserror::Error, }; @@ -111,15 +112,22 @@ impl ImmutableDeserializedPacket { // This function deserializes packets into transactions, computes the blake3 hash of transaction // messages. + // Additionally, this returns the minimum deactivation slot of the resolved addresses.
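+ // A returned deactivation slot of `Slot::MAX` means that none of the transaction's lookup tables is being deactivated (see `resolve_addresses_with_deactivation` below).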
pub fn build_sanitized_transaction( &self, votes_only: bool, - address_loader: impl AddressLoader, + bank: &Bank, reserved_account_keys: &HashSet, - ) -> Option { + ) -> Option<(SanitizedTransaction, Slot)> { if votes_only && !self.is_simple_vote() { return None; } + + // Resolve the lookup addresses and retrieve the min deactivation slot + let (loaded_addresses, deactivation_slot) = + Self::resolve_addresses_with_deactivation(self.transaction(), bank).ok()?; + let address_loader = SimpleAddressLoader::Enabled(loaded_addresses); + let tx = SanitizedTransaction::try_new( self.transaction().clone(), *self.message_hash(), @@ -128,7 +136,23 @@ impl ImmutableDeserializedPacket { reserved_account_keys, ) .ok()?; - Some(tx) + Some((tx, deactivation_slot)) + } + + fn resolve_addresses_with_deactivation( + transaction: &SanitizedVersionedTransaction, + bank: &Bank, + ) -> Result<(LoadedAddresses, Slot), AddressLoaderError> { + let Some(address_table_lookups) = transaction.get_message().message.address_table_lookups() + else { + return Ok((LoadedAddresses::default(), Slot::MAX)); + }; + + bank.load_addresses_from_ref( + address_table_lookups + .iter() + .map(SVMMessageAddressTableLookup::from), + ) } } diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index bb97142bda5e81..ae13c37caa5a0e 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -420,7 +420,7 @@ impl LatestUnprocessedVotes { } let deserialized_vote_packet = vote.vote.as_ref().unwrap().clone(); - let Some(sanitized_vote_transaction) = deserialized_vote_packet + let Some((sanitized_vote_transaction, _deactivation_slot)) = deserialized_vote_packet .build_sanitized_transaction( bank.vote_only_bank(), bank.as_ref(), diff --git a/core/src/banking_stage/scheduler_messages.rs b/core/src/banking_stage/scheduler_messages.rs index d93d2d6dbb6c52..29e9b99f50588a 100644 --- a/core/src/banking_stage/scheduler_messages.rs +++ b/core/src/banking_stage/scheduler_messages.rs @@ -35,13 +35,19 @@ impl Display for TransactionId { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct MaxAge { + pub epoch_invalidation_slot: Slot, + pub alt_invalidation_slot: Slot, +} + /// Message: [Scheduler -> Worker] /// Transactions to be consumed (i.e. 
executed, recorded, and committed) pub struct ConsumeWork { pub batch_id: TransactionBatchId, pub ids: Vec, pub transactions: Vec, - pub max_age_slots: Vec, + pub max_ages: Vec, } /// Message: [Worker -> Scheduler] diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index 59ce92173ed26e..9f6fcc8388a364 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -9,7 +9,9 @@ use { crate::banking_stage::{ consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, read_write_account_set::ReadWriteAccountSet, - scheduler_messages::{ConsumeWork, FinishedConsumeWork, TransactionBatchId, TransactionId}, + scheduler_messages::{ + ConsumeWork, FinishedConsumeWork, MaxAge, TransactionBatchId, TransactionId, + }, transaction_scheduler::{ transaction_priority_id::TransactionPriorityId, transaction_state::TransactionState, }, @@ -19,10 +21,7 @@ use { prio_graph::{AccessKind, PrioGraph}, solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS, solana_measure::measure_us, - solana_sdk::{ - pubkey::Pubkey, saturating_add_assign, slot_history::Slot, - transaction::SanitizedTransaction, - }, + solana_sdk::{pubkey::Pubkey, saturating_add_assign, transaction::SanitizedTransaction}, }; pub(crate) struct PrioGraphScheduler { @@ -202,13 +201,13 @@ impl PrioGraphScheduler { Ok(TransactionSchedulingInfo { thread_id, transaction, - max_age_slot, + max_age, cost, }) => { saturating_add_assign!(num_scheduled, 1); batches.transactions[thread_id].push(transaction); batches.ids[thread_id].push(id.id); - batches.max_age_slots[thread_id].push(max_age_slot); + batches.max_ages[thread_id].push(max_age); saturating_add_assign!(batches.total_cus[thread_id], cost); // If target batch size is reached, send only this batch. 
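The scheduler itself only records and forwards `MaxAge`; enforcement happens when a batch is consumed. A condensed sketch of that consume-time policy, mirroring the consumer.rs hunk earlier in this patch (the free-standing helper is hypothetical, introduced here only for illustration):

use solana_sdk::clock::Slot;

// Given the slot of the executing bank, decide which revalidation steps a
// transaction needs before it may be processed.
fn revalidation_needed(bank_slot: Slot, max_age: &MaxAge) -> (bool, bool) {
    // Crossed an epoch boundary since sanitization: fully re-sanitize.
    let must_resanitize = bank_slot > max_age.epoch_invalidation_slot;
    // Address lookup tables may have expired: re-resolve the addresses and
    // drop the transaction if they no longer resolve.
    let must_reresolve_alts = bank_slot > max_age.alt_invalidation_slot;
    (must_resanitize, must_reresolve_alts)
}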
@@ -309,7 +308,7 @@ impl PrioGraphScheduler { batch_id, ids, transactions, - max_age_slots, + max_ages, }, retryable_indexes, }) => { @@ -321,8 +320,8 @@ impl PrioGraphScheduler { // Retryable transactions should be inserted back into the container let mut retryable_iter = retryable_indexes.into_iter().peekable(); - for (index, (id, transaction, max_age_slot)) in - izip!(ids, transactions, max_age_slots).enumerate() + for (index, (id, transaction, max_age)) in + izip!(ids, transactions, max_ages).enumerate() { if let Some(retryable_index) = retryable_iter.peek() { if *retryable_index == index { @@ -330,7 +329,7 @@ impl PrioGraphScheduler { id, SanitizedTransactionTTL { transaction, - max_age_slot, + max_age, }, ); retryable_iter.next(); @@ -392,7 +391,7 @@ impl PrioGraphScheduler { return Ok(0); } - let (ids, transactions, max_age_slots, total_cus) = batches.take_batch(thread_index); + let (ids, transactions, max_ages, total_cus) = batches.take_batch(thread_index); let batch_id = self .in_flight_tracker @@ -403,7 +402,7 @@ impl PrioGraphScheduler { batch_id, ids, transactions, - max_age_slots, + max_ages, }; self.consume_work_senders[thread_index] .send(work) @@ -477,7 +476,7 @@ pub(crate) struct SchedulingSummary { struct Batches { ids: Vec>, transactions: Vec>, - max_age_slots: Vec>, + max_ages: Vec>, total_cus: Vec, } @@ -486,7 +485,7 @@ impl Batches { Self { ids: vec![Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH); num_threads], transactions: vec![Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH); num_threads], - max_age_slots: vec![Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH); num_threads], + max_ages: vec![Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH); num_threads], total_cus: vec![0; num_threads], } } @@ -497,7 +496,7 @@ impl Batches { ) -> ( Vec, Vec, - Vec, + Vec, u64, ) { ( @@ -510,7 +509,7 @@ impl Batches { Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH), ), core::mem::replace( - &mut self.max_age_slots[thread_id], + &mut self.max_ages[thread_id], Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH), ), core::mem::replace(&mut self.total_cus[thread_id], 0), @@ -522,7 +521,7 @@ impl Batches { struct TransactionSchedulingInfo { thread_id: ThreadId, transaction: SanitizedTransaction, - max_age_slot: Slot, + max_age: MaxAge, cost: u64, } @@ -583,7 +582,7 @@ fn try_schedule_transaction( Ok(TransactionSchedulingInfo { thread_id, transaction: sanitized_transaction_ttl.transaction, - max_age_slot: sanitized_transaction_ttl.max_age_slot, + max_age: sanitized_transaction_ttl.max_age, cost, }) } @@ -599,8 +598,8 @@ mod tests { crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, packet::Packet, - pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction, + clock::Slot, compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, + packet::Packet, pubkey::Pubkey, signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, }, std::{borrow::Borrow, sync::Arc}, @@ -686,7 +685,10 @@ mod tests { ); let transaction_ttl = SanitizedTransactionTTL { transaction, - max_age_slot: Slot::MAX, + max_age: MaxAge { + epoch_invalidation_slot: Slot::MAX, + alt_invalidation_slot: Slot::MAX, + }, }; const TEST_TRANSACTION_COST: u64 = 5000; container.insert_new_transaction( diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 
995b1a5782702b..ddec4ec90711c8 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -19,6 +19,7 @@ use { forwarder::Forwarder, immutable_deserialized_packet::ImmutableDeserializedPacket, packet_deserializer::PacketDeserializer, + scheduler_messages::MaxAge, ForwardOption, LikeClusterInfo, TOTAL_BUFFERED_PACKETS, }, arrayvec::ArrayVec, @@ -30,7 +31,8 @@ use { solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ self, - clock::{FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, + address_lookup_table::state::estimate_last_valid_slot, + clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, fee::FeeBudgetLimits, saturating_add_assign, transaction::SanitizedTransaction, @@ -500,16 +502,25 @@ impl SchedulerController { // Convert to Arcs let packets: Vec<_> = packets.into_iter().map(Arc::new).collect(); // Sanitize packets, generate IDs, and insert into the container. - let bank = self.bank_forks.read().unwrap().working_bank(); - let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(bank.epoch()); - let transaction_account_lock_limit = bank.get_transaction_account_lock_limit(); - let vote_only = bank.vote_only_bank(); + let (root_bank, working_bank) = { + let bank_forks = self.bank_forks.read().unwrap(); + let root_bank = bank_forks.root_bank(); + let working_bank = bank_forks.working_bank(); + (root_bank, working_bank) + }; + let alt_resolved_slot = root_bank.slot(); + let last_slot_in_epoch = working_bank + .epoch_schedule() + .get_last_slot_in_epoch(working_bank.epoch()); + let transaction_account_lock_limit = working_bank.get_transaction_account_lock_limit(); + let vote_only = working_bank.vote_only_bank(); const CHUNK_SIZE: usize = 128; let lock_results: [_; CHUNK_SIZE] = core::array::from_fn(|_| Ok(())); let mut arc_packets = ArrayVec::<_, CHUNK_SIZE>::new(); let mut transactions = ArrayVec::<_, CHUNK_SIZE>::new(); + let mut max_ages = ArrayVec::<_, CHUNK_SIZE>::new(); let mut fee_budget_limits_vec = ArrayVec::<_, CHUNK_SIZE>::new(); let mut error_counts = TransactionErrorMetrics::default(); @@ -521,31 +532,43 @@ impl SchedulerController { packet .build_sanitized_transaction( vote_only, - bank.as_ref(), - bank.get_reserved_account_keys(), + root_bank.as_ref(), + working_bank.get_reserved_account_keys(), ) - .map(|tx| (packet.clone(), tx)) + .map(|(tx, deactivation_slot)| (packet.clone(), tx, deactivation_slot)) }) .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) - .filter(|(_packet, tx)| { + .filter(|(_packet, tx, _deactivation_slot)| { validate_account_locks( tx.message().account_keys(), transaction_account_lock_limit, ) .is_ok() }) - .filter_map(|(packet, tx)| { + .filter_map(|(packet, tx, deactivation_slot)| { process_compute_budget_instructions(SVMMessage::program_instructions_iter(&tx)) - .map(|compute_budget| (packet, tx, compute_budget.into())) + .map(|compute_budget| { + (packet, tx, deactivation_slot, compute_budget.into()) + }) .ok() }) - .for_each(|(packet, tx, fee_budget_limits)| { + .for_each(|(packet, tx, deactivation_slot, fee_budget_limits)| { arc_packets.push(packet); transactions.push(tx); + max_ages.push(calculate_max_age( + last_slot_in_epoch, + deactivation_slot, + alt_resolved_slot, + )); fee_budget_limits_vec.push(fee_budget_limits); }); - let check_results = bank.check_transactions( + let check_results: Vec< + Result< + 
solana_svm::account_loader::CheckedTransactionDetails, + solana_sdk::transaction::TransactionError, + >, + > = working_bank.check_transactions( &transactions, &lock_results[..transactions.len()], MAX_PROCESSING_AGE, @@ -556,21 +579,26 @@ impl SchedulerController { let mut post_transaction_check_count: usize = 0; let mut num_dropped_on_capacity: usize = 0; let mut num_buffered: usize = 0; - for (((packet, transaction), fee_budget_limits), _check_result) in arc_packets - .drain(..) - .zip(transactions.drain(..)) - .zip(fee_budget_limits_vec.drain(..)) - .zip(check_results) - .filter(|(_, check_result)| check_result.is_ok()) + for ((((packet, transaction), max_age), fee_budget_limits), _check_result) in + arc_packets + .drain(..) + .zip(transactions.drain(..)) + .zip(max_ages.drain(..)) + .zip(fee_budget_limits_vec.drain(..)) + .zip(check_results) + .filter(|(_, check_result)| check_result.is_ok()) { saturating_add_assign!(post_transaction_check_count, 1); let transaction_id = self.transaction_id_generator.next(); - let (priority, cost) = - Self::calculate_priority_and_cost(&transaction, &fee_budget_limits, &bank); + let (priority, cost) = Self::calculate_priority_and_cost( + &transaction, + &fee_budget_limits, + &working_bank, + ); let transaction_ttl = SanitizedTransactionTTL { transaction, - max_age_slot: last_slot_in_epoch, + max_age, }; if self.container.insert_new_transaction( @@ -655,6 +683,34 @@ impl SchedulerController { } } +/// Given the last slot in the epoch, the minimum deactivation slot, +/// and the current slot, return the `MaxAge` that should be used for +/// the transaction. This is used to determine the maximum slot that a +/// transaction will be considered valid for, without re-resolving addresses +/// or resanitizing. +/// +/// This function considers the deactivation period of Address Table +/// accounts. If the deactivation period runs past the end of the epoch, +/// then the transaction is considered valid until the end of the epoch. +/// Otherwise, the transaction is considered valid until the deactivation +/// period. +/// +/// Since the deactivation period technically uses blocks rather than +/// slots, the value used here is the lower-bound on the deactivation +/// period, i.e. the transaction's address lookups are valid until +/// AT LEAST this slot. 
+fn calculate_max_age( + last_slot_in_epoch: Slot, + deactivation_slot: Slot, + current_slot: Slot, +) -> MaxAge { + let alt_min_expire_slot = estimate_last_valid_slot(deactivation_slot.min(current_slot)); + MaxAge { + epoch_invalidation_slot: last_slot_in_epoch, + alt_invalidation_slot: alt_min_expire_slot, + } +} + #[cfg(test)] mod tests { use { @@ -827,7 +883,7 @@ mod tests { batch_id: TransactionBatchId::new(0), ids: vec![], transactions: vec![], - max_age_slots: vec![], + max_ages: vec![], }, retryable_indexes: vec![], }) @@ -1158,4 +1214,29 @@ mod tests { .collect_vec(); assert_eq!(message_hashes, vec![&tx1_hash]); } + + #[test] + fn test_calculate_max_age() { + let current_slot = 100; + let last_slot_in_epoch = 1000; + + // ALT deactivation slot is delayed + assert_eq!( + calculate_max_age(last_slot_in_epoch, current_slot - 1, current_slot), + MaxAge { + epoch_invalidation_slot: last_slot_in_epoch, + alt_invalidation_slot: current_slot - 1 + + solana_sdk::slot_hashes::get_entries() as u64, + } + ); + + // no deactivation slot + assert_eq!( + calculate_max_age(last_slot_in_epoch, u64::MAX, current_slot), + MaxAge { + epoch_invalidation_slot: last_slot_in_epoch, + alt_invalidation_slot: current_slot + solana_sdk::slot_hashes::get_entries() as u64, + } + ); + } } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index 85af8217309e93..efb59be1b8b5b5 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -1,13 +1,15 @@ use { - crate::banking_stage::immutable_deserialized_packet::ImmutableDeserializedPacket, - solana_sdk::{clock::Slot, transaction::SanitizedTransaction}, + crate::banking_stage::{ + immutable_deserialized_packet::ImmutableDeserializedPacket, scheduler_messages::MaxAge, + }, + solana_sdk::transaction::SanitizedTransaction, std::sync::Arc, }; /// Simple wrapper type to tie a sanitized transaction to max age slot. 
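For intuition, here is a worked instance of `calculate_max_age` covering the case the new test above does not exercise: a table whose deactivation began well before the resolution slot. This is a sketch assuming the SlotHashes depth returned by `get_entries()` is 512, its current default:

// Hypothetical inputs: deactivation began 50 slots before resolution.
let last_slot_in_epoch: u64 = 1_000;
let current_slot: u64 = 100;
let deactivation_slot: u64 = 50;

let max_age = calculate_max_age(last_slot_in_epoch, deactivation_slot, current_slot);
// estimate_last_valid_slot(min(50, 100)) = 50 + 512 = 562
assert_eq!(max_age.epoch_invalidation_slot, 1_000);
assert_eq!(max_age.alt_invalidation_slot, 562);
// The ALT bound (562) lands before the epoch bound (1_000), so here the
// address lookups, not the epoch boundary, limit how long the transaction
// may be held without re-resolving addresses.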
pub(crate) struct SanitizedTransactionTTL { pub(crate) transaction: SanitizedTransaction, - pub(crate) max_age_slot: Slot, + pub(crate) max_age: MaxAge, } /// TransactionState is used to track the state of a transaction in the transaction scheduler @@ -207,8 +209,9 @@ mod tests { use { super::*, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, packet::Packet, - signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, + clock::Slot, compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, + packet::Packet, signature::Keypair, signer::Signer, system_instruction, + transaction::Transaction, }, }; @@ -230,7 +233,10 @@ mod tests { ); let transaction_ttl = SanitizedTransactionTTL { transaction: SanitizedTransaction::from_transaction_for_tests(tx), - max_age_slot: Slot::MAX, + max_age: MaxAge { + epoch_invalidation_slot: Slot::MAX, + alt_invalidation_slot: Slot::MAX, + }, }; const TEST_TRANSACTION_COST: u64 = 5000; TransactionState::new( @@ -271,11 +277,11 @@ mod tests { // Manually clone `SanitizedTransactionTTL` let SanitizedTransactionTTL { transaction, - max_age_slot, + max_age, } = transaction_state.transaction_ttl(); let transaction_ttl = SanitizedTransactionTTL { transaction: transaction.clone(), - max_age_slot: *max_age_slot, + max_age: *max_age, }; transaction_state.transition_to_unprocessed(transaction_ttl); // invalid transition } @@ -321,7 +327,13 @@ mod tests { transaction_state, TransactionState::Unprocessed { .. } )); - assert_eq!(transaction_ttl.max_age_slot, Slot::MAX); + assert_eq!( + transaction_ttl.max_age, + MaxAge { + epoch_invalidation_slot: Slot::MAX, + alt_invalidation_slot: Slot::MAX, + } + ); let _ = transaction_state.transition_to_pending(); assert!(matches!( @@ -339,7 +351,13 @@ mod tests { transaction_state, TransactionState::Unprocessed { .. } )); - assert_eq!(transaction_ttl.max_age_slot, Slot::MAX); + assert_eq!( + transaction_ttl.max_age, + MaxAge { + epoch_invalidation_slot: Slot::MAX, + alt_invalidation_slot: Slot::MAX, + } + ); // ensure transaction_ttl is not lost through state transitions let transaction_ttl = transaction_state.transition_to_pending(); @@ -354,6 +372,12 @@ mod tests { transaction_state, TransactionState::Unprocessed { .. 
} )); - assert_eq!(transaction_ttl.max_age_slot, Slot::MAX); + assert_eq!( + transaction_ttl.max_age, + MaxAge { + epoch_invalidation_slot: Slot::MAX, + alt_invalidation_slot: Slot::MAX, + } + ); } } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index ed78b41983fa2a..7d40c66ec1b673 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -153,6 +153,7 @@ impl TransactionStateContainer { mod tests { use { super::*, + crate::banking_stage::scheduler_messages::MaxAge, solana_sdk::{ compute_budget::ComputeBudgetInstruction, hash::Hash, @@ -198,7 +199,10 @@ mod tests { ); let transaction_ttl = SanitizedTransactionTTL { transaction: tx, - max_age_slot: Slot::MAX, + max_age: MaxAge { + epoch_invalidation_slot: Slot::MAX, + alt_invalidation_slot: Slot::MAX, + }, }; const TEST_TRANSACTION_COST: u64 = 5000; (transaction_ttl, packet, priority, TEST_TRANSACTION_COST) diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index f92eeb09c57b54..3c4e0f66664dd2 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -307,13 +307,14 @@ mod tests { use { super::*, solana_perf::packet::PacketFlags, + solana_runtime::bank::Bank, solana_sdk::{ compute_budget::ComputeBudgetInstruction, message::Message, reserved_account_keys::ReservedAccountKeys, signature::{Keypair, Signer}, system_instruction, system_transaction, - transaction::{SimpleAddressLoader, Transaction}, + transaction::Transaction, }, solana_vote_program::{vote_state::TowerSync, vote_transaction}, }; @@ -475,6 +476,7 @@ mod tests { &keypair, None, ); + let bank = Bank::default_for_tests(); // packets with no votes { @@ -486,7 +488,7 @@ mod tests { let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( votes_only, - SimpleAddressLoader::Disabled, + &bank, &ReservedAccountKeys::empty_key_set(), ) }); @@ -496,7 +498,7 @@ mod tests { let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( votes_only, - SimpleAddressLoader::Disabled, + &bank, &ReservedAccountKeys::empty_key_set(), ) }); @@ -515,7 +517,7 @@ mod tests { let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( votes_only, - SimpleAddressLoader::Disabled, + &bank, &ReservedAccountKeys::empty_key_set(), ) }); @@ -525,7 +527,7 @@ mod tests { let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( votes_only, - SimpleAddressLoader::Disabled, + &bank, &ReservedAccountKeys::empty_key_set(), ) }); @@ -544,7 +546,7 @@ mod tests { let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( votes_only, - SimpleAddressLoader::Disabled, + &bank, &ReservedAccountKeys::empty_key_set(), ) }); @@ -554,7 +556,7 @@ mod tests { let txs = packet_vector.iter().filter_map(|tx| { tx.immutable_section().build_sanitized_transaction( votes_only, - SimpleAddressLoader::Disabled, + &bank, &ReservedAccountKeys::empty_key_set(), ) }); diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index f612f5eaf08b11..56e814acea9219 100644 --- 
a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -154,13 +154,15 @@ fn consume_scan_should_process_packet( return ProcessingDecision::Now; } - // Try to sanitize the packet + // Try to sanitize the packet. Ignore deactivation slot since we are + // immediately attempting to process the transaction. let (maybe_sanitized_transaction, sanitization_time_us) = measure_us!(packet .build_sanitized_transaction( bank.vote_only_bank(), bank, bank.get_reserved_account_keys(), - )); + ) + .map(|(tx, _deactivation_slot)| tx)); payload .slot_metrics_tracker @@ -799,7 +801,7 @@ impl ThreadLocalUnprocessedPackets { bank, bank.get_reserved_account_keys(), ) - .map(|transaction| (transaction, packet_index)) + .map(|(transaction, _deactivation_slot)| (transaction, packet_index)) }) .unzip(); diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 4fa4e2bc0f570a..cb195202c9ddac 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -2,6 +2,7 @@ use { super::Bank, solana_sdk::{ address_lookup_table::error::AddressLookupError, + clock::Slot, message::{ v0::{LoadedAddresses, MessageAddressTableLookup}, AddressLoaderError, @@ -32,22 +33,25 @@ impl AddressLoader for &Bank { .iter() .map(SVMMessageAddressTableLookup::from), ) + .map(|(loaded_addresses, _deactivation_slot)| loaded_addresses) } } impl Bank { - /// Load addresses from an iterator of `SVMMessageAddressTableLookup`. + /// Load addresses from an iterator of `SVMMessageAddressTableLookup`, + /// additionally returning the minimum deactivation slot across all referenced ALTs pub fn load_addresses_from_ref<'a>( &self, address_table_lookups: impl Iterator>, ) -> Result<(LoadedAddresses, Slot), AddressLoaderError> { let slot_hashes = self .transaction_processor .sysvar_cache() .get_slot_hashes() .map_err(|_| AddressLoaderError::SlotHashesSysvarNotFound)?; - address_table_lookups + let mut deactivation_slot = u64::MAX; + let loaded_addresses = address_table_lookups .map(|address_table_lookup| { self.rc .accounts .load_lookup_table_addresses( address_table_lookup, &slot_hashes, ) + .map(|(loaded_addresses, table_deactivation_slot)| { + deactivation_slot = deactivation_slot.min(table_deactivation_slot); + loaded_addresses + }) .map_err(into_address_loader_error) }) - .collect::>() + .collect::>()?; + + Ok((loaded_addresses, deactivation_slot)) } } diff --git a/sdk/program/src/address_lookup_table/state.rs b/sdk/program/src/address_lookup_table/state.rs index 13a66637faa919..3136dd063f85da 100644 --- a/sdk/program/src/address_lookup_table/state.rs +++ b/sdk/program/src/address_lookup_table/state.rs @@ -1,6 +1,7 @@ #[cfg(feature = "frozen-abi")] use solana_frozen_abi_macro::{AbiEnumVisitor, AbiExample}; use { + crate::slot_hashes::get_entries, serde_derive::{Deserialize, Serialize}, solana_clock::Slot, solana_program::{ @@ -12,6 +13,19 @@ use { std::borrow::Cow, }; +/// The lookup table may be in a deactivating state until +/// the `deactivation_slot` is no longer "recent". +/// This function returns a conservative estimate for the +/// last block that the table may be used for lookups. +/// This estimate may be incorrect due to skipped blocks, +/// however, if the current slot is lower than the returned +/// value, the table is guaranteed to still be in the +/// deactivating state.
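The `load_addresses_from_ref` change above folds the deactivation slots of every referenced table into one conservative bound for the whole transaction. A toy illustration of that fold, with invented table values:

// Three hypothetical lookup tables: two still active, one deactivating.
let table_deactivation_slots: [u64; 3] = [u64::MAX, 250, u64::MAX];
let mut deactivation_slot = u64::MAX;
for table_slot in table_deactivation_slots {
    // Same min-fold as in load_addresses_from_ref: the earliest
    // deactivation across all tables bounds the whole transaction.
    deactivation_slot = deactivation_slot.min(table_slot);
}
assert_eq!(deactivation_slot, 250);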
+#[inline] +pub fn estimate_last_valid_slot(deactivation_slot: Slot) -> Slot { + deactivation_slot.saturating_add(get_entries() as Slot) +} + /// The maximum number of addresses that a lookup table can hold pub const LOOKUP_TABLE_MAX_ADDRESSES: usize = 256; From a3b2c786e35f6bbc9087f8f458d55ef7cc961e1b Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 15 Oct 2024 19:38:00 +0400 Subject: [PATCH 511/529] extract native-token crate (#3072) * extract solana-native-token crate * fix import after rebase * add back llink --- Cargo.lock | 6 ++++++ Cargo.toml | 2 ++ programs/sbf/Cargo.lock | 6 ++++++ sdk/Cargo.toml | 1 + sdk/native-token/Cargo.toml | 13 +++++++++++++ .../src/native_token.rs => native-token/src/lib.rs} | 0 sdk/program/Cargo.toml | 1 + sdk/program/src/lib.rs | 4 ++-- sdk/src/fee.rs | 2 +- sdk/src/genesis_config.rs | 2 +- 10 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 sdk/native-token/Cargo.toml rename sdk/{program/src/native_token.rs => native-token/src/lib.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index 7492ea20dc02f3..057209c5eb032f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7118,6 +7118,10 @@ dependencies = [ "solana-define-syscall", ] +[[package]] +name = "solana-native-token" +version = "2.1.0" + [[package]] name = "solana-net-shaper" version = "2.1.0" @@ -7318,6 +7322,7 @@ dependencies = [ "solana-instruction", "solana-logger", "solana-msg", + "solana-native-token", "solana-program-error", "solana-program-memory", "solana-program-option", @@ -7897,6 +7902,7 @@ dependencies = [ "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", + "solana-native-token", "solana-program", "solana-program-memory", "solana-pubkey", diff --git a/Cargo.toml b/Cargo.toml index cdecc0af7dcfe4..8339070f9bbf39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,6 +116,7 @@ members = [ "sdk/instruction", "sdk/macro", "sdk/msg", + "sdk/native-token", "sdk/package-metadata", "sdk/package-metadata-macro", "sdk/program", @@ -432,6 +433,7 @@ solana-measure = { path = "measure", version = "=2.1.0" } solana-merkle-tree = { path = "merkle-tree", version = "=2.1.0" } solana-metrics = { path = "metrics", version = "=2.1.0" } solana-msg = { path = "sdk/msg", version = "=2.1.0" } +solana-native-token = { path = "sdk/native-token", version = "=2.1.0" } solana-net-utils = { path = "net-utils", version = "=2.1.0" } solana-nohash-hasher = "0.2.1" solana-notifier = { path = "notifier", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e16c7d4a3a926c..78eca1fe59da24 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5584,6 +5584,10 @@ dependencies = [ "solana-define-syscall", ] +[[package]] +name = "solana-native-token" +version = "2.1.0" + [[package]] name = "solana-net-utils" version = "2.1.0" @@ -5701,6 +5705,7 @@ dependencies = [ "solana-hash", "solana-instruction", "solana-msg", + "solana-native-token", "solana-program-error", "solana-program-memory", "solana-program-option", @@ -6658,6 +6663,7 @@ dependencies = [ "solana-decode-error", "solana-derivation-path", "solana-feature-set", + "solana-native-token", "solana-program", "solana-program-memory", "solana-pubkey", diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 938e21a2aed8c0..2ab85402d1c3b1 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -96,6 +96,7 @@ solana-frozen-abi = { workspace = true, optional = true, features = [ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ "frozen-abi", ] } +solana-native-token = { workspace = 
true } solana-program = { workspace = true } solana-program-memory = { workspace = true } solana-pubkey = { workspace = true, default-features = false, features = ["std"] } diff --git a/sdk/native-token/Cargo.toml b/sdk/native-token/Cargo.toml new file mode 100644 index 00000000000000..cac63f3e88597f --- /dev/null +++ b/sdk/native-token/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "solana-native-token" +description = "Definitions for the native SOL token and its fractional lamports." +documentation = "https://docs.rs/solana-native-token" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/native_token.rs b/sdk/native-token/src/lib.rs similarity index 100% rename from sdk/program/src/native_token.rs rename to sdk/native-token/src/lib.rs diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 391e5e148f11ec..c3d373fb3f7835 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -51,6 +51,7 @@ solana-instruction = { workspace = true, default-features = false, features = [ "std", ] } solana-msg = { workspace = true } +solana-native-token = { workspace = true } solana-program-error = { workspace = true, features = ["serde"] } solana-program-memory = { workspace = true } solana-program-option = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 11a2ccaab2f9d5..ae339c371fdde3 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -502,7 +502,6 @@ pub mod loader_v4; pub mod loader_v4_instruction; pub mod log; pub mod message; -pub mod native_token; pub mod nonce; pub mod program; pub mod program_error; @@ -541,7 +540,8 @@ pub use { solana_account_info::{self as account_info, debug_account_data}, solana_clock as clock, solana_msg::msg, - solana_program_option as program_option, solana_pubkey as pubkey, solana_rent as rent, + solana_native_token as native_token, solana_program_option as program_option, + solana_pubkey as pubkey, solana_rent as rent, }; /// The [config native program][np]. 
diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index 22d04812fa0d76..08a825cd1ba2a2 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -2,7 +2,7 @@ #[cfg(not(target_os = "solana"))] use solana_program::message::SanitizedMessage; -use {crate::native_token::sol_to_lamports, std::num::NonZeroU32}; +use {solana_native_token::sol_to_lamports, std::num::NonZeroU32}; /// A fee and its associated compute unit limit #[derive(Debug, Default, Clone, Eq, PartialEq)] diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index 6b48f8fd1e7645..ba82ee531e5ccc 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -9,7 +9,6 @@ use { fee_calculator::FeeRateGovernor, hash::{hash, Hash}, inflation::Inflation, - native_token::lamports_to_sol, poh_config::PohConfig, pubkey::Pubkey, rent::Rent, @@ -22,6 +21,7 @@ use { chrono::{TimeZone, Utc}, memmap2::Mmap, solana_account::{Account, AccountSharedData}, + solana_native_token::lamports_to_sol, std::{ collections::BTreeMap, fmt, From 75450a6542164ee4f64cadc7a97a24d25ff37e2a Mon Sep 17 00:00:00 2001 From: Joe C Date: Tue, 15 Oct 2024 23:15:03 +0700 Subject: [PATCH 512/529] SVM: API: rename `new` to `new_uninitialized` (#3170) --- runtime/src/bank.rs | 4 ++-- .../json-rpc/server/src/rpc_process.rs | 2 +- svm/examples/paytube/src/processor.rs | 2 +- svm/src/transaction_processor.rs | 21 ++++++++++++++++++- svm/tests/concurrent_tests.rs | 11 +++++----- svm/tests/conformance.rs | 3 ++- svm/tests/integration_test.rs | 4 ++-- 7 files changed, 33 insertions(+), 14 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6770010b5d1b84..0975a6dbccde23 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1048,7 +1048,7 @@ impl Bank { }; bank.transaction_processor = - TransactionBatchProcessor::new(bank.slot, bank.epoch, HashSet::default()); + TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch, HashSet::default()); let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; bank.accounts_data_size_initial = accounts_data_size_initial; @@ -1702,7 +1702,7 @@ impl Bank { }; bank.transaction_processor = - TransactionBatchProcessor::new(bank.slot, bank.epoch, HashSet::default()); + TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch, HashSet::default()); let thread_pool = ThreadPoolBuilder::new() .thread_name(|i| format!("solBnkNewFlds{i:02}")) diff --git a/svm/examples/json-rpc/server/src/rpc_process.rs b/svm/examples/json-rpc/server/src/rpc_process.rs index ed239323b462b4..f0721783c9d38d 100644 --- a/svm/examples/json-rpc/server/src/rpc_process.rs +++ b/svm/examples/json-rpc/server/src/rpc_process.rs @@ -208,7 +208,7 @@ impl JsonRpcRequestProcessor { (pubkey, acc_data) }) .collect(); - let batch_processor = TransactionBatchProcessor::::new( + let batch_processor = TransactionBatchProcessor::::new_uninitialized( EXECUTION_SLOT, EXECUTION_EPOCH, HashSet::new(), diff --git a/svm/examples/paytube/src/processor.rs b/svm/examples/paytube/src/processor.rs index 03a8336209ca29..663c1b5044665e 100644 --- a/svm/examples/paytube/src/processor.rs +++ b/svm/examples/paytube/src/processor.rs @@ -50,7 +50,7 @@ pub(crate) fn create_transaction_batch_processor::new( + let processor = TransactionBatchProcessor::::new_uninitialized( /* slot */ 1, /* epoch */ 1, /* builtin_program_ids */ HashSet::new(), diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 92706997471c52..91f79952254095 100644 --- a/svm/src/transaction_processor.rs +++ 
b/svm/src/transaction_processor.rs @@ -189,7 +189,20 @@ impl Default for TransactionBatchProcessor { } impl TransactionBatchProcessor { - pub fn new(slot: Slot, epoch: Epoch, builtin_program_ids: HashSet) -> Self { + /// Create a new, uninitialized `TransactionBatchProcessor`. + /// + /// In this context, uninitialized means that the `TransactionBatchProcessor` + /// has been initialized with an empty program cache. The cache contains no + /// programs (including builtins) and has not been configured with a valid + /// fork graph. + /// + /// When using this method, it's advisable to call `set_fork_graph_in_program_cache` + /// as well as `add_builtin` to configure the cache before using the processor. + pub fn new_uninitialized( + slot: Slot, + epoch: Epoch, + builtin_program_ids: HashSet, + ) -> Self { Self { slot, epoch, @@ -199,6 +212,12 @@ impl TransactionBatchProcessor { } } + /// Create a new `TransactionBatchProcessor` from the current instance, but + /// with the provided slot and epoch. + /// + /// * Inherits the program cache and builtin program ids from the current + /// instance. + /// * Resets the sysvar cache. pub fn new_from(&self, slot: Slot, epoch: Epoch) -> Self { Self { slot, diff --git a/svm/tests/concurrent_tests.rs b/svm/tests/concurrent_tests.rs index 2e84fbba243663..4c547675784577 100644 --- a/svm/tests/concurrent_tests.rs +++ b/svm/tests/concurrent_tests.rs @@ -40,7 +40,8 @@ mod transaction_builder; fn program_cache_execution(threads: usize) { let mut mock_bank = MockBankCallback::default(); - let batch_processor = TransactionBatchProcessor::::new(5, 5, HashSet::new()); + let batch_processor = + TransactionBatchProcessor::::new_uninitialized(5, 5, HashSet::new()); let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); batch_processor.program_cache.write().unwrap().fork_graph = Some(Arc::downgrade(&fork_graph)); @@ -126,11 +127,9 @@ fn test_program_cache_with_exhaustive_scheduler() { // correctly. 
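The doc comment added to `new_uninitialized` above recommends configuring the empty program cache before use, and the tests in this patch do exactly that. A condensed sketch of the sequence, using the `MockForkGraph` type from these tests:

let batch_processor =
    TransactionBatchProcessor::<MockForkGraph>::new_uninitialized(5, 5, HashSet::new());
// Give the empty program cache a valid fork graph before processing.
let fork_graph = Arc::new(RwLock::new(MockForkGraph {}));
batch_processor.program_cache.write().unwrap().fork_graph =
    Some(Arc::downgrade(&fork_graph));
// Builtins would then be registered via add_builtin, as the doc advises.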
fn svm_concurrent() { let mock_bank = Arc::new(MockBankCallback::default()); - let batch_processor = Arc::new(TransactionBatchProcessor::::new( - 5, - 2, - HashSet::new(), - )); + let batch_processor = Arc::new( + TransactionBatchProcessor::::new_uninitialized(5, 2, HashSet::new()), + ); let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); create_executable_environment( diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs index dc521bc36eee15..7bc9ee5184b2c6 100644 --- a/svm/tests/conformance.rs +++ b/svm/tests/conformance.rs @@ -244,7 +244,8 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool create_program_runtime_environment_v1(&feature_set, &compute_budget, false, false).unwrap(); mock_bank.override_feature_set(feature_set); - let batch_processor = TransactionBatchProcessor::::new(42, 2, HashSet::new()); + let batch_processor = + TransactionBatchProcessor::::new_uninitialized(42, 2, HashSet::new()); let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); { diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 6b1325a643d2f0..9f781607aa3112 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -872,7 +872,7 @@ fn execute_test_entry(test_entry: SvmTestEntry) { .insert(*pubkey, account.clone()); } - let batch_processor = TransactionBatchProcessor::::new( + let batch_processor = TransactionBatchProcessor::::new_uninitialized( EXECUTION_SLOT, EXECUTION_EPOCH, HashSet::new(), @@ -1059,7 +1059,7 @@ fn svm_inspect_account() { // Load and execute the transaction - let batch_processor = TransactionBatchProcessor::::new( + let batch_processor = TransactionBatchProcessor::::new_uninitialized( EXECUTION_SLOT, EXECUTION_EPOCH, HashSet::new(), From c32d0df3f7c98d9190780d5019ab0cc45bca1dca Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 15 Oct 2024 21:14:16 +0400 Subject: [PATCH 513/529] avoid solana-program in inline-spl (#3178) --- Cargo.lock | 2 +- inline-spl/Cargo.toml | 4 +++- inline-spl/src/associated_token_account.rs | 4 ++-- inline-spl/src/token.rs | 8 ++++---- inline-spl/src/token_2022.rs | 2 +- programs/sbf/Cargo.lock | 2 +- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 057209c5eb032f..73bd1a01af5205 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6846,7 +6846,7 @@ name = "solana-inline-spl" version = "2.1.0" dependencies = [ "bytemuck", - "solana-program", + "solana-pubkey", ] [[package]] diff --git a/inline-spl/Cargo.toml b/inline-spl/Cargo.toml index 82aa5907ce0aa2..132e10f07ab533 100644 --- a/inline-spl/Cargo.toml +++ b/inline-spl/Cargo.toml @@ -11,7 +11,9 @@ edition = { workspace = true } [dependencies] bytemuck = { workspace = true } -solana-program = { workspace = true, default-features = false } +solana-pubkey = { workspace = true, default-features = false, features = [ + "bytemuck", +] } [lib] crate-type = ["lib"] diff --git a/inline-spl/src/associated_token_account.rs b/inline-spl/src/associated_token_account.rs index 2048c5b743d222..289dc0f8555e47 100644 --- a/inline-spl/src/associated_token_account.rs +++ b/inline-spl/src/associated_token_account.rs @@ -1,6 +1,6 @@ // Partial SPL Associated Token Account declarations inlined to avoid an external dependency on the spl-associated-token-account crate -solana_program::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"); +solana_pubkey::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"); pub mod program_v1_1_0 { - 
solana_program::declare_id!("NatA1Zyo48dJ7yuwR7cGURwhskKA8ywUyxb9GvG7mTC"); + solana_pubkey::declare_id!("NatA1Zyo48dJ7yuwR7cGURwhskKA8ywUyxb9GvG7mTC"); } diff --git a/inline-spl/src/token.rs b/inline-spl/src/token.rs index 1a495d8ca3a241..af456c3b6242a7 100644 --- a/inline-spl/src/token.rs +++ b/inline-spl/src/token.rs @@ -1,10 +1,10 @@ /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token crate -use solana_program::pubkey::{Pubkey, PUBKEY_BYTES}; +use solana_pubkey::{Pubkey, PUBKEY_BYTES}; -solana_program::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA"); +solana_pubkey::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA"); pub mod program_v3_4_0 { - solana_program::declare_id!("NToK4t5AQzxPNpUA84DkxgfXaVDbDQQjpHKCqsbY46B"); + solana_pubkey::declare_id!("NToK4t5AQzxPNpUA84DkxgfXaVDbDQQjpHKCqsbY46B"); } /* @@ -72,7 +72,7 @@ impl GenericTokenAccount for Account { } pub mod native_mint { - solana_program::declare_id!("So11111111111111111111111111111111111111112"); + solana_pubkey::declare_id!("So11111111111111111111111111111111111111112"); /* Mint { diff --git a/inline-spl/src/token_2022.rs b/inline-spl/src/token_2022.rs index 4b0e0d1b3c05db..fafa2b4cfce68a 100644 --- a/inline-spl/src/token_2022.rs +++ b/inline-spl/src/token_2022.rs @@ -1,7 +1,7 @@ /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token-2022 crate use crate::token::{self, GenericTokenAccount}; -solana_program::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb"); +solana_pubkey::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb"); // `spl_token_program_2022::extension::AccountType::Account` ordinal value pub const ACCOUNTTYPE_ACCOUNT: u8 = 2; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 78eca1fe59da24..8d7a199fc4fa6b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5420,7 +5420,7 @@ name = "solana-inline-spl" version = "2.1.0" dependencies = [ "bytemuck", - "solana-program", + "solana-pubkey", ] [[package]] From 72963e1469031fbc72d3d87a1f5e568e98f734ce Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 15 Oct 2024 13:49:27 -0400 Subject: [PATCH 514/529] Advises kernel to use random access for disk bucket mmaps (#2140) --- bucket_map/src/bucket_storage.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index a55923c142d850..95f05cfdfaa679 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -433,6 +433,10 @@ impl BucketStorage { std::env::current_dir(), ); }); + // Access to the disk bucket files are random (excluding the linear search on collisions), + // so advise the kernel to treat the mmaps as such. + #[cfg(unix)] + mmap.advise(memmap2::Advice::Random).unwrap(); measure_mmap.stop(); stats .new_file_us From 1e800b1e0b5b699813d835d35595a03fa70326a4 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Tue, 15 Oct 2024 14:35:04 -0400 Subject: [PATCH 515/529] Aggressively shrink ancient storages when shrink isn't too busy. 
(#2946) * Tweak ancient packing algorithm * Minor change * Feedback * Remove redundancy * Correction * Revert correction * Loop * Add test * Fix clippy * Comments * Comment * Comments * Pop ancients * Revert * Checks * Move reverse * Typo * Popped * Sort * Format * Revert sort, back to reverse * Fix comment --- accounts-db/src/accounts_db.rs | 125 ++++++++++++++++++++++++- accounts-db/src/ancient_append_vecs.rs | 19 +++- 2 files changed, 141 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 29582ab7e1ff7f..e2353b99fce81d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -141,6 +141,11 @@ const MAX_ITEMS_PER_CHUNK: Slot = 2_500; // This allows us to split up accounts index accesses across multiple threads. const SHRINK_COLLECT_CHUNK_SIZE: usize = 50; +/// The number of shrink candidate slots that is small enough so that +/// additional storages from ancient slots can be added to the +/// candidates for shrinking. +const SHRINK_INSERT_ANCIENT_THRESHOLD: usize = 10; + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum CreateAncientStorage { /// ancient storages are created by appending @@ -1501,6 +1506,14 @@ pub struct AccountsDb { /// Flag to indicate if the experimental accounts lattice hash is enabled. /// (For R&D only; a feature-gate also exists to turn this on and make it a part of consensus.) pub is_experimental_accumulator_hash_enabled: AtomicBool, + + /// These are the ancient storages that could be valuable to + /// shrink, sorted by amount of dead bytes. The elements + /// are popped from the end of the vector, hence the sorting is + /// expected to be from the smallest dead bytes to the largest. + /// Members are Slot and capacity. If capacity is smaller, then + /// that means the storage was already shrunk. + pub(crate) best_ancient_slots_to_shrink: RwLock>, } /// results from 'split_storages_ancient' @@ -1860,6 +1873,7 @@ impl AccountsDb { is_experimental_accumulator_hash_enabled: default_accounts_db_config .enable_experimental_accumulator_hash .into(), + best_ancient_slots_to_shrink: RwLock::default(), } } @@ -4401,8 +4415,12 @@ impl AccountsDb { let shrink_candidates_slots = std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap()); + self.shrink_stats + .initial_candidates_count + .store(shrink_candidates_slots.len() as u64, Ordering::Relaxed); + let candidates_count = shrink_candidates_slots.len(); - let ((shrink_slots, shrink_slots_next_batch), select_time_us) = measure_us!({ + let ((mut shrink_slots, shrink_slots_next_batch), select_time_us) = measure_us!({ if let AccountShrinkThreshold::TotalSpace { shrink_ratio } = self.shrink_ratio { let (shrink_slots, shrink_slots_next_batch) = self.select_candidates_by_total_usage(&shrink_candidates_slots, shrink_ratio); @@ -4423,6 +4441,30 @@ impl AccountsDb { } }); + // If there are too few slots to shrink, add an ancient slot + // for shrinking. The best ancient slots to shrink are + // assumed to be in reverse order. 
+ if shrink_slots.len() < SHRINK_INSERT_ANCIENT_THRESHOLD { + let mut ancients = self.best_ancient_slots_to_shrink.write().unwrap(); + while let Some((slot, capacity)) = ancients.pop() { + if let Some(store) = self.storage.get_slot_storage_entry(slot) { + if !shrink_slots.contains(&slot) + && capacity == store.capacity() + && Self::is_candidate_for_shrink(self, &store) + { + let ancient_bytes_added_to_shrink = store.alive_bytes() as u64; + shrink_slots.insert(slot, store); + self.shrink_stats + .ancient_bytes_added_to_shrink + .fetch_add(ancient_bytes_added_to_shrink, Ordering::Relaxed); + self.shrink_stats + .ancient_slots_added_to_shrink + .fetch_add(1, Ordering::Relaxed); + break; + } + } + } + } if shrink_slots.is_empty() && shrink_slots_next_batch .as_ref() @@ -9252,7 +9294,9 @@ pub mod tests { accounts_hash::MERKLE_FANOUT, accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, ancient_append_vecs, - append_vec::{test_utils::TempFile, AppendVec, AppendVecStoredAccountMeta}, + append_vec::{ + aligned_stored_size, test_utils::TempFile, AppendVec, AppendVecStoredAccountMeta, + }, storable_accounts::AccountForStorage, }, assert_matches::assert_matches, @@ -12154,6 +12198,83 @@ pub mod tests { ); } + /// This test creates an ancient storage with three alive accounts + /// of various sizes. It then simulates killing one of the + /// accounts in a more recent (non-ancient) slot by overwriting + /// the account that has the smallest data size. The dead account + /// is expected to be deleted from its ancient storage in the + /// process of shrinking candidate slots. The capacity of the + /// storage after shrinking is expected to be the sum of alive + /// bytes of the two remaining alive ancient accounts. + #[test] + fn test_shrink_candidate_slots_with_dead_ancient_account() { + solana_logger::setup(); + let epoch_schedule = EpochSchedule::default(); + let num_ancient_slots = 3; + // Prepare 3 append vecs to combine [medium, big, small] + let account_data_sizes = vec![1000, 2000, 150]; + let (db, starting_ancient_slot) = + create_db_with_storages_and_index_with_customized_account_size_per_slot( + true, + num_ancient_slots, + account_data_sizes, + ); + db.add_root(starting_ancient_slot); + let slots_to_combine: Vec = + (starting_ancient_slot..starting_ancient_slot + num_ancient_slots as Slot).collect(); + db.combine_ancient_slots(slots_to_combine, CAN_RANDOMLY_SHRINK_FALSE); + let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap(); + let ancient_accounts = db.get_unique_accounts_from_storage(&storage); + // Check that three accounts are indeed present in the combined storage. + assert_eq!(ancient_accounts.stored_accounts.len(), 3); + // Find an ancient account with smallest data length. + // This will be a dead account, overwritten in the current slot. + let modified_account_pubkey = ancient_accounts + .stored_accounts + .iter() + .min_by(|a, b| a.data_len.cmp(&b.data_len)) + .unwrap() + .pubkey; + let modified_account_owner = *AccountSharedData::default().owner(); + let modified_account = AccountSharedData::new(223, 0, &modified_account_owner); + let ancient_append_vec_offset = db.ancient_append_vec_offset.unwrap().abs(); + let current_slot = epoch_schedule.slots_per_epoch + ancient_append_vec_offset as u64 + 1; + // Simulate killing of the ancient account by overwriting it in the current slot. 
+ db.store_for_tests( + current_slot, + &[(&modified_account_pubkey, &modified_account)], + ); + db.calculate_accounts_delta_hash(current_slot); + db.add_root_and_flush_write_cache(current_slot); + // This should remove the dead ancient account from the index. + db.clean_accounts_for_tests(); + db.shrink_ancient_slots(&epoch_schedule); + let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap(); + let created_accounts = db.get_unique_accounts_from_storage(&storage); + // The dead account should still be in the ancient storage, + // because the storage wouldn't be shrunk with normal alive to + // capacity ratio. + assert_eq!(created_accounts.stored_accounts.len(), 3); + db.shrink_candidate_slots(&epoch_schedule); + let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap(); + let created_accounts = db.get_unique_accounts_from_storage(&storage); + // At this point the dead ancient account should be removed + // and storage capacity shrunk to the sum of alive bytes of + // accounts it holds. This is the data lengths of the + // accounts plus the length of their metadata. + assert_eq!( + created_accounts.capacity as usize, + aligned_stored_size(1000) + aligned_stored_size(2000) + ); + // The above check works only when the AppendVec storage is + // used. More generally the pubkey of the smallest account + // shouldn't be present in the shrunk storage, which is + // validated by the following scan of the storage accounts. + storage.accounts.scan_pubkeys(|pubkey| { + assert_ne!(pubkey, &modified_account_pubkey); + }); + } + #[test] fn test_select_candidates_by_total_usage_no_candidates() { // no input candidates -- none should be selected diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 9c788c8e668917..5d645c9560cc39 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -79,6 +79,9 @@ struct AncientSlotInfos { total_alive_bytes_shrink: Saturating, /// total alive bytes across all slots total_alive_bytes: Saturating, + /// slots that have dead accounts and thus the corresponding slot + /// storages can be shrunk + best_slots_to_shrink: Vec<(Slot, u64)>, } impl AncientSlotInfos { @@ -177,8 +180,13 @@ impl AncientSlotInfos { * tuning.percent_of_alive_shrunk_data / 100, ); + // At this point self.shrink_indexes have been sorted by the + // largest amount of dead bytes first in the corresponding + // storages. + self.best_slots_to_shrink = Vec::with_capacity(self.shrink_indexes.len()); for info_index in &self.shrink_indexes { let info = &mut self.all_infos[*info_index]; + self.best_slots_to_shrink.push((info.slot, info.capacity)); if bytes_to_shrink_due_to_ratio.0 >= threshold_bytes { // we exceeded the amount to shrink due to alive ratio, so don't shrink this one just due to 'should_shrink' // It MAY be shrunk based on total capacity still. @@ -188,6 +196,10 @@ impl AncientSlotInfos { bytes_to_shrink_due_to_ratio += info.alive_bytes; } } + // Reverse the vector so that the elements with the largest + // dead bytes are popped first when used to extend the + // shrinking candidates. 
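A toy model of the ordering convention described in the comment above, with invented slot numbers and dead-byte counts:

// Suppose slots 10, 11 and 12 have 900, 500 and 100 dead bytes.
// shrink_indexes is sorted largest-dead-bytes first, so the vector is
// built as [10, 11, 12] and the reverse below turns it into [12, 11, 10].
let mut best_slots_to_shrink = vec![12u64, 11, 10];
// shrink_candidate_slots pops from the back, so slot 10, the slot with
// the most dead bytes, is picked up first.
assert_eq!(best_slots_to_shrink.pop(), Some(10));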
+ self.best_slots_to_shrink.reverse(); } /// after this function, only slots that were chosen to shrink are marked with @@ -396,7 +408,12 @@ impl AccountsDb { self.shrink_ancient_stats .slots_considered .fetch_add(sorted_slots.len() as u64, Ordering::Relaxed); - let ancient_slot_infos = self.collect_sort_filter_ancient_slots(sorted_slots, &tuning); + let mut ancient_slot_infos = self.collect_sort_filter_ancient_slots(sorted_slots, &tuning); + + std::mem::swap( + &mut *self.best_ancient_slots_to_shrink.write().unwrap(), + &mut ancient_slot_infos.best_slots_to_shrink, + ); if ancient_slot_infos.all_infos.is_empty() { return; // nothing to do From 598f7aedf98bf6be2d5f6afd5f14e8c6070ab20a Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 15 Oct 2024 23:02:46 +0400 Subject: [PATCH 516/529] Extract entrypoint crate (#2430) * extract entrypoint crate * missing re-export * fix path * fmt * rename to solana-program-entrypoint * move to program-entrypoint dir * update lock files --- Cargo.lock | 11 ++++++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 11 ++++++ sdk/program-entrypoint/Cargo.toml | 19 ++++++++++ .../src/lib.rs} | 38 ++++++++++--------- sdk/program/Cargo.toml | 1 + sdk/program/src/lib.rs | 10 +++-- 7 files changed, 70 insertions(+), 22 deletions(-) create mode 100644 sdk/program-entrypoint/Cargo.toml rename sdk/{program/src/entrypoint.rs => program-entrypoint/src/lib.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index 73bd1a01af5205..ab43b132ce8135 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7323,6 +7323,7 @@ dependencies = [ "solana-logger", "solana-msg", "solana-native-token", + "solana-program-entrypoint", "solana-program-error", "solana-program-memory", "solana-program-option", @@ -7343,6 +7344,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "solana-program-entrypoint" +version = "2.1.0" +dependencies = [ + "solana-account-info", + "solana-msg", + "solana-program-error", + "solana-pubkey", +] + [[package]] name = "solana-program-error" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 8339070f9bbf39..21d9b16dbd069c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,6 +120,7 @@ members = [ "sdk/package-metadata", "sdk/package-metadata-macro", "sdk/program", + "sdk/program-entrypoint", "sdk/program-error", "sdk/program-memory", "sdk/program-option", @@ -408,6 +409,7 @@ solana-define-syscall = { path = "define-syscall", version = "=2.1.0" } solana-derivation-path = { path = "sdk/derivation-path", version = "=2.1.0" } solana-download-utils = { path = "download-utils", version = "=2.1.0" } solana-entry = { path = "entry", version = "=2.1.0" } +solana-program-entrypoint = { path = "sdk/program-entrypoint", version = "=2.1.0" } solana-epoch-schedule = { path = "sdk/epoch-schedule", version = "=2.1.0" } solana-faucet = { path = "faucet", version = "=2.1.0" } solana-feature-set = { path = "sdk/feature-set", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8d7a199fc4fa6b..b3fac0dc9df73e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5706,6 +5706,7 @@ dependencies = [ "solana-instruction", "solana-msg", "solana-native-token", + "solana-program-entrypoint", "solana-program-error", "solana-program-memory", "solana-program-option", @@ -5724,6 +5725,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "solana-program-entrypoint" +version = "2.1.0" +dependencies = [ + "solana-account-info", + "solana-msg", + "solana-program-error", + "solana-pubkey", +] + [[package]] name = "solana-program-error" 
version = "2.1.0" diff --git a/sdk/program-entrypoint/Cargo.toml b/sdk/program-entrypoint/Cargo.toml new file mode 100644 index 00000000000000..80f4760b4ad174 --- /dev/null +++ b/sdk/program-entrypoint/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-program-entrypoint" +description = "The Solana BPF program entrypoint supported by the latest BPF loader." +documentation = "https://docs.rs/solana-program-entrypoint" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-account-info = { workspace = true } +solana-msg = { workspace = true } +solana-program-error = { workspace = true } +solana-pubkey = { workspace = true, default-features = false } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/entrypoint.rs b/sdk/program-entrypoint/src/lib.rs similarity index 95% rename from sdk/program/src/entrypoint.rs rename to sdk/program-entrypoint/src/lib.rs index f360d5ef2b2ece..1893b79b4a527a 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program-entrypoint/src/lib.rs @@ -6,8 +6,9 @@ extern crate alloc; use { - crate::{account_info::AccountInfo, pubkey::Pubkey}, alloc::vec::Vec, + solana_account_info::AccountInfo, + solana_pubkey::Pubkey, std::{ alloc::Layout, cell::RefCell, @@ -17,7 +18,11 @@ use { slice::{from_raw_parts, from_raw_parts_mut}, }, }; -pub use {solana_account_info::MAX_PERMITTED_DATA_INCREASE, solana_program_error::ProgramResult}; +// need to re-export msg for custom_heap_default macro +pub use { + solana_account_info::MAX_PERMITTED_DATA_INCREASE, solana_msg::msg as __msg, + solana_program_error::ProgramResult, +}; /// User implemented function to process an instruction /// @@ -97,13 +102,11 @@ pub const NON_DUP_MARKER: u8 = u8::MAX; /// #[cfg(not(feature = "no-entrypoint"))] /// pub mod entrypoint { /// -/// use solana_program::{ -/// account_info::AccountInfo, -/// entrypoint, -/// entrypoint::ProgramResult, -/// msg, -/// pubkey::Pubkey, -/// }; +/// use solana_account_info::AccountInfo; +/// use solana_program_entrypoint::entrypoint; +/// use solana_program_entrypoint::ProgramResult; +/// use solana_msg::msg; +/// use solana_pubkey::Pubkey; /// /// entrypoint!(process_instruction); /// @@ -125,10 +128,9 @@ macro_rules! entrypoint { /// # Safety #[no_mangle] pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 { - let (program_id, accounts, instruction_data) = - unsafe { $crate::entrypoint::deserialize(input) }; + let (program_id, accounts, instruction_data) = unsafe { $crate::deserialize(input) }; match $process_instruction(program_id, &accounts, instruction_data) { - Ok(()) => $crate::entrypoint::SUCCESS, + Ok(()) => $crate::SUCCESS, Err(error) => error.into(), } } @@ -170,7 +172,7 @@ macro_rules! entrypoint_no_alloc { const MAX_ACCOUNT_INFOS: usize = 64; let mut accounts = [UNINIT_ACCOUNT_INFO; MAX_ACCOUNT_INFOS]; let (program_id, num_accounts, instruction_data) = - unsafe { $crate::entrypoint::deserialize_into(input, &mut accounts) }; + unsafe { $crate::deserialize_into(input, &mut accounts) }; // Use `slice_assume_init_ref` once it's stabilized let accounts = &*(&accounts[..num_accounts] as *const [MaybeUninit>] as *const [AccountInfo<'_>]); @@ -178,7 +180,7 @@ macro_rules! 
entrypoint_no_alloc { #[inline(never)] fn call_program(program_id: &Pubkey, accounts: &[AccountInfo], data: &[u8]) -> u64 { match $process_instruction(program_id, accounts, data) { - Ok(()) => $crate::entrypoint::SUCCESS, + Ok(()) => $crate::SUCCESS, Err(error) => error.into(), } } @@ -214,9 +216,9 @@ macro_rules! custom_heap_default { () => { #[cfg(all(not(feature = "custom-heap"), target_os = "solana"))] #[global_allocator] - static A: $crate::entrypoint::BumpAllocator = $crate::entrypoint::BumpAllocator { - start: $crate::entrypoint::HEAP_START_ADDRESS as usize, - len: $crate::entrypoint::HEAP_LENGTH, + static A: $crate::BumpAllocator = $crate::BumpAllocator { + start: $crate::HEAP_START_ADDRESS as usize, + len: $crate::HEAP_LENGTH, }; }; } @@ -272,7 +274,7 @@ macro_rules! custom_panic_default { #[no_mangle] fn custom_panic(info: &core::panic::PanicInfo<'_>) { // Full panic reporting - $crate::msg!("{}", info); + $crate::__msg!("{}", info); } }; } diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index c3d373fb3f7835..8d15f64d2a22a1 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -52,6 +52,7 @@ solana-instruction = { workspace = true, default-features = false, features = [ ] } solana-msg = { workspace = true } solana-native-token = { workspace = true } +solana-program-entrypoint = { workspace = true } solana-program-error = { workspace = true, features = ["serde"] } solana-program-memory = { workspace = true } solana-program-option = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index ae339c371fdde3..e029208fa26639 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -483,7 +483,6 @@ pub mod bpf_loader_deprecated; pub mod bpf_loader_upgradeable; pub mod compute_units; pub mod ed25519_program; -pub mod entrypoint; pub mod entrypoint_deprecated; pub mod epoch_rewards; pub mod epoch_schedule; @@ -540,10 +539,13 @@ pub use { solana_account_info::{self as account_info, debug_account_data}, solana_clock as clock, solana_msg::msg, - solana_native_token as native_token, solana_program_option as program_option, - solana_pubkey as pubkey, solana_rent as rent, + solana_native_token as native_token, + solana_program_entrypoint::{ + self as entrypoint, custom_heap_default, custom_panic_default, entrypoint, + entrypoint_no_alloc, + }, + solana_program_option as program_option, solana_pubkey as pubkey, solana_rent as rent, }; - /// The [config native program][np]. 
/// /// [np]: https://docs.solanalabs.com/runtime/programs#config-program From 1740ea2f2cc757e88a7964c0edae43c8ffa654b8 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 15 Oct 2024 23:55:39 +0400 Subject: [PATCH 517/529] move account_utils to account crate (#3174) * move account_utils to account crate * remove superfluous import --- Cargo.lock | 2 ++ programs/sbf/Cargo.lock | 1 + sdk/account/Cargo.toml | 4 +++- sdk/account/src/lib.rs | 2 ++ sdk/{src/account_utils.rs => account/src/state_traits.rs} | 6 +++--- sdk/src/lib.rs | 6 +++++- 6 files changed, 16 insertions(+), 5 deletions(-) rename sdk/{src/account_utils.rs => account/src/state_traits.rs} (93%) diff --git a/Cargo.lock b/Cargo.lock index ab43b132ce8135..2c94a4d9d482e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5652,8 +5652,10 @@ dependencies = [ "solana-account", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-instruction", "solana-logger", "solana-program", + "solana-pubkey", ] [[package]] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b3fac0dc9df73e..da726ceaeca3a8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4712,6 +4712,7 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", + "solana-instruction", "solana-program", ] diff --git a/sdk/account/Cargo.toml b/sdk/account/Cargo.toml index 015736eebe08c4..62d35391142d64 100644 --- a/sdk/account/Cargo.toml +++ b/sdk/account/Cargo.toml @@ -17,14 +17,16 @@ serde_bytes = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } +solana-instruction = { workspace = true, optional = true } solana-logger = { workspace = true, optional = true } solana-program = { workspace = true } [dev-dependencies] solana-account = { path = ".", features = ["dev-context-only-utils"] } +solana-pubkey = { workspace = true } [features] -bincode = ["dep:bincode", "serde"] +bincode = ["dep:bincode", "dep:solana-instruction", "serde"] dev-context-only-utils = ["bincode", "dep:qualifier_attr"] frozen-abi = [ "dep:solana-frozen-abi", diff --git a/sdk/account/src/lib.rs b/sdk/account/src/lib.rs index 762e41a5dd5abf..0d6df6ac6d8da4 100644 --- a/sdk/account/src/lib.rs +++ b/sdk/account/src/lib.rs @@ -29,6 +29,8 @@ use { sync::Arc, }, }; +#[cfg(feature = "bincode")] +pub mod state_traits; /// An Account with data that is stored on chain #[repr(C)] diff --git a/sdk/src/account_utils.rs b/sdk/account/src/state_traits.rs similarity index 93% rename from sdk/src/account_utils.rs rename to sdk/account/src/state_traits.rs index 7338d64cc33498..9b44e43eab159e 100644 --- a/sdk/src/account_utils.rs +++ b/sdk/account/src/state_traits.rs @@ -1,9 +1,9 @@ //! Useful extras for `Account` state. 
use { - crate::instruction::InstructionError, + crate::{Account, AccountSharedData}, bincode::ErrorKind, - solana_account::{Account, AccountSharedData}, + solana_instruction::error::InstructionError, std::cell::Ref, }; @@ -64,7 +64,7 @@ where #[cfg(test)] mod tests { - use {super::*, crate::pubkey::Pubkey, solana_account::AccountSharedData}; + use {super::*, solana_pubkey::Pubkey}; #[test] fn test_account_state() { diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index ed07e8c7806b57..e1d9503a75de20 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -58,7 +58,6 @@ pub use solana_program::{ }; #[cfg(feature = "borsh")] pub use solana_program::{borsh, borsh0_10, borsh1}; -pub mod account_utils; pub mod client; pub mod commitment_config; pub mod compute_budget; @@ -108,6 +107,11 @@ pub mod wasm; #[deprecated(since = "2.1.0", note = "Use `solana-account` crate instead")] pub use solana_account as account; +#[deprecated( + since = "2.1.0", + note = "Use `solana_account::state_traits` module instead" +)] +pub use solana_account::state_traits as account_utils; #[deprecated(since = "2.1.0", note = "Use `solana-bn254` crate instead")] pub use solana_bn254 as alt_bn128; #[deprecated(since = "2.1.0", note = "Use `solana-decode-error` crate instead")] From 20460f5fef86da47781f9006afb7c6759b310f6b Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 16 Oct 2024 00:04:30 +0400 Subject: [PATCH 518/529] fix doc link in solana-epoch-schedule (#3175) * fix doc link in solana-epoch-schedule * fmt --- sdk/epoch-schedule/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/epoch-schedule/src/lib.rs b/sdk/epoch-schedule/src/lib.rs index e0d9d1c24e3b30..3ee21acdb9a12e 100644 --- a/sdk/epoch-schedule/src/lib.rs +++ b/sdk/epoch-schedule/src/lib.rs @@ -10,6 +10,8 @@ //! though the length of an epoch does — during the initial launch of //! the chain there is a "warmup" period, where epochs are short, with subsequent //! epochs increasing in slots until they last for [`DEFAULT_SLOTS_PER_EPOCH`]. +//! +//! [`DEFAULT_SLOTS_PER_EPOCH`]: https://docs.rs/solana-clock/latest/solana_clock/constant.DEFAULT_SLOTS_PER_EPOCH.html #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![no_std] #[cfg(feature = "frozen-abi")] From 20aa3be7d7c04c55c82d222788b6d509124dc6fc Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 15 Oct 2024 16:08:13 -0400 Subject: [PATCH 519/529] Rehashing is not allowed to change accounts (#3180) --- runtime/src/bank.rs | 25 +++++++++++++++++++++- runtime/src/bank/tests.rs | 45 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0975a6dbccde23..0a4cb785f2c091 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2869,11 +2869,34 @@ impl Bank { stake_weighted_timestamp } + /// Recalculates the bank hash + /// + /// This is used by ledger-tool when creating a snapshot, which + /// recalculates the bank hash. + /// + /// Note that the account state is *not* allowed to change by rehashing. + /// If it does, this function will panic. + /// If modifying accounts in ledger-tool is needed, create a new bank.
pub fn rehash(&self) { + let get_delta_hash = || { + self.rc + .accounts + .accounts_db + .get_accounts_delta_hash(self.slot()) + }; + let mut hash = self.hash.write().unwrap(); + let curr_accounts_delta_hash = get_delta_hash(); let new = self.hash_internal_state(); + if let Some(curr_accounts_delta_hash) = curr_accounts_delta_hash { + let new_accounts_delta_hash = get_delta_hash().unwrap(); + assert_eq!( + new_accounts_delta_hash, curr_accounts_delta_hash, + "rehashing is not allowed to change the account state", + ); + } if new != *hash { - warn!("Updating bank hash to {}", new); + warn!("Updating bank hash to {new}"); *hash = new; } } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 454aa32215de11..9a8324d58a0165 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -13252,3 +13252,48 @@ fn test_bank_epoch_stakes() { ); } } + +#[test] +fn test_rehash_good() { + let ten_sol = 10 * LAMPORTS_PER_SOL; + let (genesis_config, _mint) = create_genesis_config(ten_sol); + let bank = Bank::new_for_tests(&genesis_config); + + let lamports = 123_456_789; + let account = AccountSharedData::new(lamports, 0, &Pubkey::default()); + let pubkey = Pubkey::new_unique(); + bank.store_account_and_update_capitalization(&pubkey, &account); + + // freeze the bank to trigger hash calculation + bank.freeze(); + + // ensure the bank hash is the same before and after rehashing + let prev_bank_hash = bank.hash(); + bank.rehash(); + let post_bank_hash = bank.hash(); + assert_eq!(post_bank_hash, prev_bank_hash); +} + +#[test] +#[should_panic(expected = "rehashing is not allowed to change the account state")] +fn test_rehash_bad() { + let ten_sol = 10 * LAMPORTS_PER_SOL; + let (genesis_config, _mint) = create_genesis_config(ten_sol); + let bank = Bank::new_for_tests(&genesis_config); + + let mut account = AccountSharedData::new(ten_sol, 0, &Pubkey::default()); + let pubkey = Pubkey::new_unique(); + bank.store_account_and_update_capitalization(&pubkey, &account); + + // freeze the bank to trigger hash calculation + bank.freeze(); + + // change an account, which will cause rehashing to panic + account.checked_add_lamports(ten_sol).unwrap(); + bank.rc + .accounts + .store_accounts_cached((bank.slot(), [(&pubkey, &account)].as_slice())); + + // let the show begin + bank.rehash(); +} From 54887936ce2484127d02d0a624b705c4b4fe6678 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Tue, 15 Oct 2024 14:52:19 -0700 Subject: [PATCH 520/529] svm: test account loader edge cases (#3045) tests intended to ensure account loader v2 conforms to existing behavior --- svm/src/account_loader.rs | 487 +++++++++++++++++++++++++++++++++++++- 1 file changed, 483 insertions(+), 4 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index be4014fa69c4a4..2e47957d7f31dd 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -571,19 +571,21 @@ mod tests { nonce::state::Versions as NonceVersions, solana_compute_budget::{compute_budget::ComputeBudget, compute_budget_limits}, solana_feature_set::FeatureSet, - solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch}, + solana_program_runtime::loaded_programs::{ + ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheForTxBatch, + }, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, - bpf_loader_upgradeable, + bpf_loader, bpf_loader_upgradeable, epoch_schedule::EpochSchedule, hash::Hash, - 
instruction::CompiledInstruction, + instruction::{AccountMeta, CompiledInstruction, Instruction}, message::{ v0::{LoadedAddresses, LoadedMessage}, LegacyMessage, Message, MessageHeader, SanitizedMessage, }, native_loader, - native_token::sol_to_lamports, + native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, nonce, pubkey::Pubkey, rent::Rent, @@ -686,6 +688,28 @@ mod tests { )) } + fn new_unchecked_sanitized_transaction_with_writable_program( + program_id: Pubkey, + fee_payer: Pubkey, + ) -> SanitizedTransaction { + let mut message = Message::new( + &[Instruction::new_with_bytes(program_id, &[], vec![])], + Some(&fee_payer), + ); + message.header.num_readonly_unsigned_accounts = 0; + + let legacy_message = LegacyMessage { + message: Cow::Owned(message), + is_writable_account_cache: vec![true, true], + }; + + SanitizedTransaction::new_for_tests( + SanitizedMessage::Legacy(legacy_message), + vec![Signature::default()], + false, + ) + } + fn load_accounts_aux_test( tx: Transaction, accounts: &[TransactionAccount], @@ -1378,6 +1402,178 @@ mod tests { assert_eq!(result.err(), Some(TransactionError::AccountNotFound)); } + #[test] + fn test_load_transaction_accounts_program_account_executable_bypass() { + // currently, the account loader retrieves read-only non-instruction accounts from the program cache + // it creates a mock AccountSharedData with the executable flag set to true + // however, it does not check whether these accounts are actually executable before doing so + // this affects consensus: a transaction that uses a cached non-executable program executes and fails + // but if the transaction gets the program from accounts-db, it will be dropped during account loading + // this test enforces the current behavior, so that future account loader changes do not break consensus + + let mut mock_bank = TestCallbacks::default(); + let account_keypair = Keypair::new(); + let program_keypair = Keypair::new(); + + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(200); + mock_bank + .accounts_map + .insert(account_keypair.pubkey(), account_data.clone()); + + let mut program_data = AccountSharedData::default(); + program_data.set_lamports(200); + program_data.set_owner(bpf_loader::id()); + mock_bank + .accounts_map + .insert(program_keypair.pubkey(), program_data); + + let mut loader_data = AccountSharedData::default(); + loader_data.set_lamports(200); + loader_data.set_executable(true); + loader_data.set_owner(native_loader::id()); + mock_bank + .accounts_map + .insert(bpf_loader::id(), loader_data.clone()); + mock_bank + .accounts_map + .insert(native_loader::id(), loader_data); + + let mut error_metrics = TransactionErrorMetrics::default(); + let mut loaded_programs = ProgramCacheForTxBatch::default(); + + let transaction = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[Instruction::new_with_bytes( + program_keypair.pubkey(), + &[], + vec![], + )], + Some(&account_keypair.pubkey()), + &[&account_keypair], + Hash::default(), + )); + + let result = load_transaction_accounts( + &mock_bank, + transaction.message(), + LoadedTransactionAccount { + account: account_data.clone(), + ..LoadedTransactionAccount::default() + }, + &ComputeBudgetLimits::default(), + &mut error_metrics, + None, + &FeatureSet::default(), + &RentCollector::default(), + &loaded_programs, + ); + + // without cache, program is invalid + assert_eq!( + result.err(), + Some(TransactionError::InvalidProgramForExecution) + ); + + loaded_programs.replenish( + 
program_keypair.pubkey(), + Arc::new(ProgramCacheEntry::default()), + ); + + let result = load_transaction_accounts( + &mock_bank, + transaction.message(), + LoadedTransactionAccount { + account: account_data.clone(), + ..LoadedTransactionAccount::default() + }, + &ComputeBudgetLimits::default(), + &mut error_metrics, + None, + &FeatureSet::default(), + &RentCollector::default(), + &loaded_programs, + ); + + // with cache, executable flag is bypassed + let mut cached_program = AccountSharedData::default(); + cached_program.set_owner(native_loader::id()); + cached_program.set_executable(true); + + assert_eq!( + result.unwrap(), + LoadedTransactionAccounts { + accounts: vec![ + (account_keypair.pubkey(), account_data.clone()), + (program_keypair.pubkey(), cached_program), + ], + program_indices: vec![vec![1]], + rent: 0, + rent_debits: RentDebits::default(), + loaded_accounts_data_size: 0, + } + ); + + let transaction = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[Instruction::new_with_bytes( + program_keypair.pubkey(), + &[], + vec![AccountMeta::new_readonly(program_keypair.pubkey(), false)], + )], + Some(&account_keypair.pubkey()), + &[&account_keypair], + Hash::default(), + )); + + let result = load_transaction_accounts( + &mock_bank, + transaction.message(), + LoadedTransactionAccount { + account: account_data.clone(), + ..LoadedTransactionAccount::default() + }, + &ComputeBudgetLimits::default(), + &mut error_metrics, + None, + &FeatureSet::default(), + &RentCollector::default(), + &loaded_programs, + ); + + // including program as instruction account bypasses executable bypass + assert_eq!( + result.err(), + Some(TransactionError::InvalidProgramForExecution) + ); + + let transaction = new_unchecked_sanitized_transaction_with_writable_program( + program_keypair.pubkey(), + account_keypair.pubkey(), + ); + + let result = load_transaction_accounts( + &mock_bank, + transaction.message(), + LoadedTransactionAccount { + account: account_data.clone(), + ..LoadedTransactionAccount::default() + }, + &ComputeBudgetLimits::default(), + &mut error_metrics, + None, + &FeatureSet::default(), + &RentCollector::default(), + &loaded_programs, + ); + + // including program as writable bypasses executable bypass + assert_eq!( + result.err(), + Some(TransactionError::InvalidProgramForExecution) + ); + } + #[test] fn test_load_transaction_accounts_program_account_no_data() { let key1 = Keypair::new(); @@ -2214,4 +2410,287 @@ mod tests { assert_eq!(actual_inspected_accounts, expected_inspected_accounts,); } + + #[test] + fn test_load_transaction_accounts_data_sizes() { + let mut mock_bank = TestCallbacks::default(); + + let mut next_size = 1; + let mut make_account = |pubkey, owner, executable| { + let size = next_size; + let account = AccountSharedData::create( + LAMPORTS_PER_SOL, + vec![0; size], + owner, + executable, + u64::MAX, + ); + + mock_bank.accounts_map.insert(pubkey, account.clone()); + + // accounts are counted at most twice + // by multiplying account size by 4, we ensure all totals are unique + next_size *= 4; + + (size as u32, account) + }; + + let (native_loader_size, _) = make_account(native_loader::id(), native_loader::id(), true); + let (bpf_loader_size, _) = make_account(bpf_loader::id(), native_loader::id(), true); + let (upgradeable_loader_size, _) = + make_account(bpf_loader_upgradeable::id(), native_loader::id(), true); + + let program1_keypair = Keypair::new(); + let program1 = program1_keypair.pubkey(); + let (program1_size, _) = 
make_account(program1, bpf_loader::id(), true); + + let program2 = Pubkey::new_unique(); + let (program2_size, _) = make_account(program2, bpf_loader_upgradeable::id(), true); + + let programdata2 = Pubkey::new_unique(); + let (programdata2_size, _) = + make_account(programdata2, bpf_loader_upgradeable::id(), false); + + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + let (fee_payer_size, fee_payer_account) = + make_account(fee_payer, system_program::id(), false); + + let account1 = Pubkey::new_unique(); + let (account1_size, _) = make_account(account1, program1, false); + + let account2 = Pubkey::new_unique(); + let (account2_size, _) = make_account(account2, program2, false); + + let test_transaction_data_size_with_cache = |transaction, cache, expected_size| { + let loaded_transaction_accounts = load_transaction_accounts( + &mock_bank, + &transaction, + LoadedTransactionAccount { + account: fee_payer_account.clone(), + loaded_size: fee_payer_size as usize, + rent_collected: 0, + }, + &ComputeBudgetLimits::default(), + &mut TransactionErrorMetrics::default(), + None, + &FeatureSet::default(), + &RentCollector::default(), + &cache, + ) + .unwrap(); + + assert_eq!( + loaded_transaction_accounts.loaded_accounts_data_size, + expected_size + ); + }; + + let test_data_size_with_cache = |instructions: Vec<_>, cache, expected_size| { + let transaction = SanitizedTransaction::from_transaction_for_tests( + Transaction::new_signed_with_payer( + &instructions, + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + ), + ); + + test_transaction_data_size_with_cache(transaction, cache, expected_size) + }; + + for account_meta in [AccountMeta::new, AccountMeta::new_readonly] { + let test_data_size = |instructions, expected_size| { + test_data_size_with_cache( + instructions, + ProgramCacheForTxBatch::default(), + expected_size, + ) + }; + + // one program plus loader + let ixns = vec![Instruction::new_with_bytes(program1, &[], vec![])]; + test_data_size(ixns, program1_size + bpf_loader_size + fee_payer_size); + + // two programs, two loaders, two accounts + let ixns = vec![ + Instruction::new_with_bytes(program1, &[], vec![account_meta(account1, false)]), + Instruction::new_with_bytes(program2, &[], vec![account_meta(account2, false)]), + ]; + test_data_size( + ixns, + account1_size + + account2_size + + program1_size + + program2_size + + bpf_loader_size + + upgradeable_loader_size + + fee_payer_size, + ); + + // ordinary owners not counted + let ixns = vec![Instruction::new_with_bytes( + program1, + &[], + vec![account_meta(account2, false)], + )]; + test_data_size( + ixns, + account2_size + program1_size + bpf_loader_size + fee_payer_size, + ); + + // program and loader counted once + let ixns = vec![ + Instruction::new_with_bytes(program1, &[], vec![]), + Instruction::new_with_bytes(program1, &[], vec![]), + ]; + test_data_size(ixns, program1_size + bpf_loader_size + fee_payer_size); + + // native loader not counted if loader + let ixns = vec![Instruction::new_with_bytes(bpf_loader::id(), &[], vec![])]; + test_data_size(ixns, bpf_loader_size + fee_payer_size); + + // native loader counted if instruction + let ixns = vec![Instruction::new_with_bytes( + bpf_loader::id(), + &[], + vec![account_meta(native_loader::id(), false)], + )]; + test_data_size(ixns, bpf_loader_size + native_loader_size + fee_payer_size); + + // native loader counted if invoked + let ixns = vec![Instruction::new_with_bytes( + native_loader::id(), + &[], + vec![], + )]; + 
test_data_size(ixns, native_loader_size + fee_payer_size); + + // native loader counted once if invoked and instruction + let ixns = vec![Instruction::new_with_bytes( + native_loader::id(), + &[], + vec![account_meta(native_loader::id(), false)], + )]; + test_data_size(ixns, native_loader_size + fee_payer_size); + + // loader counted twice if included in instruction + let ixns = vec![Instruction::new_with_bytes( + program1, + &[], + vec![account_meta(bpf_loader::id(), false)], + )]; + test_data_size(ixns, program1_size + bpf_loader_size * 2 + fee_payer_size); + + // cover that case with multiple loaders to be sure + let ixns = vec![ + Instruction::new_with_bytes( + program1, + &[], + vec![ + account_meta(bpf_loader::id(), false), + account_meta(bpf_loader_upgradeable::id(), false), + ], + ), + Instruction::new_with_bytes(program2, &[], vec![account_meta(account1, false)]), + Instruction::new_with_bytes( + bpf_loader_upgradeable::id(), + &[], + vec![account_meta(account1, false)], + ), + ]; + test_data_size( + ixns, + account1_size + + program1_size + + program2_size + + bpf_loader_size * 2 + + upgradeable_loader_size * 2 + + fee_payer_size, + ); + + // loader counted twice even if included first + let ixns = vec![ + Instruction::new_with_bytes(bpf_loader::id(), &[], vec![]), + Instruction::new_with_bytes(program1, &[], vec![]), + ]; + test_data_size(ixns, program1_size + bpf_loader_size * 2 + fee_payer_size); + + // fee-payer counted once + let ixns = vec![Instruction::new_with_bytes( + program1, + &[], + vec![account_meta(fee_payer, false)], + )]; + test_data_size(ixns, program1_size + bpf_loader_size + fee_payer_size); + + // edge cases involving program cache + let mut program_cache = ProgramCacheForTxBatch::default(); + + let program2_entry = ProgramCacheEntry { + account_size: (program2_size + programdata2_size) as usize, + account_owner: ProgramCacheEntryOwner::LoaderV3, + ..ProgramCacheEntry::default() + }; + program_cache.replenish(program2, Arc::new(program2_entry)); + + // normal function call uses the combined cache size + let ixns = vec![Instruction::new_with_bytes(program2, &[], vec![])]; + test_data_size_with_cache( + ixns, + program_cache.clone(), + program2_size + programdata2_size + upgradeable_loader_size + fee_payer_size, + ); + + // program as instruction account bypasses the cache + let ixns = vec![Instruction::new_with_bytes( + program2, + &[], + vec![account_meta(program2, false)], + )]; + test_data_size_with_cache( + ixns, + program_cache.clone(), + program2_size + upgradeable_loader_size + fee_payer_size, + ); + + // programdata as instruction account double-counts it + let ixns = vec![Instruction::new_with_bytes( + program2, + &[], + vec![account_meta(programdata2, false)], + )]; + test_data_size_with_cache( + ixns, + program_cache.clone(), + program2_size + programdata2_size * 2 + upgradeable_loader_size + fee_payer_size, + ); + + // both as instruction accounts, for completeness + let ixns = vec![Instruction::new_with_bytes( + program2, + &[], + vec![ + account_meta(program2, false), + account_meta(programdata2, false), + ], + )]; + test_data_size_with_cache( + ixns, + program_cache.clone(), + program2_size + programdata2_size + upgradeable_loader_size + fee_payer_size, + ); + + // writable program bypasses the cache + let tx = new_unchecked_sanitized_transaction_with_writable_program(program2, fee_payer); + test_transaction_data_size_with_cache( + tx, + program_cache.clone(), + program2_size + upgradeable_loader_size + fee_payer_size, + ); + + // NOTE for the 
new loader we *must* also test arbitrary permutations of the cache transactions + // to ensure that the batched loading is overridden on a tx-per-tx basis + } + } } From a458839ff1114c614ca416f17c4bf2c2da9c9af6 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Tue, 15 Oct 2024 19:33:54 -0400 Subject: [PATCH 521/529] Use VecDeque for best_ancient_slots_to_shrink to avoid vec reversing (#3187) --- accounts-db/src/accounts_db.rs | 14 ++++++-------- accounts-db/src/ancient_append_vecs.rs | 13 +++++-------- 2 files changed, 11 insertions(+), 16 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e2353b99fce81d..b16d5700157609 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -97,7 +97,7 @@ use { std::{ borrow::Cow, boxed::Box, - collections::{BTreeSet, HashMap, HashSet}, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, fs, hash::{Hash as StdHash, Hasher as StdHasher}, io::Result as IoResult, @@ -1508,12 +1508,11 @@ pub struct AccountsDb { pub is_experimental_accumulator_hash_enabled: AtomicBool, /// These are the ancient storages that could be valuable to - /// shrink, sorted by amount of dead bytes. The elements - /// are popped from the end of the vector, hence the sorting is - /// expected to be from the smallest dead bytes to the largest. + /// shrink, sorted by amount of dead bytes. The elements + /// are sorted from the largest dead bytes to the smallest. /// Members are Slot and capacity. If capacity is smaller, then /// that means the storage was already shrunk. - pub(crate) best_ancient_slots_to_shrink: RwLock>, + pub(crate) best_ancient_slots_to_shrink: RwLock>, } /// results from 'split_storages_ancient' @@ -4442,11 +4441,10 @@ impl AccountsDb { }); // If there are too few slots to shrink, add an ancient slot - // for shrinking. The best ancient slots to shrink are - // assumed to be in reverse order. + // for shrinking. if shrink_slots.len() < SHRINK_INSERT_ANCIENT_THRESHOLD { let mut ancients = self.best_ancient_slots_to_shrink.write().unwrap(); - while let Some((slot, capacity)) = ancients.pop() { + while let Some((slot, capacity)) = ancients.pop_front() { if let Some(store) = self.storage.get_slot_storage_entry(slot) { if !shrink_slots.contains(&slot) && capacity == store.capacity() diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 5d645c9560cc39..148c543b4c6302 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -21,7 +21,7 @@ use { solana_measure::measure_us, solana_sdk::clock::Slot, std::{ - collections::HashMap, + collections::{HashMap, VecDeque}, num::{NonZeroU64, Saturating}, sync::{atomic::Ordering, Arc, Mutex}, }, @@ -81,7 +81,7 @@ struct AncientSlotInfos { total_alive_bytes: Saturating, /// slots that have dead accounts and thus the corresponding slot /// storages can be shrunk - best_slots_to_shrink: Vec<(Slot, u64)>, + best_slots_to_shrink: VecDeque<(Slot, u64)>, } impl AncientSlotInfos { @@ -183,10 +183,11 @@ impl AncientSlotInfos { // At this point self.shrink_indexes have been sorted by the // largest amount of dead bytes first in the corresponding // storages. 
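The gain from swapping `Vec` for `VecDeque` here can be illustrated with a minimal standalone sketch; the slot/capacity values below are invented, and plain `u64` tuples stand in for the real `(Slot, u64)` entries:

    use std::collections::VecDeque;

    fn main() {
        // Shrink candidates arrive already sorted, largest dead bytes first.
        let candidates = [(10u64, 900u64), (11, 500), (12, 100)];

        // Vec-based scheme: push in sorted order, then reverse the whole vector
        // so that pop() from the tail yields the largest-dead-bytes slot first.
        let mut as_vec: Vec<(u64, u64)> = candidates.to_vec();
        as_vec.reverse();
        assert_eq!(as_vec.pop(), Some((10, 900)));

        // VecDeque scheme: push_back in sorted order and pop_front from the
        // head, with no reversal pass at all.
        let mut as_deque: VecDeque<(u64, u64)> = candidates.iter().copied().collect();
        assert_eq!(as_deque.pop_front(), Some((10, 900)));
    }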
- self.best_slots_to_shrink = Vec::with_capacity(self.shrink_indexes.len()); + self.best_slots_to_shrink = VecDeque::with_capacity(self.shrink_indexes.len()); for info_index in &self.shrink_indexes { let info = &mut self.all_infos[*info_index]; - self.best_slots_to_shrink.push((info.slot, info.capacity)); + self.best_slots_to_shrink + .push_back((info.slot, info.capacity)); if bytes_to_shrink_due_to_ratio.0 >= threshold_bytes { // we exceeded the amount to shrink due to alive ratio, so don't shrink this one just due to 'should_shrink' // It MAY be shrunk based on total capacity still. @@ -196,10 +197,6 @@ impl AncientSlotInfos { bytes_to_shrink_due_to_ratio += info.alive_bytes; } } - // Reverse the vector so that the elements with the largest - // dead bytes are popped first when used to extend the - // shrinking candidates. - self.best_slots_to_shrink.reverse(); } /// after this function, only slots that were chosen to shrink are marked with From a8aef04122068ec36a7af0721e36ee58efa0bef2 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 16 Oct 2024 12:39:37 +0400 Subject: [PATCH 522/529] Extract precompile-error crate (#2300) * extract precompile-error crate * update PrecompileError usage * remove thiserror from precompile-error crate * fmt * remove num-derive * fix imports after rebase * sort deps * sort deps * fmt --- Cargo.lock | 10 +++++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 10 +++++ sdk/Cargo.toml | 3 ++ sdk/precompile-error/Cargo.toml | 17 +++++++ sdk/precompile-error/src/lib.rs | 76 ++++++++++++++++++++++++++++++++ sdk/src/ed25519_instruction.rs | 3 +- sdk/src/precompiles.rs | 31 ++----------- sdk/src/secp256k1_instruction.rs | 3 +- 9 files changed, 126 insertions(+), 29 deletions(-) create mode 100644 sdk/precompile-error/Cargo.toml create mode 100644 sdk/precompile-error/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2c94a4d9d482e5..2acd2130e695de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7269,6 +7269,14 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-precompile-error" +version = "2.1.0" +dependencies = [ + "num-traits", + "solana-decode-error", +] + [[package]] name = "solana-program" version = "2.1.0" @@ -7914,8 +7922,10 @@ dependencies = [ "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-instruction", "solana-logger", "solana-native-token", + "solana-precompile-error", "solana-program", "solana-program-memory", "solana-pubkey", diff --git a/Cargo.toml b/Cargo.toml index 21d9b16dbd069c..03e897c488d865 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,6 +119,7 @@ members = [ "sdk/native-token", "sdk/package-metadata", "sdk/package-metadata-macro", + "sdk/precompile-error", "sdk/program", "sdk/program-entrypoint", "sdk/program-error", @@ -444,6 +445,7 @@ solana-package-metadata-macro = { path = "sdk/package-metadata-macro", version = solana-perf = { path = "perf", version = "=2.1.0" } solana-poh = { path = "poh", version = "=2.1.0" } solana-poseidon = { path = "poseidon", version = "=2.1.0" } +solana-precompile-error = { path = "sdk/precompile-error", version = "=2.1.0" } solana-program = { path = "sdk/program", version = "=2.1.0", default-features = false } solana-program-error = { path = "sdk/program-error", version = "=2.1.0" } solana-program-memory = { path = "sdk/program-memory", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index da726ceaeca3a8..7e987d12543ce6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5663,6 +5663,14 @@ dependencies = [ 
"thiserror", ] +[[package]] +name = "solana-precompile-error" +version = "2.1.0" +dependencies = [ + "num-traits", + "solana-decode-error", +] + [[package]] name = "solana-program" version = "2.1.0" @@ -6675,7 +6683,9 @@ dependencies = [ "solana-decode-error", "solana-derivation-path", "solana-feature-set", + "solana-instruction", "solana-native-token", + "solana-precompile-error", "solana-program", "solana-program-memory", "solana-pubkey", diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 2ab85402d1c3b1..4872798563020d 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -34,6 +34,7 @@ full = [ "sha3", "digest", "solana-pubkey/rand", + "dep:solana-precompile-error" ] borsh = ["dep:borsh", "solana-program/borsh", "solana-secp256k1-recover/borsh"] dev-context-only-utils = ["qualifier_attr", "solana-account/dev-context-only-utils"] @@ -96,7 +97,9 @@ solana-frozen-abi = { workspace = true, optional = true, features = [ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ "frozen-abi", ] } +solana-instruction = { workspace = true } solana-native-token = { workspace = true } +solana-precompile-error = { workspace = true, optional = true } solana-program = { workspace = true } solana-program-memory = { workspace = true } solana-pubkey = { workspace = true, default-features = false, features = ["std"] } diff --git a/sdk/precompile-error/Cargo.toml b/sdk/precompile-error/Cargo.toml new file mode 100644 index 00000000000000..06ba26c958c940 --- /dev/null +++ b/sdk/precompile-error/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "solana-precompile-error" +description = "Solana PrecompileError type" +documentation = "https://docs.rs/solana-precompile-error" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +num-traits = { workspace = true } +solana-decode-error = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/precompile-error/src/lib.rs b/sdk/precompile-error/src/lib.rs new file mode 100644 index 00000000000000..7bf6195866794d --- /dev/null +++ b/sdk/precompile-error/src/lib.rs @@ -0,0 +1,76 @@ +/// Precompile errors +use {core::fmt, solana_decode_error::DecodeError}; + +/// Precompile errors +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PrecompileError { + InvalidPublicKey, + InvalidRecoveryId, + InvalidSignature, + InvalidDataOffsets, + InvalidInstructionDataSize, +} + +impl num_traits::FromPrimitive for PrecompileError { + #[inline] + fn from_i64(n: i64) -> Option { + if n == PrecompileError::InvalidPublicKey as i64 { + Some(PrecompileError::InvalidPublicKey) + } else if n == PrecompileError::InvalidRecoveryId as i64 { + Some(PrecompileError::InvalidRecoveryId) + } else if n == PrecompileError::InvalidSignature as i64 { + Some(PrecompileError::InvalidSignature) + } else if n == PrecompileError::InvalidDataOffsets as i64 { + Some(PrecompileError::InvalidDataOffsets) + } else if n == PrecompileError::InvalidInstructionDataSize as i64 { + Some(PrecompileError::InvalidInstructionDataSize) + } else { + None + } + } + #[inline] + fn from_u64(n: u64) -> Option { + Self::from_i64(n as i64) + } +} + +impl num_traits::ToPrimitive for PrecompileError { + #[inline] + fn to_i64(&self) -> Option { + Some(match *self { + PrecompileError::InvalidPublicKey => PrecompileError::InvalidPublicKey as i64, + PrecompileError::InvalidRecoveryId => 
PrecompileError::InvalidRecoveryId as i64, + PrecompileError::InvalidSignature => PrecompileError::InvalidSignature as i64, + PrecompileError::InvalidDataOffsets => PrecompileError::InvalidDataOffsets as i64, + PrecompileError::InvalidInstructionDataSize => { + PrecompileError::InvalidInstructionDataSize as i64 + } + }) + } + #[inline] + fn to_u64(&self) -> Option { + self.to_i64().map(|x| x as u64) + } +} + +impl std::error::Error for PrecompileError {} + +impl fmt::Display for PrecompileError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + PrecompileError::InvalidPublicKey => f.write_str("public key is not valid"), + PrecompileError::InvalidRecoveryId => f.write_str("id is not valid"), + PrecompileError::InvalidSignature => f.write_str("signature is not valid"), + PrecompileError::InvalidDataOffsets => f.write_str("offset not valid"), + PrecompileError::InvalidInstructionDataSize => { + f.write_str("instruction is incorrect size") + } + } + } +} + +impl DecodeError for PrecompileError { + fn type_of() -> &'static str { + "PrecompileError" + } +} diff --git a/sdk/src/ed25519_instruction.rs b/sdk/src/ed25519_instruction.rs index 45a8f1c2c2d932..688ef0dade9d39 100644 --- a/sdk/src/ed25519_instruction.rs +++ b/sdk/src/ed25519_instruction.rs @@ -5,11 +5,12 @@ #![cfg(feature = "full")] use { - crate::{instruction::Instruction, precompiles::PrecompileError}, bytemuck::bytes_of, bytemuck_derive::{Pod, Zeroable}, ed25519_dalek::{ed25519::signature::Signature, Signer, Verifier}, solana_feature_set::{ed25519_precompile_verify_strict, FeatureSet}, + solana_instruction::Instruction, + solana_precompile_error::PrecompileError, }; pub const PUBKEY_SERIALIZED_SIZE: usize = 32; diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs index 087066e1b23be5..4812b81e347717 100644 --- a/sdk/src/precompiles.rs +++ b/sdk/src/precompiles.rs @@ -2,36 +2,13 @@ #![cfg(feature = "full")] +#[deprecated(since = "2.1.0", note = "Use `solana-precompile-error` crate instead.")] +pub use solana_precompile_error::PrecompileError; use { - lazy_static::lazy_static, - num_derive::{FromPrimitive, ToPrimitive}, - solana_decode_error::DecodeError, - solana_feature_set::FeatureSet, - solana_program::{instruction::CompiledInstruction, pubkey::Pubkey}, - thiserror::Error, + lazy_static::lazy_static, solana_feature_set::FeatureSet, + solana_program::instruction::CompiledInstruction, solana_pubkey::Pubkey, }; -/// Precompile errors -#[derive(Error, Debug, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)] -pub enum PrecompileError { - #[error("public key is not valid")] - InvalidPublicKey, - #[error("id is not valid")] - InvalidRecoveryId, - #[error("signature is not valid")] - InvalidSignature, - #[error("offset not valid")] - InvalidDataOffsets, - #[error("instruction is incorrect size")] - InvalidInstructionDataSize, -} - -impl DecodeError for PrecompileError { - fn type_of() -> &'static str { - "PrecompileError" - } -} - /// All precompiled programs must implement the `Verify` function pub type Verify = fn(&[u8], &[&[u8]], &FeatureSet) -> std::result::Result<(), PrecompileError>; diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index af7681672ba147..ee5751b7cf8ef9 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -788,10 +788,11 @@ #![cfg(feature = "full")] use { - crate::{instruction::Instruction, precompiles::PrecompileError}, digest::Digest, serde_derive::{Deserialize, Serialize}, solana_feature_set::FeatureSet, + 
solana_instruction::Instruction, + solana_precompile_error::PrecompileError, }; pub const HASHED_PUBKEY_SERIALIZED_SIZE: usize = 20; From 6a70b6c3906c9a294177c3d1d5b7b5e525c57208 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 16 Oct 2024 08:58:56 -0500 Subject: [PATCH 523/529] TransactionCost - hold transaction reference (#3162) --- Cargo.lock | 1 + core/Cargo.toml | 5 +- .../forward_packet_batches_by_accounts.rs | 36 ++- core/src/banking_stage/qos_service.rs | 46 +++- cost-model/Cargo.toml | 3 + cost-model/benches/cost_tracker.rs | 45 +++- cost-model/src/cost_model.rs | 232 ++++++++-------- cost-model/src/cost_tracker.rs | 255 +++++++++--------- cost-model/src/transaction_cost.rs | 165 +++++++----- 9 files changed, 433 insertions(+), 355 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2acd2130e695de..0ce1971cb71b8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6495,6 +6495,7 @@ dependencies = [ "rand 0.8.5", "solana-builtins-default-costs", "solana-compute-budget", + "solana-cost-model", "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", diff --git a/core/Cargo.toml b/core/Cargo.toml index 4d0797908627f5..344d0360d7337c 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -105,6 +105,7 @@ serde_json = { workspace = true } serial_test = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-core = { path = ".", features = ["dev-context-only-utils"] } +solana-cost-model = { workspace = true, features = ["dev-context-only-utils"] } solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } @@ -123,9 +124,7 @@ test-case = { workspace = true } sysctl = { workspace = true } [features] -dev-context-only-utils = [ - "solana-runtime/dev-context-only-utils", -] +dev-context-only-utils = ["solana-runtime/dev-context-only-utils"] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", diff --git a/core/src/banking_stage/forward_packet_batches_by_accounts.rs b/core/src/banking_stage/forward_packet_batches_by_accounts.rs index 1d86cfb9753b1b..67b323c2876a18 100644 --- a/core/src/banking_stage/forward_packet_batches_by_accounts.rs +++ b/core/src/banking_stage/forward_packet_batches_by_accounts.rs @@ -9,6 +9,7 @@ use { solana_feature_set::FeatureSet, solana_perf::packet::Packet, solana_sdk::transaction::SanitizedTransaction, + solana_svm_transaction::svm_message::SVMMessage, std::sync::Arc, }; @@ -146,7 +147,7 @@ impl ForwardPacketBatchesByAccounts { // put into batch #3 to satisfy all batch limits. 
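The batching rule sketched in the comment above reduces to taking the maximum of the batch indices implied by each limit. A toy example of that reduction, using invented limits and running costs rather than the actual `ForwardPacketBatchesByAccounts` fields or its index helper:

    fn main() {
        // Hypothetical per-batch limits and the running costs after adding
        // the transaction's cost.
        let (batch_block_limit, batch_account_limit) = (100u64, 40u64);
        let (updated_block_cost, updated_account_cost) = (250u64, 90u64);

        // Zero-based batch index implied by each limit: a running cost of 250
        // against a per-batch limit of 100 points at batch 2, as does 90
        // against 40.
        let by_block = (updated_block_cost.saturating_sub(1) / batch_block_limit) as usize;
        let by_account = (updated_account_cost.saturating_sub(1) / batch_account_limit) as usize;

        // The transaction lands in the first batch that satisfies every
        // limit at once, i.e. the maximum of the per-limit indices.
        assert_eq!(by_block.max(by_account), 2);
    }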
fn get_batch_index_by_updated_costs( &self, - tx_cost: &TransactionCost, + tx_cost: &TransactionCost, updated_costs: &UpdatedCosts, ) -> usize { let Some(batch_index_by_block_limit) = @@ -170,11 +171,14 @@ mod tests { use { super::*, crate::banking_stage::unprocessed_packet_batches::DeserializedPacket, - solana_cost_model::transaction_cost::UsageCostDetails, + solana_cost_model::transaction_cost::{UsageCostDetails, WritableKeysTransaction}, solana_feature_set::FeatureSet, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, message::Message, pubkey::Pubkey, - system_instruction, transaction::Transaction, + compute_budget::ComputeBudgetInstruction, + message::{Message, TransactionSignatureDetails}, + pubkey::Pubkey, + system_instruction, + transaction::Transaction, }, }; @@ -206,6 +210,21 @@ mod tests { (sanitized_transaction, deserialized_packet, limit_ratio) } + fn zero_transaction_cost() -> TransactionCost<'static, WritableKeysTransaction> { + static DUMMY_TRANSACTION: WritableKeysTransaction = WritableKeysTransaction(vec![]); + + TransactionCost::Transaction(UsageCostDetails { + transaction: &DUMMY_TRANSACTION, + signature_cost: 0, + write_lock_cost: 0, + data_bytes_cost: 0, + programs_execution_cost: 0, + loaded_accounts_data_size_cost: 0, + allocated_accounts_data_size: 0, + signature_details: TransactionSignatureDetails::new(0, 0, 0), + }) + } + #[test] fn test_try_add_packet_to_multiple_batches() { // setup two transactions, one has high priority that writes to hot account, the @@ -351,8 +370,9 @@ mod tests { ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); forward_packet_batches_by_accounts.batch_vote_limit = test_cost + 1; + let dummy_transaction = WritableKeysTransaction(vec![]); let transaction_cost = TransactionCost::SimpleVote { - writable_accounts: vec![], + transaction: &dummy_transaction, }; assert_eq!( 0, @@ -382,7 +402,7 @@ mod tests { ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); forward_packet_batches_by_accounts.batch_block_limit = test_cost + 1; - let transaction_cost = TransactionCost::Transaction(UsageCostDetails::default()); + let transaction_cost = zero_transaction_cost(); assert_eq!( 0, forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( @@ -411,7 +431,7 @@ mod tests { ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); forward_packet_batches_by_accounts.batch_account_limit = test_cost + 1; - let transaction_cost = TransactionCost::Transaction(UsageCostDetails::default()); + let transaction_cost = zero_transaction_cost(); assert_eq!( 0, forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( @@ -445,7 +465,7 @@ mod tests { forward_packet_batches_by_accounts.batch_vote_limit = test_cost / 2 + 1; forward_packet_batches_by_accounts.batch_account_limit = test_cost / 3 + 1; - let transaction_cost = TransactionCost::Transaction(UsageCostDetails::default()); + let transaction_cost = zero_transaction_cost(); assert_eq!( 2, forward_packet_batches_by_accounts.get_batch_index_by_updated_costs( diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 21e19be6f0ec52..0d9b2f02a32ee9 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -16,6 +16,7 @@ use { saturating_add_assign, transaction::{self, SanitizedTransaction, TransactionError}, }, + solana_svm_transaction::svm_message::SVMMessage, std::sync::atomic::{AtomicU64, Ordering}, }; @@ -39,12 +40,15 @@ impl QosService { /// include in the slot, and accumulate 
costs in the cost tracker. /// Returns a vector of results containing selected transaction costs, and the number of /// transactions that were *NOT* selected. - pub fn select_and_accumulate_transaction_costs( + pub fn select_and_accumulate_transaction_costs<'a>( &self, bank: &Bank, - transactions: &[SanitizedTransaction], + transactions: &'a [SanitizedTransaction], pre_results: impl Iterator>, - ) -> (Vec>, u64) { + ) -> ( + Vec>>, + u64, + ) { let transaction_costs = self.compute_transaction_costs(&bank.feature_set, transactions.iter(), pre_results); let (transactions_qos_cost_results, num_included) = self.select_transactions_per_cost( @@ -71,7 +75,7 @@ impl QosService { feature_set: &FeatureSet, transactions: impl Iterator, pre_results: impl Iterator>, - ) -> Vec> { + ) -> Vec>> { let mut compute_cost_time = Measure::start("compute_cost_time"); let txs_costs: Vec<_> = transactions .zip(pre_results) @@ -95,9 +99,14 @@ impl QosService { fn select_transactions_per_cost<'a>( &self, transactions: impl Iterator, - transactions_costs: impl Iterator>, + transactions_costs: impl Iterator< + Item = transaction::Result>, + >, bank: &Bank, - ) -> (Vec>, usize) { + ) -> ( + Vec>>, + usize, + ) { let mut cost_tracking_time = Measure::start("cost_tracking_time"); let mut cost_tracker = bank.write_cost_tracker().unwrap(); let mut num_included = 0; @@ -153,7 +162,9 @@ impl QosService { /// Removes transaction costs from the cost tracker if not committed or recorded, or /// updates the transaction costs for committed transactions. pub fn remove_or_update_costs<'a>( - transaction_cost_results: impl Iterator>, + transaction_cost_results: impl Iterator< + Item = &'a transaction::Result>, + >, transaction_committed_status: Option<&Vec>, bank: &Bank, ) { @@ -172,7 +183,9 @@ impl QosService { /// For recorded transactions, remove units reserved by uncommitted transaction, or update /// units for committed transactions. fn remove_or_update_recorded_transaction_costs<'a>( - transaction_cost_results: impl Iterator>, + transaction_cost_results: impl Iterator< + Item = &'a transaction::Result>, + >, transaction_committed_status: &Vec, bank: &Bank, ) { @@ -210,7 +223,9 @@ impl QosService { /// Remove reserved units for transaction batch that unsuccessfully recorded. fn remove_unrecorded_transaction_costs<'a>( - transaction_cost_results: impl Iterator>, + transaction_cost_results: impl Iterator< + Item = &'a transaction::Result>, + >, bank: &Bank, ) { let mut cost_tracker = bank.write_cost_tracker().unwrap(); @@ -326,8 +341,8 @@ impl QosService { // rollup transaction cost details, eg signature_cost, write_lock_cost, data_bytes_cost and // execution_cost from the batch of transactions selected for block. 
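The roll-up described in the comment above amounts to summing each cost component across the `Ok` results while skipping transactions that were not selected. A self-contained sketch under that assumption, with invented `(signature, write_lock, data_bytes, execution)` tuples standing in for the real cost types:

    fn main() {
        // Hypothetical per-transaction cost tuples; Err entries stand in for
        // transactions that were not selected for the block.
        let cost_results: [Result<(u64, u64, u64, u64), ()>; 3] =
            [Ok((720, 300, 5, 2_000)), Err(()), Ok((720, 600, 8, 9_000))];

        // Accumulate the per-component costs of the Ok results into batch totals.
        let mut totals = (0u64, 0u64, 0u64, 0u64);
        for cost in cost_results.iter().flatten() {
            totals.0 = totals.0.saturating_add(cost.0);
            totals.1 = totals.1.saturating_add(cost.1);
            totals.2 = totals.2.saturating_add(cost.2);
            totals.3 = totals.3.saturating_add(cost.3);
        }
        assert_eq!(totals, (1_440, 900, 13, 11_000));
    }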
- fn accumulate_batched_transaction_costs<'a>( - transactions_costs: impl Iterator>, + fn accumulate_batched_transaction_costs<'a, Tx: SVMMessage + 'a>( + transactions_costs: impl Iterator>>, ) -> BatchedTransactionDetails { let mut batched_transaction_details = BatchedTransactionDetails::default(); transactions_costs.for_each(|cost| match cost { @@ -609,10 +624,11 @@ mod tests { use { super::*, itertools::Itertools, - solana_cost_model::transaction_cost::UsageCostDetails, + solana_cost_model::transaction_cost::{UsageCostDetails, WritableKeysTransaction}, solana_runtime::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_sdk::{ hash::Hash, + message::TransactionSignatureDetails, signature::{Keypair, Signer}, system_transaction, }, @@ -935,15 +951,19 @@ mod tests { let programs_execution_cost = 10; let num_txs = 4; + let dummy_transaction = WritableKeysTransaction(vec![]); let tx_cost_results: Vec<_> = (0..num_txs) .map(|n| { if n % 2 == 0 { Ok(TransactionCost::Transaction(UsageCostDetails { + transaction: &dummy_transaction, signature_cost, write_lock_cost, data_bytes_cost, programs_execution_cost, - ..UsageCostDetails::default() + loaded_accounts_data_size_cost: 0, + allocated_accounts_data_size: 0, + signature_details: TransactionSignatureDetails::new(0, 0, 0), })) } else { Err(TransactionError::WouldExceedMaxBlockCostLimit) diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 54564e1e0dd9ca..56948b20462a27 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -35,6 +35,8 @@ name = "solana_cost_model" [dev-dependencies] itertools = { workspace = true } rand = "0.8.5" +# See order-crates-for-publishing.py for using this unusual `path = "."` +solana-cost-model = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-system-program = { workspace = true } @@ -45,6 +47,7 @@ test-case = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [features] +dev-context-only-utils = [] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", diff --git a/cost-model/benches/cost_tracker.rs b/cost-model/benches/cost_tracker.rs index 0c41ecc4f37cf3..a7b8b107d09a8d 100644 --- a/cost-model/benches/cost_tracker.rs +++ b/cost-model/benches/cost_tracker.rs @@ -4,15 +4,15 @@ use { itertools::Itertools, solana_cost_model::{ cost_tracker::CostTracker, - transaction_cost::{TransactionCost, UsageCostDetails}, + transaction_cost::{TransactionCost, UsageCostDetails, WritableKeysTransaction}, }, - solana_sdk::pubkey::Pubkey, + solana_sdk::{message::TransactionSignatureDetails, pubkey::Pubkey}, test::Bencher, }; struct BenchSetup { cost_tracker: CostTracker, - tx_costs: Vec, + transactions: Vec, } fn setup(num_transactions: usize, contentious_transactions: bool) -> BenchSetup { @@ -22,36 +22,54 @@ fn setup(num_transactions: usize, contentious_transactions: bool) -> BenchSetup let max_accounts_per_tx = 128; let pubkey = Pubkey::new_unique(); - let tx_costs = (0..num_transactions) + let transactions = (0..num_transactions) .map(|_| { - let mut usage_cost_details = UsageCostDetails::default(); + let mut writable_accounts = Vec::with_capacity(max_accounts_per_tx); (0..max_accounts_per_tx).for_each(|_| { let writable_account_key = if contentious_transactions { pubkey } else { Pubkey::new_unique() }; - usage_cost_details - .writable_accounts - .push(writable_account_key) + writable_accounts.push(writable_account_key) }); - 
usage_cost_details.programs_execution_cost = 9999; - TransactionCost::Transaction(usage_cost_details) + WritableKeysTransaction(writable_accounts) }) .collect_vec(); BenchSetup { cost_tracker, - tx_costs, + transactions, } } +fn get_costs( + transactions: &[WritableKeysTransaction], +) -> Vec> { + transactions + .iter() + .map(|transaction| { + TransactionCost::Transaction(UsageCostDetails { + transaction, + signature_cost: 0, + write_lock_cost: 0, + data_bytes_cost: 0, + programs_execution_cost: 9999, + loaded_accounts_data_size_cost: 0, + allocated_accounts_data_size: 0, + signature_details: TransactionSignatureDetails::new(0, 0, 0), + }) + }) + .collect_vec() +} + #[bench] fn bench_cost_tracker_non_contentious_transaction(bencher: &mut Bencher) { let BenchSetup { mut cost_tracker, - tx_costs, + transactions, } = setup(1024, false); + let tx_costs = get_costs(&transactions); bencher.iter(|| { for tx_cost in tx_costs.iter() { @@ -67,8 +85,9 @@ fn bench_cost_tracker_non_contentious_transaction(bencher: &mut Bencher) { fn bench_cost_tracker_contentious_transaction(bencher: &mut Bencher) { let BenchSetup { mut cost_tracker, - tx_costs, + transactions, } = setup(1024, true); + let tx_costs = get_costs(&transactions); bencher.iter(|| { for tx_cost in tx_costs.iter() { diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index c1a36a7ac15e2b..6fb9eee2d86158 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -7,7 +7,6 @@ use { crate::{block_cost_limits::*, transaction_cost::*}, - log::*, solana_builtins_default_costs::BUILTIN_INSTRUCTION_COSTS, solana_compute_budget::compute_budget_limits::{ DEFAULT_HEAP_COST, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, @@ -19,6 +18,7 @@ use { compute_budget::{self, ComputeBudgetInstruction}, fee::FeeStructure, instruction::CompiledInstruction, + message::TransactionSignatureDetails, program_utils::limited_deserialize, pubkey::Pubkey, saturating_add_assign, @@ -42,70 +42,82 @@ enum SystemProgramAccountAllocation { } impl CostModel { - pub fn calculate_cost( - transaction: &SanitizedTransaction, + pub fn calculate_cost<'a>( + transaction: &'a SanitizedTransaction, feature_set: &FeatureSet, - ) -> TransactionCost { + ) -> TransactionCost<'a, SanitizedTransaction> { if transaction.is_simple_vote_transaction() { - TransactionCost::SimpleVote { - writable_accounts: Self::get_writable_accounts(transaction), - } + TransactionCost::SimpleVote { transaction } } else { - let mut tx_cost = UsageCostDetails::new_with_default_capacity(); - - Self::get_signature_cost(&mut tx_cost, transaction, feature_set); - Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); - Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); - tx_cost.allocated_accounts_data_size = + let (signatures_count_detail, signature_cost) = + Self::get_signature_cost(transaction, feature_set); + let write_lock_cost = Self::get_write_lock_cost(transaction, feature_set); + let (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) = + Self::get_transaction_cost(transaction, feature_set); + let allocated_accounts_data_size = Self::calculate_allocated_accounts_data_size(transaction); - debug!("transaction {:?} has cost {:?}", transaction, tx_cost); - TransactionCost::Transaction(tx_cost) + let usage_cost_details = UsageCostDetails { + transaction, + signature_cost, + write_lock_cost, + data_bytes_cost, + programs_execution_cost, + loaded_accounts_data_size_cost, + allocated_accounts_data_size, + 
signature_details: signatures_count_detail, + }; + + TransactionCost::Transaction(usage_cost_details) } } // Calculate executed transaction CU cost, with actual execution and loaded accounts size // costs. - pub fn calculate_cost_for_executed_transaction( - transaction: &SanitizedTransaction, + pub fn calculate_cost_for_executed_transaction<'a>( + transaction: &'a SanitizedTransaction, actual_programs_execution_cost: u64, actual_loaded_accounts_data_size_bytes: u32, feature_set: &FeatureSet, - ) -> TransactionCost { + ) -> TransactionCost<'a, SanitizedTransaction> { if transaction.is_simple_vote_transaction() { - TransactionCost::SimpleVote { - writable_accounts: Self::get_writable_accounts(transaction), - } + TransactionCost::SimpleVote { transaction } } else { - let mut tx_cost = UsageCostDetails::new_with_default_capacity(); + let (signatures_count_detail, signature_cost) = + Self::get_signature_cost(transaction, feature_set); + let write_lock_cost = Self::get_write_lock_cost(transaction, feature_set); - Self::get_signature_cost(&mut tx_cost, transaction, feature_set); - Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); - Self::get_instructions_data_cost(&mut tx_cost, transaction); - tx_cost.allocated_accounts_data_size = + let instructions_data_cost = Self::get_instructions_data_cost(transaction); + let allocated_accounts_data_size = Self::calculate_allocated_accounts_data_size(transaction); - tx_cost.programs_execution_cost = actual_programs_execution_cost; - tx_cost.loaded_accounts_data_size_cost = Self::calculate_loaded_accounts_data_size_cost( + let programs_execution_cost = actual_programs_execution_cost; + let loaded_accounts_data_size_cost = Self::calculate_loaded_accounts_data_size_cost( actual_loaded_accounts_data_size_bytes, feature_set, ); - TransactionCost::Transaction(tx_cost) + let usage_cost_details = UsageCostDetails { + transaction, + signature_cost, + write_lock_cost, + data_bytes_cost: instructions_data_cost, + programs_execution_cost, + loaded_accounts_data_size_cost, + allocated_accounts_data_size, + signature_details: signatures_count_detail, + }; + + TransactionCost::Transaction(usage_cost_details) } } + /// Returns signature details and the total signature cost fn get_signature_cost( - tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction, feature_set: &FeatureSet, - ) { + ) -> (TransactionSignatureDetails, u64) { let signatures_count_detail = transaction.message().get_signature_details(); - tx_cost.num_transaction_signatures = signatures_count_detail.num_transaction_signatures(); - tx_cost.num_secp256k1_instruction_signatures = - signatures_count_detail.num_secp256k1_instruction_signatures(); - tx_cost.num_ed25519_instruction_signatures = - signatures_count_detail.num_ed25519_instruction_signatures(); let ed25519_verify_cost = if feature_set.is_active(&feature_set::ed25519_precompile_verify_strict::id()) { @@ -114,7 +126,7 @@ impl CostModel { ED25519_VERIFY_COST }; - tx_cost.signature_cost = signatures_count_detail + let signature_cost = signatures_count_detail .num_transaction_signatures() .saturating_mul(SIGNATURE_COST) .saturating_add( @@ -127,44 +139,34 @@ impl CostModel { .num_ed25519_instruction_signatures() .saturating_mul(ed25519_verify_cost), ); + + (signatures_count_detail, signature_cost) } - fn get_writable_accounts(transaction: &SanitizedTransaction) -> Vec { - let message = transaction.message(); + fn get_writable_accounts(message: &impl SVMMessage) -> impl Iterator { message .account_keys() .iter() .enumerate() - 
.filter_map(|(i, k)| { - if message.is_writable(i) { - Some(*k) - } else { - None - } - }) - .collect() + .filter_map(|(i, k)| message.is_writable(i).then_some(k)) } - fn get_write_lock_cost( - tx_cost: &mut UsageCostDetails, - transaction: &SanitizedTransaction, - feature_set: &FeatureSet, - ) { - tx_cost.writable_accounts = Self::get_writable_accounts(transaction); + /// Returns the total write-lock cost. + fn get_write_lock_cost(transaction: &impl SVMMessage, feature_set: &FeatureSet) -> u64 { let num_write_locks = if feature_set.is_active(&feature_set::cost_model_requested_write_lock_cost::id()) { - transaction.message().num_write_locks() + transaction.num_write_locks() } else { - tx_cost.writable_accounts.len() as u64 + Self::get_writable_accounts(transaction).count() as u64 }; - tx_cost.write_lock_cost = WRITE_LOCK_UNITS.saturating_mul(num_write_locks); + WRITE_LOCK_UNITS.saturating_mul(num_write_locks) } + /// Return (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) fn get_transaction_cost( - tx_cost: &mut UsageCostDetails, transaction: &impl SVMMessage, feature_set: &FeatureSet, - ) { + ) -> (u64, u64, u64) { let mut programs_execution_costs = 0u64; let mut loaded_accounts_data_size_cost = 0u64; let mut data_bytes_len_total = 0u64; @@ -219,15 +221,15 @@ impl CostModel { } } - tx_cost.programs_execution_cost = programs_execution_costs; - tx_cost.loaded_accounts_data_size_cost = loaded_accounts_data_size_cost; - tx_cost.data_bytes_cost = data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST; + ( + programs_execution_costs, + loaded_accounts_data_size_cost, + data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST, + ) } - fn get_instructions_data_cost( - tx_cost: &mut UsageCostDetails, - transaction: &SanitizedTransaction, - ) { + /// Return the instruction data bytes cost. 
+ fn get_instructions_data_cost(transaction: &SanitizedTransaction) -> u64 { let ix_data_bytes_len_total: u64 = transaction .message() .instructions() @@ -235,7 +237,7 @@ impl CostModel { .map(|instruction| instruction.data.len() as u64) .sum(); - tx_cost.data_bytes_cost = ix_data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST; + ix_data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST } pub fn calculate_loaded_accounts_data_size_cost( @@ -319,6 +321,8 @@ impl CostModel { mod tests { use { super::*, + itertools::Itertools, + log::debug, solana_sdk::{ compute_budget::{self, ComputeBudgetInstruction}, fee::ACCOUNT_DATA_COST_PAGE_SIZE, @@ -526,14 +530,11 @@ mod tests { .get(&system_program::id()) .unwrap(); - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost( - &mut tx_cost, - &simple_transaction, - &FeatureSet::all_enabled(), - ); - assert_eq!(*expected_execution_cost, tx_cost.programs_execution_cost); - assert_eq!(3, tx_cost.data_bytes_cost); + let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = + CostModel::get_transaction_cost(&simple_transaction, &FeatureSet::all_enabled()); + + assert_eq!(*expected_execution_cost, program_execution_cost); + assert_eq!(3, data_bytes_cost); } #[test] @@ -554,17 +555,13 @@ mod tests { let token_transaction = SanitizedTransaction::from_transaction_for_tests(tx); debug!("token_transaction {:?}", token_transaction); - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost( - &mut tx_cost, - &token_transaction, - &FeatureSet::all_enabled(), - ); + let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = + CostModel::get_transaction_cost(&token_transaction, &FeatureSet::all_enabled()); assert_eq!( DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, - tx_cost.programs_execution_cost + program_execution_cost ); - assert_eq!(0, tx_cost.data_bytes_cost); + assert_eq!(0, data_bytes_cost); } #[test] @@ -581,7 +578,7 @@ mod tests { { let tx_cost = CostModel::calculate_cost(&simple_transaction, &FeatureSet::default()); assert_eq!(WRITE_LOCK_UNITS, tx_cost.write_lock_cost()); - assert_eq!(1, tx_cost.writable_accounts().len()); + assert_eq!(1, tx_cost.writable_accounts().count()); } // Feature enabled - write lock is demoted but still counts towards cost @@ -589,7 +586,7 @@ mod tests { let tx_cost = CostModel::calculate_cost(&simple_transaction, &FeatureSet::all_enabled()); assert_eq!(2 * WRITE_LOCK_UNITS, tx_cost.write_lock_cost()); - assert_eq!(1, tx_cost.writable_accounts().len()); + assert_eq!(1, tx_cost.writable_accounts().count()); } } @@ -619,15 +616,12 @@ mod tests { ); let token_transaction = SanitizedTransaction::from_transaction_for_tests(tx); - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost( - &mut tx_cost, - &token_transaction, - &FeatureSet::all_enabled(), - ); + let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = + CostModel::get_transaction_cost(&token_transaction, &FeatureSet::all_enabled()); + // If cu-limit is specified, that would the cost for all programs - assert_eq!(12_345, tx_cost.programs_execution_cost); - assert_eq!(1, tx_cost.data_bytes_cost); + assert_eq!(12_345, program_execution_cost); + assert_eq!(1, data_bytes_cost); } #[test] @@ -664,13 +658,9 @@ mod tests { ); let token_transaction = SanitizedTransaction::from_transaction_for_tests(tx); - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost( - &mut tx_cost, - &token_transaction, - 
&FeatureSet::all_enabled(), - ); - assert_eq!(0, tx_cost.programs_execution_cost); + let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = + CostModel::get_transaction_cost(&token_transaction, &FeatureSet::all_enabled()); + assert_eq!(0, program_execution_cost); } #[test] @@ -695,10 +685,10 @@ mod tests { .unwrap(); let expected_cost = program_cost * 2; - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost(&mut tx_cost, &tx, &FeatureSet::all_enabled()); - assert_eq!(expected_cost, tx_cost.programs_execution_cost); - assert_eq!(6, tx_cost.data_bytes_cost); + let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = + CostModel::get_transaction_cost(&tx, &FeatureSet::all_enabled()); + assert_eq!(expected_cost, program_execution_cost); + assert_eq!(6, data_bytes_cost); } #[test] @@ -726,10 +716,10 @@ mod tests { debug!("many random transaction {:?}", tx); let expected_cost = DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 2; - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost(&mut tx_cost, &tx, &FeatureSet::all_enabled()); - assert_eq!(expected_cost, tx_cost.programs_execution_cost); - assert_eq!(0, tx_cost.data_bytes_cost); + let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = + CostModel::get_transaction_cost(&tx, &FeatureSet::all_enabled()); + assert_eq!(expected_cost, program_execution_cost); + assert_eq!(0, data_bytes_cost); } #[test] @@ -756,11 +746,12 @@ mod tests { ); let tx_cost = CostModel::calculate_cost(&tx, &FeatureSet::all_enabled()); - assert_eq!(2 + 2, tx_cost.writable_accounts().len()); - assert_eq!(signer1.pubkey(), tx_cost.writable_accounts()[0]); - assert_eq!(signer2.pubkey(), tx_cost.writable_accounts()[1]); - assert_eq!(key1, tx_cost.writable_accounts()[2]); - assert_eq!(key2, tx_cost.writable_accounts()[3]); + let writable_accounts = tx_cost.writable_accounts().collect_vec(); + assert_eq!(2 + 2, writable_accounts.len()); + assert_eq!(signer1.pubkey(), *writable_accounts[0]); + assert_eq!(signer2.pubkey(), *writable_accounts[1]); + assert_eq!(key1, *writable_accounts[2]); + assert_eq!(key2, *writable_accounts[3]); } #[test] @@ -787,7 +778,7 @@ mod tests { let tx_cost = CostModel::calculate_cost(&tx, &FeatureSet::all_enabled()); assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); assert_eq!(*expected_execution_cost, tx_cost.programs_execution_cost()); - assert_eq!(2, tx_cost.writable_accounts().len()); + assert_eq!(2, tx_cost.writable_accounts().count()); assert_eq!( expected_loaded_accounts_data_size_cost, tx_cost.loaded_accounts_data_size_cost() @@ -823,7 +814,7 @@ mod tests { let tx_cost = CostModel::calculate_cost(&tx, &feature_set); assert_eq!(expected_account_cost, tx_cost.write_lock_cost()); assert_eq!(expected_execution_cost, tx_cost.programs_execution_cost()); - assert_eq!(2, tx_cost.writable_accounts().len()); + assert_eq!(2, tx_cost.writable_accounts().count()); assert_eq!( expected_loaded_accounts_data_size_cost, tx_cost.loaded_accounts_data_size_cost() @@ -850,12 +841,12 @@ mod tests { .unwrap(); let expected_bpf_cost = DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT; - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost(&mut tx_cost, &transaction, &FeatureSet::all_enabled()); + let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = + CostModel::get_transaction_cost(&transaction, &FeatureSet::all_enabled()); assert_eq!( expected_builtin_cost + expected_bpf_cost as 
u64, - tx_cost.programs_execution_cost + program_execution_cost ); } @@ -881,9 +872,8 @@ mod tests { .get(&compute_budget::id()) .unwrap(); - let mut tx_cost = UsageCostDetails::default(); - CostModel::get_transaction_cost(&mut tx_cost, &transaction, &FeatureSet::all_enabled()); - - assert_eq!(expected_cost, tx_cost.programs_execution_cost); + let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = + CostModel::get_transaction_cost(&transaction, &FeatureSet::all_enabled()); + assert_eq!(expected_cost, program_execution_cost); } } diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 8caaa2ef3168cc..3b9382bba0a901 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -9,6 +9,7 @@ use { solana_sdk::{ clock::Slot, pubkey::Pubkey, saturating_add_assign, transaction::TransactionError, }, + solana_svm_transaction::svm_message::SVMMessage, std::{cmp::Ordering, collections::HashMap}, }; @@ -157,7 +158,10 @@ impl CostTracker { .saturating_sub(in_flight_transaction_count); } - pub fn try_add(&mut self, tx_cost: &TransactionCost) -> Result { + pub fn try_add( + &mut self, + tx_cost: &TransactionCost, + ) -> Result { self.would_fit(tx_cost)?; let updated_costliest_account_cost = self.add_transaction_cost(tx_cost); Ok(UpdatedCosts { @@ -168,7 +172,7 @@ impl CostTracker { pub fn update_execution_cost( &mut self, - estimated_tx_cost: &TransactionCost, + estimated_tx_cost: &TransactionCost, actual_execution_units: u64, actual_loaded_accounts_data_size_cost: u64, ) { @@ -194,7 +198,7 @@ impl CostTracker { } } - pub fn remove(&mut self, tx_cost: &TransactionCost) { + pub fn remove(&mut self, tx_cost: &TransactionCost) { self.remove_transaction_cost(tx_cost); } @@ -263,7 +267,10 @@ impl CostTracker { .unwrap_or_default() } - fn would_fit(&self, tx_cost: &TransactionCost) -> Result<(), CostTrackerError> { + fn would_fit( + &self, + tx_cost: &TransactionCost, + ) -> Result<(), CostTrackerError> { let cost: u64 = tx_cost.sum(); if tx_cost.is_simple_vote() { @@ -292,7 +299,7 @@ impl CostTracker { } // check each account against account_cost_limit, - for account_key in tx_cost.writable_accounts().iter() { + for account_key in tx_cost.writable_accounts() { match self.cost_by_writable_accounts.get(account_key) { Some(chained_cost) => { if chained_cost.saturating_add(cost) > self.account_cost_limit { @@ -309,7 +316,7 @@ impl CostTracker { } // Returns the highest account cost for all write-lock accounts `TransactionCost` updated - fn add_transaction_cost(&mut self, tx_cost: &TransactionCost) -> u64 { + fn add_transaction_cost(&mut self, tx_cost: &TransactionCost) -> u64 { saturating_add_assign!( self.allocated_accounts_data_size, tx_cost.allocated_accounts_data_size() @@ -330,7 +337,7 @@ impl CostTracker { self.add_transaction_execution_cost(tx_cost, tx_cost.sum()) } - fn remove_transaction_cost(&mut self, tx_cost: &TransactionCost) { + fn remove_transaction_cost(&mut self, tx_cost: &TransactionCost) { let cost = tx_cost.sum(); self.sub_transaction_execution_cost(tx_cost, cost); self.allocated_accounts_data_size = self @@ -352,11 +359,11 @@ impl CostTracker { /// Return the costliest account cost that were updated by `TransactionCost` fn add_transaction_execution_cost( &mut self, - tx_cost: &TransactionCost, + tx_cost: &TransactionCost, adjustment: u64, ) -> u64 { let mut costliest_account_cost = 0; - for account_key in tx_cost.writable_accounts().iter() { + for account_key in tx_cost.writable_accounts() { let account_cost = self 
.cost_by_writable_accounts .entry(*account_key) @@ -373,8 +380,12 @@ impl CostTracker { } /// Subtract extra execution units from cost_tracker - fn sub_transaction_execution_cost(&mut self, tx_cost: &TransactionCost, adjustment: u64) { - for account_key in tx_cost.writable_accounts().iter() { + fn sub_transaction_execution_cost( + &mut self, + tx_cost: &TransactionCost, + adjustment: u64, + ) { + for account_key in tx_cost.writable_accounts() { let account_cost = self .cost_by_writable_accounts .entry(*account_key) @@ -400,17 +411,11 @@ impl CostTracker { mod tests { use { super::*, - crate::transaction_cost::*, + crate::transaction_cost::{WritableKeysTransaction, *}, solana_sdk::{ - hash::Hash, - reserved_account_keys::ReservedAccountKeys, + message::TransactionSignatureDetails, signature::{Keypair, Signer}, - system_transaction, - transaction::{ - MessageHash, SanitizedTransaction, SimpleAddressLoader, VersionedTransaction, - }, }, - solana_vote_program::{vote_state::TowerSync, vote_transaction}, std::cmp, }; @@ -427,53 +432,45 @@ mod tests { } } - fn test_setup() -> (Keypair, Hash) { + fn test_setup() -> Keypair { solana_logger::setup(); - (Keypair::new(), Hash::new_unique()) + Keypair::new() } - fn build_simple_transaction( - mint_keypair: &Keypair, - start_hash: &Hash, - ) -> (SanitizedTransaction, TransactionCost) { - let keypair = Keypair::new(); - let simple_transaction = SanitizedTransaction::from_transaction_for_tests( - system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash), - ); - let mut tx_cost = UsageCostDetails::new_with_capacity(1); - tx_cost.programs_execution_cost = 5; - tx_cost.writable_accounts.push(mint_keypair.pubkey()); - - (simple_transaction, TransactionCost::Transaction(tx_cost)) - } - - fn build_simple_vote_transaction( - mint_keypair: &Keypair, - start_hash: &Hash, - ) -> (SanitizedTransaction, TransactionCost) { - let keypair = Keypair::new(); - let transaction = vote_transaction::new_tower_sync_transaction( - TowerSync::from(vec![(42, 1)]), - *start_hash, - mint_keypair, - &keypair, - &keypair, - None, - ); - let vote_transaction = SanitizedTransaction::try_create( - VersionedTransaction::from(transaction), - MessageHash::Compute, - Some(true), - SimpleAddressLoader::Disabled, - &ReservedAccountKeys::empty_key_set(), - ) - .unwrap(); - - let writable_accounts = vec![mint_keypair.pubkey()]; - ( - vote_transaction, - TransactionCost::SimpleVote { writable_accounts }, - ) + fn build_simple_transaction(mint_keypair: &Keypair) -> WritableKeysTransaction { + WritableKeysTransaction(vec![mint_keypair.pubkey()]) + } + + fn simple_usage_cost_details( + transaction: &WritableKeysTransaction, + programs_execution_cost: u64, + ) -> UsageCostDetails { + UsageCostDetails { + transaction, + signature_cost: 0, + write_lock_cost: 0, + data_bytes_cost: 0, + programs_execution_cost, + loaded_accounts_data_size_cost: 0, + allocated_accounts_data_size: 0, + signature_details: TransactionSignatureDetails::new(0, 0, 0), + } + } + + fn simple_transaction_cost( + transaction: &WritableKeysTransaction, + programs_execution_cost: u64, + ) -> TransactionCost { + TransactionCost::Transaction(simple_usage_cost_details( + transaction, + programs_execution_cost, + )) + } + + fn simple_vote_transaction_cost( + transaction: &WritableKeysTransaction, + ) -> TransactionCost { + TransactionCost::SimpleVote { transaction } } #[test] @@ -488,8 +485,9 @@ mod tests { #[test] fn test_cost_tracker_ok_add_one() { - let (mint_keypair, start_hash) = test_setup(); - let (_tx, 
tx_cost) = build_simple_transaction(&mint_keypair, &start_hash); + let mint_keypair = test_setup(); + let tx = build_simple_transaction(&mint_keypair); + let tx_cost = simple_transaction_cost(&tx, 5); let cost = tx_cost.sum(); // build testee to have capacity for one simple transaction @@ -504,8 +502,9 @@ mod tests { #[test] fn test_cost_tracker_ok_add_one_vote() { - let (mint_keypair, start_hash) = test_setup(); - let (_tx, tx_cost) = build_simple_vote_transaction(&mint_keypair, &start_hash); + let mint_keypair = test_setup(); + let tx = build_simple_transaction(&mint_keypair); + let tx_cost = simple_vote_transaction_cost(&tx); let cost = tx_cost.sum(); // build testee to have capacity for one simple transaction @@ -520,8 +519,9 @@ mod tests { #[test] fn test_cost_tracker_add_data() { - let (mint_keypair, start_hash) = test_setup(); - let (_tx, mut tx_cost) = build_simple_transaction(&mint_keypair, &start_hash); + let mint_keypair = test_setup(); + let tx = build_simple_transaction(&mint_keypair); + let mut tx_cost = simple_transaction_cost(&tx, 5); if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost { usage_cost.allocated_accounts_data_size = 1; } else { @@ -539,11 +539,13 @@ mod tests { #[test] fn test_cost_tracker_ok_add_two_same_accounts() { - let (mint_keypair, start_hash) = test_setup(); + let mint_keypair = test_setup(); // build two transactions with same signed account - let (_tx1, tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let tx_cost1 = simple_transaction_cost(&tx1, 5); let cost1 = tx_cost1.sum(); - let (_tx2, tx_cost2) = build_simple_transaction(&mint_keypair, &start_hash); + let tx2 = build_simple_transaction(&mint_keypair); + let tx_cost2 = simple_transaction_cost(&tx2, 5); let cost2 = tx_cost2.sum(); // build testee to have capacity for two simple transactions, with same accounts @@ -564,12 +566,15 @@ mod tests { #[test] fn test_cost_tracker_ok_add_two_diff_accounts() { - let (mint_keypair, start_hash) = test_setup(); + let mint_keypair = test_setup(); // build two transactions with diff accounts let second_account = Keypair::new(); - let (_tx1, tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let tx_cost1 = simple_transaction_cost(&tx1, 5); let cost1 = tx_cost1.sum(); - let (_tx2, tx_cost2) = build_simple_transaction(&second_account, &start_hash); + + let tx2 = build_simple_transaction(&second_account); + let tx_cost2 = simple_transaction_cost(&tx2, 5); let cost2 = tx_cost2.sum(); // build testee to have capacity for two simple transactions, with same accounts @@ -590,11 +595,13 @@ mod tests { #[test] fn test_cost_tracker_chain_reach_limit() { - let (mint_keypair, start_hash) = test_setup(); + let mint_keypair = test_setup(); // build two transactions with same signed account - let (_tx1, tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let tx_cost1 = simple_transaction_cost(&tx1, 5); let cost1 = tx_cost1.sum(); - let (_tx2, tx_cost2) = build_simple_transaction(&mint_keypair, &start_hash); + let tx2 = build_simple_transaction(&mint_keypair); + let tx_cost2 = simple_transaction_cost(&tx2, 5); let cost2 = tx_cost2.sum(); // build testee to have capacity for two simple transactions, but not for same accounts @@ -612,12 +619,14 @@ mod tests { #[test] fn test_cost_tracker_reach_limit() { - let (mint_keypair, start_hash) = test_setup(); + let 
mint_keypair = test_setup(); // build two transactions with diff accounts let second_account = Keypair::new(); - let (_tx1, tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let tx_cost1 = simple_transaction_cost(&tx1, 5); let cost1 = tx_cost1.sum(); - let (_tx2, tx_cost2) = build_simple_transaction(&second_account, &start_hash); + let tx2 = build_simple_transaction(&second_account); + let tx_cost2 = simple_transaction_cost(&tx2, 5); let cost2 = tx_cost2.sum(); // build testee to have capacity for each chain, but not enough room for both transactions @@ -636,12 +645,14 @@ mod tests { #[test] fn test_cost_tracker_reach_vote_limit() { - let (mint_keypair, start_hash) = test_setup(); + let mint_keypair = test_setup(); // build two mocking vote transactions with diff accounts let second_account = Keypair::new(); - let (_tx1, tx_cost1) = build_simple_vote_transaction(&mint_keypair, &start_hash); - let (_tx2, tx_cost2) = build_simple_vote_transaction(&second_account, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let tx_cost1 = simple_vote_transaction_cost(&tx1); let cost1 = tx_cost1.sum(); + let tx2 = build_simple_transaction(&second_account); + let tx_cost2 = simple_vote_transaction_cost(&tx2); let cost2 = tx_cost2.sum(); // build testee to have capacity for each chain, but not enough room for both votes @@ -658,18 +669,21 @@ mod tests { // however there is room for none-vote tx3 { let third_account = Keypair::new(); - let (_tx3, tx_cost3) = build_simple_transaction(&third_account, &start_hash); + let tx3 = build_simple_transaction(&third_account); + let tx_cost3 = simple_transaction_cost(&tx3, 5); assert!(testee.would_fit(&tx_cost3).is_ok()); } } #[test] fn test_cost_tracker_reach_data_block_limit() { - let (mint_keypair, start_hash) = test_setup(); + let mint_keypair = test_setup(); // build two transactions with diff accounts let second_account = Keypair::new(); - let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); - let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let mut tx_cost1 = simple_transaction_cost(&tx1, 5); + let tx2 = build_simple_transaction(&second_account); + let mut tx_cost2 = simple_transaction_cost(&tx2, 5); if let TransactionCost::Transaction(ref mut usage_cost) = tx_cost1 { usage_cost.allocated_accounts_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; } else { @@ -695,13 +709,16 @@ mod tests { #[test] fn test_cost_tracker_remove() { - let (mint_keypair, start_hash) = test_setup(); + let mint_keypair = test_setup(); // build two transactions with diff accounts let second_account = Keypair::new(); - let (_tx1, tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); - let (_tx2, tx_cost2) = build_simple_transaction(&second_account, &start_hash); + let tx1 = build_simple_transaction(&mint_keypair); + let tx_cost1 = simple_transaction_cost(&tx1, 5); + let tx2 = build_simple_transaction(&second_account); + let tx_cost2 = simple_transaction_cost(&tx2, 5); let cost1 = tx_cost1.sum(); let cost2 = tx_cost2.sum(); + // build testee let mut testee = CostTracker::new(cost1 + cost2, cost1 + cost2, cost1 + cost2); @@ -738,11 +755,8 @@ mod tests { // | acct3 | $cost | // and block_cost = $cost { - let tx_cost = TransactionCost::Transaction(UsageCostDetails { - writable_accounts: vec![acct1, acct2, acct3], - programs_execution_cost: cost, - ..UsageCostDetails::default() - 
}); + let transaction = WritableKeysTransaction(vec![acct1, acct2, acct3]); + let tx_cost = simple_transaction_cost(&transaction, cost); assert!(testee.try_add(&tx_cost).is_ok()); let (_costliest_account, costliest_account_cost) = testee.find_costliest_account(); assert_eq!(cost, testee.block_cost); @@ -756,11 +770,8 @@ mod tests { // | acct3 | $cost | // and block_cost = $cost * 2 { - let tx_cost = TransactionCost::Transaction(UsageCostDetails { - writable_accounts: vec![acct2], - programs_execution_cost: cost, - ..UsageCostDetails::default() - }); + let transaction = WritableKeysTransaction(vec![acct2]); + let tx_cost = simple_transaction_cost(&transaction, cost); assert!(testee.try_add(&tx_cost).is_ok()); let (costliest_account, costliest_account_cost) = testee.find_costliest_account(); assert_eq!(cost * 2, testee.block_cost); @@ -776,11 +787,8 @@ mod tests { // | acct3 | $cost | // and block_cost = $cost * 2 { - let tx_cost = TransactionCost::Transaction(UsageCostDetails { - writable_accounts: vec![acct1, acct2], - programs_execution_cost: cost, - ..UsageCostDetails::default() - }); + let transaction = WritableKeysTransaction(vec![acct1, acct2]); + let tx_cost = simple_transaction_cost(&transaction, cost); assert!(testee.try_add(&tx_cost).is_err()); let (costliest_account, costliest_account_cost) = testee.find_costliest_account(); assert_eq!(cost * 2, testee.block_cost); @@ -800,11 +808,8 @@ mod tests { let block_max = account_max * 3; // for three accts let mut testee = CostTracker::new(account_max, block_max, block_max); - let tx_cost = TransactionCost::Transaction(UsageCostDetails { - writable_accounts: vec![acct1, acct2, acct3], - programs_execution_cost: cost, - ..UsageCostDetails::default() - }); + let transaction = WritableKeysTransaction(vec![acct1, acct2, acct3]); + let tx_cost = simple_transaction_cost(&transaction, cost); let mut expected_block_cost = tx_cost.sum(); let expected_tx_count = 1; assert!(testee.try_add(&tx_cost).is_ok()); @@ -885,16 +890,16 @@ mod tests { let estimated_programs_execution_cost = 100; let estimated_loaded_accounts_data_size_cost = 200; let number_writeble_accounts = 3; - let writable_accounts = std::iter::repeat_with(Pubkey::new_unique) - .take(number_writeble_accounts) - .collect(); - - let tx_cost = TransactionCost::Transaction(UsageCostDetails { - writable_accounts, - programs_execution_cost: estimated_programs_execution_cost, - loaded_accounts_data_size_cost: estimated_loaded_accounts_data_size_cost, - ..UsageCostDetails::default() - }); + let transaction = WritableKeysTransaction( + std::iter::repeat_with(Pubkey::new_unique) + .take(number_writeble_accounts) + .collect(), + ); + + let mut usage_cost = + simple_usage_cost_details(&transaction, estimated_programs_execution_cost); + usage_cost.loaded_accounts_data_size_cost = estimated_loaded_accounts_data_size_cost; + let tx_cost = TransactionCost::Transaction(usage_cost); // confirm tx_cost is only made up by programs_execution_cost and // loaded_accounts_data_size_cost let estimated_tx_cost = tx_cost.sum(); @@ -952,12 +957,8 @@ mod tests { let mut cost_tracker = CostTracker::default(); let cost = 100u64; - let tx_cost = TransactionCost::Transaction(UsageCostDetails { - writable_accounts: vec![Pubkey::new_unique()], - programs_execution_cost: cost, - ..UsageCostDetails::default() - }); - + let transaction = WritableKeysTransaction(vec![Pubkey::new_unique()]); + let tx_cost = simple_transaction_cost(&transaction, cost); cost_tracker.add_transaction_cost(&tx_cost); // assert cost_tracker is 
reverted to default assert_eq!(1, cost_tracker.transaction_count); diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index 3065162c5ee22b..fcfdfdda195aa1 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -1,4 +1,8 @@ -use {crate::block_cost_limits, solana_sdk::pubkey::Pubkey}; +use { + crate::block_cost_limits, + solana_sdk::{message::TransactionSignatureDetails, pubkey::Pubkey}, + solana_svm_transaction::svm_message::SVMMessage, +}; /// TransactionCost is used to represent resources required to process /// a transaction, denominated in CU (eg. Compute Units). @@ -11,12 +15,12 @@ use {crate::block_cost_limits, solana_sdk::pubkey::Pubkey}; const SIMPLE_VOTE_USAGE_COST: u64 = 3428; #[derive(Debug)] -pub enum TransactionCost { - SimpleVote { writable_accounts: Vec }, - Transaction(UsageCostDetails), +pub enum TransactionCost<'a, Tx: SVMMessage> { + SimpleVote { transaction: &'a Tx }, + Transaction(UsageCostDetails<'a, Tx>), } -impl TransactionCost { +impl<'a, Tx: SVMMessage> TransactionCost<'a, Tx> { pub fn sum(&self) -> u64 { #![allow(clippy::assertions_on_constants)] match self { @@ -85,112 +89,133 @@ impl TransactionCost { } } - pub fn writable_accounts(&self) -> &[Pubkey] { - match self { - Self::SimpleVote { writable_accounts } => writable_accounts, - Self::Transaction(usage_cost) => &usage_cost.writable_accounts, - } + pub fn writable_accounts(&self) -> impl Iterator { + let transaction = match self { + Self::SimpleVote { transaction } => transaction, + Self::Transaction(usage_cost) => usage_cost.transaction, + }; + transaction + .account_keys() + .iter() + .enumerate() + .filter_map(|(index, key)| transaction.is_writable(index).then_some(key)) } pub fn num_transaction_signatures(&self) -> u64 { match self { Self::SimpleVote { .. } => 1, - Self::Transaction(usage_cost) => usage_cost.num_transaction_signatures, + Self::Transaction(usage_cost) => { + usage_cost.signature_details.num_transaction_signatures() + } } } pub fn num_secp256k1_instruction_signatures(&self) -> u64 { match self { Self::SimpleVote { .. } => 0, - Self::Transaction(usage_cost) => usage_cost.num_secp256k1_instruction_signatures, + Self::Transaction(usage_cost) => usage_cost + .signature_details + .num_secp256k1_instruction_signatures(), } } pub fn num_ed25519_instruction_signatures(&self) -> u64 { match self { Self::SimpleVote { .. 
} => 0, - Self::Transaction(usage_cost) => usage_cost.num_ed25519_instruction_signatures, + Self::Transaction(usage_cost) => usage_cost + .signature_details + .num_ed25519_instruction_signatures(), } } } -const MAX_WRITABLE_ACCOUNTS: usize = 256; - // costs are stored in number of 'compute unit's #[derive(Debug)] -pub struct UsageCostDetails { - pub writable_accounts: Vec, +pub struct UsageCostDetails<'a, Tx: SVMMessage> { + pub transaction: &'a Tx, pub signature_cost: u64, pub write_lock_cost: u64, pub data_bytes_cost: u64, pub programs_execution_cost: u64, pub loaded_accounts_data_size_cost: u64, pub allocated_accounts_data_size: u64, - pub num_transaction_signatures: u64, - pub num_secp256k1_instruction_signatures: u64, - pub num_ed25519_instruction_signatures: u64, + pub signature_details: TransactionSignatureDetails, } -impl Default for UsageCostDetails { - fn default() -> Self { - Self { - writable_accounts: Vec::with_capacity(MAX_WRITABLE_ACCOUNTS), - signature_cost: 0u64, - write_lock_cost: 0u64, - data_bytes_cost: 0u64, - programs_execution_cost: 0u64, - loaded_accounts_data_size_cost: 0u64, - allocated_accounts_data_size: 0u64, - num_transaction_signatures: 0u64, - num_secp256k1_instruction_signatures: 0u64, - num_ed25519_instruction_signatures: 0u64, - } +impl<'a, Tx: SVMMessage> UsageCostDetails<'a, Tx> { + pub fn sum(&self) -> u64 { + self.signature_cost + .saturating_add(self.write_lock_cost) + .saturating_add(self.data_bytes_cost) + .saturating_add(self.programs_execution_cost) + .saturating_add(self.loaded_accounts_data_size_cost) } } -#[cfg(test)] -impl PartialEq for UsageCostDetails { - fn eq(&self, other: &Self) -> bool { - fn to_hash_set(v: &[Pubkey]) -> std::collections::HashSet<&Pubkey> { - v.iter().collect() - } +#[cfg(feature = "dev-context-only-utils")] +#[derive(Debug)] +pub struct WritableKeysTransaction(pub Vec); - self.signature_cost == other.signature_cost - && self.write_lock_cost == other.write_lock_cost - && self.data_bytes_cost == other.data_bytes_cost - && self.programs_execution_cost == other.programs_execution_cost - && self.loaded_accounts_data_size_cost == other.loaded_accounts_data_size_cost - && self.allocated_accounts_data_size == other.allocated_accounts_data_size - && self.num_transaction_signatures == other.num_transaction_signatures - && self.num_secp256k1_instruction_signatures - == other.num_secp256k1_instruction_signatures - && self.num_ed25519_instruction_signatures == other.num_ed25519_instruction_signatures - && to_hash_set(&self.writable_accounts) == to_hash_set(&other.writable_accounts) +#[cfg(feature = "dev-context-only-utils")] +impl SVMMessage for WritableKeysTransaction { + fn num_total_signatures(&self) -> u64 { + unimplemented!("WritableKeysTransaction::num_total_signatures") } -} -#[cfg(test)] -impl Eq for UsageCostDetails {} - -impl UsageCostDetails { - #[cfg(test)] - pub fn new_with_capacity(capacity: usize) -> Self { - Self { - writable_accounts: Vec::with_capacity(capacity), - ..Self::default() - } + fn num_write_locks(&self) -> u64 { + unimplemented!("WritableKeysTransaction::num_write_locks") } - pub fn new_with_default_capacity() -> Self { - Self::default() + fn recent_blockhash(&self) -> &solana_sdk::hash::Hash { + unimplemented!("WritableKeysTransaction::recent_blockhash") } - pub fn sum(&self) -> u64 { - self.signature_cost - .saturating_add(self.write_lock_cost) - .saturating_add(self.data_bytes_cost) - .saturating_add(self.programs_execution_cost) - .saturating_add(self.loaded_accounts_data_size_cost) + fn 
num_instructions(&self) -> usize { + unimplemented!("WritableKeysTransaction::num_instructions") + } + + fn instructions_iter( + &self, + ) -> impl Iterator { + core::iter::empty() + } + + fn program_instructions_iter( + &self, + ) -> impl Iterator { + core::iter::empty() + } + + fn account_keys(&self) -> solana_sdk::message::AccountKeys { + solana_sdk::message::AccountKeys::new(&self.0, None) + } + + fn fee_payer(&self) -> &Pubkey { + unimplemented!("WritableKeysTransaction::fee_payer") + } + + fn is_writable(&self, _index: usize) -> bool { + true + } + + fn is_signer(&self, _index: usize) -> bool { + unimplemented!("WritableKeysTransaction::is_signer") + } + + fn is_invoked(&self, _key_index: usize) -> bool { + unimplemented!("WritableKeysTransaction::is_invoked") + } + + fn num_lookup_tables(&self) -> usize { + unimplemented!("WritableKeysTransaction::num_lookup_tables") + } + + fn message_address_table_lookups( + &self, + ) -> impl Iterator< + Item = solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup, + > { + core::iter::empty() } } From 69916f1077bd96e2c4aa17383ddf9abd7ec1fc77 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 16 Oct 2024 15:10:00 +0000 Subject: [PATCH 524/529] rolls back chained Merkle shreds for testnet downgrade (#3194) --- turbine/src/broadcast_stage/standard_broadcast_run.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 09f3380bf9d8df..6696db6fb039ef 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -508,7 +508,7 @@ fn should_chain_merkle_shreds(_slot: Slot, cluster_type: ClusterType) -> bool { ClusterType::Development => true, ClusterType::Devnet => false, ClusterType::MainnetBeta => false, - ClusterType::Testnet => true, + ClusterType::Testnet => false, } } From 5eeb40a51ace7b3a808b9e13040f3e10f0ea2c18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:13:00 +0800 Subject: [PATCH 525/529] build(deps): bump openssl from 0.10.66 to 0.10.67 (#3191) * build(deps): bump openssl from 0.10.66 to 0.10.67 Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.66 to 0.10.67. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.66...openssl-v0.10.67) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- programs/sbf/Cargo.lock | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ce1971cb71b8e..3485ef18b25913 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3936,9 +3936,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "7b8cefcf97f41316955f9294cd61f639bdcfa9f2f230faac6cb896aa8ab64704" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", @@ -3977,9 +3977,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7e987d12543ce6..93e0c757ae706f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3277,9 +3277,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "7b8cefcf97f41316955f9294cd61f639bdcfa9f2f230faac6cb896aa8ab64704" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", @@ -3318,9 +3318,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", From b94fd0b354fe795f346015689559357f6d67c39c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:13:11 +0800 Subject: [PATCH 526/529] build(deps): bump hyper from 0.14.30 to 0.14.31 (#3192) * build(deps): bump hyper from 0.14.30 to 0.14.31 Bumps [hyper](https://github.com/hyperium/hyper) from 0.14.30 to 0.14.31. - [Release notes](https://github.com/hyperium/hyper/releases) - [Changelog](https://github.com/hyperium/hyper/blob/v0.14.31/CHANGELOG.md) - [Commits](https://github.com/hyperium/hyper/compare/v0.14.30...v0.14.31) --- updated-dependencies: - dependency-name: hyper dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3485ef18b25913..2a6fe845388228 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2874,9 +2874,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", diff --git a/Cargo.toml b/Cargo.toml index 03e897c488d865..a7a1426d12989f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -283,7 +283,7 @@ histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" humantime = "2.0.1" -hyper = "0.14.30" +hyper = "0.14.31" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.13" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 93e0c757ae706f..f77b1cfa252e34 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2198,9 +2198,9 @@ checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", From 42122042d09396fe1fe54d51a5ce38435c22b4c1 Mon Sep 17 00:00:00 2001 From: Jon C Date: Wed, 16 Oct 2024 18:45:10 +0200 Subject: [PATCH 527/529] cli: Simulate for compute units in `validator-info publish` (#3164) #### Problem Similar to the other PRs like #2710, we should simulate transactions to find out the CUs consumed before sending them out. #### Summary of changes Similar to #2710, use `Simulated` compute units and perform the simulation before sending out the transaction. Also, add a test. 
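For readers following along, here is a minimal sketch of the simulate-then-send pattern this change adopts. The helper name, the 300-CU headroom, and the surrounding plumbing are illustrative assumptions for the example, not the CLI's actual code:

```rust
// Sketch: discover compute units by simulating the transaction first,
// then prepend a compute-unit limit sized from the simulation result.
use solana_rpc_client::rpc_client::RpcClient;
use solana_sdk::{
    compute_budget::ComputeBudgetInstruction,
    instruction::Instruction,
    message::Message,
    signature::{Keypair, Signer},
    transaction::Transaction,
};

fn send_with_simulated_compute_units(
    rpc_client: &RpcClient,
    payer: &Keypair,
    mut instructions: Vec<Instruction>,
) -> Result<(), Box<dyn std::error::Error>> {
    let blockhash = rpc_client.get_latest_blockhash()?;
    // Unsigned probe transaction; simulation does not verify signatures
    // by default, so signing is unnecessary for the estimate.
    let probe = Transaction::new_unsigned(Message::new_with_blockhash(
        &instructions,
        Some(&payer.pubkey()),
        &blockhash,
    ));
    let units_consumed = rpc_client
        .simulate_transaction(&probe)?
        .value
        .units_consumed
        .unwrap_or_default();
    // Request the simulated usage plus a small margin (the margin is an
    // assumption here, not the CLI's exact policy).
    instructions.insert(
        0,
        ComputeBudgetInstruction::set_compute_unit_limit(u32::try_from(
            units_consumed.saturating_add(300),
        )?),
    );
    let tx = Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &[payer],
        blockhash,
    );
    rpc_client.send_and_confirm_transaction(&tx)?;
    Ok(())
}
```

Simulating against a freshly fetched blockhash keeps the estimate aligned with the state the real transaction will execute against.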
--- cli/src/validator_info.rs | 2 +- cli/tests/validator_info.rs | 57 +++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 cli/tests/validator_info.rs diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index e50d74b2d4359f..296c85d99f300a 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -349,7 +349,7 @@ pub fn process_set_validator_info( vec![config.signers[0]] }; - let compute_unit_limit = ComputeUnitLimit::Default; + let compute_unit_limit = ComputeUnitLimit::Simulated; let build_message = |lamports| { let keys = keys.clone(); if balance == 0 { diff --git a/cli/tests/validator_info.rs b/cli/tests/validator_info.rs new file mode 100644 index 00000000000000..293fa2dc4a71bb --- /dev/null +++ b/cli/tests/validator_info.rs @@ -0,0 +1,57 @@ +use { + serde_json::json, + solana_cli::{ + check_balance, + cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}, + }, + solana_faucet::faucet::run_local_faucet, + solana_rpc_client::rpc_client::RpcClient, + solana_sdk::{ + commitment_config::CommitmentConfig, + signature::{keypair_from_seed, Keypair, Signer}, + }, + solana_streamer::socket::SocketAddrSpace, + solana_test_validator::TestValidator, + test_case::test_case, +}; + +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_publish(compute_unit_price: Option) { + solana_logger::setup(); + + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let validator_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); + let mut config_validator = CliConfig::recent_for_tests(); + config_validator.json_rpc_url = test_validator.rpc_url(); + config_validator.signers = vec![&validator_keypair]; + + request_and_confirm_airdrop( + &rpc_client, + &config_validator, + &config_validator.signers[0].pubkey(), + 100_000_000_000, + ) + .unwrap(); + check_balance!( + 100_000_000_000, + &rpc_client, + &config_validator.signers[0].pubkey() + ); + + config_validator.command = CliCommand::SetValidatorInfo { + validator_info: json!({ "name": "test" }), + force_keybase: true, + info_pubkey: None, + compute_unit_price, + }; + process_command(&config_validator).unwrap(); +} From 5ab9e266a2651db9a1e69c76ff03145a0695aba5 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 16 Oct 2024 13:03:03 -0400 Subject: [PATCH 528/529] Supports rehashing with accounts lt hash (#3186) --- runtime/src/bank.rs | 14 ++++++++------ runtime/src/bank/accounts_lt_hash.rs | 5 ++--- runtime/src/snapshot_bank_utils.rs | 20 ++------------------ 3 files changed, 12 insertions(+), 27 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0a4cb785f2c091..2a9a955768177a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2928,6 +2928,11 @@ impl Bank { // freeze is a one-way trip, idempotent self.freeze_started.store(true, Relaxed); + if self.is_accounts_lt_hash_enabled() { + // updating the accounts lt hash must happen *outside* of hash_internal_state() so + // that rehash() can be called and *not* modify self.accounts_lt_hash. 
+ self.update_accounts_lt_hash(); + } *hash = self.hash_internal_state(); self.rc.accounts.accounts_db.mark_slot_frozen(self.slot()); } @@ -5454,10 +5459,6 @@ impl Bank { hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]); }; - let accounts_lt_hash_checksum = self - .is_accounts_lt_hash_enabled() - .then(|| self.update_accounts_lt_hash()); - let buf = self .hard_forks .read() @@ -5517,8 +5518,9 @@ impl Bank { } else { "".to_string() }, - if let Some(accounts_lt_hash_checksum) = accounts_lt_hash_checksum { - format!(", accounts_lt_hash checksum: {accounts_lt_hash_checksum}") + if self.is_accounts_lt_hash_enabled() { + let checksum = self.accounts_lt_hash.lock().unwrap().0.checksum(); + format!(", accounts_lt_hash checksum: {checksum}") } else { String::new() }, diff --git a/runtime/src/bank/accounts_lt_hash.rs b/runtime/src/bank/accounts_lt_hash.rs index 2fe0fceb3f3072..f32ce16ac4a30d 100644 --- a/runtime/src/bank/accounts_lt_hash.rs +++ b/runtime/src/bank/accounts_lt_hash.rs @@ -2,7 +2,7 @@ use { super::Bank, rayon::prelude::*, solana_accounts_db::accounts_db::AccountsDb, - solana_lattice_hash::lt_hash::{Checksum as LtChecksum, LtHash}, + solana_lattice_hash::lt_hash::LtHash, solana_measure::{meas_dur, measure::Measure}, solana_sdk::{ account::{accounts_equal, AccountSharedData}, @@ -29,12 +29,11 @@ impl Bank { /// - mix in its current state /// /// Since this function is non-idempotent, it should only be called once per bank. - pub fn update_accounts_lt_hash(&self) -> LtChecksum { + pub fn update_accounts_lt_hash(&self) { debug_assert!(self.is_accounts_lt_hash_enabled()); let delta_lt_hash = self.calculate_delta_lt_hash(); let mut accounts_lt_hash = self.accounts_lt_hash.lock().unwrap(); accounts_lt_hash.0.mix_in(&delta_lt_hash); - accounts_lt_hash.0.checksum() } /// Calculates the lt hash *of only this slot* diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 2547935c52841d..9d6546a9e8cf18 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -930,19 +930,7 @@ fn bank_to_full_snapshot_archive_with( .accounts_db .set_latest_full_snapshot_slot(bank.slot()); bank.squash(); // Bank may not be a root - - // Rehashing is not currently supported when the accounts lt hash is enabled. - // This is because rehashing will *re-mix-in* all the accounts stored in this bank into the - // accounts lt hash! This is incorrect, as the accounts lt hash would change, even if the bank - // was *not* manually modified by the caller. - // We can re-allow rehasing if we change the Bank to hold its parent's accounts lt hash plus a - // *delta* accounts lt hash, and then Bank::hash_internal_state() will only recalculate the - // delta accounts lt hash. - // Another option is to consider if manual modification should even be allowed in the first - // place. Disallowing it would solve these issues. 
- if !bank.is_accounts_lt_hash_enabled() { - bank.rehash(); // Bank accounts may have been manually modified by the caller - } + bank.rehash(); // Bank may have been manually modified by the caller bank.force_flush_accounts_cache(); bank.clean_accounts(); let calculated_accounts_hash = @@ -1005,11 +993,7 @@ pub fn bank_to_incremental_snapshot_archive( .accounts_db .set_latest_full_snapshot_slot(full_snapshot_slot); bank.squash(); // Bank may not be a root - - // See the comment in bank_to_full_snapshot_archive() when calling rehash() - if !bank.is_accounts_lt_hash_enabled() { - bank.rehash(); // Bank accounts may have been manually modified by the caller - } + bank.rehash(); // Bank may have been manually modified by the caller bank.force_flush_accounts_cache(); bank.clean_accounts(); let calculated_incremental_accounts_hash = From 1e02135a7f47e54f16bae0b730f8b1d1edd84c8f Mon Sep 17 00:00:00 2001 From: Kevin Rodriguez <_@kevinrodriguez.io> Date: Wed, 16 Oct 2024 11:50:15 -0600 Subject: [PATCH 529/529] feat: adds voting cluster authority sigs --- cli/src/cli.rs | 5 ++++ cli/src/vote.rs | 15 ++++++++--- ledger/src/staking_utils.rs | 3 +++ local-cluster/src/local_cluster.rs | 19 +++++++++++++- programs/vote/src/vote_processor.rs | 39 +++++++++++++++++++++++++---- programs/vote/src/vote_state/mod.rs | 5 ++++ sdk/program/src/vote/instruction.rs | 7 ++++-- 7 files changed, 82 insertions(+), 11 deletions(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index e846539f9e4216..f21651422e7621 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -339,6 +339,7 @@ pub enum CliCommand { memo: Option, fee_payer: SignerIndex, compute_unit_price: Option, + cluster_authority: SignerIndex, }, ShowVoteAccount { pubkey: Pubkey, @@ -1475,6 +1476,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { memo, fee_payer, compute_unit_price, + cluster_authority, } => process_create_vote_account( &rpc_client, config, @@ -1492,6 +1494,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { memo.as_ref(), *fee_payer, *compute_unit_price, + *cluster_authority, ), CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, @@ -2139,6 +2142,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }; config.signers = vec![&keypair, &bob_keypair, &identity_keypair]; let result = process_command(&config); @@ -2398,6 +2402,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }; config.signers = vec![&keypair, &bob_keypair, &identity_keypair]; assert!(process_command(&config).is_err()); diff --git a/cli/src/vote.rs b/cli/src/vote.rs index ab9a4897342b24..e03d0af54b231a 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -33,9 +33,7 @@ use { solana_rpc_client_api::config::RpcGetVoteAccountsConfig, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::Account, commitment_config::CommitmentConfig, message::Message, - native_token::lamports_to_sol, pubkey::Pubkey, system_instruction::SystemError, - transaction::Transaction, + account::Account, commitment_config::CommitmentConfig, message::Message, native_token::lamports_to_sol, pubkey::Pubkey, signer::Signer, system_instruction::SystemError, transaction::Transaction }, solana_vote_program::{ vote_error::VoteError, @@ -456,6 +454,8 @@ pub fn parse_create_vote_account( let seed = matches.value_of("seed").map(|s| s.to_string()); let (identity_account, identity_pubkey) = signer_of(matches, "identity_account", wallet_manager)?; + let (cluster_authority, 
cluster_authority_pubkey) = + signer_of(matches, "cluster_authority", wallet_manager)?; let commission = value_t_or_exit!(matches, "commission", u8); let authorized_voter = pubkey_of_signer(matches, "authorized_voter", wallet_manager)?; let authorized_withdrawer = @@ -511,6 +511,7 @@ pub fn parse_create_vote_account( memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), compute_unit_price, + cluster_authority: signer_info.index_of(cluster_authority_pubkey).unwrap(), }, signers: signer_info.signers, }) @@ -804,6 +805,7 @@ pub fn process_create_vote_account( memo: Option<&String>, fee_payer: SignerIndex, compute_unit_price: Option, + cluster_authority: SignerIndex, ) -> ProcessResult { let vote_account = config.signers[vote_account]; let vote_account_pubkey = vote_account.pubkey(); @@ -856,6 +858,7 @@ pub fn process_create_vote_account( }; let ixs = vote_instruction::create_account_with_config( + &config.signers[cluster_authority].pubkey(), &config.signers[0].pubkey(), to, &vote_init, @@ -1816,6 +1819,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }, signers: vec![ Box::new(read_keypair_file(&default_keypair_file).unwrap()), @@ -1850,6 +1854,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }, signers: vec![ Box::new(read_keypair_file(&default_keypair_file).unwrap()), @@ -1891,6 +1896,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }, signers: vec![ Box::new(read_keypair_file(&default_keypair_file).unwrap()), @@ -1944,6 +1950,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }, signers: vec![ Box::new(read_keypair_file(&default_keypair_file).unwrap()), @@ -1987,6 +1994,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }, signers: vec![ Box::new(read_keypair_file(&default_keypair_file).unwrap()), @@ -2026,6 +2034,7 @@ mod tests { memo: None, fee_payer: 0, compute_unit_price: None, + cluster_authority: 3, }, signers: vec![ Box::new(read_keypair_file(&default_keypair_file).unwrap()), diff --git a/ledger/src/staking_utils.rs b/ledger/src/staking_utils.rs index 75d1352792662b..d0ace8d8cbecee 100644 --- a/ledger/src/staking_utils.rs +++ b/ledger/src/staking_utils.rs @@ -29,8 +29,10 @@ pub(crate) mod tests { vote_account: &Keypair, validator_identity_account: &Keypair, amount: u64, + cluster_authority: &Keypair, ) { let vote_pubkey = vote_account.pubkey(); + let cluster_authority_pubkey = cluster_authority.pubkey(); fn process_instructions(bank: &Bank, keypairs: &T, ixs: &[Instruction]) { let tx = Transaction::new_signed_with_payer( ixs, @@ -45,6 +47,7 @@ pub(crate) mod tests { bank, &[from_account, vote_account, validator_identity_account], &vote_instruction::create_account_with_config( + &cluster_authority_pubkey, &from_account.pubkey(), &vote_pubkey, &VoteInit { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index d47adcea941313..b9a9036b1c4713 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -155,6 +155,7 @@ impl LocalCluster { cluster_lamports: u64, lamports_per_node: u64, socket_addr_space: SocketAddrSpace, + cluster_authority_keypair: Arc, ) -> Self { Self::new( &mut ClusterConfig::new_with_equal_stakes( @@ -163,6 +164,7 @@ impl LocalCluster { lamports_per_node, ), socket_addr_space, + cluster_authority_keypair, ) } @@ -187,7 +189,11 @@ impl LocalCluster { } } - pub fn new(config: 
&mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self { + pub fn new( + config: &mut ClusterConfig, + socket_addr_space: SocketAddrSpace, + cluster_authority_keypair: Arc, + ) -> Self { assert_eq!(config.validator_configs.len(), config.node_stakes.len()); let connection_cache = if config.tpu_use_quic { @@ -391,6 +397,7 @@ impl LocalCluster { key.clone(), node_pubkey_to_vote_key.get(&key.pubkey()).cloned(), socket_addr_space, + cluster_authority_keypair, ); } @@ -402,6 +409,7 @@ impl LocalCluster { 0, Arc::new(Keypair::new()), None, + cluster_authority_keypair.clone(), socket_addr_space, ); }); @@ -447,6 +455,7 @@ impl LocalCluster { stake: u64, validator_keypair: Arc, voting_keypair: Option>, + cluster_authority_keypair: Arc, socket_addr_space: SocketAddrSpace, ) -> Pubkey { self.do_add_validator( @@ -455,6 +464,7 @@ impl LocalCluster { stake, validator_keypair, voting_keypair, + cluster_authority_keypair, socket_addr_space, ) } @@ -467,6 +477,7 @@ impl LocalCluster { validator_keypair: Arc, voting_keypair: Option>, socket_addr_space: SocketAddrSpace, + cluster_authority_keypair: Arc, ) -> Pubkey { self.do_add_validator( validator_config, @@ -474,6 +485,7 @@ impl LocalCluster { stake, validator_keypair, voting_keypair, + cluster_authority_keypair, socket_addr_space, ) } @@ -485,6 +497,7 @@ impl LocalCluster { stake: u64, validator_keypair: Arc, mut voting_keypair: Option>, + cluster_authority_keypair: Arc, socket_addr_space: SocketAddrSpace, ) -> Pubkey { let client = self.build_tpu_quic_client().expect("tpu_client"); @@ -520,6 +533,7 @@ impl LocalCluster { Self::setup_vote_and_stake_accounts( &client, voting_keypair.as_ref().unwrap(), + &cluster_authority_keypair, &validator_keypair, stake, ) @@ -757,10 +771,12 @@ impl LocalCluster { fn setup_vote_and_stake_accounts( client: &QuicTpuClient, vote_account: &Keypair, + cluster_authority_keypair: &Keypair, from_account: &Arc, amount: u64, ) -> Result<()> { let vote_account_pubkey = vote_account.pubkey(); + let cluster_authority_pubkey = cluster_authority_keypair.pubkey(); let node_pubkey = from_account.pubkey(); info!( "setup_vote_and_stake_accounts: {}, {}, amount: {}", @@ -778,6 +794,7 @@ impl LocalCluster { { // 1) Create vote account let instructions = vote_instruction::create_account_with_config( + &cluster_authority_pubkey, &from_account.pubkey(), &vote_account_pubkey, &VoteInit { diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 8cb4db468f9b72..4218e5c8353a8f 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -10,10 +10,7 @@ use { sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ - instruction::InstructionError, - program_utils::limited_deserialize, - pubkey::Pubkey, - transaction_context::{BorrowedAccount, InstructionContext, TransactionContext}, + account::ReadableAccount, instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, system_program, transaction_context::{BorrowedAccount, InstructionContext, TransactionContext} }, std::collections::HashSet, }; @@ -74,7 +71,34 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - vote_state::initialize_account(&mut me, &vote_init, &signers, &clock) + + // Account 3 is AUTH1111111111111111111111111111111111111111 + // Which contains the cluster authority pubkey bytes. 
+ let auth_account = invoke_context.transaction_context.get_account_at_index(3)?; + let auth_account_key = invoke_context + .transaction_context + .get_key_of_account_at_index(3)?; + if auth_account_key + != &solana_program::pubkey!("AUTH1111111111111111111111111111111111111111") + { + return Err(InstructionError::MissingAccount); + } + let auth_account = auth_account.borrow(); + if auth_account.owner() != &system_program::ID { + return Err(InstructionError::InvalidAccountOwner); + } + let authorized_signer = auth_account.deserialize_data::(); + if let Err(_) = authorized_signer { + return Err(InstructionError::InvalidArgument); + } + + vote_state::initialize_account( + &mut me, + &vote_init, + &signers, + &clock, + &authorized_signer.unwrap(), + ) } VoteInstruction::Authorize(voter_pubkey, vote_authorize) => { let clock = @@ -1764,9 +1788,11 @@ mod tests { #[test] fn test_create_account_vote_state_1_14_11() { + let cluster_authority = Pubkey::new_unique(); let node_pubkey = Pubkey::new_unique(); let vote_pubkey = Pubkey::new_unique(); let instructions = create_account_with_config( + &cluster_authority, &node_pubkey, &vote_pubkey, &VoteInit { @@ -1802,9 +1828,11 @@ mod tests { #[test] fn test_create_account_vote_state_current() { + let cluster_authority = Pubkey::new_unique(); let node_pubkey = Pubkey::new_unique(); let vote_pubkey = Pubkey::new_unique(); let instructions = create_account_with_config( + &cluster_authority, &node_pubkey, &vote_pubkey, &VoteInit { @@ -1844,6 +1872,7 @@ mod tests { fn test_vote_process_instruction() { solana_logger::setup(); let instructions = create_account_with_config( + &Pubkey::new_unique(), &Pubkey::new_unique(), &Pubkey::new_unique(), &VoteInit::default(), diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 24b480c6198d84..50086c0d9d175f 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -1,6 +1,7 @@ //! Vote state, vote program //! Receive and processes votes from validators pub use solana_program::vote::state::{vote_state_versions::*, *}; +use solana_sdk::pubkey; use { log::*, serde_derive::{Deserialize, Serialize}, @@ -1031,6 +1032,7 @@ pub fn initialize_account( vote_init: &VoteInit, signers: &HashSet, clock: &Clock, + cluster_authority_signer: &Pubkey, ) -> Result<(), InstructionError> { if vote_account.get_data().len() != VoteStateVersions::vote_state_size_of(true) { return Err(InstructionError::InvalidAccountData); @@ -1041,6 +1043,9 @@ pub fn initialize_account( return Err(InstructionError::AccountAlreadyInitialized); } + // Authorized signer must sign. 
(Cluster authority) + verify_authorized_signer(cluster_authority_signer, signers)?; + // node must agree to accept this vote account verify_authorized_signer(&vote_init.node_pubkey, signers)?; diff --git a/sdk/program/src/vote/instruction.rs b/sdk/program/src/vote/instruction.rs index e707c9e06d05bd..1e0a946bd7b42c 100644 --- a/sdk/program/src/vote/instruction.rs +++ b/sdk/program/src/vote/instruction.rs @@ -241,11 +241,13 @@ impl VoteInstruction { } } -fn initialize_account(vote_pubkey: &Pubkey, vote_init: &VoteInit) -> Instruction { +fn initialize_account(vote_pubkey: &Pubkey, vote_init: &VoteInit, cluster_authority: &Pubkey) -> Instruction { let account_metas = vec![ AccountMeta::new(*vote_pubkey, false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new_readonly(sysvar::clock::id(), false), + AccountMeta::new_readonly(solana_program::pubkey!("AUTH1111111111111111111111111111111111111111"), false), + AccountMeta::new_readonly(*cluster_authority, true), AccountMeta::new_readonly(vote_init.node_pubkey, true), ]; @@ -271,6 +273,7 @@ impl<'a> Default for CreateVoteAccountConfig<'a> { } pub fn create_account_with_config( + cluster_authority: &Pubkey, from_pubkey: &Pubkey, vote_pubkey: &Pubkey, vote_init: &VoteInit, @@ -279,7 +282,7 @@ pub fn create_account_with_config( ) -> Vec { let create_ix = system_instruction::create_account(from_pubkey, vote_pubkey, lamports, config.space, &id()); - let init_ix = initialize_account(vote_pubkey, vote_init); + let init_ix = initialize_account(vote_pubkey, vote_init, cluster_authority); vec![create_ix, init_ix] }
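For context, a hypothetical client-side sketch of vote-account creation under the new signature. The keypairs, lamport amount, and blockhash handling are placeholders; the four-signer requirement follows from the instruction metas and verify_authorized_signer calls in the diff above:

```rust
// Sketch: creating a vote account after this change needs four signers:
// the payer, the new vote account, the validator identity, and the
// cluster authority demanded by the updated initialize_account checks.
use solana_sdk::{
    hash::Hash,
    signature::{Keypair, Signer},
    transaction::Transaction,
};
use solana_vote_program::{
    vote_instruction::{self, CreateVoteAccountConfig},
    vote_state::{VoteInit, VoteStateVersions},
};

fn create_vote_account_tx(
    payer: &Keypair,
    vote_account: &Keypair,
    identity: &Keypair,
    cluster_authority: &Keypair,
    lamports: u64,
    recent_blockhash: Hash,
) -> Transaction {
    let instructions = vote_instruction::create_account_with_config(
        &cluster_authority.pubkey(), // new leading parameter from this patch
        &payer.pubkey(),
        &vote_account.pubkey(),
        &VoteInit {
            node_pubkey: identity.pubkey(),
            authorized_voter: identity.pubkey(),
            authorized_withdrawer: identity.pubkey(),
            commission: 0,
        },
        lamports,
        CreateVoteAccountConfig {
            space: VoteStateVersions::vote_state_size_of(true) as u64,
            ..CreateVoteAccountConfig::default()
        },
    );
    // The cluster authority must sign: initialize_account now calls
    // verify_authorized_signer against it.
    Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &[payer, vote_account, identity, cluster_authority],
        recent_blockhash,
    )
}
```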
( + pub fn notify_account_at_accounts_update( &self, slot: Slot, account: &AccountSharedData, txn: &Option<&SanitizedTransaction>, pubkey: &Pubkey, - write_version_producer: &mut P, - ) where - P: Iterator, - { + write_version: u64, + ) { if let Some(accounts_update_notifier) = &self.accounts_update_notifier { accounts_update_notifier.notify_account_update( slot, account, txn, pubkey, - write_version_producer.next().unwrap(), + write_version, ); } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 95661ce5318c7a..ee138a1b516a3c 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3806,9 +3806,10 @@ impl Bank { &durable_nonce, lamports_per_signature, ); - self.rc - .accounts - .store_cached((self.slot(), accounts_to_store.as_slice()), &transactions); + self.rc.accounts.store_cached( + (self.slot(), accounts_to_store.as_slice()), + transactions.as_deref(), + ); }); self.collect_rent(&processing_results); diff --git a/svm/src/account_saver.rs b/svm/src/account_saver.rs index 0ecbd181e6698f..ca3c45dbfd50f3 100644 --- a/svm/src/account_saver.rs +++ b/svm/src/account_saver.rs @@ -45,7 +45,7 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( processing_results: &'a mut [TransactionProcessingResult], durable_nonce: &DurableNonce, lamports_per_signature: u64, -) -> (Vec<(&'a Pubkey, &'a AccountSharedData)>, Vec>) { +) -> (Vec<(&'a Pubkey, &'a AccountSharedData)>, Option>) { let collect_capacity = max_number_of_accounts_to_collect(txs, processing_results); let mut accounts = Vec::with_capacity(collect_capacity); let mut transactions = Vec::with_capacity(collect_capacity); @@ -87,12 +87,12 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( } } } - (accounts, transactions) + (accounts, Some(transactions)) } fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Vec>, + collected_account_transactions: &mut Vec<&'a T>, transaction: &'a T, transaction_accounts: &'a [TransactionAccount], ) { @@ -109,13 +109,13 @@ fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( }) { collected_accounts.push((address, account)); - collected_account_transactions.push(Some(transaction)); + collected_account_transactions.push(transaction); } } fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Vec>, + collected_account_transactions: &mut Vec<&'a T>, transaction: &'a T, rollback_accounts: &'a mut RollbackAccounts, durable_nonce: &DurableNonce, @@ -125,7 +125,7 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( match rollback_accounts { RollbackAccounts::FeePayerOnly { fee_payer_account } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); - collected_account_transactions.push(Some(transaction)); + collected_account_transactions.push(transaction); } RollbackAccounts::SameNonceAndFeePayer { nonce } => { // Since we know we are dealing with a valid nonce account, @@ -134,14 +134,14 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( .try_advance_nonce(*durable_nonce, lamports_per_signature) .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); - collected_account_transactions.push(Some(transaction)); + collected_account_transactions.push(transaction); } RollbackAccounts::SeparateNonceAndFeePayer { nonce, fee_payer_account, } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); - 
collected_account_transactions.push(Some(transaction)); + collected_account_transactions.push(transaction); // Since we know we are dealing with a valid nonce account, // unwrap is safe here @@ -149,7 +149,7 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( .try_advance_nonce(*durable_nonce, lamports_per_signature) .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); - collected_account_transactions.push(Some(transaction)); + collected_account_transactions.push(transaction); } } } @@ -294,9 +294,10 @@ mod tests { .iter() .any(|(pubkey, _account)| *pubkey == &keypair1.pubkey())); + let transactions = transactions.unwrap(); assert_eq!(transactions.len(), 2); - assert!(transactions.iter().any(|txn| txn.unwrap().eq(&tx0))); - assert!(transactions.iter().any(|txn| txn.unwrap().eq(&tx1))); + assert!(transactions.iter().any(|txn| (*txn).eq(&tx0))); + assert!(transactions.iter().any(|txn| (*txn).eq(&tx1))); } #[test] From cddcf8b46a54f581dfe0e95f6132a574c7048b8f Mon Sep 17 00:00:00 2001 From: Brennan Date: Wed, 28 Aug 2024 11:59:39 -0700 Subject: [PATCH 246/529] refactor fork choice (#2701) * refactor fork choice --- core/src/consensus/fork_choice.rs | 624 +++++++++++++++++------------- 1 file changed, 360 insertions(+), 264 deletions(-) diff --git a/core/src/consensus/fork_choice.rs b/core/src/consensus/fork_choice.rs index cddb9db01d2d1c..cb5ddabfbafcf4 100644 --- a/core/src/consensus/fork_choice.rs +++ b/core/src/consensus/fork_choice.rs @@ -22,6 +22,16 @@ pub struct SelectVoteAndResetForkResult { pub heaviest_fork_failures: Vec<HeaviestForkFailures>, } +struct CandidateVoteAndResetBanks<'a> { + // A bank that the validator will vote on given it passes all + // remaining vote checks + candidate_vote_bank: Option<&'a Arc<Bank>>, + // A bank that the validator will reset its PoH to regardless + // of voting behavior + reset_bank: Option<&'a Arc<Bank>>, + switch_fork_decision: SwitchForkDecision, +} + pub trait ForkChoice { type ForkChoiceKey; fn compute_bank_stats( @@ -54,7 +64,43 @@ pub trait ForkChoice { ) -> Vec<Self::ForkChoiceKey>; } -fn select_forks_failed_switch_threshold( +fn last_vote_able_to_land( + reset_bank: Option<&Bank>, + progress: &ProgressMap, + tower: &Tower, +) -> bool { + let Some(heaviest_bank_on_same_voted_fork) = reset_bank else { + // No reset bank means we are in the middle of dump & repair. Last vote + // landing is irrelevant. + return true; + }; + + let Some(last_voted_slot) = tower.last_voted_slot() else { + // No previous vote. + return true; + }; + + let Some(my_latest_landed_vote_slot) = + progress.my_latest_landed_vote(heaviest_bank_on_same_voted_fork.slot()) + else { + // We've either never landed a vote or fork has been pruned or is in the + // middle of dump & repair. Either way, no need to super refresh. + return true; + }; + + // Check if our last vote is able to land in order to determine if we should + // super refresh to vote at the tip. If any of the following are true, we + // don't need to super refresh: + // 1. Last vote has landed + my_latest_landed_vote_slot >= last_voted_slot + // 2. Already voting at the tip + || last_voted_slot >= heaviest_bank_on_same_voted_fork.slot() + // 3. 
Last vote is within slot hashes, regular refresh is enough + || heaviest_bank_on_same_voted_fork + .is_in_slot_hashes_history(&last_voted_slot) +} + +fn recheck_fork_decision_failed_switch_threshold( reset_bank: Option<&Bank>, progress: &ProgressMap, tower: &Tower, @@ -64,82 +110,293 @@ fn select_forks_failed_switch_threshold( heaviest_bank_slot: Slot, failure_reasons: &mut Vec<HeaviestForkFailures>, switch_proof_stake: u64, total_stake: u64, switch_fork_decision: SwitchForkDecision, ) -> SwitchForkDecision { - let last_vote_unable_to_land = match reset_bank { - Some(heaviest_bank_on_same_voted_fork) => { - match tower.last_voted_slot() { - Some(last_voted_slot) => { - match progress.my_latest_landed_vote(heaviest_bank_on_same_voted_fork.slot()) { - Some(my_latest_landed_vote) => - // Last vote did not land - { - my_latest_landed_vote < last_voted_slot - // If we are already voting at the tip, there is nothing we can do. - && last_voted_slot < heaviest_bank_on_same_voted_fork.slot() - // Last vote outside slot hashes of the tip of fork - && !heaviest_bank_on_same_voted_fork - .is_in_slot_hashes_history(&last_voted_slot) - } - None => false, - } - } - None => false, - } - } - None => false, - }; - - if last_vote_unable_to_land { + if !last_vote_able_to_land(reset_bank, progress, tower) { // If we reach here, these assumptions are true: // 1. We can't switch because of threshold - // 2. Our last vote was on a non-duplicate/confirmed slot - // 3. Our last vote is now outside slot hashes history of the tip of fork + // 2. Our last vote is now outside slot hashes history of the tip of fork // So, there was no hope of this last vote ever landing again. // In this case, we do want to obey threshold, yet try to register our vote on // the current fork, so we choose to vote at the tip of current fork instead. // This will not cause longer lockout because lockout doesn't double after 512 // slots, it might be enough to get majority vote. - SwitchForkDecision::SameFork + return SwitchForkDecision::SameFork; + } + + // If we can't switch, then reset to the next votable bank on the same + // fork as our last vote, but don't vote. + + // We don't just reset to the heaviest fork when switch threshold fails because + // a situation like this can occur: + + /* Figure 1: + slot 0 + | + slot 1 + / \ + slot 2 (last vote) | + | slot 8 (10%) + slot 4 (9%) + */ + + // Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails + // the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier), + // then there will be no blocks to include the votes for slot 4, and the network halts + // because 90% of validators can't vote + info!( + "Waiting to switch vote to {}, + resetting to slot {:?} for now, + switch proof stake: {}, + threshold stake: {}, + total stake: {}", + heaviest_bank_slot, + reset_bank.as_ref().map(|b| b.slot()), + switch_proof_stake, + total_stake as f64 * SWITCH_FORK_THRESHOLD, + total_stake + ); + failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( + heaviest_bank_slot, + switch_proof_stake, + total_stake, + )); + switch_fork_decision +} + +fn select_candidates_failed_switch<'a>( + heaviest_bank: &'a Arc<Bank>, + heaviest_bank_on_same_voted_fork: Option<&'a Arc<Bank>>, + progress: &'a ProgressMap, + tower: &Tower, + failure_reasons: &mut Vec<HeaviestForkFailures>, + switch_proof_stake: u64, + total_stake: u64, + initial_switch_fork_decision: SwitchForkDecision, +) -> CandidateVoteAndResetBanks<'a> { + // If our last vote is unable to land (even through normal refresh), then we + // temporarily "super" refresh our vote to the tip of our last voted fork. 
+ let final_switch_fork_decision = recheck_fork_decision_failed_switch_threshold( + heaviest_bank_on_same_voted_fork.map(|bank| bank.as_ref()), + progress, + tower, + heaviest_bank.slot(), + failure_reasons, + switch_proof_stake, + total_stake, + initial_switch_fork_decision, + ); + let candidate_vote_bank = if final_switch_fork_decision.can_vote() { + // We need to "super" refresh our vote to the tip of our last voted fork + // because our last vote is unable to land. This is inferred by + // initially determining we can't vote but then determining we can vote + // on the same fork. + heaviest_bank_on_same_voted_fork } else { + // Just return the original vote candidate (the heaviest bank) for + // logging purposes. We can't actually vote on it, but this will allow + // us to check if there are any additional voting failures besides the + // switch threshold. + Some(heaviest_bank) + }; + CandidateVoteAndResetBanks { + candidate_vote_bank, + reset_bank: heaviest_bank_on_same_voted_fork, + switch_fork_decision: final_switch_fork_decision, + } +} + +fn select_candidates_failed_switch_duplicate_rollback<'a>( + heaviest_bank: &'a Arc<Bank>, + latest_duplicate_ancestor: Slot, + failure_reasons: &mut Vec<HeaviestForkFailures>, + initial_switch_fork_decision: SwitchForkDecision, +) -> CandidateVoteAndResetBanks<'a> { + // If we can't switch and our last vote was on an unconfirmed, duplicate + // slot, then we need to reset to the heaviest bank, even if the heaviest + // bank is not a descendant of the last vote. + // + // Usually for switch threshold failures, we reset to the heaviest + // descendant of the last vote, but in this case, the last vote was on a + // duplicate branch. + // + // We reset to the heaviest bank because in the case of *unconfirmed* + // duplicate slots, somebody needs to generate an alternative branch to + // escape a situation like a 50-50 split where both partitions have voted on + // different versions of the same duplicate slot. + // + // Unlike the situation described in `Figure 1` above, this is safe. To see + // why, imagine the same situation described in Figure 1 above occurs, but + // slot 2 is a duplicate block. There are now a few cases: + // + // Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + + // DUPLICATE_LIVENESS_THRESHOLD = 1; + // + // 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. + // Because duplicate slots can be confirmed by gossip, unlike the situation + // described in `Figure 1`, we don't need those votes to land in a + // descendant to confirm slot 2. Once slot 2 is confirmed by gossip votes, + // that fork is added back to the fork choice set and falls back into normal + // fork choice, which is covered by the `FailedSwitchThreshold` case above + // (everyone will resume building on their last voted fork, slot 4, since + // slot 8 doesn't have enough stake for switch threshold) + // + // 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, + // > SWITCH_FORK_THRESHOLD of the network voted on slot 8. Then everybody + // abandons the duplicate fork from fork choice and builds on slot 8's fork. + // They can also vote on slot 8's fork because it has sufficient weight to + // pass the switching threshold. 
+ // + // 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, + // <= SWITCH_FORK_THRESHOLD of the network voted on slot 8. This means more + // than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot + // guarantee progress anyways. + // + // Note: the heaviest fork is never descended from a known unconfirmed + // duplicate slot because the fork choice rule ensures that (marks it as an + // invalid candidate). Thus, it's safe to use as the reset bank. + let reset_bank = Some(heaviest_bank); + info!( + "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", + heaviest_bank.slot(), + reset_bank.as_ref().map(|b| b.slot()), + latest_duplicate_ancestor, + ); + failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( + heaviest_bank.slot(), + 0, // In this case we never actually performed the switch check, 0 for now + 0, + )); + CandidateVoteAndResetBanks { + candidate_vote_bank: None, + reset_bank, + switch_fork_decision: initial_switch_fork_decision, + } +} + +fn select_candidate_vote_and_reset_banks<'a>( + heaviest_bank: &'a Arc<Bank>, + heaviest_bank_on_same_voted_fork: Option<&'a Arc<Bank>>, + progress: &'a ProgressMap, + tower: &'a Tower, + failure_reasons: &mut Vec<HeaviestForkFailures>, + initial_switch_fork_decision: SwitchForkDecision, +) -> CandidateVoteAndResetBanks<'a> { + match initial_switch_fork_decision { + SwitchForkDecision::FailedSwitchThreshold(switch_proof_stake, total_stake) => { + select_candidates_failed_switch( + heaviest_bank, + heaviest_bank_on_same_voted_fork, + progress, + tower, + failure_reasons, + switch_proof_stake, + total_stake, + initial_switch_fork_decision, + ) + } + SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => { + select_candidates_failed_switch_duplicate_rollback( + heaviest_bank, + latest_duplicate_ancestor, + failure_reasons, + initial_switch_fork_decision, + ) + } + SwitchForkDecision::SameFork | SwitchForkDecision::SwitchProof(_) => { + CandidateVoteAndResetBanks { + candidate_vote_bank: Some(heaviest_bank), + reset_bank: Some(heaviest_bank), + switch_fork_decision: initial_switch_fork_decision, + } + } + } +} - +// Checks for all possible reasons we might not be able to vote on the candidate +// bank. Records any failure reasons, and doesn't early return so we can be sure +// to record all possible reasons. +fn can_vote_on_candidate_bank( + candidate_vote_bank_slot: Slot, + progress: &ProgressMap, + tower: &Tower, + failure_reasons: &mut Vec<HeaviestForkFailures>, + switch_fork_decision: &SwitchForkDecision, +) -> bool { + let ( + is_locked_out, + vote_thresholds, + propagated_stake, + is_leader_slot, + fork_weight, + total_threshold_stake, + total_epoch_stake, + ) = { + let fork_stats = progress.get_fork_stats(candidate_vote_bank_slot).unwrap(); + let propagated_stats = &progress + .get_propagated_stats(candidate_vote_bank_slot) + .unwrap(); + ( + fork_stats.is_locked_out, + &fork_stats.vote_threshold, + propagated_stats.propagated_validators_stake, + propagated_stats.is_leader_slot, + fork_stats.fork_weight(), + fork_stats.total_stake, + propagated_stats.total_epoch_stake, + ) + }; - // Check if we are locked out. 
+ if is_locked_out { + failure_reasons.push(HeaviestForkFailures::LockedOut(candidate_vote_bank_slot)); + } + + // Check if we failed any of the vote thresholds. + let mut threshold_passed = true; + for threshold_failure in vote_thresholds { + let &ThresholdDecision::FailedThreshold(vote_depth, fork_stake) = threshold_failure else { + continue; + }; + failure_reasons.push(HeaviestForkFailures::FailedThreshold( + candidate_vote_bank_slot, + vote_depth, + fork_stake, + total_threshold_stake, + )); + // Ignore shallow checks for voting purposes + if (vote_depth as usize) >= tower.threshold_depth { + threshold_passed = false; + } + } - // Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails - // the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier), - // then there will be no blocks to include the votes for slot 4, and the network halts - // because 90% of validators can't vote + // Check if our last leader slot has been propagated. + // If we reach here, the candidate_vote_bank exists in the bank_forks, so it isn't + // dumped and should exist in progress map. + let propagation_confirmed = is_leader_slot + || progress + .get_leader_propagation_slot_must_exist(candidate_vote_bank_slot) + .0; + if !propagation_confirmed { + failure_reasons.push(HeaviestForkFailures::NoPropagatedConfirmation( + candidate_vote_bank_slot, + propagated_stake, + total_epoch_stake, + )); + } + + if !is_locked_out + && threshold_passed + && propagation_confirmed + && switch_fork_decision.can_vote() + { info!( - "Waiting to switch vote to {}, - resetting to slot {:?} for now, - switch proof stake: {}, - threshold stake: {}, - total stake: {}", - heaviest_bank_slot, - reset_bank.as_ref().map(|b| b.slot()), - switch_proof_stake, - total_stake as f64 * SWITCH_FORK_THRESHOLD, - total_stake + "voting: {} {:.1}%", + candidate_vote_bank_slot, + 100.0 * fork_weight ); - failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( - heaviest_bank_slot, - switch_proof_stake, - total_stake, - )); - switch_fork_decision + true + } else { + false } } @@ -179,221 +436,60 @@ pub fn select_vote_and_reset_forks( // switch_threshold fails // 3) The best "selected" bank is on a different fork, // switch_threshold succeeds - let mut failure_reasons = vec![]; - struct CandidateVoteAndResetBanks<'a> { - // A bank that the validator will vote on given it passes all - // remaining vote checks - candidate_vote_bank: Option<&'a Arc<Bank>>, - // A bank that the validator will reset its PoH to regardless - // of voting behavior - reset_bank: Option<&'a Arc<Bank>>, - switch_fork_decision: SwitchForkDecision, - } - let candidate_vote_and_reset_banks = { - let switch_fork_decision: SwitchForkDecision = tower.check_switch_threshold( - heaviest_bank.slot(), - ancestors, - descendants, - progress, - heaviest_bank.total_epoch_stake(), - heaviest_bank - .epoch_vote_accounts(heaviest_bank.epoch()) - .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"), - latest_validator_votes_for_frozen_banks, - fork_choice, - ); - - match switch_fork_decision { - SwitchForkDecision::FailedSwitchThreshold(switch_proof_stake, total_stake) => { - let final_switch_fork_decision = select_forks_failed_switch_threshold( - heaviest_bank_on_same_voted_fork.map(|bank| bank.as_ref()), - progress, - tower, - heaviest_bank.slot(), - &mut failure_reasons, - switch_proof_stake, - total_stake, - switch_fork_decision, - ); - let candidate_vote_bank = if final_switch_fork_decision.can_vote() { - // 
The only time we would still vote despite `!switch_fork_decision.can_vote()` - // is if we switched the vote candidate to `heaviest_bank_on_same_voted_fork` - // because we needed to refresh the vote to the tip of our last voted fork. - heaviest_bank_on_same_voted_fork - } else { - // Otherwise, we should just return the original vote candidate, the heaviest bank - // for logging purposes, namely to check if there are any additional voting failures - // besides the switch threshold - Some(heaviest_bank) - }; - CandidateVoteAndResetBanks { - candidate_vote_bank, - reset_bank: heaviest_bank_on_same_voted_fork, - switch_fork_decision: final_switch_fork_decision, - } - } - SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => { - // If we can't switch and our last vote was on an unconfirmed, duplicate slot, - // then we need to reset to the heaviest bank, even if the heaviest bank is not - // a descendant of the last vote (usually for switch threshold failures we reset - // to the heaviest descendant of the last vote, but in this case, the last vote - // was on a duplicate branch). This is because in the case of *unconfirmed* duplicate - // slots, somebody needs to generate an alternative branch to escape a situation - // like a 50-50 split where both partitions have voted on different versions of the - // same duplicate slot. - - // Unlike the situation described in `Figure 1` above, this is safe. To see why, - // imagine the same situation described in Figure 1 above occurs, but slot 2 is - // a duplicate block. There are now a few cases: - // - // Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + DUPLICATE_LIVENESS_THRESHOLD = 1; - // - // 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. Because duplicate slots can be confirmed - // by gossip, unlike the situation described in `Figure 1`, we don't need those - // votes to land in a descendant to confirm slot 2. Once slot 2 is confirmed by - // gossip votes, that fork is added back to the fork choice set and falls back into - // normal fork choice, which is covered by the `FailedSwitchThreshold` case above - // (everyone will resume building on their last voted fork, slot 4, since slot 8 - // doesn't have for switch threshold) - // - // 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, > SWITCH_FORK_THRESHOLD of the network voted - // on slot 8. Then everybody abandons the duplicate fork from fork choice and both builds - // on slot 8's fork. They can also vote on slot 8's fork because it has sufficient weight - // to pass the switching threshold - // - // 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, <= SWITCH_FORK_THRESHOLD of the network voted - // on slot 8. This means more than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot - // guarantee progress anyways - - // Note the heaviest fork is never descended from a known unconfirmed duplicate slot - // because the fork choice rule ensures that (marks it as an invalid candidate), - // thus it's safe to use as the reset bank. 
- let reset_bank = Some(heaviest_bank); - info!( - "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", - heaviest_bank.slot(), - reset_bank.as_ref().map(|b| b.slot()), - latest_duplicate_ancestor, - ); - failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( - heaviest_bank.slot(), - 0, // In this case we never actually performed the switch check, 0 for now - 0, - )); - CandidateVoteAndResetBanks { - candidate_vote_bank: None, - reset_bank, - switch_fork_decision, - } - } - _ => CandidateVoteAndResetBanks { - candidate_vote_bank: Some(heaviest_bank), - reset_bank: Some(heaviest_bank), - switch_fork_decision, - }, - } - }; + let initial_switch_fork_decision: SwitchForkDecision = tower.check_switch_threshold( + heaviest_bank.slot(), + ancestors, + descendants, + progress, + heaviest_bank.total_epoch_stake(), + heaviest_bank + .epoch_vote_accounts(heaviest_bank.epoch()) + .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"), + latest_validator_votes_for_frozen_banks, + fork_choice, + ); + let mut failure_reasons = vec![]; let CandidateVoteAndResetBanks { candidate_vote_bank, reset_bank, switch_fork_decision, - } = candidate_vote_and_reset_banks; + } = select_candidate_vote_and_reset_banks( + heaviest_bank, + heaviest_bank_on_same_voted_fork, + progress, + tower, + &mut failure_reasons, + initial_switch_fork_decision, + ); - if let Some(candidate_vote_bank) = candidate_vote_bank { - // If there's a bank to potentially vote on, then make the remaining - // checks - let ( - is_locked_out, - vote_thresholds, - propagated_stake, - is_leader_slot, - fork_weight, - total_threshold_stake, - total_epoch_stake, - ) = { - let fork_stats = progress.get_fork_stats(candidate_vote_bank.slot()).unwrap(); - let propagated_stats = &progress - .get_propagated_stats(candidate_vote_bank.slot()) - .unwrap(); - ( - fork_stats.is_locked_out, - &fork_stats.vote_threshold, - propagated_stats.propagated_validators_stake, - propagated_stats.is_leader_slot, - fork_stats.fork_weight(), - fork_stats.total_stake, - propagated_stats.total_epoch_stake, - ) + let Some(candidate_vote_bank) = candidate_vote_bank else { + // No viable candidate to vote on. + return SelectVoteAndResetForkResult { + vote_bank: None, + reset_bank: reset_bank.cloned(), + heaviest_fork_failures: failure_reasons, }; + }; - // If we reach here, the candidate_vote_bank exists in the bank_forks, so it isn't - // dumped and should exist in progress map. 
- let propagation_confirmed = is_leader_slot - || progress - .get_leader_propagation_slot_must_exist(candidate_vote_bank.slot()) - .0; - - if is_locked_out { - failure_reasons.push(HeaviestForkFailures::LockedOut(candidate_vote_bank.slot())); - } - let mut threshold_passed = true; - for threshold_failure in vote_thresholds { - let &ThresholdDecision::FailedThreshold(vote_depth, fork_stake) = threshold_failure - else { - continue; - }; - failure_reasons.push(HeaviestForkFailures::FailedThreshold( - candidate_vote_bank.slot(), - vote_depth, - fork_stake, - total_threshold_stake, - )); - // Ignore shallow checks for voting purposes - if (vote_depth as usize) >= tower.threshold_depth { - threshold_passed = false; - } - } - if !propagation_confirmed { - failure_reasons.push(HeaviestForkFailures::NoPropagatedConfirmation( - candidate_vote_bank.slot(), - propagated_stake, - total_epoch_stake, - )); - } - - if !is_locked_out - && threshold_passed - && propagation_confirmed - && switch_fork_decision.can_vote() - { - info!( - "voting: {} {:.1}%", - candidate_vote_bank.slot(), - 100.0 * fork_weight - ); - SelectVoteAndResetForkResult { - vote_bank: Some((candidate_vote_bank.clone(), switch_fork_decision)), - reset_bank: Some(candidate_vote_bank.clone()), - heaviest_fork_failures: failure_reasons, - } - } else { - SelectVoteAndResetForkResult { - vote_bank: None, - reset_bank: reset_bank.cloned(), - heaviest_fork_failures: failure_reasons, - } - } - } else if reset_bank.is_some() { + if can_vote_on_candidate_bank( + candidate_vote_bank.slot(), + progress, + tower, + &mut failure_reasons, + &switch_fork_decision, + ) { + // We can vote! SelectVoteAndResetForkResult { - vote_bank: None, - reset_bank: reset_bank.cloned(), + vote_bank: Some((candidate_vote_bank.clone(), switch_fork_decision)), + reset_bank: Some(candidate_vote_bank.clone()), heaviest_fork_failures: failure_reasons, } } else { + // Unable to vote on the candidate bank. SelectVoteAndResetForkResult { vote_bank: None, - reset_bank: None, + reset_bank: reset_bank.cloned(), heaviest_fork_failures: failure_reasons, } } From e8cc9dc12e7dbf2e6054d6404556d134c70106a5 Mon Sep 17 00:00:00 2001 From: Tyera Date: Wed, 28 Aug 2024 14:25:14 -0600 Subject: [PATCH 247/529] Format string literals in solana_rpc_client_api (#2769) Format strings --- rpc-client-api/src/custom_error.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index 2e54e8edd22e02..8ef3bfae0faa33 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -115,7 +115,8 @@ impl From<RpcCustomError> for Error { } => Self { code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP), message: format!( - "Block {slot} cleaned up, does not exist on node. 
First available block: \ + {first_available_block}", ), data: None, }, @@ -178,8 +179,8 @@ impl From<RpcCustomError> for Error { JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX, ), message: format!( - "{index_key} excluded from account secondary indexes; \ - this RPC method unavailable for key" + "{index_key} excluded from account secondary indexes; this RPC method \ + unavailable for key" ), data: None, }, @@ -211,8 +212,8 @@ impl From<RpcCustomError> for Error { code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION), message: format!( "Transaction version ({version}) is not supported by the requesting client. \ - Please try the request again with the following configuration parameter: \ - \"maxSupportedTransactionVersion\": {version}" + Please try the request again with the following configuration parameter: \ + \"maxSupportedTransactionVersion\": {version}" ), data: None, }, @@ -223,7 +224,11 @@ impl From<RpcCustomError> for Error { context_slot, })), }, - RpcCustomError::EpochRewardsPeriodActive { slot, current_block_height, rewards_complete_block_height } => Self { + RpcCustomError::EpochRewardsPeriodActive { + slot, + current_block_height, + rewards_complete_block_height, + } => Self { code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_EPOCH_REWARDS_PERIOD_ACTIVE), message: format!("Epoch rewards period still active at slot {slot}"), data: Some(serde_json::json!(EpochRewardsPeriodActiveErrorData { From 184469c1f03f1edd1e04ae396b50423276e0de16 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 28 Aug 2024 15:34:16 -0500 Subject: [PATCH 248/529] add comment (#2775) --- accounts-db/src/accounts_db.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9f43360e4f1b71..17d92aff4e2daf 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1233,6 +1233,7 @@ impl AccountStorageEntry { } } + /// returns # of accounts remaining in the storage fn remove_accounts( &self, num_bytes: usize, @@ -8030,11 +8031,13 @@ impl AccountsDb { store.slot(), *slot ); if offsets.len() == store.count() { + // all remaining alive accounts in the storage are being removed, so the entire storage/slot is dead store.remove_accounts(store.alive_bytes(), reset_accounts, offsets.len()); self.dirty_stores.insert(*slot, store.clone()); dead_slots.insert(*slot); } else { + // not all accounts are being removed, so figure out sizes of accounts we are removing and update the alive bytes and alive account count let (_, us) = measure_us!({ let mut offsets = offsets.iter().cloned().collect::<Vec<_>>(); // sort so offsets are in order. This improves efficiency of loading the accounts. 
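The two comments added above pin down a small invariant that is easy to state on its own. A standalone sketch (function and parameter names invented for illustration; in the patch this logic lives inline in AccountsDb and operates on an AccountStorageEntry):

    /// Sketch: a storage (and therefore its slot) becomes dead exactly when
    /// every remaining alive account in that storage is being removed.
    fn whole_storage_becomes_dead(offsets_to_remove: usize, alive_accounts_in_store: usize) -> bool {
        // Mirrors `offsets.len() == store.count()` in the diff above.
        offsets_to_remove == alive_accounts_in_store
    }

When this returns false, only the removed accounts' sizes are subtracted from the storage's alive bytes and alive account count, as the second comment describes.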
From 726a14f0f270d45c314f47575d4fe982ef03f070 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 28 Aug 2024 17:44:03 -0400 Subject: [PATCH 249/529] replay: do not refresh votes for hot spare validators (#2770) --- core/src/consensus.rs | 6 ++++++ core/src/replay_stage.rs | 16 +++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 96fbbc6b68d0bd..5271af556e493f 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -230,6 +230,8 @@ pub(crate) enum BlockhashStatus { Uninitialized, /// Non voting validator NonVoting, + /// Hot spare validator + HotSpare, /// Successfully generated vote tx with blockhash Blockhash(Hash), } @@ -575,6 +577,10 @@ impl Tower { self.last_vote_tx_blockhash = BlockhashStatus::NonVoting; } + pub(crate) fn mark_last_vote_tx_blockhash_hot_spare(&mut self) { + self.last_vote_tx_blockhash = BlockhashStatus::HotSpare; + } + pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> { let vote_account = bank.get_vote_account(vote_account_pubkey)?; let vote_state = vote_account.vote_state(); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 398a15601ba3ee..f1a92daa2650f5 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -129,7 +129,11 @@ enum ForkReplayMode { enum GenerateVoteTxResult { // non voting validator, not eligible for refresh + // until authorized keypair is overridden NonVoting, + // hot spare validator, not eligible for refresh + // until set identity is invoked + HotSpare, // failed generation, eligible for refresh Failed, Tx(Transaction), @@ -139,6 +143,10 @@ impl GenerateVoteTxResult { fn is_non_voting(&self) -> bool { matches!(self, Self::NonVoting) } + + fn is_hot_spare(&self) -> bool { + matches!(self, Self::HotSpare) + } } // Implement a destructor for the ReplayStage thread to signal it exited @@ -2491,7 +2499,7 @@ impl ReplayStage { vote_state.node_pubkey, node_keypair.pubkey() ); - return GenerateVoteTxResult::Failed; + return GenerateVoteTxResult::HotSpare; } let Some(authorized_voter_pubkey) = vote_state.get_authorized_voter(bank.epoch()) else { @@ -2586,9 +2594,9 @@ impl ReplayStage { // If we are a non voting validator or have an incorrect setup preventing us from // generating vote txs, no need to refresh let last_vote_tx_blockhash = match tower.last_vote_tx_blockhash() { // Since the checks in vote generation are deterministic, if we were non voting + // Since the checks in vote generation are deterministic, if we were non voting or hot spare // on the original vote, the refresh will also fail. No reason to refresh. - BlockhashStatus::NonVoting => return, + BlockhashStatus::NonVoting | BlockhashStatus::HotSpare => return, // In this case we have not voted since restart, it is unclear if we are non voting. // Attempt to refresh. BlockhashStatus::Uninitialized => None, @@ -2651,6 +2659,8 @@ impl ReplayStage { last_vote_refresh_time.last_refresh_time = Instant::now(); } else if vote_tx_result.is_non_voting() { tower.mark_last_vote_tx_blockhash_non_voting(); + } else if vote_tx_result.is_hot_spare() { + tower.mark_last_vote_tx_blockhash_hot_spare(); } } From 7e6399ab7244f9947ac52baa9bcd3bceec5191d0 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:48:00 -0700 Subject: [PATCH 250/529] Wen restart send first heaviest fork when entering the aggregate loop. (#2735) * Send out HeaviestFork the first time entering the loop. 
* Hack progress_changed and progress_last_sent so we don't need first_time_entering_loop. --- wen-restart/src/wen_restart.rs | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 85f54779a854f2..9e89fa968ee8b3 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -686,13 +686,15 @@ pub(crate) fn aggregate_restart_heaviest_fork( .unwrap() .total_active_stake = total_active_stake; - let mut progress_last_sent = Instant::now(); let mut cursor = solana_gossip::crds::Cursor::default(); - let mut progress_changed = false; + // Init progress_changed to true and progress_last_sent to an old time so we can send out the first Gossip message. + let mut progress_changed = true; + let mut progress_last_sent = Instant::now() + .checked_sub(Duration::from_secs(HEAVIEST_REFRESH_INTERVAL_IN_SECONDS)) + .unwrap(); let majority_stake_required = (total_stake as f64 / 100.0 * adjusted_threshold_percent as f64).round() as u64; let mut total_active_stake_higher_than_supermajority = false; - let mut first_time_entering_loop = true; loop { if exit.load(Ordering::Relaxed) { return Err(WenRestartError::Exiting.into()); } @@ -745,10 +747,8 @@ pub(crate) fn aggregate_restart_heaviest_fork( // the first time. if progress_last_sent.elapsed().as_secs() >= HEAVIEST_REFRESH_INTERVAL_IN_SECONDS || can_exit - || first_time_entering_loop || saw_supermajority_first_time { - first_time_entering_loop = false; cluster_info.push_restart_heaviest_fork( heaviest_fork_slot, heaviest_fork_hash, @@ -3014,6 +3014,22 @@ mod tests { exit.clone(), Some(WenRestartError::Exiting), ); + // Find the first HeaviestFork message sent out entering the loop. + let my_pubkey = test_state.cluster_info.id(); + let mut found_myself = false; + while !found_myself { + sleep(Duration::from_millis(100)); + test_state.cluster_info.flush_push_queue(); + for gossip_record in test_state + .cluster_info + .get_restart_heaviest_fork(&mut cursor) + { + if gossip_record.from == my_pubkey && gossip_record.observed_stake > 0 { + found_myself = true; + break; + } + } + } // Simulating everyone sending out the first RestartHeaviestFork message, Gossip propagation takes // time, so the observed_stake is probably smaller than actual active stake. We should send out // heaviest fork indicating we have active stake exceeding supermajority. 
@@ -3042,7 +3058,6 @@ mod tests { now, ); } - let my_pubkey = test_state.cluster_info.id(); let mut found_myself = false; let expected_active_stake = (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT - NON_CONFORMING_VALIDATOR_PERCENT) @@ -3086,6 +3101,7 @@ mod tests { }), ..Default::default() }; + let different_bankhash = Hash::new_unique(); let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT - NON_CONFORMING_VALIDATOR_PERCENT) From 68e86944e7f49e11ac3015f66bef5ddb0bf57e70 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 29 Aug 2024 11:55:54 +0800 Subject: [PATCH 251/529] metrics: remove clean_stored_dead_slots-ms (#2756) --- accounts-db/src/accounts_db.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 17d92aff4e2daf..63c03ada543eef 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8270,7 +8270,6 @@ impl AccountsDb { pubkeys_removed_from_accounts_index, ); measure.stop(); - inc_new_counter_info!("clean_stored_dead_slots-ms", measure.as_ms() as usize); self.clean_accounts_stats .clean_stored_dead_slots_us .fetch_add(measure.as_us(), Ordering::Relaxed); From 6b9b2c44d54ce9181c9c774e8c4a9ea9ddb3c134 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 20:14:30 +0800 Subject: [PATCH 252/529] build(deps): bump rustc_version from 0.4.0 to 0.4.1 (#2784) Bumps [rustc_version](https://github.com/djc/rustc-version-rs) from 0.4.0 to 0.4.1. - [Release notes](https://github.com/djc/rustc-version-rs/releases) - [Commits](https://github.com/djc/rustc-version-rs/compare/v0.4.0...v0.4.1) --- updated-dependencies: - dependency-name: rustc_version dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 60 +++++++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 328615b0d6efd1..0a120c26db966a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -87,7 +87,7 @@ dependencies = [ "hex", "hyper", "log", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_derive", "serde_json", @@ -499,7 +499,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "paste", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "zeroize", ] @@ -1256,7 +1256,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -4861,9 +4861,9 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver 1.0.23", ] @@ -5552,7 +5552,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "seqlock", "serde", "serde_bytes", @@ -5592,7 +5592,7 @@ dependencies = [ "log", "num-derive", "num-traits", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-log-collector", "solana-program", "solana-program-runtime", @@ -5753,7 +5753,7 @@ dependencies = [ "log", "rand 0.8.5", "rayon", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_derive", "solana-frozen-abi", @@ -5847,7 +5847,7 @@ dependencies = [ "lazy_static", "log", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-compute-budget-program", @@ -6094,7 +6094,7 @@ dependencies = [ name = "solana-compute-budget" version = "2.1.0" dependencies = [ - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-frozen-abi", "solana-sdk", ] @@ -6177,7 +6177,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "rustls", "serde", "serde_bytes", @@ -6251,7 +6251,7 @@ dependencies = [ "lazy_static", "log", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-builtins-default-costs", "solana-compute-budget", "solana-frozen-abi", @@ -6409,7 +6409,7 @@ dependencies = [ "im", "log", "memmap2", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -6425,7 +6425,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "syn 2.0.76", ] @@ -6511,7 +6511,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -6614,7 +6614,7 @@ dependencies = [ "rayon", "reed-solomon-erasure", "rocksdb", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "scopeguard", "serde", "serde_bytes", @@ -6875,7 +6875,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -6970,7 +6970,7 @@ dependencies = [ "parking_lot 0.12.3", "qualifier_attr", "rand 0.8.5", - "rustc_version 0.4.0", + 
"rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -7019,7 +7019,7 @@ dependencies = [ "num-traits", "percentage", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "solana-compute-budget", "solana-frozen-abi", @@ -7351,7 +7351,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "regex", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_derive", "serde_json", @@ -7411,7 +7411,7 @@ dependencies = [ "criterion", "log", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-builtins-default-costs", "solana-compute-budget", "solana-program", @@ -7461,7 +7461,7 @@ dependencies = [ "qualifier_attr", "rand 0.7.3", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -7506,7 +7506,7 @@ dependencies = [ "anyhow", "borsh 1.5.1", "libsecp256k1", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-define-syscall", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -7541,7 +7541,7 @@ version = "2.1.0" dependencies = [ "assert_matches", "bincode", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_json", "solana-frozen-abi", @@ -7572,7 +7572,7 @@ dependencies = [ "bincode", "log", "proptest", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-compute-budget", "solana-config-program", "solana-log-collector", @@ -7590,7 +7590,7 @@ version = "2.1.0" dependencies = [ "assert_matches", "bincode", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "solana-program-test", "solana-sdk", "solana-vote-program", @@ -7696,7 +7696,7 @@ dependencies = [ "prost", "qualifier_attr", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_derive", "shuttle", @@ -8083,7 +8083,7 @@ name = "solana-version" version = "2.1.0" dependencies = [ "log", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "semver 1.0.23", "serde", "serde_derive", @@ -8101,7 +8101,7 @@ dependencies = [ "itertools 0.12.1", "log", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_derive", "solana-frozen-abi", @@ -8119,7 +8119,7 @@ dependencies = [ "log", "num-derive", "num-traits", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde_derive", "solana-frozen-abi", From f6b77526f1c884bad7058e76bac4ad5177579813 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 29 Aug 2024 15:46:29 +0200 Subject: [PATCH 253/529] Adjustments of loader-v4 (part 2) (#2750) * Splits transfer authority and finalize into two instructions. * Adds next-version-forwarding to finalization. * Makes loader-v4 a program runtime v1 loader. 
--- Cargo.lock | 1 + cargo-registry/src/crate_handler.rs | 2 +- cli/src/program_v4.rs | 280 ++++++++++- program-runtime/src/loaded_programs.rs | 8 +- programs/bpf_loader/src/lib.rs | 17 +- programs/bpf_loader/src/syscalls/mod.rs | 30 +- programs/loader-v4/Cargo.toml | 1 + programs/loader-v4/src/lib.rs | 469 +++++++++--------- programs/loader-v4/test_elfs/out/invalid.so | Bin 1232 -> 0 bytes .../loader-v4/test_elfs/out/relative_call.so | Bin 5384 -> 0 bytes .../loader-v4/test_elfs/out/rodata_section.so | Bin 5424 -> 0 bytes programs/sbf/Cargo.lock | 1 + runtime/src/bank.rs | 5 +- sdk/program/src/loader_v4.rs | 50 +- sdk/program/src/loader_v4_instruction.rs | 13 +- svm/src/program_loader.rs | 6 +- svm/src/transaction_processor.rs | 5 +- 17 files changed, 588 insertions(+), 300 deletions(-) delete mode 100644 programs/loader-v4/test_elfs/out/invalid.so delete mode 100644 programs/loader-v4/test_elfs/out/relative_call.so delete mode 100644 programs/loader-v4/test_elfs/out/rodata_section.so diff --git a/Cargo.lock b/Cargo.lock index 0a120c26db966a..ec7478b40acbdc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6662,6 +6662,7 @@ version = "2.1.0" dependencies = [ "bincode", "log", + "solana-bpf-loader-program", "solana-compute-budget", "solana-log-collector", "solana-measure", diff --git a/cargo-registry/src/crate_handler.rs b/cargo-registry/src/crate_handler.rs index 3354af6c892f03..465f9cc0020d4c 100644 --- a/cargo-registry/src/crate_handler.rs +++ b/cargo-registry/src/crate_handler.rs @@ -110,7 +110,7 @@ impl Program { return Err("Signer doesn't match program ID".into()); } - let mut program_data = read_and_verify_elf(self.path.as_ref()) + let mut program_data = read_and_verify_elf(self.path.as_ref(), &client.rpc_client) .map_err(|e| format!("failed to read the program: {}", e))?; if APPEND_CRATE_TO_ELF { diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 6e81de1f9ff3ae..917bcec52771a1 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -5,6 +5,7 @@ use { common_error_adapter, log_instruction_custom_error_ex, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, + feature::{status_from_account, CliFeatureStatus}, program::calculate_max_chunk_size, }, clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, @@ -31,10 +32,12 @@ use { solana_rpc_client_api::{ config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSendTransactionConfig}, filter::{Memcmp, RpcFilterType}, + request::MAX_MULTIPLE_ACCOUNTS, }, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, + feature_set::{FeatureSet, FEATURE_NAMES}, hash::Hash, instruction::Instruction, loader_v4::{ @@ -75,9 +78,15 @@ pub enum ProgramV4CliCommand { program_address: Pubkey, authority_signer_index: SignerIndex, }, + TransferAuthority { + program_address: Pubkey, + authority_signer_index: SignerIndex, + new_authority_signer_index: SignerIndex, + }, Finalize { program_address: Pubkey, authority_signer_index: SignerIndex, + next_version_signer_index: SignerIndex, }, Show { account_pubkey: Option<Pubkey>, @@ -192,6 +201,38 @@ impl ProgramV4SubCommands for App<'_, '_> { ), ), ) + .subcommand( + SubCommand::with_name("transfer-authority") + .about("Transfer the authority of a program to a different address") + .arg( + Arg::with_name("program-id") + .long("program-id") + .value_name("PROGRAM_ID") + .takes_value(true) + .help("Executable program's address"), + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) 
.help( + "Current program authority [default: the default configured keypair]", + ), + ) + .arg( + Arg::with_name("new-authority") + .long("new-authority") + .value_name("NEW_AUTHORITY_SIGNER") + .takes_value(true) + .required(true) + .validator(is_valid_signer) + .help( + "New program authority", + ), + ), + ) .subcommand( SubCommand::with_name("finalize") .about("Finalize a program to make it immutable") @@ -211,6 +252,16 @@ impl ProgramV4SubCommands for App<'_, '_> { .help( "Program authority [default: the default configured keypair]", ), + ) + .arg( + Arg::with_name("next-version") + .long("next-version") + .value_name("NEXT_VERSION") + .takes_value(true) + .validator(is_valid_signer) + .help( + "Reserves the address and links it as the program's next-version, which is a hint that frontends can show to users", + ), ), ) .subcommand( @@ -365,7 +416,7 @@ pub fn parse_program_v4_subcommand( signers: signer_info.signers, } } - ("finalize", Some(matches)) => { + ("transfer-authority", Some(matches)) => { let mut bulk_signers = vec![Some( default_signer.signer_from_path(matches, wallet_manager)?, )]; @@ -373,16 +424,55 @@ pub fn parse_program_v4_subcommand( let (authority, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; bulk_signers.push(authority); + let (new_authority, new_authority_pubkey) = + signer_of(matches, "new-authority", wallet_manager)?; + bulk_signers.push(new_authority); + let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; CliCommandInfo { - command: CliCommand::ProgramV4(ProgramV4CliCommand::Finalize { + command: CliCommand::ProgramV4(ProgramV4CliCommand::TransferAuthority { program_address: pubkey_of(matches, "program-id") .expect("Program address is missing"), authority_signer_index: signer_info .index_of(authority_pubkey) .expect("Authority signer is missing"), + new_authority_signer_index: signer_info + .index_of(new_authority_pubkey) + .expect("Authority signer is missing"), }), signers: signer_info.signers, } } + ("finalize", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let (authority, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; + bulk_signers.push(authority); + + if let Ok((next_version, _next_version_pubkey)) = + signer_of(matches, "next-version", wallet_manager) + { + bulk_signers.push(next_version); + } + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + let authority_signer_index = signer_info + .index_of(authority_pubkey) + .expect("Authority signer is missing"); + + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::Finalize { + program_address: pubkey_of(matches, "program-id") + .expect("Program address is missing"), + authority_signer_index, + next_version_signer_index: pubkey_of(matches, "next-version") + .and_then(|pubkey| signer_info.index_of(Some(pubkey))) + .unwrap_or(authority_signer_index), }), signers: signer_info.signers, } @@ -414,7 +504,36 @@ pub fn parse_program_v4_subcommand( Ok(response) } -pub fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> { +fn fetch_feature_set(rpc_client: &RpcClient) -> Result<FeatureSet, Box<dyn std::error::Error>> { + let mut feature_set = FeatureSet::default(); + for feature_ids in FEATURE_NAMES + .keys() + .cloned() + .collect::<Vec<_>>() + .chunks(MAX_MULTIPLE_ACCOUNTS) + { + rpc_client + .get_multiple_accounts(feature_ids)? 
+ .into_iter() + .zip(feature_ids) + .for_each(|(account, feature_id)| { + let activation_slot = account.and_then(status_from_account); + + if let Some(CliFeatureStatus::Active(slot)) = activation_slot { + feature_set.activate(feature_id, slot); + } + }); + } + + Ok(feature_set) +} + +pub fn read_and_verify_elf( + program_location: &str, + rpc_client: &RpcClient, +) -> Result<Vec<u8>, Box<dyn std::error::Error>> { + let feature_set = fetch_feature_set(rpc_client)?; + let mut file = File::open(program_location) .map_err(|err| format!("Unable to open program file: {err}"))?; let mut program_data = Vec::new(); @@ -423,10 +542,13 @@ pub fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> { Executable::<InvokeContext>::from_elf(&program_data, Arc::new(program_runtime_environment)) .map_err(|err| format!("ELF error: {err}"))?; @@ -471,7 +593,7 @@ pub fn process_program_v4_subcommand( program_signer_index, authority_signer_index, } => { - let program_data = read_and_verify_elf(program_location)?; + let program_data = read_and_verify_elf(program_location, &rpc_client)?; let program_len = program_data.len() as u32; process_deploy_program( @@ -489,7 +611,7 @@ pub fn process_program_v4_subcommand( buffer_signer_index, authority_signer_index, } => { - let program_data = read_and_verify_elf(program_location)?; + let program_data = read_and_verify_elf(program_location, &rpc_client)?; let program_len = program_data.len() as u32; let buffer_signer = buffer_signer_index.map(|index| config.signers[index]); @@ -510,13 +632,25 @@ pub fn process_program_v4_subcommand( &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), program_address, ), + ProgramV4CliCommand::TransferAuthority { + program_address, + authority_signer_index, + new_authority_signer_index, + } => process_transfer_authority_of_program( + rpc_client, + &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), + program_address, + config.signers[*new_authority_signer_index], + ), ProgramV4CliCommand::Finalize { program_address, authority_signer_index, + next_version_signer_index, } => process_finalize_program( rpc_client, + &ProgramV4CommandConfig::new_from_cli_config(config, authority_signer_index), program_address, + config.signers[*next_version_signer_index], ), ProgramV4CliCommand::Show { account_pubkey, @@ -703,10 +837,11 @@ fn process_undeploy_program( Ok(config.output_format.formatted_string(&program_id)) } -fn process_finalize_program( +fn process_transfer_authority_of_program( rpc_client: Arc<RpcClient>, config: &ProgramV4CommandConfig, program_address: &Pubkey, + new_authority: &dyn Signer, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; let message = [Message::new_with_blockhash( &[loader_v4::transfer_authority( program_address, &config.authority.pubkey(), - None, + &new_authority.pubkey(), + )], + Some(&config.payer.pubkey()), + &blockhash, + )]; + check_payer(&rpc_client, config, 0, &message, &[], &[])?; + + send_messages(rpc_client, config, &message, &[], &[], None)?; + + let program_id = CliProgramId { + program_id: program_address.to_string(), + signature: None, + }; + Ok(config.output_format.formatted_string(&program_id)) +} + +fn process_finalize_program( + rpc_client: Arc<RpcClient>, + config: &ProgramV4CommandConfig, + program_address: &Pubkey, + next_version: &dyn Signer, +) -> ProcessResult { + let blockhash = rpc_client.get_latest_blockhash()?; + + let message = [Message::new_with_blockhash( + &[loader_v4::finalize( + program_address, + &config.authority.pubkey(), + &next_version.pubkey(), + )], + Some(&config.payer.pubkey()), 
&blockhash, @@ -752,7 +915,7 @@ fn process_show( Ok(config.output_format.formatted_string(&CliProgramV4 { program_id: account_pubkey.to_string(), owner: account.owner.to_string(), - authority: state.authority_address.to_string(), + authority: state.authority_address_or_next_version.to_string(), last_deploy_slot: state.slot, data_len: account .data @@ -1131,11 +1294,11 @@ fn build_retract_instruction( if let Ok(LoaderV4State { slot: _, - authority_address, + authority_address_or_next_version, status, }) = solana_loader_v4_program::get_state(&account.data) { - if authority != authority_address { + if authority != authority_address_or_next_version { return Err( "Program authority does not match with the provided authority address".into(), ); @@ -1170,11 +1333,11 @@ fn build_truncate_instructions( } else { if let Ok(LoaderV4State { slot: _, - authority_address, + authority_address_or_next_version, status, }) = solana_loader_v4_program::get_state(&account.data) { - if authority != authority_address { + if authority != authority_address_or_next_version { return Err( "Program authority does not match with the provided authority address".into(), ); @@ -1279,7 +1442,7 @@ fn get_programs( programs.push(CliProgramV4 { program_id: program.to_string(), owner: account.owner.to_string(), - authority: state.authority_address.to_string(), + authority: state.authority_address_or_next_version.to_string(), last_deploy_slot: state.slot, status: status.to_string(), data_len: account @@ -1601,6 +1764,30 @@ mod tests { .is_err()); } + #[test] + fn test_transfer_authority() { + let mut config = CliConfig::default(); + + let payer = keypair_from_seed(&[1u8; 32]).unwrap(); + let program_signer = keypair_from_seed(&[2u8; 32]).unwrap(); + let authority_signer = program_authority(); + let new_authority_signer = program_authority(); + + config.signers.push(&payer); + config.signers.push(&authority_signer); + config.signers.push(&new_authority_signer); + + let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); + + assert!(process_transfer_authority_of_program( + Arc::new(rpc_client_with_program_deployed()), + &config, + &program_signer.pubkey(), + &new_authority_signer, + ) + .is_ok()); + } + #[test] fn test_finalize() { let mut config = CliConfig::default(); @@ -1608,9 +1795,11 @@ mod tests { let payer = keypair_from_seed(&[1u8; 32]).unwrap(); let program_signer = keypair_from_seed(&[2u8; 32]).unwrap(); let authority_signer = program_authority(); + let next_version_signer = keypair_from_seed(&[4u8; 32]).unwrap(); config.signers.push(&payer); config.signers.push(&authority_signer); + config.signers.push(&next_version_signer); let config = ProgramV4CommandConfig::new_from_cli_config(&config, &1); @@ -1618,6 +1807,7 @@ mod tests { Arc::new(rpc_client_with_program_deployed()), &config, &program_signer.pubkey(), + &next_version_signer, ) .is_ok()); } @@ -1801,6 +1991,56 @@ mod tests { ); } + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_cli_parse_transfer_authority() { + let test_commands = get_clap_app("test", "desc", "version"); + + let default_keypair = Keypair::new(); + let keypair_file = make_tmp_path("keypair_file"); + write_keypair_file(&default_keypair, &keypair_file).unwrap(); + let default_signer = DefaultSigner::new("", &keypair_file); + + let program_keypair = Keypair::new(); + let program_keypair_file = make_tmp_path("program_keypair_file"); + write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); + + let authority_keypair = Keypair::new(); + let authority_keypair_file 
= make_tmp_path("authority_keypair_file"); + write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + + let new_authority_keypair = Keypair::new(); + let new_authority_keypair_file = make_tmp_path("new_authority_keypair_file"); + write_keypair_file(&new_authority_keypair, &new_authority_keypair_file).unwrap(); + + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program-v4", + "transfer-authority", + "--program-id", + &program_keypair_file, + "--authority", + &authority_keypair_file, + "--new-authority", + &new_authority_keypair_file, + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::ProgramV4(ProgramV4CliCommand::TransferAuthority { + program_address: program_keypair.pubkey(), + authority_signer_index: 1, + new_authority_signer_index: 2, + }), + signers: vec![ + Box::new(read_keypair_file(&keypair_file).unwrap()), + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&new_authority_keypair_file).unwrap()), + ], + } + ); + } + #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_finalize() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); let program_keypair = Keypair::new(); let program_keypair_file = make_tmp_path("program_keypair_file"); write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + let next_version_keypair = Keypair::new(); + let next_version_keypair_file = make_tmp_path("next_version_keypair_file"); + write_keypair_file(&next_version_keypair, &next_version_keypair_file).unwrap(); + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program-v4", "finalize", "--program-id", &program_keypair_file, "--authority", &authority_keypair_file, + "--next-version", + &next_version_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::ProgramV4(ProgramV4CliCommand::Finalize { program_address: program_keypair.pubkey(), authority_signer_index: 1, + next_version_signer_index: 2, }), signers: vec![ Box::new(read_keypair_file(&keypair_file).unwrap()), - Box::new(read_keypair_file(&authority_keypair_file).unwrap()) + Box::new(read_keypair_file(&authority_keypair_file).unwrap()), + Box::new(read_keypair_file(&next_version_keypair_file).unwrap()), ], } ); diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index db5bb8224bff4f..e7b49a38951623 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -1189,7 +1189,7 @@ impl ProgramCache { pub fn get_flattened_entries( &self, include_program_runtime_v1: bool, - include_program_runtime_v2: bool, + _include_program_runtime_v2: bool, ) -> Vec<(Pubkey, Arc<ProgramCacheEntry>)> { match &self.index { IndexImplementation::V1 { entries, .. 
} => entries @@ -1199,11 +1199,7 @@ impl ProgramCache { .iter() .filter_map(move |program| match program.program { ProgramCacheEntryType::Loaded(_) => { - if (program.account_owner != ProgramCacheEntryOwner::LoaderV4 - && include_program_runtime_v1) - || (program.account_owner == ProgramCacheEntryOwner::LoaderV4 - && include_program_runtime_v2) - { + if include_program_runtime_v1 { Some((*id, program.clone())) } else { None diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 58ef907746f5ac..400806bb4c98bd 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -1332,7 +1332,7 @@ fn common_close_account( Ok(()) } -fn execute<'a, 'b: 'a>( +pub fn execute<'a, 'b: 'a>( executable: &'a Executable>, invoke_context: &'a mut InvokeContext<'b>, ) -> Result<(), Box> { @@ -1513,8 +1513,9 @@ fn execute<'a, 'b: 'a>( pub mod test_utils { use { - super::*, solana_program_runtime::loaded_programs::DELAY_VISIBILITY_SLOT_OFFSET, - solana_sdk::account::ReadableAccount, + super::*, + solana_program_runtime::loaded_programs::DELAY_VISIBILITY_SLOT_OFFSET, + solana_sdk::{account::ReadableAccount, loader_v4::LoaderV4State}, }; pub fn load_all_invoked_programs(invoke_context: &mut InvokeContext) { @@ -1536,6 +1537,11 @@ pub mod test_utils { let owner = account.owner(); if check_loader_id(owner) { + let programdata_data_offset = if loader_v4::check_id(owner) { + LoaderV4State::program_data_offset() + } else { + 0 + }; let pubkey = invoke_context .transaction_context .get_key_of_account_at_index(index) @@ -1544,7 +1550,10 @@ pub mod test_utils { if let Ok(loaded_program) = load_program_from_bytes( None, &mut load_program_metrics, - account.data(), + account + .data() + .get(programdata_data_offset.min(account.data().len())..) + .unwrap(), owner, account.data().len(), 0, diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 876b734af19156..21b643a9fa89c6 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -284,9 +284,6 @@ pub fn create_program_runtime_environment_v1<'a>( let get_sysvar_syscall_enabled = feature_set.is_active(&get_sysvar_syscall_enabled::id()); let enable_get_epoch_stake_syscall = feature_set.is_active(&enable_get_epoch_stake_syscall::id()); - // !!! ATTENTION !!! - // When adding new features for RBPF here, - // also add them to `Bank::apply_builtin_program_feature_transitions()`. 
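// Build the VM configuration from the compute budget limits gathered above; the
// feature switches control which syscalls get registered below.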
let config = Config { max_call_depth: compute_budget.max_call_depth, @@ -491,6 +488,33 @@ pub fn create_program_runtime_environment_v1<'a>( Ok(BuiltinProgram::new_loader(config, result)) } +pub fn create_program_runtime_environment_v2<'a>( + compute_budget: &ComputeBudget, + debugging_features: bool, +) -> BuiltinProgram> { + let config = Config { + max_call_depth: compute_budget.max_call_depth, + stack_frame_size: compute_budget.stack_frame_size, + enable_address_translation: true, // To be deactivated once we have BTF inference and verification + enable_stack_frame_gaps: false, + instruction_meter_checkpoint_distance: 10000, + enable_instruction_meter: true, + enable_instruction_tracing: debugging_features, + enable_symbol_and_section_labels: debugging_features, + reject_broken_elfs: true, + noop_instruction_rate: 256, + sanitize_user_provided_values: true, + external_internal_function_hash_collision: true, + reject_callx_r10: true, + enable_sbpf_v1: false, + enable_sbpf_v2: true, + optimize_rodata: true, + aligned_memory_mapping: true, + // Warning, do not use `Config::default()` so that configuration here is explicit. + }; + BuiltinProgram::new_loader(config, FunctionRegistry::default()) +} + fn address_is_aligned(address: u64) -> bool { (address as *mut T as usize) .checked_rem(align_of::()) diff --git a/programs/loader-v4/Cargo.toml b/programs/loader-v4/Cargo.toml index 952b3bf1cfe511..55e6e702f3e163 100644 --- a/programs/loader-v4/Cargo.toml +++ b/programs/loader-v4/Cargo.toml @@ -10,6 +10,7 @@ edition = { workspace = true } [dependencies] log = { workspace = true } +solana-bpf-loader-program = { workspace = true } solana-compute-budget = { workspace = true } solana-log-collector = { workspace = true } solana-measure = { workspace = true } diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 91b15e33b27755..f8e2d2f6eec80f 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -1,5 +1,5 @@ use { - solana_compute_budget::compute_budget::ComputeBudget, + solana_bpf_loader_program::execute, solana_log_collector::{ic_logger_msg, LogCollector}, solana_measure::measure::Measure, solana_program_runtime::{ @@ -8,19 +8,9 @@ use { LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType, DELAY_VISIBILITY_SLOT_OFFSET, }, - stable_log, - }, - solana_rbpf::{ - aligned_memory::AlignedMemory, - declare_builtin_function, ebpf, - elf::Executable, - error::ProgramResult, - memory_region::{MemoryMapping, MemoryRegion}, - program::{BuiltinProgram, FunctionRegistry}, - vm::{Config, ContextObject, EbpfVm}, }, + solana_rbpf::{declare_builtin_function, memory_region::MemoryMapping}, solana_sdk::{ - entrypoint::SUCCESS, instruction::InstructionError, loader_v4::{self, LoaderV4State, LoaderV4Status, DEPLOYMENT_COOLDOWN_IN_SLOTS}, loader_v4_instruction::LoaderV4Instruction, @@ -63,132 +53,6 @@ fn get_state_mut(data: &mut [u8]) -> Result<&mut LoaderV4State, InstructionError } } -pub fn create_program_runtime_environment_v2<'a>( - compute_budget: &ComputeBudget, - debugging_features: bool, -) -> BuiltinProgram> { - let config = Config { - max_call_depth: compute_budget.max_call_depth, - stack_frame_size: compute_budget.stack_frame_size, - enable_address_translation: true, // To be deactivated once we have BTF inference and verification - enable_stack_frame_gaps: false, - instruction_meter_checkpoint_distance: 10000, - enable_instruction_meter: true, - enable_instruction_tracing: debugging_features, - 
enable_symbol_and_section_labels: debugging_features, - reject_broken_elfs: true, - noop_instruction_rate: 256, - sanitize_user_provided_values: true, - external_internal_function_hash_collision: true, - reject_callx_r10: true, - enable_sbpf_v1: false, - enable_sbpf_v2: true, - optimize_rodata: true, - aligned_memory_mapping: true, - // Warning, do not use `Config::default()` so that configuration here is explicit. - }; - BuiltinProgram::new_loader(config, FunctionRegistry::default()) -} - -fn calculate_heap_cost(heap_size: u32, heap_cost: u64) -> u64 { - const KIBIBYTE: u64 = 1024; - const PAGE_SIZE_KB: u64 = 32; - u64::from(heap_size) - .saturating_add(PAGE_SIZE_KB.saturating_mul(KIBIBYTE).saturating_sub(1)) - .checked_div(PAGE_SIZE_KB.saturating_mul(KIBIBYTE)) - .expect("PAGE_SIZE_KB * KIBIBYTE > 0") - .saturating_sub(1) - .saturating_mul(heap_cost) -} - -/// Create the SBF virtual machine -pub fn create_vm<'a, 'b>( - invoke_context: &'a mut InvokeContext<'b>, - program: &'a Executable>, -) -> Result>, Box> { - let config = program.get_config(); - let sbpf_version = program.get_sbpf_version(); - let compute_budget = invoke_context.get_compute_budget(); - let heap_size = compute_budget.heap_size; - invoke_context.consume_checked(calculate_heap_cost(heap_size, compute_budget.heap_cost))?; - let mut stack = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled(config.stack_size()); - let mut heap = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled( - usize::try_from(compute_budget.heap_size).unwrap(), - ); - let stack_len = stack.len(); - let regions: Vec = vec![ - program.get_ro_region(), - MemoryRegion::new_writable_gapped(stack.as_slice_mut(), ebpf::MM_STACK_START, 0), - MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START), - ]; - let log_collector = invoke_context.get_log_collector(); - let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).map_err(|err| { - ic_logger_msg!(log_collector, "Failed to create SBF VM: {}", err); - Box::new(InstructionError::ProgramEnvironmentSetupFailure) - })?; - Ok(EbpfVm::new( - program.get_loader().clone(), - sbpf_version, - invoke_context, - memory_mapping, - stack_len, - )) -} - -fn execute<'a, 'b: 'a>( - invoke_context: &'a mut InvokeContext<'b>, - executable: &'a Executable>, -) -> Result<(), Box> { - // We dropped the lifetime tracking in the Executor by setting it to 'static, - // thus we need to reintroduce the correct lifetime of InvokeContext here again. 
- let executable = unsafe { - std::mem::transmute::< - &'a Executable>, - &'a Executable>, - >(executable) - }; - let log_collector = invoke_context.get_log_collector(); - let stack_height = invoke_context.get_stack_height(); - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let program_id = *instruction_context.get_last_program_key(transaction_context)?; - #[cfg(any(target_os = "windows", not(target_arch = "x86_64")))] - let use_jit = false; - #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] - let use_jit = executable.get_compiled_program().is_some(); - - let compute_meter_prev = invoke_context.get_remaining(); - let mut create_vm_time = Measure::start("create_vm"); - let mut vm = create_vm(invoke_context, executable)?; - create_vm_time.stop(); - - let mut execute_time = Measure::start("execute"); - stable_log::program_invoke(&log_collector, &program_id, stack_height); - let (compute_units_consumed, result) = vm.execute_program(executable, !use_jit); - drop(vm); - ic_logger_msg!( - log_collector, - "Program {} consumed {} of {} compute units", - &program_id, - compute_units_consumed, - compute_meter_prev - ); - execute_time.stop(); - - let timings = &mut invoke_context.timings; - timings.create_vm_us = timings.create_vm_us.saturating_add(create_vm_time.as_us()); - timings.execute_us = timings.execute_us.saturating_add(execute_time.as_us()); - - match result { - ProgramResult::Ok(status) if status != SUCCESS => { - let error: InstructionError = status.into(); - Err(error.into()) - } - ProgramResult::Err(error) => Err(error.into()), - _ => Ok(()), - } -} - fn check_program_account( log_collector: &Option>>, instruction_context: &InstructionContext, @@ -208,7 +72,7 @@ fn check_program_account( ic_logger_msg!(log_collector, "Authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - if state.authority_address != *authority_address { + if state.authority_address_or_next_version != *authority_address { ic_logger_msg!(log_collector, "Incorrect authority provided"); return Err(InstructionError::IncorrectAuthority); } @@ -336,7 +200,7 @@ pub fn process_instruction_truncate( let state = get_state_mut(program.get_data_mut()?)?; state.slot = 0; state.status = LoaderV4Status::Retracted; - state.authority_address = *authority_address; + state.authority_address_or_next_version = *authority_address; } } Ok(()) @@ -515,32 +379,68 @@ pub fn process_instruction_transfer_authority( .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; let new_authority_address = instruction_context .get_index_of_instruction_account_in_transaction(2) - .and_then(|index| transaction_context.get_key_of_account_at_index(index)) - .ok() - .cloned(); - let _state = check_program_account( + .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let state = check_program_account( &log_collector, instruction_context, &program, authority_address, )?; + if !instruction_context.is_instruction_account_signer(2)? 
{ + ic_logger_msg!(log_collector, "New authority did not sign"); + return Err(InstructionError::MissingRequiredSignature); + } + if state.authority_address_or_next_version == *new_authority_address { + ic_logger_msg!(log_collector, "No change"); + return Err(InstructionError::InvalidArgument); + } let state = get_state_mut(program.get_data_mut()?)?; - if let Some(new_authority_address) = new_authority_address { - if !instruction_context.is_instruction_account_signer(2)? { - ic_logger_msg!(log_collector, "New authority did not sign"); - return Err(InstructionError::MissingRequiredSignature); - } - if state.authority_address == new_authority_address { - ic_logger_msg!(log_collector, "No change"); - return Err(InstructionError::InvalidArgument); - } - state.authority_address = new_authority_address; - } else if matches!(state.status, LoaderV4Status::Deployed) { - state.status = LoaderV4Status::Finalized; - } else { + state.authority_address_or_next_version = *new_authority_address; + Ok(()) +} + +pub fn process_instruction_finalize( + invoke_context: &mut InvokeContext, +) -> Result<(), InstructionError> { + let log_collector = invoke_context.get_log_collector(); + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let authority_address = instruction_context + .get_index_of_instruction_account_in_transaction(1) + .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let state = check_program_account( + &log_collector, + instruction_context, + &program, + authority_address, + )?; + if !matches!(state.status, LoaderV4Status::Deployed) { ic_logger_msg!(log_collector, "Program must be deployed to be finalized"); return Err(InstructionError::InvalidArgument); } + drop(program); + let next_version = + instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + if !loader_v4::check_id(next_version.get_owner()) { + ic_logger_msg!(log_collector, "Next version is not owned by loader"); + return Err(InstructionError::InvalidAccountOwner); + } + let state_of_next_version = get_state(next_version.get_data())?; + if state_of_next_version.authority_address_or_next_version != *authority_address { + ic_logger_msg!(log_collector, "Next version has a different authority"); + return Err(InstructionError::IncorrectAuthority); + } + if matches!(state_of_next_version.status, LoaderV4Status::Finalized) { + ic_logger_msg!(log_collector, "Next version is finalized"); + return Err(InstructionError::Immutable); + } + let address_of_next_version = *next_version.get_key(); + drop(next_version); + let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let state = get_state_mut(program.get_data_mut()?)?; + state.authority_address_or_next_version = address_of_next_version; + state.status = LoaderV4Status::Finalized; Ok(()) } @@ -581,6 +481,7 @@ pub fn process_instruction_inner( LoaderV4Instruction::TransferAuthority => { process_instruction_transfer_authority(invoke_context) } + LoaderV4Instruction::Finalize => process_instruction_finalize(invoke_context), } .map_err(|err| Box::new(err) as Box) } else { @@ -614,7 +515,7 @@ pub fn process_instruction_inner( ic_logger_msg!(log_collector, "Program is not deployed"); Err(Box::new(InstructionError::UnsupportedProgramId) as Box) } - ProgramCacheEntryType::Loaded(executable) => execute(invoke_context, 
executable), + ProgramCacheEntryType::Loaded(executable) => execute(executable, invoke_context), _ => { Err(Box::new(InstructionError::UnsupportedProgramId) as Box) } @@ -627,6 +528,7 @@ pub fn process_instruction_inner( mod tests { use { super::*, + solana_bpf_loader_program::test_utils, solana_program_runtime::invoke_context::mock_process_instruction, solana_sdk::{ account::{ @@ -641,51 +543,6 @@ mod tests { std::{fs::File, io::Read, path::Path}, }; - pub fn load_all_invoked_programs(invoke_context: &mut InvokeContext) { - let mut load_program_metrics = LoadProgramMetrics::default(); - let num_accounts = invoke_context.transaction_context.get_number_of_accounts(); - for index in 0..num_accounts { - let account = invoke_context - .transaction_context - .get_account_at_index(index) - .expect("Failed to get the account") - .borrow(); - - let owner = account.owner(); - if loader_v4::check_id(owner) { - let pubkey = invoke_context - .transaction_context - .get_key_of_account_at_index(index) - .expect("Failed to get account key"); - - if let Some(programdata) = - account.data().get(LoaderV4State::program_data_offset()..) - { - if let Ok(loaded_program) = ProgramCacheEntry::new( - &loader_v4::id(), - invoke_context - .program_cache_for_tx_batch - .environments - .program_runtime_v2 - .clone(), - 0, - 0, - programdata, - account.data().len(), - &mut load_program_metrics, - ) { - invoke_context - .program_cache_for_tx_batch - .set_slot_for_tests(0); - invoke_context - .program_cache_for_tx_batch - .store_modified_entry(*pubkey, Arc::new(loaded_program)); - } - } - } - } - } - fn process_instruction( program_indices: Vec, instruction_data: &[u8], @@ -712,14 +569,7 @@ mod tests { expected_result, Entrypoint::vm, |invoke_context| { - invoke_context - .program_cache_for_tx_batch - .environments - .program_runtime_v2 = Arc::new(create_program_runtime_environment_v2( - &ComputeBudget::default(), - false, - )); - load_all_invoked_programs(invoke_context); + test_utils::load_all_invoked_programs(invoke_context); }, |_invoke_context| {}, ) @@ -730,7 +580,9 @@ mod tests { status: LoaderV4Status, path: &str, ) -> AccountSharedData { - let path = Path::new("test_elfs/out/").join(path).with_extension("so"); + let path = Path::new("../bpf_loader/test_elfs/out/") + .join(path) + .with_extension("so"); let mut file = File::open(path).expect("file open failed"); let mut elf_bytes = Vec::new(); file.read_to_end(&mut elf_bytes).unwrap(); @@ -744,7 +596,7 @@ mod tests { ); let state = get_state_mut(program_account.data_as_mut_slice()).unwrap(); state.slot = 0; - state.authority_address = authority_address; + state.authority_address_or_next_version = authority_address; state.status = status; program_account.data_as_mut_slice()[loader_v4::LoaderV4State::program_data_offset()..] 
.copy_from_slice(&elf_bytes); @@ -768,7 +620,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "relative_call", + "noop_unaligned", ), ), ( @@ -780,7 +632,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "relative_call", + "noop_unaligned", ), ), ( @@ -866,7 +718,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "relative_call", + "noop_unaligned", ), ), ( @@ -878,7 +730,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "relative_call", + "noop_unaligned", ), ), ( @@ -963,7 +815,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "relative_call", + "noop_unaligned", ), ), ( @@ -983,7 +835,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata_section", + "noop_aligned", ), ), ( @@ -991,7 +843,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "relative_call", + "noop_unaligned", ), ), ( @@ -1220,7 +1072,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata_section", + "noop_aligned", ), ), ( @@ -1232,7 +1084,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "relative_call", + "noop_unaligned", ), ), ( @@ -1244,7 +1096,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "invalid", + "callx-r10-sbfv1", ), ), (clock::id(), clock(1000)), @@ -1368,7 +1220,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "rodata_section", + "noop_aligned", ), ), ( @@ -1384,7 +1236,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata_section", + "noop_aligned", ), ), (clock::id(), clock(1000)), @@ -1448,7 +1300,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "rodata_section", + "noop_aligned", ), ), ( @@ -1456,7 +1308,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata_section", + "noop_aligned", ), ), ( @@ -1495,12 +1347,119 @@ mod tests { ); assert_eq!(accounts[0].lamports(), transaction_accounts[0].1.lamports()); - // Finalize program - let accounts = process_instruction( + // Error: No new authority provided + process_instruction( vec![], &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts.clone(), &[(0, false, true), (3, true, false)], + Err(InstructionError::NotEnoughAccountKeys), + ); + + // Error: Program is uninitialized + process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + transaction_accounts.clone(), + &[(2, false, true), (3, true, false), (4, true, false)], + Err(InstructionError::AccountDataTooSmall), + ); + + // Error: New authority did not sign + process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + transaction_accounts.clone(), + &[(0, false, true), (3, true, false), (4, false, false)], + Err(InstructionError::MissingRequiredSignature), + ); + + // Error: Authority did not change + process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + transaction_accounts, + &[(0, false, true), (3, true, false), (3, true, false)], + Err(InstructionError::InvalidArgument), + 
); + + test_loader_instruction_general_errors(LoaderV4Instruction::TransferAuthority); + } + + #[test] + fn test_loader_instruction_finalize() { + let authority_address = Pubkey::new_unique(); + let transaction_accounts = vec![ + ( + Pubkey::new_unique(), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Deployed, + "noop_aligned", + ), + ), + ( + Pubkey::new_unique(), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Retracted, + "noop_aligned", + ), + ), + ( + Pubkey::new_unique(), + load_program_account_from_elf( + authority_address, + LoaderV4Status::Finalized, + "noop_aligned", + ), + ), + ( + Pubkey::new_unique(), + load_program_account_from_elf( + Pubkey::new_unique(), + LoaderV4Status::Retracted, + "noop_aligned", + ), + ), + ( + Pubkey::new_unique(), + AccountSharedData::new(0, 0, &loader_v4::id()), + ), + ( + authority_address, + AccountSharedData::new(0, 0, &Pubkey::new_unique()), + ), + ( + clock::id(), + create_account_shared_data_for_test(&clock::Clock::default()), + ), + ( + rent::id(), + create_account_shared_data_for_test(&rent::Rent::default()), + ), + ]; + + // Finalize program with a next version + let accounts = process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), + transaction_accounts.clone(), + &[(0, false, true), (5, true, false), (1, false, false)], + Ok(()), + ); + assert_eq!( + accounts[0].data().len(), + transaction_accounts[0].1.data().len(), + ); + assert_eq!(accounts[0].lamports(), transaction_accounts[0].1.lamports()); + + // Finalize program with itself as next version + let accounts = process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), + transaction_accounts.clone(), + &[(0, false, true), (5, true, false), (0, false, false)], Ok(()), ); assert_eq!( @@ -1512,37 +1471,55 @@ mod tests { // Error: Program must be deployed to be finalized process_instruction( vec![], - &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), - &[(1, false, true), (3, true, false)], + &[(1, false, true), (5, true, false)], Err(InstructionError::InvalidArgument), ); // Error: Program is uninitialized process_instruction( vec![], - &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), - &[(2, false, true), (3, true, false), (4, true, false)], + &[(4, false, true), (5, true, false)], Err(InstructionError::AccountDataTooSmall), ); - // Error: New authority did not sign + // Error: Next version not owned by loader process_instruction( vec![], - &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), - &[(0, false, true), (3, true, false), (4, false, false)], - Err(InstructionError::MissingRequiredSignature), + &[(0, false, true), (5, true, false), (5, false, false)], + Err(InstructionError::InvalidAccountOwner), ); - // Error: Authority did not change + // Error: Program is uninitialized process_instruction( vec![], - &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), - transaction_accounts, - &[(0, false, true), (3, true, false), (3, true, false)], - Err(InstructionError::InvalidArgument), + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), + transaction_accounts.clone(), + &[(0, false, true), (5, 
true, false), (4, false, false)], + Err(InstructionError::AccountDataTooSmall), + ); + + // Error: Next version is finalized + process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), + transaction_accounts.clone(), + &[(0, false, true), (5, true, false), (2, false, false)], + Err(InstructionError::Immutable), + ); + + // Error: Incorrect authority of next version + process_instruction( + vec![], + &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), + transaction_accounts.clone(), + &[(0, false, true), (5, true, false), (3, false, false)], + Err(InstructionError::IncorrectAuthority), + ); test_loader_instruction_general_errors(LoaderV4Instruction::TransferAuthority); @@ -1558,7 +1535,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "rodata_section", + "noop_aligned", ), ), ( @@ -1574,7 +1551,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "rodata_section", + "noop_aligned", ), ), ( Pubkey::new_unique(), load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "invalid", + "callx-r10-sbfv1", ), ), ]; // Execute program process_instruction( vec![], &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], - Err(InstructionError::Custom(42)), + Ok(()), ); // Error: Program not owned by loader
diff --git a/programs/loader-v4/test_elfs/out/invalid.so b/programs/loader-v4/test_elfs/out/invalid.so deleted file mode index cfa645538380122264eac463a4083ae73b2dcf5f..0000000000000000000000000000000000000000 GIT binary patch (binary data omitted)
diff --git a/programs/loader-v4/test_elfs/out/relative_call.so b/programs/loader-v4/test_elfs/out/relative_call.so deleted file mode index 9f24730e209597b7bb901191dc47dca8bd3a6887..0000000000000000000000000000000000000000 GIT binary patch (binary data omitted)
diff --git a/programs/loader-v4/test_elfs/out/rodata_section.so b/programs/loader-v4/test_elfs/out/rodata_section.so deleted file mode index 8868f7e63d7e85b0963ee207dc796f3c0b2fdd51..0000000000000000000000000000000000000000 GIT binary patch (binary data omitted)
diff --git a/sdk/program/src/loader_v4.rs b/sdk/program/src/loader_v4.rs --- a/sdk/program/src/loader_v4.rs +++ b/sdk/program/src/loader_v4.rs pub fn is_transfer_authority_instruction(instruction_data: &[u8]) -> bool { !instruction_data.is_empty() && 4 == instruction_data[0] } +pub fn is_finalize_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 5 == instruction_data[0] } + /// Returns the instructions required to initialize a program/buffer account. pub fn create_buffer( payer_address: &Pubkey, @@ -183,20 +188,32 @@ pub fn retract(program_address: &Pubkey, authority: &Pubkey) -> Instruction { pub fn transfer_authority( program_address: &Pubkey, authority: &Pubkey, - new_authority: Option<&Pubkey>, + new_authority: &Pubkey, ) -> Instruction { - let mut accounts = vec![ + let accounts = vec![ AccountMeta::new(*program_address, false), AccountMeta::new_readonly(*authority, true), + AccountMeta::new_readonly(*new_authority, true), ]; - if let Some(new_auth) = new_authority { - accounts.push(AccountMeta::new_readonly(*new_auth, true)); - } - Instruction::new_with_bincode(id(), &LoaderV4Instruction::TransferAuthority, accounts) } +/// Returns the instructions required to finalize a program. +pub fn finalize( + program_address: &Pubkey, + authority: &Pubkey, + next_version_program_address: &Pubkey, +) -> Instruction { + let accounts = vec![ + AccountMeta::new(*program_address, false), + AccountMeta::new_readonly(*authority, true), + AccountMeta::new_readonly(*next_version_program_address, false), + ]; + + Instruction::new_with_bincode(id(), &LoaderV4Instruction::Finalize, accounts) +} + #[cfg(test)] mod tests { use {super::*, crate::system_program, memoffset::offset_of}; @@ -204,7 +221,10 @@ mod tests { #[test] fn test_layout() { assert_eq!(offset_of!(LoaderV4State, slot), 0x00); - assert_eq!(offset_of!(LoaderV4State, authority_address), 0x08); + assert_eq!( + offset_of!(LoaderV4State, authority_address_or_next_version), + 0x08 + ); assert_eq!(offset_of!(LoaderV4State, status), 0x28); assert_eq!(LoaderV4State::program_data_offset(), 0x30); } @@ -335,7 +355,7 @@ mod tests { let program = Pubkey::new_unique(); let authority = Pubkey::new_unique(); let new_authority = Pubkey::new_unique(); - let instruction = transfer_authority(&program, &authority, Some(&new_authority)); + let instruction = transfer_authority(&program, &authority, &new_authority); assert!(is_transfer_authority_instruction(&instruction.data)); assert_eq!(instruction.program_id, id()); assert_eq!(instruction.accounts.len(), 3); @@ -354,15 +374,19 @@ fn test_transfer_authority_finalize_instruction() { let program = Pubkey::new_unique(); let authority = Pubkey::new_unique(); - let instruction = transfer_authority(&program, &authority, None); - assert!(is_transfer_authority_instruction(&instruction.data)); + let next_version = Pubkey::new_unique(); + let instruction = finalize(&program, &authority, &next_version); + assert!(is_finalize_instruction(&instruction.data)); assert_eq!(instruction.program_id, id()); - assert_eq!(instruction.accounts.len(), 2); + assert_eq!(instruction.accounts.len(), 3); assert_eq!(instruction.accounts[0].pubkey, program); assert!(instruction.accounts[0].is_writable); assert!(!instruction.accounts[0].is_signer); assert_eq!(instruction.accounts[1].pubkey, authority); assert!(!instruction.accounts[1].is_writable); assert!(instruction.accounts[1].is_signer); + assert_eq!(instruction.accounts[2].pubkey, next_version); + assert!(!instruction.accounts[2].is_writable); + assert!(!instruction.accounts[2].is_signer); } }
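The builders changed and added above compose into the new hand-over-then-freeze flow. A minimal client-side sketch, assuming only the two helpers from this hunk with the SDK types in scope (the address and authority variables are illustrative, and a program may name itself as its next version, as the test above does):

    // Reassign the authority, then finalize the program, pointing it at
    // itself as its own next version.
    let transfer_ix = transfer_authority(&program_address, &authority, &new_authority);
    let finalize_ix = finalize(&program_address, &new_authority, &program_address);

Both instructions still have to be placed in a transaction signed by the listed signers; only the instruction construction is shown here.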
diff --git a/sdk/program/src/loader_v4_instruction.rs b/sdk/program/src/loader_v4_instruction.rs index d2e0e041c6fcbc..de6b3a75881801 100644 --- a/sdk/program/src/loader_v4_instruction.rs +++ b/sdk/program/src/loader_v4_instruction.rs @@ -61,12 +61,17 @@ pub enum LoaderV4Instruction { /// Transfers the authority over a program account. /// - /// WARNING: Using this instruction without providing a new authority - /// finalizes the program (it becomes immutable). - /// /// # Account references /// 0. `[writable]` The program account to change the authority of. /// 1. `[signer]` The current authority of the program. - /// 2. `[signer]` The new authority of the program. Optional if program is currently deployed. + /// 2. `[signer]` The new authority of the program. TransferAuthority, + + /// Finalizes the program account, rendering it immutable. + /// + /// # Account references + /// 0. `[writable]` The program account to finalize. + /// 1. `[signer]` The current authority of the program. + /// 2. `[]` The next version of the program (can be itself). + Finalize, }
diff --git a/svm/src/program_loader.rs b/svm/src/program_loader.rs index 70d6b3d8108f6d..2249bd86bdfb50 100644 --- a/svm/src/program_loader.rs +++ b/svm/src/program_loader.rs @@ -380,7 +380,7 @@ mod tests { let loader_data = LoaderV4State { slot: 25, - authority_address: Pubkey::new_unique(), + authority_address_or_next_version: Pubkey::new_unique(), status: LoaderV4Status::Deployed, }; let encoded = unsafe { @@ -731,7 +731,7 @@ mod tests { let loader_data = LoaderV4State { slot: 0, - authority_address: Pubkey::new_unique(), + authority_address_or_next_version: Pubkey::new_unique(), status: LoaderV4Status::Deployed, }; let encoded = unsafe { @@ -932,7 +932,7 @@ mod tests { let state = LoaderV4State { slot: 58, - authority_address: Pubkey::new_unique(), + authority_address_or_next_version: Pubkey::new_unique(), status: LoaderV4Status::Deployed, }; let encoded = unsafe {
diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 75ddbca4ca1f73..affda575d8d949 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -20,9 +20,10 @@ use { }, log::debug, percentage::Percentage, - solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, + solana_bpf_loader_program::syscalls::{ + create_program_runtime_environment_v1, create_program_runtime_environment_v2, + }, solana_compute_budget::compute_budget::ComputeBudget, - solana_loader_v4_program::create_program_runtime_environment_v2, solana_log_collector::LogCollector, solana_measure::{measure::Measure, measure_us}, solana_program_runtime::{
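The commit above renames `authority_address` to `authority_address_or_next_version`, a field with two meanings: while a program is deployed or retracted it holds the upgrade authority, and once `Finalize` runs it is overwritten with the address of the program's next version. A hypothetical helper, not part of the patch, that spells out that rule:

    // Sketch: interpret the renamed LoaderV4State field by program status.
    fn current_authority(state: &LoaderV4State) -> Option<&Pubkey> {
        match state.status {
            // A finalized program has no authority; the field now names
            // its successor program instead.
            LoaderV4Status::Finalized => None,
            _ => Some(&state.authority_address_or_next_version),
        }
    }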
From da81bb838266f3159ae35c2b6a690f13aa0a28f9 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 29 Aug 2024 09:22:58 -0500 Subject: [PATCH 254/529] add clean metric for # slots cleaned (#2777) --- accounts-db/src/accounts_db.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+)
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 63c03ada543eef..0634ab6ba2a1da 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1962,6 +1962,7 @@ struct CleanAccountsStats { clean_stored_dead_slots_us: AtomicU64, uncleaned_roots_slot_list_1: AtomicU64, get_account_sizes_us: AtomicU64, + slots_cleaned: AtomicU64, } impl CleanAccountsStats { @@ -3603,6 +3604,13 @@ impl AccountsDb { .swap(0, Ordering::Relaxed), i64 ), + ( + "slots_cleaned", + self.clean_accounts_stats + .slots_cleaned + .swap(0, Ordering::Relaxed), + i64 + ), ( "clean_old_root_us", self.clean_accounts_stats @@ -8020,6 +8028,10 @@ impl AccountsDb { assert!(reclaimed_offsets.contains_key(&expected_slot)); } + self.clean_accounts_stats + .slots_cleaned + .fetch_add(reclaimed_offsets.len() as u64, Ordering::Relaxed); + reclaimed_offsets.iter().for_each(|(slot, offsets)| { if let Some(store) = self .storage
From b0bcbc1da931cbbdfe4c84318e4426b159b867bb Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 29 Aug 2024 23:47:12 +0900 Subject: [PATCH 255/529] Format strings in core directory (#2782) Temporarily set format_strings = true in rustfmt.toml and run fmt. This wraps long strings that prevented fmt from working properly in the whole source file(s). Several strings that were carefully formatted for readability were left as-is instead of accepting the changes from format_strings --- core/benches/consumer.rs | 5 +- core/src/accounts_hash_verifier.rs | 27 ++-- .../banking_stage/latest_unprocessed_votes.rs | 3 +- core/src/banking_stage/qos_service.rs | 56 +++++--- core/src/consensus.rs | 57 ++++---- core/src/consensus/fork_choice.rs | 3 +- .../consensus/heaviest_subtree_fork_choice.rs | 11 +- core/src/cost_update_service.rs | 2 +- core/src/repair/ancestor_hashes_service.rs | 13 +- .../src/repair/cluster_slot_state_verifier.rs | 16 ++- core/src/repair/duplicate_repair_status.rs | 31 +++-- core/src/repair/repair_generic_traversal.rs | 9 +- core/src/repair/repair_service.rs | 4 +- core/src/replay_stage.rs | 129 +++++++++++------- core/src/snapshot_packager_service.rs | 51 +++---- .../pending_snapshot_packages.rs | 3 +- .../snapshot_gossip_manager.rs | 11 +- core/src/system_monitor_service.rs | 9 +- core/src/tpu_entry_notifier.rs | 6 +- core/src/validator.rs | 31 ++--- core/src/warm_quic_cache_service.rs | 9 +- core/tests/fork-selection.rs | 17 +-- core/tests/snapshots.rs | 42 ++++-- 23 files changed, 329 insertions(+), 216 deletions(-)
diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs index d736b93ef96ffd..ec615016f29a92 100644 --- a/core/benches/consumer.rs +++ b/core/benches/consumer.rs @@ -141,9 +141,8 @@ fn bench_process_and_record_transactions(bencher: &mut Bencher, batch_size: usiz assert_eq!( TRANSACTIONS_PER_ITERATION % batch_size, 0, - "batch_size must be a factor of \ - `TRANSACTIONS_PER_ITERATION` ({TRANSACTIONS_PER_ITERATION}) \ - so that bench results are easily comparable" + "batch_size must be a factor of `TRANSACTIONS_PER_ITERATION` \ ({TRANSACTIONS_PER_ITERATION}) so that bench results are easily comparable" ); let batches_per_iteration = TRANSACTIONS_PER_ITERATION / batch_size;
diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 78b6c2d0298c80..bdf0a760fabdf2 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -76,7 +76,10 @@ impl AccountsHashVerifier { &snapshot_config, )); if let Err(err) = result { - error!("Stopping AccountsHashVerifier! Fatal error while processing accounts package: {err}"); + error!( + "Stopping AccountsHashVerifier! Fatal error while processing accounts \ + package: {err}" + ); exit.store(true, Ordering::Relaxed); break; } @@ -144,7 +147,8 @@ impl AccountsHashVerifier { .count(); assert!( num_eah_packages <= 1, - "Only a single EAH accounts package is allowed at a time!
count: {num_eah_packages}" + "Only a single EAH accounts package is allowed at a time! count: \ + {num_eah_packages}" ); // Get the two highest priority requests, `y` and `z`. @@ -261,12 +265,12 @@ impl AccountsHashVerifier { accounts_db.get_accounts_hash(base_slot) else { panic!( - "incremental snapshot requires accounts hash and capitalization \ - from the full snapshot it is based on \n\ - package: {accounts_package:?} \n\ - accounts hashes: {:?} \n\ - incremental accounts hashes: {:?} \n\ - full snapshot archives: {:?} \n\ + "incremental snapshot requires accounts hash and capitalization from \ + the full snapshot it is based on\n\ + package: {accounts_package:?}\n\ + accounts hashes: {:?}\n\ + incremental accounts hashes: {:?}\n\ + full snapshot archives: {:?}\n\ bank snapshots: {:?}", accounts_db.get_accounts_hashes(), accounts_db.get_incremental_accounts_hashes(), @@ -344,10 +348,9 @@ impl AccountsHashVerifier { HashStats::default(), ); panic!( - "accounts hash capitalization mismatch: expected {}, but calculated {} (then recalculated {})", - accounts_package.expected_capitalization, - lamports, - second_accounts_hash.1, + "accounts hash capitalization mismatch: expected {}, but calculated {} (then \ + recalculated {})", + accounts_package.expected_capitalization, lamports, second_accounts_hash.1, ); } diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 1ead68e564c9ce..0ddaaeafa4ac7e 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -355,7 +355,8 @@ impl LatestUnprocessedVotes { .and_then(|account| from_account::(&account)); if slot_hashes.is_none() { error!( - "Slot hashes sysvar doesn't exist on bank {}. Including all votes without filtering", + "Slot hashes sysvar doesn't exist on bank {}. 
Including all votes without \ + filtering", bank.slot() ); } diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 6fe35c46f54e03..8af53ca4d9e7b3 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -101,25 +101,43 @@ impl QosService { let mut cost_tracking_time = Measure::start("cost_tracking_time"); let mut cost_tracker = bank.write_cost_tracker().unwrap(); let mut num_included = 0; - let select_results = transactions.zip(transactions_costs) - .map(|(tx, cost)| { - match cost { - Ok(cost) => { - match cost_tracker.try_add(&cost) { - Ok(UpdatedCosts{updated_block_cost, updated_costliest_account_cost}) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}, updated costliest account cost {}", bank.slot(), tx, cost, updated_block_cost, updated_costliest_account_cost); - self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed); - num_included += 1; - Ok(cost) - }, - Err(e) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e); - Err(TransactionError::from(e)) - } - } - }, - Err(e) => Err(e), - } + let select_results = transactions + .zip(transactions_costs) + .map(|(tx, cost)| match cost { + Ok(cost) => match cost_tracker.try_add(&cost) { + Ok(UpdatedCosts { + updated_block_cost, + updated_costliest_account_cost, + }) => { + debug!( + "slot {:?}, transaction {:?}, cost {:?}, fit into current block, \ + current block cost {}, updated costliest account cost {}", + bank.slot(), + tx, + cost, + updated_block_cost, + updated_costliest_account_cost + ); + self.metrics + .stats + .selected_txs_count + .fetch_add(1, Ordering::Relaxed); + num_included += 1; + Ok(cost) + } + Err(e) => { + debug!( + "slot {:?}, transaction {:?}, cost {:?}, not fit into current block, \ + '{:?}'", + bank.slot(), + tx, + cost, + e + ); + Err(TransactionError::from(e)) + } + }, + Err(e) => Err(e), }) .collect(); cost_tracker.add_transactions_in_flight(num_included); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 5271af556e493f..79446125f5b819 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -800,7 +800,10 @@ impl Tower { ancestors: &HashMap>, last_vote_ancestors: &HashSet, ) -> Option { - trace!("Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} to {switch_slot}"); + trace!( + "Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} \ + to {switch_slot}" + ); // Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not // want to count votes on the same fork. if Self::is_descendant_slot(candidate_slot, last_voted_slot, ancestors)? { @@ -923,9 +926,10 @@ impl Tower { // `switch < last` is needed not to warn! this message just because of using // newer snapshots on validator restart let message = format!( - "bank_forks doesn't have corresponding data for the stray restored \ - last vote({last_voted_slot}), meaning some inconsistency between saved tower and ledger." - ); + "bank_forks doesn't have corresponding data for the stray restored last \ + vote({last_voted_slot}), meaning some inconsistency between saved tower and \ + ledger." 
+ ); warn!("{}", message); datapoint_warn!("tower_warn", ("warn", message, String)); } @@ -1030,8 +1034,9 @@ impl Tower { return suspended_decision_due_to_major_unsynced_ledger(); } else { panic!( - "Should never consider switching to ancestor ({switch_slot}) of last vote: {last_voted_slot}, ancestors({last_vote_ancestors:?})", - ); + "Should never consider switching to ancestor ({switch_slot}) of last vote: \ + {last_voted_slot}, ancestors({last_vote_ancestors:?})", + ); } } @@ -1254,7 +1259,8 @@ impl Tower { let lockout = *fork_stake as f64 / total_stake as f64; trace!( - "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} total_stake: {}", + "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} \ + total_stake: {}", slot, threshold_vote.slot(), lockout, @@ -1419,9 +1425,8 @@ impl Tower { // While this validator's voting is suspended this way, // suspended_decision_due_to_major_unsynced_ledger() will be also touched. let message = format!( - "For some reason, we're REPROCESSING slots which has already been \ - voted and ROOTED by us; \ - VOTING will be SUSPENDED UNTIL {last_voted_slot}!", + "For some reason, we're REPROCESSING slots which has already been voted and \ + ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!", ); error!("{}", message); datapoint_error!("tower_error", ("error", message, String)); @@ -1549,7 +1554,8 @@ impl Tower { self.last_vote = VoteTransaction::from(Vote::default()); } else { info!( - "{} restored votes (out of {}) were on different fork or are upcoming votes on unrooted slots: {:?}!", + "{} restored votes (out of {}) were on different fork or are upcoming votes on \ + unrooted slots: {:?}!", self.voted_slots().len(), original_votes_len, self.voted_slots() @@ -1623,8 +1629,8 @@ pub enum TowerError { WrongTower(String), #[error( - "The tower is too old: \ - newest slot in tower ({0}) << oldest slot in available history ({1})" + "The tower is too old: newest slot in tower ({0}) << oldest slot in available history \ + ({1})" )] TooOldTower(Slot, Slot), @@ -1704,13 +1710,15 @@ pub fn reconcile_blockstore_roots_with_external_source( Ordering::Equal => false, Ordering::Less => panic!( "last_blockstore_root({last_blockstore_root}) is skipped while traversing \ - blockstore (currently at {current}) from external root ({external_source:?})!?", + blockstore (currently at {current}) from external root \ + ({external_source:?})!?", ), }) .collect(); if !new_roots.is_empty() { info!( - "Reconciling slots as root based on external root: {:?} (external: {:?}, blockstore: {})", + "Reconciling slots as root based on external root: {:?} (external: {:?}, \ + blockstore: {})", new_roots, external_source, last_blockstore_root ); @@ -1733,9 +1741,9 @@ pub fn reconcile_blockstore_roots_with_external_source( // That's because we might have a chance of recovering properly with // newer snapshot. 
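// Hence, only warn here instead of returning an error.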
warn!( - "Couldn't find any ancestor slots from external source ({:?}) \ - towards blockstore root ({}); blockstore pruned or only \ - tower moved into new ledger or just hard fork?", + "Couldn't find any ancestor slots from external source ({:?}) towards blockstore \ + root ({}); blockstore pruned or only tower moved into new ledger or just hard \ + fork?", external_source, last_blockstore_root, ); } @@ -3251,9 +3259,10 @@ pub mod test { } #[test] - #[should_panic(expected = "last_blockstore_root(3) is skipped while \ - traversing blockstore (currently at 1) from \ - external root (Tower(4))!?")] + #[should_panic( + expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at \ + 1) from external root (Tower(4))!?" + )] fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -3522,7 +3531,8 @@ pub mod test { let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history); assert_eq!( format!("{}", result.unwrap_err()), - "The tower is too old: newest slot in tower (0) << oldest slot in available history (1)" + "The tower is too old: newest slot in tower (0) << oldest slot in available history \ + (1)" ); } @@ -3601,7 +3611,8 @@ pub mod test { let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history); assert_eq!( format!("{}", result.unwrap_err()), - "The tower is fatally inconsistent with blockstore: not too old once after got too old?" + "The tower is fatally inconsistent with blockstore: not too old once after got too \ + old?" ); } diff --git a/core/src/consensus/fork_choice.rs b/core/src/consensus/fork_choice.rs index cb5ddabfbafcf4..04c0b43fa05844 100644 --- a/core/src/consensus/fork_choice.rs +++ b/core/src/consensus/fork_choice.rs @@ -256,7 +256,8 @@ fn select_candidates_failed_switch_duplicate_rollback<'a>( // invalid candidate). Thus, it's safe to use as the reset bank. let reset_bank = Some(heaviest_bank); info!( - "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", + "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: \ + {:?}", heaviest_bank.slot(), reset_bank.as_ref().map(|b| b.slot()), latest_duplicate_ancestor, diff --git a/core/src/consensus/heaviest_subtree_fork_choice.rs b/core/src/consensus/heaviest_subtree_fork_choice.rs index 39b06c5eb4d091..5e4b3089d31e87 100644 --- a/core/src/consensus/heaviest_subtree_fork_choice.rs +++ b/core/src/consensus/heaviest_subtree_fork_choice.rs @@ -144,7 +144,11 @@ impl ForkInfo { ) { if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor { if latest_invalid_ancestor <= newly_valid_ancestor { - info!("Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was duplicate confirmed", my_key, latest_invalid_ancestor, newly_valid_ancestor); + info!( + "Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was \ + duplicate confirmed", + my_key, latest_invalid_ancestor, newly_valid_ancestor + ); self.latest_invalid_ancestor = None; } } @@ -1188,8 +1192,9 @@ impl HeaviestSubtreeForkChoice { // validator has been running, so we must be able to fetch best_slots for all of // them. 
panic!( - "a bank at last_voted_slot({last_voted_slot_hash:?}) is a frozen bank so must have been \ - added to heaviest_subtree_fork_choice at time of freezing", + "a bank at last_voted_slot({last_voted_slot_hash:?}) is a frozen \ + bank so must have been added to heaviest_subtree_fork_choice at \ + time of freezing", ) } else { // fork_infos doesn't have corresponding data for the stale stray last vote, diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index 58ef6c48ed7721..de12b3a703ee0a 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -60,7 +60,7 @@ impl CostUpdateService { let slot = bank.slot(); trace!( "inflight transaction count is {in_flight_transaction_count} \ - for slot {slot} after {loop_count} iteration(s)" + for slot {slot} after {loop_count} iteration(s)" ); cost_tracker.report_stats(slot); break; diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 92e5f1ad9d7c4e..a20794189a19ff 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -559,8 +559,10 @@ impl AncestorHashesService { // order to vote. // This fits the alternate criteria we use in `find_epoch_slots_frozen_dead_slots` // so we can upgrade it to `repairable_dead_slot_pool`. - info!("{pruned_slot} is part of a popular pruned fork however we previously marked it as dead. - Upgrading as dead duplicate confirmed"); + info!( + "{pruned_slot} is part of a popular pruned fork however we previously \ + marked it as dead. Upgrading as dead duplicate confirmed" + ); dead_slot_pool.remove(&pruned_slot); repairable_dead_slot_pool.insert(pruned_slot); } else if repairable_dead_slot_pool.contains(&pruned_slot) { @@ -568,8 +570,11 @@ impl AncestorHashesService { // ignore the additional information that `pruned_slot` is popular pruned. // This is similar to the above case where `pruned_slot` was first pruned // and then marked dead duplicate confirmed. - info!("Received pruned duplicate confirmed status for {pruned_slot} that was previously marked - dead duplicate confirmed. Ignoring and processing it as dead duplicate confirmed."); + info!( + "Received pruned duplicate confirmed status for {pruned_slot} that \ + was previously marked dead duplicate confirmed. Ignoring and \ + processing it as dead duplicate confirmed." 
+ ); } else { popular_pruned_slot_pool.insert(pruned_slot); } diff --git a/core/src/repair/cluster_slot_state_verifier.rs b/core/src/repair/cluster_slot_state_verifier.rs index ab1928cee5c568..375cbe466c45f3 100644 --- a/core/src/repair/cluster_slot_state_verifier.rs +++ b/core/src/repair/cluster_slot_state_verifier.rs @@ -456,8 +456,8 @@ fn check_epoch_slots_hash_against_bank_status( assert!(is_popular_pruned); // The cluster sample found the troublesome slot which caused this fork to be pruned warn!( - "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but we - have pruned it due to incorrect ancestry" + "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \ + we have pruned it due to incorrect ancestry" ); } } @@ -644,8 +644,8 @@ fn on_epoch_slots_frozen( if let Some(duplicate_confirmed_hash) = duplicate_confirmed_hash { if epoch_slots_frozen_hash != duplicate_confirmed_hash { warn!( - "EpochSlots sample returned slot {} with hash {}, but we already saw - duplicate confirmation on hash: {:?}", + "EpochSlots sample returned slot {} with hash {}, but we already saw \ + duplicate confirmation on hash: {:?}", slot, epoch_slots_frozen_hash, duplicate_confirmed_hash ); } @@ -676,9 +676,11 @@ fn on_epoch_slots_frozen( } fn on_popular_pruned_fork(slot: Slot) -> Vec { - warn!("{slot} is part of a pruned fork which has reached the DUPLICATE_THRESHOLD aggregating across descendants - and slot versions. It is suspected to be duplicate or have an ancestor that is duplicate. - Notifying ancestor_hashes_service"); + warn!( + "{slot} is part of a pruned fork which has reached the DUPLICATE_THRESHOLD aggregating \ + across descendants and slot versions. It is suspected to be duplicate or have an \ + ancestor that is duplicate. Notifying ancestor_hashes_service" + ); vec![ResultingStateChange::SendAncestorHashesReplayUpdate( AncestorHashesReplayUpdate::PopularPrunedFork(slot), )] diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs index 75956a64d6e58f..c39d194c604c22 100644 --- a/core/src/repair/duplicate_repair_status.rs +++ b/core/src/repair/duplicate_repair_status.rs @@ -319,14 +319,14 @@ impl AncestorRequestStatus { agreed_response[*mismatch_i]; let mismatch_our_frozen_hash = blockstore.get_bank_hash(mismatch_slot); info!( - "When processing the ancestor sample for {}, there was a mismatch - for {mismatch_slot}: we had frozen hash {:?} and the cluster agreed upon - {mismatch_agreed_upon_hash}. However for a later ancestor {ancestor_slot} - we have agreement on {our_frozen_hash} as the bank hash. This should never - be possible, something is wrong or the cluster sample is invalid. - Rejecting and queuing the ancestor hashes request for retry", - self.requested_mismatched_slot, - mismatch_our_frozen_hash + "When processing the ancestor sample for {}, there was a mismatch \ + for {mismatch_slot}: we had frozen hash {:?} and the cluster agreed \ + upon {mismatch_agreed_upon_hash}. However for a later ancestor \ + {ancestor_slot} we have agreement on {our_frozen_hash} as the bank \ + hash. This should never be possible, something is wrong or the \ + cluster sample is invalid. 
Rejecting and queuing the ancestor hashes \ + request for retry", + self.requested_mismatched_slot, mismatch_our_frozen_hash ); return DuplicateAncestorDecision::InvalidSample; } @@ -346,13 +346,14 @@ impl AncestorRequestStatus { let (mismatch_slot, mismatch_agreed_upon_hash) = agreed_response[*mismatch_i]; info!( - "When processing the ancestor sample for {}, an earlier ancestor {mismatch_slot} - was agreed upon by the cluster with hash {mismatch_agreed_upon_hash} but not - frozen in our blockstore. However for a later ancestor {ancestor_slot} we have - agreement on {our_frozen_hash} as the bank hash. This should only be possible if - we have just started from snapshot and immediately encountered a duplicate block on - a popular pruned fork, otherwise something is seriously wrong. Continuing with the - repair", + "When processing the ancestor sample for {}, an earlier ancestor \ + {mismatch_slot} was agreed upon by the cluster with hash \ + {mismatch_agreed_upon_hash} but not frozen in our blockstore. \ + However for a later ancestor {ancestor_slot} we have agreement on \ + {our_frozen_hash} as the bank hash. This should only be possible if \ + we have just started from snapshot and immediately encountered a \ + duplicate block on a popular pruned fork, otherwise something is \ + seriously wrong. Continuing with the repair", self.requested_mismatched_slot ); } diff --git a/core/src/repair/repair_generic_traversal.rs b/core/src/repair/repair_generic_traversal.rs index 3e704149cb4dd5..35b5276bcbbaa4 100644 --- a/core/src/repair/repair_generic_traversal.rs +++ b/core/src/repair/repair_generic_traversal.rs @@ -139,7 +139,8 @@ pub fn get_closest_completion( ( "error", format!( - "last_index + 1 < shred_count. last_index={last_index} shred_count={shred_count}", + "last_index + 1 < shred_count. last_index={last_index} \ + shred_count={shred_count}", ), String ), @@ -153,9 +154,9 @@ pub fn get_closest_completion( ( "error", format!( - "last_index < slot_meta.consumed. last_index={} slot_meta.consumed={}", - last_index, - slot_meta.consumed, + "last_index < slot_meta.consumed. last_index={} \ + slot_meta.consumed={}", + last_index, slot_meta.consumed, ), String ), diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 7d3d6dd54213e4..f69aea596960e2 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -824,8 +824,8 @@ impl RepairService { // Select weighted sample of valid peers if no valid peer was passed in. if repair_peers.is_empty() { debug!( - "No pubkey was provided or no valid repair socket was found. \ - Sampling a set of repair peers instead." + "No pubkey was provided or no valid repair socket was found. Sampling a set of \ + repair peers instead." 
); repair_peers = Self::get_repair_peers(cluster_info.clone(), cluster_slots, slot); } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index f1a92daa2650f5..4bf1f5cef37d73 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -205,10 +205,10 @@ impl PartitionInfo { heaviest_fork_failures: Vec, ) { if self.partition_start_time.is_none() && partition_detected { - warn!("PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset slot: {}", - heaviest_slot, - last_voted_slot, - reset_bank_slot, + warn!( + "PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset \ + slot: {}", + heaviest_slot, last_voted_slot, reset_bank_slot, ); datapoint_info!( "replay_stage-partition-start", @@ -580,11 +580,9 @@ impl ReplayStage { Ok(tower) => tower, Err(err) => { error!( - "Unable to load new tower when attempting to change identity from {} to {} on - ReplayStage startup, Exiting: {}", - my_old_pubkey, - my_pubkey, - err + "Unable to load new tower when attempting to change identity from {} \ + to {} on ReplayStage startup, Exiting: {}", + my_old_pubkey, my_pubkey, err ); // drop(_exit) will set the exit flag, eventually tearing down the entire process return; @@ -1037,11 +1035,9 @@ impl ReplayStage { Ok(tower) => tower, Err(err) => { error!( - "Unable to load new tower when attempting to change identity - from {} to {} on set-identity, Exiting: {}", - my_old_pubkey, - my_pubkey, - err + "Unable to load new tower when attempting to change \ + identity from {} to {} on set-identity, Exiting: {}", + my_old_pubkey, my_pubkey, err ); // drop(_exit) will set the exit flag, eventually tearing down the entire process return; @@ -1214,7 +1210,10 @@ impl ReplayStage { match tower { Ok(tower) => Ok(tower), Err(err) if err.is_file_missing() => { - warn!("Failed to load tower, file missing for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); + warn!( + "Failed to load tower, file missing for {node_pubkey}: {err}. Creating a new \ + tower from bankforks." + ); Ok(Tower::new_from_bankforks( &bank_forks.read().unwrap(), node_pubkey, @@ -1222,7 +1221,10 @@ impl ReplayStage { )) } Err(err) if err.is_too_old() => { - warn!("Failed to load tower, too old for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); + warn!( + "Failed to load tower, too old for {node_pubkey}: {err}. Creating a new tower \ + from bankforks." + ); Ok(Tower::new_from_bankforks( &bank_forks.read().unwrap(), node_pubkey, @@ -1284,10 +1286,9 @@ impl ReplayStage { ) { if retransmit_info.reached_retransmit_threshold() { info!( - "Retrying retransmit: latest_leader_slot={} slot={} retransmit_info={:?}", - latest_leader_slot, - slot, - &retransmit_info, + "Retrying retransmit: latest_leader_slot={} slot={} \ + retransmit_info={:?}", + latest_leader_slot, slot, &retransmit_info, ); datapoint_info!( metric_name, @@ -1318,7 +1319,11 @@ impl ReplayStage { // It is possible that bank corresponding to `start_slot` has been // dumped, so we need to double check it exists before proceeding if !progress.contains(&start_slot) { - warn!("Poh start slot {start_slot}, is missing from progress map. This indicates that we are in the middle of a dump and repair. Skipping retransmission of unpropagated leader slots"); + warn!( + "Poh start slot {start_slot}, is missing from progress map. This indicates that \ + we are in the middle of a dump and repair. 
Skipping retransmission of \ + unpropagated leader slots" + ); return; } @@ -1509,7 +1514,9 @@ impl ReplayStage { } // Should not dump slots for which we were the leader - if Some(*my_pubkey) == leader_schedule_cache.slot_leader_at(*duplicate_slot, None) { + if Some(*my_pubkey) + == leader_schedule_cache.slot_leader_at(*duplicate_slot, None) + { if let Some(bank) = bank_forks.read().unwrap().get(*duplicate_slot) { bank_hash_details::write_bank_hash_details_file(&bank) .map_err(|err| { @@ -1517,14 +1524,18 @@ impl ReplayStage { }) .ok(); } else { - warn!("Unable to get bank for slot {duplicate_slot} from bank forks \ - while attempting to write bank hash details file"); + warn!( + "Unable to get bank for slot {duplicate_slot} from bank forks \ + while attempting to write bank hash details file" + ); } - panic!("We are attempting to dump a block that we produced. \ - This indicates that we are producing duplicate blocks, \ - or that there is a bug in our runtime/replay code which \ - causes us to compute different bank hashes than the rest of the cluster. \ - We froze slot {duplicate_slot} with hash {frozen_hash:?} while the cluster hash is {correct_hash}"); + panic!( + "We are attempting to dump a block that we produced. This indicates \ + that we are producing duplicate blocks, or that there is a bug in \ + our runtime/replay code which causes us to compute different bank \ + hashes than the rest of the cluster. We froze slot {duplicate_slot} \ + with hash {frozen_hash:?} while the cluster hash is {correct_hash}" + ); } let attempt_no = purge_repair_slot_counter @@ -1532,11 +1543,13 @@ impl ReplayStage { .and_modify(|x| *x += 1) .or_insert(1); if *attempt_no > MAX_REPAIR_RETRY_LOOP_ATTEMPTS { - panic!("We have tried to repair duplicate slot: {duplicate_slot} more than {MAX_REPAIR_RETRY_LOOP_ATTEMPTS} times \ - and are unable to freeze a block with bankhash {correct_hash}, \ - instead we have a block with bankhash {frozen_hash:?}. \ - This is most likely a bug in the runtime. \ - At this point manual intervention is needed to make progress. Exiting"); + panic!( + "We have tried to repair duplicate slot: {duplicate_slot} more than \ + {MAX_REPAIR_RETRY_LOOP_ATTEMPTS} times and are unable to freeze a \ + block with bankhash {correct_hash}, instead we have a block with \ + bankhash {frozen_hash:?}. This is most likely a bug in the runtime. \ + At this point manual intervention is needed to make progress. Exiting" + ); } Self::purge_unconfirmed_duplicate_slot( @@ -1597,8 +1610,15 @@ impl ReplayStage { } in ancestor_duplicate_slots_receiver.try_iter() { warn!( - "{} ReplayStage notified of duplicate slot from ancestor hashes service but we observed as {}: {:?}", - pubkey, if request_type.is_pruned() {"pruned"} else {"dead"}, (epoch_slots_frozen_slot, epoch_slots_frozen_hash), + "{} ReplayStage notified of duplicate slot from ancestor hashes service but we \ + observed as {}: {:?}", + pubkey, + if request_type.is_pruned() { + "pruned" + } else { + "dead" + }, + (epoch_slots_frozen_slot, epoch_slots_frozen_hash), ); let epoch_slots_frozen_state = EpochSlotsFrozenState::new_from_state( epoch_slots_frozen_slot, @@ -1726,7 +1746,10 @@ impl ReplayStage { // replay on successful repair of the parent. If this block is also a duplicate, it // will be handled in the next round of repair/replay - so we just clear the dead // flag for now. - warn!("not purging descendant {} of slot {} as it is dead. 
resetting dead flag instead", slot, duplicate_slot); + warn!( + "not purging descendant {slot} of slot {duplicate_slot} as it is dead. \ + resetting dead flag instead" + ); // Clear the "dead" flag allowing ReplayStage to start replaying // this slot once the parent is repaired blockstore.remove_dead_slot(slot).unwrap(); @@ -1839,7 +1862,8 @@ impl ReplayStage { { assert_eq!( prev_hash, duplicate_confirmed_hash, - "Additional duplicate confirmed notification for slot {confirmed_slot} with a different hash" + "Additional duplicate confirmed notification for slot {confirmed_slot} \ + with a different hash" ); // Already processed this signal continue; @@ -2058,8 +2082,9 @@ impl ReplayStage { let Some(parent) = bank_forks.read().unwrap().get(parent_slot) else { warn!( - "Poh recorder parent slot {parent_slot} is missing from bank_forks. This indicates \ - that we are in the middle of a dump and repair. Unable to start leader"); + "Poh recorder parent slot {parent_slot} is missing from bank_forks. This \ + indicates that we are in the middle of a dump and repair. Unable to start leader" + ); return false; }; @@ -2106,8 +2131,12 @@ impl ReplayStage { ); if !Self::check_propagation_for_start_leader(poh_slot, parent_slot, progress_map) { - let latest_unconfirmed_leader_slot = progress_map.get_latest_leader_slot_must_exist(parent_slot) - .expect("In order for propagated check to fail, latest leader must exist in progress map"); + let latest_unconfirmed_leader_slot = progress_map + .get_latest_leader_slot_must_exist(parent_slot) + .expect( + "In order for propagated check to fail, latest leader must exist in \ + progress map", + ); if poh_slot != skipped_slots_info.last_skipped_slot { datapoint_info!( "replay_stage-skip_leader_slot", @@ -2516,8 +2545,10 @@ impl ReplayStage { .find(|keypair| keypair.pubkey() == authorized_voter_pubkey) { None => { - warn!("The authorized keypair {} for vote account {} is not available. Unable to vote", - authorized_voter_pubkey, vote_account_pubkey); + warn!( + "The authorized keypair {authorized_voter_pubkey} for vote account \ + {vote_account_pubkey} is not available. 
Unable to vote" + ); return GenerateVoteTxResult::NonVoting; } Some(authorized_voter_keypair) => authorized_voter_keypair, @@ -2584,7 +2615,8 @@ impl ReplayStage { { last_vote_refresh_time.last_print_time = Instant::now(); info!( - "Last landed vote for slot {} in bank {} is greater than the current last vote for slot: {} tracked by Tower", + "Last landed vote for slot {} in bank {} is greater than the current last vote \ + for slot: {} tracked by Tower", my_latest_landed_vote, heaviest_bank_on_same_fork.slot(), last_voted_slot @@ -3107,8 +3139,8 @@ impl ReplayStage { let replay_progress = bank_progress.replay_progress.clone(); let r_replay_progress = replay_progress.read().unwrap(); debug!( - "bank {} has completed replay from blockstore, \ - contribute to update cost with {:?}", + "bank {} has completed replay from blockstore, contribute to update cost with \ + {:?}", bank.slot(), r_replay_stats.batch_execute.totals ); @@ -3793,7 +3825,8 @@ impl ReplayStage { if let Some(prev_hash) = duplicate_confirmed_slots.insert(*slot, *frozen_hash) { assert_eq!( prev_hash, *frozen_hash, - "Additional duplicate confirmed notification for slot {slot} with a different hash" + "Additional duplicate confirmed notification for slot {slot} with a different \ + hash" ); // Already processed this signal continue; diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index f9c40e4f9b13fe..274d63cbd31f04 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -49,9 +49,8 @@ impl SnapshotPackagerService { break; } - let Some(snapshot_package) = Self::get_next_snapshot_package( - &pending_snapshot_packages, - ) + let Some(snapshot_package) = + Self::get_next_snapshot_package(&pending_snapshot_packages) else { std::thread::sleep(Self::LOOP_LIMITER); continue; @@ -67,36 +66,42 @@ impl SnapshotPackagerService { // Archiving the snapshot package is not allowed to fail. // AccountsBackgroundService calls `clean_accounts()` with a value for // latest_full_snapshot_slot that requires this archive call to succeed. - let (archive_result, archive_time_us) = measure_us!(snapshot_utils::serialize_and_archive_snapshot_package( - snapshot_package, - &snapshot_config, - )); + let (archive_result, archive_time_us) = + measure_us!(snapshot_utils::serialize_and_archive_snapshot_package( + snapshot_package, + &snapshot_config, + )); if let Err(err) = archive_result { - error!("Stopping SnapshotPackagerService! Fatal error while archiving snapshot package: {err}"); + error!( + "Stopping SnapshotPackagerService! 
Fatal error while archiving \ + snapshot package: {err}" + ); exit.store(true, Ordering::Relaxed); break; } - if let Some(snapshot_gossip_manager) = snapshot_gossip_manager.as_mut() { - snapshot_gossip_manager.push_snapshot_hash(snapshot_kind, (snapshot_slot, snapshot_hash)); + snapshot_gossip_manager + .push_snapshot_hash(snapshot_kind, (snapshot_slot, snapshot_hash)); } - let (_, purge_archives_time_us) = measure_us!(snapshot_utils::purge_old_snapshot_archives( - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - snapshot_config.maximum_full_snapshot_archives_to_retain, - snapshot_config.maximum_incremental_snapshot_archives_to_retain, - )); + let (_, purge_archives_time_us) = + measure_us!(snapshot_utils::purge_old_snapshot_archives( + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + snapshot_config.maximum_full_snapshot_archives_to_retain, + snapshot_config.maximum_incremental_snapshot_archives_to_retain, + )); // Now that this snapshot package has been archived, it is safe to remove // all bank snapshots older than this slot. We want to keep the bank // snapshot *at this slot* so that it can be used during restarts, when // booting from local state. - let (_, purge_bank_snapshots_time_us) = measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot( - &snapshot_config.bank_snapshots_dir, - snapshot_slot, - )); + let (_, purge_bank_snapshots_time_us) = + measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot( + &snapshot_config.bank_snapshots_dir, + snapshot_slot, + )); let handling_time_us = measure_handling.end_as_us(); datapoint_info!( @@ -109,11 +114,7 @@ impl SnapshotPackagerService { purge_bank_snapshots_time_us, i64 ), - ( - "purge_old_archives_time_us", - purge_archives_time_us, - i64 - ), + ("purge_old_archives_time_us", purge_archives_time_us, i64), ); } info!("SnapshotPackagerService has stopped"); diff --git a/core/src/snapshot_packager_service/pending_snapshot_packages.rs b/core/src/snapshot_packager_service/pending_snapshot_packages.rs index 726a3dd39477f7..9edf9624a88dd8 100644 --- a/core/src/snapshot_packager_service/pending_snapshot_packages.rs +++ b/core/src/snapshot_packager_service/pending_snapshot_packages.rs @@ -59,7 +59,8 @@ impl PendingSnapshotPackages { old: {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}", ); info!( - "overwrote pending incremental snapshot package, old slot: {}, new slot: {}", + "overwrote pending incremental snapshot package, old slot: {}, new slot: \ + {}", pending_incremental_snapshot_package.slot, snapshot_package.slot, ); } diff --git a/core/src/snapshot_packager_service/snapshot_gossip_manager.rs b/core/src/snapshot_packager_service/snapshot_gossip_manager.rs index d4ab9863642e09..d7e1f89abc263f 100644 --- a/core/src/snapshot_packager_service/snapshot_gossip_manager.rs +++ b/core/src/snapshot_packager_service/snapshot_gossip_manager.rs @@ -102,9 +102,10 @@ impl SnapshotGossipManager { .as_mut() .expect("there must already be a full snapshot hash"); assert_eq!( - base_slot, latest_snapshot_hashes.full.0.0, - "the incremental snapshot's base slot ({}) must match the latest full snapshot's slot ({})", - base_slot, latest_snapshot_hashes.full.0.0, + base_slot, latest_snapshot_hashes.full.0 .0, + "the incremental snapshot's base slot ({}) must match the latest full snapshot's slot \ + ({})", + base_slot, latest_snapshot_hashes.full.0 .0, ); latest_snapshot_hashes.incremental = 
Some(incremental_snapshot_hash); } @@ -129,8 +130,8 @@ impl SnapshotGossipManager { .collect(), ) .expect( - "Bug! The programmer contract has changed for push_snapshot_hashes() \ - and a new error case has been added that has not been handled here.", + "Bug! The programmer contract has changed for push_snapshot_hashes() and a new \ + error case has been added that has not been handled here.", ); } } diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index bec85780fc3ede..455698fe300186 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -466,8 +466,13 @@ impl SystemMonitorService { .map(|(key, interesting_limit, current_value)| { datapoint_warn!("os-config", (key, *current_value, i64)); match interesting_limit { - InterestingLimit::Recommend(recommended_value) if current_value < recommended_value => { - warn!(" {key}: recommended={recommended_value} current={current_value}, too small"); + InterestingLimit::Recommend(recommended_value) + if current_value < recommended_value => + { + warn!( + " {key}: recommended={recommended_value}, current={current_value} \ + too small" + ); false } InterestingLimit::Recommend(recommended_value) => { diff --git a/core/src/tpu_entry_notifier.rs b/core/src/tpu_entry_notifier.rs index 22994455e88814..583ef343510ff7 100644 --- a/core/src/tpu_entry_notifier.rs +++ b/core/src/tpu_entry_notifier.rs @@ -85,14 +85,16 @@ impl TpuEntryNotifier { starting_transaction_index: *current_transaction_index, }) { warn!( - "Failed to send slot {slot:?} entry {index:?} from Tpu to EntryNotifierService, error {err:?}", + "Failed to send slot {slot:?} entry {index:?} from Tpu to EntryNotifierService, \ + error {err:?}", ); } *current_transaction_index += entry.transactions.len(); if let Err(err) = broadcast_entry_sender.send((bank, (entry, tick_height))) { warn!( - "Failed to send slot {slot:?} entry {index:?} from Tpu to BroadcastStage, error {err:?}", + "Failed to send slot {slot:?} entry {index:?} from Tpu to BroadcastStage, error \ + {err:?}", ); // If the BroadcastStage channel is closed, the validator has halted. Try to exit // gracefully. 
diff --git a/core/src/validator.rs b/core/src/validator.rs index 0f99e4c4768497..6f42c60c1c9f5c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -609,8 +609,8 @@ impl Validator { expected_shred_version, ) .context( - "Failed to backup and clear shreds with incorrect \ - shred version from blockstore", + "Failed to backup and clear shreds with incorrect shred version from \ + blockstore", )?; } } @@ -682,9 +682,8 @@ impl Validator { .and_then(|geyser_plugin_service| geyser_plugin_service.get_block_metadata_notifier()); info!( - "Geyser plugin: accounts_update_notifier: {}, \ - transaction_notifier: {}, \ - entry_notifier: {}", + "Geyser plugin: accounts_update_notifier: {}, transaction_notifier: {}, \ + entry_notifier: {}", accounts_update_notifier.is_some(), transaction_notifier.is_some(), entry_notifier.is_some() @@ -1695,9 +1694,8 @@ fn check_poh_speed(bank: &Bank, maybe_hash_samples: Option) -> Result<(), V (hashes_per_slot as f64 / target_slot_duration.as_secs_f64()) as u64; info!( - "PoH speed check: \ - computed hashes per second {my_hashes_per_second}, \ - target hashes per second {target_hashes_per_second}" + "PoH speed check: computed hashes per second {my_hashes_per_second}, target hashes per \ + second {target_hashes_per_second}" ); if my_hashes_per_second < target_hashes_per_second { return Err(ValidatorError::PohTooSlow { @@ -1777,20 +1775,21 @@ fn post_process_restored_tower( } if should_require_tower && voting_has_been_active { return Err(format!( - "Requested mandatory tower restore failed: {err}. \ - And there is an existing vote_account containing actual votes. \ - Aborting due to possible conflicting duplicate votes" + "Requested mandatory tower restore failed: {err}. And there is an existing \ + vote_account containing actual votes. Aborting due to possible conflicting \ + duplicate votes" )); } if err.is_file_missing() && !voting_has_been_active { // Currently, don't protect against spoofed snapshots with no tower at all info!( - "Ignoring expected failed tower restore because this is the initial \ - validator start with the vote account..." + "Ignoring expected failed tower restore because this is the initial validator \ + start with the vote account..." ); } else { error!( - "Rebuilding a new tower from the latest vote account due to failed tower restore: {}", + "Rebuilding a new tower from the latest vote account due to failed tower \ + restore: {}", err ); } @@ -2396,8 +2395,8 @@ fn wait_for_supermajority( std::cmp::Ordering::Less => return Ok(false), std::cmp::Ordering::Greater => { error!( - "Ledger does not have enough data to wait for supermajority, \ - please enable snapshot fetch. Has {} needs {}", + "Ledger does not have enough data to wait for supermajority, please \ + enable snapshot fetch. 
Has {} needs {}", bank.slot(), wait_for_supermajority_slot ); diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index e4a67cbe993169..fd86e2af9f766f 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -39,7 +39,7 @@ impl WarmQuicCacheService { let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT..CACHE_JITTER_SLOT); let mut maybe_last_leader = None; while !exit.load(Ordering::Relaxed) { - let leader_pubkey = poh_recorder + let leader_pubkey = poh_recorder .read() .unwrap() .leader_after_n_slots((CACHE_OFFSET_SLOT + slot_jitter) as u64); @@ -49,12 +49,15 @@ impl WarmQuicCacheService { { maybe_last_leader = Some(leader_pubkey); if let Some(Ok(addr)) = cluster_info - .lookup_contact_info(&leader_pubkey, |node| node.tpu(Protocol::QUIC)) + .lookup_contact_info(&leader_pubkey, |node| { + node.tpu(Protocol::QUIC) + }) { let conn = connection_cache.get_connection(&addr); if let Err(err) = conn.send_data(&[]) { warn!( - "Failed to warmup QUIC connection to the leader {:?}, Error {:?}", + "Failed to warmup QUIC connection to the leader {:?}, \ + Error {:?}", leader_pubkey, err ); } diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index 02385a1e378c88..d54fbfba8094b4 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -561,14 +561,15 @@ fn test_with_partitions( trunk.0 }; println!( - "time: {}, tip converged: {}, trunk id: {}, trunk time: {}, trunk converged {}, trunk height {}", - time, - calc_tip_converged(&towers, &converge_map), - trunk.0, - trunk_time, - trunk.1, - calc_fork_depth(&fork_tree, trunk.0) - ); + "time: {}, tip converged: {}, trunk id: {}, trunk time: {}, trunk converged {}, \ + trunk height {}", + time, + calc_tip_converged(&towers, &converge_map), + trunk.0, + trunk_time, + trunk.1, + calc_fork_depth(&fork_tree, trunk.0) + ); if break_early && calc_tip_converged(&towers, &converge_map) == len { break; } diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 2a6c77ddb0a0c0..6e17f5a9cfb0f2 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -434,8 +434,14 @@ fn test_bank_forks_incremental_snapshot( INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 5; const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2 - 1; - info!("Running bank forks incremental snapshot test, full snapshot interval: {} slots, incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, LAST_SLOT, SET_ROOT_INTERVAL); + info!( + "Running bank forks incremental snapshot test, full snapshot interval: {} slots, \ + incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + LAST_SLOT, + SET_ROOT_INTERVAL + ); let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, @@ -444,8 +450,20 @@ fn test_bank_forks_incremental_snapshot( FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, ); - trace!("SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}", - snapshot_test_config.accounts_dir.display(), snapshot_test_config.bank_snapshots_dir.path().display(), snapshot_test_config.full_snapshot_archives_dir.path().display(), snapshot_test_config.incremental_snapshot_archives_dir.path().display()); + trace!( + 
"SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: \ + {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}", + snapshot_test_config.accounts_dir.display(), + snapshot_test_config.bank_snapshots_dir.path().display(), + snapshot_test_config + .full_snapshot_archives_dir + .path() + .display(), + snapshot_test_config + .incremental_snapshot_archives_dir + .path() + .display() + ); let bank_forks = snapshot_test_config.bank_forks.clone(); let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; @@ -637,11 +655,11 @@ fn test_snapshots_with_background_services( info!("Running snapshots with background services test..."); trace!( "Test configuration parameters:\ - \n\tfull snapshot archive interval: {} slots\ - \n\tincremental snapshot archive interval: {} slots\ - \n\tbank snapshot interval: {} slots\ - \n\tset root interval: {} slots\ - \n\tlast slot: {}", + \n\tfull snapshot archive interval: {} slots\ + \n\tincremental snapshot archive interval: {} slots\ + \n\tbank snapshot interval: {} slots\ + \n\tset root interval: {} slots\ + \n\tlast slot: {}", FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, BANK_SNAPSHOT_INTERVAL_SLOTS, @@ -773,7 +791,8 @@ fn test_snapshots_with_background_services( { assert!( timer.elapsed() < MAX_WAIT_DURATION, - "Waiting for full snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} maximum wait duration!", + "Waiting for full snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} maximum \ + wait duration!", ); std::thread::sleep(Duration::from_secs(1)); } @@ -791,7 +810,8 @@ fn test_snapshots_with_background_services( { assert!( timer.elapsed() < MAX_WAIT_DURATION, - "Waiting for incremental snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} maximum wait duration!", + "Waiting for incremental snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} \ + maximum wait duration!", ); std::thread::sleep(Duration::from_secs(1)); } From da2a31921bacc64586100c41b84c674778fc37ba Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 29 Aug 2024 10:55:11 -0400 Subject: [PATCH 256/529] Fixes error string in hash-cache-tool (#2778) --- accounts-db/accounts-hash-cache-tool/src/main.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 8e3d0ff225e5cf..ecd30f30085e0e 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -193,12 +193,14 @@ fn main() { .get_matches(); let subcommand = matches.subcommand(); - let subcommand_str = subcommand.0; + let mut command_str = subcommand.0.to_string(); match subcommand { (CMD_INSPECT, Some(subcommand_matches)) => cmd_inspect(&matches, subcommand_matches), (CMD_SEARCH, Some(subcommand_matches)) => cmd_search(&matches, subcommand_matches), (CMD_DIFF, Some(subcommand_matches)) => { let diff_subcommand = subcommand_matches.subcommand(); + command_str += " "; + command_str += diff_subcommand.0; match diff_subcommand { (CMD_DIFF_FILES, Some(diff_subcommand_matches)) => { cmd_diff_files(&matches, diff_subcommand_matches) @@ -215,7 +217,7 @@ fn main() { _ => unreachable!(), } .unwrap_or_else(|err| { - eprintln!("Error: '{subcommand_str}' failed: {err}"); + eprintln!("Error: '{command_str}' failed: {err}"); std::process::exit(1); }); } From 2a59e61dfd20c9003f7c0d61d46f50dd43009b72 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 29 Aug 2024 10:52:54 -0500 Subject: [PATCH 257/529] 
TransactionView - sanitization checks (#2757) --- transaction-view/benches/transaction_view.rs | 23 +- .../src/address_table_lookup_meta.rs | 33 +- transaction-view/src/bytes.rs | 19 +- transaction-view/src/lib.rs | 1 + transaction-view/src/message_header_meta.rs | 4 +- transaction-view/src/result.rs | 9 +- transaction-view/src/sanitize.rs | 544 ++++++++++++++++++ transaction-view/src/signature_meta.rs | 4 +- .../src/static_account_keys_meta.rs | 4 +- transaction-view/src/transaction_meta.rs | 16 +- transaction-view/src/transaction_view.rs | 47 +- 11 files changed, 673 insertions(+), 31 deletions(-) create mode 100644 transaction-view/src/sanitize.rs diff --git a/transaction-view/benches/transaction_view.rs b/transaction-view/benches/transaction_view.rs index 79a4393be5207e..10901023908fcd 100644 --- a/transaction-view/benches/transaction_view.rs +++ b/transaction-view/benches/transaction_view.rs @@ -15,7 +15,7 @@ use { signature::Keypair, signer::Signer, system_instruction, - transaction::VersionedTransaction, + transaction::{SanitizedVersionedTransaction, VersionedTransaction}, }, }; @@ -41,11 +41,30 @@ fn bench_transactions_parsing( }); }); + // Legacy Transaction Parsing and Sanitize checks + group.bench_function("SanitizedVersionedTransaction", |c| { + c.iter(|| { + for bytes in serialized_transactions.iter() { + let tx = bincode::deserialize::(black_box(bytes)).unwrap(); + let _ = SanitizedVersionedTransaction::try_new(tx).unwrap(); + } + }); + }); + // New Transaction Parsing group.bench_function("TransactionView", |c| { c.iter(|| { for bytes in serialized_transactions.iter() { - let _ = TransactionView::try_new(black_box(bytes.as_ref())).unwrap(); + let _ = TransactionView::try_new_unsanitized(black_box(bytes.as_ref())).unwrap(); + } + }); + }); + + // New Transaction Parsing and Sanitize checks + group.bench_function("TransactionView (Sanitized)", |c| { + c.iter(|| { + for bytes in serialized_transactions.iter() { + let _ = TransactionView::try_new_sanitized(black_box(bytes.as_ref())).unwrap(); } }); }); diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs index bbce7817cda5fc..01f3b2cab831de 100644 --- a/transaction-view/src/address_table_lookup_meta.rs +++ b/transaction-view/src/address_table_lookup_meta.rs @@ -4,7 +4,7 @@ use { advance_offset_for_array, advance_offset_for_type, check_remaining, optimized_read_compressed_u16, read_byte, read_slice_data, read_type, }, - result::{Result, TransactionParsingError}, + result::{Result, TransactionViewError}, }, solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature}, solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup, @@ -51,6 +51,10 @@ pub(crate) struct AddressTableLookupMeta { pub(crate) num_address_table_lookups: u8, /// The offset to the first address table lookup in the transaction. pub(crate) offset: u16, + /// The total number of writable lookup accounts in the transaction. + pub(crate) total_writable_lookup_accounts: u16, + /// The total number of readonly lookup accounts in the transaction. 
+ pub(crate) total_readonly_lookup_accounts: u16, } impl AddressTableLookupMeta { @@ -66,7 +70,7 @@ impl AddressTableLookupMeta { const _: () = assert!(MAX_ATLS_PER_PACKET & 0b1000_0000 == 0); let num_address_table_lookups = read_byte(bytes, offset)?; if num_address_table_lookups > MAX_ATLS_PER_PACKET { - return Err(TransactionParsingError); + return Err(TransactionViewError::ParseError); } // Check that the remaining bytes are enough to hold the ATLs. @@ -80,6 +84,13 @@ impl AddressTableLookupMeta { // length is less than u16::MAX, so we can safely cast to u16. let address_table_lookups_offset = *offset as u16; + // Check that there is no chance of overflow when calculating the total + // number of writable and readonly lookup accounts using a u32. + const _: () = + assert!(u16::MAX as usize * MAX_ATLS_PER_PACKET as usize <= u32::MAX as usize); + let mut total_writable_lookup_accounts: u32 = 0; + let mut total_readonly_lookup_accounts: u32 = 0; + // The ATLs do not have a fixed size. So we must iterate over // each ATL to find the total size of the ATLs in the packet, // and check for any malformed ATLs or buffer overflows. @@ -94,16 +105,24 @@ impl AddressTableLookupMeta { // Read the number of write indexes, and then update the offset. let num_write_accounts = optimized_read_compressed_u16(bytes, offset)?; + total_writable_lookup_accounts = + total_writable_lookup_accounts.wrapping_add(u32::from(num_write_accounts)); advance_offset_for_array::(bytes, offset, num_write_accounts)?; // Read the number of read indexes, and then update the offset. let num_read_accounts = optimized_read_compressed_u16(bytes, offset)?; - advance_offset_for_array::(bytes, offset, num_read_accounts)? + total_readonly_lookup_accounts = + total_readonly_lookup_accounts.wrapping_add(u32::from(num_read_accounts)); + advance_offset_for_array::(bytes, offset, num_read_accounts)?; } Ok(Self { num_address_table_lookups, offset: address_table_lookups_offset, + total_writable_lookup_accounts: u16::try_from(total_writable_lookup_accounts) + .map_err(|_| TransactionViewError::SanitizeError)?, + total_readonly_lookup_accounts: u16::try_from(total_readonly_lookup_accounts) + .map_err(|_| TransactionViewError::SanitizeError)?, }) } } @@ -195,6 +214,8 @@ mod tests { assert_eq!(meta.num_address_table_lookups, 0); assert_eq!(meta.offset, 1); assert_eq!(offset, bytes.len()); + assert_eq!(meta.total_writable_lookup_accounts, 0); + assert_eq!(meta.total_readonly_lookup_accounts, 0); } #[test] @@ -221,6 +242,8 @@ mod tests { assert_eq!(meta.num_address_table_lookups, 1); assert_eq!(meta.offset, 1); assert_eq!(offset, bytes.len()); + assert_eq!(meta.total_writable_lookup_accounts, 3); + assert_eq!(meta.total_readonly_lookup_accounts, 3); } #[test] @@ -234,7 +257,7 @@ mod tests { MessageAddressTableLookup { account_key: Pubkey::new_unique(), writable_indexes: vec![1, 2, 3], - readonly_indexes: vec![4, 5, 6], + readonly_indexes: vec![4, 5], }, ])) .unwrap(); @@ -243,6 +266,8 @@ mod tests { assert_eq!(meta.num_address_table_lookups, 2); assert_eq!(meta.offset, 1); assert_eq!(offset, bytes.len()); + assert_eq!(meta.total_writable_lookup_accounts, 6); + assert_eq!(meta.total_readonly_lookup_accounts, 5); } #[test] diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs index 9e2724e3cac6de..6a147b69c2038e 100644 --- a/transaction-view/src/bytes.rs +++ b/transaction-view/src/bytes.rs @@ -1,4 +1,4 @@ -use crate::result::{Result, TransactionParsingError}; +use crate::result::{Result, TransactionViewError}; /// Check that 
the buffer has at least `len` bytes remaining starting at /// `offset`. Returns Err if the buffer is too short. @@ -12,7 +12,7 @@ use crate::result::{Result, TransactionParsingError}; #[inline(always)] pub fn check_remaining(bytes: &[u8], offset: usize, num_bytes: usize) -> Result<()> { if num_bytes > bytes.len().wrapping_sub(offset) { - Err(TransactionParsingError) + Err(TransactionViewError::ParseError) } else { Ok(()) } @@ -24,7 +24,10 @@ pub fn check_remaining(bytes: &[u8], offset: usize, num_bytes: usize) -> Result< pub fn read_byte(bytes: &[u8], offset: &mut usize) -> Result { // Implicitly checks that the offset is within bounds, no need // to call `check_remaining` explicitly here. - let value = bytes.get(*offset).copied().ok_or(TransactionParsingError); + let value = bytes + .get(*offset) + .copied() + .ok_or(TransactionViewError::ParseError); *offset = offset.wrapping_add(1); value } @@ -49,10 +52,10 @@ pub fn read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result { // to call check_remaining explicitly here. let byte = *bytes .get(offset.wrapping_add(i)) - .ok_or(TransactionParsingError)?; + .ok_or(TransactionViewError::ParseError)?; // non-minimal encoding or overflow if (i > 0 && byte == 0) || (i == 2 && byte > 3) { - return Err(TransactionParsingError); + return Err(TransactionViewError::ParseError); } result |= ((byte & 0x7F) as u16) << shift; shift = shift.wrapping_add(7); @@ -86,7 +89,7 @@ pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result let mut result = 0u16; // First byte - let byte1 = *bytes.get(*offset).ok_or(TransactionParsingError)?; + let byte1 = *bytes.get(*offset).ok_or(TransactionViewError::ParseError)?; result |= (byte1 & 0x7F) as u16; if byte1 & 0x80 == 0 { *offset = offset.wrapping_add(1); @@ -96,9 +99,9 @@ pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result // Second byte let byte2 = *bytes .get(offset.wrapping_add(1)) - .ok_or(TransactionParsingError)?; + .ok_or(TransactionViewError::ParseError)?; if byte2 == 0 || byte2 & 0x80 != 0 { - return Err(TransactionParsingError); // non-minimal encoding or overflow + return Err(TransactionViewError::ParseError); // non-minimal encoding or overflow } result |= ((byte2 & 0x7F) as u16) << 7; *offset = offset.wrapping_add(2); diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index def04240b2aab7..4058c88fa83034 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -8,6 +8,7 @@ mod address_table_lookup_meta; mod instructions_meta; mod message_header_meta; pub mod result; +mod sanitize; mod signature_meta; pub mod static_account_keys_meta; pub mod transaction_data; diff --git a/transaction-view/src/message_header_meta.rs b/transaction-view/src/message_header_meta.rs index dfc04766958a28..b9f40e3cb7ef11 100644 --- a/transaction-view/src/message_header_meta.rs +++ b/transaction-view/src/message_header_meta.rs @@ -1,7 +1,7 @@ use { crate::{ bytes::read_byte, - result::{Result, TransactionParsingError}, + result::{Result, TransactionViewError}, }, solana_sdk::message::MESSAGE_VERSION_PREFIX, }; @@ -49,7 +49,7 @@ impl MessageHeaderMeta { let version = message_prefix & !MESSAGE_VERSION_PREFIX; match version { 0 => (TransactionVersion::V0, read_byte(bytes, offset)?), - _ => return Err(TransactionParsingError), + _ => return Err(TransactionViewError::ParseError), } } else { // Legacy transaction. 
The `message_prefix` that was just read is diff --git a/transaction-view/src/result.rs b/transaction-view/src/result.rs index 1997a784b73650..b94c6b26e63a58 100644 --- a/transaction-view/src/result.rs +++ b/transaction-view/src/result.rs @@ -1,3 +1,8 @@ #[derive(Debug, PartialEq, Eq)] -pub struct TransactionParsingError; -pub type Result = core::result::Result; // no distinction between errors for now +#[repr(u8)] // repr(u8) is used to ensure that the enum is represented as a single byte in memory. +pub enum TransactionViewError { + ParseError, + SanitizeError, +} + +pub type Result = core::result::Result; diff --git a/transaction-view/src/sanitize.rs b/transaction-view/src/sanitize.rs new file mode 100644 index 00000000000000..b1aff7bb70cdd7 --- /dev/null +++ b/transaction-view/src/sanitize.rs @@ -0,0 +1,544 @@ +use crate::{ + result::{Result, TransactionViewError}, + transaction_data::TransactionData, + transaction_view::UnsanitizedTransactionView, +}; + +pub(crate) fn sanitize(view: &UnsanitizedTransactionView) -> Result<()> { + sanitize_signatures(view)?; + sanitize_account_access(view)?; + sanitize_instructions(view)?; + sanitize_address_table_lookups(view) +} + +fn sanitize_signatures(view: &UnsanitizedTransactionView) -> Result<()> { + // Check the required number of signatures matches the number of signatures. + if view.num_signatures() != view.num_required_signatures() { + return Err(TransactionViewError::SanitizeError); + } + + // Each signature is associated with a unique static public key. + // Check that there are at least as many static account keys as signatures. + if view.num_static_account_keys() < view.num_signatures() { + return Err(TransactionViewError::SanitizeError); + } + + Ok(()) +} + +fn sanitize_account_access(view: &UnsanitizedTransactionView) -> Result<()> { + // Check there is no overlap of signing area and readonly non-signing area. + // We have already checked that `num_required_signatures` is less than or equal to `num_static_account_keys`, + // so it is safe to use wrapping arithmetic. + if view.num_readonly_unsigned_accounts() + > view + .num_static_account_keys() + .wrapping_sub(view.num_required_signatures()) + { + return Err(TransactionViewError::SanitizeError); + } + + // Check there is at least 1 writable fee-payer account. + if view.num_readonly_signed_accounts() >= view.num_required_signatures() { + return Err(TransactionViewError::SanitizeError); + } + + // Check there are not more than 256 accounts. + if total_number_of_accounts(view) > 256 { + return Err(TransactionViewError::SanitizeError); + } + + Ok(()) +} + +fn sanitize_instructions(view: &UnsanitizedTransactionView) -> Result<()> { + // already verified there is at least one static account. + let max_program_id_index = view.num_static_account_keys().wrapping_sub(1); + // verified that there are no more than 256 accounts in `sanitize_account_access` + let max_account_index = total_number_of_accounts(view).wrapping_sub(1) as u8; + + for instruction in view.instructions_iter() { + // Check that program indexes are static account keys. + if instruction.program_id_index > max_program_id_index { + return Err(TransactionViewError::SanitizeError); + } + + // Check that the program index is not the fee-payer. + if instruction.program_id_index == 0 { + return Err(TransactionViewError::SanitizeError); + } + + // Check that all account indexes are valid. 
+ for account_index in instruction.accounts.iter().copied() { + if account_index > max_account_index { + return Err(TransactionViewError::SanitizeError); + } + } + } + + Ok(()) +} + +fn sanitize_address_table_lookups( + view: &UnsanitizedTransactionView, +) -> Result<()> { + for address_table_lookup in view.address_table_lookup_iter() { + // Check that there is at least one account lookup. + if address_table_lookup.writable_indexes.is_empty() + && address_table_lookup.readonly_indexes.is_empty() + { + return Err(TransactionViewError::SanitizeError); + } + } + + Ok(()) +} + +fn total_number_of_accounts(view: &UnsanitizedTransactionView) -> u16 { + u16::from(view.num_static_account_keys()) + .saturating_add(view.total_writable_lookup_accounts()) + .saturating_add(view.total_readonly_lookup_accounts()) +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::transaction_view::TransactionView, + solana_sdk::{ + hash::Hash, + instruction::CompiledInstruction, + message::{ + v0::{self, MessageAddressTableLookup}, + Message, MessageHeader, VersionedMessage, + }, + pubkey::Pubkey, + signature::Signature, + system_instruction, + transaction::VersionedTransaction, + }, + }; + + fn create_legacy_transaction( + num_signatures: u8, + header: MessageHeader, + account_keys: Vec, + instructions: Vec, + ) -> VersionedTransaction { + VersionedTransaction { + signatures: vec![Signature::default(); num_signatures as usize], + message: VersionedMessage::Legacy(Message { + header, + account_keys, + recent_blockhash: Hash::default(), + instructions, + }), + } + } + + fn create_v0_transaction( + num_signatures: u8, + header: MessageHeader, + account_keys: Vec, + instructions: Vec, + address_table_lookups: Vec, + ) -> VersionedTransaction { + VersionedTransaction { + signatures: vec![Signature::default(); num_signatures as usize], + message: VersionedMessage::V0(v0::Message { + header, + account_keys, + recent_blockhash: Hash::default(), + instructions, + address_table_lookups, + }), + } + } + + fn multiple_transfers() -> VersionedTransaction { + let payer = Pubkey::new_unique(); + VersionedTransaction { + signatures: vec![Signature::default()], // 1 signature to be valid. + message: VersionedMessage::Legacy(Message::new( + &[ + system_instruction::transfer(&payer, &Pubkey::new_unique(), 1), + system_instruction::transfer(&payer, &Pubkey::new_unique(), 1), + ], + Some(&payer), + )), + } + } + + #[test] + fn test_sanitize_multiple_transfers() { + let transaction = multiple_transfers(); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert!(view.sanitize().is_ok()); + } + + #[test] + fn test_sanitize_signatures() { + // Too few signatures. + { + let transaction = create_legacy_transaction( + 1, + MessageHeader { + num_required_signatures: 2, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + (0..3).map(|_| Pubkey::new_unique()).collect(), + vec![], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_signatures(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Too many signatures. 
+ { + let transaction = create_legacy_transaction( + 2, + MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + (0..3).map(|_| Pubkey::new_unique()).collect(), + vec![], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_signatures(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Not enough static accounts. + { + let transaction = create_legacy_transaction( + 2, + MessageHeader { + num_required_signatures: 2, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + (0..1).map(|_| Pubkey::new_unique()).collect(), + vec![], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_signatures(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Not enough static accounts - with look up accounts + { + let transaction = create_v0_transaction( + 2, + MessageHeader { + num_required_signatures: 2, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + (0..1).map(|_| Pubkey::new_unique()).collect(), + vec![], + vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0, 1, 2, 3, 4, 5], + readonly_indexes: vec![6, 7, 8], + }], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_signatures(&view), + Err(TransactionViewError::SanitizeError) + ); + } + } + + #[test] + fn test_sanitize_account_access() { + // Overlap of signing and readonly non-signing accounts. + { + let transaction = create_legacy_transaction( + 1, + MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 2, + }, + (0..2).map(|_| Pubkey::new_unique()).collect(), + vec![], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_account_access(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Not enough writable accounts. + { + let transaction = create_legacy_transaction( + 1, + MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 1, + num_readonly_unsigned_accounts: 0, + }, + (0..2).map(|_| Pubkey::new_unique()).collect(), + vec![], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_account_access(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Too many accounts. 
+ { + let transaction = create_v0_transaction( + 2, + MessageHeader { + num_required_signatures: 2, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + (0..1).map(|_| Pubkey::new_unique()).collect(), + vec![], + vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..100).collect(), + readonly_indexes: (100..200).collect(), + }, + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (100..200).collect(), + readonly_indexes: (0..100).collect(), + }, + ], + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_account_access(&view), + Err(TransactionViewError::SanitizeError) + ); + } + } + + #[test] + fn test_sanitize_instructions() { + let num_signatures = 1; + let header = MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 1, + }; + let account_keys = vec![ + Pubkey::new_unique(), + Pubkey::new_unique(), + Pubkey::new_unique(), + ]; + let valid_instructions = vec![ + CompiledInstruction { + program_id_index: 1, + accounts: vec![0, 1], + data: vec![1, 2, 3], + }, + CompiledInstruction { + program_id_index: 2, + accounts: vec![1, 0], + data: vec![3, 2, 1, 4], + }, + ]; + let atls = vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0, 1], + readonly_indexes: vec![2], + }]; + + // Verify that the unmodified transaction(s) are valid/sanitized. + { + let transaction = create_legacy_transaction( + num_signatures, + header, + account_keys.clone(), + valid_instructions.clone(), + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert!(sanitize_instructions(&view).is_ok()); + + let transaction = create_v0_transaction( + num_signatures, + header, + account_keys.clone(), + valid_instructions.clone(), + atls.clone(), + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert!(sanitize_instructions(&view).is_ok()); + } + + for instruction_index in 0..valid_instructions.len() { + // Invalid program index. + { + let mut instructions = valid_instructions.clone(); + instructions[instruction_index].program_id_index = account_keys.len() as u8; + let transaction = create_legacy_transaction( + num_signatures, + header, + account_keys.clone(), + instructions, + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_instructions(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Invalid program index with lookups. + { + let mut instructions = valid_instructions.clone(); + instructions[instruction_index].program_id_index = account_keys.len() as u8; + let transaction = create_v0_transaction( + num_signatures, + header, + account_keys.clone(), + instructions, + atls.clone(), + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_instructions(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Program index is fee-payer. 
+ { + let mut instructions = valid_instructions.clone(); + instructions[instruction_index].program_id_index = 0; + let transaction = create_legacy_transaction( + num_signatures, + header, + account_keys.clone(), + instructions, + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_instructions(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Invalid account index. + { + let mut instructions = valid_instructions.clone(); + instructions[instruction_index] + .accounts + .push(account_keys.len() as u8); + let transaction = create_legacy_transaction( + num_signatures, + header, + account_keys.clone(), + instructions, + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_instructions(&view), + Err(TransactionViewError::SanitizeError) + ); + } + + // Invalid account index with v0. + { + let num_lookup_accounts = + atls[0].writable_indexes.len() + atls[0].readonly_indexes.len(); + let total_accounts = (account_keys.len() + num_lookup_accounts) as u8; + let mut instructions = valid_instructions.clone(); + instructions[instruction_index] + .accounts + .push(total_accounts); + let transaction = create_v0_transaction( + num_signatures, + header, + account_keys.clone(), + instructions, + atls.clone(), + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_instructions(&view), + Err(TransactionViewError::SanitizeError) + ); + } + } + } + + #[test] + fn test_sanitize_address_table_lookups() { + fn create_transaction(empty_index: usize) -> VersionedTransaction { + let payer = Pubkey::new_unique(); + let mut address_table_lookups = vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0, 1], + readonly_indexes: vec![], + }, + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0, 1], + readonly_indexes: vec![], + }, + ]; + address_table_lookups[empty_index].writable_indexes.clear(); + create_v0_transaction( + 1, + MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + vec![payer], + vec![], + address_table_lookups, + ) + } + + for empty_index in 0..2 { + let transaction = create_transaction(empty_index); + assert_eq!( + transaction.message.address_table_lookups().unwrap().len(), + 2 + ); + let data = bincode::serialize(&transaction).unwrap(); + let view = TransactionView::try_new_unsanitized(data.as_ref()).unwrap(); + assert_eq!( + sanitize_address_table_lookups(&view), + Err(TransactionViewError::SanitizeError) + ); + } + } +} diff --git a/transaction-view/src/signature_meta.rs b/transaction-view/src/signature_meta.rs index 8d98554e195a11..227649483ccfe3 100644 --- a/transaction-view/src/signature_meta.rs +++ b/transaction-view/src/signature_meta.rs @@ -1,7 +1,7 @@ use { crate::{ bytes::{advance_offset_for_array, read_byte}, - result::{Result, TransactionParsingError}, + result::{Result, TransactionViewError}, }, solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature}, }; @@ -35,7 +35,7 @@ impl SignatureMeta { let num_signatures = read_byte(bytes, offset)?; if num_signatures == 0 || num_signatures > MAX_SIGNATURES_PER_PACKET { - return Err(TransactionParsingError); + return 
Err(TransactionViewError::ParseError); } let signature_offset = *offset as u16; diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_meta.rs index f1f3b64f42bf83..46bd6fb5babf5d 100644 --- a/transaction-view/src/static_account_keys_meta.rs +++ b/transaction-view/src/static_account_keys_meta.rs @@ -1,7 +1,7 @@ use { crate::{ bytes::{advance_offset_for_array, read_byte}, - result::{Result, TransactionParsingError}, + result::{Result, TransactionViewError}, }, solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey}, }; @@ -30,7 +30,7 @@ impl StaticAccountKeysMeta { let num_static_accounts = read_byte(bytes, offset)?; if num_static_accounts == 0 || num_static_accounts > MAX_STATIC_ACCOUNTS_PER_PACKET { - return Err(TransactionParsingError); + return Err(TransactionViewError::ParseError); } // We also know that the offset must be less than 3 here, since the diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs index 39fe9d5700fc5a..67a9179b894734 100644 --- a/transaction-view/src/transaction_meta.rs +++ b/transaction-view/src/transaction_meta.rs @@ -4,7 +4,7 @@ use { bytes::advance_offset_for_type, instructions_meta::{InstructionsIterator, InstructionsMeta}, message_header_meta::{MessageHeaderMeta, TransactionVersion}, - result::{Result, TransactionParsingError}, + result::{Result, TransactionViewError}, signature_meta::SignatureMeta, static_account_keys_meta::StaticAccountKeysMeta, }, @@ -46,13 +46,15 @@ impl TransactionMeta { TransactionVersion::Legacy => AddressTableLookupMeta { num_address_table_lookups: 0, offset: 0, + total_writable_lookup_accounts: 0, + total_readonly_lookup_accounts: 0, }, TransactionVersion::V0 => AddressTableLookupMeta::try_new(bytes, &mut offset)?, }; // Verify that the entire transaction was parsed. if offset != bytes.len() { - return Err(TransactionParsingError); + return Err(TransactionViewError::ParseError); } Ok(Self { @@ -105,6 +107,16 @@ impl TransactionMeta { self.address_table_lookup.num_address_table_lookups } + /// Return the number of writable lookup accounts in the transaction. + pub(crate) fn total_writable_lookup_accounts(&self) -> u16 { + self.address_table_lookup.total_writable_lookup_accounts + } + + /// Return the number of readonly lookup accounts in the transaction. + pub(crate) fn total_readonly_lookup_accounts(&self) -> u16 { + self.address_table_lookup.total_readonly_lookup_accounts + } + /// Return the offset to the message. pub(crate) fn message_offset(&self) -> u16 { self.message_header.offset diff --git a/transaction-view/src/transaction_view.rs b/transaction-view/src/transaction_view.rs index 6ee705e6c5560e..e1b4a98d37f56b 100644 --- a/transaction-view/src/transaction_view.rs +++ b/transaction-view/src/transaction_view.rs @@ -2,11 +2,16 @@ use { crate::{ address_table_lookup_meta::AddressTableLookupIterator, instructions_meta::InstructionsIterator, message_header_meta::TransactionVersion, - result::Result, transaction_data::TransactionData, transaction_meta::TransactionMeta, + result::Result, sanitize::sanitize, transaction_data::TransactionData, + transaction_meta::TransactionMeta, }, solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature}, }; +// alias for convenience +pub type UnsanitizedTransactionView = TransactionView; +pub type SanitizedTransactionView = TransactionView; + /// A view into a serialized transaction. 
///
/// This struct provides access to the transaction data without
@@ -14,19 +19,37 @@ use {
 /// about the layout of the serialized transaction.
 /// The owned `data` is abstracted through the `TransactionData` trait,
 /// so that different containers for the serialized transaction can be used.
-pub struct TransactionView {
+pub struct TransactionView {
     data: D,
     meta: TransactionMeta,
 }

-impl TransactionView {
-    /// Creates a new `TransactionView` from the given serialized transaction data.
-    /// Returns an error if the data does not meet the expected format.
-    pub fn try_new(data: D) -> Result {
+impl TransactionView {
+    /// Creates a new `TransactionView` without running sanitization checks.
+    pub fn try_new_unsanitized(data: D) -> Result {
         let meta = TransactionMeta::try_new(data.data())?;
         Ok(Self { data, meta })
     }
+
+    /// Sanitizes the transaction view, returning a sanitized view on success.
+    pub fn sanitize(self) -> Result> {
+        sanitize(&self)?;
+        Ok(SanitizedTransactionView {
+            data: self.data,
+            meta: self.meta,
+        })
+    }
+}
+
+impl TransactionView {
+    /// Creates a new `TransactionView`, running sanitization checks.
+    pub fn try_new_sanitized(data: D) -> Result {
+        let unsanitized_view = TransactionView::try_new_unsanitized(data)?;
+        unsanitized_view.sanitize()
+    }
+}
+
+impl TransactionView {
     /// Return the number of signatures in the transaction.
     pub fn num_signatures(&self) -> u8 {
         self.meta.num_signatures()
@@ -67,6 +90,16 @@ impl TransactionView {
         self.meta.num_address_table_lookups()
     }

+    /// Return the number of writable lookup accounts in the transaction.
+    pub fn total_writable_lookup_accounts(&self) -> u16 {
+        self.meta.total_writable_lookup_accounts()
+    }
+
+    /// Return the number of readonly lookup accounts in the transaction.
+    pub fn total_readonly_lookup_accounts(&self) -> u16 {
+        self.meta.total_readonly_lookup_accounts()
+    }
+
     /// Return the slice of signatures in the transaction.
     pub fn signatures(&self) -> &[Signature] {
         let data = self.data();
@@ -130,7 +163,7 @@ mod tests {

     fn verify_transaction_view_meta(tx: &VersionedTransaction) {
         let bytes = bincode::serialize(tx).unwrap();
-        let view = TransactionView::try_new(bytes.as_ref()).unwrap();
+        let view = TransactionView::try_new_unsanitized(bytes.as_ref()).unwrap();

         assert_eq!(view.num_signatures(), tx.signatures.len() as u8);

From 39af57f29eb891e94601888654f2c7e7e3843a28 Mon Sep 17 00:00:00 2001
From: ripatel-fd
Date: Thu, 29 Aug 2024 18:29:22 +0200
Subject: [PATCH 258/529] Support SSLKEYLOGFILE (#2539)

This change will help production debugging of QUIC traffic, which is
otherwise impossible to analyze. Activates logging of TLS encryption keys
for incoming connections when the SSLKEYLOGFILE env var is set. Otherwise
(by default) does not log anything or change any existing behavior.
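As a rough sketch of the mechanism (the helper below is illustrative, not a
function added by this patch), the opt-in logic lives entirely inside rustls:

    use std::sync::Arc;
    use rustls::{KeyLogFile, ServerConfig};

    // Illustrative only: `config` stands in for whichever ServerConfig the
    // QUIC endpoint already builds.
    fn enable_key_logging(mut config: ServerConfig) -> ServerConfig {
        // KeyLogFile::new() inspects SSLKEYLOGFILE once at construction; if
        // the variable is unset, the logger is inert and nothing is written.
        config.key_log = Arc::new(KeyLogFile::new());
        config
    }

The resulting file uses the standard NSS key log format, so a capture tool
such as Wireshark can decrypt the recorded QUIC sessions via its
"(Pre)-Master-Secret log filename" TLS preference.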
Co-authored-by: Richard Patel --- core/src/repair/quic_endpoint.rs | 3 ++- streamer/src/quic.rs | 3 ++- turbine/src/quic_endpoint.rs | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 20484ddb3ed1b6..abaf6d03483f10 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -9,7 +9,7 @@ use { EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, ServerConfig, TokioRuntime, TransportConfig, VarInt, WriteError, }, - rustls::{Certificate, PrivateKey}, + rustls::{Certificate, KeyLogFile, PrivateKey}, serde_bytes::ByteBuf, solana_quic_client::nonblocking::quic_client::SkipServerVerification, solana_runtime::bank_forks::BankForks, @@ -174,6 +174,7 @@ fn new_server_config(cert: Certificate, key: PrivateKey) -> Result Result Date: Thu, 29 Aug 2024 13:48:16 -0500 Subject: [PATCH 259/529] TransactionView: inline simple accessors (#2789) --- .../src/address_table_lookup_meta.rs | 1 + transaction-view/src/instructions_meta.rs | 1 + transaction-view/src/transaction_meta.rs | 16 ++++++++++++++++ transaction-view/src/transaction_view.rs | 16 ++++++++++++++++ 4 files changed, 34 insertions(+) diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_meta.rs index 01f3b2cab831de..297ea71c245767 100644 --- a/transaction-view/src/address_table_lookup_meta.rs +++ b/transaction-view/src/address_table_lookup_meta.rs @@ -137,6 +137,7 @@ pub struct AddressTableLookupIterator<'a> { impl<'a> Iterator for AddressTableLookupIterator<'a> { type Item = SVMMessageAddressTableLookup<'a>; + #[inline] fn next(&mut self) -> Option { if self.index < self.num_address_table_lookups { self.index = self.index.wrapping_add(1); diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_meta.rs index 5b0e6153ad45d3..42a0bdd9825b98 100644 --- a/transaction-view/src/instructions_meta.rs +++ b/transaction-view/src/instructions_meta.rs @@ -81,6 +81,7 @@ pub struct InstructionsIterator<'a> { impl<'a> Iterator for InstructionsIterator<'a> { type Item = SVMInstruction<'a>; + #[inline] fn next(&mut self) -> Option { if self.index < self.num_instructions { self.index = self.index.wrapping_add(1); diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_meta.rs index 67a9179b894734..376ac6b2c08cb5 100644 --- a/transaction-view/src/transaction_meta.rs +++ b/transaction-view/src/transaction_meta.rs @@ -68,56 +68,67 @@ impl TransactionMeta { } /// Return the number of signatures in the transaction. + #[inline] pub(crate) fn num_signatures(&self) -> u8 { self.signature.num_signatures } /// Return the version of the transaction. + #[inline] pub(crate) fn version(&self) -> TransactionVersion { self.message_header.version } /// Return the number of required signatures in the transaction. + #[inline] pub(crate) fn num_required_signatures(&self) -> u8 { self.message_header.num_required_signatures } /// Return the number of readonly signed accounts in the transaction. + #[inline] pub(crate) fn num_readonly_signed_accounts(&self) -> u8 { self.message_header.num_readonly_signed_accounts } /// Return the number of readonly unsigned accounts in the transaction. + #[inline] pub(crate) fn num_readonly_unsigned_accounts(&self) -> u8 { self.message_header.num_readonly_unsigned_accounts } /// Return the number of static account keys in the transaction. 
+ #[inline] pub(crate) fn num_static_account_keys(&self) -> u8 { self.static_account_keys.num_static_accounts } /// Return the number of instructions in the transaction. + #[inline] pub(crate) fn num_instructions(&self) -> u16 { self.instructions.num_instructions } /// Return the number of address table lookups in the transaction. + #[inline] pub(crate) fn num_address_table_lookups(&self) -> u8 { self.address_table_lookup.num_address_table_lookups } /// Return the number of writable lookup accounts in the transaction. + #[inline] pub(crate) fn total_writable_lookup_accounts(&self) -> u16 { self.address_table_lookup.total_writable_lookup_accounts } /// Return the number of readonly lookup accounts in the transaction. + #[inline] pub(crate) fn total_readonly_lookup_accounts(&self) -> u16 { self.address_table_lookup.total_readonly_lookup_accounts } /// Return the offset to the message. + #[inline] pub(crate) fn message_offset(&self) -> u16 { self.message_header.offset } @@ -129,6 +140,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. + #[inline] pub(crate) unsafe fn signatures<'a>(&self, bytes: &'a [u8]) -> &'a [Signature] { // Verify at compile time there are no alignment constraints. const _: () = assert!( @@ -161,6 +173,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. + #[inline] pub(crate) unsafe fn static_account_keys<'a>(&self, bytes: &'a [u8]) -> &'a [Pubkey] { // Verify at compile time there are no alignment constraints. const _: () = assert!(core::mem::align_of::() == 1, "Pubkey alignment"); @@ -191,6 +204,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. + #[inline] pub(crate) unsafe fn recent_blockhash<'a>(&self, bytes: &'a [u8]) -> &'a Hash { // Verify at compile time there are no alignment constraints. const _: () = assert!(core::mem::align_of::() == 1, "Hash alignment"); @@ -210,6 +224,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. + #[inline] pub(crate) unsafe fn instructions_iter<'a>(&self, bytes: &'a [u8]) -> InstructionsIterator<'a> { InstructionsIterator { bytes, @@ -223,6 +238,7 @@ impl TransactionMeta { /// # Safety /// - This function must be called with the same `bytes` slice that was /// used to create the `TransactionMeta` instance. + #[inline] pub(crate) unsafe fn address_table_lookup_iter<'a>( &self, bytes: &'a [u8], diff --git a/transaction-view/src/transaction_view.rs b/transaction-view/src/transaction_view.rs index e1b4a98d37f56b..0c9a9b49063a5f 100644 --- a/transaction-view/src/transaction_view.rs +++ b/transaction-view/src/transaction_view.rs @@ -51,56 +51,67 @@ impl TransactionView { impl TransactionView { /// Return the number of signatures in the transaction. + #[inline] pub fn num_signatures(&self) -> u8 { self.meta.num_signatures() } /// Return the version of the transaction. + #[inline] pub fn version(&self) -> TransactionVersion { self.meta.version() } /// Return the number of required signatures in the transaction. + #[inline] pub fn num_required_signatures(&self) -> u8 { self.meta.num_required_signatures() } /// Return the number of readonly signed accounts in the transaction. 
+ #[inline] pub fn num_readonly_signed_accounts(&self) -> u8 { self.meta.num_readonly_signed_accounts() } /// Return the number of readonly unsigned accounts in the transaction. + #[inline] pub fn num_readonly_unsigned_accounts(&self) -> u8 { self.meta.num_readonly_unsigned_accounts() } /// Return the number of static account keys in the transaction. + #[inline] pub fn num_static_account_keys(&self) -> u8 { self.meta.num_static_account_keys() } /// Return the number of instructions in the transaction. + #[inline] pub fn num_instructions(&self) -> u16 { self.meta.num_instructions() } /// Return the number of address table lookups in the transaction. + #[inline] pub fn num_address_table_lookups(&self) -> u8 { self.meta.num_address_table_lookups() } /// Return the number of writable lookup accounts in the transaction. + #[inline] pub fn total_writable_lookup_accounts(&self) -> u16 { self.meta.total_writable_lookup_accounts() } /// Return the number of readonly lookup accounts in the transaction. + #[inline] pub fn total_readonly_lookup_accounts(&self) -> u16 { self.meta.total_readonly_lookup_accounts() } /// Return the slice of signatures in the transaction. + #[inline] pub fn signatures(&self) -> &[Signature] { let data = self.data(); // SAFETY: `meta` was created from `data`. @@ -108,6 +119,7 @@ impl TransactionView { } /// Return the slice of static account keys in the transaction. + #[inline] pub fn static_account_keys(&self) -> &[Pubkey] { let data = self.data(); // SAFETY: `meta` was created from `data`. @@ -115,6 +127,7 @@ impl TransactionView { } /// Return the recent blockhash in the transaction. + #[inline] pub fn recent_blockhash(&self) -> &Hash { let data = self.data(); // SAFETY: `meta` was created from `data`. @@ -122,6 +135,7 @@ impl TransactionView { } /// Return an iterator over the instructions in the transaction. + #[inline] pub fn instructions_iter(&self) -> InstructionsIterator { let data = self.data(); // SAFETY: `meta` was created from `data`. @@ -129,6 +143,7 @@ impl TransactionView { } /// Return an iterator over the address table lookups in the transaction. + #[inline] pub fn address_table_lookup_iter(&self) -> AddressTableLookupIterator { let data = self.data(); // SAFETY: `meta` was created from `data`. @@ -143,6 +158,7 @@ impl TransactionView { /// Return the serialized **message** data. /// This does not include the signatures. + #[inline] pub fn message_data(&self) -> &[u8] { &self.data()[usize::from(self.meta.message_offset())..] } From 1acdfdebcae61b9ba566af2c72b17630d4b3f596 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 29 Aug 2024 15:24:42 -0500 Subject: [PATCH 260/529] make shrink use in-mem idx info (#2689) * make shrink use in-mem idx info * fix cli arg typo * remove has_written_abnormal_entry_to_disk_flag. 
* pr reviews: local counter * remove the field that hold index entry in memory for shrinking * clippy * default test/bench config to use onlyabnormalwithverify scan option * remove comments * share shrink account populate code --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 150 ++++++++++++------ accounts-db/src/accounts_index.rs | 54 ++++++- .../accounts_index/in_mem_accounts_index.rs | 2 +- accounts-db/src/ancient_append_vecs.rs | 5 +- ledger-tool/src/args.rs | 30 +++- validator/src/cli.rs | 16 ++ validator/src/main.rs | 16 +- 7 files changed, 218 insertions(+), 55 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 0634ab6ba2a1da..1cd8979dd69009 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -42,10 +42,10 @@ use { ZERO_LAMPORT_ACCOUNT_LT_HASH, }, accounts_index::{ - in_mem_accounts_index::StartupStats, AccountMapEntry, AccountSecondaryIndexes, - AccountsIndex, AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, - DiskIndexValue, IndexKey, IndexValue, IsCached, RefCount, ScanConfig, ScanResult, - SlotList, UpsertReclaim, ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, + in_mem_accounts_index::StartupStats, AccountSecondaryIndexes, AccountsIndex, + AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, DiskIndexValue, + IndexKey, IndexValue, IsCached, RefCount, ScanConfig, ScanFilter, ScanResult, SlotList, + UpsertReclaim, ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, accounts_index_storage::Startup, @@ -484,9 +484,6 @@ pub(crate) struct ShrinkCollect<'a, T: ShrinkCollectRefs<'a>> { pub(crate) total_starting_accounts: usize, /// true if all alive accounts are zero lamports pub(crate) all_are_zero_lamports: bool, - /// index entries that need to be held in memory while shrink is in progress - /// These aren't read - they are just held so that entries cannot be flushed. - pub(crate) _index_entries_being_shrunk: Vec>, } pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { @@ -503,6 +500,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { test_partitioned_epoch_rewards: TestPartitionedEpochRewards::CompareResults, test_skip_rewrites_but_include_in_bank_hash: false, storage_access: StorageAccess::Mmap, + scan_filter_for_shrinking: ScanFilter::OnlyAbnormalWithVerify, }; pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), @@ -518,6 +516,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig test_partitioned_epoch_rewards: TestPartitionedEpochRewards::None, test_skip_rewrites_but_include_in_bank_hash: false, storage_access: StorageAccess::Mmap, + scan_filter_for_shrinking: ScanFilter::OnlyAbnormalWithVerify, }; pub type BinnedHashData = Vec>; @@ -531,8 +530,6 @@ struct LoadAccountsIndexForShrink<'a, T: ShrinkCollectRefs<'a>> { zero_lamport_single_ref_pubkeys: Vec<&'a Pubkey>, /// true if all alive accounts are zero lamport accounts all_are_zero_lamports: bool, - /// index entries we need to hold onto to keep them from getting flushed - index_entries_being_shrunk: Vec>, } /// reference an account found during scanning a storage. 
This is a byval struct to replace @@ -611,6 +608,7 @@ pub struct AccountsDbConfig { pub create_ancient_storage: CreateAncientStorage, pub test_partitioned_epoch_rewards: TestPartitionedEpochRewards, pub storage_access: StorageAccess, + pub scan_filter_for_shrinking: ScanFilter, } #[cfg(not(test))] @@ -1506,6 +1504,9 @@ pub struct AccountsDb { /// method to use for accessing storages storage_access: StorageAccess, + /// index scan filtering for shrinking + scan_filter_for_shrinking: ScanFilter, + /// this will live here until the feature for partitioned epoch rewards is activated. /// At that point, this and other code can be deleted. pub partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig, @@ -2032,6 +2033,8 @@ pub struct ShrinkStats { skipped_shrink: AtomicU64, dead_accounts: AtomicU64, alive_accounts: AtomicU64, + index_scan_returned_none: AtomicU64, + index_scan_returned_some: AtomicU64, accounts_loaded: AtomicU64, purged_zero_lamports: AtomicU64, accounts_not_found_in_index: AtomicU64, @@ -2047,6 +2050,16 @@ impl ShrinkStats { self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "index_scan_returned_none", + self.index_scan_returned_none.swap(0, Ordering::Relaxed), + i64 + ), + ( + "index_scan_returned_some", + self.index_scan_returned_some.swap(0, Ordering::Relaxed), + i64 + ), ( "storage_read_elapsed", self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64, @@ -2159,6 +2172,20 @@ impl ShrinkAncientStats { .swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "index_scan_returned_none", + self.shrink_stats + .index_scan_returned_none + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "index_scan_returned_some", + self.shrink_stats + .index_scan_returned_some + .swap(0, Ordering::Relaxed), + i64 + ), ( "storage_read_elapsed", self.shrink_stats @@ -2509,6 +2536,7 @@ impl AccountsDb { exhaustively_verify_refcounts: false, accounts_file_provider: AccountsFileProvider::default(), storage_access: StorageAccess::default(), + scan_filter_for_shrinking: ScanFilter::default(), partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), test_skip_rewrites_but_include_in_bank_hash: false, @@ -2615,6 +2643,11 @@ impl AccountsDb { .map(|config| config.storage_access) .unwrap_or_default(); + let scan_filter_for_shrinking = accounts_db_config + .as_ref() + .map(|config| config.scan_filter_for_shrinking) + .unwrap_or_default(); + let paths_is_empty = paths.is_empty(); let mut new = Self { paths, @@ -2637,6 +2670,7 @@ impl AccountsDb { exhaustively_verify_refcounts, test_skip_rewrites_but_include_in_bank_hash, storage_access, + scan_filter_for_shrinking, ..Self::default_with_accounts_index( accounts_index, base_working_path, @@ -3388,6 +3422,7 @@ impl AccountsDb { }, None, false, + ScanFilter::All, ); }); found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed); @@ -3915,28 +3950,17 @@ impl AccountsDb { let mut alive = 0; let mut dead = 0; let mut index = 0; + let mut index_scan_returned_some_count = 0; + let mut index_scan_returned_none_count = 0; let mut all_are_zero_lamports = true; - let mut index_entries_being_shrunk = Vec::with_capacity(accounts.len()); let latest_full_snapshot_slot = self.latest_full_snapshot_slot(); self.accounts_index.scan( accounts.iter().map(|account| account.pubkey()), - |pubkey, slots_refs, entry| { + |pubkey, slots_refs, _entry| { let mut result = AccountsIndexScanResult::OnlyKeepInMemoryIfDirty; - if let Some((slot_list, ref_count)) = slots_refs { - 
let stored_account = &accounts[index]; - let is_alive = slot_list.iter().any(|(slot, _acct_info)| { - // if the accounts index contains an entry at this slot, then the append vec we're asking about contains this item and thus, it is alive at this slot - *slot == slot_to_shrink - }); - if !is_alive { - // This pubkey was found in the storage, but no longer exists in the index. - // It would have had a ref to the storage from the initial store, but it will - // not exist in the re-written slot. Unref it to keep the index consistent with - // rewriting the storage entries. - unrefed_pubkeys.push(pubkey); - result = AccountsIndexScanResult::Unref; - dead += 1; - } else if stored_account.is_zero_lamport() + let stored_account = &accounts[index]; + let mut do_populate_accounts_for_shrink = |ref_count, slot_list| { + if stored_account.is_zero_lamport() && ref_count == 1 && latest_full_snapshot_slot .map(|latest_full_snapshot_slot| { @@ -3952,25 +3976,54 @@ impl AccountsDb { [*pubkey].into_iter(), ); } else { - // Hold onto the index entry arc so that it cannot be flushed. - // Since we are shrinking these entries, we need to disambiguate storage ids during this period and those only exist in the in-memory accounts index. - index_entries_being_shrunk.push(Arc::clone(entry.unwrap())); all_are_zero_lamports &= stored_account.is_zero_lamport(); alive_accounts.add(ref_count, stored_account, slot_list); alive += 1; } + }; + if let Some((slot_list, ref_count)) = slots_refs { + index_scan_returned_some_count += 1; + let is_alive = slot_list.iter().any(|(slot, _acct_info)| { + // if the accounts index contains an entry at this slot, then the append vec we're asking about contains this item and thus, it is alive at this slot + *slot == slot_to_shrink + }); + + if !is_alive { + // This pubkey was found in the storage, but no longer exists in the index. + // It would have had a ref to the storage from the initial store, but it will + // not exist in the re-written slot. Unref it to keep the index consistent with + // rewriting the storage entries. + unrefed_pubkeys.push(pubkey); + result = AccountsIndexScanResult::Unref; + dead += 1; + } else { + do_populate_accounts_for_shrink(ref_count, slot_list); + } } else { - stats - .accounts_not_found_in_index - .fetch_add(1, Ordering::Relaxed); + index_scan_returned_none_count += 1; + // getting None here means the account is 'normal' and was written to disk. This means it must have ref_count=1 and + // slot_list.len() = 1. This means it must be alive in this slot. This is by far the most common case. + // Note that we could get Some(...) here if the account is in the in mem index because it is hot. + // Note this could also mean the account isn't on disk either. That would indicate a bug in accounts db. + // Account is alive. 
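+                    // Synthesize what a normal, flushed entry must look like:
+                    // exactly one reference, and a single slot-list entry at
+                    // the slot being shrunk.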
+ let ref_count = 1; + let slot_list = [(slot_to_shrink, AccountInfo::default())]; + do_populate_accounts_for_shrink(ref_count, &slot_list); } index += 1; result }, None, true, + self.scan_filter_for_shrinking, ); assert_eq!(index, std::cmp::min(accounts.len(), count)); + stats + .index_scan_returned_some + .fetch_add(index_scan_returned_some_count, Ordering::Relaxed); + stats + .index_scan_returned_none + .fetch_add(index_scan_returned_none_count, Ordering::Relaxed); stats.alive_accounts.fetch_add(alive, Ordering::Relaxed); stats.dead_accounts.fetch_add(dead, Ordering::Relaxed); @@ -3979,7 +4032,6 @@ impl AccountsDb { unrefed_pubkeys, zero_lamport_single_ref_pubkeys, all_are_zero_lamports, - index_entries_being_shrunk, } } @@ -4091,7 +4143,6 @@ impl AccountsDb { .num_duplicated_accounts .fetch_add(*num_duplicated_accounts as u64, Ordering::Relaxed); let all_are_zero_lamports_collect = Mutex::new(true); - let index_entries_being_shrunk_outer = Mutex::new(Vec::default()); self.thread_pool_clean.install(|| { stored_accounts .par_chunks(SHRINK_COLLECT_CHUNK_SIZE) @@ -4101,7 +4152,6 @@ impl AccountsDb { mut unrefed_pubkeys, all_are_zero_lamports, mut zero_lamport_single_ref_pubkeys, - mut index_entries_being_shrunk, } = self.load_accounts_index_for_shrink(stored_accounts, stats, slot); // collect @@ -4117,10 +4167,6 @@ impl AccountsDb { .lock() .unwrap() .append(&mut zero_lamport_single_ref_pubkeys); - index_entries_being_shrunk_outer - .lock() - .unwrap() - .append(&mut index_entries_being_shrunk); if !all_are_zero_lamports { *all_are_zero_lamports_collect.lock().unwrap() = false; } @@ -4160,7 +4206,6 @@ impl AccountsDb { alive_total_bytes, total_starting_accounts: len, all_are_zero_lamports: all_are_zero_lamports_collect.into_inner().unwrap(), - _index_entries_being_shrunk: index_entries_being_shrunk_outer.into_inner().unwrap(), } } @@ -4192,6 +4237,7 @@ impl AccountsDb { |_pubkey, _slots_refs, _entry| AccountsIndexScanResult::Unref, Some(AccountsIndexScanResult::Unref), false, + ScanFilter::All, ); zero_lamport_single_ref_pubkeys.iter().for_each(|k| { @@ -4317,6 +4363,7 @@ impl AccountsDb { }, None, true, + ScanFilter::All, ); return; } @@ -8160,6 +8207,7 @@ impl AccountsDb { }, Some(AccountsIndexScanResult::Unref), false, + ScanFilter::All, ) }); }); @@ -9260,6 +9308,7 @@ impl AccountsDb { }, None, false, + ScanFilter::All, ); timings .rent_paying @@ -16470,9 +16519,13 @@ pub mod tests { #[test] fn test_combine_ancient_slots_simple() { - for alive in [false, true] { - _ = get_one_ancient_append_vec_and_others(alive, 0); - } + // We used to test 'alive = false' with the old shrinking algorithm, but + // not any more with the new shrinking algorithm. 'alive = false' means + // that we will have account entries that's in the storages but not in + // accounts-db index. This violate the assumption in accounts-db, which + // the new shrinking algorithm now depends on. Therefore, we don't test + // 'alive = false'. + _ = get_one_ancient_append_vec_and_others(true, 0); } fn get_all_accounts_from_storages<'a>( @@ -16877,8 +16930,15 @@ pub mod tests { solana_logger::setup(); // combine 2-4 slots into a single ancient append vec for num_normal_slots in 1..3 { - // but some slots contain only dead accounts - for dead_accounts in 0..=num_normal_slots { + // We used to test dead_accounts for [0..=num_normal_slots]. This + // works with old shrinking algorithm, but no longer works with the + // new shrinking algorithm. 
The new shrinking algorithm requires + // that there should be no accounts entries, which are in the + // storage but not in the accounts-db index. And we expect that this + // assumption to be held by accounts-db. Therefore, we don't test + // dead_accounts anymore. By setting dead_accounts to 0, we + // effectively skip dead_accounts removal in this test. + for dead_accounts in [0] { let mut originals = Vec::default(); // ancient_slot: contains ancient append vec // ancient_slot + 1: contains normal append vec with 1 alive account diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 6c0a1ec62c8f27..77014bced0b261 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -78,6 +78,22 @@ pub(crate) struct GenerateIndexResult { pub duplicates: Option>, } +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +/// which accounts `scan` should load from disk +pub enum ScanFilter { + /// Scan both in-memory and on-disk index + #[default] + All, + + /// abnormal = ref_count != 1 or slot list.len() != 1 + /// Scan only in-memory index and skip on-disk index + OnlyAbnormal, + + /// Similar to `OnlyAbnormal but also check on-disk index to verify the + /// entry on-disk is indeed normal. + OnlyAbnormalWithVerify, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq)] /// how accounts index 'upsert' should handle reclaims pub enum UpsertReclaim { @@ -1399,6 +1415,7 @@ impl + Into> AccountsIndex { mut callback: F, avoid_callback_result: Option, provide_entry_in_callback: bool, + filter: ScanFilter, ) where F: FnMut( &'a Pubkey, @@ -1416,10 +1433,8 @@ impl + Into> AccountsIndex { lock = Some(&self.account_maps[bin]); last_bin = bin; } - // SAFETY: The caller must ensure that if `provide_entry_in_callback` is true, and - // if it's possible for `callback` to clone the entry Arc, then it must also add - // the entry to the in-mem cache if the entry is made dirty. - lock.as_ref().unwrap().get_internal(pubkey, |entry| { + + let mut internal_callback = |entry: Option<&AccountMapEntry>| { let mut cache = false; match entry { Some(locked_entry) => { @@ -1449,7 +1464,36 @@ impl + Into> AccountsIndex { } } (cache, ()) - }); + }; + + match filter { + ScanFilter::All => { + // SAFETY: The caller must ensure that if `provide_entry_in_callback` is true, and + // if it's possible for `callback` to clone the entry Arc, then it must also add + // the entry to the in-mem cache if the entry is made dirty. 
+ lock.as_ref() + .unwrap() + .get_internal(pubkey, internal_callback); + } + ScanFilter::OnlyAbnormal | ScanFilter::OnlyAbnormalWithVerify => { + let found = lock + .as_ref() + .unwrap() + .get_only_in_mem(pubkey, false, |entry| { + internal_callback(entry); + entry.is_some() + }); + if !found && matches!(filter, ScanFilter::OnlyAbnormalWithVerify) { + lock.as_ref().unwrap().get_internal(pubkey, |entry| { + assert!(entry.is_some(), "{pubkey}, entry: {entry:?}"); + let entry = entry.unwrap(); + assert_eq!(entry.ref_count(), 1, "{pubkey}"); + assert_eq!(entry.slot_list.read().unwrap().len(), 1, "{pubkey}"); + (false, ()) + }); + } + } + } }); } diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 15610713a69652..5566ab4420cfd8 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -281,7 +281,7 @@ impl + Into> InMemAccountsIndex( + pub(super) fn get_only_in_mem( &self, pubkey: &K, update_age: bool, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 67cdeb223d4e3a..24ec415b792a3f 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -13,7 +13,7 @@ use { ShrinkCollectAliveSeparatedByRefs, ShrinkStatsSub, }, accounts_file::AccountsFile, - accounts_index::AccountsIndexScanResult, + accounts_index::{AccountsIndexScanResult, ScanFilter}, active_stats::ActiveStatItem, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, }, @@ -500,6 +500,7 @@ impl AccountsDb { }, None, true, + ScanFilter::All, ); }); }); @@ -3876,7 +3877,6 @@ pub mod tests { alive_total_bytes: 0, total_starting_accounts: 0, all_are_zero_lamports: false, - _index_entries_being_shrunk: Vec::default(), }; let accounts_to_combine = AccountsToCombine { accounts_keep_slots: HashMap::default(), @@ -3893,6 +3893,7 @@ pub mod tests { }, None, false, + ScanFilter::All, ); // should have removed all of them assert!(expected_ref_counts.is_empty()); diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 510cbab05dba84..d2bc0f691e130f 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -4,7 +4,7 @@ use { solana_accounts_db::{ accounts_db::{AccountsDb, AccountsDbConfig, CreateAncientStorage}, accounts_file::StorageAccess, - accounts_index::{AccountsIndexConfig, IndexLimitMb}, + accounts_index::{AccountsIndexConfig, IndexLimitMb, ScanFilter}, partitioned_rewards::TestPartitionedEpochRewards, utils::create_and_canonicalize_directories, }, @@ -78,6 +78,20 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { clean", ) .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_scan_filter_for_shrinking") + .long("accounts-db-scan-filter-for-shrinking") + .takes_value(true) + .possible_values(&["all", "only-abnormal", "only-abnormal-with-verify"]) + .help( + "Debug option to use different type of filtering for accounts index scan in \ + shrinking. \"all\" will scan both in-memory and on-disk accounts index, which is the default. \ + \"only-abnormal\" will scan in-memory accounts index only for abnormal entries and \ + skip scanning on-disk accounts index by assuming that on-disk accounts index contains \ + only normal accounts index entry. 
\"only-abnormal-with-verify\" is similar to \ + \"only-abnormal\", which will scan in-memory index for abnormal entries, but will also \ + verify that on-disk account entries are indeed normal.", + ) + .hidden(hidden_unless_forced()), Arg::with_name("accounts_db_test_skip_rewrites") .long("accounts-db-test-skip-rewrites") .help( @@ -296,6 +310,19 @@ pub fn get_accounts_db_config( }) .unwrap_or_default(); + let scan_filter_for_shrinking = arg_matches + .value_of("accounts_db_scan_filter_for_shrinking") + .map(|filter| match filter { + "all" => ScanFilter::All, + "only-abnormal" => ScanFilter::OnlyAbnormal, + "only-abnormal-with-verify" => ScanFilter::OnlyAbnormalWithVerify, + _ => { + // clap will enforce one of the above values is given + unreachable!("invalid value given to accounts_db_scan_filter_for_shrinking") + } + }) + .unwrap_or_default(); + AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_tool_ledger_path), @@ -309,6 +336,7 @@ pub fn get_accounts_db_config( .is_present("accounts_db_test_skip_rewrites"), create_ancient_storage, storage_access, + scan_filter_for_shrinking, ..AccountsDbConfig::default() } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 440cf155d8d892..83b43c07b5b76c 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1313,6 +1313,22 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { ) .hidden(hidden_unless_forced()), ) + .arg( + Arg::with_name("accounts_db_scan_filter_for_shrinking") + .long("accounts-db-scan-filter-for-shrinking") + .takes_value(true) + .possible_values(&["all", "only-abnormal", "only-abnormal-with-verify"]) + .help( + "Debug option to use different type of filtering for accounts index scan in \ + shrinking. \"all\" will scan both in-memory and on-disk accounts index, which is the default. \ + \"only-abnormal\" will scan in-memory accounts index only for abnormal entries and \ + skip scanning on-disk accounts index by assuming that on-disk accounts index contains \ + only normal accounts index entry. 
\"only-abnormal-with-verify\" is similar to \ + \"only-abnormal\", which will scan in-memory index for abnormal entries, but will also \ + verify that on-disk account entries are indeed normal.", + ) + .hidden(hidden_unless_forced()), + ) .arg( Arg::with_name("accounts_db_test_skip_rewrites") .long("accounts-db-test-skip-rewrites") diff --git a/validator/src/main.rs b/validator/src/main.rs index 3738eabced96de..c3cedd49828d06 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -21,7 +21,7 @@ use { accounts_file::StorageAccess, accounts_index::{ AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, - AccountsIndexConfig, IndexLimitMb, + AccountsIndexConfig, IndexLimitMb, ScanFilter, }, partitioned_rewards::TestPartitionedEpochRewards, utils::{create_all_accounts_run_and_snapshot_dirs, create_and_canonicalize_directories}, @@ -1271,6 +1271,19 @@ pub fn main() { }) .unwrap_or_default(); + let scan_filter_for_shrinking = matches + .value_of("accounts_db_scan_filter_for_shrinking") + .map(|filter| match filter { + "all" => ScanFilter::All, + "only-abnormal" => ScanFilter::OnlyAbnormal, + "only-abnormal-with-verify" => ScanFilter::OnlyAbnormalWithVerify, + _ => { + // clap will enforce one of the above values is given + unreachable!("invalid value given to accounts_db_scan_filter_for_shrinking") + } + }) + .unwrap_or_default(); + let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_path.clone()), @@ -1287,6 +1300,7 @@ pub fn main() { test_skip_rewrites_but_include_in_bank_hash: matches .is_present("accounts_db_test_skip_rewrites"), storage_access, + scan_filter_for_shrinking, ..AccountsDbConfig::default() }; From ae932561565d34179bc26f6f739cc87e93191107 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:59:48 -0400 Subject: [PATCH 261/529] update RuntimeTransaction to use ComputeBudgetInstructionDetails in static meta (#2772) refactor: update RuntiemTransaction to use ComputeBudgetInstructionDetails in static meta --- .../src/runtime_transaction.rs | 63 +++++++------------ runtime-transaction/src/transaction_meta.rs | 39 +++--------- 2 files changed, 33 insertions(+), 69 deletions(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 2a8772ce168977..966a24156d084f 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -11,11 +11,12 @@ //! with its dynamic metadata loaded. 
use { crate::{ - instructions_processor::process_compute_budget_instructions, + compute_budget_instruction_details::*, transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, }, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_sdk::{ + feature_set::FeatureSet, hash::Hash, message::AddressLoader, pubkey::Pubkey, @@ -26,7 +27,8 @@ use { std::collections::HashSet, }; -#[derive(Debug, Clone, Eq, PartialEq)] +#[cfg_attr(test, derive(Eq, PartialEq))] +#[derive(Debug)] pub struct RuntimeTransaction { transaction: T, // transaction meta is a collection of fields, it is updated @@ -53,14 +55,10 @@ impl StaticMeta for RuntimeTransaction { fn is_simple_vote_tx(&self) -> bool { self.meta.is_simple_vote_tx } - fn compute_unit_limit(&self) -> u32 { - self.meta.compute_unit_limit - } - fn compute_unit_price(&self) -> u64 { - self.meta.compute_unit_price - } - fn loaded_accounts_bytes(&self) -> u32 { - self.meta.loaded_accounts_bytes + fn compute_budget_limits(&self, _feature_set: &FeatureSet) -> Result { + self.meta + .compute_budget_instruction_details + .sanitize_and_convert_to_compute_budget_limits() } } @@ -72,34 +70,24 @@ impl RuntimeTransaction { message_hash: Option, is_simple_vote_tx: Option, ) -> Result { - let mut meta = TransactionMeta::default(); - meta.set_is_simple_vote_tx( - is_simple_vote_tx - .unwrap_or_else(|| is_simple_vote_transaction(&sanitized_versioned_tx)), - ); - - meta.set_message_hash( - message_hash.unwrap_or_else(|| sanitized_versioned_tx.get_message().message.hash()), - ); - - let ComputeBudgetLimits { - compute_unit_limit, - compute_unit_price, - loaded_accounts_bytes, - .. - } = process_compute_budget_instructions( + let is_simple_vote_tx = is_simple_vote_tx + .unwrap_or_else(|| is_simple_vote_transaction(&sanitized_versioned_tx)); + let message_hash = + message_hash.unwrap_or_else(|| sanitized_versioned_tx.get_message().message.hash()); + let compute_budget_instruction_details = ComputeBudgetInstructionDetails::try_from( sanitized_versioned_tx .get_message() .program_instructions_iter() .map(|(program_id, ix)| (program_id, SVMInstruction::from(ix))), )?; - meta.set_compute_unit_limit(compute_unit_limit); - meta.set_compute_unit_price(compute_unit_price); - meta.set_loaded_accounts_bytes(loaded_accounts_bytes.get()); Ok(Self { transaction: sanitized_versioned_tx, - meta, + meta: TransactionMeta { + message_hash, + is_simple_vote_tx, + compute_budget_instruction_details, + }, }) } } @@ -302,17 +290,14 @@ mod tests { assert_eq!(&hash, runtime_transaction_static.message_hash()); assert!(!runtime_transaction_static.is_simple_vote_tx()); - assert_eq!( - compute_unit_limit, - runtime_transaction_static.compute_unit_limit() - ); - assert_eq!( - compute_unit_price, - runtime_transaction_static.compute_unit_price() - ); + let compute_budget_limits = runtime_transaction_static + .compute_budget_limits(&FeatureSet::default()) + .unwrap(); + assert_eq!(compute_unit_limit, compute_budget_limits.compute_unit_limit); + assert_eq!(compute_unit_price, compute_budget_limits.compute_unit_price); assert_eq!( loaded_accounts_bytes, - runtime_transaction_static.loaded_accounts_bytes() + compute_budget_limits.loaded_accounts_bytes.get() ); } } diff --git a/runtime-transaction/src/transaction_meta.rs b/runtime-transaction/src/transaction_meta.rs index f46fa39c3ab71b..a2b3a746c5ebae 100644 --- a/runtime-transaction/src/transaction_meta.rs +++ b/runtime-transaction/src/transaction_meta.rs @@ -11,16 +11,18 @@ //! 
The StaticMeta and DynamicMeta traits are accessor traits on the //! RuntimeTransaction types, not the TransactionMeta itself. //! -use solana_sdk::hash::Hash; +use { + crate::compute_budget_instruction_details::ComputeBudgetInstructionDetails, + solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, + solana_sdk::{feature_set::FeatureSet, hash::Hash, transaction::Result}, +}; /// metadata can be extracted statically from sanitized transaction, /// for example: message hash, simple-vote-tx flag, limits set by instructions pub trait StaticMeta { fn message_hash(&self) -> &Hash; fn is_simple_vote_tx(&self) -> bool; - fn compute_unit_limit(&self) -> u32; - fn compute_unit_price(&self) -> u64; - fn loaded_accounts_bytes(&self) -> u32; + fn compute_budget_limits(&self, feature_set: &FeatureSet) -> Result; } /// Statically loaded meta is a supertrait of Dynamically loaded meta, when @@ -30,33 +32,10 @@ pub trait StaticMeta { /// on-chain ALT, examples are: transaction usage costs, nonce account. pub trait DynamicMeta: StaticMeta {} -#[derive(Clone, Debug, Default, PartialEq, Eq)] +#[cfg_attr(test, derive(Eq, PartialEq))] +#[derive(Debug, Default)] pub struct TransactionMeta { pub(crate) message_hash: Hash, pub(crate) is_simple_vote_tx: bool, - pub(crate) compute_unit_limit: u32, - pub(crate) compute_unit_price: u64, - pub(crate) loaded_accounts_bytes: u32, -} - -impl TransactionMeta { - pub(crate) fn set_message_hash(&mut self, message_hash: Hash) { - self.message_hash = message_hash; - } - - pub(crate) fn set_is_simple_vote_tx(&mut self, is_simple_vote_tx: bool) { - self.is_simple_vote_tx = is_simple_vote_tx; - } - - pub(crate) fn set_compute_unit_limit(&mut self, compute_unit_limit: u32) { - self.compute_unit_limit = compute_unit_limit; - } - - pub(crate) fn set_compute_unit_price(&mut self, compute_unit_price: u64) { - self.compute_unit_price = compute_unit_price; - } - - pub(crate) fn set_loaded_accounts_bytes(&mut self, loaded_accounts_bytes: u32) { - self.loaded_accounts_bytes = loaded_accounts_bytes; - } + pub(crate) compute_budget_instruction_details: ComputeBudgetInstructionDetails, } From b2de58bf682909549eda76146f382fdda1b5e700 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 30 Aug 2024 06:16:46 +0900 Subject: [PATCH 262/529] Remove banking stage forwarder counters (#2788) The counters are either unused or have the data covered by a different datapoint --- core/src/banking_stage/forwarder.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index acb34b8b4dc1e9..4d39ea65dfc7b8 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -228,11 +228,13 @@ impl Forwarder { usize, Option, ) { - let (res, num_packets, forward_us, leader_pubkey) = + let (res, num_packets, _forward_us, leader_pubkey) = self.forward_packets(forward_option, forwardable_packets); + if let Err(ref err) = res { + warn!("failed to forward packets: {err}"); + } if num_packets > 0 { - inc_new_counter_info!("banking_stage-forwarded_packets", num_packets); if let ForwardOption::ForwardTpuVote = forward_option { banking_stage_stats .forwarded_vote_count @@ -242,12 +244,6 @@ impl Forwarder { .forwarded_transaction_count .fetch_add(num_packets, Ordering::Relaxed); } - - inc_new_counter_info!("banking_stage-forward-us", forward_us as usize, 1000, 1000); - - if res.is_err() { - inc_new_counter_info!("banking_stage-forward_packets-failed-batches", 1); - } } (res, 
num_packets, leader_pubkey) From a72f981370c3f566fc1becf024f3178da041547a Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 30 Aug 2024 06:28:00 +0900 Subject: [PATCH 263/529] Remove counters from sigverify functions (#2785) These counters should be removed for several reasons: - Counters in general are bad (multiple atomics for a single value) - The counters are at debug level, so effectively unused - The reported values can be determined from SigVerifierStats The only thing lost is whether verification was done by cpu/gpu/disabled verifier. But, most validators are not running gpu's, so ripping this out now and it can be re-added more properly if more validators shift to using gpu's. --- perf/src/sigverify.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 0d29bfe571b81b..e10675270471ea 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -11,7 +11,6 @@ use { recycler::Recycler, }, rayon::{prelude::*, ThreadPool}, - solana_metrics::inc_new_counter_debug, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ hash::Hash, @@ -531,7 +530,6 @@ pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool, pa } }); }); - inc_new_counter_debug!("ed25519_verify_cpu", packet_count); } pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) { @@ -542,7 +540,6 @@ pub fn ed25519_verify_disabled(batches: &mut [PacketBatch]) { .par_iter_mut() .for_each(|p| p.meta_mut().set_discard(false)) }); - inc_new_counter_debug!("ed25519_verify_disabled", packet_count); } pub fn copy_return_values(sig_lens: I, out: &PinnedVec, rvs: &mut [Vec]) @@ -672,7 +669,6 @@ pub fn ed25519_verify( trace!("done verify"); copy_return_values(sig_lens, &out, &mut rvs); mark_disabled(batches, &rvs); - inc_new_counter_debug!("ed25519_verify_gpu", valid_packet_count); } #[cfg(test)] From 5866bfb80b2dccb8c7023623230c0e45d247889e Mon Sep 17 00:00:00 2001 From: Joe C Date: Fri, 30 Aug 2024 10:06:48 +0800 Subject: [PATCH 264/529] SVM: Wire up `SVMRentCollector` (#2753) * SVM: account loader: wire up `SVMRentCollector` * SVM: transaction account state info: wire up `SVMRentCollector` * Runtime: fee distribution: wire up `SVMRentCollector` * Runtime: bank: wire up `SVMRentCollector` * SVM: drop everything unused --- Cargo.lock | 2 +- programs/sbf/Cargo.lock | 2 +- runtime/src/bank.rs | 6 +- runtime/src/bank/fee_distribution.rs | 18 +- svm/Cargo.toml | 2 +- svm/src/account_loader.rs | 35 +-- svm/src/account_rent_state.rs | 310 ---------------------- svm/src/lib.rs | 4 - svm/src/transaction_account_state_info.rs | 43 +-- svm/src/transaction_processor.rs | 20 +- 10 files changed, 72 insertions(+), 370 deletions(-) delete mode 100644 svm/src/account_rent_state.rs diff --git a/Cargo.lock b/Cargo.lock index ec7478b40acbdc..50235b9769c27c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7711,12 +7711,12 @@ dependencies = [ "solana-log-collector", "solana-logger", "solana-measure", - "solana-metrics", "solana-program-runtime", "solana-runtime-transaction", "solana-sdk", "solana-svm", "solana-svm-conformance", + "solana-svm-rent-collector", "solana-svm-transaction", "solana-system-program", "solana-timings", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cfd32813c20a72..e2d707ac3271ce 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6447,10 +6447,10 @@ dependencies = [ "solana-loader-v4-program", "solana-log-collector", "solana-measure", - "solana-metrics", "solana-program-runtime", "solana-runtime-transaction", 
"solana-sdk", + "solana-svm-rent-collector", "solana-svm-transaction", "solana-system-program", "solana-timings", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d7757b33fb98bb..040729cd5ebf90 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3478,9 +3478,7 @@ impl Bank { timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_us); let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); - // TODO: Pass into `TransactionProcessingEnvironment` in place of - // `rent_collector` when SVM supports the new `SVMRentCollector` trait. - let _rent_collector_with_metrics = + let rent_collector_with_metrics = RentCollectorWithMetrics::new(self.rent_collector.clone()); let processing_environment = TransactionProcessingEnvironment { blockhash, @@ -3489,7 +3487,7 @@ impl Bank { feature_set: Arc::clone(&self.feature_set), fee_structure: Some(&self.fee_structure), lamports_per_signature, - rent_collector: Some(&self.rent_collector), + rent_collector: Some(&rent_collector_with_metrics), }; let sanitized_output = self diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 383521c016179f..89d0add35df7b0 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -12,7 +12,7 @@ use { system_program, transaction::SanitizedTransaction, }, - solana_svm::account_rent_state::RentState, + solana_svm_rent_collector::svm_rent_collector::SVMRentCollector, solana_vote::vote_account::VoteAccountsHashMap, std::{result::Result, sync::atomic::Ordering::Relaxed}, thiserror::Error, @@ -148,16 +148,16 @@ impl Bank { return Err(DepositFeeError::InvalidAccountOwner); } - let rent = &self.rent_collector().rent; - let recipient_pre_rent_state = RentState::from_account(&account, rent); + let recipient_pre_rent_state = self.rent_collector().get_account_rent_state(&account); let distribution = account.checked_add_lamports(fees); if distribution.is_err() { return Err(DepositFeeError::LamportOverflow); } - let recipient_post_rent_state = RentState::from_account(&account, rent); - let rent_state_transition_allowed = - recipient_post_rent_state.transition_allowed_from(&recipient_pre_rent_state); + let recipient_post_rent_state = self.rent_collector().get_account_rent_state(&account); + let rent_state_transition_allowed = self + .rent_collector() + .transition_allowed(&recipient_pre_rent_state, &recipient_post_rent_state); if !rent_state_transition_allowed { return Err(DepositFeeError::InvalidRentPayingAccount); } @@ -334,6 +334,7 @@ pub mod tests { account::AccountSharedData, native_token::sol_to_lamports, pubkey, rent::Rent, signature::Signer, }, + solana_svm_rent_collector::rent_state::RentState, std::sync::RwLock, }; @@ -633,8 +634,7 @@ pub mod tests { genesis_config.rent = Rent::default(); // Ensure rent is non-zero, as genesis_utils sets Rent::free by default let bank = Bank::new_for_tests(&genesis_config); - let rent = &bank.rent_collector().rent; - let rent_exempt_minimum = rent.minimum_balance(0); + let rent_exempt_minimum = bank.rent_collector().get_rent().minimum_balance(0); // Make one validator have an empty identity account let mut empty_validator_account = bank @@ -671,7 +671,7 @@ pub mod tests { let account = bank .get_account_with_fixed_root(address) .unwrap_or_default(); - RentState::from_account(&account, rent) + bank.rent_collector().get_account_rent_state(&account) }; // Assert starting RentStates diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 
994fc9d59d4a04..9f0d80c4d1f5ad 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -24,10 +24,10 @@ solana-frozen-abi-macro = { workspace = true, optional = true } solana-loader-v4-program = { workspace = true } solana-log-collector = { workspace = true } solana-measure = { workspace = true } -solana-metrics = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime-transaction = { workspace = true } solana-sdk = { workspace = true } +solana-svm-rent-collector = { workspace = true } solana-svm-transaction = { workspace = true } solana-system-program = { workspace = true } solana-timings = { workspace = true } diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index c7165eca8ff4a5..f743e6bcba5860 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -1,7 +1,6 @@ use { crate::{ account_overrides::AccountOverrides, - account_rent_state::RentState, nonce_info::NonceInfo, rollback_accounts::RollbackAccounts, transaction_error_metrics::TransactionErrorMetrics, @@ -18,7 +17,7 @@ use { nonce::State as NonceState, pubkey::Pubkey, rent::RentDue, - rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, + rent_collector::{CollectedInfo, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, saturating_add_assign, sysvar::{ @@ -28,6 +27,7 @@ use { transaction::{Result, TransactionError}, transaction_context::{IndexOfAccount, TransactionAccount}, }, + solana_svm_rent_collector::svm_rent_collector::SVMRentCollector, solana_svm_transaction::svm_message::SVMMessage, solana_system_program::{get_system_account_kind, SystemAccountKind}, std::num::NonZeroU32, @@ -100,12 +100,12 @@ pub struct FeesOnlyTransaction { /// rent exempt. pub fn collect_rent_from_account( feature_set: &FeatureSet, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, address: &Pubkey, account: &mut AccountSharedData, ) -> CollectedInfo { if !feature_set.is_active(&feature_set::disable_rent_fees_collection::id()) { - rent_collector.collect_from_existing_account(address, account) + rent_collector.collect_rent(address, account) } else { // When rent fee collection is disabled, we won't collect rent for any account. If there // are any rent paying accounts, their `rent_epoch` won't change either. However, if the @@ -135,7 +135,7 @@ pub fn validate_fee_payer( payer_account: &mut AccountSharedData, payer_index: IndexOfAccount, error_metrics: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, fee: u64, ) -> Result<()> { if payer_account.lamports() == 0 { @@ -151,7 +151,9 @@ pub fn validate_fee_payer( SystemAccountKind::Nonce => { // Should we ever allow a fees charge to zero a nonce account's // balance. 
The state MUST be set to uninitialized in that case - rent_collector.rent.minimum_balance(NonceState::size()) + rent_collector + .get_rent() + .minimum_balance(NonceState::size()) } }; @@ -164,13 +166,13 @@ pub fn validate_fee_payer( TransactionError::InsufficientFundsForFee })?; - let payer_pre_rent_state = RentState::from_account(payer_account, &rent_collector.rent); + let payer_pre_rent_state = rent_collector.get_account_rent_state(payer_account); payer_account .checked_sub_lamports(fee) .map_err(|_| TransactionError::InsufficientFundsForFee)?; - let payer_post_rent_state = RentState::from_account(payer_account, &rent_collector.rent); - RentState::check_rent_state_with_account( + let payer_post_rent_state = rent_collector.get_account_rent_state(payer_account); + rent_collector.check_rent_state_with_account( &payer_pre_rent_state, &payer_post_rent_state, payer_address, @@ -191,7 +193,7 @@ pub(crate) fn load_accounts( error_metrics: &mut TransactionErrorMetrics, account_overrides: Option<&AccountOverrides>, feature_set: &FeatureSet, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, loaded_programs: &ProgramCacheForTxBatch, ) -> Vec { txs.iter() @@ -218,7 +220,7 @@ fn load_transaction( error_metrics: &mut TransactionErrorMetrics, account_overrides: Option<&AccountOverrides>, feature_set: &FeatureSet, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, loaded_programs: &ProgramCacheForTxBatch, ) -> TransactionLoadResult { match validation_result { @@ -274,7 +276,7 @@ fn load_transaction_accounts( error_metrics: &mut TransactionErrorMetrics, account_overrides: Option<&AccountOverrides>, feature_set: &FeatureSet, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, loaded_programs: &ProgramCacheForTxBatch, ) -> Result { let mut tx_rent: TransactionRent = 0; @@ -409,7 +411,7 @@ fn load_transaction_account( instruction_accounts: &[&u8], account_overrides: Option<&AccountOverrides>, feature_set: &FeatureSet, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, loaded_programs: &ProgramCacheForTxBatch, ) -> Result<(LoadedTransactionAccount, bool)> { let mut account_found = true; @@ -1848,18 +1850,19 @@ mod tests { let compute_budget = ComputeBudget::new(u64::from( compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, )); + let rent_collector = RentCollector::default(); let transaction_context = TransactionContext::new( loaded_transaction.accounts, - Rent::default(), + rent_collector.get_rent().clone(), compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); assert_eq!( TransactionAccountStateInfo::new( - &Rent::default(), &transaction_context, - sanitized_tx.message() + sanitized_tx.message(), + &rent_collector, ) .len(), num_accounts, diff --git a/svm/src/account_rent_state.rs b/svm/src/account_rent_state.rs deleted file mode 100644 index 7e3501d0d6c649..00000000000000 --- a/svm/src/account_rent_state.rs +++ /dev/null @@ -1,310 +0,0 @@ -use { - log::*, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, - pubkey::Pubkey, - rent::Rent, - transaction::{Result, TransactionError}, - transaction_context::{IndexOfAccount, TransactionContext}, - }, -}; - -#[derive(Debug, PartialEq, Eq)] -pub enum RentState { - /// account.lamports == 0 - Uninitialized, - /// 0 < account.lamports < rent-exempt-minimum - RentPaying { - lamports: u64, // account.lamports() - data_size: usize, // account.data().len() - }, - /// account.lamports >= rent-exempt-minimum - 
RentExempt, -} - -impl RentState { - /// Return a new RentState instance for a given account and rent. - pub fn from_account(account: &AccountSharedData, rent: &Rent) -> Self { - if account.lamports() == 0 { - Self::Uninitialized - } else if rent.is_exempt(account.lamports(), account.data().len()) { - Self::RentExempt - } else { - Self::RentPaying { - data_size: account.data().len(), - lamports: account.lamports(), - } - } - } - - /// Check whether a transition from the pre_rent_state to this - /// state is valid. - pub fn transition_allowed_from(&self, pre_rent_state: &RentState) -> bool { - match self { - Self::Uninitialized | Self::RentExempt => true, - Self::RentPaying { - data_size: post_data_size, - lamports: post_lamports, - } => { - match pre_rent_state { - Self::Uninitialized | Self::RentExempt => false, - Self::RentPaying { - data_size: pre_data_size, - lamports: pre_lamports, - } => { - // Cannot remain RentPaying if resized or credited. - post_data_size == pre_data_size && post_lamports <= pre_lamports - } - } - } - } - } - - pub(crate) fn check_rent_state( - pre_rent_state: Option<&Self>, - post_rent_state: Option<&Self>, - transaction_context: &TransactionContext, - index: IndexOfAccount, - ) -> Result<()> { - if let Some((pre_rent_state, post_rent_state)) = pre_rent_state.zip(post_rent_state) { - let expect_msg = - "account must exist at TransactionContext index if rent-states are Some"; - Self::check_rent_state_with_account( - pre_rent_state, - post_rent_state, - transaction_context - .get_key_of_account_at_index(index) - .expect(expect_msg), - &transaction_context - .get_account_at_index(index) - .expect(expect_msg) - .borrow(), - index, - )?; - } - Ok(()) - } - - pub(super) fn check_rent_state_with_account( - pre_rent_state: &Self, - post_rent_state: &Self, - address: &Pubkey, - account_state: &AccountSharedData, - account_index: IndexOfAccount, - ) -> Result<()> { - Self::submit_rent_state_metrics(pre_rent_state, post_rent_state); - if !solana_sdk::incinerator::check_id(address) - && !post_rent_state.transition_allowed_from(pre_rent_state) - { - debug!( - "Account {} not rent exempt, state {:?}", - address, account_state, - ); - let account_index = account_index as u8; - Err(TransactionError::InsufficientFundsForRent { account_index }) - } else { - Ok(()) - } - } - - fn submit_rent_state_metrics(pre_rent_state: &Self, post_rent_state: &Self) { - match (pre_rent_state, post_rent_state) { - (&RentState::Uninitialized, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_err-new_account", 1); - } - (&RentState::RentPaying { .. }, &RentState::RentPaying { .. }) => { - inc_new_counter_info!("rent_paying_ok-legacy", 1); - } - (_, &RentState::RentPaying { .. 
}) => { - inc_new_counter_info!("rent_paying_err-other", 1); - } - _ => {} - } - } -} - -#[cfg(test)] -mod tests { - use {super::*, solana_sdk::pubkey::Pubkey}; - - #[test] - fn test_from_account() { - let program_id = Pubkey::new_unique(); - let uninitialized_account = AccountSharedData::new(0, 0, &Pubkey::default()); - - let account_data_size = 100; - - let rent = Rent::free(); - let rent_exempt_account = AccountSharedData::new(1, account_data_size, &program_id); // if rent is free, all accounts with non-zero lamports and non-empty data are rent-exempt - - assert_eq!( - RentState::from_account(&uninitialized_account, &rent), - RentState::Uninitialized - ); - assert_eq!( - RentState::from_account(&rent_exempt_account, &rent), - RentState::RentExempt - ); - - let rent = Rent::default(); - let rent_minimum_balance = rent.minimum_balance(account_data_size); - let rent_paying_account = AccountSharedData::new( - rent_minimum_balance.saturating_sub(1), - account_data_size, - &program_id, - ); - let rent_exempt_account = AccountSharedData::new( - rent.minimum_balance(account_data_size), - account_data_size, - &program_id, - ); - - assert_eq!( - RentState::from_account(&uninitialized_account, &rent), - RentState::Uninitialized - ); - assert_eq!( - RentState::from_account(&rent_paying_account, &rent), - RentState::RentPaying { - data_size: account_data_size, - lamports: rent_paying_account.lamports(), - } - ); - assert_eq!( - RentState::from_account(&rent_exempt_account, &rent), - RentState::RentExempt - ); - } - - #[test] - fn test_transition_allowed_from() { - let post_rent_state = RentState::Uninitialized; - assert!(post_rent_state.transition_allowed_from(&RentState::Uninitialized)); - assert!(post_rent_state.transition_allowed_from(&RentState::RentExempt)); - assert!( - post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 0, - lamports: 1, - }) - ); - - let post_rent_state = RentState::RentExempt; - assert!(post_rent_state.transition_allowed_from(&RentState::Uninitialized)); - assert!(post_rent_state.transition_allowed_from(&RentState::RentExempt)); - assert!( - post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 0, - lamports: 1, - }) - ); - let post_rent_state = RentState::RentPaying { - data_size: 2, - lamports: 5, - }; - assert!(!post_rent_state.transition_allowed_from(&RentState::Uninitialized)); - assert!(!post_rent_state.transition_allowed_from(&RentState::RentExempt)); - assert!( - !post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 3, - lamports: 5 - }) - ); - assert!( - !post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 1, - lamports: 5 - }) - ); - // Transition is always allowed if there is no account data resize or - // change in account's lamports. - assert!( - post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 2, - lamports: 5 - }) - ); - // Transition is always allowed if there is no account data resize and - // account's lamports is reduced. - assert!( - post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 2, - lamports: 7 - }) - ); - // Transition is not allowed if the account is credited with more - // lamports and remains rent-paying. 
- assert!( - !post_rent_state.transition_allowed_from(&RentState::RentPaying { - data_size: 2, - lamports: 3 - }), - ); - } - - #[test] - fn test_check_rent_state_with_account() { - let pre_rent_state = RentState::RentPaying { - data_size: 2, - lamports: 3, - }; - - let post_rent_state = RentState::RentPaying { - data_size: 2, - lamports: 5, - }; - let account_index = 2 as IndexOfAccount; - let key = Pubkey::new_unique(); - let result = RentState::check_rent_state_with_account( - &pre_rent_state, - &post_rent_state, - &key, - &AccountSharedData::default(), - account_index, - ); - assert_eq!( - result.err(), - Some(TransactionError::InsufficientFundsForRent { - account_index: account_index as u8 - }) - ); - - let result = RentState::check_rent_state_with_account( - &pre_rent_state, - &post_rent_state, - &solana_sdk::incinerator::id(), - &AccountSharedData::default(), - account_index, - ); - assert!(result.is_ok()); - } - - #[test] - fn test_check_rent_state() { - let context = TransactionContext::new( - vec![(Pubkey::new_unique(), AccountSharedData::default())], - Rent::default(), - 20, - 20, - ); - - let pre_rent_state = RentState::RentPaying { - data_size: 2, - lamports: 3, - }; - - let post_rent_state = RentState::RentPaying { - data_size: 2, - lamports: 5, - }; - - let result = - RentState::check_rent_state(Some(&pre_rent_state), Some(&post_rent_state), &context, 0); - assert_eq!( - result.err(), - Some(TransactionError::InsufficientFundsForRent { account_index: 0 }) - ); - - let result = RentState::check_rent_state(None, Some(&post_rent_state), &context, 0); - assert!(result.is_ok()); - } -} diff --git a/svm/src/lib.rs b/svm/src/lib.rs index b031ce7d6e1c53..a9686f2cf81da9 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -3,7 +3,6 @@ pub mod account_loader; pub mod account_overrides; -pub mod account_rent_state; pub mod account_saver; pub mod message_processor; pub mod nonce_info; @@ -18,9 +17,6 @@ pub mod transaction_processing_callback; pub mod transaction_processing_result; pub mod transaction_processor; -#[macro_use] -extern crate solana_metrics; - #[cfg_attr(feature = "frozen-abi", macro_use)] #[cfg(feature = "frozen-abi")] extern crate solana_frozen_abi_macro; diff --git a/svm/src/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs index 123b572e758868..3bf90adcd3cba6 100644 --- a/svm/src/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -1,12 +1,11 @@ use { - crate::account_rent_state::RentState, solana_sdk::{ account::ReadableAccount, native_loader, - rent::Rent, transaction::Result, transaction_context::{IndexOfAccount, TransactionContext}, }, + solana_svm_rent_collector::{rent_state::RentState, svm_rent_collector::SVMRentCollector}, solana_svm_transaction::svm_message::SVMMessage, }; @@ -17,9 +16,9 @@ pub(crate) struct TransactionAccountStateInfo { impl TransactionAccountStateInfo { pub(crate) fn new( - rent: &Rent, transaction_context: &TransactionContext, message: &impl SVMMessage, + rent_collector: &dyn SVMRentCollector, ) -> Vec { (0..message.account_keys().len()) .map(|i| { @@ -33,7 +32,7 @@ impl TransactionAccountStateInfo { // balances; however they will never be loaded as writable debug_assert!(!native_loader::check_id(account.owner())); - Some(RentState::from_account(&account, rent)) + Some(rent_collector.get_account_rent_state(&account)) } else { None }; @@ -54,11 +53,12 @@ impl TransactionAccountStateInfo { pre_state_infos: &[Self], post_state_infos: &[Self], transaction_context: &TransactionContext, + 
rent_collector: &dyn SVMRentCollector, ) -> Result<()> { for (i, (pre_state_info, post_state_info)) in pre_state_infos.iter().zip(post_state_infos).enumerate() { - RentState::check_rent_state( + rent_collector.check_rent_state( pre_state_info.rent_state.as_ref(), post_state_info.rent_state.as_ref(), transaction_context, @@ -72,16 +72,14 @@ impl TransactionAccountStateInfo { #[cfg(test)] mod test { use { - crate::{ - account_rent_state::RentState, - transaction_account_state_info::TransactionAccountStateInfo, - }, + super::*, solana_sdk::{ account::AccountSharedData, hash::Hash, instruction::CompiledInstruction, message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, rent::Rent, + rent_collector::RentCollector, reserved_account_keys::ReservedAccountKeys, signature::{Keypair, Signer}, transaction::TransactionError, @@ -91,7 +89,7 @@ mod test { #[test] fn test_new() { - let rent = Rent::default(); + let rent_collector = RentCollector::default(); let key1 = Keypair::new(); let key2 = Keypair::new(); let key3 = Keypair::new(); @@ -126,8 +124,14 @@ mod test { (key3.pubkey(), AccountSharedData::default()), ]; - let context = TransactionContext::new(transaction_accounts, rent.clone(), 20, 20); - let result = TransactionAccountStateInfo::new(&rent, &context, &sanitized_message); + let context = TransactionContext::new( + transaction_accounts, + rent_collector.get_rent().clone(), + 20, + 20, + ); + let result = + TransactionAccountStateInfo::new(&context, &sanitized_message, &rent_collector); assert_eq!( result, vec![ @@ -145,7 +149,7 @@ mod test { #[test] #[should_panic(expected = "message and transaction context out of sync, fatal")] fn test_new_panic() { - let rent = Rent::default(); + let rent_collector = RentCollector::default(); let key1 = Keypair::new(); let key2 = Keypair::new(); let key3 = Keypair::new(); @@ -180,12 +184,19 @@ mod test { (key3.pubkey(), AccountSharedData::default()), ]; - let context = TransactionContext::new(transaction_accounts, rent.clone(), 20, 20); - let _result = TransactionAccountStateInfo::new(&rent, &context, &sanitized_message); + let context = TransactionContext::new( + transaction_accounts, + rent_collector.get_rent().clone(), + 20, + 20, + ); + let _result = + TransactionAccountStateInfo::new(&context, &sanitized_message, &rent_collector); } #[test] fn test_verify_changes() { + let rent_collector = RentCollector::default(); let key1 = Keypair::new(); let key2 = Keypair::new(); let pre_rent_state = vec![ @@ -211,6 +222,7 @@ mod test { &pre_rent_state, &post_rent_state, &context, + &rent_collector, ); assert!(result.is_ok()); @@ -234,6 +246,7 @@ mod test { &pre_rent_state, &post_rent_state, &context, + &rent_collector, ); assert_eq!( result.err(), diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index affda575d8d949..58ca47269d02f5 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -49,6 +49,7 @@ use { transaction::{self, TransactionError}, transaction_context::{ExecutionRecord, TransactionContext}, }, + solana_svm_rent_collector::svm_rent_collector::SVMRentCollector, solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction}, solana_timings::{ExecuteTimingType, ExecuteTimings}, solana_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard}, @@ -132,7 +133,7 @@ pub struct TransactionProcessingEnvironment<'a> { /// Lamports per signature to charge per transaction. 
pub lamports_per_signature: u64, /// Rent collector to use for the transaction batch. - pub rent_collector: Option<&'a RentCollector>, + pub rent_collector: Option<&'a dyn SVMRentCollector>, } #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] @@ -377,7 +378,7 @@ impl TransactionBatchProcessor { check_results: Vec, feature_set: &FeatureSet, fee_structure: &FeeStructure, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, error_counters: &mut TransactionErrorMetrics, ) -> Vec { sanitized_txs @@ -412,7 +413,7 @@ impl TransactionBatchProcessor { checked_details: CheckedTransactionDetails, feature_set: &FeatureSet, fee_structure: &FeeStructure, - rent_collector: &RentCollector, + rent_collector: &dyn SVMRentCollector, error_counters: &mut TransactionErrorMetrics, ) -> transaction::Result { let compute_budget_limits = process_compute_budget_instructions( @@ -727,10 +728,10 @@ impl TransactionBatchProcessor { Some(lamports_sum) } - let rent = environment + let default_rent_collector = RentCollector::default(); + let rent_collector = environment .rent_collector - .map(|rent_collector| rent_collector.rent.clone()) - .unwrap_or_default(); + .unwrap_or(&default_rent_collector); let lamports_before_tx = transaction_accounts_lamports_sum(&transaction_accounts, tx).unwrap_or(0); @@ -741,7 +742,7 @@ impl TransactionBatchProcessor { let mut transaction_context = TransactionContext::new( transaction_accounts, - rent.clone(), + rent_collector.get_rent().clone(), compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); @@ -749,7 +750,7 @@ impl TransactionBatchProcessor { transaction_context.set_signature(tx.signature()); let pre_account_state_info = - TransactionAccountStateInfo::new(&rent, &transaction_context, tx); + TransactionAccountStateInfo::new(&transaction_context, tx, rent_collector); let log_collector = if config.recording_config.enable_log_recording { match config.log_messages_bytes_limit { @@ -803,11 +804,12 @@ impl TransactionBatchProcessor { let mut status = process_result .and_then(|info| { let post_account_state_info = - TransactionAccountStateInfo::new(&rent, &transaction_context, tx); + TransactionAccountStateInfo::new(&transaction_context, tx, rent_collector); TransactionAccountStateInfo::verify_changes( &pre_account_state_info, &post_account_state_info, &transaction_context, + rent_collector, ) .map(|_| info) }) From 4470f6d96ec1ef85ee4137065d3482136f227061 Mon Sep 17 00:00:00 2001 From: Tyera Date: Thu, 29 Aug 2024 21:06:47 -0600 Subject: [PATCH 265/529] SIMD-0118: fix `total_rewards` for recalculation (#2780) * Add new feature key * Wrap existing code with new feature * Extend test harness * Make test fail * Populate EpochRewards::total_rewards from PointValue * Remove superfluous struct field * Fixup tests --- programs/bpf_loader/src/syscalls/mod.rs | 8 ++- programs/stake/src/stake_instruction.rs | 4 +- rpc/src/rpc.rs | 12 +++- .../partitioned_epoch_rewards/calculation.rs | 39 ++++++------ .../partitioned_epoch_rewards/distribution.rs | 15 ++++- .../src/bank/partitioned_epoch_rewards/mod.rs | 59 ++++++++++++++----- .../bank/partitioned_epoch_rewards/sysvar.rs | 54 +++++++++++++---- runtime/src/bank/sysvar_cache.rs | 8 ++- sdk/program/src/epoch_rewards.rs | 4 +- sdk/src/feature_set.rs | 5 ++ 10 files changed, 152 insertions(+), 56 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 21b643a9fa89c6..47105457372988 100644 --- 
a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -42,7 +42,8 @@ use { enable_big_mod_exp_syscall, enable_get_epoch_stake_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall, error_on_syscall_bpf_function_hash_collisions, get_sysvar_syscall_enabled, - last_restart_slot_sysvar, reject_callx_r10, remaining_compute_units_syscall_enabled, + last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, reject_callx_r10, + remaining_compute_units_syscall_enabled, }, hash::{Hash, Hasher}, instruction::{AccountMeta, InstructionError, ProcessedSiblingInstruction}, @@ -273,8 +274,9 @@ pub fn create_program_runtime_environment_v1<'a>( let blake3_syscall_enabled = feature_set.is_active(&blake3_syscall_enabled::id()); let curve25519_syscall_enabled = feature_set.is_active(&curve25519_syscall_enabled::id()); let disable_fees_sysvar = feature_set.is_active(&disable_fees_sysvar::id()); - let epoch_rewards_syscall_enabled = - feature_set.is_active(&enable_partitioned_epoch_reward::id()); + let epoch_rewards_syscall_enabled = feature_set + .is_active(&enable_partitioned_epoch_reward::id()) + || feature_set.is_active(&partitioned_epoch_rewards_superfeature::id()); let disable_deploy_of_alloc_free_syscall = reject_deployment_of_broken_elfs && feature_set.is_active(&disable_deploy_of_alloc_free_syscall::id()); let last_restart_slot_syscall_enabled = feature_set.is_active(&last_restart_slot_sysvar::id()); diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 30671825108904..086a1bf044e025 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -66,8 +66,8 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| }; // The EpochRewards sysvar only exists after the - // enable_partitioned_epoch_reward feature is activated. If it exists, check - // the `active` field + // partitioned_epoch_rewards_superfeature feature is activated. If it + // exists, check the `active` field let epoch_rewards_active = invoke_context .get_sysvar_cache() .get_epoch_rewards() diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index d9b057c9fd178b..76ad5119f9844c 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -610,12 +610,20 @@ impl JsonRpcRequestProcessor { // epoch let bank = self.get_bank_with_config(context_config)?; - // DO NOT CLEAN UP with feature_set::enable_partitioned_epoch_reward + // DO NOT CLEAN UP with feature_set::partitioned_epoch_rewards_superfeature // This logic needs to be retained indefinitely to support historical // rewards before and after feature activation. let partitioned_epoch_reward_enabled_slot = bank .feature_set - .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()); + .activated_slot(&feature_set::partitioned_epoch_rewards_superfeature::id()) + .or_else(|| { + // The order of these checks should not matter, since we will + // not ever have both features active on a live cluster. 
This + // check can be removed with + // feature_set::enable_partitioned_epoch_reward + bank.feature_set + .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()) + }); let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot .map(|slot| slot <= first_confirmed_block_in_epoch) .unwrap_or(false); diff --git a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs index 257a531f3e04d4..4d179c753991ab 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs @@ -50,7 +50,7 @@ impl Bank { let CalculateRewardsAndDistributeVoteRewardsResult { total_rewards, distributed_rewards, - total_points, + point_value, stake_rewards_by_partition, } = self.calculate_rewards_and_distribute_vote_rewards( parent_epoch, @@ -80,7 +80,7 @@ impl Bank { distributed_rewards, distribution_starting_block_height, num_partitions, - total_points, + point_value, ); datapoint_info!( @@ -105,12 +105,11 @@ impl Bank { vote_account_rewards, stake_rewards_by_partition, old_vote_balance_and_staked, - validator_rewards, validator_rate, foundation_rate, prev_epoch_duration_in_years, capitalization, - total_points, + point_value, } = self.calculate_rewards_for_partitioning( prev_epoch, reward_calc_tracer, @@ -136,11 +135,11 @@ impl Bank { self.assert_validator_rewards_paid(validator_rewards_paid); // verify that we didn't pay any more than we expected to - assert!(validator_rewards >= validator_rewards_paid + total_stake_rewards_lamports); + assert!(point_value.rewards >= validator_rewards_paid + total_stake_rewards_lamports); info!( "distributed vote rewards: {} out of {}, remaining {}", - validator_rewards_paid, validator_rewards, total_stake_rewards_lamports + validator_rewards_paid, point_value.rewards, total_stake_rewards_lamports ); let (num_stake_accounts, num_vote_accounts) = { @@ -179,7 +178,7 @@ impl Bank { CalculateRewardsAndDistributeVoteRewardsResult { total_rewards: validator_rewards_paid + total_stake_rewards_lamports, distributed_rewards: validator_rewards_paid, - total_points, + point_value, stake_rewards_by_partition, } } @@ -231,7 +230,7 @@ impl Bank { let CalculateValidatorRewardsResult { vote_rewards_accounts: vote_account_rewards, stake_reward_calculation: mut stake_rewards, - total_points, + point_value, } = self .calculate_validator_rewards( prev_epoch, @@ -260,12 +259,11 @@ impl Bank { total_stake_rewards_lamports: stake_rewards.total_stake_rewards_lamports, }, old_vote_balance_and_staked, - validator_rewards, validator_rate, foundation_rate, prev_epoch_duration_in_years, capitalization, - total_points, + point_value, } } @@ -288,12 +286,11 @@ impl Bank { metrics, ) .map(|point_value| { - let total_points = point_value.points; let (vote_rewards_accounts, stake_reward_calculation) = self .calculate_stake_vote_rewards( &reward_calculate_param, rewarded_epoch, - point_value, + point_value.clone(), thread_pool, reward_calc_tracer, metrics, @@ -301,7 +298,7 @@ impl Bank { CalculateValidatorRewardsResult { vote_rewards_accounts, stake_reward_calculation, - total_points, + point_value, } }) } @@ -601,7 +598,8 @@ mod tests { null_tracer, partitioned_epoch_rewards::{ tests::{ - create_default_reward_bank, create_reward_bank, RewardBank, SLOTS_PER_EPOCH, + create_default_reward_bank, create_reward_bank, + create_reward_bank_with_specific_stakes, RewardBank, SLOTS_PER_EPOCH, }, EpochRewardStatus, StartBlockHeightAndRewards, }, @@ -988,11 +986,14 
@@ mod tests { #[test] fn test_recalculate_partitioned_rewards() { - let expected_num_delegations = 4; + let expected_num_delegations = 3; let num_rewards_per_block = 2; // Distribute 4 rewards over 2 blocks - let RewardBank { bank, .. } = create_reward_bank( - expected_num_delegations, + let mut stakes = vec![2_000_000_000; expected_num_delegations]; + // Add stake large enough to be affected by total-rewards discrepancy + stakes.push(40_000_000_000); + let RewardBank { bank, .. } = create_reward_bank_with_specific_stakes( + stakes, num_rewards_per_block, SLOTS_PER_EPOCH - 1, ); @@ -1012,6 +1013,7 @@ mod tests { stake_rewards_by_partition: expected_stake_rewards, .. }, + point_value, .. } = bank.calculate_rewards_for_partitioning( rewarded_epoch, @@ -1035,6 +1037,9 @@ mod tests { assert_eq!(expected_stake_rewards.len(), recalculated_rewards.len()); compare_stake_rewards(&expected_stake_rewards, recalculated_rewards); + let sysvar = bank.get_epoch_rewards_sysvar(); + assert_eq!(point_value.rewards, sysvar.total_rewards); + // Advance to first distribution slot let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), SLOTS_PER_EPOCH + 1); diff --git a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs index dec02ccd928bbe..8a0fa2af2b3086 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs @@ -255,7 +255,7 @@ mod tests { }, sysvar, }, - solana_stake_program::stake_state, + solana_stake_program::{points::PointValue, stake_state}, solana_vote_program::vote_state, }; @@ -349,13 +349,22 @@ mod tests { create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + bank.activate_feature(&feature_set::partitioned_epoch_rewards_superfeature::id()); // Set up epoch_rewards sysvar with rewards with 1e9 lamports to distribute. 
let total_rewards = 1_000_000_000; let num_partitions = 2; // num_partitions is arbitrary and unimportant for this test let total_points = (total_rewards * 42) as u128; // total_points is arbitrary for the purposes of this test - bank.create_epoch_rewards_sysvar(total_rewards, 0, 42, num_partitions, total_points); + bank.create_epoch_rewards_sysvar( + total_rewards, + 0, + 42, + num_partitions, + PointValue { + rewards: total_rewards, + points: total_points, + }, + ); let pre_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); let expected_balance = bank.get_minimum_balance_for_rent_exemption(pre_epoch_rewards_account.data().len()); diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index f4d439d7a9ea43..894b058ca2f8a8 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -18,6 +18,7 @@ use { reward_info::RewardInfo, stake::state::{Delegation, Stake, StakeStateV2}, }, + solana_stake_program::points::PointValue, solana_vote::vote_account::VoteAccounts, std::sync::Arc, }; @@ -95,11 +96,24 @@ struct StakeRewardCalculation { total_stake_rewards_lamports: u64, } -#[derive(Debug, Default)] +#[derive(Debug)] struct CalculateValidatorRewardsResult { vote_rewards_accounts: VoteRewardsAccounts, stake_reward_calculation: StakeRewardCalculation, - total_points: u128, + point_value: PointValue, +} + +impl Default for CalculateValidatorRewardsResult { + fn default() -> Self { + Self { + vote_rewards_accounts: VoteRewardsAccounts::default(), + stake_reward_calculation: StakeRewardCalculation::default(), + point_value: PointValue { + points: 0, + rewards: 0, + }, + } + } } /// hold reward calc info to avoid recalculation across functions @@ -116,12 +130,11 @@ pub(super) struct PartitionedRewardsCalculation { pub(super) vote_account_rewards: VoteRewardsAccounts, pub(super) stake_rewards_by_partition: StakeRewardCalculationPartitioned, pub(super) old_vote_balance_and_staked: u64, - pub(super) validator_rewards: u64, pub(super) validator_rate: f64, pub(super) foundation_rate: f64, pub(super) prev_epoch_duration_in_years: f64, pub(super) capitalization: u64, - total_points: u128, + point_value: PointValue, } /// result of calculating the stake rewards at beginning of new epoch @@ -133,14 +146,16 @@ pub(super) struct StakeRewardCalculationPartitioned { } pub(super) struct CalculateRewardsAndDistributeVoteRewardsResult { - /// total rewards for the epoch (including both vote rewards and stake rewards) + /// total rewards to be distributed in the epoch (including both vote + /// rewards and stake rewards) pub(super) total_rewards: u64, /// distributed vote rewards pub(super) distributed_rewards: u64, - /// total rewards points calculated for the current epoch, where points + /// total rewards and points calculated for the current epoch, where points /// equals the sum of (delegated stake * credits observed) for all - /// delegations - pub(super) total_points: u128, + /// delegations and rewards are the lamports to split across all stake and + /// vote accounts + pub(super) point_value: PointValue, /// stake rewards that still need to be distributed, grouped by partition pub(super) stake_rewards_by_partition: Vec, } @@ -180,6 +195,9 @@ impl Bank { pub(super) fn is_partitioned_rewards_feature_enabled(&self) -> bool { self.feature_set .is_active(&feature_set::enable_partitioned_epoch_reward::id()) + || self + .feature_set + 
.is_active(&feature_set::partitioned_epoch_rewards_superfeature::id()) } pub(crate) fn set_epoch_reward_status_active( @@ -347,17 +365,25 @@ mod tests { stake_account_stores_per_block: u64, advance_num_slots: u64, ) -> RewardBank { - let validator_keypairs = (0..expected_num_delegations) + create_reward_bank_with_specific_stakes( + vec![2_000_000_000; expected_num_delegations], + stake_account_stores_per_block, + advance_num_slots, + ) + } + + pub(super) fn create_reward_bank_with_specific_stakes( + stakes: Vec, + stake_account_stores_per_block: u64, + advance_num_slots: u64, + ) -> RewardBank { + let validator_keypairs = (0..stakes.len()) .map(|_| ValidatorVoteKeypairs::new_rand()) .collect::>(); let GenesisConfigInfo { mut genesis_config, .. - } = create_genesis_config_with_vote_accounts( - 1_000_000_000, - &validator_keypairs, - vec![2_000_000_000; expected_num_delegations], - ); + } = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs, stakes); genesis_config.epoch_schedule = EpochSchedule::new(SLOTS_PER_EPOCH); let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); @@ -456,7 +482,7 @@ mod tests { let mut bank = Bank::new_for_tests(&genesis_config); assert!(!bank.is_partitioned_rewards_feature_enabled()); - bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + bank.activate_feature(&feature_set::partitioned_epoch_rewards_superfeature::id()); assert!(bank.is_partitioned_rewards_feature_enabled()); } @@ -950,6 +976,9 @@ mod tests { genesis_config .accounts .remove(&feature_set::enable_partitioned_epoch_reward::id()); + genesis_config + .accounts + .remove(&feature_set::partitioned_epoch_rewards_superfeature::id()); let bank = Bank::new_for_tests(&genesis_config); diff --git a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs index feb1d93461e983..624630f39712c9 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs @@ -3,8 +3,9 @@ use { log::info, solana_sdk::{ account::{create_account_shared_data_with_fields as create_account, from_account}, - sysvar, + feature_set, sysvar, }, + solana_stake_program::points::PointValue, }; impl Bank { @@ -28,10 +29,18 @@ impl Bank { distributed_rewards: u64, distribution_starting_block_height: u64, num_partitions: u64, - total_points: u128, + point_value: PointValue, ) { assert!(self.is_partitioned_rewards_code_enabled()); + let total_rewards = if self + .feature_set + .is_active(&feature_set::partitioned_epoch_rewards_superfeature::id()) + { + point_value.rewards + } else { + total_rewards + }; assert!(total_rewards >= distributed_rewards); let parent_blockhash = self.last_blockhash(); @@ -40,7 +49,7 @@ impl Bank { distribution_starting_block_height, num_partitions, parent_blockhash, - total_points, + total_points: point_value.points, total_rewards, distributed_rewards, active: true, @@ -81,10 +90,17 @@ impl Bank { /// Update EpochRewards sysvar with distributed rewards pub(in crate::bank::partitioned_epoch_rewards) fn set_epoch_rewards_sysvar_to_inactive(&self) { let mut epoch_rewards = self.get_epoch_rewards_sysvar(); - assert_eq!( - epoch_rewards.distributed_rewards, - epoch_rewards.total_rewards - ); + if self + .feature_set + .is_active(&feature_set::partitioned_epoch_rewards_superfeature::id()) + { + assert!(epoch_rewards.total_rewards >= epoch_rewards.distributed_rewards); + } else { + assert_eq!( + epoch_rewards.distributed_rewards, + 
epoch_rewards.total_rewards + ); + } epoch_rewards.active = false; self.update_sysvar_account(&sysvar::epoch_rewards::id(), |account| { @@ -132,11 +148,15 @@ mod tests { create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + bank.activate_feature(&feature_set::partitioned_epoch_rewards_superfeature::id()); - let total_rewards = 1_000_000_000; // a large rewards so that the sysvar account is rent-exempted. + let total_rewards = 1_000_000_000; let num_partitions = 2; // num_partitions is arbitrary and unimportant for this test let total_points = (total_rewards * 42) as u128; // total_points is arbitrary for the purposes of this test + let point_value = PointValue { + rewards: total_rewards, + points: total_points, + }; // create epoch rewards sysvar let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { @@ -155,7 +175,13 @@ mod tests { sysvar::epoch_rewards::EpochRewards::default() ); - bank.create_epoch_rewards_sysvar(total_rewards, 10, 42, num_partitions, total_points); + bank.create_epoch_rewards_sysvar( + total_rewards, + 10, + 42, + num_partitions, + point_value.clone(), + ); let account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); let expected_balance = bank.get_minimum_balance_for_rent_exemption(account.data().len()); // Expected balance is the sysvar rent-exempt balance @@ -169,7 +195,13 @@ mod tests { let bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), parent_slot + 1); // Also note that running `create_epoch_rewards_sysvar()` against a bank // with an existing EpochRewards sysvar clobbers the previous values - bank.create_epoch_rewards_sysvar(total_rewards, 10, 42, num_partitions, total_points); + bank.create_epoch_rewards_sysvar( + total_rewards, + 10, + 42, + num_partitions, + point_value.clone(), + ); let expected_epoch_rewards = sysvar::epoch_rewards::EpochRewards { distribution_starting_block_height: 42, diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index b350b6f37c018f..1c7cea037d23af 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -9,6 +9,7 @@ mod tests { feature_set, genesis_config::create_genesis_config, pubkey::Pubkey, sysvar::epoch_rewards::EpochRewards, }, + solana_stake_program::points::PointValue, std::sync::Arc, }; @@ -109,7 +110,7 @@ mod tests { drop(bank1_sysvar_cache); // inject a reward sysvar for test - bank1.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + bank1.activate_feature(&feature_set::partitioned_epoch_rewards_superfeature::id()); let num_partitions = 2; // num_partitions is arbitrary and unimportant for this test let total_points = 42_000; // total_points is arbitrary for the purposes of this test let expected_epoch_rewards = EpochRewards { @@ -126,7 +127,10 @@ mod tests { expected_epoch_rewards.distributed_rewards, expected_epoch_rewards.distribution_starting_block_height, num_partitions, - total_points, + PointValue { + rewards: 100, + points: total_points, + }, ); bank1 diff --git a/sdk/program/src/epoch_rewards.rs b/sdk/program/src/epoch_rewards.rs index bec4b101fee9fa..e4cc81705b1174 100644 --- a/sdk/program/src/epoch_rewards.rs +++ b/sdk/program/src/epoch_rewards.rs @@ -29,7 +29,9 @@ pub struct EpochRewards { /// delegations pub total_points: u128, - /// The total rewards for the current epoch, in 
lamports + /// The total rewards calculated for the current epoch. This may be greater + /// than the total `distributed_rewards` at the end of the rewards period, + /// due to rounding and inability to deliver rewards smaller than 1 lamport. pub total_rewards: u64, /// The rewards currently distributed for the current epoch, in lamports diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 7f8ad417bb10f4..02cc2594d30593 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -369,6 +369,10 @@ pub mod enable_partitioned_epoch_reward { solana_sdk::declare_id!("9bn2vTJUsUcnpiZWbu2woSKtTGW3ErZC9ERv88SDqQjK"); } +pub mod partitioned_epoch_rewards_superfeature { + solana_sdk::declare_id!("PERzQrt5gBD1XEe2c9XdFWqwgHY3mr7cYWbm5V772V8"); +} + pub mod spl_token_v3_4_0 { solana_sdk::declare_id!("Ftok4njE8b7tDffYkC5bAbCaQv5sL6jispYrprzatUwN"); } @@ -1066,6 +1070,7 @@ lazy_static! { (enable_transaction_loading_failure_fees::id(), "Enable fees for some additional transaction failures SIMD-0082"), (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), + (partitioned_epoch_rewards_superfeature::id(), "replaces enable_partitioned_epoch_reward to enable partitioned rewards at epoch boundary SIMD-0118"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 91222126ffe249b3b5d9fc98c76292e94da6886d Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 30 Aug 2024 11:44:06 +0800 Subject: [PATCH 266/529] metrics: remove vote-account-close (#2783) --- programs/vote/src/vote_state/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 9c709ab0885fd2..851faa957db9fe 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -4,7 +4,6 @@ pub use solana_program::vote::state::{vote_state_versions::*, *}; use { log::*, serde_derive::{Deserialize, Serialize}, - solana_metrics::datapoint_debug, solana_program::vote::{error::VoteError, program::id}, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, @@ -1004,11 +1003,9 @@ pub fn withdraw( .unwrap_or(false); if reject_active_vote_account_close { - datapoint_debug!("vote-account-close", ("reject-active", 1, i64)); return Err(VoteError::ActiveVoteAccountClose.into()); } else { // Deinitialize upon zero-balance - datapoint_debug!("vote-account-close", ("allow", 1, i64)); set_vote_account_state(&mut vote_account, VoteState::default())?; } } else { From f79cea14b830c1c733e357ccd7e187142698c999 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Fri, 30 Aug 2024 11:44:16 +0800 Subject: [PATCH 267/529] metrics: remove retry_to_get_account_accessor-panic (#2755) --- accounts-db/src/accounts_db.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1cd8979dd69009..93dd408a2c3726 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -5687,7 +5687,6 @@ impl AccountsDb { // Notice the subtle `?` at previous line, we bail out pretty early if missing. 
if new_slot == slot && new_storage_location.is_store_id_equal(&storage_location) { - inc_new_counter_info!("retry_to_get_account_accessor-panic", 1); let message = format!( "Bad index entry detected ({}, {}, {:?}, {:?}, {:?}, {:?})", pubkey, From 8a219de8fde9c8470d8133b287a38a50d043cf89 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Fri, 30 Aug 2024 09:02:51 -0500 Subject: [PATCH 268/529] metric: add unref_zero_count stat (#2791) * add unref_zero_count stat * put unref_zero_count stat on accounts_index --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 7 +++++++ accounts-db/src/accounts_index.rs | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 93dd408a2c3726..8a71e8ad765eba 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3717,6 +3717,13 @@ impl AccountsDb { .swap(0, Ordering::Relaxed), i64 ), + ( + "unref_zero_count", + self.accounts_index + .unref_zero_count + .swap(0, Ordering::Relaxed), + i64 + ), ( "ancient_account_cleans", ancient_account_cleans.load(Ordering::Relaxed), diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 77014bced0b261..fbd99409a50771 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -687,6 +687,8 @@ pub struct AccountsIndex + Into> { pub active_scans: AtomicUsize, /// # of slots between latest max and latest scan pub max_distance_to_min_scan_slot: AtomicU64, + // # of unref when the account's ref_count is zero + pub unref_zero_count: AtomicU64, /// populated at generate_index time - accounts that could possibly be rent paying pub rent_paying_accounts_by_partition: OnceLock, @@ -724,6 +726,7 @@ impl + Into> AccountsIndex { roots_removed: AtomicUsize::default(), active_scans: AtomicUsize::default(), max_distance_to_min_scan_slot: AtomicU64::default(), + unref_zero_count: AtomicU64::default(), rent_paying_accounts_by_partition: OnceLock::default(), } } @@ -1452,6 +1455,7 @@ impl + Into> AccountsIndex { AccountsIndexScanResult::Unref => { if locked_entry.unref() { info!("scan: refcount of item already at 0: {pubkey}"); + self.unref_zero_count.fetch_add(1, Ordering::Relaxed); } true } From b345960389581f47b8018062241c811079299702 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 30 Aug 2024 10:06:22 -0500 Subject: [PATCH 269/529] account_saver moved to runtime (#2773) --- {svm => runtime}/src/account_saver.rs | 20 ++++++++++---------- runtime/src/bank.rs | 2 +- runtime/src/lib.rs | 1 + svm/src/lib.rs | 1 - 4 files changed, 12 insertions(+), 12 deletions(-) rename {svm => runtime}/src/account_saver.rs (99%) diff --git a/svm/src/account_saver.rs b/runtime/src/account_saver.rs similarity index 99% rename from svm/src/account_saver.rs rename to runtime/src/account_saver.rs index ca3c45dbfd50f3..4875413539107b 100644 --- a/svm/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -1,15 +1,15 @@ use { - crate::{ + solana_sdk::{ + account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey, + transaction_context::TransactionAccount, + }, + solana_svm::{ rollback_accounts::RollbackAccounts, transaction_processing_result::{ ProcessedTransaction, TransactionProcessingResult, TransactionProcessingResultExtensions, }, }, - solana_sdk::{ - account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey, - transaction_context::TransactionAccount, - }, solana_svm_transaction::svm_message::SVMMessage, }; 
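The hunk above is the crux of the relocation: paths that were `crate::` while account_saver lived inside solana-svm become ordinary `solana_svm::` imports now that the module sits in solana-runtime. A minimal sketch of a runtime-side consumer under that arrangement follows; the helper is illustrative only, not part of the patch, and assumes nothing beyond `TransactionProcessingResult` being a plain `Result` alias, as the import block above implies:

use solana_svm::transaction_processing_result::TransactionProcessingResult;

// Count how many results in a batch were processed at all; transactions
// that never made it through validation come back as Err and contribute
// no account updates worth storing.
fn count_processed(processing_results: &[TransactionProcessingResult]) -> usize {
    processing_results
        .iter()
        .filter(|processing_result| processing_result.is_ok())
        .count()
}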
@@ -158,11 +158,6 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( mod tests { use { super::*, - crate::{ - account_loader::{FeesOnlyTransaction, LoadedTransaction}, - nonce_info::NonceInfo, - transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, - }, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -181,6 +176,11 @@ mod tests { system_instruction, system_program, transaction::{Result, SanitizedTransaction, Transaction, TransactionError}, }, + solana_svm::{ + account_loader::{FeesOnlyTransaction, LoadedTransaction}, + nonce_info::NonceInfo, + transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, + }, std::collections::HashMap, }; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 040729cd5ebf90..82c3985211220b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -35,6 +35,7 @@ //! already been signed and verified. use { crate::{ + account_saver::collect_accounts_to_store, bank::{ builtins::{BuiltinPrototype, BUILTINS, STATELESS_BUILTINS}, metrics::*, @@ -154,7 +155,6 @@ use { solana_svm::{ account_loader::{collect_rent_from_account, LoadedTransaction}, account_overrides::AccountOverrides, - account_saver::collect_accounts_to_store, transaction_commit_result::{CommittedTransaction, TransactionCommitResult}, transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::{ diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c3772735b9ef42..d066628c717b34 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -4,6 +4,7 @@ #[macro_use] extern crate lazy_static; +mod account_saver; pub mod accounts_background_service; pub mod bank; pub mod bank_client; diff --git a/svm/src/lib.rs b/svm/src/lib.rs index a9686f2cf81da9..f3cbbaa0f9cb18 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -3,7 +3,6 @@ pub mod account_loader; pub mod account_overrides; -pub mod account_saver; pub mod message_processor; pub mod nonce_info; pub mod program_loader; From b9c9ecccbb05d9da774d600bdbef2cf210c57fa8 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 30 Aug 2024 11:22:33 -0700 Subject: [PATCH 270/529] Add wen_restart to multinode-demo. (#2648) * Add wen_restart to multinode-demo. * Use skipSetup during wen_restart, otherwise tower will be wiped. * Add comments about why skipSetup is required. * Change comments and use -n instead of comparison. * Add --wen_restart to bootstrap as well. 
--- multinode-demo/bootstrap-validator.sh | 3 +++ multinode-demo/validator.sh | 3 +++ net/net.sh | 12 ++++++++++++ net/remote/remote-node.sh | 10 ++++++++++ 4 files changed, 28 insertions(+) diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 374a9288f11597..471756254cb5db 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -112,6 +112,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --block-production-method ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --wen-restart ]]; then + args+=("$1" "$2") + shift 2 else echo "Unknown argument: $1" $program --help diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 77082f6589245b..d4e081c8893858 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -182,6 +182,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --block-production-method ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --wen-restart ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = -h ]]; then usage "$@" else diff --git a/net/net.sh b/net/net.sh index b82872f5046d8e..94fa429ace5086 100755 --- a/net/net.sh +++ b/net/net.sh @@ -145,6 +145,9 @@ Operate a configured testnet startnode/stopnode-specific options: -i [ip address] - IP Address of the node to start or stop + startnode specific option: + --wen-restart [proto_file] - Use given proto file (create if non-exist) and apply wen_restat + startclients-specific options: $CLIENT_OPTIONS @@ -350,6 +353,7 @@ startBootstrapLeader() { \"$TMPFS_ACCOUNTS\" \ \"$disableQuic\" \ \"$enableUdp\" \ + \"$maybeWenRestart\" \ " ) >> "$logFile" 2>&1 || { @@ -424,6 +428,7 @@ startNode() { \"$TMPFS_ACCOUNTS\" \ \"$disableQuic\" \ \"$enableUdp\" \ + \"$maybeWenRestart\" \ " ) >> "$logFile" 2>&1 & declare pid=$! @@ -836,6 +841,7 @@ disableQuic=false enableUdp=false clientType=tpu-client maybeUseUnstakedConnection="" +maybeWenRestart="" command=$1 [[ -n $command ]] || usage @@ -983,6 +989,12 @@ while [[ -n $1 ]]; do elif [[ $1 = --use-unstaked-connection ]]; then maybeUseUnstakedConnection="$1" shift 1 + elif [[ $1 = --wen-restart ]]; then + # wen_restart needs tower storage to be there, so set skipSetup to true + # to avoid erasing the tower storage on disk. + skipSetup=true + maybeWenRestart="$2" + shift 2 else usage "Unknown long option: $1" fi diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index 882f7891702cde..fe3f6a1d38dbca 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -30,6 +30,7 @@ extraPrimordialStakes="${21:=0}" tmpfsAccounts="${22:false}" disableQuic="${23}" enableUdp="${24}" +maybeWenRestart="${25}" set +x @@ -298,6 +299,11 @@ cat >> ~/solana/on-reboot < faucet.log 2>&1 & EOF fi + + if [[ -n "$maybeWenRestart" ]]; then + args+=(--wen-restart "$maybeWenRestart") + fi + # shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs args+=($extraNodeArgs) @@ -429,6 +435,10 @@ EOF args+=(--tpu-enable-udp) fi + if [[ -n "$maybeWenRestart" ]]; then + args+=(--wen-restart "$maybeWenRestart") + fi + cat >> ~/solana/on-reboot < validator.log.\$now 2>&1 & From dbaea8fe1753e10cf75e7132e7267068760eeb16 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 30 Aug 2024 11:52:47 -0700 Subject: [PATCH 271/529] Wen restart: the argument is one proto file now instead of a directory. (#2800) We only read and write one single proto file for wen_restart now. 
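With the value naming a single file rather than a directory, callers can hand it straight to the restart logic as a path. A hedged sketch of reading the argument with the clap 2 API used here (the `matches` variable and the binding name are assumptions for illustration):

// The flag is optional (and hidden unless forced); absence simply means a
// normal start. `matches` stands in for the parsed clap::ArgMatches.
let wen_restart_path: Option<std::path::PathBuf> = matches
    .value_of("wen_restart")
    .map(std::path::PathBuf::from);

Because the Arg below keeps .conflicts_with("wait_for_supermajority"), clap rejects combining the two restart paths before this value is ever read.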
--- validator/src/cli.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 83b43c07b5b76c..5feba2f801a5e1 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1578,7 +1578,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { Arg::with_name("wen_restart") .long("wen-restart") .hidden(hidden_unless_forced()) - .value_name("DIR") + .value_name("FILE") .takes_value(true) .required(false) .conflicts_with("wait_for_supermajority") From 465934291f4d505e17f9c6e59266149d2a793c34 Mon Sep 17 00:00:00 2001 From: Brooks Date: Sat, 31 Aug 2024 08:37:29 -0400 Subject: [PATCH 272/529] Calls inspect_account() on the fee payer (#2718) --- svm/src/account_loader.rs | 125 ++++++++++++++++- svm/src/transaction_processor.rs | 94 ++++++++++++- svm/tests/integration_test.rs | 230 +++++++++++++++++++++++++++++++ svm/tests/mock_bank.rs | 18 ++- 4 files changed, 462 insertions(+), 5 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index f743e6bcba5860..0799ac8ba3cec7 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -608,12 +608,15 @@ mod tests { transaction::{Result, SanitizedTransaction, Transaction, TransactionError}, transaction_context::{TransactionAccount, TransactionContext}, }, - std::{borrow::Cow, collections::HashMap, sync::Arc}, + std::{borrow::Cow, cell::RefCell, collections::HashMap, sync::Arc}, }; #[derive(Default)] struct TestCallbacks { accounts_map: HashMap, + #[allow(clippy::type_complexity)] + inspected_accounts: + RefCell, /* is_writable */ bool)>>>, } impl TransactionProcessingCallback for TestCallbacks { @@ -624,6 +627,23 @@ mod tests { fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { self.accounts_map.get(pubkey).cloned() } + + fn inspect_account( + &self, + address: &Pubkey, + account_state: AccountState, + is_writable: bool, + ) { + let account = match account_state { + AccountState::Dead => None, + AccountState::Alive(account) => Some(account.clone()), + }; + self.inspected_accounts + .borrow_mut() + .entry(*address) + .or_default() + .push((account, is_writable)); + } } fn load_accounts_with_features_and_rent( @@ -640,7 +660,10 @@ mod tests { for (pubkey, account) in accounts { accounts_map.insert(*pubkey, account.clone()); } - let callbacks = TestCallbacks { accounts_map }; + let callbacks = TestCallbacks { + accounts_map, + ..Default::default() + }; load_accounts( &callbacks, &[sanitized_tx], @@ -929,7 +952,10 @@ mod tests { for (pubkey, account) in accounts { accounts_map.insert(*pubkey, account.clone()); } - let callbacks = TestCallbacks { accounts_map }; + let callbacks = TestCallbacks { + accounts_map, + ..Default::default() + }; load_accounts( &callbacks, &[tx], @@ -2108,4 +2134,97 @@ mod tests { assert_eq!(account.rent_epoch(), 0); assert_eq!(account.lamports(), 0); } + + // Ensure `TransactionProcessingCallback::inspect_account()` is called when + // loading accounts for transaction processing. 
+ #[test] + fn test_inspect_account_non_fee_payer() { + let mut mock_bank = TestCallbacks::default(); + + let address0 = Pubkey::new_unique(); // <-- fee payer + let address1 = Pubkey::new_unique(); // <-- initially alive + let address2 = Pubkey::new_unique(); // <-- initially dead + let address3 = Pubkey::new_unique(); // <-- program + + let mut account0 = AccountSharedData::default(); + account0.set_lamports(1_000_000_000); + mock_bank.accounts_map.insert(address0, account0.clone()); + + let mut account1 = AccountSharedData::default(); + account1.set_lamports(2_000_000_000); + mock_bank.accounts_map.insert(address1, account1.clone()); + + // account2 *not* added to the bank's accounts_map + + let mut account3 = AccountSharedData::default(); + account3.set_lamports(4_000_000_000); + account3.set_executable(true); + account3.set_owner(native_loader::id()); + mock_bank.accounts_map.insert(address3, account3.clone()); + + let message = Message { + account_keys: vec![address0, address1, address2, address3], + header: MessageHeader::default(), + instructions: vec![ + CompiledInstruction { + program_id_index: 3, + accounts: vec![0], + data: vec![], + }, + CompiledInstruction { + program_id_index: 3, + accounts: vec![1, 2], + data: vec![], + }, + CompiledInstruction { + program_id_index: 3, + accounts: vec![1], + data: vec![], + }, + ], + recent_blockhash: Hash::new_unique(), + }; + let sanitized_message = new_unchecked_sanitized_message(message); + let sanitized_transaction = SanitizedTransaction::new_for_tests( + sanitized_message, + vec![Signature::new_unique()], + false, + ); + let validation_result = Ok(ValidatedTransactionDetails { + loaded_fee_payer_account: LoadedTransactionAccount { + account: account0.clone(), + ..LoadedTransactionAccount::default() + }, + ..ValidatedTransactionDetails::default() + }); + let _load_results = load_accounts( + &mock_bank, + &[sanitized_transaction], + vec![validation_result], + &mut TransactionErrorMetrics::default(), + None, + &FeatureSet::default(), + &RentCollector::default(), + &ProgramCacheForTxBatch::default(), + ); + + // ensure the loaded accounts are inspected + let mut actual_inspected_accounts: Vec<_> = mock_bank + .inspected_accounts + .borrow() + .iter() + .map(|(k, v)| (*k, v.clone())) + .collect(); + actual_inspected_accounts.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let mut expected_inspected_accounts = vec![ + // *not* key0, since it is loaded during fee payer validation + (address1, vec![(Some(account1), true)]), + (address2, vec![(None, true)]), + (address3, vec![(Some(account3), false)]), + ]; + expected_inspected_accounts.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + assert_eq!(actual_inspected_accounts, expected_inspected_accounts,); + } } diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 58ca47269d02f5..c6a5e43149949e 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -15,7 +15,7 @@ use { transaction_account_state_info::TransactionAccountStateInfo, transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, - transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_callback::{AccountState, TransactionProcessingCallback}, transaction_processing_result::{ProcessedTransaction, TransactionProcessingResult}, }, log::debug, @@ -434,6 +434,12 @@ impl TransactionBatchProcessor { return Err(TransactionError::AccountNotFound); }; + callbacks.inspect_account( + 
fee_payer_address, + AccountState::Alive(&fee_payer_account), + true, // <-- is_writable + ); + let fee_payer_loaded_rent_epoch = fee_payer_account.rent_epoch(); let fee_payer_rent_debit = collect_rent_from_account( feature_set, @@ -1034,6 +1040,9 @@ mod tests { #[derive(Default, Clone)] pub struct MockBankCallback { pub account_shared_data: Arc>>, + #[allow(clippy::type_complexity)] + pub inspected_accounts: + Arc, /* is_writable */ bool)>>>>, } impl TransactionProcessingCallback for MockBankCallback { @@ -1065,6 +1074,24 @@ mod tests { .unwrap() .insert(*program_id, account_data); } + + fn inspect_account( + &self, + address: &Pubkey, + account_state: AccountState, + is_writable: bool, + ) { + let account = match account_state { + AccountState::Dead => None, + AccountState::Alive(account) => Some(account.clone()), + }; + self.inspected_accounts + .write() + .unwrap() + .entry(*address) + .or_default() + .push((account, is_writable)); + } } #[test] @@ -1853,6 +1880,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -1930,6 +1958,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2014,6 +2043,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2051,6 +2081,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2086,6 +2117,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2177,6 +2209,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2242,6 +2275,7 @@ mod tests { mock_accounts.insert(*fee_payer_address, fee_payer_account.clone()); let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2294,6 +2328,7 @@ mod tests { let mock_bank = MockBankCallback { account_shared_data: Arc::new(RwLock::new(mock_accounts)), + ..Default::default() }; let mut error_counters = TransactionErrorMetrics::default(); @@ -2318,4 +2353,61 @@ mod tests { result.err() ); } + + // Ensure `TransactionProcessingCallback::inspect_account()` is called when + // validating the fee payer, since that's when the fee payer account is loaded. 
+ #[test] + fn test_inspect_account_fee_payer() { + let fee_payer_address = Pubkey::new_unique(); + let fee_payer_account = AccountSharedData::new_rent_epoch( + 123_000_000_000, + 0, + &Pubkey::default(), + RENT_EXEMPT_RENT_EPOCH, + ); + let mock_bank = MockBankCallback::default(); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(fee_payer_address, fee_payer_account.clone()); + + let message = new_unchecked_sanitized_message(Message::new_with_blockhash( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(2000u32), + ComputeBudgetInstruction::set_compute_unit_price(1_000_000_000), + ], + Some(&fee_payer_address), + &Hash::new_unique(), + )); + let batch_processor = TransactionBatchProcessor::::default(); + batch_processor + .validate_transaction_fee_payer( + &mock_bank, + None, + &message, + CheckedTransactionDetails { + nonce: None, + lamports_per_signature: 5000, + }, + &FeatureSet::default(), + &FeeStructure::default(), + &RentCollector::default(), + &mut TransactionErrorMetrics::default(), + ) + .unwrap(); + + // ensure the fee payer is an inspected account + let actual_inspected_accounts: Vec<_> = mock_bank + .inspected_accounts + .read() + .unwrap() + .iter() + .map(|(k, v)| (*k, v.clone())) + .collect(); + assert_eq!( + actual_inspected_accounts.as_slice(), + &[(fee_payer_address, vec![(Some(fee_payer_account), true)])], + ); + } } diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 37a4870812c2d8..08f6db09101e04 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -655,3 +655,233 @@ fn execute_test_entry(test_entry: SvmTestEntry) { } } } + +#[test] +fn svm_inspect_account() { + let mock_bank = MockBankCallback::default(); + let mut expected_inspected_accounts: HashMap<_, Vec<_>> = HashMap::new(); + + let transfer_program = + deploy_program("simple-transfer".to_string(), DEPLOYMENT_SLOT, &mock_bank); + + let fee_payer_keypair = Keypair::new(); + let sender_keypair = Keypair::new(); + + let fee_payer = fee_payer_keypair.pubkey(); + let sender = sender_keypair.pubkey(); + let recipient = Pubkey::new_unique(); + let system = system_program::id(); + + // Setting up the accounts for the transfer + + // fee payer + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(80_020); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(fee_payer, fee_payer_account.clone()); + expected_inspected_accounts + .entry(fee_payer) + .or_default() + .push((Some(fee_payer_account.clone()), true)); + + // sender + let mut sender_account = AccountSharedData::default(); + sender_account.set_lamports(11_000_000); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(sender, sender_account.clone()); + expected_inspected_accounts + .entry(sender) + .or_default() + .push((Some(sender_account.clone()), true)); + + // recipient -- initially dead + expected_inspected_accounts + .entry(recipient) + .or_default() + .push((None, true)); + + let instruction = Instruction::new_with_bytes( + transfer_program, + &u64::to_be_bytes(1_000_000), + vec![ + AccountMeta::new(sender, true), + AccountMeta::new(recipient, false), + AccountMeta::new_readonly(system, false), + ], + ); + let transaction = Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair, &sender_keypair], + Hash::default(), + ); + let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); + let transaction_check = Ok(CheckedTransactionDetails { + 
nonce: None, + lamports_per_signature: 20, + }); + + // Load and execute the transaction + + let batch_processor = TransactionBatchProcessor::::new( + EXECUTION_SLOT, + EXECUTION_EPOCH, + HashSet::new(), + ); + + let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); + + create_executable_environment( + fork_graph.clone(), + &mock_bank, + &mut batch_processor.program_cache.write().unwrap(), + ); + + // The sysvars must be put in the cache + batch_processor.fill_missing_sysvar_cache_entries(&mock_bank); + register_builtins(&mock_bank, &batch_processor); + + let _result = batch_processor.load_and_execute_sanitized_transactions( + &mock_bank, + &[sanitized_transaction], + vec![transaction_check], + &TransactionProcessingEnvironment::default(), + &TransactionProcessingConfig::default(), + ); + + // the system account is modified during transaction processing, + // so set the expected inspected account afterwards. + let system_account = mock_bank + .account_shared_data + .read() + .unwrap() + .get(&system) + .cloned(); + expected_inspected_accounts + .entry(system) + .or_default() + .push((system_account, false)); + + // do another transfer; recipient should be alive now + + // fee payer + let mut fee_payer_account = AccountSharedData::default(); + fee_payer_account.set_lamports(80_000); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(fee_payer, fee_payer_account.clone()); + expected_inspected_accounts + .entry(fee_payer) + .or_default() + .push((Some(fee_payer_account.clone()), true)); + + // sender + let mut sender_account = AccountSharedData::default(); + sender_account.set_lamports(10_000_000); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(sender, sender_account.clone()); + expected_inspected_accounts + .entry(sender) + .or_default() + .push((Some(sender_account.clone()), true)); + + // recipient -- now alive + let mut recipient_account = AccountSharedData::default(); + recipient_account.set_lamports(1_000_000); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(recipient, recipient_account.clone()); + expected_inspected_accounts + .entry(recipient) + .or_default() + .push((Some(recipient_account.clone()), true)); + + let instruction = Instruction::new_with_bytes( + transfer_program, + &u64::to_be_bytes(456), + vec![ + AccountMeta::new(sender, true), + AccountMeta::new(recipient, false), + AccountMeta::new_readonly(system, false), + ], + ); + let transaction = Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair, &sender_keypair], + Hash::default(), + ); + let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); + let transaction_check = Ok(CheckedTransactionDetails { + nonce: None, + lamports_per_signature: 20, + }); + + // Load and execute the second transaction + let _result = batch_processor.load_and_execute_sanitized_transactions( + &mock_bank, + &[sanitized_transaction], + vec![transaction_check], + &TransactionProcessingEnvironment::default(), + &TransactionProcessingConfig::default(), + ); + + // the system account is modified during transaction processing, + // so set the expected inspected account afterwards. 
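+    // as with the first transfer, the system program is an instruction
+    // account that is not writable in the transaction, so the expected
+    // tuple below pairs it with is_writable == false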
+ let system_account = mock_bank + .account_shared_data + .read() + .unwrap() + .get(&system) + .cloned(); + expected_inspected_accounts + .entry(system) + .or_default() + .push((system_account, false)); + + // Ensure all the expected inspected accounts were inspected + let actual_inspected_accounts = mock_bank.inspected_accounts.read().unwrap().clone(); + for (expected_pubkey, expected_account) in &expected_inspected_accounts { + let actual_account = actual_inspected_accounts.get(expected_pubkey).unwrap(); + assert_eq!( + expected_account, actual_account, + "pubkey: {expected_pubkey}", + ); + } + + // The transfer program account is also loaded during transaction processing, however the + // account state passed to `inspect_account()` is *not* the same as what is held by + // MockBankCallback::account_shared_data. So we check the transfer program differently. + // + // First ensure we have the correct number of inspected accounts, correctly counting the + // transfer program. + let num_expected_inspected_accounts: usize = + expected_inspected_accounts.values().map(Vec::len).sum(); + let num_actual_inspected_accounts: usize = + actual_inspected_accounts.values().map(Vec::len).sum(); + assert_eq!( + num_expected_inspected_accounts + 2, + num_actual_inspected_accounts, + ); + + // And second, ensure the inspected transfer program accounts are alive and not writable. + let actual_transfer_program_accounts = + actual_inspected_accounts.get(&transfer_program).unwrap(); + for actual_transfer_program_account in actual_transfer_program_accounts { + assert!(actual_transfer_program_account.0.is_some()); + assert!(!actual_transfer_program_account.1); + } +} diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index ee6a6692a7b0fe..6ece85bfcc1385 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -25,7 +25,7 @@ use { sysvar::SysvarId, }, solana_svm::{ - transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_callback::{AccountState, TransactionProcessingCallback}, transaction_processor::TransactionBatchProcessor, }, solana_type_overrides::sync::{Arc, RwLock}, @@ -56,6 +56,9 @@ impl ForkGraph for MockForkGraph { pub struct MockBankCallback { pub feature_set: Arc, pub account_shared_data: Arc>>, + #[allow(clippy::type_complexity)] + pub inspected_accounts: + Arc, /* is_writable */ bool)>>>>, } impl TransactionProcessingCallback for MockBankCallback { @@ -87,6 +90,19 @@ impl TransactionProcessingCallback for MockBankCallback { .unwrap() .insert(*program_id, account_data); } + + fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) { + let account = match account_state { + AccountState::Dead => None, + AccountState::Alive(account) => Some(account.clone()), + }; + self.inspected_accounts + .write() + .unwrap() + .entry(*address) + .or_default() + .push((account, is_writable)); + } } impl MockBankCallback { From eb37b21d4d5ed29d1bf40c9ca7c64509681a2a09 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Mon, 2 Sep 2024 18:55:02 +0900 Subject: [PATCH 273/529] [syscalls] Update mod exp compute cost (#2807) --- compute-budget/src/compute_budget.rs | 11 ++++++++--- programs/bpf_loader/src/syscalls/mod.rs | 12 ++++++++---- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/compute-budget/src/compute_budget.rs b/compute-budget/src/compute_budget.rs index da04296a7e3080..5539e812645349 100644 --- a/compute-budget/src/compute_budget.rs +++ b/compute-budget/src/compute_budget.rs @@ -103,8 +103,12 @@ pub struct 
ComputeBudget {
     /// + alt_bn128_pairing_one_pair_cost_other * (num_elems - 1)
     pub alt_bn128_pairing_one_pair_cost_first: u64,
     pub alt_bn128_pairing_one_pair_cost_other: u64,
-    /// Big integer modular exponentiation cost
-    pub big_modular_exponentiation_cost: u64,
+    /// Big integer modular exponentiation base cost
+    pub big_modular_exponentiation_base_cost: u64,
+    /// Big integer modular exponentiation cost divisor
+    /// The modular exponentiation cost is computed as
+    /// `input_length`^2 / `big_modular_exponentiation_cost_divisor` + `big_modular_exponentiation_base_cost`
+    pub big_modular_exponentiation_cost_divisor: u64,
     /// Coefficient `a` of the quadratic function which determines the number
     /// of compute units consumed to call poseidon syscall for a given number
     /// of inputs.
@@ -180,7 +184,8 @@ impl ComputeBudget {
             alt_bn128_multiplication_cost: 3_840,
             alt_bn128_pairing_one_pair_cost_first: 36_364,
             alt_bn128_pairing_one_pair_cost_other: 12_121,
-            big_modular_exponentiation_cost: 33,
+            big_modular_exponentiation_base_cost: 190,
+            big_modular_exponentiation_cost_divisor: 2,
             poseidon_cost_coefficient_a: 61,
             poseidon_cost_coefficient_c: 542,
             get_remaining_compute_units_cost: 100,
diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index 47105457372988..7661a000da2938 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -1710,13 +1710,15 @@ declare_builtin_function!(
         let input_len: u64 = std::cmp::max(input_len, params.modulus_len);

         let budget = invoke_context.get_compute_budget();
+        // the compute units are calculated by the quadratic equation `0.5 * input_len^2 + 190`, e.g. 512^2 / 2 + 190 = 131_262 CUs for a 512-byte input
        consume_compute_meter(
             invoke_context,
             budget.syscall_base_cost.saturating_add(
                 input_len
                     .saturating_mul(input_len)
-                    .checked_div(budget.big_modular_exponentiation_cost)
-                    .unwrap_or(u64::MAX),
+                    .checked_div(budget.big_modular_exponentiation_cost_divisor)
+                    .unwrap_or(u64::MAX)
+                    .saturating_add(budget.big_modular_exponentiation_base_cost),
             ),
         )?;
@@ -4719,7 +4721,8 @@ mod tests {
         let budget = invoke_context.get_compute_budget();
         invoke_context.mock_set_remaining(
             budget.syscall_base_cost
-                + (MAX_LEN * MAX_LEN) / budget.big_modular_exponentiation_cost,
+                + (MAX_LEN * MAX_LEN) / budget.big_modular_exponentiation_cost_divisor
+                + budget.big_modular_exponentiation_base_cost,
         );

         let result = SyscallBigModExp::rust(
@@ -4760,7 +4763,8 @@ mod tests {
         let budget = invoke_context.get_compute_budget();
         invoke_context.mock_set_remaining(
             budget.syscall_base_cost
-                + (INV_LEN * INV_LEN) / budget.big_modular_exponentiation_cost,
+                + (INV_LEN * INV_LEN) / budget.big_modular_exponentiation_cost_divisor
+                + budget.big_modular_exponentiation_base_cost,
         );

         let result = SyscallBigModExp::rust(
From 1ee7da583704c4181732f5a97f8d0419674fcf12 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 3 Sep 2024 11:36:20 +0800
Subject: [PATCH 274/529] build(deps): bump indexmap from 2.4.0 to 2.5.0 (#2811)

* build(deps): bump indexmap from 2.4.0 to 2.5.0

Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.4.0 to 2.5.0.
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.4.0...2.5.0)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50235b9769c27c..5f4fb761132966 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2575,7 +2575,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util 0.7.11", @@ -2946,9 +2946,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -5174,7 +5174,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -5537,7 +5537,7 @@ dependencies = [ "dashmap", "ed25519-dalek", "index_list", - "indexmap 2.4.0", + "indexmap 2.5.0", "itertools 0.12.1", "lazy_static", "libsecp256k1", @@ -6040,7 +6040,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.4.0", + "indexmap 2.5.0", "indicatif", "log", "quinn", @@ -6130,7 +6130,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.4.0", + "indexmap 2.5.0", "indicatif", "log", "rand 0.8.5", @@ -6502,7 +6502,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.4.0", + "indexmap 2.5.0", "itertools 0.12.1", "log", "lru", @@ -7659,7 +7659,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.4.0", + "indexmap 2.5.0", "itertools 0.12.1", "libc", "log", @@ -7848,7 +7848,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.4.0", + "indexmap 2.5.0", "indicatif", "pickledb", "serde", @@ -7899,7 +7899,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.4.0", + "indexmap 2.5.0", "indicatif", "log", "rayon", @@ -9132,7 +9132,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "toml_datetime", "winnow 0.5.16", ] @@ -9143,7 +9143,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index fe3592daaa5042..37fa7dec73acf7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -258,7 +258,7 @@ hyper = "0.14.30" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.13" -indexmap = "2.4.0" +indexmap = "2.5.0" indicatif = "0.17.8" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index e2d707ac3271ce..5ee3696d3ce2cc 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1943,7 +1943,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - 
"indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util 0.7.1", @@ -2288,9 +2288,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4313,7 +4313,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -4558,7 +4558,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "index_list", - "indexmap 2.4.0", + "indexmap 2.5.0", "itertools 0.12.1", "lazy_static", "log", @@ -4803,7 +4803,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.4.0", + "indexmap 2.5.0", "indicatif", "log", "quinn", @@ -4861,7 +4861,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.4.0", + "indexmap 2.5.0", "log", "rand 0.8.5", "rayon", @@ -5101,7 +5101,7 @@ dependencies = [ "clap", "crossbeam-channel", "flate2", - "indexmap 2.4.0", + "indexmap 2.5.0", "itertools 0.12.1", "log", "lru", @@ -6408,7 +6408,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.4.0", + "indexmap 2.5.0", "itertools 0.12.1", "libc", "log", @@ -6546,7 +6546,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.4.0", + "indexmap 2.5.0", "indicatif", "log", "rayon", @@ -7595,7 +7595,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "toml_datetime", "winnow", ] From 85bd69288a4e1283e1801413d205b642f5f9b8f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:17:38 +0800 Subject: [PATCH 275/529] build(deps): bump async-trait from 0.1.81 to 0.1.82 (#2813) * build(deps): bump async-trait from 0.1.81 to 0.1.82 Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.81 to 0.1.82. - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.81...0.1.82) --- updated-dependencies: - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f4fb761132966..8be84ee3f696ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -718,9 +718,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 37fa7dec73acf7..c74a3401499e95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -188,7 +188,7 @@ assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" async-mutex = "1.4.0" -async-trait = "0.1.81" +async-trait = "0.1.82" atty = "0.2.11" backoff = "0.4.0" base64 = "0.22.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5ee3696d3ce2cc..069a56abe7ed8f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -503,9 +503,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", From fb507d7d9173058d05557c603f8b5d905264fa12 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:17:48 +0800 Subject: [PATCH 276/529] build(deps): bump syn from 2.0.76 to 2.0.77 (#2812) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.76 to 2.0.77. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.76...2.0.77) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 68 +++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8be84ee3f696ed..4d108ba3497fb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -724,7 +724,7 @@ checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -877,7 +877,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1034,7 +1034,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "syn_derive", ] @@ -1166,7 +1166,7 @@ checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1777,7 +1777,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1788,7 +1788,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1850,7 +1850,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1974,7 +1974,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2080,7 +2080,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2350,7 +2350,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3676,7 +3676,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3749,7 +3749,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4374,7 +4374,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5098,7 +5098,7 @@ checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5153,7 +5153,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5203,7 +5203,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6426,7 +6426,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6853,7 +6853,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "toml 0.8.12", ] @@ -7497,7 +7497,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -8354,7 +8354,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", 
"spl-discriminator-syn", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -8366,7 +8366,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.76", + "syn 2.0.77", "thiserror", ] @@ -8425,7 +8425,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -8613,9 +8613,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.76" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -8631,7 +8631,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -8817,7 +8817,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -8829,7 +8829,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "test-case-core", ] @@ -8865,7 +8865,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -9002,7 +9002,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -9246,7 +9246,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -9556,7 +9556,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-shared", ] @@ -9590,7 +9590,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9949,7 +9949,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -9969,7 +9969,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] From 6e23e69f092fa98cdf172c21e30b244ff1e4bfdf Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 3 Sep 2024 17:57:13 +0800 Subject: [PATCH 277/529] chore: bump curve25519-dalek from 3.2.1 to 4.1.3 (#2252) * bump curve25519-dalek from 3.2.1 to 4.1.3 * Update .github/scripts/downstream-project-spl-common.sh Co-authored-by: samkim-crypto * Update .github/scripts/downstream-project-spl-common.sh Co-authored-by: samkim-crypto * Update .github/scripts/downstream-project-spl-common.sh Co-authored-by: samkim-crypto * Update Cargo.toml Co-authored-by: samkim-crypto * remove opt level hack * add comment for opt level --------- Co-authored-by: samkim-crypto --- .../scripts/downstream-project-spl-common.sh | 3 + Cargo.lock | 88 +++++++++++-------- Cargo.toml | 44 +++------- curves/curve25519/src/edwards.rs | 12 ++- curves/curve25519/src/ristretto.rs | 12 ++- curves/curve25519/src/scalar.rs | 8 +- perf/src/sigverify.rs | 4 +- programs/sbf/Cargo.lock | 77 +++++++++------- sdk/program/src/pubkey.rs | 16 ++-- zk-sdk/Cargo.toml | 4 +- zk-sdk/src/encryption/elgamal.rs | 15 ++-- zk-sdk/src/encryption/pedersen.rs | 12 ++- zk-sdk/src/range_proof/generators.rs | 4 +- 
zk-sdk/src/range_proof/inner_product.rs | 8 +- zk-sdk/src/range_proof/mod.rs | 15 ++-- zk-sdk/src/range_proof/util.rs | 12 +-- .../ciphertext_ciphertext_equality.rs | 2 +- .../ciphertext_commitment_equality.rs | 2 +- .../grouped_ciphertext_validity/handles_2.rs | 2 +- .../grouped_ciphertext_validity/handles_3.rs | 2 +- zk-sdk/src/sigma_proofs/mod.rs | 15 ++-- .../src/sigma_proofs/percentage_with_cap.rs | 2 +- zk-sdk/src/sigma_proofs/pubkey_validity.rs | 4 +- zk-sdk/src/sigma_proofs/zero_ciphertext.rs | 2 +- zk-token-sdk/Cargo.toml | 4 +- zk-token-sdk/src/encryption/elgamal.rs | 28 ++++-- zk-token-sdk/src/encryption/pedersen.rs | 11 ++- zk-token-sdk/src/instruction/zero_balance.rs | 2 +- zk-token-sdk/src/range_proof/generators.rs | 4 +- zk-token-sdk/src/range_proof/inner_product.rs | 8 +- zk-token-sdk/src/range_proof/mod.rs | 15 ++-- zk-token-sdk/src/range_proof/util.rs | 12 +-- .../ciphertext_ciphertext_equality_proof.rs | 2 +- .../ciphertext_commitment_equality_proof.rs | 2 +- zk-token-sdk/src/sigma_proofs/fee_proof.rs | 2 +- .../handles_2.rs | 2 +- .../handles_3.rs | 2 +- zk-token-sdk/src/sigma_proofs/mod.rs | 15 ++-- zk-token-sdk/src/sigma_proofs/pubkey_proof.rs | 4 +- .../src/sigma_proofs/zero_balance_proof.rs | 2 +- 40 files changed, 272 insertions(+), 208 deletions(-) diff --git a/.github/scripts/downstream-project-spl-common.sh b/.github/scripts/downstream-project-spl-common.sh index 779af8f2568110..e70c10d070b7e9 100644 --- a/.github/scripts/downstream-project-spl-common.sh +++ b/.github/scripts/downstream-project-spl-common.sh @@ -27,3 +27,6 @@ fi # anza migration stopgap. can be removed when agave is fully recommended for public usage. sed -i 's/solana-geyser-plugin-interface/agave-geyser-plugin-interface/g' ./Cargo.toml + +# should be removed when spl bump their curve25519-dalek +sed -i "s/^curve25519-dalek =.*/curve25519-dalek = \"4.1.3\"/" token/confidential-transfer/proof-generation/Cargo.toml diff --git a/Cargo.lock b/Cargo.lock index 4d108ba3497fb8..fbea4c6dc74cc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -951,7 +951,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -963,7 +963,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", "generic-array 0.14.7", ] @@ -985,12 +984,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "borsh" version = "0.10.3" @@ -1746,16 +1739,45 @@ dependencies = [ [[package]] name = "curve25519-dalek" version = "3.2.1" -source = "git+https://github.com/anza-xyz/curve25519-dalek.git?rev=b500cdc2a920cd5bff9e2dd974d7b97349d61464#b500cdc2a920cd5bff9e2dd974d7b97349d61464" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" dependencies = [ "byteorder", "digest 0.9.0", "rand_core 0.5.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rand_core 0.6.4", + "rustc_version 0.4.1", "serde", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.76", +] + [[package]] name = "darling" version = "0.20.1" @@ -2010,7 +2032,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.1", "ed25519", "rand 0.7.3", "serde", @@ -2185,6 +2207,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "filedescriptor" version = "0.8.1" @@ -5277,18 +5305,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - [[package]] name = "sha3" version = "0.10.8" @@ -6273,7 +6289,7 @@ version = "2.1.0" dependencies = [ "bytemuck", "bytemuck_derive", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "solana-program", "thiserror", ] @@ -6866,7 +6882,7 @@ dependencies = [ "bincode", "bv", "caps", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "dlopen2", "fnv", "lazy_static", @@ -6957,7 +6973,7 @@ dependencies = [ "bytemuck_derive", "console_error_panic_hook", "console_log", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "getrandom 0.2.10", "itertools 0.12.1", "js-sys", @@ -6978,7 +6994,7 @@ dependencies = [ "serde_json", "serial_test", "sha2 0.10.8", - "sha3 0.10.8", + "sha3", "solana-atomic-u64", "solana-decode-error", "solana-define-syscall", @@ -7439,7 +7455,7 @@ dependencies = [ "bytemuck_derive", "byteorder", "chrono", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "derivation-path", "digest 0.10.7", "ed25519-dalek", @@ -7469,7 +7485,7 @@ dependencies = [ "serde_json", "serde_with", "sha2 0.10.8", - "sha3 0.10.8", + "sha3", "siphasher", "solana-bn254", "solana-decode-error", @@ -8202,17 +8218,17 @@ dependencies = [ "bincode", "bytemuck", "bytemuck_derive", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "lazy_static", "merlin", "num-derive", "num-traits", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_derive", "serde_json", - "sha3 0.9.1", + "sha3", "solana-program", "solana-sdk", "subtle", @@ -8227,7 +8243,7 @@ version = "2.1.0" dependencies = [ "bytemuck", "criterion", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "num-derive", "num-traits", "solana-log-collector", @@ -8241,7 +8257,7 @@ name = "solana-zk-token-proof-program-tests" version = "2.1.0" dependencies = [ "bytemuck", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "solana-compute-budget", "solana-program-test", "solana-sdk", @@ -8258,17 +8274,17 @@ dependencies = [ 
"bytemuck", "bytemuck_derive", "byteorder", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "lazy_static", "merlin", "num-derive", "num-traits", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_derive", "serde_json", - "sha3 0.9.1", + "sha3", "solana-curve25519", "solana-program", "solana-sdk", diff --git a/Cargo.toml b/Cargo.toml index c74a3401499e95..77eb27a151acf2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -220,7 +220,7 @@ criterion-stats = "0.3.0" crossbeam-channel = "0.5.13" csv = "1.3.0" ctrlc = "3.4.5" -curve25519-dalek = "3.2.1" +curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } derivative = "2.2.0" @@ -543,38 +543,16 @@ solana-program = { path = "sdk/program" } solana-zk-sdk = { path = "zk-sdk" } solana-zk-token-sdk = { path = "zk-token-sdk" } -# Our dependency tree has `curve25519-dalek` v3.2.1. They have removed the -# constraint in the next major release. The commit that removes the `zeroize` -# constraint was added to multiple release branches, but not to the 3.2 branch. -# -# `curve25519-dalek` maintainers are saying they do not want to invest any more -# time in the 3.2 release: -# -# https://github.com/dalek-cryptography/curve25519-dalek/issues/452#issuecomment-1749809428 -# -# So we have to fork and create our own release, based on v3.2.1, with the -# commit that removed `zeroize` constraint on the `main` branch cherry-picked on -# top. -# -# `curve25519-dalek` v3.2.1 release: -# -# https://github.com/dalek-cryptography/curve25519-dalek/releases/tag/3.2.1 -# -# Corresponds to commit -# -# https://github.com/dalek-cryptography/curve25519-dalek/commit/29e5c29b0e5c6821e4586af58b0d0891dd2ec639 -# -# Comparison with `b500cdc2a920cd5bff9e2dd974d7b97349d61464`: -# -# https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.1...solana-labs:curve25519-dalek:b500cdc2a920cd5bff9e2dd974d7b97349d61464 -# -# Or, using the branch name instead of the hash: -# -# https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.1...solana-labs:curve25519-dalek:3.2.1-unpin-zeroize -# -[patch.crates-io.curve25519-dalek] -git = "https://github.com/anza-xyz/curve25519-dalek.git" -rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464" +# curve25519-dalek uses the simd backend by default in v4 if possible, +# which has very slow performance on some platforms with opt-level 0, +# which is the default for dev and test builds. +# This slowdown causes certain interactions in the solana-test-validator, +# such as verifying ZK proofs in transactions, to take much more than 400ms, +# creating problems in the testing environment. +# To enable better performance in solana-test-validator during tests and dev builds, +# we override the opt-level to 3 for the crate. 
+[profile.dev.package.curve25519-dalek] +opt-level = 3 # Solana RPC nodes experience stalls when running with `tokio` containing this # commit: diff --git a/curves/curve25519/src/edwards.rs b/curves/curve25519/src/edwards.rs index 4de6bf81456601..4b4893da6410d2 100644 --- a/curves/curve25519/src/edwards.rs +++ b/curves/curve25519/src/edwards.rs @@ -63,7 +63,10 @@ mod target_arch { type Error = Curve25519Error; fn try_from(pod: &PodEdwardsPoint) -> Result { - CompressedEdwardsY::from_slice(&pod.0) + let Ok(compressed_edwards_y) = CompressedEdwardsY::from_slice(&pod.0) else { + return Err(Curve25519Error::PodConversion); + }; + compressed_edwards_y .decompress() .ok_or(Curve25519Error::PodConversion) } @@ -73,9 +76,10 @@ mod target_arch { type Point = Self; fn validate_point(&self) -> bool { - CompressedEdwardsY::from_slice(&self.0) - .decompress() - .is_some() + let Ok(compressed_edwards_y) = CompressedEdwardsY::from_slice(&self.0) else { + return false; + }; + compressed_edwards_y.decompress().is_some() } } diff --git a/curves/curve25519/src/ristretto.rs b/curves/curve25519/src/ristretto.rs index e0b47c15f1dfbe..1c71bfeed95fd3 100644 --- a/curves/curve25519/src/ristretto.rs +++ b/curves/curve25519/src/ristretto.rs @@ -63,7 +63,10 @@ mod target_arch { type Error = Curve25519Error; fn try_from(pod: &PodRistrettoPoint) -> Result { - CompressedRistretto::from_slice(&pod.0) + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(&pod.0) else { + return Err(Curve25519Error::PodConversion); + }; + compressed_ristretto .decompress() .ok_or(Curve25519Error::PodConversion) } @@ -73,9 +76,10 @@ mod target_arch { type Point = Self; fn validate_point(&self) -> bool { - CompressedRistretto::from_slice(&self.0) - .decompress() - .is_some() + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(&self.0) else { + return false; + }; + compressed_ristretto.decompress().is_some() } } diff --git a/curves/curve25519/src/scalar.rs b/curves/curve25519/src/scalar.rs index f840a27c1b4980..5df77de1aa1e01 100644 --- a/curves/curve25519/src/scalar.rs +++ b/curves/curve25519/src/scalar.rs @@ -18,7 +18,9 @@ mod target_arch { type Error = Curve25519Error; fn try_from(pod: &PodScalar) -> Result { - Scalar::from_canonical_bytes(pod.0).ok_or(Curve25519Error::PodConversion) + Scalar::from_canonical_bytes(pod.0) + .into_option() + .ok_or(Curve25519Error::PodConversion) } } @@ -32,7 +34,9 @@ mod target_arch { type Error = Curve25519Error; fn try_from(pod: PodScalar) -> Result { - Scalar::from_canonical_bytes(pod.0).ok_or(Curve25519Error::PodConversion) + Scalar::from_canonical_bytes(pod.0) + .into_option() + .ok_or(Curve25519Error::PodConversion) } } } diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index e10675270471ea..d58f27879ba393 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -1276,7 +1276,7 @@ mod tests { for _ in 0..1_000_000 { thread_rng().fill(&mut input); let ans = get_checked_scalar(&input); - let ref_ans = Scalar::from_canonical_bytes(input); + let ref_ans = Scalar::from_canonical_bytes(input).into_option(); if let Some(ref_ans) = ref_ans { passed += 1; assert_eq!(ans.unwrap(), ref_ans.to_bytes()); @@ -1311,7 +1311,7 @@ mod tests { for _ in 0..1_000_000 { thread_rng().fill(&mut input); let ans = check_packed_ge_small_order(&input); - let ref_ge = CompressedEdwardsY::from_slice(&input); + let ref_ge = CompressedEdwardsY::from_slice(&input).unwrap(); if let Some(ref_element) = ref_ge.decompress() { if ref_element.is_small_order() { assert!(!ans); diff --git 
a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 069a56abe7ed8f..b1758b3277f087 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -709,7 +709,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", "generic-array", ] @@ -722,12 +721,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "borsh" version = "0.10.3" @@ -1219,11 +1212,39 @@ dependencies = [ "byteorder 1.5.0", "digest 0.9.0", "rand_core 0.5.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rand_core 0.6.4", + "rustc_version", "serde", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.58", +] + [[package]] name = "darling" version = "0.20.1" @@ -1461,7 +1482,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.1", "ed25519", "rand 0.7.3", "serde", @@ -1639,6 +1660,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "filetime" version = "0.2.10" @@ -4368,18 +4395,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", -] - [[package]] name = "sha3" version = "0.10.8" @@ -4977,7 +4992,7 @@ version = "2.1.0" dependencies = [ "bytemuck", "bytemuck_derive", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "solana-program", "thiserror", ] @@ -5320,7 +5335,7 @@ dependencies = [ "bincode", "bv", "caps", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "dlopen2", "fnv", "lazy_static", @@ -5379,7 +5394,7 @@ dependencies = [ "bytemuck_derive", "console_error_panic_hook", "console_log", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "getrandom 0.2.10", "js-sys", "lazy_static", @@ -5394,7 +5409,7 @@ dependencies = [ "serde_bytes", "serde_derive", "sha2 0.10.8", - "sha3 0.10.8", + "sha3", "solana-atomic-u64", "solana-decode-error", "solana-define-syscall", @@ -6272,7 +6287,7 @@ dependencies = [ "serde_json", "serde_with", "sha2 0.10.8", - "sha3 0.10.8", + "sha3", "siphasher", "solana-bn254", "solana-decode-error", @@ -6768,17 +6783,17 @@ dependencies = [ 
"bincode", "bytemuck", "bytemuck_derive", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "lazy_static", "merlin", "num-derive", "num-traits", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_derive", "serde_json", - "sha3 0.9.1", + "sha3", "solana-program", "solana-sdk", "subtle", @@ -6809,17 +6824,17 @@ dependencies = [ "bytemuck", "bytemuck_derive", "byteorder 1.5.0", - "curve25519-dalek", + "curve25519-dalek 4.1.3", "itertools 0.12.1", "lazy_static", "merlin", "num-derive", "num-traits", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_derive", "serde_json", - "sha3 0.9.1", + "sha3", "solana-curve25519", "solana-program", "solana-sdk", diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 5d3433c1247f90..1c1dcaed092544 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -172,9 +172,12 @@ impl TryFrom<&str> for Pubkey { pub fn bytes_are_curve_point>(_bytes: T) -> bool { #[cfg(not(target_os = "solana"))] { - curve25519_dalek::edwards::CompressedEdwardsY::from_slice(_bytes.as_ref()) - .decompress() - .is_some() + let Ok(compressed_edwards_y) = + curve25519_dalek::edwards::CompressedEdwardsY::from_slice(_bytes.as_ref()) + else { + return false; + }; + compressed_edwards_y.decompress().is_some() } #[cfg(target_os = "solana")] unimplemented!(); @@ -933,12 +936,7 @@ mod tests { if let Ok(program_address) = Pubkey::create_program_address(&[&bytes1, &bytes2], &program_id) { - let is_on_curve = curve25519_dalek::edwards::CompressedEdwardsY::from_slice( - &program_address.to_bytes(), - ) - .decompress() - .is_some(); - assert!(!is_on_curve); + assert!(!program_address.is_on_curve()); assert!(!addresses.contains(&program_address)); addresses.push(program_address); } diff --git a/zk-sdk/Cargo.toml b/zk-sdk/Cargo.toml index 236c7a5e06dbf3..c7ffa7569e0069 100644 --- a/zk-sdk/Cargo.toml +++ b/zk-sdk/Cargo.toml @@ -28,11 +28,11 @@ bincode = { workspace = true } curve25519-dalek = { workspace = true, features = ["serde"] } itertools = { workspace = true } lazy_static = { workspace = true } -rand = { version = "0.7" } +rand = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } -sha3 = "0.9" +sha3 = { workspace = true } solana-sdk = { workspace = true } subtle = { workspace = true } zeroize = { workspace = true, features = ["zeroize_derive"] } diff --git a/zk-sdk/src/encryption/elgamal.rs b/zk-sdk/src/encryption/elgamal.rs index c0d90fb148d26c..2ba02df21c9d5d 100644 --- a/zk-sdk/src/encryption/elgamal.rs +++ b/zk-sdk/src/encryption/elgamal.rs @@ -316,7 +316,7 @@ impl ElGamalPubkey { /// Derives the `ElGamalPubkey` that uniquely corresponds to an `ElGamalSecretKey`. 
pub fn new(secret: &ElGamalSecretKey) -> Self { let s = &secret.0; - assert!(s != &Scalar::zero()); + assert!(s != &Scalar::ZERO); ElGamalPubkey(s.invert() * &(*H)) } @@ -380,9 +380,12 @@ impl TryFrom<&[u8]> for ElGamalPubkey { if bytes.len() != ELGAMAL_PUBKEY_LEN { return Err(ElGamalError::PubkeyDeserialization); } + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return Err(ElGamalError::PubkeyDeserialization); + }; Ok(ElGamalPubkey( - CompressedRistretto::from_slice(bytes) + compressed_ristretto .decompress() .ok_or(ElGamalError::PubkeyDeserialization)?, )) @@ -551,6 +554,7 @@ impl TryFrom<&[u8]> for ElGamalSecretKey { match bytes.try_into() { Ok(bytes) => Ok(ElGamalSecretKey::from( Scalar::from_canonical_bytes(bytes) + .into_option() .ok_or(ElGamalError::SecretKeyDeserialization)?, )), _ => Err(ElGamalError::SecretKeyDeserialization), @@ -737,10 +741,11 @@ impl DecryptHandle { if bytes.len() != DECRYPT_HANDLE_LEN { return None; } + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return None; + }; - Some(DecryptHandle( - CompressedRistretto::from_slice(bytes).decompress()?, - )) + compressed_ristretto.decompress().map(DecryptHandle) } } diff --git a/zk-sdk/src/encryption/pedersen.rs b/zk-sdk/src/encryption/pedersen.rs index dfa6f93dcf5ec9..2dc20cb0b520bf 100644 --- a/zk-sdk/src/encryption/pedersen.rs +++ b/zk-sdk/src/encryption/pedersen.rs @@ -89,7 +89,9 @@ impl PedersenOpening { pub fn from_bytes(bytes: &[u8]) -> Option { match bytes.try_into() { - Ok(bytes) => Scalar::from_canonical_bytes(bytes).map(PedersenOpening), + Ok(bytes) => Scalar::from_canonical_bytes(bytes) + .into_option() + .map(PedersenOpening), _ => None, } } @@ -183,9 +185,11 @@ impl PedersenCommitment { return None; } - Some(PedersenCommitment( - CompressedRistretto::from_slice(bytes).decompress()?, - )) + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return None; + }; + + compressed_ristretto.decompress().map(PedersenCommitment) } } diff --git a/zk-sdk/src/range_proof/generators.rs b/zk-sdk/src/range_proof/generators.rs index 901f4f83027a41..42bacbec55e71e 100644 --- a/zk-sdk/src/range_proof/generators.rs +++ b/zk-sdk/src/range_proof/generators.rs @@ -4,14 +4,14 @@ use { digest::{ExtendableOutput, Update, XofReader}, ristretto::RistrettoPoint, }, - sha3::{Sha3XofReader, Shake256}, + sha3::{Shake256, Shake256Reader}, }; const MAX_GENERATOR_LENGTH: usize = u32::MAX as usize; /// Generators for Pedersen vector commitments that are used for inner-product proofs. 
struct GeneratorsChain { - reader: Sha3XofReader, + reader: Shake256Reader, } impl GeneratorsChain { diff --git a/zk-sdk/src/range_proof/inner_product.rs b/zk-sdk/src/range_proof/inner_product.rs index d45a38a9afa4ff..fef991817e016c 100644 --- a/zk-sdk/src/range_proof/inner_product.rs +++ b/zk-sdk/src/range_proof/inner_product.rs @@ -412,8 +412,10 @@ impl InnerProductProof { let pos = 2 * lg_n * 32; let a = Scalar::from_canonical_bytes(util::read32(&slice[pos..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let b = Scalar::from_canonical_bytes(util::read32(&slice[pos + 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; Ok(InnerProductProof { L_vec, R_vec, a, b }) @@ -441,7 +443,7 @@ mod tests { let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut OsRng)).collect(); let c = util::inner_product(&a, &b).unwrap(); - let G_factors: Vec = iter::repeat(Scalar::one()).take(n).collect(); + let G_factors: Vec = iter::repeat(Scalar::ONE).take(n).collect(); let y_inv = Scalar::random(&mut OsRng); let H_factors: Vec = util::exp_iter(y_inv).take(n).collect(); @@ -478,7 +480,7 @@ mod tests { assert!(proof .verify( n, - iter::repeat(Scalar::one()).take(n), + iter::repeat(Scalar::ONE).take(n), util::exp_iter(y_inv).take(n), &P, &Q, @@ -493,7 +495,7 @@ mod tests { assert!(proof .verify( n, - iter::repeat(Scalar::one()).take(n), + iter::repeat(Scalar::ONE).take(n), util::exp_iter(y_inv).take(n), &P, &Q, diff --git a/zk-sdk/src/range_proof/mod.rs b/zk-sdk/src/range_proof/mod.rs index 3cae18a8bb0f3c..fb2431dfa1d5ae 100644 --- a/zk-sdk/src/range_proof/mod.rs +++ b/zk-sdk/src/range_proof/mod.rs @@ -180,16 +180,16 @@ impl RangeProof { let mut i = 0; let mut exp_z = z * z; - let mut exp_y = Scalar::one(); + let mut exp_y = Scalar::ONE; for (amount_i, n_i) in amounts.iter().zip(bit_lengths.iter()) { - let mut exp_2 = Scalar::one(); + let mut exp_2 = Scalar::ONE; for j in 0..(*n_i) { // `j` is guaranteed to be at most `u64::BITS` (a 6-bit number) and therefore, // casting is lossless and right shift can be safely unwrapped let a_L_j = Scalar::from(amount_i.checked_shr(j as u32).unwrap() & 1); - let a_R_j = a_L_j - Scalar::one(); + let a_R_j = a_L_j - Scalar::ONE; l_poly.0[i] = a_L_j - z; l_poly.1[i] = s_L[i]; @@ -224,7 +224,7 @@ impl RangeProof { // z^2 * V_1 + z^3 * V_2 + ... 
+ z^{m+1} * V_m + delta(y, z)*G + x*T_1 + x^2*T_2 let x = transcript.challenge_scalar(b"x"); - let mut agg_opening = Scalar::zero(); + let mut agg_opening = Scalar::ZERO; let mut exp_z = z; for opening in openings { exp_z *= z; @@ -255,7 +255,7 @@ impl RangeProof { let w = transcript.challenge_scalar(b"w"); let Q = w * &(*G); - let G_factors: Vec = iter::repeat(Scalar::one()).take(nm).collect(); + let G_factors: Vec = iter::repeat(Scalar::ONE).take(nm).collect(); let H_factors: Vec = util::exp_iter(y.invert()).take(nm).collect(); // generate challenge `c` for consistency with the verifier's transcript @@ -358,7 +358,7 @@ impl RangeProof { let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); let mega_check = RistrettoPoint::optional_multiscalar_mul( - iter::once(Scalar::one()) + iter::once(Scalar::ONE) .chain(iter::once(x)) .chain(iter::once(c * x)) .chain(iter::once(c * x * x)) @@ -421,10 +421,13 @@ impl RangeProof { let T_2 = CompressedRistretto(util::read32(&slice[3 * 32..])); let t_x = Scalar::from_canonical_bytes(util::read32(&slice[4 * 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let t_x_blinding = Scalar::from_canonical_bytes(util::read32(&slice[5 * 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let e_blinding = Scalar::from_canonical_bytes(util::read32(&slice[6 * 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let ipp_proof = InnerProductProof::from_bytes(&slice[7 * 32..])?; diff --git a/zk-sdk/src/range_proof/util.rs b/zk-sdk/src/range_proof/util.rs index 29af3821596cc3..fecaf8460381b7 100644 --- a/zk-sdk/src/range_proof/util.rs +++ b/zk-sdk/src/range_proof/util.rs @@ -9,7 +9,7 @@ pub struct VecPoly1(pub Vec, pub Vec); impl VecPoly1 { pub fn zero(n: usize) -> Self { - VecPoly1(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + VecPoly1(vec![Scalar::ZERO; n], vec![Scalar::ZERO; n]) } pub fn inner_product(&self, rhs: &VecPoly1) -> Option { @@ -30,7 +30,7 @@ impl VecPoly1 { pub fn eval(&self, x: Scalar) -> Vec { let n = self.0.len(); - let mut out = vec![Scalar::zero(); n]; + let mut out = vec![Scalar::ZERO; n]; #[allow(clippy::needless_range_loop)] for i in 0..n { out[i] = self.0[i] + self.1[i] * x; @@ -72,7 +72,7 @@ impl Iterator for ScalarExp { /// Return an iterator of the powers of `x`. pub fn exp_iter(x: Scalar) -> ScalarExp { - let next_exp_x = Scalar::one(); + let next_exp_x = Scalar::ONE; ScalarExp { x, next_exp_x } } @@ -81,7 +81,7 @@ pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { // throw some error //println!("lengths of vectors don't match for vector addition"); } - let mut out = vec![Scalar::zero(); b.len()]; + let mut out = vec![Scalar::ZERO; b.len()]; for i in 0..a.len() { out[i] = a[i] + b[i]; } @@ -101,7 +101,7 @@ pub fn read32(data: &[u8]) -> [u8; 32] { /// \\] /// Errors if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. 
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Option { - let mut out = Scalar::zero(); + let mut out = Scalar::ZERO; if a.len() != b.len() { return None; } @@ -123,7 +123,7 @@ pub fn sum_of_powers(x: &Scalar, n: usize) -> Scalar { return Scalar::from(n as u64); } let mut m = n; - let mut result = Scalar::one() + x; + let mut result = Scalar::ONE + x; let mut factor = *x; while m > 2 { factor = factor * factor; diff --git a/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs b/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs index 910d0a9a3ea556..319a889b760681 100644 --- a/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs +++ b/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs @@ -189,7 +189,7 @@ impl CiphertextCiphertextEqualityProof { vec![ &self.z_s, // z_s &(-&c), // -c - &(-&Scalar::one()), // -identity + &(-&Scalar::ONE), // -identity &(&w * &self.z_x), // w * z_x &(&w * &self.z_s), // w * z_s &(&w_negated * &c), // -w * c diff --git a/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs b/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs index 341d8e5a3aee2b..3f5fc58e3e2799 100644 --- a/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs +++ b/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs @@ -176,7 +176,7 @@ impl CiphertextCommitmentEqualityProof { vec![ &self.z_s, // z_s &(-&c), // -c - &(-&Scalar::one()), // -identity + &(-&Scalar::ONE), // -identity &(&w * &self.z_x), // w * z_x &(&w * &self.z_s), // w * z_s &(&w_negated * &c), // -w * c diff --git a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs index 2b5ecd44dc5843..688d3cf73d5c88 100644 --- a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs +++ b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs @@ -176,7 +176,7 @@ impl GroupedCiphertext2HandlesValidityProof { &self.z_r, // z_r &self.z_x, // z_x &(-&c), // -c - &-(&Scalar::one()), // -identity + &-(&Scalar::ONE), // -identity &(&w * &self.z_r), // w * z_r &(&w_negated * &c), // -w * c &w_negated, // -w diff --git a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs index a825eabb6235af..ab917b6b7319ee 100644 --- a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs +++ b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs @@ -197,7 +197,7 @@ impl GroupedCiphertext3HandlesValidityProof { &self.z_r, // z_r &self.z_x, // z_x &(-&c), // -c - &-(&Scalar::one()), // -identity + &-(&Scalar::ONE), // -identity &(&w * &self.z_r), // w * z_r &(&w_negated * &c), // -w * c &w_negated, // -w diff --git a/zk-sdk/src/sigma_proofs/mod.rs b/zk-sdk/src/sigma_proofs/mod.rs index f6d6c8d9557890..c8b847a61399b3 100644 --- a/zk-sdk/src/sigma_proofs/mod.rs +++ b/zk-sdk/src/sigma_proofs/mod.rs @@ -66,10 +66,15 @@ use { fn ristretto_point_from_optional_slice( optional_slice: Option<&[u8]>, ) -> Result { - optional_slice - .and_then(|slice| (slice.len() == RISTRETTO_POINT_LEN).then_some(slice)) - .map(CompressedRistretto::from_slice) - .ok_or(SigmaProofVerificationError::Deserialization) + let Some(slice) = optional_slice else { + return Err(SigmaProofVerificationError::Deserialization); + }; + + if slice.len() != RISTRETTO_POINT_LEN { + return Err(SigmaProofVerificationError::Deserialization); + } + + CompressedRistretto::from_slice(slice).map_err(|_| SigmaProofVerificationError::Deserialization) } /// Deserializes 
an optional slice of bytes to a scalar. @@ -83,6 +88,6 @@ fn canonical_scalar_from_optional_slice( optional_slice .and_then(|slice| (slice.len() == SCALAR_LEN).then_some(slice)) // if chunk is the wrong length, convert to None .and_then(|slice| slice.try_into().ok()) // convert to array - .and_then(Scalar::from_canonical_bytes) + .and_then(|slice| Scalar::from_canonical_bytes(slice).into_option()) .ok_or(SigmaProofVerificationError::Deserialization) } diff --git a/zk-sdk/src/sigma_proofs/percentage_with_cap.rs b/zk-sdk/src/sigma_proofs/percentage_with_cap.rs index 18a8e1efe5cadb..64e2b1794753d8 100644 --- a/zk-sdk/src/sigma_proofs/percentage_with_cap.rs +++ b/zk-sdk/src/sigma_proofs/percentage_with_cap.rs @@ -393,7 +393,7 @@ impl PercentageWithCapProof { c_max_proof, -c_max_proof * m, -z_max, - Scalar::one(), + Scalar::ONE, w * z_x, w * z_delta_real, -w * c_equality, diff --git a/zk-sdk/src/sigma_proofs/pubkey_validity.rs b/zk-sdk/src/sigma_proofs/pubkey_validity.rs index 4166a543b77e76..5e3c08f5b0630a 100644 --- a/zk-sdk/src/sigma_proofs/pubkey_validity.rs +++ b/zk-sdk/src/sigma_proofs/pubkey_validity.rs @@ -65,7 +65,7 @@ impl PubkeyValidityProof { // extract the relevant scalar and Ristretto points from the input let s = elgamal_keypair.secret().get_scalar(); - assert!(s != &Scalar::zero()); + assert!(s != &Scalar::ZERO); let s_inv = s.invert(); // generate a random masking factor that also serves as a nonce @@ -109,7 +109,7 @@ impl PubkeyValidityProof { .ok_or(SigmaProofVerificationError::Deserialization)?; let check = RistrettoPoint::vartime_multiscalar_mul( - vec![&self.z, &(-&c), &(-&Scalar::one())], + vec![&self.z, &(-&c), &(-&Scalar::ONE)], vec![&(*H), P, &Y], ); diff --git a/zk-sdk/src/sigma_proofs/zero_ciphertext.rs b/zk-sdk/src/sigma_proofs/zero_ciphertext.rs index 498758aaa9b295..f598210af6018b 100644 --- a/zk-sdk/src/sigma_proofs/zero_ciphertext.rs +++ b/zk-sdk/src/sigma_proofs/zero_ciphertext.rs @@ -136,7 +136,7 @@ impl ZeroCiphertextProof { vec![ &self.z, // z &(-&c), // -c - &(-&Scalar::one()), // -identity + &(-&Scalar::ONE), // -identity &(&w * &self.z), // w * z &(&w_negated * &c), // -w * c &w_negated, // -w diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index cc4f785550a73b..20d43f998f2eaa 100644 --- a/zk-token-sdk/Cargo.toml +++ b/zk-token-sdk/Cargo.toml @@ -30,11 +30,11 @@ curve25519-dalek = { workspace = true, features = ["serde"] } itertools = { workspace = true } lazy_static = { workspace = true } merlin = { workspace = true } -rand = { version = "0.7" } +rand = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } -sha3 = "0.9" +sha3 = { workspace = true } solana-sdk = { workspace = true } subtle = { workspace = true } zeroize = { workspace = true, features = ["zeroize_derive"] } diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index e90b98920abfd4..d0fcaf8b48bd41 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -359,7 +359,7 @@ impl ElGamalPubkey { #[allow(non_snake_case)] pub fn new(secret: &ElGamalSecretKey) -> Self { let s = &secret.0; - assert!(s != &Scalar::zero()); + assert_ne!(s, &Scalar::ZERO); ElGamalPubkey(s.invert() * &(*H)) } @@ -378,10 +378,11 @@ impl ElGamalPubkey { if bytes.len() != ELGAMAL_PUBKEY_LEN { return None; } + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return None; + }; - Some(ElGamalPubkey( - 
CompressedRistretto::from_slice(bytes).decompress()?, - )) + compressed_ristretto.decompress().map(ElGamalPubkey) } /// Encrypts an amount under the public key. @@ -441,8 +442,12 @@ impl TryFrom<&[u8]> for ElGamalPubkey { return Err(ElGamalError::PubkeyDeserialization); } + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return Err(ElGamalError::PubkeyDeserialization); + }; + Ok(ElGamalPubkey( - CompressedRistretto::from_slice(bytes) + compressed_ristretto .decompress() .ok_or(ElGamalError::PubkeyDeserialization)?, )) @@ -553,7 +558,9 @@ impl ElGamalSecretKey { #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option<ElGamalSecretKey> { match bytes.try_into() { - Ok(bytes) => Scalar::from_canonical_bytes(bytes).map(ElGamalSecretKey), + Ok(bytes) => Scalar::from_canonical_bytes(bytes) + .map(ElGamalSecretKey) + .into(), _ => None, } } @@ -612,6 +619,7 @@ impl TryFrom<&[u8]> for ElGamalSecretKey { match bytes.try_into() { Ok(bytes) => Ok(ElGamalSecretKey::from( Scalar::from_canonical_bytes(bytes) + .into_option() .ok_or(ElGamalError::SecretKeyDeserialization)?, )), _ => Err(ElGamalError::SecretKeyDeserialization), @@ -800,9 +808,11 @@ impl DecryptHandle { return None; } - Some(DecryptHandle( - CompressedRistretto::from_slice(bytes).decompress()?, - )) + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return None; + }; + + compressed_ristretto.decompress().map(DecryptHandle) } } diff --git a/zk-token-sdk/src/encryption/pedersen.rs b/zk-token-sdk/src/encryption/pedersen.rs index 2de593771590e6..bb3f94e43a329d 100644 --- a/zk-token-sdk/src/encryption/pedersen.rs +++ b/zk-token-sdk/src/encryption/pedersen.rs @@ -99,7 +99,9 @@ impl PedersenOpening { pub fn from_bytes(bytes: &[u8]) -> Option<PedersenOpening> { match bytes.try_into() { - Ok(bytes) => Scalar::from_canonical_bytes(bytes).map(PedersenOpening), + Ok(bytes) => Scalar::from_canonical_bytes(bytes) + .map(PedersenOpening) + .into(), _ => None, } } @@ -192,10 +194,11 @@ impl PedersenCommitment { if bytes.len() != PEDERSEN_COMMITMENT_LEN { return None; } + let Ok(compressed_ristretto) = CompressedRistretto::from_slice(bytes) else { + return None; + }; - Some(PedersenCommitment( - CompressedRistretto::from_slice(bytes).decompress()?, - )) + compressed_ristretto.decompress().map(PedersenCommitment) } } diff --git a/zk-token-sdk/src/instruction/zero_balance.rs b/zk-token-sdk/src/instruction/zero_balance.rs index 7671fb21cc4569..12edda8f89a0ff 100644 --- a/zk-token-sdk/src/instruction/zero_balance.rs +++ b/zk-token-sdk/src/instruction/zero_balance.rs @@ -1,7 +1,7 @@ //! The zero-balance proof instruction. //! //! A zero-balance proof is defined with respect to a twisted ElGamal ciphertext. The proof -//! certifies that a given ciphertext encrypts the message 0 in the field (`Scalar::zero()`). To +//! certifies that a given ciphertext encrypts the message 0 in the field (`Scalar::ZERO`). To //! generate the proof, a prover must provide the decryption key for the ciphertext. 
#[cfg(not(target_os = "solana"))] diff --git a/zk-token-sdk/src/range_proof/generators.rs b/zk-token-sdk/src/range_proof/generators.rs index b33ce32001a359..f452cd74dde376 100644 --- a/zk-token-sdk/src/range_proof/generators.rs +++ b/zk-token-sdk/src/range_proof/generators.rs @@ -4,7 +4,7 @@ use { digest::{ExtendableOutput, Update, XofReader}, ristretto::RistrettoPoint, }, - sha3::{Sha3XofReader, Shake256}, + sha3::{Shake256, Shake256Reader}, }; #[cfg(not(target_os = "solana"))] @@ -12,7 +12,7 @@ const MAX_GENERATOR_LENGTH: usize = u32::MAX as usize; /// Generators for Pedersen vector commitments that are used for inner-product proofs. struct GeneratorsChain { - reader: Sha3XofReader, + reader: Shake256Reader, } impl GeneratorsChain { diff --git a/zk-token-sdk/src/range_proof/inner_product.rs b/zk-token-sdk/src/range_proof/inner_product.rs index 44e8e0674a3d6a..3360dcf8809fba 100644 --- a/zk-token-sdk/src/range_proof/inner_product.rs +++ b/zk-token-sdk/src/range_proof/inner_product.rs @@ -412,8 +412,10 @@ impl InnerProductProof { let pos = 2 * lg_n * 32; let a = Scalar::from_canonical_bytes(util::read32(&slice[pos..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let b = Scalar::from_canonical_bytes(util::read32(&slice[pos + 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; Ok(InnerProductProof { L_vec, R_vec, a, b }) @@ -442,7 +444,7 @@ mod tests { let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut OsRng)).collect(); let c = util::inner_product(&a, &b).unwrap(); - let G_factors: Vec<Scalar> = iter::repeat(Scalar::one()).take(n).collect(); + let G_factors: Vec<Scalar> = iter::repeat(Scalar::ONE).take(n).collect(); let y_inv = Scalar::random(&mut OsRng); let H_factors: Vec<Scalar> = util::exp_iter(y_inv).take(n).collect(); @@ -479,7 +481,7 @@ mod tests { assert!(proof .verify( n, - iter::repeat(Scalar::one()).take(n), + iter::repeat(Scalar::ONE).take(n), util::exp_iter(y_inv).take(n), &P, &Q, @@ -494,7 +496,7 @@ mod tests { assert!(proof .verify( n, - iter::repeat(Scalar::one()).take(n), + iter::repeat(Scalar::ONE).take(n), util::exp_iter(y_inv).take(n), &P, &Q, diff --git a/zk-token-sdk/src/range_proof/mod.rs b/zk-token-sdk/src/range_proof/mod.rs index 32dac961f507df..200194e8079abd 100644 --- a/zk-token-sdk/src/range_proof/mod.rs +++ b/zk-token-sdk/src/range_proof/mod.rs @@ -149,16 +149,16 @@ impl RangeProof { let mut i = 0; let mut exp_z = z * z; - let mut exp_y = Scalar::one(); + let mut exp_y = Scalar::ONE; for (amount_i, n_i) in amounts.iter().zip(bit_lengths.iter()) { - let mut exp_2 = Scalar::one(); + let mut exp_2 = Scalar::ONE; for j in 0..(*n_i) { // `j` is guaranteed to be at most `u64::BITS` (a 6-bit number) and therefore, // casting is lossless and right shift can be safely unwrapped let a_L_j = Scalar::from(amount_i.checked_shr(j as u32).unwrap() & 1); - let a_R_j = a_L_j - Scalar::one(); + let a_R_j = a_L_j - Scalar::ONE; l_poly.0[i] = a_L_j - z; l_poly.1[i] = s_L[i]; @@ -193,7 +193,7 @@ impl RangeProof { // z^2 * V_1 + z^3 * V_2 + ... 
+ z^{m+1} * V_m + delta(y, z)*G + x*T_1 + x^2*T_2 let x = transcript.challenge_scalar(b"x"); - let mut agg_opening = Scalar::zero(); + let mut agg_opening = Scalar::ZERO; let mut exp_z = z; for opening in openings { exp_z *= z; @@ -224,7 +224,7 @@ impl RangeProof { let w = transcript.challenge_scalar(b"w"); let Q = w * &(*G); - let G_factors: Vec<Scalar> = iter::repeat(Scalar::one()).take(nm).collect(); + let G_factors: Vec<Scalar> = iter::repeat(Scalar::ONE).take(nm).collect(); let H_factors: Vec<Scalar> = util::exp_iter(y.invert()).take(nm).collect(); // generate challenge `c` for consistency with the verifier's transcript @@ -325,7 +325,7 @@ impl RangeProof { let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); let mega_check = RistrettoPoint::optional_multiscalar_mul( - iter::once(Scalar::one()) + iter::once(Scalar::ONE) .chain(iter::once(x)) .chain(iter::once(c * x)) .chain(iter::once(c * x * x)) @@ -388,10 +388,13 @@ impl RangeProof { let T_2 = CompressedRistretto(util::read32(&slice[3 * 32..])); let t_x = Scalar::from_canonical_bytes(util::read32(&slice[4 * 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let t_x_blinding = Scalar::from_canonical_bytes(util::read32(&slice[5 * 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let e_blinding = Scalar::from_canonical_bytes(util::read32(&slice[6 * 32..])) + .into_option() .ok_or(RangeProofVerificationError::Deserialization)?; let ipp_proof = InnerProductProof::from_bytes(&slice[7 * 32..])?; diff --git a/zk-token-sdk/src/range_proof/util.rs b/zk-token-sdk/src/range_proof/util.rs index a656e73d8a03d9..2054829e3254b0 100644 --- a/zk-token-sdk/src/range_proof/util.rs +++ b/zk-token-sdk/src/range_proof/util.rs @@ -8,7 +8,7 @@ pub struct VecPoly1(pub Vec<Scalar>, pub Vec<Scalar>); impl VecPoly1 { pub fn zero(n: usize) -> Self { - VecPoly1(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + VecPoly1(vec![Scalar::ZERO; n], vec![Scalar::ZERO; n]) } pub fn inner_product(&self, rhs: &VecPoly1) -> Option<Scalar> { @@ -29,7 +29,7 @@ impl VecPoly1 { pub fn eval(&self, x: Scalar) -> Vec<Scalar> { let n = self.0.len(); - let mut out = vec![Scalar::zero(); n]; + let mut out = vec![Scalar::ZERO; n]; #[allow(clippy::needless_range_loop)] for i in 0..n { out[i] = self.0[i] + self.1[i] * x; @@ -71,7 +71,7 @@ impl Iterator for ScalarExp { /// Return an iterator of the powers of `x`. pub fn exp_iter(x: Scalar) -> ScalarExp { - let next_exp_x = Scalar::one(); + let next_exp_x = Scalar::ONE; ScalarExp { x, next_exp_x } } @@ -80,7 +80,7 @@ pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec<Scalar> { // throw some error //println!("lengths of vectors don't match for vector addition"); } - let mut out = vec![Scalar::zero(); b.len()]; + let mut out = vec![Scalar::ZERO; b.len()]; for i in 0..a.len() { out[i] = a[i] + b[i]; } @@ -100,7 +100,7 @@ pub fn read32(data: &[u8]) -> [u8; 32] { /// \\] /// Errors if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. 
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Option<Scalar> { - let mut out = Scalar::zero(); + let mut out = Scalar::ZERO; if a.len() != b.len() { return None; } @@ -122,7 +122,7 @@ pub fn sum_of_powers(x: &Scalar, n: usize) -> Scalar { return Scalar::from(n as u64); } let mut m = n; - let mut result = Scalar::one() + x; + let mut result = Scalar::ONE + x; let mut factor = *x; while m > 2 { factor = factor * factor; diff --git a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs index 565ba9b4360920..0c296fc2df6477 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_ciphertext_equality_proof.rs @@ -189,7 +189,7 @@ impl CiphertextCiphertextEqualityProof { vec![ &self.z_s, // z_s &(-&c), // -c - &(-&Scalar::one()), // -identity + &(-&Scalar::ONE), // -identity &(&w * &self.z_x), // w * z_x &(&w * &self.z_s), // w * z_s &(&w_negated * &c), // -w * c diff --git a/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs b/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs index 768b07b216cdbe..0b361ffbcf52c4 100644 --- a/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/ciphertext_commitment_equality_proof.rs @@ -177,7 +177,7 @@ impl CiphertextCommitmentEqualityProof { vec![ &self.z_s, // z_s &(-&c), // -c - &(-&Scalar::one()), // -identity + &(-&Scalar::ONE), // -identity &(&w * &self.z_x), // w * z_x &(&w * &self.z_s), // w * z_s &(&w_negated * &c), // -w * c diff --git a/zk-token-sdk/src/sigma_proofs/fee_proof.rs b/zk-token-sdk/src/sigma_proofs/fee_proof.rs index 5cb67553e26276..308570ad45b4b6 100644 --- a/zk-token-sdk/src/sigma_proofs/fee_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/fee_proof.rs @@ -358,7 +358,7 @@ impl FeeSigmaProof { c_max_proof, -c_max_proof * m, -z_max, - Scalar::one(), + Scalar::ONE, w * z_x, w * z_delta_real, -w * c_equality, diff --git a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs index be45e969cf1257..e72e1c0c4d94a1 100644 --- a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs +++ b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_2.rs @@ -172,7 +172,7 @@ impl GroupedCiphertext2HandlesValidityProof { &self.z_r, // z_r &self.z_x, // z_x &(-&c), // -c - &-(&Scalar::one()), // -identity + &-(&Scalar::ONE), // -identity &(&w * &self.z_r), // w * z_r &(&w_negated * &c), // -w * c &w_negated, // -w diff --git a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs index 1324292315a04c..9b9c533a35b9f3 100644 --- a/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs +++ b/zk-token-sdk/src/sigma_proofs/grouped_ciphertext_validity_proof/handles_3.rs @@ -201,7 +201,7 @@ impl GroupedCiphertext3HandlesValidityProof { &self.z_r, // z_r &self.z_x, // z_x &(-&c), // -c - &-(&Scalar::one()), // -identity + &-(&Scalar::ONE), // -identity &(&w * &self.z_r), // w * z_r &(&w_negated * &c), // -w * c &w_negated, // -w diff --git a/zk-token-sdk/src/sigma_proofs/mod.rs b/zk-token-sdk/src/sigma_proofs/mod.rs index bad707157a8c33..38a6dde20816ca 100644 --- a/zk-token-sdk/src/sigma_proofs/mod.rs +++ b/zk-token-sdk/src/sigma_proofs/mod.rs @@ -36,10 +36,15  
@@ use { fn ristretto_point_from_optional_slice( optional_slice: Option<&[u8]>, ) -> Result<CompressedRistretto, SigmaProofVerificationError> { - optional_slice - .and_then(|slice| (slice.len() == RISTRETTO_POINT_LEN).then_some(slice)) - .map(CompressedRistretto::from_slice) - .ok_or(SigmaProofVerificationError::Deserialization) + let Some(slice) = optional_slice else { + return Err(SigmaProofVerificationError::Deserialization); + }; + + if slice.len() != RISTRETTO_POINT_LEN { + return Err(SigmaProofVerificationError::Deserialization); + } + + CompressedRistretto::from_slice(slice).map_err(|_| SigmaProofVerificationError::Deserialization) } /// Deserializes an optional slice of bytes to a scalar. @@ -53,6 +58,6 @@ fn canonical_scalar_from_optional_slice( optional_slice .and_then(|slice| (slice.len() == SCALAR_LEN).then_some(slice)) // if chunk is the wrong length, convert to None .and_then(|slice| slice.try_into().ok()) // convert to array - .and_then(Scalar::from_canonical_bytes) + .and_then(|bytes| Scalar::from_canonical_bytes(bytes).into()) .ok_or(SigmaProofVerificationError::Deserialization) } diff --git a/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs b/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs index 16f6c3b25d81ef..2498a1e68686b5 100644 --- a/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/pubkey_proof.rs @@ -65,7 +65,7 @@ impl PubkeyValidityProof { // extract the relevant scalar and Ristretto points from the input let s = elgamal_keypair.secret().get_scalar(); - assert!(s != &Scalar::zero()); + assert!(s != &Scalar::ZERO); let s_inv = s.invert(); // generate a random masking factor that also serves as a nonce @@ -109,7 +109,7 @@ impl PubkeyValidityProof { .ok_or(SigmaProofVerificationError::Deserialization)?; let check = RistrettoPoint::vartime_multiscalar_mul( - vec![&self.z, &(-&c), &(-&Scalar::one())], + vec![&self.z, &(-&c), &(-&Scalar::ONE)], vec![&(*H), P, &Y], ); diff --git a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs index 3585978c76c1df..9a20cb4fefba34 100644 --- a/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/zero_balance_proof.rs @@ -136,7 +136,7 @@ impl ZeroBalanceProof { vec![ &self.z, // z &(-&c), // -c - &(-&Scalar::one()), // -identity + &(-&Scalar::ONE), // -identity &(&w * &self.z), // w * z &(&w_negated * &c), // -w * c &w_negated, // -w From 570cdf1f8a11d76d037302378b85176d3acf5e5a Mon Sep 17 00:00:00 2001 From: osrm <90407222+osrm@users.noreply.github.com> Date: Tue, 3 Sep 2024 19:04:15 +0900 Subject: [PATCH 278/529] docs: fix typo backwards-compatibility.md (#2815) "seperately" -> "separately" --- docs/src/backwards-compatibility.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/backwards-compatibility.md b/docs/src/backwards-compatibility.md index 843bc1d4ebab5c..cc6c4a8689f0b6 100644 --- a/docs/src/backwards-compatibility.md +++ b/docs/src/backwards-compatibility.md @@ -7,7 +7,7 @@ breaking API and behavior changes affecting applications and tooling built for S In a perfect world, Solana development could continue at a very fast pace without ever causing issues for existing developers. However, some compromises will need to be made and so this document attempts to clarify and codify the process for new releases. Furthermore, -there will be a growing number of validator clients maintained seperately by distinct teams. +there will be a growing number of validator clients maintained separately by distinct teams. 
Coordinating across these teams to ensure the reliability of the network will require ongoing communication. From 0259407b909a4c6cf62a9a46b4e5b8bab9bfbb4e Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 3 Sep 2024 18:06:58 +0800 Subject: [PATCH 279/529] fix: uncommitted Cargo.lock (#2818) fix Cargo.lock --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index fbea4c6dc74cc0..1467f4e8105338 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1775,7 +1775,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] From 6e83982ea6dd29b7a7a0d6151a02b32c4d5cd9d7 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 3 Sep 2024 08:03:02 -0500 Subject: [PATCH 280/529] account_saver: optionally collect txs (#2793) --- accounts-db/src/accounts_db.rs | 5 + runtime/src/account_saver.rs | 261 ++++++++++++++++++++++----------- runtime/src/bank.rs | 1 + 3 files changed, 178 insertions(+), 89 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 8a71e8ad765eba..f358331b69021f 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2708,6 +2708,11 @@ impl AccountsDb { self.base_working_path.clone() } + /// Returns true if there is an accounts update notifier. + pub fn has_accounts_update_notifier(&self) -> bool { + self.accounts_update_notifier.is_some() + } + fn next_id(&self) -> AccountsFileId { let next_id = self.next_id.fetch_add(1, Ordering::AcqRel); assert!( diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index 4875413539107b..ec8049617108eb 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -45,10 +45,11 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( processing_results: &'a mut [TransactionProcessingResult], durable_nonce: &DurableNonce, lamports_per_signature: u64, + collect_transactions: bool, ) -> (Vec<(&'a Pubkey, &'a AccountSharedData)>, Option<Vec<&'a T>>) { let collect_capacity = max_number_of_accounts_to_collect(txs, processing_results); let mut accounts = Vec::with_capacity(collect_capacity); - let mut transactions = Vec::with_capacity(collect_capacity); + let mut transactions = collect_transactions.then(|| Vec::with_capacity(collect_capacity)); for (processing_result, transaction) in processing_results.iter_mut().zip(txs) { let Some(processed_tx) = processing_result.processed_transaction_mut() else { // Don't store any accounts if tx wasn't executed @@ -87,12 +88,12 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( } } } - (accounts, Some(transactions)) + (accounts, transactions) } fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Vec<&'a T>, + collected_account_transactions: &mut Option<Vec<&'a T>>, transaction: &'a T, transaction_accounts: &'a [TransactionAccount], ) { @@ -109,13 +110,15 @@ fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( }) { collected_accounts.push((address, account)); - collected_account_transactions.push(transaction); + if let Some(collected_account_transactions) = collected_account_transactions { + collected_account_transactions.push(transaction); + } } } fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Vec<&'a T>, + collected_account_transactions: &mut 
Option<Vec<&'a T>>, transaction: &'a T, rollback_accounts: &'a mut RollbackAccounts, durable_nonce: &DurableNonce, @@ -125,7 +128,9 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( match rollback_accounts { RollbackAccounts::FeePayerOnly { fee_payer_account } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); - collected_account_transactions.push(transaction); + if let Some(collected_account_transactions) = collected_account_transactions { + collected_account_transactions.push(transaction); + } } RollbackAccounts::SameNonceAndFeePayer { nonce } => { // Since we know we are dealing with a valid nonce account, @@ -134,14 +139,18 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( .try_advance_nonce(*durable_nonce, lamports_per_signature) .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); - collected_account_transactions.push(transaction); + if let Some(collected_account_transactions) = collected_account_transactions { + collected_account_transactions.push(transaction); + } } RollbackAccounts::SeparateNonceAndFeePayer { nonce, fee_payer_account, } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); - collected_account_transactions.push(transaction); + if let Some(collected_account_transactions) = collected_account_transactions { + collected_account_transactions.push(transaction); + } // Since we know we are dealing with a valid nonce account, // unwrap is safe here @@ -149,7 +158,9 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( .try_advance_nonce(*durable_nonce, lamports_per_signature) .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); - collected_account_transactions.push(transaction); + if let Some(collected_account_transactions) = collected_account_transactions { + collected_account_transactions.push(transaction); + } } } } @@ -284,20 +295,32 @@ mod tests { ]; let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 2); - let (collected_accounts, transactions) = - collect_accounts_to_store(&txs, &mut processing_results, &DurableNonce::default(), 0); - assert_eq!(collected_accounts.len(), 2); - assert!(collected_accounts - .iter() - .any(|(pubkey, _account)| *pubkey == &keypair0.pubkey())); - assert!(collected_accounts - .iter() - .any(|(pubkey, _account)| *pubkey == &keypair1.pubkey())); - - let transactions = transactions.unwrap(); - assert_eq!(transactions.len(), 2); - assert!(transactions.iter().any(|txn| (*txn).eq(&tx0))); - assert!(transactions.iter().any(|txn| (*txn).eq(&tx1))); + + for collect_transactions in [false, true] { + let (collected_accounts, transactions) = collect_accounts_to_store( + &txs, + &mut processing_results, + &DurableNonce::default(), + 0, + collect_transactions, + ); + assert_eq!(collected_accounts.len(), 2); + assert!(collected_accounts + .iter() + .any(|(pubkey, _account)| *pubkey == &keypair0.pubkey())); + assert!(collected_accounts + .iter() + .any(|(pubkey, _account)| *pubkey == &keypair1.pubkey())); + + if collect_transactions { + let transactions = transactions.unwrap(); + assert_eq!(transactions.len(), 2); + assert!(transactions.iter().any(|txn| (*txn).eq(&tx0))); + assert!(transactions.iter().any(|txn| (*txn).eq(&tx1))); + } else { + assert!(transactions.is_none()); + } + } } #[test] @@ -343,18 +366,33 @@ mod tests { let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); let durable_nonce = 
DurableNonce::from_blockhash(&Hash::new_unique()); - let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); - assert_eq!(collected_accounts.len(), 1); - assert_eq!( - collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &from_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(), - from_account_pre, - ); + + for collect_transactions in [false, true] { + let (collected_accounts, transactions) = collect_accounts_to_store( + &txs, + &mut processing_results, + &durable_nonce, + 0, + collect_transactions, + ); + assert_eq!(collected_accounts.len(), 1); + assert_eq!( + collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &from_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(), + from_account_pre, + ); + + if collect_transactions { + let transactions = transactions.unwrap(); + assert_eq!(transactions.len(), collected_accounts.len()); + } else { + assert!(transactions.is_none()); + } + } } #[test] @@ -428,33 +466,48 @@ mod tests { )]; let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 2); - let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); - assert_eq!(collected_accounts.len(), 2); - assert_eq!( - collected_accounts + + for collect_transactions in [false, true] { + let (collected_accounts, transactions) = collect_accounts_to_store( + &txs, + &mut processing_results, + &durable_nonce, + 0, + collect_transactions, + ); + assert_eq!(collected_accounts.len(), 2); + assert_eq!( + collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &from_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(), + from_account_pre, + ); + let collected_nonce_account = collected_accounts .iter() - .find(|(pubkey, _account)| *pubkey == &from_address) + .find(|(pubkey, _account)| *pubkey == &nonce_address) .map(|(_pubkey, account)| *account) .cloned() - .unwrap(), - from_account_pre, - ); - let collected_nonce_account = collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &nonce_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(); - assert_eq!( - collected_nonce_account.lamports(), - nonce_account_pre.lamports(), - ); - assert!(nonce_account::verify_nonce_account( - &collected_nonce_account, - durable_nonce.as_hash() - ) - .is_some()); + .unwrap(); + assert_eq!( + collected_nonce_account.lamports(), + nonce_account_pre.lamports(), + ); + assert!(nonce_account::verify_nonce_account( + &collected_nonce_account, + durable_nonce.as_hash() + ) + .is_some()); + + if collect_transactions { + let transactions = transactions.unwrap(); + assert_eq!(transactions.len(), collected_accounts.len()); + } else { + assert!(transactions.is_none()); + } + } } #[test] @@ -526,24 +579,39 @@ mod tests { )]; let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); - let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); - assert_eq!(collected_accounts.len(), 1); - let collected_nonce_account = collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &nonce_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(); - assert_eq!( - collected_nonce_account.lamports(), - nonce_account_pre.lamports() - ); - assert!(nonce_account::verify_nonce_account( - &collected_nonce_account, - 
durable_nonce.as_hash() - ) - .is_some()); + + for collect_transactions in [false, true] { + let (collected_accounts, transactions) = collect_accounts_to_store( + &txs, + &mut processing_results, + &durable_nonce, + 0, + collect_transactions, + ); + assert_eq!(collected_accounts.len(), 1); + let collected_nonce_account = collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &nonce_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(); + assert_eq!( + collected_nonce_account.lamports(), + nonce_account_pre.lamports() + ); + assert!(nonce_account::verify_nonce_account( + &collected_nonce_account, + durable_nonce.as_hash() + ) + .is_some()); + + if collect_transactions { + let transactions = transactions.unwrap(); + assert_eq!(transactions.len(), collected_accounts.len()); + } else { + assert!(transactions.is_none()); + } + } } #[test] @@ -572,17 +640,32 @@ mod tests { let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let (collected_accounts, _) = - collect_accounts_to_store(&txs, &mut processing_results, &durable_nonce, 0); - assert_eq!(collected_accounts.len(), 1); - assert_eq!( - collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &from_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(), - from_account_pre, - ); + + for collect_transactions in [false, true] { + let (collected_accounts, transactions) = collect_accounts_to_store( + &txs, + &mut processing_results, + &durable_nonce, + 0, + collect_transactions, + ); + assert_eq!(collected_accounts.len(), 1); + assert_eq!( + collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &from_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(), + from_account_pre, + ); + + if collect_transactions { + let transactions = transactions.unwrap(); + assert_eq!(transactions.len(), collected_accounts.len()); + } else { + assert!(transactions.is_none()); + } + } } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 82c3985211220b..841b09941873d8 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3804,6 +3804,7 @@ impl Bank { &mut processing_results, &durable_nonce, lamports_per_signature, + self.accounts().accounts_db.has_accounts_update_notifier(), ); self.rc.accounts.store_cached( (self.slot(), accounts_to_store.as_slice()), From 5a8c326ab82c0acdbd7491063daebc2a8e8f6dc3 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Tue, 3 Sep 2024 12:04:45 -0400 Subject: [PATCH 281/529] Minor cleanup in construct_candidate_clean_keys (#2822) --- accounts-db/src/accounts_db.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f358331b69021f..787491c32181e0 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3132,10 +3132,10 @@ impl AccountsDb { .take(num_bins) .collect(); - let insert_pubkey = |pubkey: Pubkey| { - let index = self.accounts_index.bin_calculator.bin_from_pubkey(&pubkey); + let insert_pubkey = |pubkey: &Pubkey| { + let index = self.accounts_index.bin_calculator.bin_from_pubkey(pubkey); let mut candidates_bin = candidates[index].write().unwrap(); - candidates_bin.insert(pubkey, CleaningInfo::default()); + candidates_bin.insert(*pubkey, CleaningInfo::default()); }; let dirty_ancient_stores = AtomicUsize::default(); let mut dirty_store_routine = || { @@ -3149,9 
+3149,7 @@ impl AccountsDb { dirty_ancient_stores.fetch_add(1, Ordering::Relaxed); } oldest_dirty_slot = oldest_dirty_slot.min(*slot); - store.accounts.scan_pubkeys(|key| { - insert_pubkey(*key); - }); + store.accounts.scan_pubkeys(insert_pubkey); }); oldest_dirty_slot }) @@ -3190,7 +3188,7 @@ impl AccountsDb { self.thread_pool_clean.install(|| { delta_keys.par_iter().for_each(|keys| { for key in keys { - insert_pubkey(*key); + insert_pubkey(key); } }); }); @@ -3212,7 +3210,7 @@ impl AccountsDb { let is_candidate_for_clean = max_slot_inclusive >= *slot && latest_full_snapshot_slot >= *slot; if is_candidate_for_clean { - insert_pubkey(*pubkey); + insert_pubkey(pubkey); } !is_candidate_for_clean }); From 22b823c1c019e30f6bd2586bb513abc2f3cc9e1b Mon Sep 17 00:00:00 2001 From: Tyera Date: Tue, 3 Sep 2024 10:31:30 -0600 Subject: [PATCH 282/529] Simplify RPC partitioned_epoch_reward_enabled determination (#2802) * Check num_reward_partitions for partitioned_epoch_reward_enabled * Make expect slightly more correct * Better variable name --- rpc/src/rpc.rs | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 76ad5119f9844c..f0294540a5e427 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -610,24 +610,6 @@ impl JsonRpcRequestProcessor { // epoch let bank = self.get_bank_with_config(context_config)?; - // DO NOT CLEAN UP with feature_set::partitioned_epoch_rewards_superfeature - // This logic needs to be retained indefinitely to support historical - // rewards before and after feature activation. - let partitioned_epoch_reward_enabled_slot = bank - .feature_set - .activated_slot(&feature_set::partitioned_epoch_rewards_superfeature::id()) - .or_else(|| { - // The order of these checks should not matter, since we will - // not ever have both features active on a live cluster. 
This - // check can be removed with - // feature_set::enable_partitioned_epoch_reward - bank.feature_set - .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()) - }); - let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot - .map(|slot| slot <= first_confirmed_block_in_epoch) - .unwrap_or(false); - // Get first block in the epoch let Ok(Some(epoch_boundary_block)) = self .get_block( @@ -656,6 +638,8 @@ impl JsonRpcRequestProcessor { .into()); } + let epoch_has_partitioned_rewards = epoch_boundary_block.num_reward_partitions.is_some(); + // Collect rewards from first block in the epoch if partitioned epoch // rewards not enabled, or address is a vote account let mut reward_map: HashMap<String, (Reward, Slot)> = { @@ -667,17 +651,17 @@ impl JsonRpcRequestProcessor { &addresses, &|reward_type| -> bool { reward_type == RewardType::Voting - || (!partitioned_epoch_reward_enabled && reward_type == RewardType::Staking) + || (!epoch_has_partitioned_rewards && reward_type == RewardType::Staking) }, ) }; // Append stake account rewards from partitions if partitions epoch // rewards is enabled - if partitioned_epoch_reward_enabled { + if epoch_has_partitioned_rewards { let num_partitions = epoch_boundary_block.num_reward_partitions.expect( - "epoch-boundary block should have num_reward_partitions after partitioned epoch \ - rewards enabled", + "epoch-boundary block should have num_reward_partitions for epochs with \ + partitioned rewards enabled", ); let num_partitions = usize::try_from(num_partitions) From a9ac3f55fcb2bc735db0d251eda89897a5dbaaaa Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Tue, 3 Sep 2024 14:00:12 -0500 Subject: [PATCH 283/529] Add workflow and script to add security-incident-response to GHSAs (#2764) * Add workflow and script to add security-incident-response to GHSAs that don't already have it * Remove echos to minimize the risk of leaking sensitive information * Remove whitespace * Switch ref to master and remove fetch-depth: 0 * Redirect gh api output to /dev/null * shellcheck * Disable SC2086 in gh call * Update workflow with runs-on: ubuntu-24.04 in order to get jq 1.7 --- .github/scripts/add-team-to-ghsa.sh | 36 ++++++++++++++++++++++++++ .github/workflows/add-team-to-ghsa.yml | 21 +++++++++++++++ 2 files changed, 57 insertions(+) create mode 100755 .github/scripts/add-team-to-ghsa.sh create mode 100644 .github/workflows/add-team-to-ghsa.yml diff --git a/.github/scripts/add-team-to-ghsa.sh b/.github/scripts/add-team-to-ghsa.sh new file mode 100755 index 00000000000000..41c1a787e85044 --- /dev/null +++ b/.github/scripts/add-team-to-ghsa.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +set -euof pipefail + +team_to_add_slug="security-incident-response" +github_org="anza-xyz" +github_repo="agave" + +# Note: This will get all the GHSAs even if there are more than the per_page value +# from gh api --help +# --paginate Make additional HTTP requests to fetch all pages of results +ghsa_json=$(gh api \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/$github_org/$github_repo/security-advisories?per_page=100 --paginate ) + +# Get a list of GHSAs that don't have the $team_to_add_slug in collaborating_teams +ghsa_without_team=$( jq -r '[ .[] | select(all(.collaborating_teams.[]; .slug != "'"$team_to_add_slug"'")) | .ghsa_id ] | sort | .[] ' <<< "$ghsa_json" ) + +# Iterate through the teams +while IFS= read -r ghsa_id; do + # PATCH updates the value. 
If we just set -f "collaborating_teams[]=$team_to_add_slug" it + # will overwrite any existing collaborating_teams. So we get the list of teams that are already + # added to this GHSA and format them as parameters for gh api like: + # -f collaborating_teams[]=ghsa-testing-1 + original_collaborating_team_slugs=$( jq -r '[ .[] | select(.ghsa_id == "'"$ghsa_id"'") | .collaborating_teams ] | "-f collaborating_teams[]=" + .[][].slug ' <<< "$ghsa_json" ) + + # Update the team list + # shellcheck disable=SC2086 + gh api \ + --method PATCH \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "/repos/$github_org/$github_repo/security-advisories/$ghsa_id" \ + -f "collaborating_teams[]=$team_to_add_slug" $original_collaborating_team_slugs \ + > /dev/null 2>&1 +done <<< "$ghsa_without_team" diff --git a/.github/workflows/add-team-to-ghsa.yml b/.github/workflows/add-team-to-ghsa.yml new file mode 100644 index 00000000000000..ea70d5870bf582 --- /dev/null +++ b/.github/workflows/add-team-to-ghsa.yml @@ -0,0 +1,21 @@ +name: Add Security Team to GHSAs + +on: + workflow_dispatch: + schedule: + - cron: "0 * * * *" + +jobs: + add-team-to-ghsa: + runs-on: ubuntu-24.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: master + - name: Run script + shell: bash + env: + GH_TOKEN: ${{ secrets.GHSA_ADD_SECURITY_INCIDENT_RESPONSE }} + run: | + .github/scripts/add-team-to-ghsa.sh From 738fbd8469e363ea2333f8410742a4dfe89f7928 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Tue, 3 Sep 2024 17:51:07 -0500 Subject: [PATCH 284/529] stop requesting entry on scan (#2805) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 787491c32181e0..aa98b65e3aba72 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4024,7 +4024,7 @@ impl AccountsDb { result }, None, - true, + false, self.scan_filter_for_shrinking, ); assert_eq!(index, std::cmp::min(accounts.len(), count)); From b5432b001d485d36e55ba60749411b567c4a383d Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Wed, 4 Sep 2024 18:20:16 +0900 Subject: [PATCH 285/529] [zk-sdk] Add fixed string tests for proofs (#2814) * refactor `impl_from_str` and `impl_from_bytes` macros * impl `Display`, `FromStr`, `From<[u8; _]>` for sigma and range proof pod types * impl `Display` for Pedersen commitment and grouped ElGamal ciphertext pod types * impl `Display`, `FromStr`, `From<[u8; _]>` for decrypt handle pod type * add fixed proof string tests * cargo clippy * remove println's * update `assert!` and `.is_ok()` with `unwrap`'s --- zk-sdk/src/encryption/pod/auth_encryption.rs | 4 +- zk-sdk/src/encryption/pod/elgamal.rs | 35 +++- zk-sdk/src/encryption/pod/grouped_elgamal.rs | 18 +- zk-sdk/src/encryption/pod/mod.rs | 35 ---- zk-sdk/src/encryption/pod/pedersen.rs | 20 +- zk-sdk/src/pod.rs | 35 ++++ zk-sdk/src/range_proof/mod.rs | 51 ++++- zk-sdk/src/range_proof/pod.rs | 55 +++++- .../handles_2.rs | 75 +++++++- .../handles_3.rs | 90 ++++++++- .../ciphertext_ciphertext_equality.rs | 50 ++++- .../ciphertext_commitment_equality.rs | 62 ++++-- .../grouped_ciphertext_validity/handles_2.rs | 65 ++++++- .../grouped_ciphertext_validity/handles_3.rs | 75 +++++++- .../src/sigma_proofs/percentage_with_cap.rs | 57 +++++- zk-sdk/src/sigma_proofs/pod.rs | 178 +++++++++++++++++- zk-sdk/src/sigma_proofs/pubkey_validity.rs | 27 ++- zk-sdk/src/sigma_proofs/zero_ciphertext.rs | 44 ++++- 18 
files changed, 849 insertions(+), 127 deletions(-) diff --git a/zk-sdk/src/encryption/pod/auth_encryption.rs b/zk-sdk/src/encryption/pod/auth_encryption.rs index d18e82f6b5e60c..8496e771155cc4 100644 --- a/zk-sdk/src/encryption/pod/auth_encryption.rs +++ b/zk-sdk/src/encryption/pod/auth_encryption.rs @@ -3,9 +3,9 @@ #[cfg(not(target_os = "solana"))] use crate::{encryption::auth_encryption::AeCiphertext, errors::AuthenticatedEncryptionError}; use { - crate::encryption::{ + crate::{ + encryption::AE_CIPHERTEXT_LEN, pod::{impl_from_bytes, impl_from_str}, - AE_CIPHERTEXT_LEN, }, base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::{Pod, Zeroable}, diff --git a/zk-sdk/src/encryption/pod/elgamal.rs b/zk-sdk/src/encryption/pod/elgamal.rs index 10874fe4cea118..6b30f27a127e3a 100644 --- a/zk-sdk/src/encryption/pod/elgamal.rs +++ b/zk-sdk/src/encryption/pod/elgamal.rs @@ -1,14 +1,5 @@ //! Plain Old Data types for the ElGamal encryption scheme. -use { - crate::encryption::{ - pod::{impl_from_bytes, impl_from_str}, - DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, ELGAMAL_PUBKEY_LEN, - }, - base64::{prelude::BASE64_STANDARD, Engine}, - bytemuck::Zeroable, - std::fmt, -}; #[cfg(not(target_os = "solana"))] use { crate::{ @@ -17,6 +8,15 @@ use { }, curve25519_dalek::ristretto::CompressedRistretto, }; +use { + crate::{ + encryption::{DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, ELGAMAL_PUBKEY_LEN}, + pod::{impl_from_bytes, impl_from_str}, + }, + base64::{prelude::BASE64_STANDARD, Engine}, + bytemuck::Zeroable, + std::fmt, +}; /// Maximum length of a base64 encoded ElGamal public key const ELGAMAL_PUBKEY_MAX_BASE64_LEN: usize = 44; @@ -24,6 +24,9 @@ const ELGAMAL_PUBKEY_MAX_BASE64_LEN: usize = 44; /// Maximum length of a base64 encoded ElGamal ciphertext const ELGAMAL_CIPHERTEXT_MAX_BASE64_LEN: usize = 88; +/// Maximum length of a base64 encoded ElGamal decrypt handle +const DECRYPT_HANDLE_MAX_BASE64_LEN: usize = 44; + /// The `ElGamalCiphertext` type as a `Pod`. 
#[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] @@ -150,6 +153,20 @@ impl TryFrom<PodDecryptHandle> for DecryptHandle { } } +impl fmt::Display for PodDecryptHandle { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodDecryptHandle, + BYTES_LEN = DECRYPT_HANDLE_LEN, + BASE64_LEN = DECRYPT_HANDLE_MAX_BASE64_LEN +); + +impl_from_bytes!(TYPE = PodDecryptHandle, BYTES_LEN = DECRYPT_HANDLE_LEN); + #[cfg(test)] mod tests { use {super::*, crate::encryption::elgamal::ElGamalKeypair, std::str::FromStr}; diff --git a/zk-sdk/src/encryption/pod/grouped_elgamal.rs b/zk-sdk/src/encryption/pod/grouped_elgamal.rs index 7d437bc2bc6933..3a6dc48cd6cd11 100644 --- a/zk-sdk/src/encryption/pod/grouped_elgamal.rs +++ b/zk-sdk/src/encryption/pod/grouped_elgamal.rs @@ -5,13 +5,11 @@ use crate::encryption::grouped_elgamal::GroupedElGamalCiphertext; use { crate::{ encryption::{ - pod::{ - elgamal::PodElGamalCiphertext, impl_from_bytes, impl_from_str, - pedersen::PodPedersenCommitment, - }, + pod::{elgamal::PodElGamalCiphertext, pedersen::PodPedersenCommitment}, DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, PEDERSEN_COMMITMENT_LEN, }, errors::ElGamalError, + pod::{impl_from_bytes, impl_from_str}, }, base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::Zeroable, @@ -89,6 +87,12 @@ impl Default for PodGroupedElGamalCiphertext2Handles { } } +impl fmt::Display for PodGroupedElGamalCiphertext2Handles { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + impl_from_str!( + TYPE = PodGroupedElGamalCiphertext2Handles, BYTES_LEN = GROUPED_ELGAMAL_CIPHERTEXT_2_HANDLES, @@ -137,6 +141,12 @@ impl Default for PodGroupedElGamalCiphertext3Handles { } } +impl fmt::Display for PodGroupedElGamalCiphertext3Handles { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + impl_from_str!( TYPE = PodGroupedElGamalCiphertext3Handles, BYTES_LEN = GROUPED_ELGAMAL_CIPHERTEXT_3_HANDLES, diff --git a/zk-sdk/src/encryption/pod/mod.rs b/zk-sdk/src/encryption/pod/mod.rs index 928f657b939317..90305a4464c452 100644 --- a/zk-sdk/src/encryption/pod/mod.rs +++ b/zk-sdk/src/encryption/pod/mod.rs @@ -2,38 +2,3 @@ pub mod auth_encryption; pub mod elgamal; pub mod grouped_elgamal; pub mod pedersen; - -macro_rules! impl_from_str { - (TYPE = $type:ident, BYTES_LEN = $bytes_len:expr, BASE64_LEN = $base64_len:expr) => { - impl std::str::FromStr for $type { - type Err = crate::errors::ParseError; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - if s.len() > $base64_len { - return Err(Self::Err::WrongSize); - } - let mut bytes = [0u8; $bytes_len]; - let decoded_len = BASE64_STANDARD - .decode_slice(s, &mut bytes) - .map_err(|_| Self::Err::Invalid)?; - if decoded_len != $bytes_len { - Err(Self::Err::WrongSize) - } else { - Ok($type(bytes)) - } - } - } - }; -} -pub(crate) use impl_from_str; - -macro_rules! impl_from_bytes { - (TYPE = $type:ident, BYTES_LEN = $bytes_len:expr) => { - impl std::convert::From<[u8; $bytes_len]> for $type { - fn from(bytes: [u8; $bytes_len]) -> Self { - Self(bytes) - } - } - }; -} -pub(crate) use impl_from_bytes; diff --git a/zk-sdk/src/encryption/pod/pedersen.rs b/zk-sdk/src/encryption/pod/pedersen.rs index aecac312b78090..96d2154f19abfb 100644 --- a/zk-sdk/src/encryption/pod/pedersen.rs +++ b/zk-sdk/src/encryption/pod/pedersen.rs @@ -1,19 +1,19 @@ //! 
Plain Old Data type for the Pedersen commitment scheme. +#[cfg(not(target_os = "solana"))] use { - crate::encryption::{ + crate::{encryption::pedersen::PedersenCommitment, errors::ElGamalError}, + curve25519_dalek::ristretto::CompressedRistretto, +}; +use { + crate::{ + encryption::PEDERSEN_COMMITMENT_LEN, pod::{impl_from_bytes, impl_from_str}, - PEDERSEN_COMMITMENT_LEN, }, base64::{prelude::BASE64_STANDARD, Engine}, bytemuck_derive::{Pod, Zeroable}, std::fmt, }; -#[cfg(not(target_os = "solana"))] -use { - crate::{encryption::pedersen::PedersenCommitment, errors::ElGamalError}, - curve25519_dalek::ristretto::CompressedRistretto, -}; /// Maximum length of a base64 encoded ElGamal public key const PEDERSEN_COMMITMENT_MAX_BASE64_LEN: usize = 44; @@ -36,6 +36,12 @@ impl From<PedersenCommitment> for PodPedersenCommitment { } } +impl fmt::Display for PodPedersenCommitment { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + impl_from_str!( TYPE = PodPedersenCommitment, BYTES_LEN = PEDERSEN_COMMITMENT_LEN, diff --git a/zk-sdk/src/pod.rs b/zk-sdk/src/pod.rs index 2240b5c1ebe375..d939e4735bda7d 100644 --- a/zk-sdk/src/pod.rs +++ b/zk-sdk/src/pod.rs @@ -27,3 +27,38 @@ impl From<PodU64> for u64 { Self::from_le_bytes(pod.0) } } + +macro_rules! impl_from_str { + (TYPE = $type:ident, BYTES_LEN = $bytes_len:expr, BASE64_LEN = $base64_len:expr) => { + impl std::str::FromStr for $type { + type Err = crate::errors::ParseError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.len() > $base64_len { + return Err(Self::Err::WrongSize); + } + let mut bytes = [0u8; $bytes_len]; + let decoded_len = BASE64_STANDARD + .decode_slice(s, &mut bytes) + .map_err(|_| Self::Err::Invalid)?; + if decoded_len != $bytes_len { + Err(Self::Err::WrongSize) + } else { + Ok($type(bytes)) + } + } + } + }; +} +pub(crate) use impl_from_str; + +macro_rules! 
impl_from_bytes { + (TYPE = $type:ident, BYTES_LEN = $bytes_len:expr) => { + impl std::convert::From<[u8; $bytes_len]> for $type { + fn from(bytes: [u8; $bytes_len]) -> Self { + Self(bytes) + } + } + }; +} +pub(crate) use impl_from_bytes; diff --git a/zk-sdk/src/range_proof/mod.rs b/zk-sdk/src/range_proof/mod.rs index fb2431dfa1d5ae..4806f91d9b757a 100644 --- a/zk-sdk/src/range_proof/mod.rs +++ b/zk-sdk/src/range_proof/mod.rs @@ -55,21 +55,21 @@ pub const INNER_PRODUCT_PROOF_U64_LEN: usize = 448; /// Byte length of a range proof for an unsigned 64-bit number pub const RANGE_PROOF_U64_LEN: usize = - INNER_PRODUCT_PROOF_U64_LEN + RANGE_PROOF_MODULO_INNER_PRODUCT_PROOF_LEN; + INNER_PRODUCT_PROOF_U64_LEN + RANGE_PROOF_MODULO_INNER_PRODUCT_PROOF_LEN; // 672 bytes /// Byte length of an inner-product proof for a vector of length 128 pub const INNER_PRODUCT_PROOF_U128_LEN: usize = 512; /// Byte length of a range proof for an unsigned 128-bit number pub const RANGE_PROOF_U128_LEN: usize = - INNER_PRODUCT_PROOF_U128_LEN + RANGE_PROOF_MODULO_INNER_PRODUCT_PROOF_LEN; + INNER_PRODUCT_PROOF_U128_LEN + RANGE_PROOF_MODULO_INNER_PRODUCT_PROOF_LEN; // 736 bytes /// Byte length of an inner-product proof for a vector of length 256 pub const INNER_PRODUCT_PROOF_U256_LEN: usize = 576; /// Byte length of a range proof for an unsigned 256-bit number pub const RANGE_PROOF_U256_LEN: usize = - INNER_PRODUCT_PROOF_U256_LEN + RANGE_PROOF_MODULO_INNER_PRODUCT_PROOF_LEN; + INNER_PRODUCT_PROOF_U256_LEN + RANGE_PROOF_MODULO_INNER_PRODUCT_PROOF_LEN; // 800 bytes #[allow(non_snake_case)] #[cfg(not(target_os = "solana"))] @@ -466,7 +466,13 @@ fn delta(bit_lengths: &[usize], y: &Scalar, z: &Scalar) -> Scalar { #[cfg(test)] mod tests { - use super::*; + use { + super::*, + crate::{ + encryption::pod::pedersen::PodPedersenCommitment, range_proof::pod::PodRangeProofU128, + }, + std::str::FromStr, + }; #[test] fn test_single_rangeproof() { @@ -478,9 +484,9 @@ mod tests { let proof = RangeProof::new(vec![55], vec![32], vec![&open], &mut transcript_create).unwrap(); - assert!(proof + proof .verify(vec![&comm], vec![32], &mut transcript_verify) - .is_ok()); + .unwrap(); } #[test] @@ -500,14 +506,41 @@ mod tests { ) .unwrap(); - assert!(proof + proof .verify( vec![&comm_1, &comm_2, &comm_3], vec![64, 32, 32], &mut transcript_verify, ) - .is_ok()); + .unwrap(); } - // TODO: write test for serialization/deserialization + #[test] + fn test_range_proof_string() { + let commitment_1_str = "dDaa/MTEDlyI0Nxx+iu1tOteZsTWmPXAfn9QI0W9mSc="; + let pod_commitment_1 = PodPedersenCommitment::from_str(commitment_1_str).unwrap(); + let commitment_1: PedersenCommitment = pod_commitment_1.try_into().unwrap(); + + let commitment_2_str = "tnRILjKpogi2sXxLgZzMqlqPMLnCJmrSjZ5SPQYhtgg="; + let pod_commitment_2 = PodPedersenCommitment::from_str(commitment_2_str).unwrap(); + let commitment_2: PedersenCommitment = pod_commitment_2.try_into().unwrap(); + + let commitment_3_str = "ZAC5ZLXotsMOVExtrr56D/EZNeyo9iWepNbeH22EuRo="; + let pod_commitment_3 = PodPedersenCommitment::from_str(commitment_3_str).unwrap(); + let commitment_3: PedersenCommitment = pod_commitment_3.try_into().unwrap(); + + let proof_str = 
"AvvBQL63pXMXsmuvuNbs/CqXdzeyrMpEIO2O/cI6/SyqU4N+7HUU3LmXai9st+DxqTnuKsm0SgnADfpLpQCEbDDupMb09NY8oHT8Bx8WQhv9eyoBlrPRd7DVhOUsio02gBshe3p2Wj7+yDCpFaZ7/PMypFBX6+E+EqCiPI6yUk4ztslWY0Ksac41eJgcPzXyIx2kvmSTsVBKLb7U01PWBC+AUyUmK3/IdvmJ4DnlS3xFrdg/mxSsYJFd3OZA3cwDb0jePQf/P43/2VVqPRixMVO7+VGoMKPoRTEEVbClsAlW6stGTFPcrimu3c+geASgvwElkIKNGtYcjoj3SS+/VeqIG9Ei1j+TJtPhOE9SG4KNw9xBGwecpliDbQhKjO950EVcnOts+a525/frZV1jHJmOOrZtKRV4pvk37dtQkx4sv+pxRmfVrjwOcKQeg+BzcuF0vaQbqa4SUbzbO9z3RwIMlYIBaz0bqZgJmtPOFuFmNyCJaeB29vlcEAfYbn5gdlgtWP50tKmhoskndulziKZjz4qHSA9rbG2ZtoMHoCsAobHKu2H9OxcaK4Scj1QGwst+zXBEY8uePNbxvU5DMJLVFORtLUXkVdPCmCSsm1Bz4TRbnls8LOVW6wqTgShQMhjNM3RtwdHXENPn5uDnhyvfduAcL+DtI8AIJyRneROefk7i7gjal8dLdMM/QnXT7ctpMQU6uNlpsNzq65xlOQKXO71vQ3c2mE/DmxVJi6BTS5WCzavvhiqdhQyRL61ESCALQpaP0/d0DLwLikVH3ypuDLEnVXe9Pmkxdd0xCzO6QcfyK50CPnV/dVgHeLg8EVag2O83+/7Ys5oLxrDad9TJTDcrT2xsRqECFnSA+z9uZtDPujhQL0ogS5RH4agnQN4mVGTwOLV8OKpn+AvWq6+j1/9EXFkLPBTU5wT0FQuT2VZ8xp5GeqdI13Zey1uPrxc6CZZ407y9OINED4IdBQ=="; + let pod_proof = PodRangeProofU128::from_str(proof_str).unwrap(); + let proof: RangeProof = pod_proof.try_into().unwrap(); + + let mut transcript_verify = Transcript::new(b"Test"); + + proof + .verify( + vec![&commitment_1, &commitment_2, &commitment_3], + vec![64, 32, 32], + &mut transcript_verify, + ) + .unwrap() + } } diff --git a/zk-sdk/src/range_proof/pod.rs b/zk-sdk/src/range_proof/pod.rs index fbee03bfeb27c6..28604080646b3f 100644 --- a/zk-sdk/src/range_proof/pod.rs +++ b/zk-sdk/src/range_proof/pod.rs @@ -6,8 +6,13 @@ use crate::{ UNIT_LEN, }; use { - crate::range_proof::*, + crate::{ + pod::{impl_from_bytes, impl_from_str}, + range_proof::*, + }, + base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::{Pod, Zeroable}, + std::fmt, }; /// The `RangeProof` type as a `Pod` restricted to proofs on 64-bit numbers. @@ -41,6 +46,22 @@ impl TryFrom for RangeProof { } } +const RANGE_PROOF_U64_MAX_BASE64_LEN: usize = 896; + +impl fmt::Display for PodRangeProofU64 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodRangeProofU64, + BYTES_LEN = RANGE_PROOF_U64_LEN, + BASE64_LEN = RANGE_PROOF_U64_MAX_BASE64_LEN +); + +impl_from_bytes!(TYPE = PodRangeProofU64, BYTES_LEN = RANGE_PROOF_U64_LEN); + /// The `RangeProof` type as a `Pod` restricted to proofs on 128-bit numbers. #[derive(Clone, Copy)] #[repr(transparent)] @@ -72,6 +93,22 @@ impl TryFrom for RangeProof { } } +const RANGE_PROOF_U128_MAX_BASE64_LEN: usize = 984; + +impl fmt::Display for PodRangeProofU128 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodRangeProofU128, + BYTES_LEN = RANGE_PROOF_U128_LEN, + BASE64_LEN = RANGE_PROOF_U128_MAX_BASE64_LEN +); + +impl_from_bytes!(TYPE = PodRangeProofU128, BYTES_LEN = RANGE_PROOF_U128_LEN); + /// The `RangeProof` type as a `Pod` restricted to proofs on 256-bit numbers. 
#[derive(Clone, Copy)] #[repr(transparent)] @@ -103,6 +140,22 @@ impl TryFrom<PodRangeProofU256> for RangeProof { } } +const RANGE_PROOF_U256_MAX_BASE64_LEN: usize = 1068; + +impl fmt::Display for PodRangeProofU256 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodRangeProofU256, + BYTES_LEN = RANGE_PROOF_U256_LEN, + BASE64_LEN = RANGE_PROOF_U256_MAX_BASE64_LEN +); + +impl_from_bytes!(TYPE = PodRangeProofU256, BYTES_LEN = RANGE_PROOF_U256_LEN); + #[cfg(not(target_os = "solana"))] fn copy_range_proof_modulo_inner_product_proof(proof: &RangeProof, buf: &mut [u8]) { let mut chunks = buf.chunks_mut(UNIT_LEN); diff --git a/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_2.rs b/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_2.rs index 2b8f747e38693e..a45ffd2722b4dc 100644 --- a/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_2.rs +++ b/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_2.rs @@ -123,11 +123,22 @@ impl BatchedGroupedCiphertext2HandlesValidityProof { mod test { use { super::*, - crate::encryption::{elgamal::ElGamalKeypair, pedersen::Pedersen}, + crate::{ + encryption::{ + elgamal::ElGamalKeypair, + pedersen::Pedersen, + pod::{ + elgamal::{PodDecryptHandle, PodElGamalPubkey}, + pedersen::PodPedersenCommitment, + }, + }, + sigma_proofs::pod::PodBatchedGroupedCiphertext2HandlesValidityProof, + }, + std::str::FromStr, }; #[test] - fn test_batched_grouped_ciphertext_validity_proof() { + fn test_batched_grouped_ciphertext_2_handles_validity_proof() { let first_keypair = ElGamalKeypair::new_rand(); let first_pubkey = first_keypair.pubkey(); @@ -159,7 +170,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( first_pubkey, second_pubkey, @@ -171,6 +182,62 @@ mod test { &second_handle_hi, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); + } + + #[test] + fn test_batched_grouped_ciphertext_2_handles_validity_proof_string() { + let first_pubkey_str = "3FQGicS6AgVkRnX5Sau8ybxJDvlehmbdvBUdo+o+oE4="; + let pod_first_pubkey = PodElGamalPubkey::from_str(first_pubkey_str).unwrap(); + let first_pubkey: ElGamalPubkey = pod_first_pubkey.try_into().unwrap(); + + let second_pubkey_str = "IieU/fJCRksbDNvIJZvg/N/safpnIWAGT/xpUAG7YUg="; + let pod_second_pubkey = PodElGamalPubkey::from_str(second_pubkey_str).unwrap(); + let second_pubkey: ElGamalPubkey = pod_second_pubkey.try_into().unwrap(); + + let commitment_lo_str = "Lq0z7bx3ccyxIB0rRHoWzcba8W1azvAhMfnJogxcz2I="; + let pod_commitment_lo = PodPedersenCommitment::from_str(commitment_lo_str).unwrap(); + let commitment_lo: PedersenCommitment = pod_commitment_lo.try_into().unwrap(); + + let commitment_hi_str = "dLPLdQrcl5ZWb0EaJcmebAlJA6RrzKpMSYPDVMJdOm0="; + let pod_commitment_hi = PodPedersenCommitment::from_str(commitment_hi_str).unwrap(); + let commitment_hi: PedersenCommitment = pod_commitment_hi.try_into().unwrap(); + + let first_handle_lo_str = "GizvHRUmu6CMjhH7qWg5Rqu43V69Nyjq4QsN/yXBHT8="; + let pod_first_handle_lo_str = PodDecryptHandle::from_str(first_handle_lo_str).unwrap(); + let first_handle_lo: DecryptHandle = pod_first_handle_lo_str.try_into().unwrap(); + + let first_handle_hi_str = "qMuR929bbkKiVJfRvYxnb90rbh2btjNDjaXpeLCvQWk="; + let pod_first_handle_hi_str = PodDecryptHandle::from_str(first_handle_hi_str).unwrap(); + let first_handle_hi: DecryptHandle = pod_first_handle_hi_str.try_into().unwrap(); + + let second_handle_lo_str = 
"MmDbMo2l/jAcXUIm09AQZsBXa93lI2BapAiGZ6f9zRs="; + let pod_second_handle_lo_str = PodDecryptHandle::from_str(second_handle_lo_str).unwrap(); + let second_handle_lo: DecryptHandle = pod_second_handle_lo_str.try_into().unwrap(); + + let second_handle_hi_str = "gKhb0o3d22XcUcQl5hENF4l1SJwg1vpgiw2RDYqXOxY="; + let pod_second_handle_hi_str = PodDecryptHandle::from_str(second_handle_hi_str).unwrap(); + let second_handle_hi: DecryptHandle = pod_second_handle_hi_str.try_into().unwrap(); + + let proof_str = "2n2mADpkNrop+eHJj1sAryXWcTtC/7QKcxMp7FdHeh8wjGKLAa9kC89QLGrphv7pZdb2J25kKXqhWUzRBsJWU0izi5vxau9XX6cyd72F3Q9hMXBfjk3htOHI0VnGAalZ/3dZ6C7erjGQDoeTVGOd1vewQ+NObAbfZwcry3+VhQNpkhL17E1dUgZZ+mb5K0tXAjWCmVh1OfN9h3sGltTUCg=="; + let pod_proof = + PodBatchedGroupedCiphertext2HandlesValidityProof::from_str(proof_str).unwrap(); + let proof: BatchedGroupedCiphertext2HandlesValidityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"Test"); + + proof + .verify( + &first_pubkey, + &second_pubkey, + &commitment_lo, + &commitment_hi, + &first_handle_lo, + &first_handle_hi, + &second_handle_lo, + &second_handle_hi, + &mut verifier_transcript, + ) + .unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_3.rs b/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_3.rs index 83dfd2c786c185..62dbf0918fd6d0 100644 --- a/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_3.rs +++ b/zk-sdk/src/sigma_proofs/batched_grouped_ciphertext_validity/handles_3.rs @@ -133,11 +133,22 @@ impl BatchedGroupedCiphertext3HandlesValidityProof { mod test { use { super::*, - crate::encryption::{elgamal::ElGamalKeypair, pedersen::Pedersen}, + crate::{ + encryption::{ + elgamal::ElGamalKeypair, + pedersen::Pedersen, + pod::{ + elgamal::{PodDecryptHandle, PodElGamalPubkey}, + pedersen::PodPedersenCommitment, + }, + }, + sigma_proofs::pod::PodBatchedGroupedCiphertext3HandlesValidityProof, + }, + std::str::FromStr, }; #[test] - fn test_batched_grouped_ciphertext_validity_proof() { + fn test_batched_grouped_ciphertext_3_handles_validity_proof() { let first_keypair = ElGamalKeypair::new_rand(); let first_pubkey = first_keypair.pubkey(); @@ -176,7 +187,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( first_pubkey, second_pubkey, @@ -191,6 +202,77 @@ mod test { &third_handle_hi, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); + } + + #[test] + fn test_batched_grouped_ciphertext_3_handles_validity_proof_string() { + let first_pubkey_str = "PFQ4AD4W/Y4BEg3nI/qckFLhnjMQ12xPHyaMg9Bkg3w="; + let pod_first_pubkey = PodElGamalPubkey::from_str(first_pubkey_str).unwrap(); + let first_pubkey: ElGamalPubkey = pod_first_pubkey.try_into().unwrap(); + + let second_pubkey_str = "2CZ4h5oK7zh4/3P6s/kCQoNlpUPk1IrsrAtTWjCtfFo="; + let pod_second_pubkey = PodElGamalPubkey::from_str(second_pubkey_str).unwrap(); + let second_pubkey: ElGamalPubkey = pod_second_pubkey.try_into().unwrap(); + + let third_pubkey_str = "yonKhqkoXNvMbN/tU6fjHFhfZuNPpvMj8L55aP2bBG4="; + let pod_third_pubkey = PodElGamalPubkey::from_str(third_pubkey_str).unwrap(); + let third_pubkey: ElGamalPubkey = pod_third_pubkey.try_into().unwrap(); + + let commitment_lo_str = "atIteiveexponnuF2Z1nbovZYYtcGWjglpEA3caMShM="; + let pod_commitment_lo = PodPedersenCommitment::from_str(commitment_lo_str).unwrap(); + let commitment_lo: PedersenCommitment = pod_commitment_lo.try_into().unwrap(); + + let commitment_hi_str = "IoZlSj7spae2ogiAUiEuuwAjYA5khgBH8FhaHzkh+lc="; 
+ let pod_commitment_hi = PodPedersenCommitment::from_str(commitment_hi_str).unwrap(); + let commitment_hi: PedersenCommitment = pod_commitment_hi.try_into().unwrap(); + + let first_handle_lo_str = "6PlKiitdapVZnh7VccQNbskXop9nmITGppLsV42UMkU="; + let pod_first_handle_lo_str = PodDecryptHandle::from_str(first_handle_lo_str).unwrap(); + let first_handle_lo: DecryptHandle = pod_first_handle_lo_str.try_into().unwrap(); + + let first_handle_hi_str = "vF+oZ3WWnrJyJ95Wl8EW+aVJiFmruiuRw6+TT3QVMBI="; + let pod_first_handle_hi_str = PodDecryptHandle::from_str(first_handle_hi_str).unwrap(); + let first_handle_hi: DecryptHandle = pod_first_handle_hi_str.try_into().unwrap(); + + let second_handle_lo_str = "rvxzo5ZyrD6YTm7X3GjplgOGJjx6PtoZ+DKbL4LsQWA="; + let pod_second_handle_lo_str = PodDecryptHandle::from_str(second_handle_lo_str).unwrap(); + let second_handle_lo: DecryptHandle = pod_second_handle_lo_str.try_into().unwrap(); + + let second_handle_hi_str = "0mdZSGiWQhOjqsExqFMD8hfgUlRRRrF/G3CJ7d0LEEk="; + let pod_second_handle_hi_str = PodDecryptHandle::from_str(second_handle_hi_str).unwrap(); + let second_handle_hi: DecryptHandle = pod_second_handle_hi_str.try_into().unwrap(); + + let third_handle_lo_str = "bpT2LuFektFhI/sacjSsqNtCsO8ac5qn0jWeMeQq4WM="; + let pod_third_handle_lo_str = PodDecryptHandle::from_str(third_handle_lo_str).unwrap(); + let third_handle_lo: DecryptHandle = pod_third_handle_lo_str.try_into().unwrap(); + + let third_handle_hi_str = "OE8z7Bbv2AHnjxebK6ASJfkJbOlYQdnN6ZPkG2u4SnA="; + let pod_third_handle_hi_str = PodDecryptHandle::from_str(third_handle_hi_str).unwrap(); + let third_handle_hi: DecryptHandle = pod_third_handle_hi_str.try_into().unwrap(); + + let proof_str = "GkjZ7QKcJq5X/OU8wb26wZ7p2D9thVK+Cb11CzRjWUoihYvGfuCbVG1vr4qtnfx65SS4jVK1H0q/948A9wy8ZPTrOZJA122G4+cpt5mKnSrKq/vbv4ZRha0oR9RGJFZ2SPT3gx2jysKDKRAQgBLOzSGfQg9Hsbz57i55SQfliUF5mByZKuzGKHSIHi81BDqbrFAj6x5bOeMAaLqsCboCA5XGDUZ2HMPUGuAd9F+OaVH+eJZnuoDjwwcBQ2eANgMB"; + let pod_proof = + PodBatchedGroupedCiphertext3HandlesValidityProof::from_str(proof_str).unwrap(); + let proof: BatchedGroupedCiphertext3HandlesValidityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"Test"); + + proof + .verify( + &first_pubkey, + &second_pubkey, + &third_pubkey, + &commitment_lo, + &commitment_hi, + &first_handle_lo, + &first_handle_hi, + &second_handle_lo, + &second_handle_hi, + &third_handle_lo, + &third_handle_hi, + &mut verifier_transcript, + ) + .unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs b/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs index 319a889b760681..44201e0594760c 100644 --- a/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs +++ b/zk-sdk/src/sigma_proofs/ciphertext_ciphertext_equality.rs @@ -267,7 +267,14 @@ impl CiphertextCiphertextEqualityProof { #[cfg(test)] mod test { - use super::*; + use { + super::*, + crate::{ + encryption::pod::elgamal::{PodElGamalCiphertext, PodElGamalPubkey}, + sigma_proofs::pod::PodCiphertextCiphertextEqualityProof, + }, + std::str::FromStr, + }; #[test] fn test_ciphertext_ciphertext_equality_proof_correctness() { @@ -295,15 +302,15 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( first_keypair.pubkey(), second_keypair.pubkey(), &first_ciphertext, &second_ciphertext, - &mut verifier_transcript + &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // fail case: encrypted and committed messages are different let first_message: u64 = 55; @@ -338,4 +345,39 @@ mod test { ) 
.is_err()); } + + #[test] + fn test_ciphertext_ciphertext_equality_proof_string() { + let first_pubkey_str = "VOPKaqo4nsX4XnbgGjCKHkLkR6JG1jX9D5G/e0EuYmM="; + let pod_first_pubkey = PodElGamalPubkey::from_str(first_pubkey_str).unwrap(); + let first_pubkey: ElGamalPubkey = pod_first_pubkey.try_into().unwrap(); + + let second_pubkey_str = "JnVhtKo9B7g9c8Obo/5/EqvA59i3TvtuOcQWf17T7SU="; + let pod_second_pubkey = PodElGamalPubkey::from_str(second_pubkey_str).unwrap(); + let second_pubkey: ElGamalPubkey = pod_second_pubkey.try_into().unwrap(); + + let first_ciphertext_str = "oKv6zxN051MXdk2cISD+CUsH2+FINoH1iB4WZyuy6nNkE7Q+eLiY9JB8itJhgKHJEA/1sAzDvpnRlLL06OXvIg=="; + let pod_first_ciphertext = PodElGamalCiphertext::from_str(first_ciphertext_str).unwrap(); + let first_ciphertext: ElGamalCiphertext = pod_first_ciphertext.try_into().unwrap(); + + let second_ciphertext_str = "ooSA2cQDqutgyCBoMiQktM1Cu4NDNEbphF010gjG4iF0iMK1N+u/Qxqk0wwO/+w+5S6RiicwPs4mEKRJpFiHEw=="; + let pod_second_ciphertext = PodElGamalCiphertext::from_str(second_ciphertext_str).unwrap(); + let second_ciphertext: ElGamalCiphertext = pod_second_ciphertext.try_into().unwrap(); + + let proof_str = "MlfRDO4sBPbpciEXci3QfVSLVABAJ0s8wMZ/Uz3AyETmGJ1BUE961fHIiNQXPD0j1uu1Josj//E8loPD1w+4E3bfDBJ3Mp2YqeOv41Bdec02YXlAotTGjq/UfncGdUhyampkuXUmSvnmkf5BIp4nr3X18cR9KHTAzBrKv6erjAxIckyRnACaZGEx+ZboEb3FBEXqTklytT1nrebbwkjvDUWbcpZrE+xxBWYek3qeq1x1debzxVhtS2yx44cvR5UIGLzGYa2ec/xh7wvyNEbnX80rZju2dztr4bN5f2vrTgk="; + let pod_proof = PodCiphertextCiphertextEqualityProof::from_str(proof_str).unwrap(); + let proof: CiphertextCiphertextEqualityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"Test"); + + proof + .verify( + &first_pubkey, + &second_pubkey, + &first_ciphertext, + &second_ciphertext, + &mut verifier_transcript, + ) + .unwrap(); + } } diff --git a/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs b/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs index 3f5fc58e3e2799..56daefffd69d27 100644 --- a/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs +++ b/zk-sdk/src/sigma_proofs/ciphertext_commitment_equality.rs @@ -244,7 +244,18 @@ impl CiphertextCommitmentEqualityProof { mod test { use { super::*, - crate::encryption::{elgamal::ElGamalSecretKey, pedersen::Pedersen}, + crate::{ + encryption::{ + elgamal::ElGamalSecretKey, + pedersen::Pedersen, + pod::{ + elgamal::{PodElGamalCiphertext, PodElGamalPubkey}, + pedersen::PodPedersenCommitment, + }, + }, + sigma_proofs::pod::PodCiphertextCommitmentEqualityProof, + }, + std::str::FromStr, }; #[test] @@ -267,14 +278,14 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( keypair.pubkey(), &ciphertext, &commitment, - &mut verifier_transcript + &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // fail case: encrypted and committed messages are different let keypair = ElGamalKeypair::new_rand(); @@ -357,14 +368,14 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( elgamal_keypair.pubkey(), &ciphertext, &commitment, - &mut verifier_transcript + &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // if commitment is all-zero and the ciphertext is a correct encryption of 0, then the // proof should still accept @@ -386,14 +397,14 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( elgamal_keypair.pubkey(), &ciphertext, &commitment, - &mut verifier_transcript + &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // if ciphertext is all zero and commitment correctly encodes 0, 
then the proof should // still accept @@ -414,13 +425,38 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( elgamal_keypair.pubkey(), &ciphertext, &commitment, - &mut verifier_transcript + &mut verifier_transcript, ) - .is_ok()); + .unwrap(); + } + + #[test] + fn test_ciphertext_commitment_equality_proof_string() { + let pubkey_str = "JNa7rRrDm35laU7f8HPds1PmHoZEPSHFK/M+aTtEhAk="; + let pod_pubkey = PodElGamalPubkey::from_str(pubkey_str).unwrap(); + let pubkey: ElGamalPubkey = pod_pubkey.try_into().unwrap(); + + let ciphertext_str = "RAXnbQ/DPRlYAWmD+iHRNqMDv7oQcPgQ7OejRzj4bxVy2qOJNziqqDOC7VP3iTW1+z/jckW4smA3EUF7i/r8Rw=="; + let pod_ciphertext = PodElGamalCiphertext::from_str(ciphertext_str).unwrap(); + let ciphertext: ElGamalCiphertext = pod_ciphertext.try_into().unwrap(); + + let commitment_str = "ngPTYvbY9P5l6aOfr7bLQiI+0HZsw8GBgiumdW3tNzw="; + let pod_commitment = PodPedersenCommitment::from_str(commitment_str).unwrap(); + let commitment: PedersenCommitment = pod_commitment.try_into().unwrap(); + + let proof_str = "cCZySLxB2XJdGyDvckVBm2OWiXqf7Jf54IFoDuLJ4G+ySj+lh5DbaDMHDhuozQC9tDWtk2mFITuaXOc5Zw3nZ2oEvVYpqv5hN+k5dx9k8/nZKabUCkZwx310z7x4fE4Np5SY9PYia1hkrq9AWq0b3v97XvW1+XCSSxuflvBk5wsdaQQ+ZgcmPnKWKjHfRwmU2k5iVgYzs2VmvZa5E3OWBoM/M2yFNvukY+FCC2YMnspO0c4lNBr/vDFQuHdW0OgJ"; + let pod_proof = PodCiphertextCommitmentEqualityProof::from_str(proof_str).unwrap(); + let proof: CiphertextCommitmentEqualityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"Test"); + + proof + .verify(&pubkey, &ciphertext, &commitment, &mut verifier_transcript) + .unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs index 688d3cf73d5c88..11f46f61d86d19 100644 --- a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs +++ b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_2.rs @@ -238,7 +238,18 @@ impl GroupedCiphertext2HandlesValidityProof { mod test { use { super::*, - crate::encryption::{elgamal::ElGamalKeypair, pedersen::Pedersen}, + crate::{ + encryption::{ + elgamal::ElGamalKeypair, + pedersen::Pedersen, + pod::{ + elgamal::{PodDecryptHandle, PodElGamalPubkey}, + pedersen::PodPedersenCommitment, + }, + }, + sigma_proofs::pod::PodGroupedCiphertext2HandlesValidityProof, + }, + std::str::FromStr, }; #[test] @@ -266,7 +277,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &commitment, first_pubkey, @@ -275,7 +286,7 @@ mod test { &second_handle, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); } #[test] @@ -339,7 +350,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &commitment, first_pubkey, @@ -348,7 +359,7 @@ mod test { &second_handle, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // decryption handles can be zero as long as the Pedersen commitment is valid let first_keypair = ElGamalKeypair::new_rand(); @@ -374,7 +385,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &commitment, first_pubkey, @@ -383,6 +394,46 @@ mod test { &second_handle, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); + } + + #[test] + fn test_grouped_ciphertext_validity_proof_string() { + let commitment_str = "VjdpJcofkU/Lhd6RRvwsCoqaZ8XSbhiizI7jsxZNKSU="; + let pod_commitment = PodPedersenCommitment::from_str(commitment_str).unwrap(); + let commitment: PedersenCommitment = pod_commitment.try_into().unwrap(); + + let first_pubkey_str = 
"YllcTvlVBp9nv+bi8d0Z9UOujPfMsgH3ZcCqQSwXfic="; + let pod_first_pubkey = PodElGamalPubkey::from_str(first_pubkey_str).unwrap(); + let first_pubkey: ElGamalPubkey = pod_first_pubkey.try_into().unwrap(); + + let second_pubkey_str = "CCq+4oKGWlh3pkSbZpEsj6vfimhC/c3TxTVAghXq5Xo="; + let pod_second_pubkey = PodElGamalPubkey::from_str(second_pubkey_str).unwrap(); + let second_pubkey: ElGamalPubkey = pod_second_pubkey.try_into().unwrap(); + + let first_handle_str = "EE1qdL/QLMGXvsWIjw2c07Vg/DgUsaexxQECKtjEwWE="; + let pod_first_handle_str = PodDecryptHandle::from_str(first_handle_str).unwrap(); + let first_handle: DecryptHandle = pod_first_handle_str.try_into().unwrap(); + + let second_handle_str = "2Jn0+IVwpI5O/5pBU/nizS759k6dNn6UyUzxc1bt3RM="; + let pod_second_handle_str = PodDecryptHandle::from_str(second_handle_str).unwrap(); + let second_handle: DecryptHandle = pod_second_handle_str.try_into().unwrap(); + + let proof_str = "/GITIw3LjQSphEG1GWYpKGjKUrYnC1n4yGFDvBwcE2V6XdSM8FKgc3AjQYJWGVkUMsciv/vMRv3lyDuW4VJJclQk9STY7Pd2F4r6Lz1P3fBmODbDp++k3Ni759FrV141Oy4puCzHV8+LHg6ePh3WlZ8yL+Ri6VDTyLc+3pblSQ0VIno0QoxyavznU6faQhuCXuy3bD+E87ZlRNtk9jPKDg=="; + let pod_proof = PodGroupedCiphertext2HandlesValidityProof::from_str(proof_str).unwrap(); + let proof: GroupedCiphertext2HandlesValidityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"Test"); + + proof + .verify( + &commitment, + &first_pubkey, + &second_pubkey, + &first_handle, + &second_handle, + &mut verifier_transcript, + ) + .unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs index ab917b6b7319ee..74c90f69e57ea1 100644 --- a/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs +++ b/zk-sdk/src/sigma_proofs/grouped_ciphertext_validity/handles_3.rs @@ -268,7 +268,18 @@ impl GroupedCiphertext3HandlesValidityProof { mod test { use { super::*, - crate::encryption::{elgamal::ElGamalKeypair, pedersen::Pedersen}, + crate::{ + encryption::{ + elgamal::ElGamalKeypair, + pedersen::Pedersen, + pod::{ + elgamal::{PodDecryptHandle, PodElGamalCiphertext, PodElGamalPubkey}, + pedersen::PodPedersenCommitment, + }, + }, + sigma_proofs::pod::PodGroupedCiphertext3HandlesValidityProof, + }, + std::str::FromStr, }; #[test] @@ -301,7 +312,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &commitment, first_pubkey, @@ -312,7 +323,7 @@ mod test { &third_handle, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); } #[test] @@ -386,7 +397,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &commitment, first_pubkey, @@ -397,7 +408,7 @@ mod test { &third_handle, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // decryption handles can be zero as long as the Pedersen commitment is valid let first_keypair = ElGamalKeypair::new_rand(); @@ -430,7 +441,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &commitment, first_pubkey, @@ -441,6 +452,56 @@ mod test { &third_handle, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); + } + + #[test] + fn test_grouped_ciphertext_3_handles_validity_proof_string() { + let commitment_str = "DDSCVZLH+eqC9gX+ZeP3HQQxigojAOgda3YwVChR5W4="; + let pod_commitment = PodPedersenCommitment::from_str(commitment_str).unwrap(); + let commitment: PedersenCommitment = pod_commitment.try_into().unwrap(); + + let first_pubkey_str = "yGGJnLUs8B744So/Ua3n2wNm+8u9ey/6KrDdHx4ySwk="; + let pod_first_pubkey = 
PodElGamalPubkey::from_str(first_pubkey_str).unwrap(); + let first_pubkey: ElGamalPubkey = pod_first_pubkey.try_into().unwrap(); + + let second_pubkey_str = "ZFETe85sZdWpxLAo177kwiOxZCpsXGeyZEnzern7tAk="; + let pod_second_pubkey = PodElGamalPubkey::from_str(second_pubkey_str).unwrap(); + let second_pubkey: ElGamalPubkey = pod_second_pubkey.try_into().unwrap(); + + let third_pubkey_str = "duUYiBx0l0jRRPsTLCoCD8PIKFczPdrxl+2f4eCflhQ="; + let pod_third_pubkey = PodElGamalPubkey::from_str(third_pubkey_str).unwrap(); + let third_pubkey: ElGamalPubkey = pod_third_pubkey.try_into().unwrap(); + + let first_handle_str = "Asor2klomf847EmJZmXn3qoi0SGE3cBXCkKttbJa+lE="; + let pod_first_handle_str = PodDecryptHandle::from_str(first_handle_str).unwrap(); + let first_handle: DecryptHandle = pod_first_handle_str.try_into().unwrap(); + + let second_handle_str = "kJ0GYHDVeB1Kgvqp+MY/my3BYZvqsC5Mv0gQLJHnNBQ="; + let pod_second_handle_str = PodDecryptHandle::from_str(second_handle_str).unwrap(); + let second_handle: DecryptHandle = pod_second_handle_str.try_into().unwrap(); + + let third_handle_str = "Jnd5jZLNDOMMt+kbgQWCQqTytbwHx3Bz5vwtfDLhRn0="; + let pod_third_handle_str = PodDecryptHandle::from_str(third_handle_str).unwrap(); + let third_handle: DecryptHandle = pod_third_handle_str.try_into().unwrap(); + + let proof_str = "8NoqOM40+fvPY2aHzO0SdWZM6lvSoaqI7KpaFuE4wQUaqewILtQV8IMHeHmpevxt/GTErJsdcV8kY3HDZ1GHbMoDujYpstUhyubX1voJh/DstYAL1SQqlRpNLG+kWEUZYvCudTur7i5R+zqZQY3sRMEAxW458V+1GmyCWbWP3FZEz5gX/Pa28/ZNLBvmSPpJBZapXRI5Ra0dKPskFmQ0CH0gBWo6pxj/PH9sgNEkLrbVZB7jpVtdmNzivwgFeb4M"; + let pod_proof = PodGroupedCiphertext3HandlesValidityProof::from_str(proof_str).unwrap(); + let proof: GroupedCiphertext3HandlesValidityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"Test"); + + proof + .verify( + &commitment, + &first_pubkey, + &second_pubkey, + &third_pubkey, + &first_handle, + &second_handle, + &third_handle, + &mut verifier_transcript, + ) + .unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/percentage_with_cap.rs b/zk-sdk/src/sigma_proofs/percentage_with_cap.rs index 64e2b1794753d8..7803ffc92044de 100644 --- a/zk-sdk/src/sigma_proofs/percentage_with_cap.rs +++ b/zk-sdk/src/sigma_proofs/percentage_with_cap.rs @@ -556,7 +556,14 @@ fn conditional_select_ristretto( #[cfg(test)] mod test { - use {super::*, crate::encryption::pedersen::Pedersen}; + use { + super::*, + crate::{ + encryption::{pedersen::Pedersen, pod::pedersen::PodPedersenCommitment}, + sigma_proofs::pod::PodPercentageWithCapProof, + }, + std::str::FromStr, + }; #[test] fn test_proof_above_max_proof() { @@ -594,7 +601,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &percentage_commitment, &delta_commitment, @@ -602,7 +609,7 @@ mod test { max_value, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); } #[test] @@ -646,7 +653,7 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof .verify( &percentage_commitment, &delta_commitment, @@ -654,7 +661,7 @@ mod test { max_value, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); } #[test] @@ -693,7 +700,43 @@ mod test { &mut prover_transcript, ); - assert!(proof + proof + .verify( + &percentage_commitment, + &delta_commitment, + &claimed_commitment, + max_value, + &mut verifier_transcript, + ) + .unwrap(); + } + + #[test] + fn test_percentage_with_cap_proof_string() { + let max_value: u64 = 3; + + let percentage_commitment_str = "JGuzRjhmp3d8PWshbrN3Q7kg027OdPn7IU26ISTiz3c="; + let pod_percentage_commitment = + 
PodPedersenCommitment::from_str(percentage_commitment_str).unwrap(); + let percentage_commitment: PedersenCommitment = + pod_percentage_commitment.try_into().unwrap(); + + let delta_commitment_str = "3mwfK4u0J0UqCVznbxyCjlGEgMrI+XHdW7g00YVjSVA="; + let pod_delta_commitment = PodPedersenCommitment::from_str(delta_commitment_str).unwrap(); + let delta_commitment: PedersenCommitment = pod_delta_commitment.try_into().unwrap(); + + let claimed_commitment_str = "/t9n3yJa7p9wJV5P2cclnUiirKU5oNUv/gQMe27WMT4="; + let pod_claimed_commitment = + PodPedersenCommitment::from_str(claimed_commitment_str).unwrap(); + let claimed_commitment: PedersenCommitment = pod_claimed_commitment.try_into().unwrap(); + + let proof_str = "SpmzL7hrLLp7P/Cz+2kBh22QKq3mWb0v28Er6lO9aRfBer77VY03i9VSEd4uHYMXdaf/MBPUsDVjUxNjoauwBmw6OrAcq6tq9o1Z+NS8lkukVh6sqSrSh9dy9ipq6JcIePAVmGwDNk07ACgPE/ynrenwSPJ7ZHDGZszGkw95h25gTKPyoaMbvZoXGLtkuHmvXJ7KBBJmK2eTzELb6UF2HOUg9cGFgomL8Xa3l14LBDMwLAokJK4n2d6eTkk1O0ECddmTDwoG6lmt0fHXYm37Z+k4yrQkhUgKwph2nLWG3Q7zvRM2qVFxFUGfLWJq5Sm7l7segOm+hQpRaH+q7OHNBg=="; + let pod_proof = PodPercentageWithCapProof::from_str(proof_str).unwrap(); + let proof: PercentageWithCapProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"test"); + + proof .verify( &percentage_commitment, &delta_commitment, @@ -701,6 +744,6 @@ mod test { max_value, &mut verifier_transcript, ) - .is_ok()); + .unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/pod.rs b/zk-sdk/src/sigma_proofs/pod.rs index fb0bc3a96efba0..eceaf447594249 100644 --- a/zk-sdk/src/sigma_proofs/pod.rs +++ b/zk-sdk/src/sigma_proofs/pod.rs @@ -16,8 +16,13 @@ use crate::sigma_proofs::{ zero_ciphertext::ZeroCiphertextProof, }; use { - crate::sigma_proofs::{errors::*, *}, + crate::{ + pod::{impl_from_bytes, impl_from_str}, + sigma_proofs::{errors::*, *}, + }, + base64::{prelude::BASE64_STANDARD, Engine}, bytemuck::{Pod, Zeroable}, + std::fmt, }; /// The `CiphertextCommitmentEqualityProof` type as a `Pod`. #[derive(Clone, Copy)] #[repr(transparent)] @@ -43,6 +48,25 @@ impl TryFrom<PodCiphertextCommitmentEqualityProof> for CiphertextCommitmentEqual } } +const CIPHERTEXT_COMMITMENT_EQUALITY_PROOF_MAX_BASE64_LEN: usize = 256; + +impl fmt::Display for PodCiphertextCommitmentEqualityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodCiphertextCommitmentEqualityProof, + BYTES_LEN = CIPHERTEXT_COMMITMENT_EQUALITY_PROOF_LEN, + BASE64_LEN = CIPHERTEXT_COMMITMENT_EQUALITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodCiphertextCommitmentEqualityProof, + BYTES_LEN = CIPHERTEXT_COMMITMENT_EQUALITY_PROOF_LEN +); + /// The `CiphertextCiphertextEqualityProof` type as a `Pod`. #[derive(Clone, Copy)] #[repr(transparent)] @@ -66,6 +90,25 @@ impl TryFrom<PodCiphertextCiphertextEqualityProof> for CiphertextCiphertextEqual } } +const CIPHERTEXT_CIPHERTEXT_EQUALITY_PROOF_MAX_BASE64_LEN: usize = 300; + +impl fmt::Display for PodCiphertextCiphertextEqualityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodCiphertextCiphertextEqualityProof, + BYTES_LEN = CIPHERTEXT_CIPHERTEXT_EQUALITY_PROOF_LEN, + BASE64_LEN = CIPHERTEXT_CIPHERTEXT_EQUALITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodCiphertextCiphertextEqualityProof, + BYTES_LEN = CIPHERTEXT_CIPHERTEXT_EQUALITY_PROOF_LEN +); + /// The `GroupedCiphertext2HandlesValidityProof` type as a `Pod`.
#[derive(Clone, Copy)] #[repr(transparent)] @@ -89,6 +132,25 @@ impl TryFrom<PodGroupedCiphertext2HandlesValidityProof> for GroupedCiphertext2Ha } } +const GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN: usize = 216; + +impl fmt::Display for PodGroupedCiphertext2HandlesValidityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodGroupedCiphertext2HandlesValidityProof, + BYTES_LEN = GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_LEN, + BASE64_LEN = GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodGroupedCiphertext2HandlesValidityProof, + BYTES_LEN = GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_LEN +); + /// The `GroupedCiphertext3HandlesValidityProof` type as a `Pod`. #[derive(Clone, Copy)] #[repr(transparent)] @@ -112,6 +174,25 @@ impl TryFrom<PodGroupedCiphertext3HandlesValidityProof> for GroupedCiphertext3Ha } } +const GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN: usize = 256; + +impl fmt::Display for PodGroupedCiphertext3HandlesValidityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodGroupedCiphertext3HandlesValidityProof, + BYTES_LEN = GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_LEN, + BASE64_LEN = GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodGroupedCiphertext3HandlesValidityProof, + BYTES_LEN = GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_LEN +); + /// The `BatchedGroupedCiphertext2HandlesValidityProof` type as a `Pod`. #[derive(Clone, Copy)] #[repr(transparent)] @@ -141,6 +222,25 @@ impl TryFrom<PodBatchedGroupedCiphertext2HandlesValidityProof> } } +const BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN: usize = 216; + +impl fmt::Display for PodBatchedGroupedCiphertext2HandlesValidityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodBatchedGroupedCiphertext2HandlesValidityProof, + BYTES_LEN = BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_LEN, + BASE64_LEN = BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodBatchedGroupedCiphertext2HandlesValidityProof, + BYTES_LEN = BATCHED_GROUPED_CIPHERTEXT_2_HANDLES_VALIDITY_PROOF_LEN +); + /// The `BatchedGroupedCiphertext3HandlesValidityProof` type as a `Pod`. #[derive(Clone, Copy)] #[repr(transparent)] @@ -170,6 +270,25 @@ impl TryFrom<PodBatchedGroupedCiphertext3HandlesValidityProof> } } +const BATCHED_GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN: usize = 256; + +impl fmt::Display for PodBatchedGroupedCiphertext3HandlesValidityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodBatchedGroupedCiphertext3HandlesValidityProof, + BYTES_LEN = BATCHED_GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_LEN, + BASE64_LEN = BATCHED_GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodBatchedGroupedCiphertext3HandlesValidityProof, + BYTES_LEN = BATCHED_GROUPED_CIPHERTEXT_3_HANDLES_VALIDITY_PROOF_LEN +); + /// The `ZeroCiphertextProof` type as a `Pod`.
#[derive(Clone, Copy)] #[repr(transparent)] @@ -191,6 +310,25 @@ impl TryFrom<PodZeroCiphertextProof> for ZeroCiphertextProof { } } +const ZERO_CIPHERTEXT_PROOF_MAX_BASE64_LEN: usize = 128; + +impl fmt::Display for PodZeroCiphertextProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodZeroCiphertextProof, + BYTES_LEN = ZERO_CIPHERTEXT_PROOF_LEN, + BASE64_LEN = ZERO_CIPHERTEXT_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodZeroCiphertextProof, + BYTES_LEN = ZERO_CIPHERTEXT_PROOF_LEN +); + /// The `PercentageWithCapProof` type as a `Pod`. #[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(transparent)] @@ -212,6 +350,25 @@ impl TryFrom<PodPercentageWithCapProof> for PercentageWithCapProof { } } +const PERCENTAGE_WITH_CAP_PROOF_MAX_BASE64_LEN: usize = 344; + +impl fmt::Display for PodPercentageWithCapProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodPercentageWithCapProof, + BYTES_LEN = PERCENTAGE_WITH_CAP_PROOF_LEN, + BASE64_LEN = PERCENTAGE_WITH_CAP_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodPercentageWithCapProof, + BYTES_LEN = PERCENTAGE_WITH_CAP_PROOF_LEN +); + /// The `PubkeyValidityProof` type as a `Pod`. #[derive(Clone, Copy, bytemuck_derive::Pod, bytemuck_derive::Zeroable)] #[repr(transparent)] @@ -233,6 +390,25 @@ impl TryFrom<PodPubkeyValidityProof> for PubkeyValidityProof { } } +const PUBKEY_VALIDITY_PROOF_MAX_BASE64_LEN: usize = 88; + +impl fmt::Display for PodPubkeyValidityProof { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", BASE64_STANDARD.encode(self.0)) + } +} + +impl_from_str!( + TYPE = PodPubkeyValidityProof, + BYTES_LEN = PUBKEY_VALIDITY_PROOF_LEN, + BASE64_LEN = PUBKEY_VALIDITY_PROOF_MAX_BASE64_LEN +); + +impl_from_bytes!( + TYPE = PodPubkeyValidityProof, + BYTES_LEN = PUBKEY_VALIDITY_PROOF_LEN +); + // The sigma proof pod types are wrappers for byte arrays, which are both `Pod` and `Zeroable`. However, // the marker traits `bytemuck::Pod` and `bytemuck::Zeroable` can only be derived for power-of-two // length byte arrays. Directly implement these traits for the sigma proof pod types.
diff --git a/zk-sdk/src/sigma_proofs/pubkey_validity.rs b/zk-sdk/src/sigma_proofs/pubkey_validity.rs index 5e3c08f5b0630a..52aca468c8b396 100644 --- a/zk-sdk/src/sigma_proofs/pubkey_validity.rs +++ b/zk-sdk/src/sigma_proofs/pubkey_validity.rs @@ -140,7 +140,11 @@ impl PubkeyValidityProof { mod test { use { super::*, + crate::{ + encryption::pod::elgamal::PodElGamalPubkey, sigma_proofs::pod::PodPubkeyValidityProof, + }, solana_sdk::{pubkey::Pubkey, signature::Keypair}, + std::str::FromStr, }; #[test] @@ -152,9 +156,9 @@ mod test { let mut verifier_transcript = Transcript::new(b"test"); let proof = PubkeyValidityProof::new(&keypair, &mut prover_transcript); - assert!(proof + proof .verify(keypair.pubkey(), &mut verifier_transcript) - .is_ok()); + .unwrap(); // derived ElGamal keypair let keypair = @@ -164,8 +168,23 @@ mod test { let mut verifier_transcript = Transcript::new(b"test"); let proof = PubkeyValidityProof::new(&keypair, &mut prover_transcript); - assert!(proof + proof .verify(keypair.pubkey(), &mut verifier_transcript) - .is_ok()); + .unwrap(); + } + + #[test] + fn test_pubkey_proof_str() { + let pubkey_str = "XKF3GnFDX4HBoBEj04yDTr6Lqx+0qp9pQyPzFjyVmXY="; + let pod_pubkey = PodElGamalPubkey::from_str(pubkey_str).unwrap(); + let pubkey: ElGamalPubkey = pod_pubkey.try_into().unwrap(); + + let proof_str = "5hmM4uVtfJ2JfCcjWpo2dEbg22n4CdzHYQF4oBgWSGeYAh5d91z4emkjeXq9ihtmqAR+7BYCv44TqQWoMQrECA=="; + let pod_proof = PodPubkeyValidityProof::from_str(proof_str).unwrap(); + let proof: PubkeyValidityProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"test"); + + proof.verify(&pubkey, &mut verifier_transcript).unwrap(); } } diff --git a/zk-sdk/src/sigma_proofs/zero_ciphertext.rs b/zk-sdk/src/sigma_proofs/zero_ciphertext.rs index f598210af6018b..4b6cf95f6f463a 100644 --- a/zk-sdk/src/sigma_proofs/zero_ciphertext.rs +++ b/zk-sdk/src/sigma_proofs/zero_ciphertext.rs @@ -180,14 +180,19 @@ impl ZeroCiphertextProof { mod test { use { super::*, - crate::encryption::{ - elgamal::{DecryptHandle, ElGamalKeypair}, - pedersen::{Pedersen, PedersenCommitment, PedersenOpening}, + crate::{ + encryption::{ + elgamal::{DecryptHandle, ElGamalKeypair}, + pedersen::{Pedersen, PedersenCommitment, PedersenOpening}, + pod::elgamal::{PodElGamalCiphertext, PodElGamalPubkey}, + }, + sigma_proofs::pod::PodZeroCiphertextProof, }, + std::str::FromStr, }; #[test] - fn test_zero_cipehrtext_proof_correctness() { + fn test_zero_ciphertext_proof_correctness() { let keypair = ElGamalKeypair::new_rand(); let mut prover_transcript = Transcript::new(b"test"); @@ -196,13 +201,13 @@ mod test { // general case: encryption of 0 let elgamal_ciphertext = keypair.pubkey().encrypt(0_u64); let proof = ZeroCiphertextProof::new(&keypair, &elgamal_ciphertext, &mut prover_transcript); - assert!(proof + proof .verify( keypair.pubkey(), &elgamal_ciphertext, - &mut verifier_transcript + &mut verifier_transcript, ) - .is_ok()); + .unwrap(); // general case: encryption of > 0 let elgamal_ciphertext = keypair.pubkey().encrypt(1_u64); @@ -228,9 +233,9 @@ mod test { let proof = ZeroCiphertextProof::new(&keypair, &ciphertext, &mut prover_transcript); - assert!(proof + proof .verify(keypair.pubkey(), &ciphertext, &mut verifier_transcript) - .is_ok()); + .unwrap(); // if only either commitment or handle is zero, the ciphertext is always invalid and proof // verification should always reject @@ -281,4 +286,25 @@ mod test { .verify(keypair.pubkey(), &ciphertext, &mut verifier_transcript) .is_err()); } + + #[test] + fn 
test_zero_ciphertext_proof_string() { + let pubkey_str = "Vlx+Fr61KnreO27JDg5MsBN8NgbICGa3fIech8oZ4hQ="; + let pod_pubkey = PodElGamalPubkey::from_str(pubkey_str).unwrap(); + let pubkey: ElGamalPubkey = pod_pubkey.try_into().unwrap(); + + let ciphertext_str = "wps5X1mou5PUdPD+llxiJ+aoX4YWrR/S6/U2MUC2LjLS7wDu6S9nOG92VMnlngQaP4irBY0OqlsGdXS4j8DROg=="; + let pod_ciphertext = PodElGamalCiphertext::from_str(ciphertext_str).unwrap(); + let ciphertext: ElGamalCiphertext = pod_ciphertext.try_into().unwrap(); + + let proof_str = "qMDiQ5zPcTYFhchYBZzRS81UGIt2QRNce2/ULEqDBXBQEnGRI0u0G1HzRJfpIbOWCHBwMaNgsT1jTZwTOTWyMBE/2UjHI4x9IFpAM6ccGuexo/HjSECPDgL+85zrfA8L"; + let pod_proof = PodZeroCiphertextProof::from_str(proof_str).unwrap(); + let proof: ZeroCiphertextProof = pod_proof.try_into().unwrap(); + + let mut verifier_transcript = Transcript::new(b"test"); + + proof + .verify(&pubkey, &ciphertext, &mut verifier_transcript) + .unwrap(); + } } From 05361a9ed652e85b06fdb367fa93c22b4af49549 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 4 Sep 2024 18:44:58 +0800 Subject: [PATCH 286/529] metrics: remove optimistic_slot_elapsed (#2717) * metrics: remove optimistic_slot_elapsed * remove optimistic_slot_elapsed from grafana dashboard json --- core/src/optimistic_confirmation_verifier.rs | 9 -- .../dashboards/cluster-monitor.json | 136 ------------------ 2 files changed, 145 deletions(-) diff --git a/core/src/optimistic_confirmation_verifier.rs b/core/src/optimistic_confirmation_verifier.rs index eb63e83945f3bc..6f4e05451268c3 100644 --- a/core/src/optimistic_confirmation_verifier.rs +++ b/core/src/optimistic_confirmation_verifier.rs @@ -61,15 +61,6 @@ impl OptimisticConfirmationVerifier { return; } - datapoint_info!( - "optimistic_slot_elapsed", - ( - "average_elapsed_ms", - self.last_optimistic_slot_ts.elapsed().as_millis() as i64, - i64 - ), - ); - // We don't have any information about ancestors before the snapshot root, // so ignore those slots for (new_optimistic_slot, hash) in new_optimistic_slots { diff --git a/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json b/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json index f2b1e229bf7afc..c33ece2f2eb86c 100644 --- a/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json +++ b/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json @@ -10628,142 +10628,6 @@ "alignLevel": null } }, - { - "aliasColors": { - "cluster-info.repair": "#ba43a9", - "replay_stage-new_leader.last": "#00ffbb", - "tower-vote.last": "#00ffbb", - "window-service.receive": "#b7dbab", - "window-stage.consumed": "#5195ce" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 83 - }, - "hiddenSeries": false, - "id": 59, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": true, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - 
"type": "fill" - } - ], - "hide": false, - "measurement": "cluster_info-vote-count", - "orderByTime": "ASC", - "policy": "autogen", - "query": "SELECT last(\"average_elapsed_ms\") FROM \"$testnet\".\"autogen\".\"optimistic_slot_elapsed\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "count" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average Time Between Optimistic Confirmations ($hostid)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "aliasColors": { "cluster-info.repair": "#ba43a9", From 7e8a1ddf86fa84b0ca4b64360af89399afd9de44 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 4 Sep 2024 17:35:04 +0400 Subject: [PATCH 287/529] sdk: Extract clock crate (#1967) * extract clock crate * update clock usage in solana-program * fmt * fmt after rebase * update lock file after rebase * fmt after rebase * fmt after rebase * fmt * make serde optional in solana-clock * fix description Co-authored-by: Jon C * fix docs link Co-authored-by: Jon C * fix accidental deletions from workspace members table --------- Co-authored-by: Jon C --- Cargo.lock | 11 +++++++++ Cargo.toml | 2 ++ programs/sbf/Cargo.lock | 10 ++++++++ sdk/clock/Cargo.toml | 24 +++++++++++++++++++ .../src/clock.rs => clock/src/lib.rs} | 5 +++- sdk/program/Cargo.toml | 1 + sdk/program/src/account_info.rs | 3 ++- .../src/address_lookup_table/instruction.rs | 2 +- sdk/program/src/address_lookup_table/state.rs | 2 +- sdk/program/src/epoch_schedule.rs | 2 +- sdk/program/src/example_mocks.rs | 2 +- sdk/program/src/feature.rs | 11 +++++---- sdk/program/src/fee_calculator.rs | 2 +- sdk/program/src/last_restart_slot.rs | 2 +- sdk/program/src/lib.rs | 3 +-- sdk/program/src/program.rs | 11 +++++---- sdk/program/src/rent.rs | 2 +- sdk/program/src/slot_hashes.rs | 2 +- sdk/program/src/stake/instruction.rs | 2 +- sdk/program/src/stake/state.rs | 2 +- sdk/program/src/stake/tools.rs | 5 ++-- sdk/program/src/stake_history.rs | 2 +- sdk/program/src/sysvar/clock.rs | 2 +- sdk/program/src/sysvar/mod.rs | 2 +- sdk/program/src/sysvar/recent_blockhashes.rs | 2 +- sdk/program/src/sysvar/slot_hashes.rs | 3 +-- sdk/program/src/sysvar/stake_history.rs | 10 ++++---- sdk/program/src/vote/authorized_voters.rs | 3 ++- sdk/program/src/vote/instruction.rs | 2 +- sdk/program/src/vote/state/mod.rs | 14 +++-------- 30 files changed, 99 insertions(+), 47 deletions(-) create mode 100644 sdk/clock/Cargo.toml rename sdk/{program/src/clock.rs => clock/src/lib.rs} (97%) diff --git a/Cargo.lock b/Cargo.lock index 1467f4e8105338..636378ac3db666 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6106,6 +6106,16 @@ dependencies = [ "tungstenite", ] +[[package]] +name = "solana-clock" +version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-sdk-macro", + "static_assertions", +] + [[package]] name = "solana-compute-budget" version = "2.1.0" @@ 
-6996,6 +7006,7 @@ dependencies = [ "sha2 0.10.8", "sha3", "solana-atomic-u64", + "solana-clock", "solana-decode-error", "solana-define-syscall", "solana-frozen-abi", diff --git a/Cargo.toml b/Cargo.toml index 77eb27a151acf2..9ca4a0cdc98298 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,7 @@ members = [ "sdk/atomic-u64", "sdk/cargo-build-sbf", "sdk/cargo-test-sbf", + "sdk/clock", "sdk/decode-error", "sdk/gen-headers", "sdk/macro", @@ -366,6 +367,7 @@ solana-cli = { path = "cli", version = "=2.1.0" } solana-cli-config = { path = "cli-config", version = "=2.1.0" } solana-cli-output = { path = "cli-output", version = "=2.1.0" } solana-client = { path = "client", version = "=2.1.0" } +solana-clock = { path = "sdk/clock", version = "=2.1.0" } solana-compute-budget = { path = "compute-budget", version = "=2.1.0" } solana-compute-budget-program = { path = "programs/compute-budget", version = "=2.1.0" } solana-config-program = { path = "programs/config", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b1758b3277f087..458f977bc76e7a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4839,6 +4839,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-clock" +version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-sdk-macro", +] + [[package]] name = "solana-compute-budget" version = "2.1.0" @@ -5411,6 +5420,7 @@ dependencies = [ "sha2 0.10.8", "sha3", "solana-atomic-u64", + "solana-clock", "solana-decode-error", "solana-define-syscall", "solana-msg", diff --git a/sdk/clock/Cargo.toml b/sdk/clock/Cargo.toml new file mode 100644 index 00000000000000..677c3f61d6a9ec --- /dev/null +++ b/sdk/clock/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "solana-clock" +description = "Solana Clock and Time Definitions" +documentation = "https://docs.rs/solana-clock" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-sdk-macro = { workspace = true } + +[dev-dependencies] +static_assertions = { workspace = true } + +[features] +serde = ["dep:serde", "dep:serde_derive"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/clock.rs b/sdk/clock/src/lib.rs similarity index 97% rename from sdk/program/src/clock.rs rename to sdk/clock/src/lib.rs index 5cf609d3000c26..870265f9c18eb9 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/clock/src/lib.rs @@ -20,6 +20,8 @@ //! //! [oracle]: https://docs.solanalabs.com/implemented-proposals/validator-timestamp-oracle +#[cfg(feature = "serde")] +use serde_derive::{Deserialize, Serialize}; use solana_sdk_macro::CloneZeroed; /// The default tick rate that the cluster attempts to achieve (160 per second). @@ -172,7 +174,8 @@ pub type UnixTimestamp = i64; /// /// All members of `Clock` start from 0 upon network boot. #[repr(C)] -#[derive(Serialize, Deserialize, Debug, CloneZeroed, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, CloneZeroed, Default, PartialEq, Eq)] pub struct Clock { /// The current `Slot`. 
pub slot: Slot, diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index a5b525aaf0dca0..b711393274ef97 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -32,6 +32,7 @@ serde_derive = { workspace = true } sha2 = { workspace = true } sha3 = { workspace = true } solana-atomic-u64 = { workspace = true } +solana-clock = { workspace = true, features = ["serde"] } solana-decode-error = { workspace = true } solana-frozen-abi = { workspace = true, optional = true, features = ["frozen-abi"] } solana-frozen-abi-macro = { workspace = true, optional = true, features = ["frozen-abi"] } diff --git a/sdk/program/src/account_info.rs b/sdk/program/src/account_info.rs index 485195381a9f64..fb7614903673b8 100644 --- a/sdk/program/src/account_info.rs +++ b/sdk/program/src/account_info.rs @@ -2,9 +2,10 @@ use { crate::{ - clock::Epoch, debug_account_data::*, entrypoint::MAX_PERMITTED_DATA_INCREASE, + debug_account_data::*, entrypoint::MAX_PERMITTED_DATA_INCREASE, program_error::ProgramError, pubkey::Pubkey, }, + solana_clock::Epoch, solana_program_memory::sol_memset, std::{ cell::{Ref, RefCell, RefMut}, diff --git a/sdk/program/src/address_lookup_table/instruction.rs b/sdk/program/src/address_lookup_table/instruction.rs index 5687ab6d05a1aa..4d73060e046ec3 100644 --- a/sdk/program/src/address_lookup_table/instruction.rs +++ b/sdk/program/src/address_lookup_table/instruction.rs @@ -1,12 +1,12 @@ use { crate::{ address_lookup_table::program::id, - clock::Slot, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, system_program, }, serde_derive::{Deserialize, Serialize}, + solana_clock::Slot, }; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] diff --git a/sdk/program/src/address_lookup_table/state.rs b/sdk/program/src/address_lookup_table/state.rs index df564f78fe2577..13a66637faa919 100644 --- a/sdk/program/src/address_lookup_table/state.rs +++ b/sdk/program/src/address_lookup_table/state.rs @@ -2,9 +2,9 @@ use solana_frozen_abi_macro::{AbiEnumVisitor, AbiExample}; use { serde_derive::{Deserialize, Serialize}, + solana_clock::Slot, solana_program::{ address_lookup_table::error::AddressLookupError, - clock::Slot, instruction::InstructionError, pubkey::Pubkey, slot_hashes::{SlotHashes, MAX_ENTRIES}, diff --git a/sdk/program/src/epoch_schedule.rs b/sdk/program/src/epoch_schedule.rs index 2e1398d86b9050..d36f34aacf64ab 100644 --- a/sdk/program/src/epoch_schedule.rs +++ b/sdk/program/src/epoch_schedule.rs @@ -11,7 +11,7 @@ //! the chain there is a "warmup" period, where epochs are short, with subsequent //! epochs increasing in slots until they last for [`DEFAULT_SLOTS_PER_EPOCH`]. -pub use crate::clock::{Epoch, Slot, DEFAULT_SLOTS_PER_EPOCH}; +pub use solana_clock::{Epoch, Slot, DEFAULT_SLOTS_PER_EPOCH}; use solana_sdk_macro::CloneZeroed; /// The default number of slots before an epoch starts to calculate the leader schedule. diff --git a/sdk/program/src/example_mocks.rs b/sdk/program/src/example_mocks.rs index b528812e36f6b3..eaa02ee02724de 100644 --- a/sdk/program/src/example_mocks.rs +++ b/sdk/program/src/example_mocks.rs @@ -123,7 +123,7 @@ pub mod solana_sdk { }; pub mod account { - use crate::{clock::Epoch, pubkey::Pubkey}; + use {crate::pubkey::Pubkey, solana_clock::Epoch}; #[derive(Clone)] pub struct Account { pub lamports: u64, diff --git a/sdk/program/src/feature.rs b/sdk/program/src/feature.rs index b46704ebcb9992..af4ab1ad287636 100644 --- a/sdk/program/src/feature.rs +++ b/sdk/program/src/feature.rs @@ -11,9 +11,12 @@ //! 2. 
When the next epoch is entered the runtime will check for new activation requests and //! active them. When this occurs, the activation slot is recorded in the feature account -use crate::{ - account_info::AccountInfo, clock::Slot, instruction::Instruction, program_error::ProgramError, - pubkey::Pubkey, rent::Rent, system_instruction, +use { + crate::{ + account_info::AccountInfo, instruction::Instruction, program_error::ProgramError, + pubkey::Pubkey, rent::Rent, system_instruction, + }, + solana_clock::Slot, }; crate::declare_id!("Feature111111111111111111111111111111111111"); @@ -60,7 +63,7 @@ pub fn activate_with_lamports( #[cfg(test)] mod test { - use {super::*, solana_program::clock::Slot}; + use super::*; #[test] fn test_feature_size_of() { diff --git a/sdk/program/src/fee_calculator.rs b/sdk/program/src/fee_calculator.rs index 5d753e4acaed3a..a9b616ff8f53da 100644 --- a/sdk/program/src/fee_calculator.rs +++ b/sdk/program/src/fee_calculator.rs @@ -1,7 +1,7 @@ //! Calculation of transaction fees. #![allow(clippy::arithmetic_side_effects)] -use {crate::clock::DEFAULT_MS_PER_SLOT, log::*}; +use {log::*, solana_clock::DEFAULT_MS_PER_SLOT}; #[repr(C)] #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] diff --git a/sdk/program/src/last_restart_slot.rs b/sdk/program/src/last_restart_slot.rs index 7c67a574e93c45..880685a096b5b1 100644 --- a/sdk/program/src/last_restart_slot.rs +++ b/sdk/program/src/last_restart_slot.rs @@ -1,6 +1,6 @@ //! Information about the last restart slot (hard fork). -use {crate::clock::Slot, solana_sdk_macro::CloneZeroed}; +use {solana_clock::Slot, solana_sdk_macro::CloneZeroed}; #[repr(C)] #[derive(Serialize, Deserialize, Debug, CloneZeroed, PartialEq, Eq, Default)] diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 591c4563a973ae..de4c685fdfbfba 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -482,7 +482,6 @@ pub mod borsh1; pub mod bpf_loader; pub mod bpf_loader_deprecated; pub mod bpf_loader_upgradeable; -pub mod clock; pub mod compute_units; pub mod debug_account_data; pub mod ed25519_program; @@ -530,7 +529,6 @@ pub mod sysvar; pub mod vote; pub mod wasm; -pub use solana_msg::msg; #[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")] pub use solana_program_memory as program_memory; #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")] @@ -541,6 +539,7 @@ pub use solana_secp256k1_recover as secp256k1_recover; pub use solana_short_vec as short_vec; #[cfg(target_arch = "wasm32")] pub use wasm_bindgen::prelude::wasm_bindgen; +pub use {solana_clock as clock, solana_msg::msg}; /// The [config native program][np]. /// diff --git a/sdk/program/src/program.rs b/sdk/program/src/program.rs index 27a4a2a8cca957..7692b8d2b412ab 100644 --- a/sdk/program/src/program.rs +++ b/sdk/program/src/program.rs @@ -8,9 +8,12 @@ //! [`invoke_signed`]: invoke_signed //! [cpi]: https://solana.com/docs/core/cpi -use crate::{ - account_info::AccountInfo, entrypoint::ProgramResult, instruction::Instruction, pubkey::Pubkey, - stable_layout::stable_instruction::StableInstruction, +use { + crate::{ + account_info::AccountInfo, entrypoint::ProgramResult, instruction::Instruction, + pubkey::Pubkey, stable_layout::stable_instruction::StableInstruction, + }, + solana_clock::Epoch, }; /// Invoke a cross-program instruction. 
@@ -396,7 +399,7 @@ pub fn get_return_data() -> Option<(Pubkey, Vec<u8>)> { pub fn check_type_assumptions() { extern crate memoffset; use { - crate::{clock::Epoch, instruction::AccountMeta}, + crate::instruction::AccountMeta, memoffset::offset_of, std::{ cell::RefCell, diff --git a/sdk/program/src/rent.rs b/sdk/program/src/rent.rs index 6553125eede6d0..47308066d927cc 100644 --- a/sdk/program/src/rent.rs +++ b/sdk/program/src/rent.rs @@ -4,7 +4,7 @@ #![allow(clippy::arithmetic_side_effects)] -use {crate::clock::DEFAULT_SLOTS_PER_EPOCH, solana_sdk_macro::CloneZeroed}; +use {solana_clock::DEFAULT_SLOTS_PER_EPOCH, solana_sdk_macro::CloneZeroed}; /// Configuration of network rent. #[repr(C)] diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index f17512ac9fb124..f18d14e89f9e9c 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -6,7 +6,7 @@ //! //! [`sysvar::slot_hashes`]: crate::sysvar::slot_hashes -pub use crate::clock::Slot; +pub use solana_clock::Slot; use { crate::hash::Hash, std::{ diff --git a/sdk/program/src/stake/instruction.rs b/sdk/program/src/stake/instruction.rs index 89357050e93ca8..fff1811bc393e9 100644 --- a/sdk/program/src/stake/instruction.rs +++ b/sdk/program/src/stake/instruction.rs @@ -5,7 +5,6 @@ use { crate::{ - clock::{Epoch, UnixTimestamp}, instruction::{AccountMeta, Instruction}, program_error::ProgramError, pubkey::Pubkey, @@ -19,6 +18,7 @@ use { log::*, num_derive::{FromPrimitive, ToPrimitive}, serde_derive::{Deserialize, Serialize}, + solana_clock::{Epoch, UnixTimestamp}, solana_decode_error::DecodeError, thiserror::Error, }; diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index 139df906fc12b2..4fee1bb008fb79 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -8,7 +8,6 @@ use borsh::{io, BorshDeserialize, BorshSchema, BorshSerialize}; use { crate::{ - clock::{Clock, Epoch, UnixTimestamp}, instruction::InstructionError, pubkey::Pubkey, stake::{ @@ -17,6 +16,7 @@ use { }, stake_history::{StakeHistoryEntry, StakeHistoryGetEntry}, }, + solana_clock::{Clock, Epoch, UnixTimestamp}, std::collections::HashSet, }; diff --git a/sdk/program/src/stake/tools.rs b/sdk/program/src/stake/tools.rs index e0447f49fc69c9..73f92017bb9390 100644 --- a/sdk/program/src/stake/tools.rs +++ b/sdk/program/src/stake/tools.rs @@ -1,6 +1,7 @@ //! Utility functions -use crate::{ - clock::Epoch, program_error::ProgramError, stake::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION, +use { + crate::{program_error::ProgramError, stake::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION}, + solana_clock::Epoch, }; /// Helper function for programs to call [`GetMinimumDelegation`] and then fetch the return data diff --git a/sdk/program/src/stake_history.rs b/sdk/program/src/stake_history.rs index f1bbe3b0f81da8..9438d004c0f5f3 100644 --- a/sdk/program/src/stake_history.rs +++ b/sdk/program/src/stake_history.rs @@ -6,7 +6,7 @@ //! //! [`sysvar::stake_history`]: crate::sysvar::stake_history -pub use crate::clock::Epoch; +pub use solana_clock::Epoch; use std::ops::Deref; pub const MAX_ENTRIES: usize = 512; // it should never take as many as 512 epochs to warm up or cool down diff --git a/sdk/program/src/sysvar/clock.rs b/sdk/program/src/sysvar/clock.rs index c9f31e8fa9efcd..db87594001d578 100644 --- a/sdk/program/src/sysvar/clock.rs +++ b/sdk/program/src/sysvar/clock.rs @@ -126,8 +126,8 @@ //! # Ok::<(), anyhow::Error>(()) //!
``` -pub use crate::clock::Clock; use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; +pub use solana_clock::Clock; crate::declare_sysvar_id!("SysvarC1ock11111111111111111111111111111111", Clock); diff --git a/sdk/program/src/sysvar/mod.rs b/sdk/program/src/sysvar/mod.rs index 69a3c475a0decd..9cdbac0b75bc01 100644 --- a/sdk/program/src/sysvar/mod.rs +++ b/sdk/program/src/sysvar/mod.rs @@ -283,12 +283,12 @@ mod tests { use { super::*, crate::{ - clock::Epoch, entrypoint::SUCCESS, program_error::ProgramError, program_stubs::{set_syscall_stubs, SyscallStubs}, pubkey::Pubkey, }, + solana_clock::Epoch, std::{cell::RefCell, rc::Rc}, }; diff --git a/sdk/program/src/sysvar/recent_blockhashes.rs b/sdk/program/src/sysvar/recent_blockhashes.rs index ec3a69baf7adb9..1dde2fda5d29af 100644 --- a/sdk/program/src/sysvar/recent_blockhashes.rs +++ b/sdk/program/src/sysvar/recent_blockhashes.rs @@ -162,7 +162,7 @@ impl Deref for RecentBlockhashes { #[cfg(test)] mod tests { - use {super::*, crate::clock::MAX_PROCESSING_AGE}; + use {super::*, solana_clock::MAX_PROCESSING_AGE}; #[test] #[allow(clippy::assertions_on_constants)] diff --git a/sdk/program/src/sysvar/slot_hashes.rs b/sdk/program/src/sysvar/slot_hashes.rs index 69ab49dc4e124a..ab683c1802b441 100644 --- a/sdk/program/src/sysvar/slot_hashes.rs +++ b/sdk/program/src/sysvar/slot_hashes.rs @@ -49,13 +49,13 @@ pub use crate::slot_hashes::SlotHashes; use { crate::{ account_info::AccountInfo, - clock::Slot, hash::Hash, program_error::ProgramError, slot_hashes::MAX_ENTRIES, sysvar::{get_sysvar, Sysvar, SysvarId}, }, bytemuck_derive::{Pod, Zeroable}, + solana_clock::Slot, }; const U64_SIZE: usize = std::mem::size_of::<u64>(); @@ -218,7 +218,6 @@ mod tests { use { super::*, crate::{ - clock::Slot, hash::{hash, Hash}, slot_hashes::MAX_ENTRIES, sysvar::tests::mock_get_sysvar_syscall, diff --git a/sdk/program/src/sysvar/stake_history.rs b/sdk/program/src/sysvar/stake_history.rs index 6f2008bf8e44a5..0c41689b102f71 100644 --- a/sdk/program/src/sysvar/stake_history.rs +++ b/sdk/program/src/sysvar/stake_history.rs @@ -46,10 +46,12 @@ //!
``` pub use crate::stake_history::StakeHistory; -use crate::{ - clock::Epoch, - stake_history::{StakeHistoryEntry, StakeHistoryGetEntry, MAX_ENTRIES}, - sysvar::{get_sysvar, Sysvar, SysvarId}, +use { + crate::{ + stake_history::{StakeHistoryEntry, StakeHistoryGetEntry, MAX_ENTRIES}, + sysvar::{get_sysvar, Sysvar, SysvarId}, + }, + solana_clock::Epoch, }; crate::declare_sysvar_id!("SysvarStakeHistory1111111111111111111111111", StakeHistory); diff --git a/sdk/program/src/vote/authorized_voters.rs b/sdk/program/src/vote/authorized_voters.rs index 773dfa70c690de..69f1a8b4828b1f 100644 --- a/sdk/program/src/vote/authorized_voters.rs +++ b/sdk/program/src/vote/authorized_voters.rs @@ -1,8 +1,9 @@ #[cfg(test)] use arbitrary::Arbitrary; use { - crate::{clock::Epoch, pubkey::Pubkey}, + crate::pubkey::Pubkey, serde_derive::{Deserialize, Serialize}, + solana_clock::Epoch, std::collections::BTreeMap, }; diff --git a/sdk/program/src/vote/instruction.rs b/sdk/program/src/vote/instruction.rs index c4369dd26d8080..e707c9e06d05bd 100644 --- a/sdk/program/src/vote/instruction.rs +++ b/sdk/program/src/vote/instruction.rs @@ -3,7 +3,6 @@ use { super::state::TowerSync, crate::{ - clock::{Slot, UnixTimestamp}, hash::Hash, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, @@ -18,6 +17,7 @@ use { }, }, serde_derive::{Deserialize, Serialize}, + solana_clock::{Slot, UnixTimestamp}, }; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 8d1efa7468db5f..9944f5517660f2 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -9,7 +9,6 @@ use { }; use { crate::{ - clock::{Epoch, Slot, UnixTimestamp}, hash::Hash, instruction::InstructionError, pubkey::Pubkey, @@ -20,6 +19,7 @@ use { }, bincode::{serialize_into, ErrorKind}, serde_derive::{Deserialize, Serialize}, + solana_clock::{Epoch, Slot, UnixTimestamp}, std::{ collections::VecDeque, fmt::Debug, @@ -978,11 +978,7 @@ impl VoteState { pub mod serde_compact_vote_state_update { use { super::*, - crate::{ - clock::{Slot, UnixTimestamp}, - serde_varint, - vote::state::Lockout, - }, + crate::{serde_varint, vote::state::Lockout}, serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_short_vec as short_vec, }; @@ -1076,11 +1072,7 @@ pub mod serde_compact_vote_state_update { pub mod serde_tower_sync { use { super::*, - crate::{ - clock::{Slot, UnixTimestamp}, - serde_varint, - vote::state::Lockout, - }, + crate::{serde_varint, vote::state::Lockout}, serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_short_vec as short_vec, }; From 7c9bbc47c2cd84fb703f19a57325d74782c82765 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Wed, 4 Sep 2024 09:01:27 -0500 Subject: [PATCH 288/529] Add early exit to add-team-to-ghsa (#2829) Add early exit to workflow if there are no teams that don't already have the team --- .github/scripts/add-team-to-ghsa.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/scripts/add-team-to-ghsa.sh b/.github/scripts/add-team-to-ghsa.sh index 41c1a787e85044..636888b02b7f02 100755 --- a/.github/scripts/add-team-to-ghsa.sh +++ b/.github/scripts/add-team-to-ghsa.sh @@ -15,6 +15,10 @@ ghsa_json=$(gh api \ # Get a list of GHSAs that don't have the $team_to_add_slug in collaborating_teams ghsa_without_team=$( jq -r '[ .[] | select(all(.collaborating_teams.[]; .slug != "'"$team_to_add_slug"'")) | .ghsa_id ] | sort | .[] ' <<< "$ghsa_json" ) +if [[ -z $ghsa_without_team ]]; then + echo "All GHSAs 
already have $team_to_add_slug. Exiting..." + exit 0 +fi # Iterate through the teams while IFS= read -r ghsa_id; do From f0179722340b45e11244d6921d4e54f3038c79ea Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 4 Sep 2024 10:12:15 -0500 Subject: [PATCH 289/529] add ancient.total_alive_bytes metric (#2828) * add ancient.total_alive_bytes metric * add to report --- accounts-db/src/accounts_db.rs | 6 ++++++ accounts-db/src/ancient_append_vecs.rs | 9 ++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index aa98b65e3aba72..7e51234ea93a8c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1990,6 +1990,7 @@ pub(crate) struct ShrinkAncientStats { pub(crate) many_refs_old_alive: AtomicU64, pub(crate) slots_eligible_to_shrink: AtomicU64, pub(crate) total_dead_bytes: AtomicU64, + pub(crate) total_alive_bytes: AtomicU64, } #[derive(Debug, Default)] @@ -2320,6 +2321,11 @@ impl ShrinkAncientStats { self.total_dead_bytes.swap(0, Ordering::Relaxed), i64 ), + ( + "total_alive_bytes", + self.total_alive_bytes.swap(0, Ordering::Relaxed), + i64 + ), ( "slots_considered", self.slots_considered.swap(0, Ordering::Relaxed) as i64, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 24ec415b792a3f..95d45bfc93d573 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -584,11 +584,15 @@ impl AccountsDb { } } let mut total_dead_bytes = 0; + let mut total_alive_bytes = 0; let should_shrink_count = infos .all_infos .iter() .filter(|info| info.should_shrink) - .map(|info| total_dead_bytes += info.capacity.saturating_sub(info.alive_bytes)) + .map(|info| { + total_dead_bytes += info.capacity.saturating_sub(info.alive_bytes); + total_alive_bytes += info.alive_bytes; + }) .count() .saturating_sub(randoms as usize); self.shrink_ancient_stats @@ -597,6 +601,9 @@ impl AccountsDb { self.shrink_ancient_stats .total_dead_bytes .fetch_add(total_dead_bytes, Ordering::Relaxed); + self.shrink_ancient_stats + .total_alive_bytes + .fetch_add(total_alive_bytes, Ordering::Relaxed); if randoms > 0 { self.shrink_ancient_stats .random_shrink From efd47046c1bb9bb027757ddabe408315bc7865cc Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 4 Sep 2024 19:46:22 +0400 Subject: [PATCH 290/529] Extract serde-varint crate (#2053) * move serde_varint.rs to its own crate * update serde_varint dependents * re-export serde_varint crate with deprecation notice in sdk and program * missing dep * fix test deps * update lock file after rebase * fmt after rebase * fix deps and imports after rebase * fmt * mention solana in the crate description * update lock file --- Cargo.lock | 15 +++++++++++++ Cargo.toml | 2 ++ gossip/Cargo.toml | 1 + gossip/src/contact_info.rs | 3 +-- gossip/src/restart_crds_values.rs | 3 ++- programs/sbf/Cargo.lock | 11 ++++++++++ sdk/Cargo.toml | 1 + sdk/program/Cargo.toml | 1 + sdk/program/src/lib.rs | 3 ++- sdk/program/src/vote/state/mod.rs | 8 +++---- sdk/serde-varint/Cargo.toml | 22 +++++++++++++++++++ .../src/lib.rs} | 6 ++++- sdk/src/lib.rs | 6 +++-- version/Cargo.toml | 1 + version/src/lib.rs | 2 +- 15 files changed, 73 insertions(+), 12 deletions(-) create mode 100644 sdk/serde-varint/Cargo.toml rename sdk/{program/src/serde_varint.rs => serde-varint/src/lib.rs} (98%) diff --git a/Cargo.lock b/Cargo.lock index 636378ac3db666..05283f66d31d42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6560,6 
+6560,7 @@ dependencies = [ "solana-runtime", "solana-sanitize", "solana-sdk", + "solana-serde-varint", "solana-short-vec", "solana-streamer", "solana-tpu-client", @@ -7017,6 +7018,7 @@ dependencies = [ "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", + "solana-serde-varint", "solana-short-vec", "static_assertions", "test-case", @@ -7509,6 +7511,7 @@ dependencies = [ "solana-sdk", "solana-sdk-macro", "solana-secp256k1-recover", + "solana-serde-varint", "solana-short-vec", "static_assertions", "thiserror", @@ -7563,6 +7566,17 @@ dependencies = [ "solana-tpu-client", ] +[[package]] +name = "solana-serde-varint" +version = "2.1.0" +dependencies = [ + "bincode", + "rand 0.8.5", + "serde", + "serde_derive", + "solana-short-vec", +] + [[package]] name = "solana-short-vec" version = "2.1.0" @@ -8119,6 +8133,7 @@ dependencies = [ "solana-frozen-abi-macro", "solana-sanitize", "solana-sdk", + "solana-serde-varint", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9ca4a0cdc98298..3859d377714dff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,6 +111,7 @@ members = [ "sdk/package-metadata-macro", "sdk/program", "sdk/program-memory", + "sdk/serde-varint", "send-transaction-service", "short-vec", "stake-accounts", @@ -416,6 +417,7 @@ solana-quic-client = { path = "quic-client", version = "=2.1.0" } solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" } solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-features = false } solana-sanitize = { path = "sanitize", version = "=2.1.0" } +solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } solana-timings = { path = "timings", version = "=2.1.0" } solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.1.0" } solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.1.0" } diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index db46eb8ea1c974..510fb9e75e9be7 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -45,6 +45,7 @@ solana-rpc-client = { workspace = true } solana-runtime = { workspace = true } solana-sanitize = { workspace = true } solana-sdk = { workspace = true } +solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index b745db31f43692..3b05a0fd5fbc81 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -8,9 +8,8 @@ use { pubkey::Pubkey, quic::QUIC_PORT_OFFSET, rpc_port::{DEFAULT_RPC_PORT, DEFAULT_RPC_PUBSUB_PORT}, - serde_varint, }, - solana_short_vec as short_vec, + solana_serde_varint as serde_varint, solana_short_vec as short_vec, solana_streamer::socket::SocketAddrSpace, static_assertions::const_assert_eq, std::{ diff --git a/gossip/src/restart_crds_values.rs b/gossip/src/restart_crds_values.rs index 537ae7a3baff6f..5da66eb46bf007 100644 --- a/gossip/src/restart_crds_values.rs +++ b/gossip/src/restart_crds_values.rs @@ -4,7 +4,8 @@ use { itertools::Itertools, rand::Rng, solana_sanitize::{Sanitize, SanitizeError}, - solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey, serde_varint}, + solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + solana_serde_varint as serde_varint, thiserror::Error, }; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 458f977bc76e7a..fc502d752ce1ce 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5152,6 +5152,7 @@ dependencies = [ 
"solana-runtime", "solana-sanitize", "solana-sdk", + "solana-serde-varint", "solana-short-vec", "solana-streamer", "solana-tpu-client", @@ -5428,6 +5429,7 @@ dependencies = [ "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", + "solana-serde-varint", "solana-short-vec", "thiserror", "wasm-bindgen", @@ -6306,6 +6308,7 @@ dependencies = [ "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", + "solana-serde-varint", "solana-short-vec", "thiserror", "uriparse", @@ -6353,6 +6356,13 @@ dependencies = [ "solana-tpu-client", ] +[[package]] +name = "solana-serde-varint" +version = "2.1.0" +dependencies = [ + "serde", +] + [[package]] name = "solana-short-vec" version = "2.1.0" @@ -6719,6 +6729,7 @@ dependencies = [ "serde_derive", "solana-sanitize", "solana-sdk", + "solana-serde-varint", ] [[package]] diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 2e817063aae792..5427e5dedb93f8 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -93,6 +93,7 @@ solana-program-memory = { workspace = true } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } +solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } thiserror = { workspace = true } uriparse = { workspace = true } diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index b711393274ef97..3ab08562a96027 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -41,6 +41,7 @@ solana-program-memory = { workspace = true } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } +solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } thiserror = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index de4c685fdfbfba..9cf1fc64083e50 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -515,7 +515,6 @@ pub mod program_utils; pub mod pubkey; pub mod rent; pub mod secp256k1_program; -pub mod serde_varint; pub mod serialize_utils; pub mod slot_hashes; pub mod slot_history; @@ -535,6 +534,8 @@ pub use solana_program_memory as program_memory; pub use solana_sanitize as sanitize; #[deprecated(since = "2.1.0", note = "Use `solana-secp256k1-recover` crate instead")] pub use solana_secp256k1_recover as secp256k1_recover; +#[deprecated(since = "2.1.0", note = "Use `solana-serde-varint` crate instead")] +pub use solana_serde_varint as serde_varint; #[deprecated(since = "2.1.0", note = "Use `solana-short-vec` crate instead")] pub use solana_short_vec as short_vec; #[cfg(target_arch = "wasm32")] diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 9944f5517660f2..7a42ad8d066243 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -978,9 +978,9 @@ impl VoteState { pub mod serde_compact_vote_state_update { use { super::*, - crate::{serde_varint, vote::state::Lockout}, + crate::vote::state::Lockout, serde::{Deserialize, Deserializer, Serialize, Serializer}, - solana_short_vec as short_vec, + solana_serde_varint as serde_varint, solana_short_vec as short_vec, }; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] @@ -1072,9 +1072,9 @@ pub mod serde_compact_vote_state_update { pub mod serde_tower_sync { use { super::*, - crate::{serde_varint, vote::state::Lockout}, + crate::vote::state::Lockout, serde::{Deserialize, Deserializer, Serialize, Serializer}, - solana_short_vec as short_vec, + solana_serde_varint as 
serde_varint, solana_short_vec as short_vec, }; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] diff --git a/sdk/serde-varint/Cargo.toml b/sdk/serde-varint/Cargo.toml new file mode 100644 index 00000000000000..e59d898aac3e05 --- /dev/null +++ b/sdk/serde-varint/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "solana-serde-varint" +description = "Solana definitions for integers that serialize to variable size" +documentation = "https://docs.rs/solana-serde-varint" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +serde = { workspace = true } + +[dev-dependencies] +bincode = { workspace = true } +rand = { workspace = true } +serde_derive = { workspace = true } +solana-short-vec = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/serde_varint.rs b/sdk/serde-varint/src/lib.rs similarity index 98% rename from sdk/program/src/serde_varint.rs rename to sdk/serde-varint/src/lib.rs index 7df84540a5f12a..dd9d90b712f597 100644 --- a/sdk/program/src/serde_varint.rs +++ b/sdk/serde-varint/src/lib.rs @@ -120,7 +120,11 @@ impl_var_int!(u64); #[cfg(test)] mod tests { - use {rand::Rng, solana_short_vec::ShortU16}; + use { + rand::Rng, + serde_derive::{Deserialize, Serialize}, + solana_short_vec::ShortU16, + }; #[derive(Debug, Eq, PartialEq, Serialize, Deserialize)] struct Dummy { diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index c021789b507da8..fe5176f0f6947b 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -52,8 +52,8 @@ pub use solana_program::{ epoch_rewards, epoch_schedule, fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, loader_v4, loader_v4_instruction, message, msg, native_token, nonce, program, program_error, - program_option, program_pack, rent, secp256k1_program, serde_varint, serialize_utils, - slot_hashes, slot_history, stable_layout, stake, stake_history, syscalls, system_instruction, + program_option, program_pack, rent, secp256k1_program, serialize_utils, slot_hashes, + slot_history, stable_layout, stake, stake_history, syscalls, system_instruction, system_program, sysvar, unchecked_div_by_const, vote, }; #[cfg(feature = "borsh")] @@ -161,6 +161,8 @@ pub use solana_sdk_macro::pubkey; pub use solana_sdk_macro::pubkeys; #[deprecated(since = "2.1.0", note = "Use `solana-secp256k1-recover` crate instead")] pub use solana_secp256k1_recover as secp256k1_recover; +#[deprecated(since = "2.1.0", note = "Use `solana-serde-varint` crate instead")] +pub use solana_serde_varint as serde_varint; #[deprecated(since = "2.1.0", note = "Use `solana-short-vec` crate instead")] pub use solana_short_vec as short_vec; diff --git a/version/Cargo.toml b/version/Cargo.toml index 81f37b111b300a..e2f26ddfa760e7 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -18,6 +18,7 @@ solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-sanitize = { workspace = true } solana-sdk = { workspace = true } +solana-serde-varint = { workspace = true } [features] dummy-for-ci-check = [] diff --git a/version/src/lib.rs b/version/src/lib.rs index a6f8e13b8adae8..5c6443f80c6d96 100644 --- a/version/src/lib.rs +++ b/version/src/lib.rs @@ -5,7 +5,7 @@ pub use self::legacy::{LegacyVersion1, LegacyVersion2}; use { 
serde_derive::{Deserialize, Serialize}, solana_sanitize::Sanitize, - solana_sdk::serde_varint, + solana_serde_varint as serde_varint, std::{convert::TryInto, fmt}, }; #[cfg_attr(feature = "frozen-abi", macro_use)] From 546e4d65b86af55f7f44843a4cb06aecf6763fad Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 4 Sep 2024 10:51:19 -0500 Subject: [PATCH 291/529] transaction_view::*Meta rename to *Frame (#2830) --- .../src/compute_budget_program_id_filter.rs | 2 +- runtime/src/bank/tests.rs | 2 +- ..._meta.rs => address_table_lookup_frame.rs} | 40 ++-- ...ructions_meta.rs => instructions_frame.rs} | 28 +-- transaction-view/src/lib.rs | 12 +- ...header_meta.rs => message_header_frame.rs} | 16 +- .../{signature_meta.rs => signature_frame.rs} | 30 +-- ...s_meta.rs => static_account_keys_frame.rs} | 24 +-- ...ansaction_meta.rs => transaction_frame.rs} | 172 +++++++++--------- transaction-view/src/transaction_view.rs | 60 +++--- 10 files changed, 193 insertions(+), 193 deletions(-) rename transaction-view/src/{address_table_lookup_meta.rs => address_table_lookup_frame.rs} (90%) rename transaction-view/src/{instructions_meta.rs => instructions_frame.rs} (89%) rename transaction-view/src/{message_header_meta.rs => message_header_frame.rs} (87%) rename transaction-view/src/{signature_meta.rs => signature_frame.rs} (80%) rename transaction-view/src/{static_account_keys_meta.rs => static_account_keys_frame.rs} (80%) rename transaction-view/src/{transaction_meta.rs => transaction_frame.rs} (79%) diff --git a/runtime-transaction/src/compute_budget_program_id_filter.rs b/runtime-transaction/src/compute_budget_program_id_filter.rs index b89b67113de105..87c12784222f90 100644 --- a/runtime-transaction/src/compute_budget_program_id_filter.rs +++ b/runtime-transaction/src/compute_budget_program_id_filter.rs @@ -1,6 +1,6 @@ // static account keys has max use { - agave_transaction_view::static_account_keys_meta::MAX_STATIC_ACCOUNTS_PER_PACKET as FILTER_SIZE, + agave_transaction_view::static_account_keys_frame::MAX_STATIC_ACCOUNTS_PER_PACKET as FILTER_SIZE, solana_builtins_default_costs::MAYBE_BUILTIN_KEY, solana_sdk::pubkey::Pubkey, }; diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 184c3ec9a55033..56612762743f97 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -16,7 +16,7 @@ use { snapshot_bank_utils, snapshot_utils, status_cache::MAX_CACHE_ENTRIES, }, - agave_transaction_view::static_account_keys_meta::MAX_STATIC_ACCOUNTS_PER_PACKET, + agave_transaction_view::static_account_keys_frame::MAX_STATIC_ACCOUNTS_PER_PACKET, assert_matches::assert_matches, crossbeam_channel::{bounded, unbounded}, itertools::Itertools, diff --git a/transaction-view/src/address_table_lookup_meta.rs b/transaction-view/src/address_table_lookup_frame.rs similarity index 90% rename from transaction-view/src/address_table_lookup_meta.rs rename to transaction-view/src/address_table_lookup_frame.rs index 297ea71c245767..32730c6f10fd03 100644 --- a/transaction-view/src/address_table_lookup_meta.rs +++ b/transaction-view/src/address_table_lookup_frame.rs @@ -46,7 +46,7 @@ const MAX_ATLS_PER_PACKET: u8 = ((PACKET_DATA_SIZE - MIN_SIZED_PACKET_WITH_ATLS) / MIN_SIZED_ATL) as u8; /// Contains metadata about the address table lookups in a transaction packet. -pub(crate) struct AddressTableLookupMeta { +pub(crate) struct AddressTableLookupFrame { /// The number of address table lookups in the transaction. 
pub(crate) num_address_table_lookups: u8,
     /// The offset to the first address table lookup in the transaction.
@@ -57,7 +57,7 @@ pub(crate) struct AddressTableLookupMeta {
     pub(crate) total_readonly_lookup_accounts: u16,
 }
 
-impl AddressTableLookupMeta {
+impl AddressTableLookupFrame {
     /// Get the number of address table lookups (ATL) and offset to the first.
     /// The offset will be updated to point to the first byte after the last
     /// ATL.
@@ -211,12 +211,12 @@ mod tests {
     fn test_zero_atls() {
         let bytes = bincode::serialize(&ShortVec::<MessageAddressTableLookup>(vec![])).unwrap();
         let mut offset = 0;
-        let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_address_table_lookups, 0);
-        assert_eq!(meta.offset, 1);
+        let frame = AddressTableLookupFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_address_table_lookups, 0);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, bytes.len());
-        assert_eq!(meta.total_writable_lookup_accounts, 0);
-        assert_eq!(meta.total_readonly_lookup_accounts, 0);
+        assert_eq!(frame.total_writable_lookup_accounts, 0);
+        assert_eq!(frame.total_readonly_lookup_accounts, 0);
     }
 
     #[test]
@@ -225,7 +225,7 @@ mod tests {
         let mut offset = 0;
         // modify the number of atls to be too high
         bytes[0] = 5;
-        assert!(AddressTableLookupMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(AddressTableLookupFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
@@ -239,12 +239,12 @@ mod tests {
         ]))
         .unwrap();
         let mut offset = 0;
-        let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_address_table_lookups, 1);
-        assert_eq!(meta.offset, 1);
+        let frame = AddressTableLookupFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_address_table_lookups, 1);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, bytes.len());
-        assert_eq!(meta.total_writable_lookup_accounts, 3);
-        assert_eq!(meta.total_readonly_lookup_accounts, 3);
+        assert_eq!(frame.total_writable_lookup_accounts, 3);
+        assert_eq!(frame.total_readonly_lookup_accounts, 3);
     }
 
     #[test]
@@ -263,12 +263,12 @@ mod tests {
         ]))
         .unwrap();
         let mut offset = 0;
-        let meta = AddressTableLookupMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_address_table_lookups, 2);
-        assert_eq!(meta.offset, 1);
+        let frame = AddressTableLookupFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_address_table_lookups, 2);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, bytes.len());
-        assert_eq!(meta.total_writable_lookup_accounts, 6);
-        assert_eq!(meta.total_readonly_lookup_accounts, 5);
+        assert_eq!(frame.total_writable_lookup_accounts, 6);
+        assert_eq!(frame.total_readonly_lookup_accounts, 5);
     }
 
     #[test]
@@ -284,7 +284,7 @@ mod tests {
         bytes[33] = 127;
 
         let mut offset = 0;
-        assert!(AddressTableLookupMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(AddressTableLookupFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
@@ -300,6 +300,6 @@ mod tests {
         bytes[37] = 127;
 
         let mut offset = 0;
-        assert!(AddressTableLookupMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(AddressTableLookupFrame::try_new(&bytes, &mut offset).is_err());
     }
 }
diff --git a/transaction-view/src/instructions_meta.rs b/transaction-view/src/instructions_frame.rs
similarity index 89%
rename from transaction-view/src/instructions_meta.rs
rename to transaction-view/src/instructions_frame.rs
index 42a0bdd9825b98..a908cba82aff5a 100644
--- a/transaction-view/src/instructions_meta.rs
+++ b/transaction-view/src/instructions_frame.rs
@@ -11,14 +11,14 @@ use {
 
 /// Contains metadata about the instructions in a transaction packet.
 #[derive(Default)]
-pub(crate) struct InstructionsMeta {
+pub(crate) struct InstructionsFrame {
     /// The number of instructions in the transaction.
     pub(crate) num_instructions: u16,
     /// The offset to the first instruction in the transaction.
     pub(crate) offset: u16,
 }
 
-impl InstructionsMeta {
+impl InstructionsFrame {
     /// Get the number of instructions and offset to the first instruction.
     /// The offset will be updated to point to the first byte after the last
     /// instruction.
@@ -149,10 +149,10 @@ mod tests {
     fn test_zero_instructions() {
         let bytes = bincode::serialize(&ShortVec(Vec::<CompiledInstruction>::new())).unwrap();
         let mut offset = 0;
-        let instructions_meta = InstructionsMeta::try_new(&bytes, &mut offset).unwrap();
+        let instructions_frame = InstructionsFrame::try_new(&bytes, &mut offset).unwrap();
 
-        assert_eq!(instructions_meta.num_instructions, 0);
-        assert_eq!(instructions_meta.offset, 1);
+        assert_eq!(instructions_frame.num_instructions, 0);
+        assert_eq!(instructions_frame.offset, 1);
         assert_eq!(offset, bytes.len());
     }
 
@@ -167,7 +167,7 @@ mod tests {
         // modify the number of instructions to be too high
         bytes[0] = 0x02;
         let mut offset = 0;
-        assert!(InstructionsMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(InstructionsFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
@@ -179,9 +179,9 @@ mod tests {
         }]))
         .unwrap();
         let mut offset = 0;
-        let instructions_meta = InstructionsMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(instructions_meta.num_instructions, 1);
-        assert_eq!(instructions_meta.offset, 1);
+        let instructions_frame = InstructionsFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(instructions_frame.num_instructions, 1);
+        assert_eq!(instructions_frame.offset, 1);
         assert_eq!(offset, bytes.len());
     }
 
@@ -201,9 +201,9 @@ mod tests {
         ]))
         .unwrap();
         let mut offset = 0;
-        let instructions_meta = InstructionsMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(instructions_meta.num_instructions, 2);
-        assert_eq!(instructions_meta.offset, 1);
+        let instructions_frame = InstructionsFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(instructions_frame.num_instructions, 2);
+        assert_eq!(instructions_frame.offset, 1);
         assert_eq!(offset, bytes.len());
     }
 
@@ -220,7 +220,7 @@ mod tests {
         bytes[2] = 127;
 
         let mut offset = 0;
-        assert!(InstructionsMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(InstructionsFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
@@ -236,6 +236,6 @@ mod tests {
         bytes[6] = 127;
 
         let mut offset = 0;
-        assert!(InstructionsMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(InstructionsFrame::try_new(&bytes, &mut offset).is_err());
     }
 }
diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs
index 4058c88fa83034..047514c6e70ffc 100644
--- a/transaction-view/src/lib.rs
+++ b/transaction-view/src/lib.rs
@@ -4,13 +4,13 @@ pub mod bytes;
 #[cfg(not(feature = "dev-context-only-utils"))]
 mod bytes;
 
-mod address_table_lookup_meta;
-mod instructions_meta;
-mod message_header_meta;
+mod address_table_lookup_frame;
+mod instructions_frame;
+mod message_header_frame;
 pub mod result;
 mod sanitize;
-mod signature_meta;
-pub mod static_account_keys_meta;
+mod signature_frame;
+pub mod static_account_keys_frame;
 pub mod transaction_data;
-mod transaction_meta;
+mod transaction_frame;
 pub mod transaction_view;
diff --git a/transaction-view/src/message_header_meta.rs b/transaction-view/src/message_header_frame.rs
similarity index 87%
rename from transaction-view/src/message_header_meta.rs
rename to transaction-view/src/message_header_frame.rs
index b9f40e3cb7ef11..435e58c48404d6 100644
--- a/transaction-view/src/message_header_meta.rs
+++ b/transaction-view/src/message_header_frame.rs
@@ -15,8 +15,8 @@ pub enum TransactionVersion {
     V0 = 0,
 }
 
-/// Meta data for accessing message header fields in a transaction view.
-pub(crate) struct MessageHeaderMeta {
+/// Metadata for accessing message header fields in a transaction view.
+pub(crate) struct MessageHeaderFrame {
     /// The offset to the first byte of the message in the transaction packet.
     pub(crate) offset: u16,
     /// The version of the transaction.
@@ -32,7 +32,7 @@ pub(crate) struct MessageHeaderMeta {
     pub(crate) num_readonly_unsigned_accounts: u8,
 }
 
-impl MessageHeaderMeta {
+impl MessageHeaderFrame {
     #[inline(always)]
     pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
         // Get the message offset.
@@ -78,21 +78,21 @@ mod tests {
     fn test_invalid_version() {
         let bytes = [0b1000_0001];
         let mut offset = 0;
-        assert!(MessageHeaderMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(MessageHeaderFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
     fn test_legacy_transaction_missing_header_byte() {
         let bytes = [5, 0];
         let mut offset = 0;
-        assert!(MessageHeaderMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(MessageHeaderFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
     fn test_legacy_transaction_valid() {
         let bytes = [5, 1, 2];
         let mut offset = 0;
-        let header = MessageHeaderMeta::try_new(&bytes, &mut offset).unwrap();
+        let header = MessageHeaderFrame::try_new(&bytes, &mut offset).unwrap();
         assert!(matches!(header.version, TransactionVersion::Legacy));
         assert_eq!(header.num_required_signatures, 5);
         assert_eq!(header.num_readonly_signed_accounts, 1);
@@ -103,14 +103,14 @@ mod tests {
     fn test_v0_transaction_missing_header_byte() {
         let bytes = [MESSAGE_VERSION_PREFIX, 5, 1];
         let mut offset = 0;
-        assert!(MessageHeaderMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(MessageHeaderFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
     fn test_v0_transaction_valid() {
         let bytes = [MESSAGE_VERSION_PREFIX, 5, 1, 2];
         let mut offset = 0;
-        let header = MessageHeaderMeta::try_new(&bytes, &mut offset).unwrap();
+        let header = MessageHeaderFrame::try_new(&bytes, &mut offset).unwrap();
         assert!(matches!(header.version, TransactionVersion::V0));
         assert_eq!(header.num_required_signatures, 5);
         assert_eq!(header.num_readonly_signed_accounts, 1);
diff --git a/transaction-view/src/signature_meta.rs b/transaction-view/src/signature_frame.rs
similarity index 80%
rename from transaction-view/src/signature_meta.rs
rename to transaction-view/src/signature_frame.rs
index 227649483ccfe3..b8176e538e01b7 100644
--- a/transaction-view/src/signature_meta.rs
+++ b/transaction-view/src/signature_frame.rs
@@ -16,15 +16,15 @@ use {
 const MAX_SIGNATURES_PER_PACKET: u8 =
     (PACKET_DATA_SIZE / (core::mem::size_of::<Signature>() + core::mem::size_of::<Pubkey>())) as u8;
 
-/// Meta data for accessing transaction-level signatures in a transaction view.
-pub(crate) struct SignatureMeta {
+/// Metadata for accessing transaction-level signatures in a transaction view.
+pub(crate) struct SignatureFrame {
     /// The number of signatures in the transaction.
     pub(crate) num_signatures: u8,
     /// Offset to the first signature in the transaction packet.
     pub(crate) offset: u16,
 }
 
-impl SignatureMeta {
+impl SignatureFrame {
     /// Get the number of signatures and the offset to the first signature in
     /// the transaction packet, starting at the given `offset`.
     #[inline(always)]
@@ -56,16 +56,16 @@ mod tests {
     fn test_zero_signatures() {
         let bytes = bincode::serialize(&ShortVec(Vec::<Signature>::new())).unwrap();
         let mut offset = 0;
-        assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(SignatureFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
     fn test_one_signature() {
         let bytes = bincode::serialize(&ShortVec(vec![Signature::default()])).unwrap();
         let mut offset = 0;
-        let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_signatures, 1);
-        assert_eq!(meta.offset, 1);
+        let frame = SignatureFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_signatures, 1);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, 1 + core::mem::size_of::<Signature>());
     }
 
@@ -74,9 +74,9 @@ mod tests {
         let signatures = vec![Signature::default(); usize::from(MAX_SIGNATURES_PER_PACKET)];
         let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
         let mut offset = 0;
-        let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_signatures, 12);
-        assert_eq!(meta.offset, 1);
+        let frame = SignatureFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_signatures, 12);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, 1 + 12 * core::mem::size_of::<Signature>());
     }
 
@@ -85,9 +85,9 @@ mod tests {
         let mut bytes = bincode::serialize(&ShortVec(vec![Signature::default()])).unwrap();
         bytes.insert(0, 0); // Insert a byte at the beginning of the packet.
         let mut offset = 1; // Start at the second byte.
-        let meta = SignatureMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_signatures, 1);
-        assert_eq!(meta.offset, 2);
+        let frame = SignatureFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_signatures, 1);
+        assert_eq!(frame.offset, 2);
         assert_eq!(offset, 2 + core::mem::size_of::<Signature>());
     }
 
@@ -96,7 +96,7 @@ mod tests {
         let signatures = vec![Signature::default(); usize::from(MAX_SIGNATURES_PER_PACKET) + 1];
         let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
         let mut offset = 0;
-        assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(SignatureFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
@@ -104,6 +104,6 @@ mod tests {
        let signatures = vec![Signature::default(); u16::MAX as usize];
        let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
        let mut offset = 0;
-        assert!(SignatureMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(SignatureFrame::try_new(&bytes, &mut offset).is_err());
     }
 }
diff --git a/transaction-view/src/static_account_keys_meta.rs b/transaction-view/src/static_account_keys_frame.rs
similarity index 80%
rename from transaction-view/src/static_account_keys_meta.rs
rename to transaction-view/src/static_account_keys_frame.rs
index 46bd6fb5babf5d..904c9905b0d68c 100644
--- a/transaction-view/src/static_account_keys_meta.rs
+++ b/transaction-view/src/static_account_keys_frame.rs
@@ -13,16 +13,16 @@ use {
 pub const MAX_STATIC_ACCOUNTS_PER_PACKET: u8 =
     (PACKET_DATA_SIZE / core::mem::size_of::<Pubkey>()) as u8;
 
-/// Contains meta-data about the static account keys in a transaction packet.
+/// Contains metadata about the static account keys in a transaction packet.
 #[derive(Default)]
-pub(crate) struct StaticAccountKeysMeta {
+pub(crate) struct StaticAccountKeysFrame {
     /// The number of static accounts in the transaction.
     pub(crate) num_static_accounts: u8,
     /// The offset to the first static account in the transaction.
     pub(crate) offset: u16,
 }
 
-impl StaticAccountKeysMeta {
+impl StaticAccountKeysFrame {
     #[inline(always)]
     pub(crate) fn try_new(bytes: &[u8], offset: &mut usize) -> Result<Self> {
         // Max size must not have the MSB set so that it is size 1.
@@ -55,16 +55,16 @@ mod tests {
     fn test_zero_accounts() {
         let bytes = bincode::serialize(&ShortVec(Vec::<Pubkey>::new())).unwrap();
         let mut offset = 0;
-        assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(StaticAccountKeysFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
     fn test_one_account() {
         let bytes = bincode::serialize(&ShortVec(vec![Pubkey::default()])).unwrap();
         let mut offset = 0;
-        let meta = StaticAccountKeysMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_static_accounts, 1);
-        assert_eq!(meta.offset, 1);
+        let frame = StaticAccountKeysFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_static_accounts, 1);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, 1 + core::mem::size_of::<Pubkey>());
     }
 
@@ -73,9 +73,9 @@ mod tests {
         let signatures = vec![Pubkey::default(); usize::from(MAX_STATIC_ACCOUNTS_PER_PACKET)];
         let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
         let mut offset = 0;
-        let meta = StaticAccountKeysMeta::try_new(&bytes, &mut offset).unwrap();
-        assert_eq!(meta.num_static_accounts, 38);
-        assert_eq!(meta.offset, 1);
+        let frame = StaticAccountKeysFrame::try_new(&bytes, &mut offset).unwrap();
+        assert_eq!(frame.num_static_accounts, 38);
+        assert_eq!(frame.offset, 1);
         assert_eq!(offset, 1 + 38 * core::mem::size_of::<Pubkey>());
     }
 
@@ -84,7 +84,7 @@ mod tests {
         let signatures = vec![Pubkey::default(); usize::from(MAX_STATIC_ACCOUNTS_PER_PACKET) + 1];
         let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
         let mut offset = 0;
-        assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(StaticAccountKeysFrame::try_new(&bytes, &mut offset).is_err());
     }
 
     #[test]
@@ -92,6 +92,6 @@ mod tests {
         let signatures = vec![Pubkey::default(); u16::MAX as usize];
         let bytes = bincode::serialize(&ShortVec(signatures)).unwrap();
         let mut offset = 0;
-        assert!(StaticAccountKeysMeta::try_new(&bytes, &mut offset).is_err());
+        assert!(StaticAccountKeysFrame::try_new(&bytes, &mut offset).is_err());
     }
 }
diff --git a/transaction-view/src/transaction_meta.rs b/transaction-view/src/transaction_frame.rs
similarity index 79%
rename from transaction-view/src/transaction_meta.rs
rename to transaction-view/src/transaction_frame.rs
index 376ac6b2c08cb5..e556c14cb26a9d 100644
--- a/transaction-view/src/transaction_meta.rs
+++ b/transaction-view/src/transaction_frame.rs
@@ -1,39 +1,39 @@
 use {
     crate::{
-        address_table_lookup_meta::{AddressTableLookupIterator, AddressTableLookupMeta},
+        address_table_lookup_frame::{AddressTableLookupFrame, AddressTableLookupIterator},
         bytes::advance_offset_for_type,
-        instructions_meta::{InstructionsIterator, InstructionsMeta},
-        message_header_meta::{MessageHeaderMeta, TransactionVersion},
+        instructions_frame::{InstructionsFrame, InstructionsIterator},
+        message_header_frame::{MessageHeaderFrame, TransactionVersion},
         result::{Result, TransactionViewError},
-        signature_meta::SignatureMeta,
-        static_account_keys_meta::StaticAccountKeysMeta,
+        signature_frame::SignatureFrame,
+        static_account_keys_frame::StaticAccountKeysFrame,
     },
     solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature},
 };
 
-pub(crate) struct TransactionMeta {
-    /// Signature metadata.
-    signature: SignatureMeta,
-    /// Message header metadata.
-    message_header: MessageHeaderMeta,
-    /// Static account keys metadata.
-    static_account_keys: StaticAccountKeysMeta,
+pub(crate) struct TransactionFrame {
+    /// Signature framing data.
+    signature: SignatureFrame,
+    /// Message header framing data.
+    message_header: MessageHeaderFrame,
+    /// Static account keys framing data.
+    static_account_keys: StaticAccountKeysFrame,
     /// Recent blockhash offset.
     recent_blockhash_offset: u16,
-    /// Instructions metadata.
-    instructions: InstructionsMeta,
-    /// Address table lookup metadata.
-    address_table_lookup: AddressTableLookupMeta,
+    /// Instructions framing data.
+    instructions: InstructionsFrame,
+    /// Address table lookup framing data.
+    address_table_lookup: AddressTableLookupFrame,
 }
 
-impl TransactionMeta {
+impl TransactionFrame {
     /// Parse a serialized transaction and verify basic structure.
     /// The `bytes` parameter must have no trailing data.
     pub(crate) fn try_new(bytes: &[u8]) -> Result<Self> {
         let mut offset = 0;
-        let signature = SignatureMeta::try_new(bytes, &mut offset)?;
-        let message_header = MessageHeaderMeta::try_new(bytes, &mut offset)?;
-        let static_account_keys = StaticAccountKeysMeta::try_new(bytes, &mut offset)?;
+        let signature = SignatureFrame::try_new(bytes, &mut offset)?;
+        let message_header = MessageHeaderFrame::try_new(bytes, &mut offset)?;
+        let static_account_keys = StaticAccountKeysFrame::try_new(bytes, &mut offset)?;
 
         // The recent blockhash is the first account key after the static
         // account keys. The recent blockhash is always present in a valid
@@ -41,15 +41,15 @@ impl TransactionMeta {
         let recent_blockhash_offset = offset as u16;
         advance_offset_for_type::<Hash>(bytes, &mut offset)?;
 
-        let instructions = InstructionsMeta::try_new(bytes, &mut offset)?;
+        let instructions = InstructionsFrame::try_new(bytes, &mut offset)?;
         let address_table_lookup = match message_header.version {
-            TransactionVersion::Legacy => AddressTableLookupMeta {
+            TransactionVersion::Legacy => AddressTableLookupFrame {
                 num_address_table_lookups: 0,
                 offset: 0,
                 total_writable_lookup_accounts: 0,
                 total_readonly_lookup_accounts: 0,
             },
-            TransactionVersion::V0 => AddressTableLookupMeta::try_new(bytes, &mut offset)?,
+            TransactionVersion::V0 => AddressTableLookupFrame::try_new(bytes, &mut offset)?,
         };
 
         // Verify that the entire transaction was parsed.
@@ -135,11 +135,11 @@ impl TransactionMeta {
 }
 
 // Separate implementation for `unsafe` accessor methods.
-impl TransactionMeta {
+impl TransactionFrame {
     /// Return the slice of signatures in the transaction.
     /// # Safety
     /// - This function must be called with the same `bytes` slice that was
-    ///   used to create the `TransactionMeta` instance.
+    ///   used to create the `TransactionFrame` instance.
     #[inline]
     pub(crate) unsafe fn signatures<'a>(&self, bytes: &'a [u8]) -> &'a [Signature] {
         // Verify at compile time there are no alignment constraints.
@@ -152,10 +152,10 @@ impl TransactionMeta {
         assert!(u8::MAX as usize * core::mem::size_of::<Signature>() <= isize::MAX as usize);
 
         // SAFETY:
-        // - If this `TransactionMeta` was created from `bytes`:
+        // - If this `TransactionFrame` was created from `bytes`:
        //   - the pointer is valid for the range and is properly aligned.
        // - `num_signatures` has been verified against the bounds if
-        //   `TransactionMeta` was created successfully.
+        //   `TransactionFrame` was created successfully.
        // - `Signature` are just byte arrays; there is no possibility the
        //   `Signature` are not initialized properly.
        // - The lifetime of the returned slice is the same as the input
@@ -172,7 +172,7 @@ impl TransactionMeta {
     ///
     /// # Safety
     /// - This function must be called with the same `bytes` slice that was
-    ///   used to create the `TransactionMeta` instance.
+    ///   used to create the `TransactionFrame` instance.
     #[inline]
     pub(crate) unsafe fn static_account_keys<'a>(&self, bytes: &'a [u8]) -> &'a [Pubkey] {
         // Verify at compile time there are no alignment constraints.
@@ -182,10 +182,10 @@ impl TransactionMeta {
         assert!(u8::MAX as usize * core::mem::size_of::<Pubkey>() <= isize::MAX as usize);
 
         // SAFETY:
-        // - If this `TransactionMeta` was created from `bytes`:
+        // - If this `TransactionFrame` was created from `bytes`:
        //   - the pointer is valid for the range and is properly aligned.
        // - `num_static_accounts` has been verified against the bounds if
-        //   `TransactionMeta` was created successfully.
+        //   `TransactionFrame` was created successfully.
        // - `Pubkey` are just byte arrays; there is no possibility the
        //   `Pubkey` are not initialized properly.
        // - The lifetime of the returned slice is the same as the input
@@ -203,7 +203,7 @@ impl TransactionMeta {
     /// Return the recent blockhash in the transaction.
     /// # Safety
     /// - This function must be called with the same `bytes` slice that was
-    ///   used to create the `TransactionMeta` instance.
+    ///   used to create the `TransactionFrame` instance.
#[inline] pub(crate) unsafe fn address_table_lookup_iter<'a>( &self, @@ -266,36 +266,36 @@ mod tests { }, }; - fn verify_transaction_view_meta(tx: &VersionedTransaction) { + fn verify_transaction_view_frame(tx: &VersionedTransaction) { let bytes = bincode::serialize(tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); - assert_eq!(meta.signature.num_signatures, tx.signatures.len() as u8); - assert_eq!(meta.signature.offset as usize, 1); + assert_eq!(frame.signature.num_signatures, tx.signatures.len() as u8); + assert_eq!(frame.signature.offset as usize, 1); assert_eq!( - meta.message_header.num_required_signatures, + frame.message_header.num_required_signatures, tx.message.header().num_required_signatures ); assert_eq!( - meta.message_header.num_readonly_signed_accounts, + frame.message_header.num_readonly_signed_accounts, tx.message.header().num_readonly_signed_accounts ); assert_eq!( - meta.message_header.num_readonly_unsigned_accounts, + frame.message_header.num_readonly_unsigned_accounts, tx.message.header().num_readonly_unsigned_accounts ); assert_eq!( - meta.static_account_keys.num_static_accounts, + frame.static_account_keys.num_static_accounts, tx.message.static_account_keys().len() as u8 ); assert_eq!( - meta.instructions.num_instructions, + frame.instructions.num_instructions, tx.message.instructions().len() as u16 ); assert_eq!( - meta.address_table_lookup.num_address_table_lookups, + frame.address_table_lookup.num_address_table_lookups, tx.message .address_table_lookups() .map(|x| x.len() as u8) @@ -420,22 +420,22 @@ mod tests { #[test] fn test_minimal_sized_transaction() { - verify_transaction_view_meta(&minimally_sized_transaction()); + verify_transaction_view_frame(&minimally_sized_transaction()); } #[test] fn test_simple_transfer() { - verify_transaction_view_meta(&simple_transfer()); + verify_transaction_view_frame(&simple_transfer()); } #[test] fn test_simple_transfer_v0() { - verify_transaction_view_meta(&simple_transfer_v0()); + verify_transaction_view_frame(&simple_transfer_v0()); } #[test] fn test_v0_with_lookup() { - verify_transaction_view_meta(&v0_with_single_lookup()); + verify_transaction_view_frame(&v0_with_single_lookup()); } #[test] @@ -443,14 +443,14 @@ mod tests { let tx = simple_transfer(); let mut bytes = bincode::serialize(&tx).unwrap(); bytes.push(0); - assert!(TransactionMeta::try_new(&bytes).is_err()); + assert!(TransactionFrame::try_new(&bytes).is_err()); } #[test] fn test_insufficient_bytes() { let tx = simple_transfer(); let bytes = bincode::serialize(&tx).unwrap(); - assert!(TransactionMeta::try_new(&bytes[..bytes.len().wrapping_sub(1)]).is_err()); + assert!(TransactionFrame::try_new(&bytes[..bytes.len().wrapping_sub(1)]).is_err()); } #[test] @@ -461,7 +461,7 @@ mod tests { bytes[0] = 0xff; bytes[1] = 0xff; bytes[2] = 0xff; - assert!(TransactionMeta::try_new(&bytes).is_err()); + assert!(TransactionFrame::try_new(&bytes).is_err()); } #[test] @@ -473,7 +473,7 @@ mod tests { bytes[offset] = 0xff; bytes[offset + 1] = 0xff; bytes[offset + 2] = 0xff; - assert!(TransactionMeta::try_new(&bytes).is_err()); + assert!(TransactionFrame::try_new(&bytes).is_err()); } #[test] @@ -490,7 +490,7 @@ mod tests { bytes[offset] = 0xff; bytes[offset + 1] = 0xff; bytes[offset + 2] = 0xff; - assert!(TransactionMeta::try_new(&bytes).is_err()); + assert!(TransactionFrame::try_new(&bytes).is_err()); } #[test] @@ -513,33 +513,33 @@ mod tests { + 1 // byte for data length + ix_bytes; bytes[offset] = 
0x01; - assert!(TransactionMeta::try_new(&bytes).is_err()); + assert!(TransactionFrame::try_new(&bytes).is_err()); } #[test] fn test_basic_accessors() { let tx = simple_transfer(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); - - assert_eq!(meta.num_signatures(), 1); - assert!(matches!(meta.version(), TransactionVersion::Legacy)); - assert_eq!(meta.num_required_signatures(), 1); - assert_eq!(meta.num_readonly_signed_accounts(), 0); - assert_eq!(meta.num_readonly_unsigned_accounts(), 1); - assert_eq!(meta.num_static_account_keys(), 3); - assert_eq!(meta.num_instructions(), 1); - assert_eq!(meta.num_address_table_lookups(), 0); - - // SAFETY: `bytes` is the same slice used to create `meta`. + let frame = TransactionFrame::try_new(&bytes).unwrap(); + + assert_eq!(frame.num_signatures(), 1); + assert!(matches!(frame.version(), TransactionVersion::Legacy)); + assert_eq!(frame.num_required_signatures(), 1); + assert_eq!(frame.num_readonly_signed_accounts(), 0); + assert_eq!(frame.num_readonly_unsigned_accounts(), 1); + assert_eq!(frame.num_static_account_keys(), 3); + assert_eq!(frame.num_instructions(), 1); + assert_eq!(frame.num_address_table_lookups(), 0); + + // SAFETY: `bytes` is the same slice used to create `frame`. unsafe { - let signatures = meta.signatures(&bytes); + let signatures = frame.signatures(&bytes); assert_eq!(signatures, &tx.signatures); - let static_account_keys = meta.static_account_keys(&bytes); + let static_account_keys = frame.static_account_keys(&bytes); assert_eq!(static_account_keys, tx.message.static_account_keys()); - let recent_blockhash = meta.recent_blockhash(&bytes); + let recent_blockhash = frame.recent_blockhash(&bytes); assert_eq!(recent_blockhash, tx.message.recent_blockhash()); } } @@ -548,11 +548,11 @@ mod tests { fn test_instructions_iter_empty() { let tx = minimally_sized_transaction(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); - // SAFETY: `bytes` is the same slice used to create `meta`. + // SAFETY: `bytes` is the same slice used to create `frame`. unsafe { - let mut iter = meta.instructions_iter(&bytes); + let mut iter = frame.instructions_iter(&bytes); assert!(iter.next().is_none()); } } @@ -561,11 +561,11 @@ mod tests { fn test_instructions_iter_single() { let tx = simple_transfer(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); - // SAFETY: `bytes` is the same slice used to create `meta`. + // SAFETY: `bytes` is the same slice used to create `frame`. unsafe { - let mut iter = meta.instructions_iter(&bytes); + let mut iter = frame.instructions_iter(&bytes); let ix = iter.next().unwrap(); assert_eq!(ix.program_id_index, 2); assert_eq!(ix.accounts, &[0, 1]); @@ -581,11 +581,11 @@ mod tests { fn test_instructions_iter_multiple() { let tx = multiple_transfers(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); - // SAFETY: `bytes` is the same slice used to create `meta`. + // SAFETY: `bytes` is the same slice used to create `frame`. 
unsafe { - let mut iter = meta.instructions_iter(&bytes); + let mut iter = frame.instructions_iter(&bytes); let ix = iter.next().unwrap(); assert_eq!(ix.program_id_index, 3); assert_eq!(ix.accounts, &[0, 1]); @@ -608,11 +608,11 @@ mod tests { fn test_address_table_lookup_iter_empty() { let tx = simple_transfer(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); - // SAFETY: `bytes` is the same slice used to create `meta`. + // SAFETY: `bytes` is the same slice used to create `frame`. unsafe { - let mut iter = meta.address_table_lookup_iter(&bytes); + let mut iter = frame.address_table_lookup_iter(&bytes); assert!(iter.next().is_none()); } } @@ -621,12 +621,12 @@ mod tests { fn test_address_table_lookup_iter_single() { let tx = v0_with_single_lookup(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); let atls_actual = tx.message.address_table_lookups().unwrap(); - // SAFETY: `bytes` is the same slice used to create `meta`. + // SAFETY: `bytes` is the same slice used to create `frame`. unsafe { - let mut iter = meta.address_table_lookup_iter(&bytes); + let mut iter = frame.address_table_lookup_iter(&bytes); let lookup = iter.next().unwrap(); assert_eq!(lookup.account_key, &atls_actual[0].account_key); assert_eq!(lookup.writable_indexes, atls_actual[0].writable_indexes); @@ -639,12 +639,12 @@ mod tests { fn test_address_table_lookup_iter_multiple() { let tx = v0_with_multiple_lookups(); let bytes = bincode::serialize(&tx).unwrap(); - let meta = TransactionMeta::try_new(&bytes).unwrap(); + let frame = TransactionFrame::try_new(&bytes).unwrap(); let atls_actual = tx.message.address_table_lookups().unwrap(); - // SAFETY: `bytes` is the same slice used to create `meta`. + // SAFETY: `bytes` is the same slice used to create `frame`. unsafe { - let mut iter = meta.address_table_lookup_iter(&bytes); + let mut iter = frame.address_table_lookup_iter(&bytes); let lookup = iter.next().unwrap(); assert_eq!(lookup.account_key, &atls_actual[0].account_key); diff --git a/transaction-view/src/transaction_view.rs b/transaction-view/src/transaction_view.rs index 0c9a9b49063a5f..b869a4fecb94a5 100644 --- a/transaction-view/src/transaction_view.rs +++ b/transaction-view/src/transaction_view.rs @@ -1,9 +1,9 @@ use { crate::{ - address_table_lookup_meta::AddressTableLookupIterator, - instructions_meta::InstructionsIterator, message_header_meta::TransactionVersion, + address_table_lookup_frame::AddressTableLookupIterator, + instructions_frame::InstructionsIterator, message_header_frame::TransactionVersion, result::Result, sanitize::sanitize, transaction_data::TransactionData, - transaction_meta::TransactionMeta, + transaction_frame::TransactionFrame, }, solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature}, }; @@ -21,14 +21,14 @@ pub type SanitizedTransactionView = TransactionView; /// so that different containers for the serialized transaction can be used. pub struct TransactionView { data: D, - meta: TransactionMeta, + frame: TransactionFrame, } impl TransactionView { /// Creates a new `TransactionView` without running sanitization checks. 
pub fn try_new_unsanitized(data: D) -> Result { - let meta = TransactionMeta::try_new(data.data())?; - Ok(Self { data, meta }) + let frame = TransactionFrame::try_new(data.data())?; + Ok(Self { data, frame }) } /// Sanitizes the transaction view, returning a sanitized view on success. @@ -36,7 +36,7 @@ impl TransactionView { sanitize(&self)?; Ok(SanitizedTransactionView { data: self.data, - meta: self.meta, + frame: self.frame, }) } } @@ -53,101 +53,101 @@ impl TransactionView { /// Return the number of signatures in the transaction. #[inline] pub fn num_signatures(&self) -> u8 { - self.meta.num_signatures() + self.frame.num_signatures() } /// Return the version of the transaction. #[inline] pub fn version(&self) -> TransactionVersion { - self.meta.version() + self.frame.version() } /// Return the number of required signatures in the transaction. #[inline] pub fn num_required_signatures(&self) -> u8 { - self.meta.num_required_signatures() + self.frame.num_required_signatures() } /// Return the number of readonly signed accounts in the transaction. #[inline] pub fn num_readonly_signed_accounts(&self) -> u8 { - self.meta.num_readonly_signed_accounts() + self.frame.num_readonly_signed_accounts() } /// Return the number of readonly unsigned accounts in the transaction. #[inline] pub fn num_readonly_unsigned_accounts(&self) -> u8 { - self.meta.num_readonly_unsigned_accounts() + self.frame.num_readonly_unsigned_accounts() } /// Return the number of static account keys in the transaction. #[inline] pub fn num_static_account_keys(&self) -> u8 { - self.meta.num_static_account_keys() + self.frame.num_static_account_keys() } /// Return the number of instructions in the transaction. #[inline] pub fn num_instructions(&self) -> u16 { - self.meta.num_instructions() + self.frame.num_instructions() } /// Return the number of address table lookups in the transaction. #[inline] pub fn num_address_table_lookups(&self) -> u8 { - self.meta.num_address_table_lookups() + self.frame.num_address_table_lookups() } /// Return the number of writable lookup accounts in the transaction. #[inline] pub fn total_writable_lookup_accounts(&self) -> u16 { - self.meta.total_writable_lookup_accounts() + self.frame.total_writable_lookup_accounts() } /// Return the number of readonly lookup accounts in the transaction. #[inline] pub fn total_readonly_lookup_accounts(&self) -> u16 { - self.meta.total_readonly_lookup_accounts() + self.frame.total_readonly_lookup_accounts() } /// Return the slice of signatures in the transaction. #[inline] pub fn signatures(&self) -> &[Signature] { let data = self.data(); - // SAFETY: `meta` was created from `data`. - unsafe { self.meta.signatures(data) } + // SAFETY: `frame` was created from `data`. + unsafe { self.frame.signatures(data) } } /// Return the slice of static account keys in the transaction. #[inline] pub fn static_account_keys(&self) -> &[Pubkey] { let data = self.data(); - // SAFETY: `meta` was created from `data`. - unsafe { self.meta.static_account_keys(data) } + // SAFETY: `frame` was created from `data`. + unsafe { self.frame.static_account_keys(data) } } /// Return the recent blockhash in the transaction. #[inline] pub fn recent_blockhash(&self) -> &Hash { let data = self.data(); - // SAFETY: `meta` was created from `data`. - unsafe { self.meta.recent_blockhash(data) } + // SAFETY: `frame` was created from `data`. + unsafe { self.frame.recent_blockhash(data) } } /// Return an iterator over the instructions in the transaction. 
#[inline]
     pub fn instructions_iter(&self) -> InstructionsIterator {
         let data = self.data();
-        // SAFETY: `meta` was created from `data`.
-        unsafe { self.meta.instructions_iter(data) }
+        // SAFETY: `frame` was created from `data`.
+        unsafe { self.frame.instructions_iter(data) }
     }
 
     /// Return an iterator over the address table lookups in the transaction.
     #[inline]
     pub fn address_table_lookup_iter(&self) -> AddressTableLookupIterator {
         let data = self.data();
-        // SAFETY: `meta` was created from `data`.
-        unsafe { self.meta.address_table_lookup_iter(data) }
+        // SAFETY: `frame` was created from `data`.
+        unsafe { self.frame.address_table_lookup_iter(data) }
     }
 
     /// Return the full serialized transaction data.
@@ -160,7 +160,7 @@ impl TransactionView {
     /// This does not include the signatures.
     #[inline]
     pub fn message_data(&self) -> &[u8] {
-        &self.data()[usize::from(self.meta.message_offset())..]
+        &self.data()[usize::from(self.frame.message_offset())..]
     }
 }
 
@@ -177,7 +177,7 @@ mod tests {
         },
     };
 
-    fn verify_transaction_view_meta(tx: &VersionedTransaction) {
+    fn verify_transaction_view_frame(tx: &VersionedTransaction) {
         let bytes = bincode::serialize(tx).unwrap();
 
         let view = TransactionView::try_new_unsanitized(bytes.as_ref()).unwrap();
@@ -229,6 +229,6 @@
     #[test]
     fn test_multiple_transfers() {
-        verify_transaction_view_meta(&multiple_transfers());
+        verify_transaction_view_frame(&multiple_transfers());
     }
 }

From 53db0c19430072fd4810bff88cd1b128a83fc660 Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)"
Date: Wed, 4 Sep 2024 11:24:23 -0500
Subject: [PATCH 292/529] shrink can work on 'ancient' slots (#2827)

* shrink can work on 'ancient' slots
* fix tests
* cleanup map to if
* rename

---------

Co-authored-by: HaoranYi
---
 accounts-db/src/accounts_db.rs | 72 ++++++++++++----------------------
 1 file changed, 26 insertions(+), 46 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 7e51234ea93a8c..7d934e5adbe586 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -585,6 +585,8 @@ pub struct AccountsAddRootTiming {
     pub store_us: u64,
 }
 
+/// if negative, this many slots older than # slots in epoch are still treated as modern (ie. non-ancient).
+/// Slots older than # slots in epoch - this # are then treated as ancient and subject to packing.
const ANCIENT_APPEND_VEC_DEFAULT_OFFSET: Option<i64> = Some(-10_000); #[derive(Debug, Default, Clone)] @@ -2039,6 +2041,7 @@ pub struct ShrinkStats { accounts_loaded: AtomicU64, purged_zero_lamports: AtomicU64, accounts_not_found_in_index: AtomicU64, + num_ancient_slots_shrunk: AtomicU64, } impl ShrinkStats { @@ -2152,6 +2155,11 @@ impl ShrinkStats { self.purged_zero_lamports.swap(0, Ordering::Relaxed), i64 ), + ( + "num_ancient_slots_shrunk", + self.num_ancient_slots_shrunk.swap(0, Ordering::Relaxed), + i64 + ), ( "accounts_not_found_in_index", self.accounts_not_found_in_index.swap(0, Ordering::Relaxed), i64 ), @@ -4558,7 +4566,6 @@ impl AccountsDb { &self, shrink_slots: &ShrinkCandidates, shrink_ratio: f64, - oldest_non_ancient_slot: Option<Slot>, ) -> (IntMap<Slot, Arc<AccountStorageEntry>>, ShrinkCandidates) { struct StoreUsageInfo { slot: Slot, @@ -4572,13 +4579,6 @@ impl AccountsDb { let mut total_bytes: u64 = 0; let mut total_candidate_stores: usize = 0; for slot in shrink_slots { - if oldest_non_ancient_slot - .map(|oldest_non_ancient_slot| slot < &oldest_non_ancient_slot) - .unwrap_or_default() - { - // this slot will be 'shrunk' by ancient code - continue; - } let Some(store) = self.storage.get_slot_storage_entry(*slot) else { continue; }; @@ -5049,13 +5049,8 @@ impl AccountsDb { let (shrink_slots, shrink_slots_next_batch) = { if let AccountShrinkThreshold::TotalSpace { shrink_ratio } = self.shrink_ratio { - let (shrink_slots, shrink_slots_next_batch) = self - .select_candidates_by_total_usage( - &shrink_candidates_slots, - shrink_ratio, - self.ancient_append_vec_offset - .map(|_| oldest_non_ancient_slot), - ); + let (shrink_slots, shrink_slots_next_batch) = + self.select_candidates_by_total_usage(&shrink_candidates_slots, shrink_ratio); (shrink_slots, Some(shrink_slots_next_batch)) } else { ( @@ -5092,6 +5087,11 @@ impl AccountsDb { shrink_slots .into_par_iter() .for_each(|(slot, slot_shrink_candidate)| { + if self.ancient_append_vec_offset.is_some() && slot < oldest_non_ancient_slot { + self.shrink_stats + .num_ancient_slots_shrunk + .fetch_add(1, Ordering::Relaxed); + } let mut measure = Measure::start("shrink_candidate_slots-ms"); self.do_shrink_slot_store(slot, &slot_shrink_candidate); measure.stop(); @@ -12614,7 +12614,7 @@ pub mod tests { let db = AccountsDb::new_single_for_tests(); let (selected_candidates, next_candidates) = - db.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO, None); + db.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO); assert_eq!(0, selected_candidates.len()); assert_eq!(0, next_candidates.len()); @@ -12676,7 +12676,7 @@ pub mod tests { // to the candidates list for next round. let target_alive_ratio = 0.6; let (selected_candidates, next_candidates) = - db.select_candidates_by_total_usage(&candidates, target_alive_ratio, None); + db.select_candidates_by_total_usage(&candidates, target_alive_ratio); assert_eq!(1, selected_candidates.len()); assert!(selected_candidates.contains(&store1_slot)); assert_eq!(1, next_candidates.len()); @@ -12736,7 +12736,7 @@ pub mod tests { // Set the target ratio to default (0.8), both store1 and store2 must be selected and store3 is ignored.
let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; let (selected_candidates, next_candidates) = - db.select_candidates_by_total_usage(&candidates, target_alive_ratio, None); + db.select_candidates_by_total_usage(&candidates, target_alive_ratio); assert_eq!(2, selected_candidates.len()); assert!(selected_candidates.contains(&store1_slot)); assert!(selected_candidates.contains(&store2_slot)); @@ -12781,34 +12781,14 @@ pub mod tests { .store(store_file_size as usize / 2, Ordering::Release); candidates.insert(store2_slot); - for newest_ancient_slot in [None, Some(store1_slot), Some(store2_slot)] { - // Set the target ratio to default (0.8), both stores from the two different slots must be selected. - let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; - let (selected_candidates, next_candidates) = db.select_candidates_by_total_usage( - &candidates, - target_alive_ratio, - newest_ancient_slot.map(|newest_ancient_slot| newest_ancient_slot + 1), - ); - assert_eq!( - if newest_ancient_slot == Some(store1_slot) { - 1 - } else if newest_ancient_slot == Some(store2_slot) { - 0 - } else { - 2 - }, - selected_candidates.len() - ); - assert_eq!( - newest_ancient_slot.is_none(), - selected_candidates.contains(&store1_slot) - ); - - if newest_ancient_slot != Some(store2_slot) { - assert!(selected_candidates.contains(&store2_slot)); - } - assert_eq!(0, next_candidates.len()); - } + // Set the target ratio to default (0.8), both stores from the two different slots must be selected. + let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; + let (selected_candidates, next_candidates) = + db.select_candidates_by_total_usage(&candidates, target_alive_ratio); + assert_eq!(2, selected_candidates.len()); + assert!(selected_candidates.contains(&store1_slot)); + assert!(selected_candidates.contains(&store2_slot)); + assert_eq!(0, next_candidates.len()); } const UPSERT_POPULATE_RECLAIMS: UpsertReclaim = UpsertReclaim::PopulateReclaims; From f2b7ef4aea2df6e9949e8d5bab089594466e0713 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 4 Sep 2024 17:06:21 +0000 Subject: [PATCH 293/529] rolls out chained Merkle shreds to ~50% of testnet slots (#2660) --- turbine/src/broadcast_stage/standard_broadcast_run.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 0808f9c2532236..58cfc79fb3745a 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -508,8 +508,8 @@ fn should_chain_merkle_shreds(slot: Slot, cluster_type: ClusterType) -> bool { ClusterType::Development => true, ClusterType::Devnet => false, ClusterType::MainnetBeta => false, - // Roll out chained Merkle shreds to ~21% of testnet. - ClusterType::Testnet => slot % 19 < 4, + // Roll out chained Merkle shreds to ~53% of testnet slots. 
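+ // (reading aid: the arm below passes 10 of every 19 slots, and
+ // 10 / 19 ≈ 52.6%, hence the ~53% figure; keying on slot % 19 keeps the
+ // rollout deterministic per slot rather than random)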
+ ClusterType::Testnet => slot % 19 < 10, } } From 79b40f193ab3c9c24f8d512d4560f40e5ad775b7 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:46:27 -0500 Subject: [PATCH 294/529] accounts-db: silence false positive info in shrink (#2831) * log for all zero in shrink * pr: refactor to remove duplicated message logging * pr: only log when all_are_zero = false * pr: update log text * revert log text --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7d934e5adbe586..ab41e4e8f07c16 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4355,11 +4355,15 @@ impl AccountsDb { // clean needs to take care of this dead slot self.accounts_index.add_uncleaned_roots([slot]); } - info!( - "Unexpected shrink for slot {} alive {} capacity {}, \ - likely caused by a bug for calculating alive bytes.", - slot, shrink_collect.alive_total_bytes, shrink_collect.capacity - ); + + if !shrink_collect.all_are_zero_lamports { + // if all are zero lamports, then we expect that we would like to mark the whole slot dead, but we cannot. That's clean's job. + info!( + "Unexpected shrink for slot {} alive {} capacity {}, \ + likely caused by a bug for calculating alive bytes.", + slot, shrink_collect.alive_total_bytes, shrink_collect.capacity + ); + } self.shrink_stats .skipped_shrink From 820644f60a4db79e4f9c2bcf525313387234151e Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 4 Sep 2024 12:59:58 -0500 Subject: [PATCH 295/529] unref accounts in shrink and pack when we're committed (#2806) * unref accounts in shrink and pack when we're committed * remove bad comments * rewrite shrink_ancient_fail_ref test * fix comments * fix a test * fix another test * fmt * del invalid comments * reviews: move log to new PR. * Revert "reviews: move log to new PR." This reverts commit f8aefe04c7ded58c9aaaf1fcd733af50598f8198.
* fix comments * revert log content * pr: rename * pr: more rename --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 71 ++++++++-------- accounts-db/src/ancient_append_vecs.rs | 109 ++++++++++++------------- 2 files changed, 89 insertions(+), 91 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index ab41e4e8f07c16..b82c7f91dfbb28 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -524,8 +524,9 @@ pub type BinnedHashData = Vec<Vec<CalculateHashIntermediate>>; struct LoadAccountsIndexForShrink<'a, T: ShrinkCollectRefs<'a>> { /// all alive accounts alive_accounts: T, - /// pubkeys that were unref'd in the accounts index because they were dead - unrefed_pubkeys: Vec<&'a Pubkey>, + /// pubkeys that are going to be unref'd in the accounts index after we are + /// done with shrinking, because they are dead + pubkeys_to_unref: Vec<&'a Pubkey>, /// pubkeys that are the last remaining zero lamport instance of an account zero_lamport_single_ref_pubkeys: Vec<&'a Pubkey>, /// true if all alive accounts are zero lamport accounts @@ -3968,7 +3969,7 @@ impl AccountsDb { ) -> LoadAccountsIndexForShrink<'a, T> { let count = accounts.len(); let mut alive_accounts = T::with_capacity(count, slot_to_shrink); - let mut unrefed_pubkeys = Vec::with_capacity(count); + let mut pubkeys_to_unref = Vec::with_capacity(count); let mut zero_lamport_single_ref_pubkeys = Vec::with_capacity(count); let mut alive = 0; @@ -3981,7 +3982,6 @@ impl AccountsDb { self.accounts_index.scan( accounts.iter().map(|account| account.pubkey()), |pubkey, slots_refs, _entry| { - let mut result = AccountsIndexScanResult::OnlyKeepInMemoryIfDirty; let stored_account = &accounts[index]; let mut do_populate_accounts_for_shrink = |ref_count, slot_list| { if stored_account.is_zero_lamport() @@ -4017,8 +4017,7 @@ impl AccountsDb { // It would have had a ref to the storage from the initial store, but it will // not exist in the re-written slot. Unref it to keep the index consistent with // rewriting the storage entries. - unrefed_pubkeys.push(pubkey); - result = AccountsIndexScanResult::Unref; + pubkeys_to_unref.push(pubkey); dead += 1; } else { do_populate_accounts_for_shrink(ref_count, slot_list); } @@ -4035,7 +4034,7 @@ impl AccountsDb { do_populate_accounts_for_shrink(ref_count, &slot_list); } index += 1; - result + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty }, None, false, @@ -4053,7 +4052,7 @@ impl AccountsDb { LoadAccountsIndexForShrink { alive_accounts, - unrefed_pubkeys, + pubkeys_to_unref, zero_lamport_single_ref_pubkeys, all_are_zero_lamports, } @@ -4323,6 +4322,33 @@ impl AccountsDb { .fetch_add(time.as_us(), Ordering::Relaxed); } + pub(crate) fn unref_shrunk_dead_accounts<'a>( + &self, + pubkeys: impl Iterator<Item = &'a Pubkey>, + slot: Slot, + ) { + self.accounts_index.scan( + pubkeys, + |pubkey, slot_refs, _entry| { + if slot_refs.is_none() { + // We also expect that the accounts index must contain an + // entry for `pubkey`. Log a warning for now. In future, + // we will panic when this happens.
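+ // (reading aid: the branch below pairs the log line with a metrics
+ // datapoint, so the missing-index condition is visible in dashboards
+ // as well as in logs)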
+ warn!("pubkey {pubkey} in slot {slot} was NOT found in accounts index during shrink"); + datapoint_warn!( + "accounts_db-shink_pubkey_missing_from_index", + ("store_slot", slot, i64), + ("pubkey", pubkey.to_string(), String), + ) + } + AccountsIndexScanResult::Unref + }, + None, + false, + ScanFilter::All, + ); + } + fn do_shrink_slot_store(&self, slot: Slot, store: &Arc) { if self.accounts_cache.contains(slot) { // It is not correct to shrink a slot while it is in the write cache until flush is complete and the slot is removed from the write cache. @@ -4368,34 +4394,11 @@ impl AccountsDb { self.shrink_stats .skipped_shrink .fetch_add(1, Ordering::Relaxed); - - self.accounts_index.scan( - shrink_collect.unrefed_pubkeys.into_iter(), - |pubkey, _slot_refs, entry| { - // pubkeys in `unrefed_pubkeys` were unref'd in `shrink_collect` above under the assumption that we would shrink everything. - // Since shrink is not occurring, we need to addref the pubkeys to get the system back to the prior state since the account still exists at this slot. - if let Some(entry) = entry { - entry.addref(); - } else { - // We also expect that the accounts index must contain an - // entry for `pubkey`. Log a warning for now. In future, - // we will panic when this happens. - warn!("pubkey {pubkey} in slot {slot} was NOT found in accounts index during shrink"); - datapoint_warn!( - "accounts_db-shink_pubkey_missing_from_index", - ("store_slot", slot, i64), - ("pubkey", pubkey.to_string(), String), - ) - } - AccountsIndexScanResult::OnlyKeepInMemoryIfDirty - }, - None, - true, - ScanFilter::All, - ); return; } + self.unref_shrunk_dead_accounts(shrink_collect.unrefed_pubkeys.iter().cloned(), slot); + let total_accounts_after_shrink = shrink_collect.alive_accounts.len(); debug!( "shrinking: slot: {}, accounts: ({} => {}) bytes: {} original: {}", diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 95d45bfc93d573..92ed6354432c2c 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -13,12 +13,11 @@ use { ShrinkCollectAliveSeparatedByRefs, ShrinkStatsSub, }, accounts_file::AccountsFile, - accounts_index::{AccountsIndexScanResult, ScanFilter}, active_stats::ActiveStatItem, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, }, rand::{thread_rng, Rng}, - rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, + rayon::prelude::{IntoParallelRefIterator, ParallelIterator}, solana_measure::measure_us, solana_sdk::clock::Slot, std::{ @@ -440,7 +439,6 @@ impl AccountsDb { accounts_to_combine.target_slots_sorted.last(), many_refs_newest.last().map(|accounts| accounts.slot) ); - self.addref_accounts_failed_to_shrink_ancient(accounts_to_combine.accounts_to_combine); return; } @@ -468,12 +466,19 @@ impl AccountsDb { if pack.len() > accounts_to_combine.target_slots_sorted.len() { // Not enough slots to contain the accounts we are trying to pack. - // `shrink_collect` previously unref'd some accounts. We need to addref them - // to restore the correct state since we failed to combine anything. 
- self.addref_accounts_failed_to_shrink_ancient(accounts_to_combine.accounts_to_combine); return; } + accounts_to_combine + .accounts_to_combine + .iter() + .for_each(|combine| { + self.unref_shrunk_dead_accounts( + combine.unrefed_pubkeys.iter().cloned(), + combine.slot, + ); + }); + let write_ancient_accounts = self.write_packed_storages(&accounts_to_combine, pack); self.finish_combine_ancient_slots_packed_internal( @@ -483,29 +488,6 @@ impl AccountsDb { ); } - /// for each account in `unrefed_pubkeys`, in each `accounts_to_combine`, addref - fn addref_accounts_failed_to_shrink_ancient<'a>( - &self, - accounts_to_combine: Vec<ShrinkCollect<'a, ShrinkCollectAliveSeparatedByRefs<'a>>>, - ) { - self.thread_pool_clean.install(|| { - accounts_to_combine.into_par_iter().for_each(|combine| { - self.accounts_index.scan( - combine.unrefed_pubkeys.into_iter(), - |_pubkey, _slots_refs, entry| { - if let Some(entry) = entry { - entry.addref(); - } - AccountsIndexScanResult::OnlyKeepInMemoryIfDirty - }, - None, - true, - ScanFilter::All, - ); - }); - }); - } - /// calculate all storage info for the storages in slots /// Then, apply 'tuning' to filter out slots we do NOT want to combine. fn collect_sort_filter_ancient_slots( @@ -762,8 +744,12 @@ impl AccountsDb { let mut target_slots_sorted = Vec::with_capacity(len); // `shrink_collect` all accounts in the append vecs we want to combine. - // This also unrefs all dead accounts in those append vecs. - // This needs to serially iterate largest to smallest slot so that we unref older dead slots after we have visited the newer alive slots. + // We are no longer doing eager unref in shrink_collect. Therefore, we will no longer need to iter them serially? + // There is a subtle difference for zero lamport accounts, which can lead to having more multi-refs than before? + // Consider account X in both slot x, and x+1 and x+2. + // With eager unref, we will only collect `one_ref` X at slot x+2 after shrink. + // Without eager unref, we will collect X at `multi-ref` after shrink. + // Packing multi-ref is less efficient than `one_ref`. But it might be ok - in next round of clean, hopefully, it can turn this from multi-ref into one-ref. let mut accounts_to_combine = accounts_per_storage .iter() .map(|(info, unique_accounts)| { @@ -865,9 +851,6 @@ impl AccountsDb { } } let unpackable_slots_count = remove.len(); - remove.into_iter().rev().for_each(|i| { - self.addref_accounts_failed_to_shrink_ancient(vec![accounts_to_combine.remove(i)]); - }); target_slots_sorted.sort_unstable(); self.shrink_ancient_stats .slots_cannot_move_count @@ -1210,7 +1193,7 @@ pub mod tests { }, accounts_file::StorageAccess, accounts_hash::AccountHash, - accounts_index::UpsertReclaim, + accounts_index::{AccountsIndexScanResult, ScanFilter, UpsertReclaim}, append_vec::{ aligned_stored_size, AppendVec, AppendVecStoredAccountMeta, MAXIMUM_APPEND_VEC_FILE_SIZE, }, @@ -1772,13 +1755,7 @@ pub mod tests { &tuning, many_ref_slots, ); - let mut expected_accounts_to_combine = num_slots; - if two_refs && many_ref_slots == IncludeManyRefSlots::Skip && num_slots > 2 { - // We require more than 1 target slot. Since all slots have multi refs, we find no slots we can use as target slots. - // Thus, nothing can be packed.
- expected_accounts_to_combine = 0; - } + let expected_accounts_to_combine = num_slots; (0..accounts_to_combine .target_slots_sorted .len() @@ -1881,7 +1858,7 @@ pub mod tests { assert_eq!( accounts_to_combine.accounts_to_combine.len(), // if we are only trying to pack a single slot of multi-refs, it will succeed - if !two_refs || many_ref_slots == IncludeManyRefSlots::Include || num_slots == 1 {num_slots} else {0}, + if !two_refs || many_ref_slots == IncludeManyRefSlots::Include || num_slots == 1 || num_slots == 2 {num_slots} else {0}, "method: {method:?}, num_slots: {num_slots}, two_refs: {two_refs}, many_refs: {many_ref_slots:?}" ); @@ -3840,7 +3817,7 @@ pub mod tests { } #[test] - fn test_addref_accounts_failed_to_shrink_ancient() { + fn test_shrink_ancient_expected_unref() { let db = AccountsDb::new_single_for_tests(); let empty_account = AccountSharedData::default(); for count in 0..3 { let unrefed_pubkeys = (0..count) .map(|_| solana_sdk::pubkey::new_rand()) .collect::<Vec<_>>(); // how many of `many_ref_accounts` should be found in the index with ref_count=1 - let mut expected_ref_counts = HashMap::<Pubkey, u64>::default(); + let mut expected_ref_counts_before_unref = HashMap::<Pubkey, u64>::default(); + let mut expected_ref_counts_after_unref = HashMap::<Pubkey, u64>::default(); unrefed_pubkeys.iter().for_each(|k| { for slot in 0..2 { // each upsert here (to a different slot) adds a refcount of 1 since entry is NOT cached db.accounts_index.upsert( slot, slot, k, &empty_account, &AccountSecondaryIndexes::default(), AccountInfo::default(), &mut Vec::default(), UpsertReclaim::IgnoreReclaims, ); } - // set to 2 initially, made part of `unrefed_pubkeys`, expect it to be addref'd to 3 - expected_ref_counts.insert(*k, 3); + expected_ref_counts_before_unref.insert(*k, 2); + expected_ref_counts_after_unref.insert(*k, 1); }); let shrink_collect = ShrinkCollect::<ShrinkCollectAliveSeparatedByRefs> { // the only interesting field unrefed_pubkeys: unrefed_pubkeys.iter().collect(), // irrelevant fields zero_lamport_single_ref_pubkeys: Vec::default(), slot: 0, capacity: 0, alive_accounts: ShrinkCollectAliveSeparatedByRefs { one_ref: AliveAccounts::with_capacity(0, 0), many_refs_this_is_newest_alive: AliveAccounts::with_capacity(0, 0), many_refs_old_alive: AliveAccounts::with_capacity(0, 0), }, alive_total_bytes: 0, total_starting_accounts: 0, all_are_zero_lamports: false, }; - let accounts_to_combine = AccountsToCombine { - accounts_keep_slots: HashMap::default(), - accounts_to_combine: vec![shrink_collect], - target_slots_sorted: Vec::default(), - unpackable_slots_count: 0, - }; - db.addref_accounts_failed_to_shrink_ancient(accounts_to_combine.accounts_to_combine); + + // Assert ref_counts before unref.
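+ // (reading aid: scan's callback receives each pubkey together with an
+ // Option of (slot list, ref count); the assertions below compare the
+ // ref count against the expected map)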
+ db.accounts_index.scan( + shrink_collect.unrefed_pubkeys.iter().cloned(), + |k, slot_refs, _entry| { + assert_eq!( + expected_ref_counts_before_unref.remove(k).unwrap(), + slot_refs.unwrap().1 + ); + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty + }, + None, + false, + ScanFilter::All, + ); + assert!(expected_ref_counts_before_unref.is_empty()); + + // unref ref_counts + db.unref_shrunk_dead_accounts(shrink_collect.unrefed_pubkeys.iter().cloned(), 0); + + // Assert ref_counts after unref db.accounts_index.scan( - unrefed_pubkeys.iter(), + shrink_collect.unrefed_pubkeys.iter().cloned(), |k, slot_refs, _entry| { - assert_eq!(expected_ref_counts.remove(k).unwrap(), slot_refs.unwrap().1); + assert_eq!( + expected_ref_counts_after_unref.remove(k).unwrap(), + slot_refs.unwrap().1 + ); AccountsIndexScanResult::OnlyKeepInMemoryIfDirty }, None, @@ -3903,7 +3898,7 @@ pub mod tests { ScanFilter::All, ); // should have removed all of them - assert!(expected_ref_counts.is_empty()); + assert!(expected_ref_counts_after_unref.is_empty()); } } From 5716b7ea1eacf859a8769277fc6294717ba11398 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 4 Sep 2024 15:10:46 -0500 Subject: [PATCH 296/529] rename pubkeys_to_unref (#2834) --- accounts-db/src/accounts_db.rs | 24 ++++++++++++------------ accounts-db/src/ancient_append_vecs.rs | 18 +++++++++--------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b82c7f91dfbb28..d528c4a3c6b47d 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -476,7 +476,7 @@ impl AncientSlotPubkeys { pub(crate) struct ShrinkCollect<'a, T: ShrinkCollectRefs<'a>> { pub(crate) slot: Slot, pub(crate) capacity: u64, - pub(crate) unrefed_pubkeys: Vec<&'a Pubkey>, + pub(crate) pubkeys_to_unref: Vec<&'a Pubkey>, pub(crate) zero_lamport_single_ref_pubkeys: Vec<&'a Pubkey>, pub(crate) alive_accounts: T, /// total size in storage of all alive accounts @@ -3959,7 +3959,7 @@ impl AccountsDb { /// load the account index entry for the first `count` items in `accounts` /// store a reference to all alive accounts in `alive_accounts` - /// unref and optionally store a reference to all pubkeys that are in the index, but dead in `unrefed_pubkeys` + /// store all pubkeys dead in `slot_to_shrink` in `pubkeys_to_unref` /// return sum of account size for all alive accounts fn load_accounts_index_for_shrink<'a, T: ShrinkCollectRefs<'a>>( &self, @@ -4157,7 +4157,7 @@ impl AccountsDb { let len = stored_accounts.len(); let alive_accounts_collect = Mutex::new(T::with_capacity(len, slot)); - let unrefed_pubkeys_collect = Mutex::new(Vec::with_capacity(len)); + let pubkeys_to_unref_collect = Mutex::new(Vec::with_capacity(len)); let zero_lamport_single_ref_pubkeys_collect = Mutex::new(Vec::with_capacity(len)); stats .accounts_loaded @@ -4172,7 +4172,7 @@ impl AccountsDb { .for_each(|stored_accounts| { let LoadAccountsIndexForShrink { alive_accounts, - pubkeys_to_unref: mut unrefed_pubkeys, + mut pubkeys_to_unref, all_are_zero_lamports, mut zero_lamport_single_ref_pubkeys, } = self.load_accounts_index_for_shrink(stored_accounts, stats, slot); @@ -4182,10 +4182,10 @@ impl AccountsDb { .lock() .unwrap() .collect(alive_accounts); - unrefed_pubkeys_collect + pubkeys_to_unref_collect .lock() .unwrap() - .append(&mut unrefed_pubkeys); + .append(&mut pubkeys_to_unref); zero_lamport_single_ref_pubkeys_collect .lock() .unwrap() @@ -4197,7 +4197,7 @@ impl AccountsDb { }); let alive_accounts = 
alive_accounts_collect.into_inner().unwrap(); - let unrefed_pubkeys = unrefed_pubkeys_collect.into_inner().unwrap(); + let pubkeys_to_unref = pubkeys_to_unref_collect.into_inner().unwrap(); let zero_lamport_single_ref_pubkeys = zero_lamport_single_ref_pubkeys_collect .into_inner() .unwrap(); @@ -4223,7 +4223,7 @@ impl AccountsDb { ShrinkCollect { slot, capacity: *capacity, - unrefed_pubkeys, + pubkeys_to_unref, zero_lamport_single_ref_pubkeys, alive_accounts, alive_total_bytes, @@ -4304,7 +4304,7 @@ impl AccountsDb { if !shrink_collect.all_are_zero_lamports { self.add_uncleaned_pubkeys_after_shrink( shrink_collect.slot, - shrink_collect.unrefed_pubkeys.iter().cloned().cloned(), + shrink_collect.pubkeys_to_unref.iter().cloned().cloned(), ); } @@ -4397,7 +4397,7 @@ impl AccountsDb { return; } - self.unref_shrunk_dead_accounts(shrink_collect.unrefed_pubkeys.iter().cloned(), slot); + self.unref_shrunk_dead_accounts(shrink_collect.pubkeys_to_unref.iter().cloned(), slot); let total_accounts_after_shrink = shrink_collect.alive_accounts.len(); debug!( @@ -5027,7 +5027,7 @@ impl AccountsDb { ) { /* This is only called during 'shrink'-type operations. - Original accounts were separated into 'accounts' and 'unrefed_pubkeys'. + Original accounts were separated into 'accounts' and 'pubkeys_to_unref'. These sets correspond to 'alive' and 'dead'. 'alive' means this account in this slot is in the accounts index. 'dead' means this account in this slot is NOT in the accounts index. @@ -16453,7 +16453,7 @@ pub mod tests { ); assert_eq!( shrink_collect - .unrefed_pubkeys + .pubkeys_to_unref .iter() .sorted() .cloned() diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 92ed6354432c2c..0eb3309ac5133a 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -474,7 +474,7 @@ impl AccountsDb { .iter() .for_each(|combine| { self.unref_shrunk_dead_accounts( - combine.unrefed_pubkeys.iter().cloned(), + combine.pubkeys_to_unref.iter().cloned(), combine.slot, ); }); @@ -831,7 +831,7 @@ impl AccountsDb { // This would fail the invariant that the highest slot # where an account exists defines the most recent account. // It could be a clean error or a transient condition that will resolve if we encounter this situation. 
// The count of these accounts per call will be reported by metrics in `unpackable_slots_count` - if shrink_collect.unrefed_pubkeys.is_empty() + if shrink_collect.pubkeys_to_unref.is_empty() && shrink_collect.alive_accounts.one_ref.accounts.is_empty() && shrink_collect .alive_accounts @@ -1866,7 +1866,7 @@ pub mod tests { assert!(!accounts_to_combine .accounts_to_combine .iter() - .any(|a| a.unrefed_pubkeys.is_empty())); + .any(|a| a.pubkeys_to_unref.is_empty())); } // all accounts should be in one_ref and all slots are available as target slots assert_eq!( @@ -3821,14 +3821,14 @@ pub mod tests { let db = AccountsDb::new_single_for_tests(); let empty_account = AccountSharedData::default(); for count in 0..3 { - let unrefed_pubkeys = (0..count) + let pubkeys_to_unref = (0..count) .map(|_| solana_sdk::pubkey::new_rand()) .collect::<Vec<_>>(); // how many of `many_ref_accounts` should be found in the index with ref_count=1 let mut expected_ref_counts_before_unref = HashMap::<Pubkey, u64>::default(); let mut expected_ref_counts_after_unref = HashMap::<Pubkey, u64>::default(); - unrefed_pubkeys.iter().for_each(|k| { + pubkeys_to_unref.iter().for_each(|k| { for slot in 0..2 { // each upsert here (to a different slot) adds a refcount of 1 since entry is NOT cached db.accounts_index.upsert( slot, slot, k, &empty_account, expected_ref_counts_before_unref.insert(*k, 2); expected_ref_counts_after_unref.insert(*k, 1); }); let shrink_collect = ShrinkCollect::<ShrinkCollectAliveSeparatedByRefs> { // the only interesting field - unrefed_pubkeys: unrefed_pubkeys.iter().collect(), + pubkeys_to_unref: pubkeys_to_unref.iter().collect(), // irrelevant fields zero_lamport_single_ref_pubkeys: Vec::default(), slot: 0, capacity: 0, aligned_total_bytes: 0, unrefed_pubkeys: Vec::default(), alive_accounts: ShrinkCollectAliveSeparatedByRefs { one_ref: AliveAccounts::with_capacity(0, 0), many_refs_this_is_newest_alive: AliveAccounts::with_capacity(0, 0), many_refs_old_alive: AliveAccounts::with_capacity(0, 0), }, alive_total_bytes: 0, total_starting_accounts: 0, all_are_zero_lamports: false, }; // Assert ref_counts before unref. db.accounts_index.scan( - shrink_collect.unrefed_pubkeys.iter().cloned(), + shrink_collect.pubkeys_to_unref.iter().cloned(), |k, slot_refs, _entry| { assert_eq!( expected_ref_counts_before_unref.remove(k).unwrap(), slot_refs.unwrap().1 ); AccountsIndexScanResult::OnlyKeepInMemoryIfDirty }, None, false, ScanFilter::All, ); assert!(expected_ref_counts_before_unref.is_empty()); // unref ref_counts - db.unref_shrunk_dead_accounts(shrink_collect.unrefed_pubkeys.iter().cloned(), 0); + db.unref_shrunk_dead_accounts(shrink_collect.pubkeys_to_unref.iter().cloned(), 0); // Assert ref_counts after unref db.accounts_index.scan( - shrink_collect.unrefed_pubkeys.iter().cloned(), + shrink_collect.pubkeys_to_unref.iter().cloned(), |k, slot_refs, _entry| { assert_eq!( expected_ref_counts_after_unref.remove(k).unwrap(), slot_refs.unwrap().1 ); AccountsIndexScanResult::OnlyKeepInMemoryIfDirty }, None, false, ScanFilter::All, ); // should have removed all of them assert!(expected_ref_counts_after_unref.is_empty()); } } From e207c6e0eaf8e1657fbfaff07da05ca6a7928349 Mon Sep 17 00:00:00 2001 From: steviez Date: Thu, 5 Sep 2024 12:46:28 +0900 Subject: [PATCH 297/529] Cleanup Validator::new() shred version check (#2843) No functional changes, just some minor cleanup to reduce the diff in an upcoming change --- core/src/validator.rs | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 6f42c60c1c9f5c..863da61782bc37 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -742,28 +742,26 @@ impl Validator { } let hard_forks = bank_forks.read().unwrap().root_bank().hard_forks(); - if !hard_forks.is_empty() { - info!("Hard forks: {:?}", hard_forks); - } - - node.info.set_wallclock(timestamp()); - node.info.set_shred_version(compute_shred_version( - &genesis_config.hash(), - Some(&hard_forks), - )); - Self::print_node_info(&node); + let shred_version = compute_shred_version(&genesis_config.hash(), Some(&hard_forks)); + info!( + "shred version: {shred_version}, hard forks: {:?}", + hard_forks + ); if let Some(expected_shred_version) = config.expected_shred_version { - if
expected_shred_version != node.info.shred_version() { - return Err(ValidatorError::Other(format!( - "shred version mismatch: expected {} found: {}", - expected_shred_version, - node.info.shred_version(), - )) + if expected_shred_version != shred_version { + return Err(ValidatorError::ShredVersionMismatch { + actual: shred_version, + expected: expected_shred_version, + } .into()); } } + node.info.set_shred_version(shred_version); + node.info.set_wallclock(timestamp()); + Self::print_node_info(&node); + let mut cluster_info = ClusterInfo::new( node.info.clone(), identity_keypair.clone(), @@ -1449,7 +1447,7 @@ impl Validator { ("cluster_type", genesis_config.cluster_type as u32, i64), ("elapsed_ms", start_time.elapsed().as_millis() as i64, i64), ("waited_for_supermajority", waited_for_supermajority, bool), - ("expected_shred_version", config.expected_shred_version, Option<i64>), + ("shred_version", shred_version as i64, i64), ); *start_progress.write().unwrap() = ValidatorStartProgress::Running; @@ -2360,6 +2358,9 @@ pub enum ValidatorError { )] PohTooSlow { mine: u64, target: u64 }, + #[error("shred version mismatch: actual {actual}, expected {expected}")] + ShredVersionMismatch { actual: u16, expected: u16 }, + #[error(transparent)] TraceError(#[from] TraceError), From 27f4b3d3b9b2cfad274c9cb45c33ea9a4009acb0 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 5 Sep 2024 14:19:20 +0400 Subject: [PATCH 298/529] sdk: Extract hash and hasher crates (#2015) * extract hash and hasher crates and re-export their contents in solana_program::hash * make bytemuck and serde optional, and activate features in solana-program * fix frozen-abi support * fix import * update lock file * cargo sort * fix wasm_bindgen import * typo * fmt * make the inner field of Hash pub(crate) again because otherwise wasm_bindgen breaks * move program/src/wasm/hash.rs contents to solana-hash crate * update lock file * remove duplicate frozen-abi stuff * fix specialization stuff * delete tmp tests * update BlockhashQueue digest * Revert "update BlockhashQueue digest" This reverts commit 591302bd6efbd9be51c83a2dc7d13380ecc6b591.
* update expected digests after confirming that the change is merely from moving the Hash struct and not a real ABI change * update another digest * update digests in sdk and program * update digests in runtime * update VoteTransaction digest * conditionally activate solana-hash/borsh in solana-program * move js-sys dep under cfg(target_arch = "wasm32") * remove thiserror from hash crate * remove solana-program dependency from merkle-tree * make solana-hash no_std by default * fmt * fmt after rebase * make std feature default * make sha2 an optional dep when target_os = "solana", because it's unlikely to be used in that case * fmt * make rustc_version optional * update lock file * fix frozen-abi lint * another lint fix * add comment about sha2 removal * avoid Vec in FromStr Co-authored-by: Jon C * put Hash::new_unique behind #[cfg(feature = "dev-context-only-utils")] * move tests from solana-hasher to solana-hash * rename solana-hasher to solana-sha256-hasher * fmt * make conditional import more consistent Co-authored-by: Jon C * don't use std feature of bs58 in solana-hash * undo putting new_unique behind dev-context-only-utils * missing feature * rename to write_as_base58 and reorder params Co-authored-by: Jon C * update write_as_base58 usage * fix feature activation for tests * remove part of doc that no longer makes sense Co-authored-by: Jon C --------- Co-authored-by: Jon C --- Cargo.lock | 34 +++- Cargo.toml | 6 +- accounts-db/src/blockhash_queue.rs | 2 +- core/src/banking_trace.rs | 2 +- core/src/consensus.rs | 2 +- core/src/consensus/tower1_14_11.rs | 2 +- core/src/consensus/tower1_7_14.rs | 2 +- core/src/repair/serve_repair.rs | 4 +- frozen-abi/Cargo.toml | 2 +- gossip/src/cluster_info.rs | 2 +- merkle-tree/Cargo.toml | 3 +- merkle-tree/src/merkle_tree.rs | 2 +- programs/sbf/Cargo.lock | 30 ++- programs/vote/src/vote_state/mod.rs | 2 +- rpc-client-api/Cargo.toml | 2 +- runtime/src/bank.rs | 2 +- runtime/src/bank/serde_snapshot.rs | 2 +- sdk/hash/Cargo.toml | 52 +++++ sdk/hash/build.rs | 1 + sdk/hash/src/lib.rs | 254 ++++++++++++++++++++++++ sdk/macro/Cargo.toml | 2 +- sdk/program/Cargo.toml | 13 +- sdk/program/src/hash.rs | 236 +--------------------- sdk/program/src/message/legacy.rs | 9 +- sdk/program/src/message/versions/mod.rs | 6 +- sdk/program/src/syscalls/definitions.rs | 5 +- sdk/program/src/vote/state/mod.rs | 6 +- sdk/program/src/wasm/hash.rs | 57 ------ sdk/program/src/wasm/mod.rs | 1 - sdk/sha256-hasher/Cargo.toml | 32 +++ sdk/sha256-hasher/src/lib.rs | 68 +++++++ sdk/src/transaction/mod.rs | 4 +- 32 files changed, 521 insertions(+), 326 deletions(-) create mode 100644 sdk/hash/Cargo.toml create mode 120000 sdk/hash/build.rs create mode 100644 sdk/hash/src/lib.rs delete mode 100644 sdk/program/src/wasm/hash.rs create mode 100644 sdk/sha256-hasher/Cargo.toml create mode 100644 sdk/sha256-hasher/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 05283f66d31d42..62b36b6bd5d043 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6572,6 +6572,26 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-hash" +version = "2.1.0" +dependencies = [ + "borsh 1.5.1", + "bs58", + "bytemuck", + "bytemuck_derive", + "js-sys", + "rustc_version 0.4.1", + "serde", + "serde_derive", + "solana-atomic-u64", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-hash", + "solana-sanitize", + "wasm-bindgen", +] + [[package]] name = "solana-inline-spl" version = "2.1.0" @@ -6800,7 +6820,8 @@ version = "2.1.0" dependencies = [ "fast-math", "hex", - "solana-program", + "solana-hash", + 
"solana-sha256-hasher", ] [[package]] @@ -7012,6 +7033,7 @@ dependencies = [ "solana-define-syscall", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-hash", "solana-logger", "solana-msg", "solana-program-memory", @@ -7019,6 +7041,7 @@ dependencies = [ "solana-sdk-macro", "solana-secp256k1-recover", "solana-serde-varint", + "solana-sha256-hasher", "solana-short-vec", "static_assertions", "test-case", @@ -7577,6 +7600,15 @@ dependencies = [ "solana-short-vec", ] +[[package]] +name = "solana-sha256-hasher" +version = "2.1.0" +dependencies = [ + "sha2 0.10.8", + "solana-define-syscall", + "solana-hash", +] + [[package]] name = "solana-short-vec" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 3859d377714dff..11d7bc1f9550cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,12 +106,14 @@ members = [ "sdk/clock", "sdk/decode-error", "sdk/gen-headers", + "sdk/hash", "sdk/macro", "sdk/msg", "sdk/package-metadata-macro", "sdk/program", "sdk/program-memory", "sdk/serde-varint", + "sdk/sha256-hasher", "send-transaction-service", "short-vec", "stake-accounts", @@ -198,7 +200,7 @@ bincode = "1.3.3" bitflags = { version = "2.6.0" } blake3 = "1.5.1" borsh = { version = "1.5.1", features = ["derive", "unstable__schema"] } -bs58 = "0.5.1" +bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" @@ -390,6 +392,7 @@ solana-genesis-utils = { path = "genesis-utils", version = "=2.1.0" } agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=2.1.0" } solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.1.0" } solana-gossip = { path = "gossip", version = "=2.1.0" } +solana-hash = { path = "sdk/hash", version = "=2.1.0" } solana-inline-spl = { path = "inline-spl", version = "=2.1.0" } solana-lattice-hash = { path = "lattice-hash", version = "=2.1.0" } solana-ledger = { path = "ledger", version = "=2.1.0" } @@ -418,6 +421,7 @@ solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" } solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-features = false } solana-sanitize = { path = "sanitize", version = "=2.1.0" } solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } +solana-sha256-hasher = { path = "sdk/sha256-hasher", version = "=2.1.0" } solana-timings = { path = "timings", version = "=2.1.0" } solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.1.0" } solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.1.0" } diff --git a/accounts-db/src/blockhash_queue.rs b/accounts-db/src/blockhash_queue.rs index 871ad115596cb1..e99204160aa1ff 100644 --- a/accounts-db/src/blockhash_queue.rs +++ b/accounts-db/src/blockhash_queue.rs @@ -26,7 +26,7 @@ impl HashInfo { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "HzbCqb1YtCodkx6Fu57SpzkgaLp9WSrtw8texsjBhEDH") + frozen_abi(digest = "BxykY65dC2NCcDm17rHQPjEY8wK55sKAhfhKVFGc5T1u") )] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct BlockhashQueue { diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs index c2b3c38695d123..cc077dfa2c2755 100644 --- a/core/src/banking_trace.rs +++ b/core/src/banking_trace.rs @@ -65,7 +65,7 @@ pub struct BankingTracer { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "Eq6YrAFtTbtPrCEvh6Et1mZZDCARUg1gcK2qiZdqyjUz") + frozen_abi(digest = "F5GH1poHbPqipU4DB3MczhSxHZw4o27f3C7QnMVirFci") )] #[derive(Serialize, 
Deserialize, Debug)] pub struct TimedTracedEvent(pub std::time::SystemTime, pub TracedEvent); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 79446125f5b819..c079dbb7cde51d 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -239,7 +239,7 @@ pub(crate) enum BlockhashStatus { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "H6T5A66kgJYANFXVrUprxV76WD5ce7Gf62q9SiBC2uYk") + frozen_abi(digest = "5BUswzvu7Qe44HbR4eBwPX4Jn9GSfhjmg8eijnBjoKUd") )] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower { diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs index 8068d000deff22..7dfcd0bd340fac 100644 --- a/core/src/consensus/tower1_14_11.rs +++ b/core/src/consensus/tower1_14_11.rs @@ -9,7 +9,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "EqYa8kwY9Z1Zbjxgs2aBbqKyCK4f7WAG8gJ7pVSQyKzk") + frozen_abi(digest = "9P6J8ZtVLR5zbUxWT83q1iUsJMH6B7SwcomSqcoomPmg") )] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower1_14_11 { diff --git a/core/src/consensus/tower1_7_14.rs b/core/src/consensus/tower1_7_14.rs index c7255bb88d9dd5..7a57b7b1d4f09e 100644 --- a/core/src/consensus/tower1_7_14.rs +++ b/core/src/consensus/tower1_7_14.rs @@ -11,7 +11,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "9Kc3Cpak93xdL8bCnEwMWA8ZLGCBNfqh9PLo1o5RiPyT") + frozen_abi(digest = "DJVvkk4EFFCbA37vsKcFPGuwEULh2wEvMUESsTyvABzU") )] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower1_7_14 { diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index ad123ea8562957..24f484105a8ddb 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -143,7 +143,7 @@ impl AncestorHashesRepairType { #[cfg_attr( feature = "frozen-abi", derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "AKpurCovzn6rsji4aQrP3hUdEHxjtXUfT7AatZXN7Rpz") + frozen_abi(digest = "98D6KvXCBxAHTxXgqiywLTugTp6WFUHSf559yy4VvKE7") )] #[derive(Debug, Deserialize, Serialize)] pub enum AncestorHashesResponse { @@ -224,7 +224,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>; #[cfg_attr( feature = "frozen-abi", derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "5cmSdmXMgkpUH5ZCmYYjxUVQfULe9iJqCqqfrADfsEmK") + frozen_abi(digest = "DzofXbeBFKJpbA88nUEnDpCGKvMEcguNphyQoVr7FyLh") )] #[derive(Debug, Deserialize, Serialize)] pub enum RepairProtocol { diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 096662d05956f9..18382a28b3b1bd 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -10,7 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -bs58 = { workspace = true } +bs58 = { workspace = true, features = ["alloc"] } bv = { workspace = true, features = ["serde"] } log = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["rc"] } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 686510023b476f..fdce90cba41e6c 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -311,7 +311,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; #[cfg_attr( feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "6YaMJand6tKtNLUrqvusC5QVDmVLCWYRg5LtxYNi6XN4") + frozen_abi(digest = "7jwuQ3oFEy8bMnmr5XHSR2jqivZniG8ZjxHx3YKTfR6C") )] #[derive(Serialize, Deserialize, Debug)] 
#[allow(clippy::large_enum_variant)] diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml index 9b9f566ade7406..cbd11a0e65797c 100644 --- a/merkle-tree/Cargo.toml +++ b/merkle-tree/Cargo.toml @@ -11,7 +11,8 @@ edition = { workspace = true } [dependencies] fast-math = { workspace = true } -solana-program = { workspace = true } +solana-hash = { workspace = true } +solana-sha256-hasher = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/merkle-tree/src/merkle_tree.rs b/merkle-tree/src/merkle_tree.rs index 09285a41e7af1f..de87811240a958 100644 --- a/merkle-tree/src/merkle_tree.rs +++ b/merkle-tree/src/merkle_tree.rs @@ -1,4 +1,4 @@ -use solana_program::hash::{hashv, Hash}; +use {solana_hash::Hash, solana_sha256_hasher::hashv}; // We need to discern between leaf and intermediate nodes to prevent trivial second // pre-image attacks. diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index fc502d752ce1ce..f0d9d78805ab85 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5163,6 +5163,22 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-hash" +version = "2.1.0" +dependencies = [ + "borsh 1.5.1", + "bs58", + "bytemuck", + "bytemuck_derive", + "js-sys", + "serde", + "serde_derive", + "solana-atomic-u64", + "solana-sanitize", + "wasm-bindgen", +] + [[package]] name = "solana-inline-spl" version = "2.1.0" @@ -5291,7 +5307,8 @@ name = "solana-merkle-tree" version = "2.1.0" dependencies = [ "fast-math", - "solana-program", + "solana-hash", + "solana-sha256-hasher", ] [[package]] @@ -5424,12 +5441,14 @@ dependencies = [ "solana-clock", "solana-decode-error", "solana-define-syscall", + "solana-hash", "solana-msg", "solana-program-memory", "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", "solana-serde-varint", + "solana-sha256-hasher", "solana-short-vec", "thiserror", "wasm-bindgen", ] @@ -6363,6 +6382,15 @@ dependencies = [ "serde", ] +[[package]] +name = "solana-sha256-hasher" +version = "2.1.0" +dependencies = [ + "sha2 0.10.8", + "solana-define-syscall", + "solana-hash", +] + [[package]] name = "solana-short-vec" version = "2.1.0" diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 851faa957db9fe..c4ea689fc9d3bf 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -30,7 +30,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "3R2hRL3FM6jovbYubq2UWeiVDEVzrhH6M1ihoCPZWLsk") + frozen_abi(digest = "3dbyMxwfCN43orGKa5YiyY1EqN2K97pTicNhKYTZSUQH") )] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub enum VoteTransaction { diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml index 22a883244c709e..021e069960e39e 100644 --- a/rpc-client-api/Cargo.toml +++ b/rpc-client-api/Cargo.toml @@ -12,7 +12,7 @@ edition = { workspace = true } [dependencies] anyhow = { workspace = true } base64 = { workspace = true } -bs58 = { workspace = true } +bs58 = { workspace = true, features = ["std"] } jsonrpc-core = { workspace = true } reqwest = { workspace = true, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } reqwest-middleware = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 841b09941873d8..0b53a3fd95617f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -247,7 +247,7 @@ struct RentMetrics { pub type BankStatusCache = StatusCache<Result<()>>; #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest =
"9Pf3NTGr1AEzB4nKaVBY24uNwoQR4aJi8vc96W6kGvNk") + frozen_abi(digest = "EQwW6Ym6ECKaAREnAgkhXYisBQovuraBKSALdJ8koZzq") )] pub type BankSlotDelta = SlotDelta>; diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index bdcf7ae6215f72..c628fe7c0e1360 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -535,7 +535,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "HQYDRuCaM5V1ggSuMPTKT5Mu2vE5HX4y4ZM1Xuorx6My") + frozen_abi(digest = "FuFBQtx7rGruVC3cyh4zvZ3uN4RUtBiwh1pXJRwUCcoS") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/sdk/hash/Cargo.toml b/sdk/hash/Cargo.toml new file mode 100644 index 00000000000000..de0dafd76226da --- /dev/null +++ b/sdk/hash/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "solana-hash" +description = "Solana wrapper for the 32-byte output of a hashing algorithm." +documentation = "https://docs.rs/solana-hash" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +borsh = { workspace = true, optional = true } +bs58 = { workspace = true, default-features = false } +bytemuck = { workspace = true, optional = true } +bytemuck_derive = { workspace = true, optional = true } +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-atomic-u64 = { workspace = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-sanitize = { workspace = true } + +[dev-dependencies] +solana-hash = { path = ".", features = ["dev-context-only-utils"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = { workspace = true } +wasm-bindgen = { workspace = true } + +[build-dependencies] +rustc_version = { workspace = true, optional = true } + +[features] +borsh = ["dep:borsh", "std"] +bytemuck = ["dep:bytemuck", "dep:bytemuck_derive"] +default = ["std"] +dev-context-only-utils = ["bs58/alloc"] +frozen-abi = [ + "dep:rustc_version", + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", + "std" +] +serde = ["dep:serde", "dep:serde_derive"] +std = [] + +[lints] +workspace = true diff --git a/sdk/hash/build.rs b/sdk/hash/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/sdk/hash/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/hash/src/lib.rs b/sdk/hash/src/lib.rs new file mode 100644 index 00000000000000..dfc2e3efce8cf3 --- /dev/null +++ b/sdk/hash/src/lib.rs @@ -0,0 +1,254 @@ +#![no_std] +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#[cfg(feature = "borsh")] +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +#[cfg(any(feature = "std", target_arch = "wasm32"))] +extern crate std; +#[cfg(feature = "bytemuck")] +use bytemuck_derive::{Pod, Zeroable}; +#[cfg(feature = "serde")] +use serde_derive::{Deserialize, Serialize}; +#[cfg(any(all(feature = "borsh", feature = "std"), target_arch = "wasm32"))] +use std::string::ToString; +use { + core::{ + convert::TryFrom, + fmt, mem, + str::{from_utf8, FromStr}, + }, + solana_sanitize::Sanitize, +}; +#[cfg(target_arch = "wasm32")] +use { + js_sys::{Array, Uint8Array}, + std::{boxed::Box, format, string::String, vec}, + 
wasm_bindgen::{prelude::*, JsCast}, +}; + +/// Size of a hash in bytes. +pub const HASH_BYTES: usize = 32; +/// Maximum string length of a base58 encoded hash. +pub const MAX_BASE58_LEN: usize = 44; + +/// A hash; the 32-byte output of a hashing algorithm. +/// +/// This struct is used most often in `solana-sdk` and related crates to contain +/// a [SHA-256] hash, but may instead contain a [blake3] hash. +/// +/// [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 +/// [blake3]: https://github.com/BLAKE3-team/BLAKE3 +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] +#[cfg_attr( + feature = "borsh", + derive(BorshSerialize, BorshDeserialize), + borsh(crate = "borsh") +)] +#[cfg_attr(all(feature = "borsh", feature = "std"), derive(BorshSchema))] +#[cfg_attr(feature = "bytemuck", derive(Pod, Zeroable))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize,))] +#[derive(Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[repr(transparent)] +pub struct Hash(pub(crate) [u8; HASH_BYTES]); + +impl Sanitize for Hash {} + +impl From<[u8; HASH_BYTES]> for Hash { + fn from(from: [u8; 32]) -> Self { + Self(from) + } +} + +impl AsRef<[u8]> for Hash { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +fn write_as_base58(f: &mut fmt::Formatter, h: &Hash) -> fmt::Result { + let mut out = [0u8; MAX_BASE58_LEN]; + let out_slice: &mut [u8] = &mut out; + // This will never fail because the only possible error is BufferTooSmall, + // and we will never call it with too small a buffer. + let len = bs58::encode(h.0).onto(out_slice).unwrap(); + let as_str = from_utf8(&out[..len]).unwrap(); + f.write_str(as_str) +} + +impl fmt::Debug for Hash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write_as_base58(f, self) + } +} + +impl fmt::Display for Hash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write_as_base58(f, self) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ParseHashError { + WrongSize, + Invalid, +} + +#[cfg(feature = "std")] +impl std::error::Error for ParseHashError {} + +impl fmt::Display for ParseHashError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ParseHashError::WrongSize => f.write_str("string decoded to wrong size for hash"), + ParseHashError::Invalid => f.write_str("failed to decoded string to hash"), + } + } +} + +impl FromStr for Hash { + type Err = ParseHashError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.len() > MAX_BASE58_LEN { + return Err(ParseHashError::WrongSize); + } + let mut bytes = [0; HASH_BYTES]; + let decoded_size = bs58::decode(s) + .onto(&mut bytes) + .map_err(|_| ParseHashError::Invalid)?; + if decoded_size != mem::size_of::<Hash>() { + Err(ParseHashError::WrongSize) + } else { + Ok(bytes.into()) + } + } +} + +impl Hash { + pub fn new(hash_slice: &[u8]) -> Self { + Hash(<[u8; HASH_BYTES]>::try_from(hash_slice).unwrap()) + } + + pub const fn new_from_array(hash_array: [u8; HASH_BYTES]) -> Self { + Self(hash_array) + } + + /// unique Hash for tests and benchmarks.
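+ ///
+ /// A hedged doc sketch of the guarantee (not part of this commit):
+ ///
+ /// ```ignore
+ /// // each call consumes a fresh counter value, so two calls never collide
+ /// assert_ne!(Hash::new_unique(), Hash::new_unique());
+ /// ```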
+ pub fn new_unique() -> Self { + use solana_atomic_u64::AtomicU64; + static I: AtomicU64 = AtomicU64::new(1); + + let mut b = [0u8; HASH_BYTES]; + let i = I.fetch_add(1); + b[0..8].copy_from_slice(&i.to_le_bytes()); + Self::new(&b) + } + + pub fn to_bytes(self) -> [u8; HASH_BYTES] { + self.0 + } +} + +#[cfg(target_arch = "wasm32")] +#[allow(non_snake_case)] +#[wasm_bindgen] +impl Hash { + /// Create a new Hash object + /// + /// * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result<Hash, JsValue> { + if let Some(base58_str) = value.as_string() { + base58_str + .parse::<Hash>() + .map_err(|x| JsValue::from(x.to_string())) + } else if let Some(uint8_array) = value.dyn_ref::<Uint8Array>() { + Ok(Hash::new(&uint8_array.to_vec())) + } else if let Some(array) = value.dyn_ref::<Array>() { + let mut bytes = vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if n >= 0. && n <= 255. { + bytes.push(n as u8); + continue; + } + } + return Err(format!("Invalid array argument: {:?}", x).into()); + } + Ok(Hash::new(&bytes)) + } else if value.is_undefined() { + Ok(Hash::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base58 string representation of the hash + pub fn toString(&self) -> String { + self.to_string() + } + + /// Checks if two `Hash`s are equal + pub fn equals(&self, other: &Hash) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the hash + pub fn toBytes(&self) -> Box<[u8]> { + self.0.clone().into() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_new_unique() { + assert!(Hash::new_unique() != Hash::new_unique()); + } + + #[test] + fn test_hash_fromstr() { + let hash = Hash::new_from_array([1; 32]); + + let mut hash_base58_str = bs58::encode(hash).into_string(); + + assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); + + hash_base58_str.push_str(&bs58::encode(hash.as_ref()).into_string()); + assert_eq!( + hash_base58_str.parse::<Hash>(), + Err(ParseHashError::WrongSize) + ); + + hash_base58_str.truncate(hash_base58_str.len() / 2); + assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); + + hash_base58_str.truncate(hash_base58_str.len() / 2); + assert_eq!( + hash_base58_str.parse::<Hash>(), + Err(ParseHashError::WrongSize) + ); + + let input_too_big = bs58::encode(&[0xffu8; HASH_BYTES + 1]).into_string(); + assert!(input_too_big.len() > MAX_BASE58_LEN); + assert_eq!( + input_too_big.parse::<Hash>(), + Err(ParseHashError::WrongSize) + ); + + let mut hash_base58_str = bs58::encode(hash.as_ref()).into_string(); + assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); + + // throw some non-base58 stuff in there + hash_base58_str.replace_range(..1, "I"); + assert_eq!( + hash_base58_str.parse::<Hash>(), + Err(ParseHashError::Invalid) + ); + } +} diff --git a/sdk/macro/Cargo.toml b/sdk/macro/Cargo.toml index 04b11590829192..07829062a4ba01 100644 --- a/sdk/macro/Cargo.toml +++ b/sdk/macro/Cargo.toml @@ -13,7 +13,7 @@ edition = { workspace = true } proc-macro = true [dependencies] -bs58 = { workspace = true } +bs58 = { workspace = true, features = ["alloc"] } proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true, features = ["full"] } diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 3ab08562a96027..f293283f9dff3e 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -16,7 +16,7 @@ bincode = { workspace = true }
blake3 = { workspace = true, features = ["digest", "traits-preview"] } borsh = { workspace = true, optional = true } borsh0-10 = { package = "borsh", version = "0.10.3", optional = true } -bs58 = { workspace = true } +bs58 = { workspace = true, features = ["alloc"] } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } bytemuck_derive = { workspace = true } @@ -36,12 +36,18 @@ solana-clock = { workspace = true, features = ["serde"] } solana-decode-error = { workspace = true } solana-frozen-abi = { workspace = true, optional = true, features = ["frozen-abi"] } solana-frozen-abi-macro = { workspace = true, optional = true, features = ["frozen-abi"] } +solana-hash = { workspace = true, features = [ + "bytemuck", + "serde", + "std", +] } solana-msg = { workspace = true } solana-program-memory = { workspace = true } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } solana-serde-varint = { workspace = true } +solana-sha256-hasher = { workspace = true, features = ["sha2"] } solana-short-vec = { workspace = true } thiserror = { workspace = true } @@ -98,13 +104,14 @@ crate-type = ["cdylib", "rlib"] [features] default = ["borsh"] -borsh = ["dep:borsh", "dep:borsh0-10"] +borsh = ["dep:borsh", "dep:borsh0-10", "solana-hash/borsh"] dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", - "solana-short-vec/frozen-abi", + "solana-hash/frozen-abi", + "solana-short-vec/frozen-abi" ] [lints] diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index 27967c850376bb..990f6c184814bf 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -3,237 +3,7 @@ //! [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 //! [`Hash`]: struct@Hash -#[cfg(target_arch = "wasm32")] -use crate::wasm_bindgen; -#[cfg(feature = "borsh")] -use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use { - bytemuck_derive::{Pod, Zeroable}, - sha2::{Digest, Sha256}, - solana_sanitize::Sanitize, - std::{convert::TryFrom, fmt, mem, str::FromStr}, - thiserror::Error, +pub use { + solana_hash::{Hash, ParseHashError, HASH_BYTES}, + solana_sha256_hasher::{extend_and_hash, hash, hashv, Hasher}, }; - -/// Size of a hash in bytes. -pub const HASH_BYTES: usize = 32; -/// Maximum string length of a base58 encoded hash. -const MAX_BASE58_LEN: usize = 44; - -/// A hash; the 32-byte output of a hashing algorithm. -/// -/// This struct is used most often in `solana-sdk` and related crates to contain -/// a [SHA-256] hash, but may instead contain a [blake3] hash, as created by the -/// [`blake3`] module (and used in [`Message::hash`]). 
-/// -/// [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 -/// [blake3]: https://github.com/BLAKE3-team/BLAKE3 -/// [`blake3`]: crate::blake3 -/// [`Message::hash`]: crate::message::Message::hash -#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[cfg_attr( - feature = "borsh", - derive(BorshSerialize, BorshDeserialize, BorshSchema), - borsh(crate = "borsh") -)] -#[derive( - Serialize, - Deserialize, - Clone, - Copy, - Default, - Eq, - PartialEq, - Ord, - PartialOrd, - Hash, - Pod, - Zeroable, -)] -#[repr(transparent)] -pub struct Hash(pub(crate) [u8; HASH_BYTES]); - -#[derive(Clone, Default)] -pub struct Hasher { - hasher: Sha256, -} - -impl Hasher { - pub fn hash(&mut self, val: &[u8]) { - self.hasher.update(val); - } - pub fn hashv(&mut self, vals: &[&[u8]]) { - for val in vals { - self.hash(val); - } - } - pub fn result(self) -> Hash { - Hash(self.hasher.finalize().into()) - } -} - -impl Sanitize for Hash {} - -impl From<[u8; HASH_BYTES]> for Hash { - fn from(from: [u8; 32]) -> Self { - Self(from) - } -} - -impl AsRef<[u8]> for Hash { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl fmt::Debug for Hash { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", bs58::encode(self.0).into_string()) - } -} - -impl fmt::Display for Hash { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", bs58::encode(self.0).into_string()) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Error)] -pub enum ParseHashError { - #[error("string decoded to wrong size for hash")] - WrongSize, - #[error("failed to decoded string to hash")] - Invalid, -} - -impl FromStr for Hash { - type Err = ParseHashError; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - if s.len() > MAX_BASE58_LEN { - return Err(ParseHashError::WrongSize); - } - let bytes = bs58::decode(s) - .into_vec() - .map_err(|_| ParseHashError::Invalid)?; - if bytes.len() != mem::size_of::<Hash>() { - Err(ParseHashError::WrongSize) - } else { - Ok(Hash::new(&bytes)) - } - } -} - -impl Hash { - pub fn new(hash_slice: &[u8]) -> Self { - Hash(<[u8; HASH_BYTES]>::try_from(hash_slice).unwrap()) - } - - pub const fn new_from_array(hash_array: [u8; HASH_BYTES]) -> Self { - Self(hash_array) - } - - /// unique Hash for tests and benchmarks. - pub fn new_unique() -> Self { - use solana_atomic_u64::AtomicU64; - static I: AtomicU64 = AtomicU64::new(1); - - let mut b = [0u8; HASH_BYTES]; - let i = I.fetch_add(1); - b[0..8].copy_from_slice(&i.to_le_bytes()); - Self::new(&b) - } - - pub fn to_bytes(self) -> [u8; HASH_BYTES] { - self.0 - } -} - -/// Return a Sha256 hash for the given data. -pub fn hashv(vals: &[&[u8]]) -> Hash { - // Perform the calculation inline, calling this from within a program is - // not supported - #[cfg(not(target_os = "solana"))] - { - let mut hasher = Hasher::default(); - hasher.hashv(vals); - hasher.result() - } - // Call via a system call to perform the calculation - #[cfg(target_os = "solana")] - { - let mut hash_result = [0; HASH_BYTES]; - unsafe { - crate::syscalls::sol_sha256( - vals as *const _ as *const u8, - vals.len() as u64, - &mut hash_result as *mut _ as *mut u8, - ); - } - Hash::new_from_array(hash_result) - } -} - -/// Return a Sha256 hash for the given data. -pub fn hash(val: &[u8]) -> Hash { - hashv(&[val]) -} - -/// Return the hash of the given hash extended with the given value.
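Aside: `extend_and_hash`, documented here and shown next, just hashes the concatenation of the old hash and the new bytes. A minimal standalone sketch of the same semantics, assuming only the `sha2` crate (names and types are illustrative, not the crate's API):

    use sha2::{Digest, Sha256};

    // Sketch: extend_and_hash(id, val) is equivalent to hashing id || val,
    // because SHA-256's streaming updates match hashing the concatenation.
    fn extend_and_hash(id: &[u8; 32], val: &[u8]) -> [u8; 32] {
        let mut hash_data = id.to_vec();
        hash_data.extend_from_slice(val);
        Sha256::digest(&hash_data).into()
    }

    fn main() {
        let id = [7u8; 32];
        // Hash the same bytes incrementally and compare.
        let mut direct = Sha256::new();
        direct.update(id);
        direct.update(b"more");
        let expected: [u8; 32] = direct.finalize().into();
        assert_eq!(extend_and_hash(&id, b"more"), expected);
    }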
-pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash { - let mut hash_data = id.as_ref().to_vec(); - hash_data.extend_from_slice(val); - hash(&hash_data) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_new_unique() { - assert!(Hash::new_unique() != Hash::new_unique()); - } - - #[test] - fn test_hash_fromstr() { - let hash = hash(&[1u8]); - - let mut hash_base58_str = bs58::encode(hash).into_string(); - - assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); - - hash_base58_str.push_str(&bs58::encode(hash.0).into_string()); - assert_eq!( - hash_base58_str.parse::<Hash>(), - Err(ParseHashError::WrongSize) - ); - - hash_base58_str.truncate(hash_base58_str.len() / 2); - assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); - - hash_base58_str.truncate(hash_base58_str.len() / 2); - assert_eq!( - hash_base58_str.parse::<Hash>(), - Err(ParseHashError::WrongSize) - ); - - let input_too_big = bs58::encode(&[0xffu8; HASH_BYTES + 1]).into_string(); - assert!(input_too_big.len() > MAX_BASE58_LEN); - assert_eq!( - input_too_big.parse::<Hash>(), - Err(ParseHashError::WrongSize) - ); - - let mut hash_base58_str = bs58::encode(hash.0).into_string(); - assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); - - // throw some non-base58 stuff in there - hash_base58_str.replace_range(..1, "I"); - assert_eq!( - hash_base58_str.parse::<Hash>(), - Err(ParseHashError::Invalid) - ); - } -} diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index 4c1d4c5a9da418..68d5dfb2e25588 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -123,7 +123,7 @@ fn compile_instructions(ixs: &[Instruction], keys: &[Pubkey]) -> Vec<CompiledInstruction> pub fn hash_raw_message(message_bytes: &[u8]) -> Hash { - use blake3::traits::digest::Digest; + use {blake3::traits::digest::Digest, solana_hash::HASH_BYTES}; let mut hasher = blake3::Hasher::new(); hasher.update(b"solana-tx-message-v1"); hasher.update(message_bytes); - Hash(hasher.finalize().into()) + let hash_bytes: [u8; HASH_BYTES] = hasher.finalize().into(); + hash_bytes.into() } pub fn compile_instruction(&self, ix: &Instruction) -> CompiledInstruction { diff --git a/sdk/program/src/message/versions/mod.rs b/sdk/program/src/message/versions/mod.rs index 53b95d96acf490..24bd21d1090328 100644 --- a/sdk/program/src/message/versions/mod.rs +++ b/sdk/program/src/message/versions/mod.rs @@ -10,6 +10,7 @@ use { ser::{SerializeTuple, Serializer}, }, serde_derive::{Deserialize, Serialize}, + solana_hash::HASH_BYTES, solana_sanitize::{Sanitize, SanitizeError}, solana_short_vec as short_vec, std::{collections::HashSet, fmt}, @@ -33,7 +34,7 @@ pub const MESSAGE_VERSION_PREFIX: u8 = 0x80; /// format.
#[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "8wyn6rxrJ1WwsUJkVxtDH9VEmd7djwqMfBLL3EpuY7H4"), + frozen_abi(digest = "3g49yJ9ZZPsT9iF6Za6FyWXV259vWcY6gfJ94uzQ5BcY"), derive(AbiEnumVisitor, AbiExample) )] #[derive(Debug, PartialEq, Eq, Clone)] @@ -161,7 +162,8 @@ impl VersionedMessage { let mut hasher = blake3::Hasher::new(); hasher.update(b"solana-tx-message-v1"); hasher.update(message_bytes); - Hash(hasher.finalize().into()) + let hash_bytes: [u8; HASH_BYTES] = hasher.finalize().into(); + hash_bytes.into() } } diff --git a/sdk/program/src/syscalls/definitions.rs b/sdk/program/src/syscalls/definitions.rs index fd22296e03ff59..061dd317c2794d 100644 --- a/sdk/program/src/syscalls/definitions.rs +++ b/sdk/program/src/syscalls/definitions.rs @@ -1,6 +1,6 @@ #[cfg(target_feature = "static-syscalls")] pub use solana_define_syscall::sys_hash; -#[deprecated(since = "2.1.0", note = "Use `solana-msg::sol_log` instead.")] +#[deprecated(since = "2.1.0", note = "Use `solana_msg::sol_log` instead.")] pub use solana_msg::sol_log; #[deprecated( since = "2.1.0", @@ -12,6 +12,8 @@ pub use solana_program_memory::syscalls::{sol_memcmp_, sol_memcpy_, sol_memmove_ note = "Use `solana_secp256k1_recover::sol_secp256k1_recover` instead" )] pub use solana_secp256k1_recover::sol_secp256k1_recover; +#[deprecated(since = "2.1.0", note = "Use solana_sha256_hasher::sol_sha256 instead")] +pub use solana_sha256_hasher::sol_sha256; use { crate::{ instruction::{AccountMeta, ProcessedSiblingInstruction}, @@ -24,7 +26,6 @@ define_syscall!(fn sol_log_compute_units_()); define_syscall!(fn sol_log_pubkey(pubkey_addr: *const u8)); define_syscall!(fn sol_create_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8) -> u64); define_syscall!(fn sol_try_find_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8, bump_seed_addr: *const u8) -> u64); -define_syscall!(fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_blake3(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_invoke_signed_c(instruction_addr: *const u8, account_infos_addr: *const u8, account_infos_len: u64, signers_seeds_addr: *const u8, signers_seeds_len: u64) -> u64); diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 7a42ad8d066243..40f3268fcfd3d4 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -54,7 +54,7 @@ pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 16; #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH"), + frozen_abi(digest = "GvUzgtcxhKVVxPAjSntXGPqjLZK5ovgZzCiUP1tDpB9q"), derive(AbiExample) )] #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)] @@ -168,7 +168,7 @@ impl From for LandedVote { #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "GwJfVFsATSj7nvKwtUkHYzqPRaPY6SLxPGXApuCya3x5"), + frozen_abi(digest = "DRKTb72wifCUcCTSJs6PqWrQQK5Pfis4SCLEvXqWnDaL"), derive(AbiExample) )] #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)] @@ -221,7 +221,7 @@ impl VoteStateUpdate { #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "5VUusSTenF9vZ9eHiCprVe9ABJUHCubeDNCCDxykybZY"), + frozen_abi(digest = "5PFw9pyF1UG1DXVsw7gpjHegNyRycAAxWf2GA9wUXPs5"), derive(AbiExample) )] 
#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)] diff --git a/sdk/program/src/wasm/hash.rs b/sdk/program/src/wasm/hash.rs deleted file mode 100644 index add1e6bbe80657..00000000000000 --- a/sdk/program/src/wasm/hash.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! `Hash` Javascript interface -#![cfg(target_arch = "wasm32")] -#![allow(non_snake_case)] -use { - crate::{hash::*, wasm::display_to_jsvalue}, - js_sys::{Array, Uint8Array}, - wasm_bindgen::{prelude::*, JsCast}, -}; - -#[wasm_bindgen] -impl Hash { - /// Create a new Hash object - /// - /// * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]` - #[wasm_bindgen(constructor)] - pub fn constructor(value: JsValue) -> Result<Hash, JsValue> { - if let Some(base58_str) = value.as_string() { - base58_str.parse::<Hash>().map_err(display_to_jsvalue) - } else if let Some(uint8_array) = value.dyn_ref::<Uint8Array>() { - Ok(Hash::new(&uint8_array.to_vec())) - } else if let Some(array) = value.dyn_ref::<Array>() { - let mut bytes = vec![]; - let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); - for x in iterator { - let x = x?; - - if let Some(n) = x.as_f64() { - if n >= 0. && n <= 255. { - bytes.push(n as u8); - continue; - } - } - return Err(format!("Invalid array argument: {:?}", x).into()); - } - Ok(Hash::new(&bytes)) - } else if value.is_undefined() { - Ok(Hash::default()) - } else { - Err("Unsupported argument".into()) - } - } - - /// Return the base58 string representation of the hash - pub fn toString(&self) -> String { - self.to_string() - } - - /// Checks if two `Hash`s are equal - pub fn equals(&self, other: &Hash) -> bool { - self == other - } - - /// Return the `Uint8Array` representation of the hash - pub fn toBytes(&self) -> Box<[u8]> { - self.0.clone().into() - } -} diff --git a/sdk/program/src/wasm/mod.rs b/sdk/program/src/wasm/mod.rs index b7939a142a2d17..c390efed559ab0 100644 --- a/sdk/program/src/wasm/mod.rs +++ b/sdk/program/src/wasm/mod.rs @@ -2,7 +2,6 @@ #![cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*; -pub mod hash; pub mod instructions; pub mod pubkey; pub mod system_instruction; diff --git a/sdk/sha256-hasher/Cargo.toml b/sdk/sha256-hasher/Cargo.toml new file mode 100644 index 00000000000000..3933e259f07424 --- /dev/null +++ b/sdk/sha256-hasher/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "solana-sha256-hasher" +description = "Solana SHA256 hashing" +documentation = "https://docs.rs/solana-sha256-hasher" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +solana-hash = { workspace = true } + +[target.'cfg(not(target_os = "solana"))'.dependencies] +sha2 = { workspace = true } + +[target.'cfg(target_os = "solana")'.dependencies] +# sha2 should be removed in the next breaking release, +# as there's no reason to use the crate instead of the syscall
# onchain +sha2 = { workspace = true, optional = true } +solana-define-syscall = { workspace = true } + +[features] +sha2 = ["dep:sha2"] + +[lints] +workspace = true diff --git a/sdk/sha256-hasher/src/lib.rs b/sdk/sha256-hasher/src/lib.rs new file mode 100644 index 00000000000000..fe34c3ea929a91 --- /dev/null +++ b/sdk/sha256-hasher/src/lib.rs @@ -0,0 +1,68 @@ +#![no_std] +#[cfg(any(feature = "sha2", not(target_os = "solana")))] +use sha2::{Digest, Sha256}; +#[cfg(target_os = "solana")] +use
solana_define_syscall::define_syscall; +use solana_hash::Hash; + +#[cfg(any(feature = "sha2", not(target_os = "solana")))] +#[derive(Clone, Default)] +pub struct Hasher { + hasher: Sha256, +} + +#[cfg(any(feature = "sha2", not(target_os = "solana")))] +impl Hasher { + pub fn hash(&mut self, val: &[u8]) { + self.hasher.update(val); + } + pub fn hashv(&mut self, vals: &[&[u8]]) { + for val in vals { + self.hash(val); + } + } + pub fn result(self) -> Hash { + let bytes: [u8; solana_hash::HASH_BYTES] = self.hasher.finalize().into(); + bytes.into() + } +} + +#[cfg(target_os = "solana")] +define_syscall!(fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); + +/// Return a Sha256 hash for the given data. +pub fn hashv(vals: &[&[u8]]) -> Hash { + // Perform the calculation inline, calling this from within a program is + // not supported + #[cfg(not(target_os = "solana"))] + { + let mut hasher = Hasher::default(); + hasher.hashv(vals); + hasher.result() + } + // Call via a system call to perform the calculation + #[cfg(target_os = "solana")] + { + let mut hash_result = [0; solana_hash::HASH_BYTES]; + unsafe { + sol_sha256( + vals as *const _ as *const u8, + vals.len() as u64, + &mut hash_result as *mut _ as *mut u8, + ); + } + Hash::new_from_array(hash_result) + } +} + +/// Return a Sha256 hash for the given data. +pub fn hash(val: &[u8]) -> Hash { + hashv(&[val]) +} + +/// Return the hash of the given hash extended with the given value. +pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash { + let mut hash_data = id.as_ref().to_vec(); + hash_data.extend_from_slice(val); + hash(&hash_data) +} diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 535c8dd9bf665e..c5022487e19e06 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -172,7 +172,7 @@ pub type Result<T> = result::Result<T, TransactionError>; #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "5LPHxp7TKPeV7GZ9pcT4NxNxJa3ZhvToDekCMAPvNWLv") + frozen_abi(digest = "sGWhrQNiMNnUjPSG5cZvxujYaxHaiU5ggbvp46hKZSN") )] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] pub struct Transaction { @@ -200,7 +200,7 @@ pub struct Transaction { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "5LPHxp7TKPeV7GZ9pcT4NxNxJa3ZhvToDekCMAPvNWLv") + frozen_abi(digest = "sGWhrQNiMNnUjPSG5cZvxujYaxHaiU5ggbvp46hKZSN") )] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] pub struct Transaction { From cfb9aae017857ebdab07dcd27eb2ca09ab8e7a4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:19:35 +0800 Subject: [PATCH 299/529] build(deps): bump serde_json from 1.0.127 to 1.0.128 (#2845) * build(deps): bump serde_json from 1.0.127 to 1.0.128 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.127 to 1.0.128. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/1.0.127...1.0.128) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62b36b6bd5d043..5724eb24b0b1ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5131,9 +5131,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 11d7bc1f9550cf..854a722b705008 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -337,7 +337,7 @@ seqlock = "0.2.0" serde = "1.0.209" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" serde_derive = "1.0.209" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.127" +serde_json = "1.0.128" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f0d9d78805ab85..8fb61dee255ab2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4290,9 +4290,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", From 4afd4bbbc47d7a54a92342b6a977ed0c0789a6ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:20:37 +0800 Subject: [PATCH 300/529] build(deps): bump tokio-util from 0.7.11 to 0.7.12 (#2846) Bumps [tokio-util](https://github.com/tokio-rs/tokio) from 0.7.11 to 0.7.12. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-util-0.7.11...tokio-util-0.7.12) --- updated-dependencies: - dependency-name: tokio-util dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5724eb24b0b1ea..a63717837f0b3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2606,7 +2606,7 @@ dependencies = [ "indexmap 2.5.0", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.12", "tracing", ] @@ -4772,7 +4772,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.11", + "tokio-util 0.7.12", "tower-service", "url 2.5.2", "wasm-bindgen", @@ -7260,7 +7260,7 @@ dependencies = [ "symlink", "thiserror", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.12", ] [[package]] @@ -9158,9 +9158,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -9282,7 +9282,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.12", "tower-layer", "tower-service", "tracing", From 1ab8e07d7a68d591bdac38598d88f80d291eb3f1 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 5 Sep 2024 15:54:12 +0400 Subject: [PATCH 301/529] Extract derivation-path crate (#2056) * extract derivation-path crate * remove now unused dep * update derivation_path dependents * remove now-unused derivation-path dep --- Cargo.lock | 21 +++++++++++++++--- Cargo.toml | 2 ++ clap-utils/Cargo.toml | 1 + clap-utils/src/keypair.rs | 2 +- clap-v3-utils/Cargo.toml | 1 + clap-v3-utils/src/input_parsers/signer.rs | 2 +- clap-v3-utils/src/keygen/derivation_path.rs | 2 +- clap-v3-utils/src/keypair.rs | 2 +- keygen/Cargo.toml | 1 + programs/sbf/Cargo.lock | 18 ++++++++++++--- remote-wallet/Cargo.toml | 1 + remote-wallet/src/ledger.rs | 2 +- remote-wallet/src/remote_keypair.rs | 2 +- remote-wallet/src/remote_wallet.rs | 2 +- sdk/Cargo.toml | 4 +--- sdk/derivation-path/Cargo.toml | 22 +++++++++++++++++++ .../src/lib.rs} | 0 sdk/src/lib.rs | 3 ++- sdk/src/signer/keypair.rs | 2 +- sdk/src/signer/mod.rs | 2 +- zk-sdk/Cargo.toml | 1 + zk-sdk/src/encryption/auth_encryption.rs | 2 +- zk-sdk/src/encryption/elgamal.rs | 2 +- zk-token-sdk/Cargo.toml | 1 + .../src/encryption/auth_encryption.rs | 2 +- zk-token-sdk/src/encryption/elgamal.rs | 2 +- 26 files changed, 79 insertions(+), 23 deletions(-) create mode 100644 sdk/derivation-path/Cargo.toml rename sdk/{src/derivation_path.rs => derivation-path/src/lib.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index a63717837f0b3e..c6ead351294b2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5917,6 +5917,7 @@ dependencies = [ "chrono", "clap 2.33.3", "rpassword", + "solana-derivation-path", "solana-remote-wallet", "solana-sdk", "tempfile", @@ -5934,6 +5935,7 @@ dependencies = [ "chrono", "clap 3.2.23", "rpassword", + "solana-derivation-path", "solana-remote-wallet", "solana-sdk", "solana-zk-token-sdk", @@ -6316,6 +6318,17 @@ dependencies = [ name = "solana-define-syscall" version = "2.1.0" +[[package]] +name = "solana-derivation-path" +version = "2.1.0" +dependencies = [ + "assert_matches", + "derivation-path", + "qstring", + "thiserror", + "uriparse", +] + [[package]] name = "solana-dos" version = "2.1.0" @@ -6610,6 +6623,7 @@ dependencies = [ "num_cpus", "solana-clap-v3-utils", "solana-cli-config", + 
"solana-derivation-path", "solana-remote-wallet", "solana-sdk", "solana-version", @@ -7197,6 +7211,7 @@ dependencies = [ "parking_lot 0.12.3", "qstring", "semver 1.0.23", + "solana-derivation-path", "solana-sdk", "thiserror", "uriparse", @@ -7492,7 +7507,6 @@ dependencies = [ "byteorder", "chrono", "curve25519-dalek 4.1.3", - "derivation-path", "digest 0.10.7", "ed25519-dalek", "ed25519-dalek-bip32", @@ -7510,7 +7524,6 @@ dependencies = [ "num-traits", "num_enum", "pbkdf2 0.11.0", - "qstring", "qualifier_attr", "rand 0.7.3", "rand 0.8.5", @@ -7525,6 +7538,7 @@ dependencies = [ "siphasher", "solana-bn254", "solana-decode-error", + "solana-derivation-path", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -7539,7 +7553,6 @@ dependencies = [ "static_assertions", "thiserror", "tiny-bip39", - "uriparse", "wasm-bindgen", ] @@ -8287,6 +8300,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3", + "solana-derivation-path", "solana-program", "solana-sdk", "subtle", @@ -8344,6 +8358,7 @@ dependencies = [ "serde_json", "sha3", "solana-curve25519", + "solana-derivation-path", "solana-program", "solana-sdk", "subtle", diff --git a/Cargo.toml b/Cargo.toml index 854a722b705008..32d6b501a93fe2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,6 +105,7 @@ members = [ "sdk/cargo-test-sbf", "sdk/clock", "sdk/decode-error", + "sdk/derivation-path", "sdk/gen-headers", "sdk/hash", "sdk/macro", @@ -380,6 +381,7 @@ solana-cost-model = { path = "cost-model", version = "=2.1.0" } solana-curve25519 = { path = "curves/curve25519", version = "=2.1.0" } solana-decode-error = { path = "sdk/decode-error", version = "=2.1.0" } solana-define-syscall = { path = "define-syscall", version = "=2.1.0" } +solana-derivation-path = { path = "sdk/derivation-path", version = "=2.1.0" } solana-download-utils = { path = "download-utils", version = "=2.1.0" } solana-entry = { path = "entry", version = "=2.1.0" } solana-faucet = { path = "faucet", version = "=2.1.0" } diff --git a/clap-utils/Cargo.toml b/clap-utils/Cargo.toml index c51dc0f1d4b060..48969c708957eb 100644 --- a/clap-utils/Cargo.toml +++ b/clap-utils/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } chrono = { workspace = true, features = ["default"] } clap = "2.33.0" rpassword = { workspace = true } +solana-derivation-path = { workspace = true } solana-remote-wallet = { workspace = true } solana-sdk = { workspace = true } thiserror = { workspace = true } diff --git a/clap-utils/src/keypair.rs b/clap-utils/src/keypair.rs index ead51c9970ea93..bddd20c3361c18 100644 --- a/clap-utils/src/keypair.rs +++ b/clap-utils/src/keypair.rs @@ -18,13 +18,13 @@ use { bip39::{Language, Mnemonic, Seed}, clap::ArgMatches, rpassword::prompt_password, + solana_derivation_path::{DerivationPath, DerivationPathError}, solana_remote_wallet::{ locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError}, remote_keypair::generate_remote_keypair, remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager}, }, solana_sdk::{ - derivation_path::{DerivationPath, DerivationPathError}, hash::Hash, message::Message, pubkey::Pubkey, diff --git a/clap-v3-utils/Cargo.toml b/clap-v3-utils/Cargo.toml index 85cc2092ec07a9..a42f1a183fe37d 100644 --- a/clap-v3-utils/Cargo.toml +++ b/clap-v3-utils/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } chrono = { workspace = true, features = ["default"] } clap = { version = "3.2.23", features = ["cargo"] } rpassword = { workspace = true } +solana-derivation-path = { workspace = true } 
solana-remote-wallet = { workspace = true } solana-sdk = { workspace = true } solana-zk-token-sdk = { workspace = true } diff --git a/clap-v3-utils/src/input_parsers/signer.rs b/clap-v3-utils/src/input_parsers/signer.rs index 9ce69d250f1140..73241998a6ec5f 100644 --- a/clap-v3-utils/src/input_parsers/signer.rs +++ b/clap-v3-utils/src/input_parsers/signer.rs @@ -5,12 +5,12 @@ use { ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG, }, clap::{builder::ValueParser, ArgMatches}, + solana_derivation_path::{DerivationPath, DerivationPathError}, solana_remote_wallet::{ locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError}, remote_wallet::RemoteWalletManager, }, solana_sdk::{ - derivation_path::{DerivationPath, DerivationPathError}, pubkey::Pubkey, signature::{read_keypair_file, Keypair, Signature, Signer}, }, diff --git a/clap-v3-utils/src/keygen/derivation_path.rs b/clap-v3-utils/src/keygen/derivation_path.rs index 5e0d79a1cf38da..81a95693e1c968 100644 --- a/clap-v3-utils/src/keygen/derivation_path.rs +++ b/clap-v3-utils/src/keygen/derivation_path.rs @@ -1,6 +1,6 @@ use { clap::{Arg, ArgMatches}, - solana_sdk::derivation_path::DerivationPath, + solana_derivation_path::DerivationPath, std::error, }; diff --git a/clap-v3-utils/src/keypair.rs b/clap-v3-utils/src/keypair.rs index c140f9573ba38d..47f6719a2bc7eb 100644 --- a/clap-v3-utils/src/keypair.rs +++ b/clap-v3-utils/src/keypair.rs @@ -18,12 +18,12 @@ use { bip39::{Language, Mnemonic, Seed}, clap::ArgMatches, rpassword::prompt_password, + solana_derivation_path::DerivationPath, solana_remote_wallet::{ remote_keypair::generate_remote_keypair, remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager}, }, solana_sdk::{ - derivation_path::DerivationPath, hash::Hash, message::Message, pubkey::Pubkey, diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml index 0d36a5213a929c..4dd74305996883 100644 --- a/keygen/Cargo.toml +++ b/keygen/Cargo.toml @@ -16,6 +16,7 @@ dirs-next = { workspace = true } num_cpus = { workspace = true } solana-clap-v3-utils = { workspace = true } solana-cli-config = { workspace = true } +solana-derivation-path = { workspace = true } solana-remote-wallet = { workspace = true, features = ["default"] } solana-sdk = { workspace = true } solana-version = { workspace = true } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8fb61dee255ab2..370659316da19e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4762,6 +4762,7 @@ dependencies = [ "chrono", "clap", "rpassword", + "solana-derivation-path", "solana-remote-wallet", "solana-sdk", "thiserror", @@ -5017,6 +5018,16 @@ dependencies = [ name = "solana-define-syscall" version = "2.1.0" +[[package]] +name = "solana-derivation-path" +version = "2.1.0" +dependencies = [ + "derivation-path", + "qstring", + "thiserror", + "uriparse", +] + [[package]] name = "solana-download-utils" version = "2.1.0" @@ -5588,6 +5599,7 @@ dependencies = [ "parking_lot 0.12.2", "qstring", "semver", + "solana-derivation-path", "solana-sdk", "thiserror", "uriparse", @@ -6291,7 +6303,6 @@ dependencies = [ "bytemuck_derive", "byteorder 1.5.0", "chrono", - "derivation-path", "digest 0.10.7", "ed25519-dalek", "ed25519-dalek-bip32", @@ -6308,7 +6319,6 @@ dependencies = [ "num-traits", "num_enum", "pbkdf2 0.11.0", - "qstring", "qualifier_attr", "rand 0.7.3", "rand 0.8.5", @@ -6322,6 +6332,7 @@ dependencies = [ "siphasher", "solana-bn254", "solana-decode-error", + "solana-derivation-path", "solana-program", "solana-program-memory", 
"solana-sanitize", @@ -6330,7 +6341,6 @@ dependencies = [ "solana-serde-varint", "solana-short-vec", "thiserror", - "uriparse", "wasm-bindgen", ] @@ -6843,6 +6853,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3", + "solana-derivation-path", "solana-program", "solana-sdk", "subtle", @@ -6885,6 +6896,7 @@ dependencies = [ "serde_json", "sha3", "solana-curve25519", + "solana-derivation-path", "solana-program", "solana-sdk", "subtle", diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml index 8cea360d7c14ca..a11c254587f907 100644 --- a/remote-wallet/Cargo.toml +++ b/remote-wallet/Cargo.toml @@ -19,6 +19,7 @@ num-traits = { workspace = true } parking_lot = { workspace = true } qstring = { workspace = true } semver = { workspace = true } +solana-derivation-path = { workspace = true } solana-sdk = { workspace = true } thiserror = { workspace = true } uriparse = { workspace = true } diff --git a/remote-wallet/src/ledger.rs b/remote-wallet/src/ledger.rs index e2f9a5fa07592a..66d46d752c9cf9 100644 --- a/remote-wallet/src/ledger.rs +++ b/remote-wallet/src/ledger.rs @@ -5,7 +5,7 @@ use { console::Emoji, dialoguer::{theme::ColorfulTheme, Select}, semver::Version as FirmwareVersion, - solana_sdk::derivation_path::DerivationPath, + solana_derivation_path::DerivationPath, std::{fmt, rc::Rc}, }; #[cfg(feature = "hidapi")] diff --git a/remote-wallet/src/remote_keypair.rs b/remote-wallet/src/remote_keypair.rs index d37eefe2427175..13f14d03de881b 100644 --- a/remote-wallet/src/remote_keypair.rs +++ b/remote-wallet/src/remote_keypair.rs @@ -7,8 +7,8 @@ use { RemoteWalletType, }, }, + solana_derivation_path::DerivationPath, solana_sdk::{ - derivation_path::DerivationPath, pubkey::Pubkey, signature::{Signature, Signer, SignerError}, }, diff --git a/remote-wallet/src/remote_wallet.rs b/remote-wallet/src/remote_wallet.rs index 738a9bbf52114d..02c739405b1282 100644 --- a/remote-wallet/src/remote_wallet.rs +++ b/remote-wallet/src/remote_wallet.rs @@ -8,8 +8,8 @@ use { }, log::*, parking_lot::RwLock, + solana_derivation_path::{DerivationPath, DerivationPathError}, solana_sdk::{ - derivation_path::{DerivationPath, DerivationPathError}, pubkey::Pubkey, signature::{Signature, SignerError}, }, diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 5427e5dedb93f8..4e6613a8eb932d 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -54,7 +54,6 @@ bytemuck_derive = { workspace = true } byteorder = { workspace = true, optional = true } chrono = { workspace = true, features = ["alloc"], optional = true } curve25519-dalek = { workspace = true, optional = true } -derivation-path = { workspace = true } digest = { workspace = true, optional = true } ed25519-dalek = { workspace = true, optional = true } ed25519-dalek-bip32 = { workspace = true, optional = true } @@ -72,7 +71,6 @@ num-derive = { workspace = true } num-traits = { workspace = true } num_enum = { workspace = true } pbkdf2 = { workspace = true } -qstring = { workspace = true } qualifier_attr = { workspace = true, optional = true } rand = { workspace = true, optional = true } rand0-7 = { package = "rand", version = "0.7", optional = true } @@ -86,6 +84,7 @@ sha3 = { workspace = true, optional = true } siphasher = { workspace = true } solana-bn254 = { workspace = true } solana-decode-error = { workspace = true } +solana-derivation-path = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-program = { workspace = true } @@ -96,7 +95,6 @@ 
solana-secp256k1-recover = { workspace = true } solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } thiserror = { workspace = true } -uriparse = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = { version = "0.1", features = ["wasm-bindgen"] } diff --git a/sdk/derivation-path/Cargo.toml b/sdk/derivation-path/Cargo.toml new file mode 100644 index 00000000000000..8b82b091a07861 --- /dev/null +++ b/sdk/derivation-path/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "solana-derivation-path" +description = "Solana BIP44 derivation paths." +documentation = "https://docs.rs/solana-derivation-path" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +derivation-path = { workspace = true } +qstring = { workspace = true } +thiserror = { workspace = true } +uriparse = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/src/derivation_path.rs b/sdk/derivation-path/src/lib.rs similarity index 100% rename from sdk/src/derivation_path.rs rename to sdk/derivation-path/src/lib.rs diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index fe5176f0f6947b..5a1b87b211724b 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -63,7 +63,6 @@ pub mod account_utils; pub mod client; pub mod commitment_config; pub mod compute_budget; -pub mod derivation_path; pub mod deserialize_utils; pub mod ed25519_instruction; pub mod entrypoint; @@ -113,6 +112,8 @@ pub mod wasm; pub use solana_bn254 as alt_bn128; #[deprecated(since = "2.1.0", note = "Use `solana-decode-error` crate instead")] pub use solana_decode_error as decode_error; +#[deprecated(since = "2.1.0", note = "Use `solana-derivation-path` crate instead")] +pub use solana_derivation_path as derivation_path; #[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")] pub use solana_program_memory as program_memory; #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")] diff --git a/sdk/src/signer/keypair.rs b/sdk/src/signer/keypair.rs index 9e6088c1b5444f..ecd98ec3c7aa3f 100644 --- a/sdk/src/signer/keypair.rs +++ b/sdk/src/signer/keypair.rs @@ -4,7 +4,6 @@ use wasm_bindgen::prelude::*; use { crate::{ - derivation_path::DerivationPath, pubkey::Pubkey, signature::Signature, signer::{EncodableKey, EncodableKeypair, SeedDerivable, Signer, SignerError}, @@ -13,6 +12,7 @@ use { ed25519_dalek_bip32::Error as Bip32Error, hmac::Hmac, rand0_7::{rngs::OsRng, CryptoRng, RngCore}, + solana_derivation_path::DerivationPath, std::{ error, io::{Read, Write}, diff --git a/sdk/src/signer/mod.rs b/sdk/src/signer/mod.rs index 66e95267fa2ca7..b5be748ecfb2a3 100644 --- a/sdk/src/signer/mod.rs +++ b/sdk/src/signer/mod.rs @@ -4,12 +4,12 @@ use { crate::{ - derivation_path::DerivationPath, pubkey::Pubkey, signature::{PresignerError, Signature}, transaction::TransactionError, }, itertools::Itertools, + solana_derivation_path::DerivationPath, std::{ error, fs::{self, File, OpenOptions}, diff --git a/zk-sdk/Cargo.toml b/zk-sdk/Cargo.toml index c7ffa7569e0069..5a1ff83a620b9c 100644 --- a/zk-sdk/Cargo.toml +++ b/zk-sdk/Cargo.toml @@ -33,6 +33,7 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } sha3 = { workspace = true } +solana-derivation-path = { workspace = true } 
solana-sdk = { workspace = true } subtle = { workspace = true } zeroize = { workspace = true, features = ["zeroize_derive"] } diff --git a/zk-sdk/src/encryption/auth_encryption.rs b/zk-sdk/src/encryption/auth_encryption.rs index f8e513c4e5b998..14c145decb6736 100644 --- a/zk-sdk/src/encryption/auth_encryption.rs +++ b/zk-sdk/src/encryption/auth_encryption.rs @@ -14,8 +14,8 @@ use { base64::{prelude::BASE64_STANDARD, Engine}, rand::{rngs::OsRng, Rng}, sha3::{Digest, Sha3_512}, + solana_derivation_path::DerivationPath, solana_sdk::{ - derivation_path::DerivationPath, signature::Signature, signer::{ keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, SeedDerivable, diff --git a/zk-sdk/src/encryption/elgamal.rs b/zk-sdk/src/encryption/elgamal.rs index 2ba02df21c9d5d..850db36329c715 100644 --- a/zk-sdk/src/encryption/elgamal.rs +++ b/zk-sdk/src/encryption/elgamal.rs @@ -34,8 +34,8 @@ use { rand::rngs::OsRng, serde::{Deserialize, Serialize}, sha3::{Digest, Sha3_512}, + solana_derivation_path::DerivationPath, solana_sdk::{ - derivation_path::DerivationPath, signature::Signature, signer::{ keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, EncodableKeypair, diff --git a/zk-token-sdk/Cargo.toml b/zk-token-sdk/Cargo.toml index 20d43f998f2eaa..a47164889ea133 100644 --- a/zk-token-sdk/Cargo.toml +++ b/zk-token-sdk/Cargo.toml @@ -35,6 +35,7 @@ serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } sha3 = { workspace = true } +solana-derivation-path = { workspace = true } solana-sdk = { workspace = true } subtle = { workspace = true } zeroize = { workspace = true, features = ["zeroize_derive"] } diff --git a/zk-token-sdk/src/encryption/auth_encryption.rs b/zk-token-sdk/src/encryption/auth_encryption.rs index 52ddb236514893..c61d27e486f7f1 100644 --- a/zk-token-sdk/src/encryption/auth_encryption.rs +++ b/zk-token-sdk/src/encryption/auth_encryption.rs @@ -6,8 +6,8 @@ use { crate::errors::AuthenticatedEncryptionError, base64::{prelude::BASE64_STANDARD, Engine}, sha3::{Digest, Sha3_512}, + solana_derivation_path::DerivationPath, solana_sdk::{ - derivation_path::DerivationPath, signature::Signature, signer::{ keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, SeedDerivable, diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs index d0fcaf8b48bd41..d4ffc2028bf27a 100644 --- a/zk-token-sdk/src/encryption/elgamal.rs +++ b/zk-token-sdk/src/encryption/elgamal.rs @@ -33,8 +33,8 @@ use { traits::Identity, }, serde::{Deserialize, Serialize}, + solana_derivation_path::DerivationPath, solana_sdk::{ - derivation_path::DerivationPath, signature::Signature, signer::{ keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, EncodableKeypair, From e196a080a1b0045c14363600b1c9b762be7bb451 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 5 Sep 2024 08:33:50 -0500 Subject: [PATCH 302/529] Consistent TransactionPriorityId ordering/equality (#2832) --- core/src/banking_stage/scheduler_messages.rs | 2 +- .../scheduler_controller.rs | 12 ++--- .../transaction_id_generator.rs | 11 ++-- .../transaction_priority_id.rs | 54 ++++++++++++++----- 4 files changed, 55 insertions(+), 24 deletions(-) diff --git a/core/src/banking_stage/scheduler_messages.rs b/core/src/banking_stage/scheduler_messages.rs index 92181e2abf9655..ee5c4ebeef9738 100644 --- a/core/src/banking_stage/scheduler_messages.rs +++ b/core/src/banking_stage/scheduler_messages.rs @@ -21,7 +21,7 @@ impl Display 
for TransactionBatchId { } /// A unique identifier for a transaction. -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] pub struct TransactionId(u64); impl TransactionId { diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 417d5b95bf5bda..b576fd1576511d 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -858,7 +858,7 @@ mod tests { &Keypair::new(), &Pubkey::new_unique(), 1, - 1, + 1000, bank.last_blockhash(), ); let tx2 = create_and_fund_prioritized_transfer( @@ -867,7 +867,7 @@ mod tests { &Keypair::new(), &Pubkey::new_unique(), 1, - 2, + 2000, bank.last_blockhash(), ); let tx1_hash = tx1.message().hash(); @@ -914,7 +914,7 @@ mod tests { &Keypair::new(), &pk, 1, - 1, + 1000, bank.last_blockhash(), ); let tx2 = create_and_fund_prioritized_transfer( @@ -923,7 +923,7 @@ mod tests { &Keypair::new(), &pk, 1, - 2, + 2000, bank.last_blockhash(), ); let tx1_hash = tx1.message().hash(); @@ -1105,7 +1105,7 @@ mod tests { &Keypair::new(), &Pubkey::new_unique(), 1, - 1, + 1000, bank.last_blockhash(), ); let tx2 = create_and_fund_prioritized_transfer( @@ -1114,7 +1114,7 @@ mod tests { &Keypair::new(), &Pubkey::new_unique(), 1, - 2, + 2000, bank.last_blockhash(), ); let tx1_hash = tx1.message().hash(); diff --git a/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs b/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs index 0f88fd769d1a2a..f54523890f9caf 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_id_generator.rs @@ -1,16 +1,21 @@ use crate::banking_stage::scheduler_messages::TransactionId; -/// Simple sequential ID generator for `TransactionId`s. +/// Simple reverse-sequential ID generator for `TransactionId`s. /// These IDs uniquely identify transactions during the scheduling process. 
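Aside on the derive above: Rust's derived `PartialOrd`/`Ord` compare struct fields in declaration order, so `priority` dominates and `id` only breaks ties; combined with the reverse-sequential id generator introduced next, earlier-arriving transactions win priority ties in a max-first scheduler. A minimal standalone sketch under those assumptions (the struct here is an illustrative stand-in, not the crate's type):

    // Illustrative sketch: derived Ord is lexicographic over fields
    // top-to-bottom, so `priority` is compared first, then `id`.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct TransactionPriorityId {
        priority: u64,
        id: u64, // stand-in for TransactionId
    }

    fn main() {
        let a = TransactionPriorityId { priority: 1, id: u64::MAX };     // first id handed out
        let b = TransactionPriorityId { priority: 1, id: u64::MAX - 1 }; // next id
        // Equal priority: the reverse-sequential generator makes the earlier
        // transaction compare greater, so a max-first scheduler pops it first.
        assert!(a > b);
        // Higher priority always wins regardless of id.
        let c = TransactionPriorityId { priority: 2, id: 0 };
        assert!(c > a);
    }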
-#[derive(Default)] pub struct TransactionIdGenerator { next_id: u64, } +impl Default for TransactionIdGenerator { + fn default() -> Self { + Self { next_id: u64::MAX } + } +} + impl TransactionIdGenerator { pub fn next(&mut self) -> TransactionId { let id = self.next_id; - self.next_id = self.next_id.wrapping_add(1); + self.next_id = self.next_id.wrapping_sub(1); TransactionId::new(id) } } diff --git a/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs b/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs index 74b7105bd1d1e5..9857a689519502 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_priority_id.rs @@ -5,8 +5,7 @@ use { }; /// A unique identifier tied with priority ordering for a transaction/packet: -/// - `id` has no effect on ordering -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub(crate) struct TransactionPriorityId { pub(crate) priority: u64, pub(crate) id: TransactionId, @@ -18,18 +17,6 @@ impl TransactionPriorityId { } } -impl Ord for TransactionPriorityId { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.priority.cmp(&other.priority) - } -} - -impl PartialOrd for TransactionPriorityId { - fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { - Some(self.cmp(other)) - } -} - impl Hash for TransactionPriorityId { fn hash<H: Hasher>(&self, state: &mut H) { self.id.hash(state) } } @@ -41,3 +28,42 @@ impl TopLevelId<Self> for TransactionPriorityId { *self } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_transaction_priority_id_ordering() { + // Higher priority first + { + let id1 = TransactionPriorityId::new(1, TransactionId::new(1)); + let id2 = TransactionPriorityId::new(2, TransactionId::new(1)); + assert!(id1 < id2); + assert!(id1 <= id2); + assert!(id2 > id1); + assert!(id2 >= id1); + } + + // Equal priority then compare by id + { + let id1 = TransactionPriorityId::new(1, TransactionId::new(1)); + let id2 = TransactionPriorityId::new(1, TransactionId::new(2)); + assert!(id1 < id2); + assert!(id1 <= id2); + assert!(id2 > id1); + assert!(id2 >= id1); + } + + // Equal priority and id + { + let id1 = TransactionPriorityId::new(1, TransactionId::new(1)); + let id2 = TransactionPriorityId::new(1, TransactionId::new(1)); + assert_eq!(id1, id2); + assert!(id1 >= id2); + assert!(id1 <= id2); + assert!(id2 >= id1); + assert!(id2 <= id1); + } + } +} From 98c8853626f9405f53c45ef85e473528c8a5470f Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 5 Sep 2024 09:42:37 -0500 Subject: [PATCH 303/529] assert refcount = 0 for zero lamport 1 refcount deletes (#2774) * assert refcount = 0 for zero lamport 1 refcount deletes * add test to cover zero accounts shrink unref assert 0 * fix a test * comments * add info to assert * pr changes * log instead of assert * Update accounts-db/src/accounts_index.rs Co-authored-by: Brooks --------- Co-authored-by: HaoranYi Co-authored-by: HaoranYi <219428+HaoranYi@users.noreply.github.com> Co-authored-by: Brooks --- accounts-db/src/accounts_db.rs | 106 ++++++++++++++++++++++++------ accounts-db/src/accounts_index.rs | 41 ++++++++++-- 2 files changed, 119 insertions(+), 28 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index d528c4a3c6b47d..76b8e3f001c1c8 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4247,6 +4247,7 @@ impl AccountsDb
{ zero_lamport_single_ref_pubkeys: &[&Pubkey], slot: Slot, stats: &ShrinkStats, + do_assert: bool, ) { stats.purged_zero_lamports.fetch_add( zero_lamport_single_ref_pubkeys.len() as u64, @@ -4255,10 +4256,15 @@ impl AccountsDb { // we have to unref before we `purge_keys_exact`. Otherwise, we could race with the foreground with tx processing // reviving this index entry and then we'd unref the revived version, which is a refcount bug. + self.accounts_index.scan( zero_lamport_single_ref_pubkeys.iter().cloned(), |_pubkey, _slots_refs, _entry| AccountsIndexScanResult::Unref, - Some(AccountsIndexScanResult::Unref), + if do_assert { + Some(AccountsIndexScanResult::UnrefAssert0) + } else { + Some(AccountsIndexScanResult::UnrefLog0) + }, false, ScanFilter::All, ); @@ -4286,6 +4292,7 @@ impl AccountsDb { &shrink_collect.zero_lamport_single_ref_pubkeys, shrink_collect.slot, stats, + false, ); // Purge old, overwritten storage entries @@ -11150,6 +11157,35 @@ pub mod tests { assert_eq!(accounts.alive_account_count_in_slot(1), 0); } + #[test] + #[should_panic(expected = "ref count expected to be zero")] + fn test_remove_zero_lamport_multi_ref_accounts_panic() { + let accounts = AccountsDb::new_single_for_tests(); + let pubkey_zero = Pubkey::from([1; 32]); + let one_lamport_account = + AccountSharedData::new(1, 0, AccountSharedData::default().owner()); + + let zero_lamport_account = + AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + let slot = 1; + + accounts.store_for_tests(slot, &[(&pubkey_zero, &one_lamport_account)]); + accounts.calculate_accounts_delta_hash(slot); + accounts.add_root_and_flush_write_cache(slot); + + accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]); + accounts.calculate_accounts_delta_hash(slot + 1); + accounts.add_root_and_flush_write_cache(slot + 1); + + // This should panic because there are 2 refs for pubkey_zero. 
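The reworked `unref` in this patch reports the refcount prior to the decrement, which is what lets `UnrefAssert0` and `UnrefLog0` distinguish a clean single-ref delete from an under-refcount bug. A minimal standalone sketch of that pattern using only std atomics (names are illustrative, not the accounts-db types):

    use std::sync::atomic::{AtomicU64, Ordering};

    // Sketch: decrement a refcount and return the value it held *before*
    // the subtraction; a prior value of 0 would indicate an underflow bug.
    fn unref(ref_count: &AtomicU64) -> u64 {
        ref_count.fetch_sub(1, Ordering::Release)
    }

    fn main() {
        let rc = AtomicU64::new(1);
        // A caller that requires the entry to have been single-referenced
        // asserts the prior value was exactly 1 (so the count is now 0).
        assert_eq!(unref(&rc), 1, "ref count expected to be zero after unref");
    }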
+ accounts.remove_zero_lamport_single_ref_accounts_after_shrink( + &[&pubkey_zero], + slot, + &ShrinkStats::default(), + true, + ); + } + #[test] fn test_remove_zero_lamport_single_ref_accounts_after_shrink() { for pass in 0..3 { (false, ()) }); - let zero_lamport_single_ref_pubkeys = [&pubkey_zero]; + let zero_lamport_single_ref_pubkeys = + if pass < 2 { vec![&pubkey_zero] } else { vec![] }; accounts.remove_zero_lamport_single_ref_accounts_after_shrink( &zero_lamport_single_ref_pubkeys, slot, &ShrinkStats::default(), + true, ); accounts.accounts_index.get_and_then(&pubkey_zero, |entry| { - if pass == 0 { - // should not exist in index at all - assert!(entry.is_none(), "{pass}"); - } else { - // alive only in slot + 1 - assert_eq!(entry.unwrap().slot_list.read().unwrap().len(), 1); - assert_eq!( - entry + match pass { + 0 => { + // should not exist in index at all + assert!(entry.is_none(), "{pass}"); + } + 1 => { + // alive only in slot + 1 + assert_eq!(entry.unwrap().slot_list.read().unwrap().len(), 1); + assert_eq!( + entry + .unwrap() + .slot_list + .read() + .unwrap() + .first() + .map(|(s, _)| s) + .cloned() + .unwrap(), + slot + 1 + ); + let expected_ref_count = 0; + assert_eq!( + entry.map(|e| e.ref_count()), + Some(expected_ref_count), + "{pass}" + ); + } + 2 => { + // alive in both slot, slot + 1 + assert_eq!(entry.unwrap().slot_list.read().unwrap().len(), 2); + + let slots = entry .unwrap() .slot_list .read() .unwrap() - .first() + .iter() .map(|(s, _)| s) .cloned() - .unwrap(), - slot + 1 - ); - // refcount = 1 if we flushed the write cache for slot + 1 - let expected_ref_count = if pass < 2 { 0 } else { 1 }; - assert_eq!( - entry.map(|e| e.ref_count()), - Some(expected_ref_count), - "{pass}" - ); + .collect::<Vec<_>>(); + assert_eq!(slots, vec![slot, slot + 1]); + let expected_ref_count = 2; + assert_eq!( + entry.map(|e| e.ref_count()), + Some(expected_ref_count), + "{pass}" + ); + } + _ => { + unreachable!("Shouldn't reach here.") + } } (false, ()) }); diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index fbd99409a50771..a849d08af385f6 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -311,14 +311,15 @@ impl AccountMapEntryInner { } /// decrement the ref count - /// return true if the old refcount was already 0. This indicates an under refcounting error in the system. - pub fn unref(&self) -> bool { + /// return the refcount prior to subtracting 1 + /// 0 indicates an under refcounting error in the system. + pub fn unref(&self) -> RefCount { let previous = self.ref_count.fetch_sub(1, Ordering::Release); self.set_dirty(true); if previous == 0 { inc_new_counter_info!("accounts_index-deref_from_0", 1); } - previous == 0 + previous } pub fn dirty(&self) -> bool { @@ -647,6 +648,10 @@ pub enum AccountsIndexScanResult { KeepInMemory, /// reduce refcount by 1 Unref, + /// reduce refcount by 1 and assert that ref_count = 0 after unref + UnrefAssert0, + /// reduce refcount by 1 and log if ref_count != 0 after unref + UnrefLog0, } #[derive(Debug)] @@ -1453,12 +1458,34 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { }; cache = match result { AccountsIndexScanResult::Unref => { - if locked_entry.unref() { + if locked_entry.unref() == 0 { info!("scan: refcount of item already at 0: {pubkey}"); self.unref_zero_count.fetch_add(1, Ordering::Relaxed); } true } + AccountsIndexScanResult::UnrefAssert0 => { + assert_eq!( + locked_entry.unref(), + 1, + "ref count expected to be zero, but is {}!
{pubkey}, {:?}", locked_entry.ref_count(), locked_entry.slot_list.read().unwrap(), ); true } + AccountsIndexScanResult::UnrefLog0 => { + let old_ref = locked_entry.unref(); + if old_ref != 1 { + info!("Unexpected unref {pubkey} with {old_ref} {:?}, expect old_ref to be 1", locked_entry.slot_list.read().unwrap()); + datapoint_warn!( + "accounts_db-unexpected-unref-zero", + ("old_ref", old_ref, i64), + ("pubkey", pubkey.to_string(), String), + ); + } + true + } AccountsIndexScanResult::KeepInMemory => true, AccountsIndexScanResult::OnlyKeepInMemoryIfDirty => false, }; @@ -4065,9 +4092,9 @@ pub mod tests { assert!(map.get_internal_inner(&key, |entry| { // check refcount BEFORE the unref assert_eq!(u64::from(!expected), entry.unwrap().ref_count()); - // first time, ref count was at 1, we can unref once. Unref should return false. - // second time, ref count was at 0, it is an error to unref. Unref should return true - assert_eq!(expected, entry.unwrap().unref()); + // first time, ref count was at 1, we can unref once. Unref should return 1. + // second time, ref count was at 0, it is an error to unref. Unref should return 0 + assert_eq!(u64::from(!expected), entry.unwrap().unref()); // check refcount AFTER the unref assert_eq!( if expected { From 7f2013d0399e2da474cbeba2c2784a42e9c60810 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 5 Sep 2024 20:18:54 +0200 Subject: [PATCH 304/529] Adjustments of loader-v4 (part 3) (#2821) * Fixes two bugs in loader-v4. * Adds instructions_to_load_program_of_loader_v4() and load_program_of_loader_v4(). Removes load_and_finalize_program() and load_program(). --- programs/loader-v4/src/lib.rs | 3 +- runtime/src/loader_utils.rs | 147 ++++++++++++++++++++-------------- 2 files changed, 91 insertions(+), 59 deletions(-) diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index f8e2d2f6eec80f..c35b67a45ff9ce 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -167,6 +167,7 @@ pub fn process_instruction_truncate( } else { let rent = invoke_context.get_sysvar_cache().get_rent()?; rent.minimum_balance(LoaderV4State::program_data_offset().saturating_add(new_size as usize)) + .max(1) }; match program.get_lamports().cmp(&required_lamports) { std::cmp::Ordering::Less => { @@ -279,7 +280,7 @@ pub fn process_instruction_deploy( }; let executor = ProgramCacheEntry::new( &loader_v4::id(), - environments.program_runtime_v2.clone(), + environments.program_runtime_v1.clone(), deployment_slot, effective_slot, programdata, diff --git a/runtime/src/loader_utils.rs b/runtime/src/loader_utils.rs index 436a7242d93fd7..61404464bce264 100644 --- a/runtime/src/loader_utils.rs +++ b/runtime/src/loader_utils.rs @@ -8,7 +8,7 @@ use { client::{Client, SyncClient}, clock::Clock, instruction::{AccountMeta, Instruction}, - loader_instruction, + loader_v4, message::Message, pubkey::Pubkey, signature::{Keypair, Signer}, @@ -65,63 +65,6 @@ pub fn create_program(bank: &Bank, loader_id: &Pubkey, name: &str) -> Pubkey { program_id } -pub fn load_and_finalize_program<T: Client>( - bank_client: &T, - loader_id: &Pubkey, - program_keypair: Option<Keypair>, - payer_keypair: &Keypair, - name: &str, -) -> (Keypair, Instruction) { - let program = load_program_from_file(name); - let program_keypair = program_keypair.unwrap_or_else(|| { - let program_keypair = Keypair::new(); - let instruction = system_instruction::create_account( - &payer_keypair.pubkey(), - &program_keypair.pubkey(), - 1.max( - bank_client
.get_minimum_balance_for_rent_exemption(program.len()) - .unwrap(), - ), - program.len() as u64, - loader_id, - ); - let message = Message::new(&[instruction], Some(&payer_keypair.pubkey())); - bank_client - .send_and_confirm_message(&[payer_keypair, &program_keypair], message) - .unwrap(); - program_keypair - }); - let chunk_size = CHUNK_SIZE; - let mut offset = 0; - for chunk in program.chunks(chunk_size) { - let instruction = - loader_instruction::write(&program_keypair.pubkey(), loader_id, offset, chunk.to_vec()); - let message = Message::new(&[instruction], Some(&payer_keypair.pubkey())); - bank_client - .send_and_confirm_message(&[payer_keypair, &program_keypair], message) - .unwrap(); - offset += chunk_size as u32; - } - let instruction = loader_instruction::finalize(&program_keypair.pubkey(), loader_id); - (program_keypair, instruction) -} - -pub fn load_program<T: Client>( - bank_client: &T, - loader_id: &Pubkey, - payer_keypair: &Keypair, - name: &str, -) -> Pubkey { - let (program_keypair, instruction) = - load_and_finalize_program(bank_client, loader_id, None, payer_keypair, name); - let message = Message::new(&[instruction], Some(&payer_keypair.pubkey())); - bank_client - .send_and_confirm_message(&[payer_keypair, &program_keypair], message) - .unwrap(); - program_keypair.pubkey() -} - pub fn load_upgradeable_buffer<T: Client>( bank_client: &T, from_keypair: &Keypair, @@ -313,6 +256,94 @@ pub fn set_upgrade_authority( .unwrap(); } +pub fn instructions_to_load_program_of_loader_v4<T: SyncClient>( + bank_client: &T, + payer_keypair: &Keypair, + authority_keypair: &Keypair, + name: &str, + program_keypair: Option<Keypair>, + target_program_id: Option<&Pubkey>, +) -> (Keypair, Vec<Instruction>) { + let mut instructions = Vec::new(); + let loader_id = &loader_v4::id(); + let program = load_program_from_file(name); + let program_keypair = program_keypair.unwrap_or_else(|| { + let program_keypair = Keypair::new(); + instructions.push(system_instruction::create_account( + &payer_keypair.pubkey(), + &program_keypair.pubkey(), + bank_client + .get_minimum_balance_for_rent_exemption(program.len()) + .unwrap(), + 0, + loader_id, + )); + program_keypair + }); + instructions.push(loader_v4::truncate_uninitialized( + &program_keypair.pubkey(), + &authority_keypair.pubkey(), + program.len() as u32, + &payer_keypair.pubkey(), + )); + let chunk_size = CHUNK_SIZE; + let mut offset = 0; + for chunk in program.chunks(chunk_size) { + instructions.push(loader_v4::write( + &program_keypair.pubkey(), + &authority_keypair.pubkey(), + offset, + chunk.to_vec(), + )); + offset += chunk_size as u32; + } + instructions.push(if let Some(target_program_id) = target_program_id { + loader_v4::deploy_from_source( + target_program_id, + &authority_keypair.pubkey(), + &program_keypair.pubkey(), + ) + } else { + loader_v4::deploy(&program_keypair.pubkey(), &authority_keypair.pubkey()) + }); + (program_keypair, instructions) +} + +pub fn load_program_of_loader_v4( + bank_client: &mut BankClient, + bank_forks: &RwLock<BankForks>, + payer_keypair: &Keypair, + authority_keypair: &Keypair, + name: &str, +) -> (Arc<Bank>, Pubkey) { + let (program_keypair, instructions) = instructions_to_load_program_of_loader_v4( + bank_client, + payer_keypair, + authority_keypair, + name, + None, + None, + ); + let signers: &[&[&Keypair]] = &[ + &[payer_keypair, &program_keypair], + &[payer_keypair, &program_keypair, authority_keypair], + &[payer_keypair, authority_keypair], + ]; + let signers = std::iter::once(signers[0]) + .chain(std::iter::once(signers[1])) + .chain(std::iter::repeat(signers[2])); + for
(instruction, signers) in instructions.into_iter().zip(signers) { + let message = Message::new(&[instruction], Some(&payer_keypair.pubkey())); + bank_client + .send_and_confirm_message(signers, message) + .unwrap(); + } + let bank = bank_client + .advance_slot(1, bank_forks, &Pubkey::default()) + .expect("Failed to advance the slot"); + (bank, program_keypair.pubkey()) +} + // Return an Instruction that invokes `program_id` with `data` and required // a signature from `from_pubkey`. pub fn create_invoke_instruction( From 66c126b41ec2b55b3f747a4ac4e3ee6b439164a5 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 6 Sep 2024 22:41:33 +0900 Subject: [PATCH 305/529] cleanup: use let-else to reduce nesting in backup_and_clear_blockstore() (#2855) --- core/src/validator.rs | 104 +++++++++++++++++++++--------------------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 863da61782bc37..555b1221df6e1c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -2224,64 +2224,64 @@ fn backup_and_clear_blockstore( start_slot, expected_shred_version, )?; + let Some(incorrect_shred_version) = incorrect_shred_version else { + info!("Only shreds with the correct version were found in the blockstore"); + return Ok(()); + }; - if let Some(incorrect_shred_version) = incorrect_shred_version { - // .unwrap() safe because getting to this point implies blockstore has slots/shreds - let end_slot = blockstore.highest_slot()?.unwrap(); - - // Backing up the shreds that will be deleted from primary blockstore is - // not critical, so swallow errors from backup blockstore operations. - let backup_folder = format!( - "{}_backup_{}_{}_{}", - config - .ledger_column_options - .shred_storage_type - .blockstore_directory(), - incorrect_shred_version, - start_slot, - end_slot - ); - match Blockstore::open_with_options( - &ledger_path.join(backup_folder), - blockstore_options_from_config(config), - ) { - Ok(backup_blockstore) => { - info!("Backing up slots from {start_slot} to {end_slot}"); - let mut timer = Measure::start("blockstore backup"); - - const PRINT_INTERVAL: Duration = Duration::from_secs(5); - let mut print_timer = Instant::now(); - let mut num_slots_copied = 0; - let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot)?; - for (slot, _meta) in slot_meta_iterator { - let shreds = blockstore.get_data_shreds_for_slot(slot, 0)?; - let _ = backup_blockstore.insert_shreds(shreds, None, true); - num_slots_copied += 1; - - if print_timer.elapsed() > PRINT_INTERVAL { - info!("Backed up {num_slots_copied} slots thus far"); - print_timer = Instant::now(); - } + // .unwrap() safe because getting to this point implies blockstore has slots/shreds + let end_slot = blockstore.highest_slot()?.unwrap(); + + // Backing up the shreds that will be deleted from primary blockstore is + // not critical, so swallow errors from backup blockstore operations. 
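+    // The backup lands next to the primary ledger, in a directory whose name
+    // records the mismatched shred version and the backed-up slot range, so
+    // an operator can inspect or discard it later.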
+ let backup_folder = format!( + "{}_backup_{}_{}_{}", + config + .ledger_column_options + .shred_storage_type + .blockstore_directory(), + incorrect_shred_version, + start_slot, + end_slot + ); + match Blockstore::open_with_options( + &ledger_path.join(backup_folder), + blockstore_options_from_config(config), + ) { + Ok(backup_blockstore) => { + info!("Backing up slots from {start_slot} to {end_slot}"); + let mut timer = Measure::start("blockstore backup"); + + const PRINT_INTERVAL: Duration = Duration::from_secs(5); + let mut print_timer = Instant::now(); + let mut num_slots_copied = 0; + let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot)?; + for (slot, _meta) in slot_meta_iterator { + let shreds = blockstore.get_data_shreds_for_slot(slot, 0)?; + let _ = backup_blockstore.insert_shreds(shreds, None, true); + num_slots_copied += 1; + + if print_timer.elapsed() > PRINT_INTERVAL { + info!("Backed up {num_slots_copied} slots thus far"); + print_timer = Instant::now(); } - - timer.stop(); - info!("Backing up slots done. {timer}"); - } - Err(err) => { - warn!("Unable to backup shreds with incorrect shred version: {err}"); } - } - info!("Purging slots {start_slot} to {end_slot} from blockstore"); - let mut timer = Measure::start("blockstore purge"); - blockstore.purge_from_next_slots(start_slot, end_slot); - blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact); - timer.stop(); - info!("Purging slots done. {timer}"); - } else { - info!("Only shreds with the correct version were found in the blockstore"); + timer.stop(); + info!("Backing up slots done. {timer}"); + } + Err(err) => { + warn!("Unable to backup shreds with incorrect shred version: {err}"); + } } + info!("Purging slots {start_slot} to {end_slot} from blockstore"); + let mut timer = Measure::start("blockstore purge"); + blockstore.purge_from_next_slots(start_slot, end_slot); + blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact); + timer.stop(); + info!("Purging slots done. {timer}"); + Ok(()) } From 38aef8a4dafd78d6466eb5f3c861a4313dd72df6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 23:22:44 +0800 Subject: [PATCH 306/529] build(deps): bump bytemuck from 1.17.1 to 1.18.0 (#2853) * build(deps): bump bytemuck from 1.17.1 to 1.18.0 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.17.1 to 1.18.0. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.17.1...v1.18.0) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c6ead351294b2b..c1ac3224b2da53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1144,9 +1144,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "bytemuck" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773d90827bc3feecfb67fab12e24de0749aad83c74b9504ecde46237b5cd24e2" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 32d6b501a93fe2..20169b0ce3c396 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -205,7 +205,7 @@ bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.17.1" +bytemuck = "1.18.0" bytemuck_derive = "1.7.1" byteorder = "1.5.0" bytes = "1.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 370659316da19e..ea99368b616508 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -848,9 +848,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773d90827bc3feecfb67fab12e24de0749aad83c74b9504ecde46237b5cd24e2" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" dependencies = [ "bytemuck_derive", ] From 4e46bee70759899c06b8c1fef5ec1d459c7bc369 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 23:22:57 +0800 Subject: [PATCH 307/529] build(deps): bump const_format from 0.2.32 to 0.2.33 (#2852) Bumps [const_format](https://github.com/rodrimati1992/const_format_crates) from 0.2.32 to 0.2.33. - [Release notes](https://github.com/rodrimati1992/const_format_crates/releases) - [Changelog](https://github.com/rodrimati1992/const_format_crates/blob/master/Changelog.md) - [Commits](https://github.com/rodrimati1992/const_format_crates/commits) --- updated-dependencies: - dependency-name: const_format dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1ac3224b2da53..79be55a688d393 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1503,18 +1503,18 @@ dependencies = [ [[package]] name = "const_format" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 20169b0ce3c396..1dccada8f20af2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,7 +218,7 @@ clap = "2.33.1" console = "0.15.8" console_error_panic_hook = "0.1.7" console_log = "0.2.2" -const_format = "0.2.32" +const_format = "0.2.33" core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" From f674f8513abcacc18627ba9e89ddf61b4365332a Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Fri, 6 Sep 2024 19:26:16 +0400 Subject: [PATCH 308/529] Extract signature crate (#2054) * extract solana-signature crate * make serde optional in solana-signature * make ed25519-dalek optional in solana-signature * make rand optional in solana-signature * replace generic_array with optional serde_bytes * fix AbiExample derivation * lint * fix tests * update lock file * remove unused dev-dep * activate verify feature in tests * update lock file * try another trick to get around incorrect circular dep check * update lock file * cargo sort * fix dev deps * fmt * Revert "replace generic_array with optional serde_bytes" This reverts commit a1aeab79906b88052fc20ed458f28d882c6fabc9. 
* finish adding back GenericArray * only activate serde feature of generic-array when serde feature of solana-signature is activated * update lock file * make solana-signature dependent on "full" feature in SDK, as it is on master branch * update lock file * update digests (they always change when you move to a new crate) * add workspace lints * update digest * make Debug, Display and FromStr no_std * remove thiserror * only depend on rustc_version when frozen-abi is activated * make std optional --- Cargo.lock | 19 +++ Cargo.toml | 2 + core/src/consensus/tower1_7_14.rs | 2 +- core/src/consensus/tower_storage.rs | 2 +- core/src/repair/serve_repair.rs | 6 +- gossip/src/cluster_info.rs | 2 +- programs/sbf/Cargo.lock | 15 ++- sdk/Cargo.toml | 4 +- sdk/signature/Cargo.toml | 48 ++++++++ sdk/signature/build.rs | 1 + sdk/signature/src/lib.rs | 180 ++++++++++++++++++++++++++++ sdk/src/signature.rs | 144 ++-------------------- sdk/src/transaction/mod.rs | 2 +- 13 files changed, 282 insertions(+), 145 deletions(-) create mode 100644 sdk/signature/Cargo.toml create mode 120000 sdk/signature/build.rs create mode 100644 sdk/signature/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 79be55a688d393..3e0a230321fccd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7550,6 +7550,7 @@ dependencies = [ "solana-secp256k1-recover", "solana-serde-varint", "solana-short-vec", + "solana-signature", "static_assertions", "thiserror", "tiny-bip39", @@ -7635,6 +7636,24 @@ dependencies = [ "solana-frozen-abi-macro", ] +[[package]] +name = "solana-signature" +version = "2.1.0" +dependencies = [ + "bs58", + "curve25519-dalek 4.1.3", + "ed25519-dalek", + "generic-array 0.14.7", + "rand 0.8.5", + "rustc_version 0.4.1", + "serde", + "serde_derive", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-program", + "solana-sanitize", +] + [[package]] name = "solana-stake-accounts" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 1dccada8f20af2..1cf9832076b71a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,6 +115,7 @@ members = [ "sdk/program-memory", "sdk/serde-varint", "sdk/sha256-hasher", + "sdk/signature", "send-transaction-service", "short-vec", "stake-accounts", @@ -424,6 +425,7 @@ solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-fea solana-sanitize = { path = "sanitize", version = "=2.1.0" } solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } solana-sha256-hasher = { path = "sdk/sha256-hasher", version = "=2.1.0" } +solana-signature = { path = "sdk/signature", version = "=2.1.0" } solana-timings = { path = "timings", version = "=2.1.0" } solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.1.0" } solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.1.0" } diff --git a/core/src/consensus/tower1_7_14.rs b/core/src/consensus/tower1_7_14.rs index 7a57b7b1d4f09e..ff7f7024eba77f 100644 --- a/core/src/consensus/tower1_7_14.rs +++ b/core/src/consensus/tower1_7_14.rs @@ -42,7 +42,7 @@ pub struct Tower1_7_14 { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "CxwFFxKfn6ez6wifDKr5WYr3eu2PsWUKdMYp3LX8Xj52") + frozen_abi(digest = "2ngfAgnN19JwF6FnFYPYp2aHQiZnnjzaYP1vSRMRiSaq") )] #[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct SavedTower1_7_14 { diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index 5da032633ff654..6b25ab44292ed2 100644 --- a/core/src/consensus/tower_storage.rs +++ 
b/core/src/consensus/tower_storage.rs @@ -79,7 +79,7 @@ impl From for SavedTowerVersions { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "Gaxfwvx5MArn52mKZQgzHmDCyn5YfCuTHvp5Et3rFfpp") + frozen_abi(digest = "JBXfVQ6BXHBGSNY919yEkXN4H7XxmAMA7QcGrf7DiWHP") )] #[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct SavedTower { diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 24f484105a8ddb..012ec93ee964d6 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -143,7 +143,7 @@ impl AncestorHashesRepairType { #[cfg_attr( feature = "frozen-abi", derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "98D6KvXCBxAHTxXgqiywLTugTp6WFUHSf559yy4VvKE7") + frozen_abi(digest = "H7S44V9G9cjKeZdtSNZnRivsMrKaThkazF3k3c63TxP4") )] #[derive(Debug, Deserialize, Serialize)] pub enum AncestorHashesResponse { @@ -224,7 +224,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>; #[cfg_attr( feature = "frozen-abi", derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "DzofXbeBFKJpbA88nUEnDpCGKvMEcguNphyQoVr7FyLh") + frozen_abi(digest = "CYguF3KopGoM48XFJJS9pw9Z4TDZ2eUTqPPqbm3L4mFr") )] #[derive(Debug, Deserialize, Serialize)] pub enum RepairProtocol { @@ -272,7 +272,7 @@ fn discard_malformed_repair_requests( #[cfg_attr( feature = "frozen-abi", derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "CkffjyMPCwuJgk9NiCMELXLCecAnTPZqpKEnUCb3VyVf") + frozen_abi(digest = "8TYqNDnUGbE5duZgbCJAyZ2nZDSx39ueYo9PLLZCsiVy") )] #[derive(Debug, Deserialize, Serialize)] pub(crate) enum RepairResponse { diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index fdce90cba41e6c..b49b728e846957 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -311,7 +311,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; #[cfg_attr( feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "7jwuQ3oFEy8bMnmr5XHSR2jqivZniG8ZjxHx3YKTfR6C") + frozen_abi(digest = "ANgFTZHXSMbjYEuvf9YphECo47tWWrqKdPDD6B9D1YGB") )] #[derive(Serialize, Deserialize, Debug)] #[allow(clippy::large_enum_variant)] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ea99368b616508..907be7f488dcb6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6306,7 +6306,6 @@ dependencies = [ "digest 0.10.7", "ed25519-dalek", "ed25519-dalek-bip32", - "generic-array", "getrandom 0.1.14", "hmac 0.12.1", "itertools 0.12.1", @@ -6340,6 +6339,7 @@ dependencies = [ "solana-secp256k1-recover", "solana-serde-varint", "solana-short-vec", + "solana-signature", "thiserror", "wasm-bindgen", ] @@ -6408,6 +6408,19 @@ dependencies = [ "serde", ] +[[package]] +name = "solana-signature" +version = "2.1.0" +dependencies = [ + "bs58", + "ed25519-dalek", + "generic-array", + "rand 0.8.5", + "serde", + "serde_derive", + "solana-sanitize", +] + [[package]] name = "solana-stake-program" version = "2.1.0" diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 4e6613a8eb932d..eab3e27d1a88de 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -23,11 +23,11 @@ default = [ full = [ "byteorder", "chrono", - "generic-array", "memmap2", "rand", "rand0-7", "serde_json", + "solana-signature", "ed25519-dalek", "ed25519-dalek-bip32", "libsecp256k1", @@ -42,6 +42,7 @@ frozen-abi = [ "dep:solana-frozen-abi-macro", "solana-program/frozen-abi", "solana-short-vec/frozen-abi", + "solana-signature/frozen-abi" ] [dependencies] @@ -94,6 +95,7 @@ 
solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } +solana-signature = { workspace = true, features = ["rand", "serde", "verify"], optional = true } thiserror = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/sdk/signature/Cargo.toml b/sdk/signature/Cargo.toml new file mode 100644 index 00000000000000..e6ac2a0af65554 --- /dev/null +++ b/sdk/signature/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "solana-signature" +description = "Solana 64-byte signature type" +documentation = "https://docs.rs/solana-signature" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bs58 = { workspace = true } +ed25519-dalek = { workspace = true, optional = true } +generic-array = { workspace = true, features = ["more_lengths"] } +rand = { workspace = true, optional = true } +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-sanitize = { workspace = true } + +[dev-dependencies] +curve25519-dalek = { workspace = true } +ed25519-dalek = { workspace = true } +solana-program = { workspace = true } + +[build-dependencies] +rustc_version = { workspace = true, optional = true } + +[features] +default = ["std"] +frozen-abi = [ + "dep:rustc_version", + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", + "std" +] +rand = ["dep:rand"] +serde = ["dep:serde", "dep:serde_derive", "generic-array/serde"] +std = [] +verify = ["dep:ed25519-dalek"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/signature/build.rs b/sdk/signature/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/sdk/signature/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/signature/src/lib.rs b/sdk/signature/src/lib.rs new file mode 100644 index 00000000000000..8d2434c2d3fa14 --- /dev/null +++ b/sdk/signature/src/lib.rs @@ -0,0 +1,180 @@ +//! 64-byte signature type. 
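+//!
+//! A minimal usage sketch (illustrative values; behavior as defined in this
+//! file):
+//!
+//! ```
+//! use solana_signature::{Signature, SIGNATURE_BYTES};
+//!
+//! // Round-trip through the raw 64-byte representation.
+//! let sig = Signature::from([1u8; SIGNATURE_BYTES]);
+//! let bytes: [u8; 64] = sig.into();
+//! assert_eq!(bytes, [1u8; SIGNATURE_BYTES]);
+//!
+//! // `Display` and `FromStr` round-trip through base58.
+//! let parsed: Signature = sig.to_string().parse().unwrap();
+//! assert_eq!(parsed, sig);
+//! ```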
+#![no_std] +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#[cfg(any(test, feature = "verify"))] +use core::convert::TryInto; +#[cfg(feature = "serde")] +use serde_derive::{Deserialize, Serialize}; +use { + core::{ + fmt, + str::{from_utf8, FromStr}, + }, + generic_array::{typenum::U64, GenericArray}, +}; +#[cfg(feature = "std")] +extern crate std; +#[cfg(feature = "std")] +use std::{error::Error, vec::Vec}; + +/// Number of bytes in a signature +pub const SIGNATURE_BYTES: usize = 64; +/// Maximum string length of a base58 encoded signature +const MAX_BASE58_SIGNATURE_LEN: usize = 88; + +#[repr(transparent)] +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[derive(Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct Signature(GenericArray); + +impl solana_sanitize::Sanitize for Signature {} + +#[cfg(feature = "rand")] +impl Signature { + pub fn new_unique() -> Self { + Self::from(core::array::from_fn(|_| rand::random())) + } +} + +#[cfg(any(test, feature = "verify"))] +impl Signature { + pub(self) fn verify_verbose( + &self, + pubkey_bytes: &[u8], + message_bytes: &[u8], + ) -> Result<(), ed25519_dalek::SignatureError> { + let publickey = ed25519_dalek::PublicKey::from_bytes(pubkey_bytes)?; + let signature = self.0.as_slice().try_into()?; + publickey.verify_strict(message_bytes, &signature) + } + + pub fn verify(&self, pubkey_bytes: &[u8], message_bytes: &[u8]) -> bool { + self.verify_verbose(pubkey_bytes, message_bytes).is_ok() + } +} + +impl AsRef<[u8]> for Signature { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +fn write_as_base58(f: &mut fmt::Formatter, s: &Signature) -> fmt::Result { + let mut out = [0u8; MAX_BASE58_SIGNATURE_LEN]; + let out_slice: &mut [u8] = &mut out; + // This will never fail because the only possible error is BufferTooSmall, + // and we will never call it with too small a buffer. 
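+    // (A 64-byte signature encodes to at most MAX_BASE58_SIGNATURE_LEN = 88
+    // base58 characters, so `out` is sized to exactly that bound.)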
+ let len = bs58::encode(s.0).onto(out_slice).unwrap(); + let as_str = from_utf8(&out[..len]).unwrap(); + f.write_str(as_str) +} + +impl fmt::Debug for Signature { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write_as_base58(f, self) + } +} + +impl fmt::Display for Signature { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write_as_base58(f, self) + } +} + +impl From for [u8; 64] { + fn from(signature: Signature) -> Self { + signature.0.into() + } +} + +impl From<[u8; SIGNATURE_BYTES]> for Signature { + #[inline] + fn from(signature: [u8; SIGNATURE_BYTES]) -> Self { + Self(GenericArray::from(signature)) + } +} + +impl<'a> TryFrom<&'a [u8]> for Signature { + type Error = <[u8; SIGNATURE_BYTES] as TryFrom<&'a [u8]>>::Error; + + #[inline] + fn try_from(signature: &'a [u8]) -> Result { + <[u8; SIGNATURE_BYTES]>::try_from(signature).map(Self::from) + } +} + +#[cfg(feature = "std")] +impl TryFrom> for Signature { + type Error = <[u8; SIGNATURE_BYTES] as TryFrom>>::Error; + + #[inline] + fn try_from(signature: Vec) -> Result { + <[u8; SIGNATURE_BYTES]>::try_from(signature).map(Self::from) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ParseSignatureError { + WrongSize, + Invalid, +} + +#[cfg(feature = "std")] +impl Error for ParseSignatureError {} + +impl fmt::Display for ParseSignatureError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ParseSignatureError::WrongSize => { + f.write_str("string decoded to wrong size for signature") + } + ParseSignatureError::Invalid => f.write_str("failed to decode string to signature"), + } + } +} + +impl FromStr for Signature { + type Err = ParseSignatureError; + + fn from_str(s: &str) -> Result { + if s.len() > MAX_BASE58_SIGNATURE_LEN { + return Err(ParseSignatureError::WrongSize); + } + let mut bytes = [0; SIGNATURE_BYTES]; + let decoded_size = bs58::decode(s) + .onto(&mut bytes) + .map_err(|_| ParseSignatureError::Invalid)?; + if decoded_size != SIGNATURE_BYTES { + Err(ParseSignatureError::WrongSize) + } else { + Ok(bytes.into()) + } + } +} + +#[cfg(test)] +mod tests { + use {super::*, solana_program::pubkey::Pubkey}; + + #[test] + fn test_off_curve_pubkey_verify_fails() { + // Golden point off the ed25519 curve + let off_curve_bytes = bs58::decode("9z5nJyQar1FUxVJxpBXzon6kHehbomeYiDaLi9WAMhCq") + .into_vec() + .unwrap(); + + // Confirm golden's off-curvedness + let mut off_curve_bits = [0u8; 32]; + off_curve_bits.copy_from_slice(&off_curve_bytes); + let off_curve_point = curve25519_dalek::edwards::CompressedEdwardsY(off_curve_bits); + assert_eq!(off_curve_point.decompress(), None); + + let pubkey = Pubkey::try_from(off_curve_bytes).unwrap(); + let signature = Signature::default(); + // Unfortunately, ed25519-dalek doesn't surface the internal error types that we'd ideally + // `source()` out of the `SignatureError` returned by `verify_strict()`. So the best we + // can do is `is_err()` here. 
+ assert!(signature.verify_verbose(pubkey.as_ref(), &[0u8]).is_err()); + } +} diff --git a/sdk/src/signature.rs b/sdk/src/signature.rs index d33b7a613ebca8..299cd79fa5792a 100644 --- a/sdk/src/signature.rs +++ b/sdk/src/signature.rs @@ -2,50 +2,14 @@ #![cfg(feature = "full")] // legacy module paths -pub use crate::signer::{keypair::*, null_signer::*, presigner::*, *}; use { crate::pubkey::Pubkey, - generic_array::{typenum::U64, GenericArray}, - std::{ - borrow::{Borrow, Cow}, - convert::TryInto, - fmt, - str::FromStr, - }, - thiserror::Error, + std::borrow::{Borrow, Cow}, +}; +pub use { + crate::signer::{keypair::*, null_signer::*, presigner::*, *}, + solana_signature::{ParseSignatureError, Signature, SIGNATURE_BYTES}, }; - -/// Number of bytes in a signature -pub const SIGNATURE_BYTES: usize = 64; -/// Maximum string length of a base58 encoded signature -const MAX_BASE58_SIGNATURE_LEN: usize = 88; - -#[repr(transparent)] -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct Signature(GenericArray); - -impl solana_sanitize::Sanitize for Signature {} - -impl Signature { - pub fn new_unique() -> Self { - Self::from(std::array::from_fn(|_| rand::random())) - } - - pub(self) fn verify_verbose( - &self, - pubkey_bytes: &[u8], - message_bytes: &[u8], - ) -> Result<(), ed25519_dalek::SignatureError> { - let publickey = ed25519_dalek::PublicKey::from_bytes(pubkey_bytes)?; - let signature = self.0.as_slice().try_into()?; - publickey.verify_strict(message_bytes, &signature) - } - - pub fn verify(&self, pubkey_bytes: &[u8], message_bytes: &[u8]) -> bool { - self.verify_verbose(pubkey_bytes, message_bytes).is_ok() - } -} pub trait Signable { fn sign(&mut self, keypair: &Keypair) { @@ -63,80 +27,9 @@ pub trait Signable { fn set_signature(&mut self, signature: Signature); } -impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] 
- } -} - -impl fmt::Debug for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", bs58::encode(self.0).into_string()) - } -} - -impl fmt::Display for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", bs58::encode(self.0).into_string()) - } -} - -impl From for [u8; 64] { - fn from(signature: Signature) -> Self { - signature.0.into() - } -} - -impl From<[u8; SIGNATURE_BYTES]> for Signature { - #[inline] - fn from(signature: [u8; SIGNATURE_BYTES]) -> Self { - Self(GenericArray::from(signature)) - } -} - -impl<'a> TryFrom<&'a [u8]> for Signature { - type Error = <[u8; SIGNATURE_BYTES] as TryFrom<&'a [u8]>>::Error; - - #[inline] - fn try_from(signature: &'a [u8]) -> Result { - <[u8; SIGNATURE_BYTES]>::try_from(signature).map(Self::from) - } -} - -impl TryFrom> for Signature { - type Error = <[u8; SIGNATURE_BYTES] as TryFrom>>::Error; - - #[inline] - fn try_from(signature: Vec) -> Result { - <[u8; SIGNATURE_BYTES]>::try_from(signature).map(Self::from) - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Error)] -pub enum ParseSignatureError { - #[error("string decoded to wrong size for signature")] - WrongSize, - #[error("failed to decode string to signature")] - Invalid, -} - -impl FromStr for Signature { - type Err = ParseSignatureError; - - fn from_str(s: &str) -> Result { - if s.len() > MAX_BASE58_SIGNATURE_LEN { - return Err(ParseSignatureError::WrongSize); - } - let bytes = bs58::decode(s) - .into_vec() - .map_err(|_| ParseSignatureError::Invalid)?; - Signature::try_from(bytes).map_err(|_| ParseSignatureError::WrongSize) - } -} - #[cfg(test)] mod tests { - use super::*; + use {super::*, solana_sdk::signer::keypair::Keypair}; #[test] fn test_signature_fromstr() { let signature = Keypair::new().sign_message(&[0u8]); @@ -145,7 +38,7 @@ mod tests { assert_eq!(signature_base58_str.parse::(), Ok(signature)); - signature_base58_str.push_str(&bs58::encode(signature.0).into_string()); + signature_base58_str.push_str(&bs58::encode(<[u8; 64]>::from(signature)).into_string()); assert_eq!( signature_base58_str.parse::(), Err(ParseSignatureError::WrongSize) @@ -160,7 +53,7 @@ mod tests { Err(ParseSignatureError::WrongSize) ); - let mut signature_base58_str = bs58::encode(signature.0).into_string(); + let mut signature_base58_str = bs58::encode(<[u8; 64]>::from(signature)).into_string(); assert_eq!(signature_base58_str.parse::(), Ok(signature)); // throw some non-base58 stuff in there @@ -180,25 +73,4 @@ mod tests { Err(ParseSignatureError::WrongSize) ); } - - #[test] - fn test_off_curve_pubkey_verify_fails() { - // Golden point off the ed25519 curve - let off_curve_bytes = bs58::decode("9z5nJyQar1FUxVJxpBXzon6kHehbomeYiDaLi9WAMhCq") - .into_vec() - .unwrap(); - - // Confirm golden's off-curvedness - let mut off_curve_bits = [0u8; 32]; - off_curve_bits.copy_from_slice(&off_curve_bytes); - let off_curve_point = curve25519_dalek::edwards::CompressedEdwardsY(off_curve_bits); - assert_eq!(off_curve_point.decompress(), None); - - let pubkey = Pubkey::try_from(off_curve_bytes).unwrap(); - let signature = Signature::default(); - // Unfortunately, ed25519-dalek doesn't surface the internal error types that we'd ideally - // `source()` out of the `SignatureError` returned by `verify_strict()`. So the best we - // can do is `is_err()` here. 
- assert!(signature.verify_verbose(pubkey.as_ref(), &[0u8]).is_err()); - } } diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index c5022487e19e06..b4dd1aee955874 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -172,7 +172,7 @@ pub type Result = result::Result; #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "sGWhrQNiMNnUjPSG5cZvxujYaxHaiU5ggbvp46hKZSN") + frozen_abi(digest = "GNfV7vYLggBbde9n1xYKE8koExFLdr3yti7zDp7xShJR") )] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] pub struct Transaction { From 4df90e4d4ffd5a1e2201187408f63671b0a1f883 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:55:39 -0500 Subject: [PATCH 309/529] accounts-db/tools/add store-histogram tool (#2850) * add store-histogram tool * remove deps --------- Co-authored-by: HaoranYi --- Cargo.lock | 8 + Cargo.toml | 1 + accounts-db/store-histogram/Cargo.toml | 17 ++ accounts-db/store-histogram/src/main.rs | 301 ++++++++++++++++++++++++ scripts/check-dev-context-only-utils.sh | 1 + 5 files changed, 328 insertions(+) create mode 100644 accounts-db/store-histogram/Cargo.toml create mode 100644 accounts-db/store-histogram/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 3e0a230321fccd..a0955b6ac829fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -210,6 +210,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "agave-store-histogram" +version = "2.1.0" +dependencies = [ + "clap 2.33.3", + "solana-version", +] + [[package]] name = "agave-store-tool" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 1cf9832076b71a..b550ba39f6fcb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "accounts-cluster-bench", "accounts-db", "accounts-db/accounts-hash-cache-tool", + "accounts-db/store-histogram", "accounts-db/store-tool", "banking-bench", "banks-client", diff --git a/accounts-db/store-histogram/Cargo.toml b/accounts-db/store-histogram/Cargo.toml new file mode 100644 index 00000000000000..75cdea3eb1f362 --- /dev/null +++ b/accounts-db/store-histogram/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "agave-store-histogram" +description = "Tool to calculate account storage histogram" +publish = false +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +clap = { workspace = true } +solana-version = { workspace = true } + +[features] +dev-context-only-utils = [] diff --git a/accounts-db/store-histogram/src/main.rs b/accounts-db/store-histogram/src/main.rs new file mode 100644 index 00000000000000..fb681ceeb58775 --- /dev/null +++ b/accounts-db/store-histogram/src/main.rs @@ -0,0 +1,301 @@ +#![allow(clippy::arithmetic_side_effects)] +use { + clap::{crate_description, crate_name, value_t_or_exit, App, Arg}, + std::{fs, path::PathBuf}, +}; + +struct Bin { + slot_min: usize, + slot_max: usize, + count: usize, + min_size: usize, + max_size: usize, + sum_size: usize, + avg: usize, +} + +fn pad(width: usize) -> String { + let mut s = String::new(); + for _i in 0..width { + s = format!("{s} "); + } + s +} + +fn get_stars(x: usize, max: usize, width: usize) -> String { + let mut s = String::new(); + let percent = x * width / max; + for i in 0..width { + s = format!("{s}{}", if i <= percent { "*" } else { " " }); + } + s +} + +fn calc(info: &[(usize, usize)], bin_widths: Vec) { + let mut info = 
info.to_owned(); + info.sort(); + let min = info.first().unwrap().0; + let max_inclusive = info.last().unwrap().0; + eprintln!("storages: {}", info.len()); + eprintln!("lowest slot: {min}"); + eprintln!("highest slot: {max_inclusive}"); + eprintln!("slot range: {}", max_inclusive - min + 1); + eprintln!( + "outside of epoch: {}", + info.iter() + .filter(|x| x.0 < max_inclusive - 432_000) + .count() + ); + + let mut bins = Vec::default(); + for i in 0..bin_widths.len() { + let next = if i == bin_widths.len() - 1 { + usize::MAX + } else { + bin_widths[i + 1] + }; + let abin = Bin { + slot_min: bin_widths[i], + slot_max: next, + count: 0, + min_size: usize::MAX, + max_size: 0, + sum_size: 0, + avg: 0, + }; + bins.push(abin); + } + let mut bin_all = Bin { + slot_min: 0, + slot_max: 0, + count: 0, + min_size: usize::MAX, + max_size: 0, + sum_size: 0, + avg: 0, + }; + let mut bin_max = Bin { + slot_min: 0, + slot_max: 0, + count: 0, + min_size: 0, + max_size: 0, + sum_size: 0, + avg: 0, + }; + info.into_iter().for_each(|(slot, size)| { + for bin in bins.iter_mut() { + let relative = max_inclusive - slot; + if bin.slot_min <= relative && bin.slot_max > relative { + // eprintln!("{}, {}, {}, {}", slot, relative, max_inclusive, bin.slot_min); + bin.count += 1; + bin.sum_size += size; + bin.min_size = bin.min_size.min(size); + bin.max_size = bin.max_size.max(size); + + bin_all.count += 1; + bin_all.sum_size += size; + bin_all.min_size = bin_all.min_size.min(size); + bin_all.max_size = bin_all.max_size.max(size); + + break; + } + } + }); + bins.retain_mut(|bin| { + if bin.count > 0 { + bin_max.sum_size = bin_max.sum_size.max(bin.sum_size); + bin_max.max_size = bin_max.max_size.max(bin.max_size); + bin_max.count = bin_max.count.max(bin.count); + bin_max.min_size = bin_max.min_size.max(bin.min_size); + bin.avg = bin.sum_size / bin.count; + } + bin_max.avg = bin_max.avg.max(bin.avg); + + bin.count > 0 + }); + + bin_all.avg = bin_all.sum_size / bin_all.count; + + eprintln!("overall stats"); + eprintln!("size {}", bin_all.sum_size); + eprintln!("count {}", bin_all.count); + eprintln!("min size {}", bin_all.min_size); + eprintln!("max size {}", bin_all.max_size); + eprintln!("avg size {}", bin_all.sum_size / bin_all.count); + eprintln!("avg size {}", bin_all.avg); + eprintln!("bin width {}", bins[0].slot_max - bins[0].slot_min); + eprintln!("..."); + + for i in 0..bins.len() { + if i > 0 && bins[i - 1].slot_max != bins[i].slot_min { + eprintln!("..."); + } + let bin = &bins[i]; + if bin.slot_min == 432_000 { + eprintln!("-------------------------------------------------------------------------------------------------------------------------------------------------------------------"); + } + let offset = format!("{:10}", bin.slot_min); + + if i == 0 { + let s = [ + format!("{:10}", "slot age"), + pad(2), + format!("{:10}", "count"), + pad(2), + format!("{:10}", "min size"), + pad(2), + format!("{:10}", "max size"), + pad(2), + format!("{:10}", "sum size"), + pad(2), + format!("{:10}", "avg size"), + pad(2), + format!(",{:>15}", "slot min"), + format!(",{:>15}", "count"), + format!(",{:>15}", "sum size"), + format!(",{:>15}", "min size"), + format!(",{:>15}", "max size"), + format!(",{:>15}", "avg size"), + ]; + let mut s2 = String::new(); + s.iter().for_each(|s| { + s2 = format!("{s2}{s}"); + }); + eprintln!("{s2}"); + } + + let s = [ + offset, + pad(2), + get_stars(bin.count, bin_max.count, 10), + pad(2), + get_stars(bin.min_size, bin_max.min_size, 10), + pad(2), + get_stars(bin.max_size, bin_max.max_size, 
10), + pad(2), + get_stars(bin.sum_size, bin_max.sum_size, 10), + pad(2), + get_stars(bin.avg, bin_max.avg, 10), + pad(2), + format!(",{:15}", max_inclusive - bin.slot_min), + format!(",{:15}", bin.count), + format!(",{:15}", bin.sum_size), + format!(",{:15}", bin.min_size), + format!(",{:15}", bin.max_size), + format!(",{:15}", bin.avg), + ]; + let mut s2 = String::new(); + s.iter().for_each(|s| { + s2 = format!("{s2}{s}"); + }); + eprintln!("{s2}"); + } +} + +fn normal_bin_widths() -> Vec { + let mut bin_widths = vec![0]; + let div = 432_000 / 20; + for i in 1..432_000 { + let b = i * div; + if b > 432_000 { + break; + } + bin_widths.push(b); + } + bin_widths.push(432_000); + for i in 1..100000 { + let b = 432_000 + i * div; + // if b > max_range { + // break; + // } + bin_widths.push(b); + } + bin_widths +} + +fn normal_ancient() -> Vec { + let mut bin_widths = vec![0]; + bin_widths.push(432_000); + bin_widths +} +fn normal_10k() -> Vec { + let mut bin_widths = vec![0]; + bin_widths.push(432_000); + bin_widths.push(442_000); + bin_widths +} + +fn main() { + let matches = App::new(crate_name!()) + .about(crate_description!()) + .version(solana_version::version!()) + .arg( + Arg::with_name("ledger") + .index(1) + .takes_value(true) + .value_name("PATH") + .help("ledger path"), + ) + .get_matches(); + + let ledger = value_t_or_exit!(matches, "ledger", String); + let path: PathBuf = [&ledger, "accounts", "run"].iter().collect(); + + if path.is_dir() { + let dir = fs::read_dir(&path); + if let Ok(dir) = dir { + let mut info = Vec::default(); + for entry in dir.flatten() { + if let Some(name) = entry.path().file_name() { + let name = name.to_str().unwrap().split_once(".").unwrap().0; + let len = fs::metadata(entry.path()).unwrap().len(); + info.push((name.parse::().unwrap(), len as usize)); + // eprintln!("{name}, {len}"); + } + } + eprintln!("======== Normal Histogram"); + calc(&info, normal_bin_widths()); + eprintln!("========"); + + eprintln!("\n======== Normal Ancient Histogram"); + calc(&info, normal_ancient()); + eprintln!("========"); + + eprintln!("\n======== Normal Ancient 10K Histogram"); + calc(&info, normal_10k()); + eprintln!("========"); + } else { + panic!("couldn't read folder: {path:?}, {:?}", dir); + } + } else { + panic!("not a folder: {:?}", path); + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + + #[test] + fn test_calc() { + let info = vec![ + (0, 8usize), + (500, 23usize), + (501, 100), + (432_000 - 1, 2), + (432_000, 1), + (500_000, 18), + (1_000_000, 80), + ]; + let max = info.iter().map(|(slot, _size)| *slot).max().unwrap(); + let base = 1000; + let info = info + .into_iter() + .map(|(slot, size)| (max - slot + base, size)) + .collect::>(); + calc(&info, normal_bin_widths()); + calc(&info, normal_ancient()); + calc(&info, normal_10k()); + } +} diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 78dcd6cad98773..c1b12eb9cd8931 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -34,6 +34,7 @@ declare tainted_packages=( agave-ledger-tool solana-bench-tps agave-store-tool + agave-store-histogram agave-accounts-hash-cache-tool ) From 4c2e79fe159d0d27b732468da3d88225ebe62e32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Sep 2024 00:13:24 +0800 Subject: [PATCH 310/529] build(deps): bump tokio-stream from 0.1.15 to 0.1.16 (#2854) * build(deps): bump tokio-stream from 0.1.15 to 0.1.16 Bumps 
[tokio-stream](https://github.com/tokio-rs/tokio) from 0.1.15 to 0.1.16. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-stream-0.1.15...tokio-stream-0.1.16) --- updated-dependencies: - dependency-name: tokio-stream dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a0955b6ac829fa..1650f74a492a60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9159,9 +9159,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", diff --git a/Cargo.toml b/Cargo.toml index b550ba39f6fcb9..ac4ece5ead3a0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -496,7 +496,7 @@ tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" tokio-serde = "0.8" -tokio-stream = "0.1.15" +tokio-stream = "0.1.16" tokio-tungstenite = "0.20.1" tokio-util = "0.7" toml = "0.8.12" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 907be7f488dcb6..a6cab3aba1f3d0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7609,9 +7609,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", From aab56622edab30ea7654960eb31be7135561a966 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Fri, 6 Sep 2024 22:55:12 +0400 Subject: [PATCH 311/529] Extract program-option crate (#2369) * extract program-option crate * update lock file * update nits.sh * missing dev dep * mention Solana in description * remove solana_program from program_option doc examples --- Cargo.lock | 5 +++++ Cargo.toml | 2 ++ ci/nits.sh | 2 +- programs/sbf/Cargo.lock | 5 +++++ sdk/program-option/Cargo.toml | 13 +++++++++++++ .../program_option.rs => program-option/src/lib.rs} | 4 ++-- sdk/program/Cargo.toml | 1 + sdk/program/src/lib.rs | 3 +-- 8 files changed, 30 insertions(+), 5 deletions(-) create mode 100644 sdk/program-option/Cargo.toml rename sdk/{program/src/program_option.rs => program-option/src/lib.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index 1650f74a492a60..e7994628db1094 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7059,6 +7059,7 @@ dependencies = [ "solana-logger", "solana-msg", "solana-program-memory", + "solana-program-option", "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", @@ -7079,6 +7080,10 @@ dependencies = [ "solana-define-syscall", ] +[[package]] +name = "solana-program-option" +version = "2.1.0" + [[package]] name = "solana-program-runtime" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index ac4ece5ead3a0f..da9fca1116b54e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,6 +114,7 @@ members = [ 
"sdk/package-metadata-macro", "sdk/program", "sdk/program-memory", + "sdk/program-option", "sdk/serde-varint", "sdk/sha256-hasher", "sdk/signature", @@ -417,6 +418,7 @@ solana-poh = { path = "poh", version = "=2.1.0" } solana-poseidon = { path = "poseidon", version = "=2.1.0" } solana-program = { path = "sdk/program", version = "=2.1.0", default-features = false } solana-program-memory = { path = "sdk/program-memory", version = "=2.1.0" } +solana-program-option = { path = "sdk/program-option", version = "=2.1.0" } solana-program-runtime = { path = "program-runtime", version = "=2.1.0" } solana-program-test = { path = "program-test", version = "=2.1.0" } solana-pubsub-client = { path = "pubsub-client", version = "=2.1.0" } diff --git a/ci/nits.sh b/ci/nits.sh index 856a4d323cddf0..764881a475dcad 100755 --- a/ci/nits.sh +++ b/ci/nits.sh @@ -29,7 +29,7 @@ declare print_free_tree=( ':sdk/**.rs' ':^sdk/cargo-build-sbf/**.rs' ':^sdk/msg/src/lib.rs' - ':^sdk/program/src/program_option.rs' + ':^sdk/program-option/src/lib.rs' ':^sdk/program/src/program_stubs.rs' ':programs/**.rs' ':^**bin**.rs' diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a6cab3aba1f3d0..bf82a2b4219f9b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5455,6 +5455,7 @@ dependencies = [ "solana-hash", "solana-msg", "solana-program-memory", + "solana-program-option", "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", @@ -5473,6 +5474,10 @@ dependencies = [ "solana-define-syscall", ] +[[package]] +name = "solana-program-option" +version = "2.1.0" + [[package]] name = "solana-program-runtime" version = "2.1.0" diff --git a/sdk/program-option/Cargo.toml b/sdk/program-option/Cargo.toml new file mode 100644 index 00000000000000..d078d12d82636c --- /dev/null +++ b/sdk/program-option/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "solana-program-option" +description = "A C representation of Rust's Option, used in Solana programs." 
+documentation = "https://docs.rs/solana-program-option" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/program_option.rs b/sdk/program-option/src/lib.rs similarity index 99% rename from sdk/program/src/program_option.rs rename to sdk/program-option/src/lib.rs index 3496e5c282a804..bde0e60f6e546f 100644 --- a/sdk/program/src/program_option.rs +++ b/sdk/program-option/src/lib.rs @@ -172,7 +172,7 @@ impl COption { /// ``` /// /// ```should_panic - /// # use solana_program::program_option::COption; + /// # use solana_program_option::COption; /// let x: COption<&str> = COption::None; /// x.expect("the world is ending"); // panics with `the world is ending` /// ``` @@ -205,7 +205,7 @@ impl COption { /// ``` /// /// ```should_panic - /// # use solana_program::program_option::COption; + /// # use solana_program_option::COption; /// let x: COption<&str> = COption::None; /// assert_eq!(x.unwrap(), "air"); // fails /// ``` diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index f293283f9dff3e..3b8eeba0d567fc 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -43,6 +43,7 @@ solana-hash = { workspace = true, features = [ ] } solana-msg = { workspace = true } solana-program-memory = { workspace = true } +solana-program-option = { workspace = true } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 9cf1fc64083e50..f1ab379034df89 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -508,7 +508,6 @@ pub mod native_token; pub mod nonce; pub mod program; pub mod program_error; -pub mod program_option; pub mod program_pack; pub mod program_stubs; pub mod program_utils; @@ -540,7 +539,7 @@ pub use solana_serde_varint as serde_varint; pub use solana_short_vec as short_vec; #[cfg(target_arch = "wasm32")] pub use wasm_bindgen::prelude::wasm_bindgen; -pub use {solana_clock as clock, solana_msg::msg}; +pub use {solana_clock as clock, solana_msg::msg, solana_program_option as program_option}; /// The [config native program][np]. /// From 9eee3f2d67ae2b49a4641df3653d7fc8eb066ac1 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:14:33 -0700 Subject: [PATCH 312/529] wen_restart: Forbid update on gossip messages and count old votes in active_peers. (#2731) * Forbid update on LastVotedForkSlots and HeaviestFork and log conflicting messages. * Make linter happy. * Do not discard RestartLastVotedForkSlots with super old vote, it can be counted in active_peers. * Add comment that to_slots discards any slot < root_slot. * remove entry_exists. * Add is_valid_change. * Update comments to explain actively_voting_for_this_epoch_stake. * Update comments again. 
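A rough sketch of the resulting rule for a repeated RestartHeaviestFork from
the same peer (condensed from is_valid_change() in this patch; record
construction elided):

    if old.last_slot != new.last_slot || old.last_slot_hash != new.last_slot_hash {
        // Conflicting fork choice: keep the old vote and log the conflict.
        HeaviestForkAggregateResult::DifferentVersionExists(old, new)
    } else if old == new
        || old.wallclock > new.wallclock
        || old.observed_stake == new.observed_stake
    {
        HeaviestForkAggregateResult::AlreadyExists
    } else {
        // Same fork, newer wallclock, changed observed_stake: take the update.
        HeaviestForkAggregateResult::Inserted(/* record built from `new` */)
    }

RestartLastVotedForkSlots messages get the analogous treatment via
LastVotedForkSlotsAggregateResult.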
--- wen-restart/proto/wen_restart.proto | 6 + wen-restart/src/heaviest_fork_aggregate.rs | 246 +++++++++------- .../src/last_voted_fork_slots_aggregate.rs | 267 +++++++++--------- wen-restart/src/wen_restart.rs | 83 ++++-- 4 files changed, 348 insertions(+), 254 deletions(-) diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto index 856e7df9ef114a..b32ca5f6537283 100644 --- a/wen-restart/proto/wen_restart.proto +++ b/wen-restart/proto/wen_restart.proto @@ -59,6 +59,11 @@ message GenerateSnapshotRecord { uint32 shred_version = 4; } +message ConflictMessage { + string old_message = 2; + string new_message = 3; +} + message WenRestartProgress { State state = 1; optional LastVotedForkSlotsRecord my_last_voted_fork_slots = 2; @@ -66,4 +71,5 @@ message WenRestartProgress { optional HeaviestForkRecord my_heaviest_fork = 4; optional HeaviestForkAggregateRecord heaviest_fork_aggregate = 5; optional GenerateSnapshotRecord my_snapshot = 6; + map conflict_message = 7; } diff --git a/wen-restart/src/heaviest_fork_aggregate.rs b/wen-restart/src/heaviest_fork_aggregate.rs index d5e454b6eeebe1..84a67426d89664 100644 --- a/wen-restart/src/heaviest_fork_aggregate.rs +++ b/wen-restart/src/heaviest_fork_aggregate.rs @@ -31,6 +31,15 @@ pub struct HeaviestForkFinalResult { pub total_active_stake_seen_supermajority: u64, } +#[derive(Debug, PartialEq)] +pub enum HeaviestForkAggregateResult { + AlreadyExists, + DifferentVersionExists(RestartHeaviestFork, RestartHeaviestFork), + Inserted(HeaviestForkRecord), + Malformed, + ZeroStakeIgnored, +} + impl HeaviestForkAggregate { pub(crate) fn new( wait_for_supermajority_threshold_percent: u64, @@ -63,7 +72,7 @@ impl HeaviestForkAggregate { &mut self, key_string: &str, record: &HeaviestForkRecord, - ) -> Result> { + ) -> Result { let from = Pubkey::from_str(key_string)?; let bankhash = Hash::from_str(&record.bankhash)?; let restart_heaviest_fork = RestartHeaviestFork { @@ -77,29 +86,37 @@ impl HeaviestForkAggregate { Ok(self.aggregate(restart_heaviest_fork)) } - fn should_replace( + fn is_valid_change( current_heaviest_fork: &RestartHeaviestFork, new_heaviest_fork: &RestartHeaviestFork, - ) -> bool { - if current_heaviest_fork == new_heaviest_fork { - return false; - } - if current_heaviest_fork.wallclock > new_heaviest_fork.wallclock { - return false; + ) -> HeaviestForkAggregateResult { + if current_heaviest_fork.last_slot != new_heaviest_fork.last_slot + || current_heaviest_fork.last_slot_hash != new_heaviest_fork.last_slot_hash + { + return HeaviestForkAggregateResult::DifferentVersionExists( + current_heaviest_fork.clone(), + new_heaviest_fork.clone(), + ); } - if current_heaviest_fork.last_slot == new_heaviest_fork.last_slot - && current_heaviest_fork.last_slot_hash == new_heaviest_fork.last_slot_hash - && current_heaviest_fork.observed_stake == new_heaviest_fork.observed_stake + if current_heaviest_fork == new_heaviest_fork + || current_heaviest_fork.wallclock > new_heaviest_fork.wallclock + || current_heaviest_fork.observed_stake == new_heaviest_fork.observed_stake { - return false; + return HeaviestForkAggregateResult::AlreadyExists; } - true + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { + slot: new_heaviest_fork.last_slot, + bankhash: new_heaviest_fork.last_slot_hash.to_string(), + total_active_stake: new_heaviest_fork.observed_stake, + shred_version: new_heaviest_fork.shred_version as u32, + wallclock: new_heaviest_fork.wallclock, + }) } pub(crate) fn aggregate( &mut self, received_heaviest_fork: 
RestartHeaviestFork, - ) -> Option { + ) -> HeaviestForkAggregateResult { let total_stake = self.epoch_stakes.total_stake(); let from = &received_heaviest_fork.from; let sender_stake = self.epoch_stakes.node_id_to_stake(from).unwrap_or(0); @@ -108,55 +125,46 @@ impl HeaviestForkAggregate { "Gossip should not accept zero-stake RestartLastVotedFork from {:?}", from ); - return None; + return HeaviestForkAggregateResult::ZeroStakeIgnored; } if from == &self.my_pubkey { - return None; + return HeaviestForkAggregateResult::AlreadyExists; } if received_heaviest_fork.shred_version != self.my_shred_version { warn!( "Gossip should not accept RestartLastVotedFork with different shred version {} from {:?}", received_heaviest_fork.shred_version, from ); - return None; + return HeaviestForkAggregateResult::Malformed; } - let record = HeaviestForkRecord { - slot: received_heaviest_fork.last_slot, - bankhash: received_heaviest_fork.last_slot_hash.to_string(), - total_active_stake: received_heaviest_fork.observed_stake, - shred_version: received_heaviest_fork.shred_version as u32, - wallclock: received_heaviest_fork.wallclock, - }; - if let Some(old_heaviest_fork) = self - .heaviest_forks - .insert(*from, received_heaviest_fork.clone()) - { - if Self::should_replace(&old_heaviest_fork, &received_heaviest_fork) { - let entry = self - .block_stake_map - .get_mut(&( - old_heaviest_fork.last_slot, - old_heaviest_fork.last_slot_hash, - )) - .unwrap(); - info!( - "{:?} Replacing old heaviest fork from {:?} with {:?}", - from, old_heaviest_fork, received_heaviest_fork - ); - *entry = entry.saturating_sub(sender_stake); + let result = if let Some(old_heaviest_fork) = self.heaviest_forks.get(from) { + let result = Self::is_valid_change(old_heaviest_fork, &received_heaviest_fork); + if let HeaviestForkAggregateResult::Inserted(_) = result { + // continue following processing } else { - return None; + return result; } - } - let entry = self - .block_stake_map - .entry(( - received_heaviest_fork.last_slot, - received_heaviest_fork.last_slot_hash, - )) - .or_insert(0); - *entry = entry.saturating_add(sender_stake); - self.active_peers.insert(*from); + result + } else { + let entry = self + .block_stake_map + .entry(( + received_heaviest_fork.last_slot, + received_heaviest_fork.last_slot_hash, + )) + .or_insert(0); + *entry = entry.saturating_add(sender_stake); + self.active_peers.insert(*from); + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { + slot: received_heaviest_fork.last_slot, + bankhash: received_heaviest_fork.last_slot_hash.to_string(), + total_active_stake: received_heaviest_fork.observed_stake, + shred_version: received_heaviest_fork.shred_version as u32, + wallclock: received_heaviest_fork.wallclock, + }) + }; + self.heaviest_forks + .insert(*from, received_heaviest_fork.clone()); if received_heaviest_fork.observed_stake as f64 / total_stake as f64 >= self.supermajority_threshold { @@ -169,7 +177,7 @@ impl HeaviestForkAggregate { { self.active_peers_seen_supermajority.insert(self.my_pubkey); } - Some(record) + result } pub(crate) fn total_active_stake(&self) -> u64 { @@ -195,7 +203,7 @@ impl HeaviestForkAggregate { mod tests { use { crate::{ - heaviest_fork_aggregate::HeaviestForkAggregate, + heaviest_fork_aggregate::{HeaviestForkAggregate, HeaviestForkAggregateResult}, solana::wen_restart_proto::HeaviestForkRecord, }, solana_gossip::restart_crds_values::RestartHeaviestFork, @@ -253,30 +261,30 @@ mod tests { fn test_aggregate_from_gossip() { let mut test_state = test_aggregate_init(); let 
initial_num_active_validators = 3; + let timestamp1 = timestamp(); for validator_voting_keypair in test_state .validator_voting_keypairs .iter() .take(initial_num_active_validators) { let pubkey = validator_voting_keypair.node_keypair.pubkey(); - let now = timestamp(); assert_eq!( test_state .heaviest_fork_aggregate .aggregate(RestartHeaviestFork { from: pubkey, - wallclock: now, + wallclock: timestamp1, last_slot: test_state.heaviest_slot, last_slot_hash: test_state.heaviest_hash, observed_stake: 100, shred_version: SHRED_VERSION, },), - Some(HeaviestForkRecord { + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { slot: test_state.heaviest_slot, bankhash: test_state.heaviest_hash.to_string(), total_active_stake: 100, shred_version: SHRED_VERSION as u32, - wallclock: now, + wallclock: timestamp1, }), ); } @@ -302,7 +310,7 @@ mod tests { test_state .heaviest_fork_aggregate .aggregate(new_active_validator_last_voted_slots), - Some(HeaviestForkRecord { + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { slot: test_state.heaviest_slot, bankhash: test_state.heaviest_hash.to_string(), total_active_stake: 100, @@ -318,7 +326,7 @@ mod tests { let replace_message_validator = test_state.validator_voting_keypairs[2] .node_keypair .pubkey(); - // Allow specific validator to replace message. + // If hash changes, it will be ignored. let now = timestamp(); let new_hash = Hash::new_unique(); let replace_message_validator_last_fork = RestartHeaviestFork { @@ -332,14 +340,18 @@ mod tests { assert_eq!( test_state .heaviest_fork_aggregate - .aggregate(replace_message_validator_last_fork), - Some(HeaviestForkRecord { - slot: test_state.heaviest_slot + 1, - bankhash: new_hash.to_string(), - total_active_stake: 100, - shred_version: SHRED_VERSION as u32, - wallclock: now, - }), + .aggregate(replace_message_validator_last_fork.clone()), + HeaviestForkAggregateResult::DifferentVersionExists( + RestartHeaviestFork { + from: replace_message_validator, + wallclock: timestamp1, + last_slot: test_state.heaviest_slot, + last_slot_hash: test_state.heaviest_hash, + observed_stake: 100, + shred_version: SHRED_VERSION, + }, + replace_message_validator_last_fork, + ), ); assert_eq!( test_state.heaviest_fork_aggregate.total_active_stake(), @@ -359,7 +371,7 @@ mod tests { observed_stake: 100, shred_version: SHRED_VERSION, },), - None, + HeaviestForkAggregateResult::ZeroStakeIgnored, ); assert_eq!( test_state.heaviest_fork_aggregate.total_active_stake(), @@ -381,7 +393,7 @@ mod tests { observed_stake: 1400, shred_version: SHRED_VERSION, },), - Some(HeaviestForkRecord { + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { slot: test_state.heaviest_slot, bankhash: test_state.heaviest_hash.to_string(), total_active_stake: 1400, @@ -417,7 +429,7 @@ mod tests { observed_stake: 1500, shred_version: SHRED_VERSION, },), - Some(HeaviestForkRecord { + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { slot: test_state.heaviest_slot, bankhash: test_state.heaviest_hash.to_string(), total_active_stake: 1500, @@ -454,7 +466,7 @@ mod tests { observed_stake: 100, shred_version: SHRED_VERSION, },), - None, + HeaviestForkAggregateResult::AlreadyExists, ); } @@ -462,6 +474,9 @@ mod tests { fn test_aggregate_from_record() { let mut test_state = test_aggregate_init(); let time1 = timestamp(); + let from = test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey(); let record = HeaviestForkRecord { wallclock: time1, slot: test_state.heaviest_slot, @@ -473,15 +488,9 @@ mod tests { assert_eq!( test_state 
.heaviest_fork_aggregate - .aggregate_from_record( - &test_state.validator_voting_keypairs[0] - .node_keypair - .pubkey() - .to_string(), - &record, - ) + .aggregate_from_record(&from.to_string(), &record,) .unwrap(), - Some(record.clone()), + HeaviestForkAggregateResult::Inserted(record.clone()), ); assert_eq!(test_state.heaviest_fork_aggregate.total_active_stake(), 200); // Now if you get the same result from Gossip again, it should be ignored. @@ -489,34 +498,31 @@ mod tests { test_state .heaviest_fork_aggregate .aggregate(RestartHeaviestFork { - from: test_state.validator_voting_keypairs[0] - .node_keypair - .pubkey(), + from, wallclock: time1, last_slot: test_state.heaviest_slot, last_slot_hash: test_state.heaviest_hash, observed_stake: 100, shred_version: SHRED_VERSION, },), - None, + HeaviestForkAggregateResult::AlreadyExists, ); - // But if it's a new record from the same validator, it will be replaced. + // If only observed_stake changes, it will be replaced. let time2 = timestamp(); + let old_heaviest_fork = RestartHeaviestFork { + from, + wallclock: time2, + last_slot: test_state.heaviest_slot, + last_slot_hash: test_state.heaviest_hash, + observed_stake: 200, + shred_version: SHRED_VERSION, + }; assert_eq!( test_state .heaviest_fork_aggregate - .aggregate(RestartHeaviestFork { - from: test_state.validator_voting_keypairs[0] - .node_keypair - .pubkey(), - wallclock: time2, - last_slot: test_state.heaviest_slot, - last_slot_hash: test_state.heaviest_hash, - observed_stake: 200, - shred_version: SHRED_VERSION, - },), - Some(HeaviestForkRecord { + .aggregate(old_heaviest_fork.clone()), + HeaviestForkAggregateResult::Inserted(HeaviestForkRecord { wallclock: time2, slot: test_state.heaviest_slot, bankhash: test_state.heaviest_hash.to_string(), @@ -524,6 +530,48 @@ mod tests { total_active_stake: 200, }), ); + + // If slot changes, it will be ignored. + let new_heaviest_fork = RestartHeaviestFork { + from: test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey(), + wallclock: timestamp(), + last_slot: test_state.heaviest_slot + 1, + last_slot_hash: test_state.heaviest_hash, + observed_stake: 100, + shred_version: SHRED_VERSION, + }; + assert_eq!( + test_state + .heaviest_fork_aggregate + .aggregate(new_heaviest_fork.clone()), + HeaviestForkAggregateResult::DifferentVersionExists( + old_heaviest_fork.clone(), + new_heaviest_fork + ) + ); + // If hash changes, it will also be ignored. + let new_heaviest_fork = RestartHeaviestFork { + from: test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey(), + wallclock: timestamp(), + last_slot: test_state.heaviest_slot, + last_slot_hash: Hash::new_unique(), + observed_stake: 100, + shred_version: SHRED_VERSION, + }; + assert_eq!( + test_state + .heaviest_fork_aggregate + .aggregate(new_heaviest_fork.clone()), + HeaviestForkAggregateResult::DifferentVersionExists( + old_heaviest_fork, + new_heaviest_fork + ) + ); + // percentage doesn't change since it's a replace. assert_eq!(test_state.heaviest_fork_aggregate.total_active_stake(), 200); @@ -542,7 +590,7 @@ mod tests { } ) .unwrap(), - None, + HeaviestForkAggregateResult::ZeroStakeIgnored, ); // percentage doesn't change since the previous aggregate is ignored. 
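// (Editor's aside: a standalone sketch, not part of this test, of how a
// caller can branch on the new result enum that replaced the old
// Option<HeaviestForkRecord>. The stub types and string payloads below are
// assumptions for illustration only.)
#[derive(Debug, PartialEq)]
enum AggregateOutcome<Rec, Msg> {
    Inserted(Rec),
    AlreadyExists,
    DifferentVersionExists(Msg, Msg),
    ZeroStakeIgnored,
    Malformed,
}

fn handle_outcome(outcome: AggregateOutcome<String, String>) {
    match outcome {
        // Only a fresh insertion advances restart progress; the remaining
        // variants are no-ops the caller may log or record instead.
        AggregateOutcome::Inserted(record) => println!("recorded {record}"),
        AggregateOutcome::DifferentVersionExists(old, new) => {
            eprintln!("conflict: kept {old}, ignored {new}")
        }
        AggregateOutcome::AlreadyExists
        | AggregateOutcome::ZeroStakeIgnored
        | AggregateOutcome::Malformed => {}
    }
}
// (End of sketch; the test resumes below.)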
assert_eq!(test_state.heaviest_fork_aggregate.total_active_stake(), 200); @@ -565,7 +613,7 @@ mod tests { } ) .unwrap(), - None, + HeaviestForkAggregateResult::AlreadyExists, ); } @@ -591,7 +639,7 @@ mod tests { &heaviest_fork_record, ) .unwrap(), - Some(heaviest_fork_record.clone()), + HeaviestForkAggregateResult::Inserted(heaviest_fork_record.clone()), ); // Then test that it fails if the record is invalid. diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs index f680dc73238156..fade1354a93b39 100644 --- a/wen-restart/src/last_voted_fork_slots_aggregate.rs +++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs @@ -10,15 +10,18 @@ use { pubkey::Pubkey, }, std::{ - collections::{BTreeSet, HashMap, HashSet}, + collections::{BTreeSet, HashMap}, str::FromStr, sync::Arc, }, }; -// If at least 1/3 of the stake has voted for a slot in next Epoch, we think +// If at least 1/3 of the stake has voted for any slot in next Epoch, we think // the cluster's clock is in sync and everyone will enter the new Epoch soon. // So we require that we have >80% stake in the new Epoch to exit. +// We use actively_voting_for_this_epoch_stake to determine whether 1/3 of the +// stake has voted for any slot in this Epoch, and then we use actively_voting_stake +// to determine if we have >80% stake in this Epoch. const EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD: f64 = 1f64 / 3f64; #[derive(Debug, Clone, PartialEq)] @@ -28,13 +31,14 @@ pub(crate) struct LastVotedForkSlotsEpochInfo { // Total stake of active peers in this epoch, no matter they voted for a slot // in this epoch or not. pub actively_voting_stake: u64, - // Total stake of active peers which has voted for a slot in this epoch. + // Total stake of active peers which has voted for any slot in this epoch. + // Note that if last_vote slot belongs to epoch n, then this validator should + // have voted for at least one slot in epoch n - 1, so it should be counted + // in earlier epoch as well. pub actively_voting_for_this_epoch_stake: u64, } pub(crate) struct LastVotedForkSlotsAggregate { - // Map each peer pubkey to the epoch of its last vote. - node_to_last_vote_epoch_map: HashMap, epoch_info_vec: Vec, last_voted_fork_slots: HashMap, my_pubkey: Pubkey, @@ -50,6 +54,13 @@ pub struct LastVotedForkSlotsFinalResult { pub epoch_info_vec: Vec, } +#[derive(Debug, PartialEq)] +pub enum LastVotedForkSlotsAggregateResult { + AlreadyExists, + DifferentVersionExists(RestartLastVotedForkSlots, RestartLastVotedForkSlots), + Inserted(LastVotedForkSlotsRecord), +} + impl LastVotedForkSlotsAggregate { pub(crate) fn new( root_bank: Arc, @@ -79,8 +90,6 @@ impl LastVotedForkSlotsAggregate { .expect("my voted slots should not be empty"), ) .0; - let mut node_to_last_vote_epoch_map = HashMap::new(); - node_to_last_vote_epoch_map.insert(*my_pubkey, my_last_vote_epoch); // We would only consider slots in root_epoch and the next epoch. 
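// (Editor's aside: a standalone arithmetic sketch of the two thresholds in
// the comments above. The 1/3 constant matches the code; the exit constant's
// name is an assumption standing in for the ">80%" requirement.)
fn epoch_thresholds_demo() {
    const EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD: f64 = 1f64 / 3f64;
    const EXIT_STAKE_THRESHOLD: f64 = 0.8;
    let total_stake = 1000f64;
    // 340 of 1000 staked units voted for a slot in this epoch: above 1/3, so
    // the cluster clock is considered in sync and the epoch counts for exit.
    assert!(340f64 / total_stake >= EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD);
    // 850 of 1000 are actively voting in the epoch: above 80%, exit can pass.
    assert!(850f64 / total_stake > EXIT_STAKE_THRESHOLD);
}
// (End of sketch; the real epoch_info_vec construction follows.)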
let epoch_info_vec: Vec = (root_epoch ..root_epoch @@ -107,7 +116,6 @@ impl LastVotedForkSlotsAggregate { }) .collect(); Self { - node_to_last_vote_epoch_map, epoch_info_vec, last_voted_fork_slots: HashMap::new(), my_pubkey: *my_pubkey, @@ -122,11 +130,8 @@ impl LastVotedForkSlotsAggregate { &mut self, key_string: &str, record: &LastVotedForkSlotsRecord, - ) -> Result> { + ) -> Result { let from = Pubkey::from_str(key_string)?; - if from == self.my_pubkey { - return Ok(None); - } let last_voted_hash = Hash::from_str(&record.last_vote_bankhash)?; let converted_record = RestartLastVotedForkSlots::new( from, @@ -141,31 +146,38 @@ impl LastVotedForkSlotsAggregate { pub(crate) fn aggregate( &mut self, new_slots: RestartLastVotedForkSlots, - ) -> Option { + ) -> LastVotedForkSlotsAggregateResult { let from = &new_slots.from; if from == &self.my_pubkey { - return None; + return LastVotedForkSlotsAggregateResult::AlreadyExists; } let root_slot = self.root_bank.slot(); + // to_slots will discard any slot < root_slot. let new_slots_vec = new_slots.to_slots(root_slot); if new_slots_vec.is_empty() { - return None; + // This could be a validator that has super old vote, we still want to + // count it in active peers though because it will switch to agreed upon + // heaviest fork later. + info!("The slots from {from} is older than root slot {root_slot}"); + } + if let Some(old_slots) = self.last_voted_fork_slots.get(from) { + if old_slots.to_slots(self.root_bank.slot()) == new_slots_vec { + return LastVotedForkSlotsAggregateResult::AlreadyExists; + } else { + return LastVotedForkSlotsAggregateResult::DifferentVersionExists( + old_slots.clone(), + new_slots.clone(), + ); + } } + self.last_voted_fork_slots.insert(*from, new_slots.clone()); let last_vote_epoch = self .root_bank - .get_epoch_and_slot_index(*new_slots_vec.last().unwrap()) + .get_epoch_and_slot_index(new_slots.last_voted_slot) .0; - let old_last_vote_epoch = self - .node_to_last_vote_epoch_map - .insert(*from, last_vote_epoch); - if old_last_vote_epoch != Some(last_vote_epoch) { - self.update_epoch_info(from, last_vote_epoch, old_last_vote_epoch); - } - if self.update_and_check_if_message_already_saved(new_slots.clone(), new_slots_vec.clone()) - { - return None; - } - Some(LastVotedForkSlotsRecord { + self.update_epoch_info(from, last_vote_epoch); + self.insert_message(from, &new_slots_vec); + LastVotedForkSlotsAggregateResult::Inserted(LastVotedForkSlotsRecord { last_voted_fork_slots: new_slots_vec, last_vote_bankhash: new_slots.last_voted_hash.to_string(), shred_version: new_slots.shred_version as u32, @@ -173,38 +185,8 @@ impl LastVotedForkSlotsAggregate { }) } - // Return true if the message has already been saved, so we can skip the rest of the processing. 
- fn update_and_check_if_message_already_saved( - &mut self, - new_slots: RestartLastVotedForkSlots, - new_slots_vec: Vec, - ) -> bool { - let from = &new_slots.from; - let new_slots_set: HashSet = HashSet::from_iter(new_slots_vec); - let old_slots_set = match self.last_voted_fork_slots.insert(*from, new_slots.clone()) { - Some(old_slots) => { - if old_slots == new_slots { - return true; - } else { - HashSet::from_iter(old_slots.to_slots(self.root_bank.slot())) - } - } - None => HashSet::new(), - }; - for slot in old_slots_set.difference(&new_slots_set) { - let epoch = self.root_bank.epoch_schedule().get_epoch(*slot); - let entry = self.slots_stake_map.get_mut(slot).unwrap(); - if let Some(sender_stake) = self.root_bank.epoch_node_id_to_stake(epoch, from) { - *entry = entry.saturating_sub(sender_stake); - let repair_threshold_stake = (self.root_bank.epoch_total_stake(epoch).unwrap() - as f64 - * self.repair_threshold) as u64; - if *entry < repair_threshold_stake { - self.slots_to_repair.remove(slot); - } - } - } - for slot in new_slots_set.difference(&old_slots_set) { + fn insert_message(&mut self, from: &Pubkey, new_slots_vec: &Vec) { + for slot in new_slots_vec { let epoch = self.root_bank.epoch_schedule().get_epoch(*slot); let entry = self.slots_stake_map.entry(*slot).or_insert(0); if let Some(sender_stake) = self.root_bank.epoch_node_id_to_stake(epoch, from) { @@ -217,37 +199,18 @@ impl LastVotedForkSlotsAggregate { } } } - false } - fn update_epoch_info( - &mut self, - from: &Pubkey, - last_vote_epoch: Epoch, - old_last_vote_epoch: Option, - ) { - if Some(last_vote_epoch) < old_last_vote_epoch { - // We only have two entries so old epoch must be the second one. - let entry = self.epoch_info_vec.last_mut().unwrap(); + fn update_epoch_info(&mut self, from: &Pubkey, last_vote_epoch: Epoch) { + for entry in self.epoch_info_vec.iter_mut() { if let Some(stake) = self.root_bank.epoch_node_id_to_stake(entry.epoch, from) { - entry.actively_voting_for_this_epoch_stake = entry - .actively_voting_for_this_epoch_stake - .checked_sub(stake) - .unwrap(); - } - } else { - for entry in self.epoch_info_vec.iter_mut() { - if let Some(stake) = self.root_bank.epoch_node_id_to_stake(entry.epoch, from) { - if old_last_vote_epoch.is_none() { - entry.actively_voting_stake = - entry.actively_voting_stake.checked_add(stake).unwrap(); - } - if Some(entry.epoch) > old_last_vote_epoch && entry.epoch <= last_vote_epoch { - entry.actively_voting_for_this_epoch_stake = entry - .actively_voting_for_this_epoch_stake - .checked_add(stake) - .unwrap(); - } + entry.actively_voting_stake = + entry.actively_voting_stake.checked_add(stake).unwrap(); + if entry.epoch <= last_vote_epoch { + entry.actively_voting_for_this_epoch_stake = entry + .actively_voting_for_this_epoch_stake + .checked_add(stake) + .unwrap(); } } } @@ -286,6 +249,7 @@ mod tests { solana_gossip::restart_crds_values::RestartLastVotedForkSlots, solana_program::clock::Slot, solana_runtime::{ + accounts_background_service::AbsRequestSender, bank::Bank, epoch_stakes::EpochStakes, genesis_utils::{ @@ -320,6 +284,14 @@ mod tests { vec![100; validator_voting_keypairs.len()], ); let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank0 = bank_forks.read().unwrap().root_bank(); + let bank1 = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); + bank_forks.write().unwrap().insert(bank1); + assert!(bank_forks + .write() + .unwrap() + .set_root(1, &AbsRequestSender::default(), None) + .is_ok()); let root_bank = 
bank_forks.read().unwrap().root_bank(); let root_slot = root_bank.slot(); let last_voted_fork_slots = vec![ @@ -341,7 +313,7 @@ mod tests { } #[test] - fn test_aggregate() { + fn test_aggregate_success() { let mut test_state = test_aggregate_init(); let root_slot = test_state.root_slot; // Until 33% stake vote, the percentage should be 0. @@ -365,7 +337,7 @@ mod tests { ) .unwrap(), ), - Some(LastVotedForkSlotsRecord { + LastVotedForkSlotsAggregateResult::Inserted(LastVotedForkSlotsRecord { last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), last_vote_bankhash: Hash::default().to_string(), shred_version: SHRED_VERSION as u32, @@ -386,10 +358,10 @@ mod tests { [initial_num_active_validators] .node_keypair .pubkey(); - let now = timestamp(); + let timestamp1 = timestamp(); let new_active_validator_last_voted_slots = RestartLastVotedForkSlots::new( new_active_validator, - now, + timestamp1, &test_state.last_voted_fork_slots, Hash::default(), SHRED_VERSION, @@ -399,11 +371,11 @@ mod tests { test_state .slots_aggregate .aggregate(new_active_validator_last_voted_slots), - Some(LastVotedForkSlotsRecord { + LastVotedForkSlotsAggregateResult::Inserted(LastVotedForkSlotsRecord { last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), last_vote_bankhash: Hash::default().to_string(), shred_version: SHRED_VERSION as u32, - wallclock: now, + wallclock: timestamp1, }), ); let expected_active_percent = @@ -436,7 +408,7 @@ mod tests { test_state .slots_aggregate .aggregate(new_active_validator_last_voted_slots), - Some(LastVotedForkSlotsRecord { + LastVotedForkSlotsAggregateResult::Inserted(LastVotedForkSlotsRecord { last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), last_vote_bankhash: Hash::default().to_string(), shred_version: SHRED_VERSION as u32, @@ -458,7 +430,7 @@ mod tests { [initial_num_active_validators] .node_keypair .pubkey(); - // Allow specific validator to replace message. + // Do not allow validator to replace message. let now = timestamp(); let replace_message_validator_last_fork = RestartLastVotedForkSlots::new( replace_message_validator, @@ -471,13 +443,18 @@ mod tests { assert_eq!( test_state .slots_aggregate - .aggregate(replace_message_validator_last_fork), - Some(LastVotedForkSlotsRecord { - last_voted_fork_slots: vec![root_slot + 1, root_slot + 4, root_slot + 5], - last_vote_bankhash: Hash::default().to_string(), - shred_version: SHRED_VERSION as u32, - wallclock: now, - }), + .aggregate(replace_message_validator_last_fork.clone()), + LastVotedForkSlotsAggregateResult::DifferentVersionExists( + RestartLastVotedForkSlots::new( + replace_message_validator, + timestamp1, + &test_state.last_voted_fork_slots, + Hash::default(), + SHRED_VERSION + ) + .unwrap(), + replace_message_validator_last_fork + ), ); assert_eq!( test_state.slots_aggregate.min_active_percent(), @@ -486,7 +463,10 @@ mod tests { let mut actual_slots = Vec::from_iter(test_state.slots_aggregate.slots_to_repair_iter().cloned()); actual_slots.sort(); - assert_eq!(actual_slots, vec![root_slot + 1]); + assert_eq!( + actual_slots, + vec![root_slot + 1, root_slot + 2, root_slot + 3] + ); // test that message from my pubkey is ignored. assert_eq!( @@ -502,18 +482,40 @@ mod tests { ) .unwrap(), ), - None, + LastVotedForkSlotsAggregateResult::AlreadyExists, ); + // Test that someone sending super old vote is still counted in active peers. 
+ let super_old_validator = test_state.validator_voting_keypairs + [initial_num_active_validators + 2] + .node_keypair + .pubkey(); + let super_old_validator_last_voted_slots = RestartLastVotedForkSlots::new( + super_old_validator, + timestamp(), + &[root_slot - 1], + Hash::default(), + SHRED_VERSION, + ) + .unwrap(); + assert_eq!( + test_state + .slots_aggregate + .aggregate(super_old_validator_last_voted_slots), + LastVotedForkSlotsAggregateResult::Inserted(LastVotedForkSlotsRecord { + last_voted_fork_slots: vec![], + last_vote_bankhash: Hash::default().to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: timestamp(), + }), + ); assert_eq!( test_state.slots_aggregate.get_final_result(), LastVotedForkSlotsFinalResult { slots_stake_map: vec![ (root_slot + 1, 500), - (root_slot + 2, 400), - (root_slot + 3, 400), - (root_slot + 4, 100), - (root_slot + 5, 100), + (root_slot + 2, 500), + (root_slot + 3, 500), ] .into_iter() .collect(), @@ -521,13 +523,13 @@ mod tests { LastVotedForkSlotsEpochInfo { epoch: 0, total_stake: 1000, - actively_voting_stake: 500, - actively_voting_for_this_epoch_stake: 500, + actively_voting_stake: 600, + actively_voting_for_this_epoch_stake: 600, }, LastVotedForkSlotsEpochInfo { epoch: 1, total_stake: 1000, - actively_voting_stake: 500, + actively_voting_stake: 600, actively_voting_for_this_epoch_stake: 0, } ], @@ -536,7 +538,7 @@ mod tests { } #[test] - fn test_aggregate_from_record() { + fn test_aggregate_from_record_success() { let mut test_state = test_aggregate_init(); let root_slot = test_state.root_slot; let last_vote_bankhash = Hash::new_unique(); @@ -559,7 +561,7 @@ mod tests { &record, ) .unwrap(), - Some(record.clone()), + LastVotedForkSlotsAggregateResult::Inserted(record.clone()), ); // Before 33% voted for slot in this epoch, the percentage should be 0. assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); @@ -579,7 +581,7 @@ mod tests { .unwrap(); assert_eq!( test_state.slots_aggregate.aggregate(last_voted_fork_slots), - Some(LastVotedForkSlotsRecord { + LastVotedForkSlotsAggregateResult::Inserted(LastVotedForkSlotsRecord { wallclock: now, last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), last_vote_bankhash: last_vote_bankhash.to_string(), @@ -602,33 +604,41 @@ mod tests { ) .unwrap(), ), - None, + LastVotedForkSlotsAggregateResult::AlreadyExists, ); - // But if it's a new record from the same validator, it will be replaced. + // If it's a new record from the same validator, it will be ignored. 
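// (Editor's aside: a standalone sketch of the first-message-wins rule that
// the assertions below exercise; the generic key and message types are
// assumptions, not the patch's concrete types.)
fn freeze_first<K, V>(
    store: &mut std::collections::HashMap<K, V>,
    key: K,
    new: V,
) -> Option<(V, V)>
where
    K: std::hash::Hash + Eq,
    V: PartialEq + Clone,
{
    if let Some(old) = store.get(&key) {
        if *old == new {
            return None; // identical resend: the AlreadyExists case
        }
        // Changed message: keep the old one and surface both for conflict logs.
        return Some((old.clone(), new));
    }
    store.insert(key, new); // the first message from this key is the one kept
    None
}
// (End of sketch.)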
let time2 = timestamp(); let last_voted_fork_slots2 = vec![root_slot + 1, root_slot + 2, root_slot + 3, root_slot + 4]; let last_vote_bankhash2 = Hash::new_unique(); + let new_last_voted_fork_slots = RestartLastVotedForkSlots::new( + test_state.validator_voting_keypairs[0] + .node_keypair + .pubkey(), + time2, + &last_voted_fork_slots2, + last_vote_bankhash2, + SHRED_VERSION, + ) + .unwrap(); assert_eq!( - test_state.slots_aggregate.aggregate( + test_state + .slots_aggregate + .aggregate(new_last_voted_fork_slots.clone(),), + LastVotedForkSlotsAggregateResult::DifferentVersionExists( RestartLastVotedForkSlots::new( test_state.validator_voting_keypairs[0] .node_keypair .pubkey(), - time2, - &last_voted_fork_slots2, - last_vote_bankhash2, + time1, + &test_state.last_voted_fork_slots, + last_vote_bankhash, SHRED_VERSION, ) .unwrap(), + new_last_voted_fork_slots ), - Some(LastVotedForkSlotsRecord { - wallclock: time2, - last_voted_fork_slots: last_voted_fork_slots2.clone(), - last_vote_bankhash: last_vote_bankhash2.to_string(), - shred_version: SHRED_VERSION as u32, - }), ); // percentage doesn't change since it's a replace. assert_eq!(test_state.slots_aggregate.min_active_percent(), 40.0); @@ -650,7 +660,7 @@ mod tests { } ) .unwrap(), - None, + LastVotedForkSlotsAggregateResult::AlreadyExists, ); assert_eq!( test_state.slots_aggregate.get_final_result(), @@ -659,7 +669,6 @@ mod tests { (root_slot + 1, 400), (root_slot + 2, 400), (root_slot + 3, 400), - (root_slot + 4, 100), ] .into_iter() .collect(), @@ -704,7 +713,7 @@ mod tests { &last_voted_fork_slots_record, ) .unwrap(), - Some(last_voted_fork_slots_record.clone()), + LastVotedForkSlotsAggregateResult::Inserted(last_voted_fork_slots_record.clone()), ); // Then test that it fails if the record is invalid. 
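As a reading aid for the update_epoch_info rewrite above, the per-epoch stake accounting can be modeled in isolation. The following is an editor's sketch under simplifying assumptions (plain u64 stakes instead of the bank's epoch_node_id_to_stake lookups and checked arithmetic), not code from the patch:

struct EpochInfo {
    epoch: u64,
    actively_voting_stake: u64,
    actively_voting_for_this_epoch_stake: u64,
}

// A validator whose last vote lands in `last_vote_epoch` is counted as
// active in every tracked epoch; it also counts as having voted for each
// tracked epoch up to and including `last_vote_epoch`, since voting in epoch
// n implies having voted for at least one slot in epoch n - 1.
fn update_epoch_info(epochs: &mut [EpochInfo], stake: u64, last_vote_epoch: u64) {
    for entry in epochs.iter_mut() {
        entry.actively_voting_stake += stake;
        if entry.epoch <= last_vote_epoch {
            entry.actively_voting_for_this_epoch_stake += stake;
        }
    }
}

Because each validator is inserted at most once (a later, different message is reported as a conflict instead of replacing state), the old subtraction path for downgraded votes is no longer needed.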
diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 9e89fa968ee8b3..34dde659728a39 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -2,13 +2,14 @@ use { crate::{ - heaviest_fork_aggregate::HeaviestForkAggregate, + heaviest_fork_aggregate::{HeaviestForkAggregate, HeaviestForkAggregateResult}, last_voted_fork_slots_aggregate::{ - LastVotedForkSlotsAggregate, LastVotedForkSlotsEpochInfo, LastVotedForkSlotsFinalResult, + LastVotedForkSlotsAggregate, LastVotedForkSlotsAggregateResult, + LastVotedForkSlotsEpochInfo, LastVotedForkSlotsFinalResult, }, solana::wen_restart_proto::{ - self, GenerateSnapshotRecord, HeaviestForkAggregateFinal, HeaviestForkAggregateRecord, - HeaviestForkRecord, LastVotedForkSlotsAggregateFinal, + self, ConflictMessage, GenerateSnapshotRecord, HeaviestForkAggregateFinal, + HeaviestForkAggregateRecord, HeaviestForkRecord, LastVotedForkSlotsAggregateFinal, LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsEpochInfoRecord, LastVotedForkSlotsRecord, State as RestartState, WenRestartProgress, }, @@ -256,15 +257,29 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( for new_last_voted_fork_slots in cluster_info.get_restart_last_voted_fork_slots(&mut cursor) { let from = new_last_voted_fork_slots.from.to_string(); - if let Some(record) = - last_voted_fork_slots_aggregate.aggregate(new_last_voted_fork_slots) - { - progress - .last_voted_fork_slots_aggregate - .as_mut() - .unwrap() - .received - .insert(from, record); + match last_voted_fork_slots_aggregate.aggregate(new_last_voted_fork_slots) { + LastVotedForkSlotsAggregateResult::Inserted(record) => { + progress + .last_voted_fork_slots_aggregate + .as_mut() + .unwrap() + .received + .insert(from, record); + } + LastVotedForkSlotsAggregateResult::DifferentVersionExists( + old_record, + new_record, + ) => { + info!("Different LastVotedForkSlots message exists from {from}: {old_record:#?} vs {new_record:#?}"); + progress.conflict_message.insert( + from, + ConflictMessage { + old_message: format!("{:?}", old_record), + new_message: format!("{:?}", new_record), + }, + ); + } + LastVotedForkSlotsAggregateResult::AlreadyExists => (), } } // Because all operations on the aggregate are called from this single thread, we can @@ -666,10 +681,9 @@ pub(crate) fn aggregate_restart_heaviest_fork( ); if let Some(aggregate_record) = &progress.heaviest_fork_aggregate { for (key_string, message) in &aggregate_record.received { - match heaviest_fork_aggregate.aggregate_from_record(key_string, message) { - Err(e) => error!("Failed to aggregate from record: {:?}", e), - Ok(None) => info!("Record {:?} ignored", message), - Ok(_) => (), + if let Err(e) = heaviest_fork_aggregate.aggregate_from_record(key_string, message) { + // Do not abort wen_restart if we got one malformed message. 
+ error!("Failed to aggregate from record: {:?}", e); } } } else { @@ -703,15 +717,30 @@ pub(crate) fn aggregate_restart_heaviest_fork( for new_heaviest_fork in cluster_info.get_restart_heaviest_fork(&mut cursor) { info!("Received new heaviest fork: {:?}", new_heaviest_fork); let from = new_heaviest_fork.from.to_string(); - if let Some(record) = heaviest_fork_aggregate.aggregate(new_heaviest_fork) { - info!("Successfully aggregated new heaviest fork: {:?}", record); - progress - .heaviest_fork_aggregate - .as_mut() - .unwrap() - .received - .insert(from, record); - progress_changed = true; + match heaviest_fork_aggregate.aggregate(new_heaviest_fork) { + HeaviestForkAggregateResult::Inserted(record) => { + info!("Successfully aggregated new heaviest fork: {:?}", record); + progress + .heaviest_fork_aggregate + .as_mut() + .unwrap() + .received + .insert(from, record); + progress_changed = true; + } + HeaviestForkAggregateResult::DifferentVersionExists(old_record, new_record) => { + warn!("Different version from {from} exists old {old_record:#?} vs new {new_record:#?}"); + progress.conflict_message.insert( + from, + ConflictMessage { + old_message: format!("{:?}", old_record), + new_message: format!("{:?}", new_record), + }, + ); + } + HeaviestForkAggregateResult::ZeroStakeIgnored => (), + HeaviestForkAggregateResult::AlreadyExists => (), + HeaviestForkAggregateResult::Malformed => (), } } let current_total_active_stake = heaviest_fork_aggregate.total_active_stake(); @@ -1793,6 +1822,7 @@ mod tests { shred_version: progress.my_snapshot.as_ref().unwrap().shred_version, path: progress.my_snapshot.as_ref().unwrap().path.clone(), }), + ..Default::default() } ); } @@ -2726,6 +2756,7 @@ mod tests { my_heaviest_fork: my_heaviest_fork.clone(), heaviest_fork_aggregate, my_snapshot: my_snapshot.clone(), + ..Default::default() }, ), ] { From e5d5a08d33e01347dab2f3ab682f2c5679dd209d Mon Sep 17 00:00:00 2001 From: OldTyT <42464565+OldTyT@users.noreply.github.com> Date: Sat, 7 Sep 2024 04:49:43 +0300 Subject: [PATCH 313/529] [docs] removing an extra character (#2863) docs(rpc-transaction-history.md): removing an extra character --- docs/src/implemented-proposals/rpc-transaction-history.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/implemented-proposals/rpc-transaction-history.md b/docs/src/implemented-proposals/rpc-transaction-history.md index 607a79ce658b98..522b9160fb9883 100644 --- a/docs/src/implemented-proposals/rpc-transaction-history.md +++ b/docs/src/implemented-proposals/rpc-transaction-history.md @@ -61,7 +61,7 @@ all transactions to build up the necessary metadata. ## Accessing BigTable BigTable has a gRPC endpoint that can be accessed using the -[tonic](https://crates.io/crates/crate)] and the raw protobuf API, as currently +[tonic](https://crates.io/crates/crate) and the raw protobuf API, as currently no higher-level Rust crate for BigTable exists. Practically this makes parsing the results of BigTable queries more complicated but is not a significant issue. 
From aebca79b4d1df7e72c0eb966f62774eeb72ac225 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 9 Sep 2024 10:32:32 +0800 Subject: [PATCH 314/529] fix some args name in the wen restart log (#2860) * fix some args name in the wen restart log * remove hard fork --- wen-restart/src/wen_restart.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 34dde659728a39..924debb2adf226 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -1000,9 +1000,9 @@ pub fn wait_for_wen_restart(config: WenRestartConfig) -> Result<()> { } => { error!( "Wen start finished, please remove --wen_restart and restart with \ - --wait-for-supermajority {} --expected-bank-hash {} --shred-version {}\ - --hard-fork {} --no-snapshot-fetchsnapshot", - slot, hash, shred_version, slot + --wait-for-supermajority {} --expected-bank-hash {} --expected-shred-version {} \ + --no-snapshot-fetch", + slot, hash, shred_version, ); return Ok(()); } From e351b4f81c56d8b9d1abd170a7883cf5286e48a1 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 9 Sep 2024 12:28:08 +0900 Subject: [PATCH 315/529] Remove cargo-install-all.sh hack for solana-genesis (#2866) --- scripts/cargo-install-all.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index ce0572b2af4652..2c1a8dd883801a 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -103,6 +103,7 @@ else solana solana-bench-tps solana-faucet + solana-genesis solana-gossip agave-install solana-keygen @@ -126,10 +127,6 @@ else agave-watchtower ) fi - - #XXX: Ensure `solana-genesis` is built LAST! - # See https://github.com/solana-labs/solana/issues/5826 - BINS+=(solana-genesis) fi binArgs=() From b1de2e0ce873c7aa2e8470a0a3e3e1ae785ca821 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 9 Sep 2024 14:35:28 +0900 Subject: [PATCH 316/529] Ensure to build release binaries without dcou (#2867) * Ensure to build release binaries without dcou * Comment about use of RUSTC_BOOTSTRAP * Mention about lack of use of cargo tree --- scripts/cargo-install-all.sh | 57 ++++++++++++++++++++++--- scripts/check-dev-context-only-utils.sh | 12 +----- scripts/dcou-tainted-packages.sh | 13 ++++++ 3 files changed, 66 insertions(+), 16 deletions(-) create mode 100644 scripts/dcou-tainted-packages.sh diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 2c1a8dd883801a..645e57dcb4f429 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -96,36 +96,41 @@ if [[ $CI_OS_NAME = windows ]]; then solana-test-validator solana-tokens ) + DCOU_BINS=() else ./fetch-perf-libs.sh BINS=( solana - solana-bench-tps solana-faucet solana-genesis solana-gossip agave-install solana-keygen - agave-ledger-tool solana-log-analyzer solana-net-shaper agave-validator rbpf-cli ) + DCOU_BINS=( + agave-ledger-tool + solana-bench-tps + ) # Speed up net.sh deploys by excluding unused binaries if [[ -z "$validatorOnly" ]]; then BINS+=( cargo-build-sbf cargo-test-sbf - solana-dos agave-install-init solana-stake-accounts solana-test-validator solana-tokens agave-watchtower ) + DCOU_BINS+=( + solana-dos + ) fi fi @@ -134,12 +139,52 @@ for bin in "${BINS[@]}"; do binArgs+=(--bin "$bin") done +dcouBinArgs=() +for bin in "${DCOU_BINS[@]}"; do + dcouBinArgs+=(--bin "$bin") +done + +source "$SOLANA_ROOT"/scripts/dcou-tainted-packages.sh + +excludeArgs=() +for package in 
"${dcou_tainted_packages[@]}"; do + excludeArgs+=(--exclude "$package") +done + mkdir -p "$installDir/bin" +# Some binaries (like the notable agave-ledger-tool) need to acitivate +# the dev-context-only-utils feature flag to build. +# Build those binaries separately to avoid the unwanted feature unification. +# Note that `--workspace --exclude ` is needed to really +# inhibit the feature unification due to a cargo bug. Otherwise, feature +# unification happens even if cargo build is run only with `--bin` targets +# which don't depend on dcou as part of dependencies at all. ( set -x - # shellcheck disable=SC2086 # Don't want to double quote $rust_version - "$cargo" $maybeRustVersion build $buildProfileArg "${binArgs[@]}" + # Make sure dcou is really disabled by peeking the (unstable) build plan + # output after turning rustc into the nightly mode with RUSTC_BOOTSTRAP=1. + # In this way, additional requirement of nightly rustc toolchian is avoided. + # Note that `cargo tree` can't be used, because it doesn't support `--bin`. + # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion + if (RUSTC_BOOTSTRAP=1 \ + "$cargo" $maybeRustVersion build \ + -Z unstable-options --build-plan \ + $buildProfileArg "${binArgs[@]}" --workspace "${excludeArgs[@]}" | \ + grep -q -F '"feature=\"dev-context-only-utils\""'); then + echo 'dcou feature activation is incorrctly activated!' && \ + exit 1 + fi + + # Build our production binaries without dcou. + # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion + "$cargo" $maybeRustVersion build \ + $buildProfileArg "${binArgs[@]}" --workspace "${excludeArgs[@]}" + + # Finally, build the remaining dev tools with dcou. + # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion + "$cargo" $maybeRustVersion build \ + $buildProfileArg "${dcouBinArgs[@]}" # Exclude `spl-token` binary for net.sh builds if [[ -z "$validatorOnly" ]]; then @@ -155,7 +200,7 @@ mkdir -p "$installDir/bin" fi ) -for bin in "${BINS[@]}"; do +for bin in "${BINS[@]}" "${DCOU_BINS[@]}"; do cp -fv "target/$buildProfile/$bin" "$installDir"/bin done diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index c1b12eb9cd8931..e64e0691b80a6c 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -28,18 +28,10 @@ source ci/rust-version.sh nightly # as normal (not dev) dependencies, only if you're sure that there's good # reason to bend dev-context-only-utils's original intention and that listed # package isn't part of released binaries. 
-declare tainted_packages=( - solana-accounts-bench - solana-banking-bench - agave-ledger-tool - solana-bench-tps - agave-store-tool - agave-store-histogram - agave-accounts-hash-cache-tool -) +source scripts/dcou-tainted-packages.sh # convert to comma separeted (ref: https://stackoverflow.com/a/53839433) -printf -v allowed '"%s",' "${tainted_packages[@]}" +printf -v allowed '"%s",' "${dcou_tainted_packages[@]}" allowed="${allowed%,}" mode=${1:-full} diff --git a/scripts/dcou-tainted-packages.sh b/scripts/dcou-tainted-packages.sh new file mode 100644 index 00000000000000..72e578bf2d1bfe --- /dev/null +++ b/scripts/dcou-tainted-packages.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2034 # This file is intended to be `source`d +declare dcou_tainted_packages=( + solana-accounts-bench + solana-banking-bench + agave-ledger-tool + solana-bench-tps + agave-store-tool + agave-store-histogram + agave-accounts-hash-cache-tool + solana-dos +) From 2e27278e470ac3aa8598afd70a7c7cc157fe9b91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 21:53:39 +0800 Subject: [PATCH 317/529] build(deps): bump anyhow from 1.0.86 to 1.0.87 (#2876) * build(deps): bump anyhow from 1.0.86 to 1.0.87 Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.86 to 1.0.87. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.86...1.0.87) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7994628db1094..4005d2f41b0079 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -430,9 +430,9 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "aquamarine" diff --git a/Cargo.toml b/Cargo.toml index da9fca1116b54e..4676b17e1bf914 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -183,7 +183,7 @@ agave-transaction-view = { path = "transaction-view", version = "=2.1.0" } aquamarine = "0.3.3" aes-gcm-siv = "0.11.1" ahash = "0.8.10" -anyhow = "1.0.82" +anyhow = "1.0.87" arbitrary = "1.3.2" ark-bn254 = "0.4.0" ark-ec = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bf82a2b4219f9b..b7d9cd54e50a6c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -235,9 +235,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "aquamarine" From 8f5a4d750a45002a8e08cb4cec07f17b1a30f6f9 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 10 Sep 2024 00:04:07 +0900 Subject: [PATCH 318/529] Skip full build with no dcou bin (and extras) 
(#2874) --- scripts/cargo-install-all.sh | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 645e57dcb4f429..3a25fb75892ff1 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -153,6 +153,19 @@ done mkdir -p "$installDir/bin" +cargo_build() { + # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion + "$cargo" $maybeRustVersion build $buildProfileArg "$@" +} + +# This is called to detect both unintended activation AND deactivation of +# dcou, in order to make this rather fragile grep more resilient to bitrot... +check_dcou() { + RUSTC_BOOTSTRAP=1 \ + cargo_build -Z unstable-options --build-plan "$@" | \ + grep -q -F '"feature=\"dev-context-only-utils\""' +} + # Some binaries (like the notable agave-ledger-tool) need to activate # the dev-context-only-utils feature flag to build. # Build those binaries separately to avoid the unwanted feature unification. @@ -166,25 +179,22 @@ mkdir -p "$installDir/bin" # output after turning rustc into the nightly mode with RUSTC_BOOTSTRAP=1. # In this way, additional requirement of nightly rustc toolchain is avoided. # Note that `cargo tree` can't be used, because it doesn't support `--bin`. - # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion - if (RUSTC_BOOTSTRAP=1 \ - "$cargo" $maybeRustVersion build \ - -Z unstable-options --build-plan \ - $buildProfileArg "${binArgs[@]}" --workspace "${excludeArgs[@]}" | \ - grep -q -F '"feature=\"dev-context-only-utils\""'); then - echo 'dcou feature activation is incorrectly activated!' && \ + if check_dcou "${binArgs[@]}" --workspace "${excludeArgs[@]}"; then + echo 'dcou feature activation is incorrectly activated!' exit 1 fi # Build our production binaries without dcou. - # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion - "$cargo" $maybeRustVersion build \ - $buildProfileArg "${binArgs[@]}" --workspace "${excludeArgs[@]}" + cargo_build "${binArgs[@]}" --workspace "${excludeArgs[@]}" # Finally, build the remaining dev tools with dcou. - # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion - "$cargo" $maybeRustVersion build \ - $buildProfileArg "${dcouBinArgs[@]}" + if [[ ${#dcouBinArgs[@]} -gt 0 ]]; then + if ! check_dcou "${dcouBinArgs[@]}"; then + echo 'dcou feature activation incorrectly remains deactivated!' 
+ exit 1 + fi + cargo_build "${dcouBinArgs[@]}" + fi # Exclude `spl-token` binary for net.sh builds if [[ -z "$validatorOnly" ]]; then From 4ac412f3aa6f8512abdb2e095ea983c712ea1b4f Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 9 Sep 2024 11:14:07 -0400 Subject: [PATCH 319/529] Rewords BankFromSnapshotsDirectory error message (#2877) --- ledger/src/bank_forks_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 10a82e25ee2a53..b26e5ef5e07c64 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -52,7 +52,7 @@ pub enum BankForksUtilsError { )] NoBankSnapshotDirectory { flag: String, value: String }, - #[error("failed to load bank: {source}, snapshot: {path}")] + #[error("failed to load bank from snapshot '{path}': {source}")] BankFromSnapshotsDirectory { source: snapshot_utils::SnapshotError, path: PathBuf, From 9a665944beffd3f3b6acf4efe0b617f5b3b517bd Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 9 Sep 2024 10:42:17 -0500 Subject: [PATCH 320/529] account_saver: collect `SanitizedTransaction` references (#2820) --- runtime/src/account_saver.rs | 65 ++++++++++++++++++++++++++---------- runtime/src/bank.rs | 12 ++++++- 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index ec8049617108eb..da4188b87f441c 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -1,7 +1,8 @@ use { + core::borrow::Borrow, solana_sdk::{ account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey, - transaction_context::TransactionAccount, + transaction::SanitizedTransaction, transaction_context::TransactionAccount, }, solana_svm::{ rollback_accounts::RollbackAccounts, @@ -40,22 +41,37 @@ fn max_number_of_accounts_to_collect( .sum() } +// Due to the current geyser interface, we are forced to collect references to +// `SanitizedTransaction` - even if that's not the type that we have. +// Until that interface changes, this function takes in an additional +// `txs_refs` parameter that collects references to `SanitizedTransaction` +// if it's provided. +// If geyser is not used, `txs_refs` should be `None`, since the work would +// be useless. 
pub fn collect_accounts_to_store<'a, T: SVMMessage>( txs: &'a [T], + txs_refs: &'a Option>>, processing_results: &'a mut [TransactionProcessingResult], durable_nonce: &DurableNonce, lamports_per_signature: u64, - collect_transactions: bool, -) -> (Vec<(&'a Pubkey, &'a AccountSharedData)>, Option>) { +) -> ( + Vec<(&'a Pubkey, &'a AccountSharedData)>, + Option>, +) { let collect_capacity = max_number_of_accounts_to_collect(txs, processing_results); let mut accounts = Vec::with_capacity(collect_capacity); - let mut transactions = collect_transactions.then(|| Vec::with_capacity(collect_capacity)); - for (processing_result, transaction) in processing_results.iter_mut().zip(txs) { + let mut transactions = txs_refs + .is_some() + .then(|| Vec::with_capacity(collect_capacity)); + for (index, (processing_result, transaction)) in + processing_results.iter_mut().zip(txs).enumerate() + { let Some(processed_tx) = processing_result.processed_transaction_mut() else { // Don't store any accounts if tx wasn't executed continue; }; + let transaction_ref = txs_refs.as_ref().map(|txs_refs| txs_refs[index].borrow()); match processed_tx { ProcessedTransaction::Executed(executed_tx) => { if executed_tx.execution_details.status.is_ok() { @@ -63,6 +79,7 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( &mut accounts, &mut transactions, transaction, + transaction_ref, &executed_tx.loaded_transaction.accounts, ); } else { @@ -70,6 +87,7 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( &mut accounts, &mut transactions, transaction, + transaction_ref, &mut executed_tx.loaded_transaction.rollback_accounts, durable_nonce, lamports_per_signature, @@ -81,6 +99,7 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( &mut accounts, &mut transactions, transaction, + transaction_ref, &mut fees_only_tx.rollback_accounts, durable_nonce, lamports_per_signature, @@ -93,8 +112,9 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Option>, + collected_account_transactions: &mut Option>, transaction: &'a T, + transaction_ref: Option<&'a SanitizedTransaction>, transaction_accounts: &'a [TransactionAccount], ) { for (_, (address, account)) in (0..transaction.account_keys().len()) @@ -111,15 +131,17 @@ fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( { collected_accounts.push((address, account)); if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions.push(transaction); + collected_account_transactions + .push(transaction_ref.expect("transaction ref must exist if collecting")); } } } fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>, - collected_account_transactions: &mut Option>, + collected_account_transactions: &mut Option>, transaction: &'a T, + transaction_ref: Option<&'a SanitizedTransaction>, rollback_accounts: &'a mut RollbackAccounts, durable_nonce: &DurableNonce, lamports_per_signature: u64, @@ -129,7 +151,8 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( RollbackAccounts::FeePayerOnly { fee_payer_account } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions.push(transaction); + collected_account_transactions + .push(transaction_ref.expect("transaction ref must exist if 
collecting")); } } RollbackAccounts::SameNonceAndFeePayer { nonce } => { @@ -140,7 +163,8 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions.push(transaction); + collected_account_transactions + .push(transaction_ref.expect("transaction ref must exist if collecting")); } } RollbackAccounts::SeparateNonceAndFeePayer { @@ -149,7 +173,8 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( } => { collected_accounts.push((fee_payer_address, &*fee_payer_account)); if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions.push(transaction); + collected_account_transactions + .push(transaction_ref.expect("transaction ref must exist if collecting")); } // Since we know we are dealing with a valid nonce account, @@ -159,7 +184,8 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); if let Some(collected_account_transactions) = collected_account_transactions { - collected_account_transactions.push(transaction); + collected_account_transactions + .push(transaction_ref.expect("transaction ref must exist if collecting")); } } } @@ -297,12 +323,13 @@ mod tests { assert_eq!(max_collected_accounts, 2); for collect_transactions in [false, true] { + let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); let (collected_accounts, transactions) = collect_accounts_to_store( &txs, + &transaction_refs, &mut processing_results, &DurableNonce::default(), 0, - collect_transactions, ); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts @@ -368,12 +395,13 @@ mod tests { let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); for collect_transactions in [false, true] { + let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); let (collected_accounts, transactions) = collect_accounts_to_store( &txs, + &transaction_refs, &mut processing_results, &durable_nonce, 0, - collect_transactions, ); assert_eq!(collected_accounts.len(), 1); assert_eq!( @@ -468,12 +496,13 @@ mod tests { assert_eq!(max_collected_accounts, 2); for collect_transactions in [false, true] { + let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); let (collected_accounts, transactions) = collect_accounts_to_store( &txs, + &transaction_refs, &mut processing_results, &durable_nonce, 0, - collect_transactions, ); assert_eq!(collected_accounts.len(), 2); assert_eq!( @@ -581,12 +610,13 @@ mod tests { assert_eq!(max_collected_accounts, 1); for collect_transactions in [false, true] { + let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); let (collected_accounts, transactions) = collect_accounts_to_store( &txs, + &transaction_refs, &mut processing_results, &durable_nonce, 0, - collect_transactions, ); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts @@ -642,12 +672,13 @@ mod tests { let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); for collect_transactions in [false, true] { + let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); let (collected_accounts, transactions) = collect_accounts_to_store( &txs, + &transaction_refs, &mut processing_results, &durable_nonce, 0, - collect_transactions, ); assert_eq!(collected_accounts.len(), 1); 
assert_eq!( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 0b53a3fd95617f..7ad9c63ee0e468 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3799,12 +3799,22 @@ impl Bank { let ((), store_accounts_us) = measure_us!({ let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + + // If geyser is present, we must collect `SanitizedTransaction` + // references in order to comply with that interface - until it + // is changed. + let maybe_transaction_refs = self + .accounts() + .accounts_db + .has_accounts_update_notifier() + .then(|| sanitized_txs.iter().collect::>()); + let (accounts_to_store, transactions) = collect_accounts_to_store( sanitized_txs, + &maybe_transaction_refs, &mut processing_results, &durable_nonce, lamports_per_signature, - self.accounts().accounts_db.has_accounts_update_notifier(), ); self.rc.accounts.store_cached( (self.slot(), accounts_to_store.as_slice()), From 300701e00f5a02ac2e2127374920a0ffa48133c8 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 9 Sep 2024 12:01:38 -0500 Subject: [PATCH 321/529] minor tweaks to store histogram tool (#2862) * minor tweaks to store histogram tool * fix clippy --- accounts-db/store-histogram/src/main.rs | 34 ++++++++++++------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/accounts-db/store-histogram/src/main.rs b/accounts-db/store-histogram/src/main.rs index fb681ceeb58775..9d0b87327de9d8 100644 --- a/accounts-db/store-histogram/src/main.rs +++ b/accounts-db/store-histogram/src/main.rs @@ -122,10 +122,8 @@ fn calc(info: &[(usize, usize)], bin_widths: Vec) { eprintln!("count {}", bin_all.count); eprintln!("min size {}", bin_all.min_size); eprintln!("max size {}", bin_all.max_size); - eprintln!("avg size {}", bin_all.sum_size / bin_all.count); eprintln!("avg size {}", bin_all.avg); eprintln!("bin width {}", bins[0].slot_max - bins[0].slot_min); - eprintln!("..."); for i in 0..bins.len() { if i > 0 && bins[i - 1].slot_max != bins[i].slot_min { @@ -133,27 +131,28 @@ fn calc(info: &[(usize, usize)], bin_widths: Vec) { } let bin = &bins[i]; if bin.slot_min == 432_000 { - eprintln!("-------------------------------------------------------------------------------------------------------------------------------------------------------------------"); + eprintln!("------------------------------------------------------------------------------------------------------------------------------------------------------------------------"); } - let offset = format!("{:10}", bin.slot_min); + let offset = format!("{:8}", bin.slot_min); if i == 0 { let s = [ - format!("{:10}", "slot age"), - pad(2), + format!("{:8}", "slot age"), + pad(1), format!("{:10}", "count"), - pad(2), + pad(1), format!("{:10}", "min size"), - pad(2), + pad(1), format!("{:10}", "max size"), - pad(2), + pad(1), format!("{:10}", "sum size"), - pad(2), + pad(1), format!("{:10}", "avg size"), - pad(2), + pad(1), format!(",{:>15}", "slot min"), format!(",{:>15}", "count"), format!(",{:>15}", "sum size"), + format!(",{:>7}", "% size"), format!(",{:>15}", "min size"), format!(",{:>15}", "max size"), format!(",{:>15}", "avg size"), @@ -167,20 +166,21 @@ fn calc(info: &[(usize, usize)], bin_widths: Vec) { let s = [ offset, - pad(2), + pad(1), get_stars(bin.count, bin_max.count, 10), - pad(2), + pad(1), get_stars(bin.min_size, bin_max.min_size, 10), - pad(2), + pad(1), get_stars(bin.max_size, bin_max.max_size, 10), - pad(2), + pad(1), get_stars(bin.sum_size, bin_max.sum_size, 10), - pad(2), + pad(1), 
get_stars(bin.avg, bin_max.avg, 10), - pad(2), + pad(1), format!(",{:15}", max_inclusive - bin.slot_min), format!(",{:15}", bin.count), format!(",{:15}", bin.sum_size), + format!(",{:6}%", bin.sum_size * 100 / bin_all.sum_size), format!(",{:15}", bin.min_size), format!(",{:15}", bin.max_size), format!(",{:15}", bin.avg), From d6eda1fec50f9a872f466119946be055676cfbb8 Mon Sep 17 00:00:00 2001 From: Brennan Date: Mon, 9 Sep 2024 12:22:34 -0700 Subject: [PATCH 322/529] retire dead code (#2868) --- core/src/replay_stage.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 4bf1f5cef37d73..723a91fb9f9a15 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -68,7 +68,6 @@ use { solana_sdk::{ clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, feature_set, - genesis_config::ClusterType, hash::Hash, pubkey::Pubkey, saturating_add_assign, @@ -4083,17 +4082,6 @@ impl ReplayStage { } } - pub fn get_unlock_switch_vote_slot(cluster_type: ClusterType) -> Slot { - match cluster_type { - ClusterType::Development => 0, - ClusterType::Devnet => 0, - // Epoch 63 - ClusterType::Testnet => 21_692_256, - // 400_000 slots into epoch 61 - ClusterType::MainnetBeta => 26_752_000, - } - } - fn log_heaviest_fork_failures( heaviest_fork_failures: &Vec, bank_forks: &Arc>, From 3d9541654af7e2bbc3c3a994a8331ed437768d9d Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 9 Sep 2024 15:52:42 -0400 Subject: [PATCH 323/529] Uses Acquire/Release for AccountStorageEntry::alive_bytes (#2878) --- accounts-db/src/accounts_db.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 76b8e3f001c1c8..af886da75c7df3 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1183,7 +1183,7 @@ impl AccountStorageEntry { } pub fn alive_bytes(&self) -> usize { - self.alive_bytes.load(Ordering::SeqCst) + self.alive_bytes.load(Ordering::Acquire) } pub fn written_bytes(&self) -> u64 { @@ -1219,7 +1219,7 @@ impl AccountStorageEntry { *count_and_status = (count_and_status.0 + num_accounts, count_and_status.1); self.approx_store_count .fetch_add(num_accounts, Ordering::Relaxed); - self.alive_bytes.fetch_add(num_bytes, Ordering::SeqCst); + self.alive_bytes.fetch_add(num_bytes, Ordering::Release); } fn try_available(&self) -> bool { @@ -1269,7 +1269,7 @@ impl AccountStorageEntry { self.id(), ); - self.alive_bytes.fetch_sub(num_bytes, Ordering::SeqCst); + self.alive_bytes.fetch_sub(num_bytes, Ordering::Release); count = count.saturating_sub(num_accounts); *count_and_status = (count, status); count @@ -9371,7 +9371,9 @@ impl AccountsDb { assert_eq!(count_and_status.0, 0); count_and_status.0 = entry.count; } - store.alive_bytes.store(entry.stored_size, Ordering::SeqCst); + store + .alive_bytes + .store(entry.stored_size, Ordering::Release); assert!( store.approx_stored_count() >= entry.count, "{}, {}", @@ -11335,7 +11337,7 @@ pub mod tests { .get_slot_storage_entry(slot) .unwrap() .alive_bytes - .fetch_sub(aligned_stored_size(0), Ordering::Relaxed); + .fetch_sub(aligned_stored_size(0), Ordering::Release); if let Some(latest_full_snapshot_slot) = latest_full_snapshot_slot { accounts.set_latest_full_snapshot_slot(latest_full_snapshot_slot); @@ -13854,7 +13856,7 @@ pub mod tests { let storage0 = accounts_db.get_and_assert_single_storage(slot); storage0.accounts.scan_accounts(|account| { - let before_size = 
storage0.alive_bytes.load(Ordering::Acquire); + let before_size = storage0.alive_bytes(); let account_info = accounts_db .accounts_index .get_cloned(account.pubkey()) @@ -13867,7 +13869,7 @@ pub mod tests { assert_eq!(account_info.0, slot); let reclaims = [account_info]; accounts_db.remove_dead_accounts(reclaims.iter(), None, true); - let after_size = storage0.alive_bytes.load(Ordering::Acquire); + let after_size = storage0.alive_bytes(); if storage0.count() == 0 && AccountsFileProvider::HotStorage == accounts_db.accounts_file_provider { @@ -15137,14 +15139,14 @@ pub mod tests { for (_, store) in accounts.storage.iter() { assert_eq!(store.count_and_status.read().0, 0); - assert_eq!(store.alive_bytes.load(Ordering::Acquire), 0); + assert_eq!(store.alive_bytes(), 0); } accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { assert_eq!(store.id(), 0); assert_eq!(store.count_and_status.read().0, count); - assert_eq!(store.alive_bytes.load(Ordering::Acquire), 2); + assert_eq!(store.alive_bytes(), 2); } }); From 89050f3cb7e76d9e273f10bea5e8207f2452f79f Mon Sep 17 00:00:00 2001 From: dmakarov Date: Mon, 9 Sep 2024 16:52:45 -0400 Subject: [PATCH 324/529] Retain only candidates that should be purged or with non-empty slot list (#2679) * Retain only candidates that should be purged or with non-empty slot list * Clean accounts older than root while scanning candidates for purging * Clippy fix * Rename clean_accounts_older_than_root * Add retained_keys_count metric * Split clean_account_older_than_root into collecting and handling parts * Implement feedback * Remove debug * Add debug_assert * Check reclaims --- accounts-db/src/accounts_db.rs | 282 ++++++++++++++++----------------- 1 file changed, 138 insertions(+), 144 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index af886da75c7df3..02449fcc021c33 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1352,8 +1352,6 @@ impl StoreAccountsTiming { struct CleaningInfo { slot_list: SlotList, ref_count: u64, - /// True for pubkeys mapping to older versions of accounts that should be purged. - should_purge: bool, } /// This is the return type of AccountsDb::construct_candidate_clean_keys. @@ -2752,59 +2750,54 @@ impl AccountsDb { .expect("Cluster type must be set at initialization") } - /// Reclaim older states of accounts older than max_clean_root_inclusive for AccountsDb bloat mitigation. - /// Any accounts which are removed from the accounts index are returned in PubkeysRemovedFromAccountsIndex. - /// These should NOT be unref'd later from the accounts index. - fn clean_accounts_older_than_root( + /// While scanning cleaning candidates obtain slots that can be + /// reclaimed for each pubkey. In addition, if the pubkey is + /// removed from the index, insert in pubkeys_removed_from_accounts_index. 
+ fn collect_reclaims( &self, - candidates: &[RwLock>], + pubkey: &Pubkey, max_clean_root_inclusive: Option, ancient_account_cleans: &AtomicU64, epoch_schedule: &EpochSchedule, - ) -> (ReclaimResult, PubkeysRemovedFromAccountsIndex) { - let pubkeys_removed_from_accounts_index = HashSet::default(); + pubkeys_removed_from_accounts_index: &Mutex, + ) -> SlotList { let one_epoch_old = self.get_oldest_non_ancient_slot(epoch_schedule); - let pubkeys_removed_from_accounts_index = Mutex::new(pubkeys_removed_from_accounts_index); - let mut clean_rooted = Measure::start("clean_old_root-ms"); - let reclaim_vecs = candidates - .par_iter() - .filter_map(|candidates_bin| { - let mut reclaims = Vec::new(); - for (pubkey, cleaning_info) in candidates_bin.read().unwrap().iter() { - if cleaning_info.should_purge { - let removed_from_index = self.accounts_index.clean_rooted_entries( - pubkey, - &mut reclaims, - max_clean_root_inclusive, - ); - if removed_from_index { - pubkeys_removed_from_accounts_index - .lock() - .unwrap() - .insert(*pubkey); - } - } - } - - (!reclaims.is_empty()).then(|| { - // figure out how many ancient accounts have been reclaimed - let old_reclaims = reclaims - .iter() - .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) - .sum(); - ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed); - reclaims - }) - }) - .collect::>(); + let mut reclaims = Vec::new(); + let removed_from_index = self.accounts_index.clean_rooted_entries( + pubkey, + &mut reclaims, + max_clean_root_inclusive, + ); + if removed_from_index { + pubkeys_removed_from_accounts_index + .lock() + .unwrap() + .insert(*pubkey); + } + if !reclaims.is_empty() { + // figure out how many ancient accounts have been reclaimed + let old_reclaims = reclaims + .iter() + .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) + .sum(); + ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed); + } clean_rooted.stop(); - let pubkeys_removed_from_accounts_index = - pubkeys_removed_from_accounts_index.into_inner().unwrap(); self.clean_accounts_stats .clean_old_root_us .fetch_add(clean_rooted.as_us(), Ordering::Relaxed); + reclaims + } + /// Reclaim older states of accounts older than max_clean_root_inclusive for AccountsDb bloat mitigation. + /// Any accounts which are removed from the accounts index are returned in PubkeysRemovedFromAccountsIndex. + /// These should NOT be unref'd later from the accounts index. 
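// A hedged sketch of the two-phase shape this refactor lands on (names and
// types are editorial placeholders, not the real interfaces): the handling
// phase now only consumes a reclaim list that the scan phase already
// produced, so no second walk over the candidate bins is needed.
type Slot = u64;

struct Cleaner;

impl Cleaner {
    fn handle_reclaims(&self, reclaims: &[Slot]) -> usize {
        // Phase 2: act on the precollected list; nothing here re-walks the index.
        reclaims.len()
    }

    fn clean(&self, collected_reclaims: Vec<Slot>) -> usize {
        // Phase 1 (the scan) already produced `collected_reclaims`.
        if collected_reclaims.is_empty() {
            return 0;
        }
        self.handle_reclaims(&collected_reclaims)
    }
}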
+ fn clean_accounts_older_than_root( + &self, + reclaims: &SlotList, + pubkeys_removed_from_accounts_index: &HashSet, + ) -> ReclaimResult { let mut measure = Measure::start("clean_old_root_reclaims"); // Don't reset from clean, since the pubkeys in those stores may need to be unref'ed @@ -2812,18 +2805,18 @@ impl AccountsDb { let reset_accounts = false; let reclaim_result = self.handle_reclaims( - (!reclaim_vecs.is_empty()).then(|| reclaim_vecs.iter().flatten()), + (!reclaims.is_empty()).then(|| reclaims.iter()), None, reset_accounts, - &pubkeys_removed_from_accounts_index, + pubkeys_removed_from_accounts_index, HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), ); measure.stop(); - debug!("{} {}", clean_rooted, measure); + debug!("{}", measure); self.clean_accounts_stats .clean_old_root_reclaim_us .fetch_add(measure.as_us(), Ordering::Relaxed); - (reclaim_result, pubkeys_removed_from_accounts_index) + reclaim_result } fn do_reset_uncleaned_roots(&self, max_clean_root: Option) { @@ -2856,9 +2849,8 @@ impl AccountsDb { CleaningInfo { slot_list, ref_count, - should_purge: _, }, - ) in bin.iter().filter(|x| !x.1.slot_list.is_empty()) + ) in bin.iter() { let mut failed_slot = None; let all_stores_being_deleted = slot_list.len() as RefCount == *ref_count; @@ -3349,7 +3341,10 @@ impl AccountsDb { let not_found_on_fork_accum = AtomicU64::new(0); let missing_accum = AtomicU64::new(0); let useful_accum = AtomicU64::new(0); - + let reclaims: SlotList = Vec::with_capacity(num_candidates as usize); + let reclaims = Mutex::new(reclaims); + let pubkeys_removed_from_accounts_index: PubkeysRemovedFromAccountsIndex = HashSet::new(); + let pubkeys_removed_from_accounts_index = Mutex::new(pubkeys_removed_from_accounts_index); // parallel scan the index. let do_clean_scan = || { candidates.par_iter().for_each(|candidates_bin| { @@ -3363,86 +3358,97 @@ impl AccountsDb { // avoid capturing the HashMap in the // closure passed to scan thus making // conflicting read and write borrows. - candidates_bin - .iter_mut() - .for_each(|(candidate_pubkey, candidate_info)| { - self.accounts_index.scan( - [*candidate_pubkey].iter(), - |_candidate_pubkey, slot_list_and_ref_count, _entry| { - let mut useless = true; - if let Some((slot_list, ref_count)) = slot_list_and_ref_count { - // find the highest rooted slot in the slot list - let index_in_slot_list = self.accounts_index.latest_slot( - None, - slot_list, - max_clean_root_inclusive, - ); + candidates_bin.retain(|candidate_pubkey, candidate_info| { + let mut should_purge = false; + self.accounts_index.scan( + [*candidate_pubkey].iter(), + |_candidate_pubkey, slot_list_and_ref_count, _entry| { + let mut useless = true; + if let Some((slot_list, ref_count)) = slot_list_and_ref_count { + // find the highest rooted slot in the slot list + let index_in_slot_list = self.accounts_index.latest_slot( + None, + slot_list, + max_clean_root_inclusive, + ); - match index_in_slot_list { - Some(index_in_slot_list) => { - // found info relative to max_clean_root - let (slot, account_info) = - &slot_list[index_in_slot_list]; - if account_info.is_zero_lamport() { + match index_in_slot_list { + Some(index_in_slot_list) => { + // found info relative to max_clean_root + let (slot, account_info) = &slot_list[index_in_slot_list]; + if account_info.is_zero_lamport() { + useless = false; + // The latest one is zero lamports. We may be able to purge it. + // Add all the rooted entries that contain this pubkey. + // We know the highest rooted entry is zero lamports. 
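// A mini-example of the zero-lamport rule stated just above, with
// hypothetical simplified types: when the newest rooted version of an
// account carries zero lamports, every rooted version of it becomes a
// purge candidate.
#[derive(Clone)]
struct AccountVersion {
    slot: u64,
    lamports: u64,
    rooted: bool,
}

fn purgeable_slots(slot_list: &[AccountVersion]) -> Vec<u64> {
    let newest_rooted = slot_list.iter().filter(|v| v.rooted).max_by_key(|v| v.slot);
    match newest_rooted {
        Some(latest) if latest.lamports == 0 => {
            slot_list.iter().filter(|v| v.rooted).map(|v| v.slot).collect()
        }
        _ => Vec::new(), // newest rooted version still holds lamports: keep it
    }
}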
+ candidate_info.slot_list = + self.accounts_index.get_rooted_entries( + slot_list, + max_clean_root_inclusive, + ); + candidate_info.ref_count = ref_count; + } else { + found_not_zero += 1; + } + if uncleaned_roots.contains(slot) { + // Assertion enforced by `accounts_index.get()`, the latest slot + // will not be greater than the given `max_clean_root` + if let Some(max_clean_root_inclusive) = + max_clean_root_inclusive + { + assert!(slot <= &max_clean_root_inclusive); + } + if slot_list.len() > 1 { + // no need to purge old accounts if there is only 1 slot in the slot list + should_purge = true; + purges_old_accounts_local += 1; useless = false; - // The latest one is zero lamports. We may be able to purge it. - // Add all the rooted entries that contain this pubkey. - // We know the highest rooted entry is zero lamports. - candidate_info.slot_list = - self.accounts_index.get_rooted_entries( - slot_list, - max_clean_root_inclusive, - ); - candidate_info.ref_count = ref_count; } else { - found_not_zero += 1; + self.clean_accounts_stats + .uncleaned_roots_slot_list_1 + .fetch_add(1, Ordering::Relaxed); } - if uncleaned_roots.contains(slot) { - // Assertion enforced by `accounts_index.get()`, the latest slot - // will not be greater than the given `max_clean_root` - if let Some(max_clean_root_inclusive) = - max_clean_root_inclusive - { - assert!(slot <= &max_clean_root_inclusive); - } - if slot_list.len() > 1 { - // no need to purge old accounts if there is only 1 slot in the slot list - candidate_info.should_purge = true; - purges_old_accounts_local += 1; - useless = false; - } else { - self.clean_accounts_stats - .uncleaned_roots_slot_list_1 - .fetch_add(1, Ordering::Relaxed); - } - } - } - None => { - // This pubkey is in the index but not in a root slot, so clean - // it up by adding it to the to-be-purged list. - // - // Also, this pubkey must have been touched by some slot since - // it was in the dirty list, so we assume that the slot it was - // touched in must be unrooted. - not_found_on_fork += 1; - candidate_info.should_purge = true; - purges_old_accounts_local += 1; - useless = false; } } - } else { - missing += 1; - } - if !useless { - useful += 1; + None => { + // This pubkey is in the index but not in a root slot, so clean + // it up by adding it to the to-be-purged list. + // + // Also, this pubkey must have been touched by some slot since + // it was in the dirty list, so we assume that the slot it was + // touched in must be unrooted. 
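// A small sketch of the retain-based pruning this patch switches to, with a
// std HashMap standing in for the real candidates bin: entries whose slot
// list ends up empty are dropped in the same pass that examined them,
// replacing the old scan-then-filter sequence.
use std::collections::HashMap;

fn prune_candidates(candidates: &mut HashMap<u64, Vec<u64>>) {
    candidates.retain(|_pubkey, slot_list| {
        // per-candidate scan work would happen here
        !slot_list.is_empty()
    });
}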
+ not_found_on_fork += 1; + should_purge = true; + purges_old_accounts_local += 1; + useless = false; + } } - AccountsIndexScanResult::OnlyKeepInMemoryIfDirty - }, - None, - false, - ScanFilter::All, + } else { + missing += 1; + } + if !useless { + useful += 1; + } + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty + }, + None, + false, + ScanFilter::All, + ); + if should_purge { + let reclaims_new = self.collect_reclaims( + candidate_pubkey, + max_clean_root_inclusive, + &ancient_account_cleans, + epoch_schedule, + &pubkeys_removed_from_accounts_index, ); - }); + if !reclaims_new.is_empty() { + reclaims.lock().unwrap().extend(reclaims_new); + } + } + !candidate_info.slot_list.is_empty() + }); found_not_zero_accum.fetch_add(found_not_zero, Ordering::Relaxed); not_found_on_fork_accum.fetch_add(not_found_on_fork, Ordering::Relaxed); missing_accum.fetch_add(missing, Ordering::Relaxed); @@ -3457,16 +3463,13 @@ impl AccountsDb { } accounts_scan.stop(); - + let retained_keys_count = Self::count_pubkeys(&candidates); + let reclaims = reclaims.into_inner().unwrap(); + let mut pubkeys_removed_from_accounts_index = + pubkeys_removed_from_accounts_index.into_inner().unwrap(); let mut clean_old_rooted = Measure::start("clean_old_roots"); - let ((purged_account_slots, removed_accounts), mut pubkeys_removed_from_accounts_index) = - self.clean_accounts_older_than_root( - &candidates, - max_clean_root_inclusive, - &ancient_account_cleans, - epoch_schedule, - ); - + let (purged_account_slots, removed_accounts) = + self.clean_accounts_older_than_root(&reclaims, &pubkeys_removed_from_accounts_index); self.do_reset_uncleaned_roots(max_clean_root_inclusive); clean_old_rooted.stop(); @@ -3481,13 +3484,10 @@ impl AccountsDb { CleaningInfo { slot_list, ref_count, - should_purge: _, }, ) in candidates_bin.write().unwrap().iter_mut() { - if slot_list.is_empty() { - continue; // seems simpler than filtering. `candidates` contains all the pubkeys we original started with - } + debug_assert!(!slot_list.is_empty(), "candidate slot_list can't be empty"); if purged_account_slots.contains_key(pubkey) { *ref_count = self.accounts_index.ref_count_from_storage(pubkey); } @@ -3563,7 +3563,6 @@ impl AccountsDb { let CleaningInfo { slot_list, ref_count: _, - should_purge: _, } = cleaning_info; (!slot_list.is_empty()).then_some(( *pubkey, @@ -3631,6 +3630,7 @@ impl AccountsDb { ("dirty_pubkeys_count", key_timings.dirty_pubkeys_count, i64), ("useful_keys", useful_accum.load(Ordering::Relaxed), i64), ("total_keys_count", num_candidates, i64), + ("retained_keys_count", retained_keys_count, i64), ( "scan_found_not_zero", found_not_zero_accum.load(Ordering::Relaxed), @@ -3866,11 +3866,8 @@ impl AccountsDb { let CleaningInfo { slot_list, ref_count: _, - should_purge: _, } = cleaning_info; - if slot_list.is_empty() { - return false; - } + debug_assert!(!slot_list.is_empty(), "candidate slot_list can't be empty"); // Only keep candidates where the entire history of the account in the root set // can be purged. All AppendVecs for those updates are dead. 
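// A hedged illustration (placeholder types, simplified arithmetic) of the
// "entire history is dead" test this comment refers to: deletion is only
// safe when the index ref count is fully accounted for by the rooted
// versions being purged.
type RefCount = u64;

fn entire_history_purgeable(slot_list_len: usize, ref_count: RefCount) -> bool {
    slot_list_len as RefCount == ref_count
}

fn main() {
    // All three known references are rooted versions being purged: deletable.
    assert!(entire_history_purgeable(3, 3));
    // One reference lives outside the purge set (e.g. an unrooted fork): keep.
    assert!(!entire_history_purgeable(2, 3));
}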
for (slot, _account_info) in slot_list.iter() { @@ -12959,7 +12956,6 @@ pub mod tests { CleaningInfo { slot_list: rooted_entries, ref_count, - should_purge: false, }, ); } @@ -12970,7 +12966,6 @@ pub mod tests { CleaningInfo { slot_list: list, ref_count, - should_purge: _, }, ) in candidates_bin.iter() { @@ -15275,7 +15270,6 @@ CleaningInfo { slot_list: vec![(slot, account_info)], ref_count: 1, - should_purge: false, }, ); let accounts_db = AccountsDb::new_single_for_tests(); From c7e44c1389e01b576b1180387db20971eb1c8d3c Mon Sep 17 00:00:00 2001 From: asolana <110843012+ksolana@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:30:30 -0700 Subject: [PATCH 325/529] Set zstd compression level to 1 as it offers the fastest compression with a small size tradeoff. (#2729) Per the zstd benchmarks at https://github.com/facebook/zstd/tree/dev?tab=readme-ov-file#benchmarks, a lower compression level trades a slightly larger archive for noticeably faster compression (the cited figures: 710 MB/s vs. 670 MB/s), so the snapshot encoder now uses level 1 instead of the default. --- runtime/src/snapshot_utils.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 1ff4c5096d0007..0dfbd8a13e91a3 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1083,8 +1083,9 @@ fn archive_snapshot( encoder.finish().map_err(E::FinishEncoder)?; } ArchiveFormat::TarZstd => { + // Compression level of 1 is optimized for speed. let mut encoder = - zstd::stream::Encoder::new(archive_file, 0).map_err(E::CreateEncoder)?; + zstd::stream::Encoder::new(archive_file, 1).map_err(E::CreateEncoder)?; do_archive_files(&mut encoder)?; encoder.finish().map_err(E::FinishEncoder)?; } From 6bd5d38ed554e928fb05f197a7e6e80db35142f1 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Tue, 10 Sep 2024 22:44:20 +0900 Subject: [PATCH 326/529] Box NewTaskPayload to greatly reduce its size (#2881) --- Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + unified-scheduler-pool/Cargo.toml | 1 + unified-scheduler-pool/src/lib.rs | 15 ++++++++------- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4005d2f41b0079..056b0bf28653c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8186,6 +8186,7 @@ dependencies = [ "solana-sdk", "solana-timings", "solana-unified-scheduler-logic", + "static_assertions", "vec_extract_if_polyfill", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b7d9cd54e50a6c..d78d07199c5eb1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6772,6 +6772,7 @@ dependencies = [ "solana-sdk", "solana-timings", "solana-unified-scheduler-logic", + "static_assertions", "vec_extract_if_polyfill", ] diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml index 46a020c661280d..e4dd6015ed44cd 100644 --- a/unified-scheduler-pool/Cargo.toml +++ b/unified-scheduler-pool/Cargo.toml @@ -22,6 +22,7 @@ solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-timings = { workspace = true } solana-unified-scheduler-logic = { workspace = true } +static_assertions = { workspace = true } vec_extract_if_polyfill = { workspace = true } [dev-dependencies] diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 1b9c471137ca70..8c2745e138fd33 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -37,6 +37,7 @@ use { }, solana_timings::ExecuteTimings, solana_unified_scheduler_logic::{SchedulingStateMachine, Task, UsageQueue}, + static_assertions::const_assert_eq, std::{ fmt::Debug,
marker::PhantomData, @@ -475,7 +476,8 @@ enum SubchanneledPayload { CloseSubchannel, } -type NewTaskPayload = SubchanneledPayload; +type NewTaskPayload = SubchanneledPayload>; +const_assert_eq!(mem::size_of::(), 16); // A tiny generic message type to synchronize multiple threads everytime some contextual data needs // to be switched (ie. SchedulingContext), just using a single communication channel. @@ -1092,10 +1094,9 @@ impl, TH: TaskHandler> ThreadManager { // Prepare for the new session. match new_task_receiver.recv() { - Ok(NewTaskPayload::OpenSubchannel(( - new_context, - new_result_with_timings, - ))) => { + Ok(NewTaskPayload::OpenSubchannel(context_and_result_with_timings)) => { + let (new_context, new_result_with_timings) = + *context_and_result_with_timings; // We just received subsequent (= not initial) session and about to // enter into the preceding `while(!is_finished) {...}` loop again. // Before that, propagate new SchedulingContext to handler threads @@ -1332,10 +1333,10 @@ impl, TH: TaskHandler> ThreadManager { assert!(!self.are_threads_joined()); assert_matches!(self.session_result_with_timings, None); self.new_task_sender - .send(NewTaskPayload::OpenSubchannel(( + .send(NewTaskPayload::OpenSubchannel(Box::new(( context, result_with_timings, - ))) + )))) .expect("no new session after aborted"); } } From 91dfa6b6bad9039b99b1740b22be8e2a521af57f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 14:17:54 +0000 Subject: [PATCH 327/529] build(deps): bump serde from 1.0.209 to 1.0.210 (#2875) * build(deps): bump serde from 1.0.209 to 1.0.210 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.209 to 1.0.210. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.209...v1.0.210) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files * sync --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: yihau --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 056b0bf28653c6..843ea41f8240d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5110,9 +5110,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5128,9 +5128,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 4676b17e1bf914..6bc5d413b00132 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,9 +338,9 @@ rustls = { version = "0.21.12", default-features = false, features = ["quic"] } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" -serde = "1.0.209" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.210" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" -serde_derive = "1.0.209" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_derive = "1.0.210" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.128" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index d78d07199c5eb1..a3684236e05d39 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4261,9 +4261,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -4279,9 +4279,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", From fe64cf654c935735bb9831b146f587d970799839 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 10 Sep 2024 09:50:39 -0500 Subject: [PATCH 328/529] bank::(un)lock_accounts generic (#2835) --- accounts-db/src/accounts.rs | 10 +++++----- runtime/src/bank.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 8de5431318a3a0..2584f900edbc49 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs 
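The hunks below make Accounts::lock_accounts and unlock_accounts generic over any Tx: SVMMessage rather than the concrete SanitizedTransaction. A minimal sketch of that generic shape, assuming a hand-rolled stand-in trait (SvmMessage and its single method here are invented for illustration; the real SVMMessage trait has a different surface):

trait SvmMessage {
    fn account_keys(&self) -> &[u64];
}

struct AccountLocks;

impl AccountLocks {
    // Accepting any iterator of &Tx lets both the legacy transaction type and
    // newer message types flow through a single locking path.
    fn lock_accounts<'a, Tx: SvmMessage + 'a>(
        &self,
        txs: impl Iterator<Item = &'a Tx>,
    ) -> Vec<Result<(), ()>> {
        txs.map(|tx| {
            let _keys = tx.account_keys(); // real code would record read/write locks
            Ok(())
        })
        .collect()
    }
}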
@@ -514,9 +514,9 @@ impl Accounts { /// This function will prevent multiple threads from modifying the same account state at the /// same time #[must_use] - pub fn lock_accounts<'a>( + pub fn lock_accounts<'a, Tx: SVMMessage + 'a>( &self, - txs: impl Iterator, + txs: impl Iterator, tx_account_lock_limit: usize, ) -> Vec> { // Validate the account locks, then get iterator if successful validation. @@ -566,9 +566,9 @@ impl Accounts { } /// Once accounts are unlocked, new transactions that modify that state can enter the pipeline - pub fn unlock_accounts<'a>( + pub fn unlock_accounts<'a, Tx: SVMMessage + 'a>( &self, - txs_and_results: impl Iterator)> + Clone, + txs_and_results: impl Iterator)> + Clone, ) { if !txs_and_results.clone().any(|(_, res)| res.is_ok()) { return; @@ -578,7 +578,7 @@ impl Accounts { debug!("bank unlock accounts"); for (tx, res) in txs_and_results { if res.is_ok() { - let tx_account_locks = TransactionAccountLocksIterator::new(tx.message()); + let tx_account_locks = TransactionAccountLocksIterator::new(tx); account_locks.unlock_accounts(tx_account_locks.accounts_with_is_writable()); } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7ad9c63ee0e468..f6f4f8edee9bb5 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3425,9 +3425,9 @@ impl Bank { account_overrides } - pub fn unlock_accounts<'a>( + pub fn unlock_accounts<'a, Tx: SVMMessage + 'a>( &self, - txs_and_results: impl Iterator)> + Clone, + txs_and_results: impl Iterator)> + Clone, ) { self.rc.accounts.unlock_accounts(txs_and_results) } From 8116c10021f09c806159852f65d37ffe6d5a118e Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 10 Sep 2024 21:27:20 +0400 Subject: [PATCH 329/529] Extract feature-set crate (#2172) * extract feature-set crate * update feature_set dependents * re-export with deprecation notice * fix path after rebase * fmt * fmt after rebase * fixes after rebase * unused import * fix import * unused import * post-rebase fixes * fix path * missing frozen-abi support * update lock file * use workspace lints * fix paths * fix import * remove unused import * remove unused deps * consolidate imports * add back dep (not unused after all) --- Cargo.lock | 36 ++ Cargo.toml | 2 + banks-server/Cargo.toml | 1 + banks-server/src/banks_server.rs | 4 +- bench-tps/Cargo.toml | 1 + bench-tps/src/bench.rs | 2 +- cli/Cargo.toml | 1 + cli/src/cluster_query.rs | 7 +- cli/src/feature.rs | 2 +- cli/src/program.rs | 2 +- cli/src/stake.rs | 3 +- cli/tests/program.rs | 2 +- core/Cargo.toml | 1 + core/src/banking_stage/consumer.rs | 2 +- .../forward_packet_batches_by_accounts.rs | 8 +- core/src/banking_stage/forwarder.rs | 6 +- core/src/banking_stage/qos_service.rs | 2 +- .../unprocessed_transaction_storage.rs | 5 +- core/src/consensus.rs | 4 +- core/src/repair/serve_repair.rs | 6 +- core/src/replay_stage.rs | 3 +- core/src/shred_fetch_stage.rs | 2 +- core/src/window_service.rs | 6 +- cost-model/Cargo.toml | 1 + cost-model/src/cost_model.rs | 2 +- cost-model/src/transaction_cost.rs | 2 +- gossip/Cargo.toml | 1 + gossip/src/cluster_info.rs | 2 +- ledger-tool/Cargo.toml | 1 + ledger-tool/src/main.rs | 2 +- ledger/Cargo.toml | 1 + ledger/benches/blockstore_processor.rs | 2 +- ledger/src/blockstore_processor.rs | 3 +- program-runtime/Cargo.toml | 1 + program-runtime/src/invoke_context.rs | 7 +- program-test/Cargo.toml | 1 + program-test/src/lib.rs | 2 +- .../address-lookup-table-tests/Cargo.toml | 1 + .../tests/create_lookup_table_ix.rs | 3 +- programs/address-lookup-table/Cargo.toml | 1 + 
.../address-lookup-table/src/processor.rs | 2 +- programs/bpf_loader/Cargo.toml | 1 + programs/bpf_loader/src/lib.rs | 6 +- programs/bpf_loader/src/syscalls/cpi.rs | 4 +- programs/bpf_loader/src/syscalls/mem_ops.rs | 6 +- programs/bpf_loader/src/syscalls/mod.rs | 22 +- programs/sbf/Cargo.lock | 28 ++ programs/sbf/Cargo.toml | 2 + programs/sbf/benches/bpf_loader.rs | 7 +- programs/sbf/tests/programs.rs | 8 +- programs/sbf/tests/sysvar.rs | 2 +- programs/stake-tests/Cargo.toml | 1 + .../tests/test_move_stake_and_lamports.rs | 4 +- programs/stake/Cargo.toml | 1 + programs/stake/src/lib.rs | 7 +- programs/stake/src/stake_instruction.rs | 9 +- programs/stake/src/stake_state.rs | 2 +- programs/system/Cargo.toml | 1 + programs/vote/Cargo.toml | 1 + programs/vote/src/vote_processor.rs | 2 +- programs/vote/src/vote_state/mod.rs | 2 +- programs/zk-token-proof/Cargo.toml | 1 + programs/zk-token-proof/src/lib.rs | 2 +- rpc/Cargo.toml | 1 + rpc/src/rpc.rs | 2 +- runtime/Cargo.toml | 1 + runtime/src/bank.rs | 7 +- runtime/src/bank/builtin_programs.rs | 7 +- .../bank/builtins/core_bpf_migration/mod.rs | 2 +- .../core_bpf_migration/target_builtin.rs | 3 +- runtime/src/bank/builtins/mod.rs | 10 +- runtime/src/bank/fee_distribution.rs | 2 +- .../partitioned_epoch_rewards/distribution.rs | 3 +- .../src/bank/partitioned_epoch_rewards/mod.rs | 2 +- .../bank/partitioned_epoch_rewards/sysvar.rs | 4 +- runtime/src/bank/sysvar_cache.rs | 3 +- runtime/src/bank/tests.rs | 10 +- runtime/src/genesis_utils.rs | 2 +- sdk/Cargo.toml | 2 + sdk/benches/ed25519_instructions.rs | 2 +- sdk/benches/secp256k1_instructions.rs | 2 +- sdk/feature-set/Cargo.toml | 32 ++ sdk/feature-set/build.rs | 1 + .../feature_set.rs => feature-set/src/lib.rs} | 424 +++++++++--------- sdk/src/ed25519_instruction.rs | 9 +- sdk/src/lib.rs | 3 +- sdk/src/precompiles.rs | 4 +- sdk/src/reserved_account_keys.rs | 6 +- sdk/src/secp256k1_instruction.rs | 14 +- sdk/src/transaction/mod.rs | 2 +- sdk/src/transaction/sanitized.rs | 2 +- svm/Cargo.toml | 1 + svm/src/account_loader.rs | 4 +- svm/src/message_processor.rs | 2 +- svm/src/transaction_processor.rs | 8 +- svm/tests/conformance.rs | 2 +- svm/tests/mock_bank.rs | 2 +- test-validator/Cargo.toml | 1 + test-validator/src/lib.rs | 5 +- turbine/Cargo.toml | 1 + turbine/src/cluster_nodes.rs | 2 +- turbine/src/sigverify_shreds.rs | 2 +- version/Cargo.toml | 1 + version/src/legacy.rs | 7 +- version/src/lib.rs | 7 +- 105 files changed, 488 insertions(+), 381 deletions(-) create mode 100644 sdk/feature-set/Cargo.toml create mode 120000 sdk/feature-set/build.rs rename sdk/{src/feature_set.rs => feature-set/src/lib.rs} (69%) diff --git a/Cargo.lock b/Cargo.lock index 843ea41f8240d8..18bae8a7e97ac0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -186,6 +186,7 @@ dependencies = [ "solana-core", "solana-cost-model", "solana-entry", + "solana-feature-set", "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", @@ -5617,6 +5618,7 @@ dependencies = [ "num-derive", "num-traits", "rustc_version 0.4.1", + "solana-feature-set", "solana-log-collector", "solana-program", "solana-program-runtime", @@ -5631,6 +5633,7 @@ dependencies = [ "assert_matches", "bincode", "solana-address-lookup-table-program", + "solana-feature-set", "solana-program-test", "solana-sdk", ] @@ -5702,6 +5705,7 @@ dependencies = [ "futures 0.3.30", "solana-banks-interface", "solana-client", + "solana-feature-set", "solana-runtime", "solana-sdk", "solana-send-transaction-service", @@ -5743,6 +5747,7 @@ dependencies = [ "solana-connection-cache", 
"solana-core", "solana-faucet", + "solana-feature-set", "solana-genesis", "solana-gossip", "solana-local-cluster", @@ -5818,6 +5823,7 @@ dependencies = [ "solana-bn254", "solana-compute-budget", "solana-curve25519", + "solana-feature-set", "solana-log-collector", "solana-measure", "solana-poseidon", @@ -5988,6 +5994,7 @@ dependencies = [ "solana-connection-cache", "solana-decode-error", "solana-faucet", + "solana-feature-set", "solana-loader-v4-program", "solana-logger", "solana-program-runtime", @@ -6229,6 +6236,7 @@ dependencies = [ "solana-core", "solana-cost-model", "solana-entry", + "solana-feature-set", "solana-fee", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -6290,6 +6298,7 @@ dependencies = [ "rustc_version 0.4.1", "solana-builtins-default-costs", "solana-compute-budget", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -6437,6 +6446,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-feature-set" +version = "2.1.0" +dependencies = [ + "lazy_static", + "rustc_version 0.4.1", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-program", +] + [[package]] name = "solana-fee" version = "2.1.0" @@ -6568,6 +6588,7 @@ dependencies = [ "solana-client", "solana-connection-cache", "solana-entry", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-ledger", @@ -6693,6 +6714,7 @@ dependencies = [ "solana-bpf-loader-program", "solana-cost-model", "solana-entry", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -7102,6 +7124,7 @@ dependencies = [ "rustc_version 0.4.1", "serde", "solana-compute-budget", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-log-collector", @@ -7135,6 +7158,7 @@ dependencies = [ "solana-banks-server", "solana-bpf-loader-program", "solana-compute-budget", + "solana-feature-set", "solana-inline-spl", "solana-log-collector", "solana-logger", @@ -7259,6 +7283,7 @@ dependencies = [ "solana-client", "solana-entry", "solana-faucet", + "solana-feature-set", "solana-gossip", "solana-inline-spl", "solana-ledger", @@ -7444,6 +7469,7 @@ dependencies = [ "solana-compute-budget-program", "solana-config-program", "solana-cost-model", + "solana-feature-set", "solana-fee", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -7552,6 +7578,7 @@ dependencies = [ "solana-bn254", "solana-decode-error", "solana-derivation-path", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -7694,6 +7721,7 @@ dependencies = [ "rustc_version 0.4.1", "solana-compute-budget", "solana-config-program", + "solana-feature-set", "solana-log-collector", "solana-logger", "solana-program-runtime", @@ -7710,6 +7738,7 @@ dependencies = [ "assert_matches", "bincode", "rustc_version 0.4.1", + "solana-feature-set", "solana-program-test", "solana-sdk", "solana-vote-program", @@ -7822,6 +7851,7 @@ dependencies = [ "solana-bpf-loader-program", "solana-compute-budget", "solana-compute-budget-program", + "solana-feature-set", "solana-fee", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -7895,6 +7925,7 @@ dependencies = [ "serde", "serde_derive", "solana-compute-budget", + "solana-feature-set", "solana-log-collector", "solana-logger", "solana-program-runtime", @@ -7916,6 +7947,7 @@ dependencies = [ "solana-cli-output", "solana-compute-budget", "solana-core", + "solana-feature-set", "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", @@ -8116,6 +8148,7 @@ dependencies = [ "rayon", "rustls", 
"solana-entry", + "solana-feature-set", "solana-gossip", "solana-ledger", "solana-logger", @@ -8207,6 +8240,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_derive", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-sanitize", @@ -8243,6 +8277,7 @@ dependencies = [ "rustc_version 0.4.1", "serde", "serde_derive", + "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", @@ -8351,6 +8386,7 @@ dependencies = [ "curve25519-dalek 4.1.3", "num-derive", "num-traits", + "solana-feature-set", "solana-log-collector", "solana-program-runtime", "solana-sdk", diff --git a/Cargo.toml b/Cargo.toml index 6bc5d413b00132..a31088169447ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,6 +107,7 @@ members = [ "sdk/clock", "sdk/decode-error", "sdk/derivation-path", + "sdk/feature-set", "sdk/gen-headers", "sdk/hash", "sdk/macro", @@ -388,6 +389,7 @@ solana-derivation-path = { path = "sdk/derivation-path", version = "=2.1.0" } solana-download-utils = { path = "download-utils", version = "=2.1.0" } solana-entry = { path = "entry", version = "=2.1.0" } solana-faucet = { path = "faucet", version = "=2.1.0" } +solana-feature-set = { path = "sdk/feature-set", version = "=2.1.0" } solana-fee = { path = "fee", version = "=2.1.0" } solana-frozen-abi = { path = "frozen-abi", version = "=2.1.0" } solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=2.1.0" } diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml index 6cf5f77f92548b..a2fe94c781cc02 100644 --- a/banks-server/Cargo.toml +++ b/banks-server/Cargo.toml @@ -15,6 +15,7 @@ crossbeam-channel = { workspace = true } futures = { workspace = true } solana-banks-interface = { workspace = true } solana-client = { workspace = true } +solana-feature-set = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 10cc43a5878619..7051daac45cb0f 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -8,6 +8,7 @@ use { TransactionSimulationDetails, TransactionStatus, }, solana_client::connection_cache::ConnectionCache, + solana_feature_set::{move_precompile_verification_to_svm, FeatureSet}, solana_runtime::{ bank::{Bank, TransactionSimulationResult}, bank_forks::BankForks, @@ -17,7 +18,6 @@ use { account::Account, clock::Slot, commitment_config::CommitmentLevel, - feature_set::{self, FeatureSet}, hash::Hash, message::{Message, SanitizedMessage}, pubkey::Pubkey, @@ -165,7 +165,7 @@ fn verify_transaction( transaction.verify()?; let move_precompile_verification_to_svm = - feature_set.is_active(&feature_set::move_precompile_verification_to_svm::id()); + feature_set.is_active(&move_precompile_verification_to_svm::id()); if !move_precompile_verification_to_svm { transaction.verify_precompiles(feature_set)?; } diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 562f31885bea2f..3c3c5f718998c4 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -48,6 +48,7 @@ thiserror = { workspace = true } [dev-dependencies] serial_test = { workspace = true } +solana-feature-set = { workspace = true } solana-local-cluster = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-test-validator = { workspace = true } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 0c85af917965ca..3e65ee5604d60e 100644 --- 
a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -1224,10 +1224,10 @@ pub fn fund_keypairs( mod tests { use { super::*, + solana_feature_set::FeatureSet, solana_runtime::{bank::Bank, bank_client::BankClient, bank_forks::BankForks}, solana_sdk::{ commitment_config::CommitmentConfig, - feature_set::FeatureSet, fee_calculator::FeeRateGovernor, genesis_config::{create_genesis_config, GenesisConfig}, native_token::sol_to_lamports, diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 1e05e67eba420e..3c5c8372aeaeb7 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -38,6 +38,7 @@ solana-compute-budget = { workspace = true } solana-config-program = { workspace = true } solana-connection-cache = { workspace = true } solana-decode-error = { workspace = true } +solana-feature-set = { workspace = true } solana-loader-v4-program = { workspace = true } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 0e66d95a5d3bd1..3792a4689f2d3c 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -46,7 +46,6 @@ use { clock::{self, Clock, Slot}, commitment_config::CommitmentConfig, epoch_schedule::Epoch, - feature_set, hash::Hash, message::Message, native_token::lamports_to_sol, @@ -1898,8 +1897,10 @@ pub fn process_show_stakes( let stake_history = from_account(&stake_history_account).ok_or_else(|| { CliError::RpcRequestError("Failed to deserialize stake history".to_string()) })?; - let new_rate_activation_epoch = - get_feature_activation_epoch(rpc_client, &feature_set::reduce_stake_warmup_cooldown::id())?; + let new_rate_activation_epoch = get_feature_activation_epoch( + rpc_client, + &solana_feature_set::reduce_stake_warmup_cooldown::id(), + )?; stake_account_progress_bar.finish_and_clear(); let mut stake_accounts: Vec = vec![]; diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 66696b11c74190..885cfe03dd6749 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -14,6 +14,7 @@ use { input_validators::*, keypair::*, }, solana_cli_output::{cli_version::CliVersion, QuietDisplay, VerboseDisplay}, + solana_feature_set::FEATURE_NAMES, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ @@ -25,7 +26,6 @@ use { clock::Slot, epoch_schedule::EpochSchedule, feature::{self, Feature}, - feature_set::FEATURE_NAMES, genesis_config::ClusterType, message::Message, pubkey::Pubkey, diff --git a/cli/src/program.rs b/cli/src/program.rs index a879ab3347c91a..d4192589aa86a5 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -40,6 +40,7 @@ use { tpu_client::{TpuClient, TpuClientConfig}, }, solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::{FeatureSet, FEATURE_NAMES}, solana_program_runtime::invoke_context::InvokeContext, solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_remote_wallet::remote_wallet::RemoteWalletManager, @@ -58,7 +59,6 @@ use { bpf_loader_upgradeable::{self, get_program_data_address, UpgradeableLoaderState}, commitment_config::CommitmentConfig, compute_budget, - feature_set::{FeatureSet, FEATURE_NAMES}, instruction::{Instruction, InstructionError}, message::Message, packet::PACKET_DATA_SIZE, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index d70a41c0a57e6f..6073ea4f873c01 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -43,7 +43,6 @@ use { clock::{Clock, UnixTimestamp, SECONDS_PER_DAY}, commitment_config::CommitmentConfig, 
epoch_schedule::EpochSchedule, - feature_set, message::Message, native_token::Sol, pubkey::Pubkey, @@ -2567,7 +2566,7 @@ pub fn process_show_stake_account( })?; let new_rate_activation_epoch = get_feature_activation_epoch( rpc_client, - &feature_set::reduce_stake_warmup_cooldown::id(), + &solana_feature_set::reduce_stake_warmup_cooldown::id(), )?; let mut state = build_stake_state( diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 6bec3bcc28b36f..f1c441a8654ea4 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -12,6 +12,7 @@ use { }, solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_faucet::faucet::run_local_faucet, + solana_feature_set::enable_alt_bn128_syscall, solana_rpc::rpc::JsonRpcConfig, solana_rpc_client::rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient}, solana_rpc_client_api::config::RpcTransactionConfig, @@ -23,7 +24,6 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::enable_alt_bn128_syscall, fee_calculator::FeeRateGovernor, pubkey::Pubkey, rent::Rent, diff --git a/core/Cargo.toml b/core/Cargo.toml index d107296bba0e6e..bde6144e142ae0 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -52,6 +52,7 @@ solana-compute-budget = { workspace = true } solana-connection-cache = { workspace = true } solana-cost-model = { workspace = true } solana-entry = { workspace = true } +solana-feature-set = { workspace = true } solana-fee = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 39e3dc1fd95d5c..25942d11c1f914 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -11,6 +11,7 @@ use { BankingStageStats, }, itertools::Itertools, + solana_feature_set as feature_set, solana_ledger::token_balances::collect_token_balances, solana_measure::{measure::Measure, measure_us}, solana_poh::poh_recorder::{ @@ -24,7 +25,6 @@ use { solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, - feature_set, fee::FeeBudgetLimits, message::SanitizedMessage, saturating_add_assign, diff --git a/core/src/banking_stage/forward_packet_batches_by_accounts.rs b/core/src/banking_stage/forward_packet_batches_by_accounts.rs index e01ca3b213b81e..1d86cfb9753b1b 100644 --- a/core/src/banking_stage/forward_packet_batches_by_accounts.rs +++ b/core/src/banking_stage/forward_packet_batches_by_accounts.rs @@ -6,8 +6,9 @@ use { cost_tracker::{CostTracker, UpdatedCosts}, transaction_cost::TransactionCost, }, + solana_feature_set::FeatureSet, solana_perf::packet::Packet, - solana_sdk::{feature_set::FeatureSet, transaction::SanitizedTransaction}, + solana_sdk::transaction::SanitizedTransaction, std::sync::Arc, }; @@ -170,9 +171,10 @@ mod tests { super::*, crate::banking_stage::unprocessed_packet_batches::DeserializedPacket, solana_cost_model::transaction_cost::UsageCostDetails, + solana_feature_set::FeatureSet, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, feature_set::FeatureSet, message::Message, - pubkey::Pubkey, system_instruction, transaction::Transaction, + compute_budget::ComputeBudgetInstruction, message::Message, pubkey::Pubkey, + system_instruction, transaction::Transaction, }, }; diff 
--git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 4d39ea65dfc7b8..563c93861cd30e 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -12,15 +12,13 @@ use { }, solana_client::connection_cache::ConnectionCache, solana_connection_cache::client_connection::ClientConnection as TpuConnection, + solana_feature_set::FeatureSet, solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_perf::{data_budget::DataBudget, packet::Packet}, solana_poh::poh_recorder::PohRecorder, solana_runtime::bank_forks::BankForks, - solana_sdk::{ - feature_set::FeatureSet, pubkey::Pubkey, transaction::SanitizedTransaction, - transport::TransportError, - }, + solana_sdk::{pubkey::Pubkey, transaction::SanitizedTransaction, transport::TransportError}, solana_streamer::sendmmsg::batch_send, std::{ iter::repeat, diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 8af53ca4d9e7b3..21e19be6f0ec52 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -8,11 +8,11 @@ use { solana_cost_model::{ cost_model::CostModel, cost_tracker::UpdatedCosts, transaction_cost::TransactionCost, }, + solana_feature_set::FeatureSet, solana_measure::measure::Measure, solana_runtime::bank::Bank, solana_sdk::{ clock::Slot, - feature_set::FeatureSet, saturating_add_assign, transaction::{self, SanitizedTransaction, TransactionError}, }, diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 212b7f4f2f48d8..35bc04a2997995 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -18,11 +18,12 @@ use { itertools::Itertools, min_max_heap::MinMaxHeap, solana_accounts_db::account_locks::validate_account_locks, + solana_feature_set::FeatureSet, solana_measure::measure_us, solana_runtime::bank::Bank, solana_sdk::{ - clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, - saturating_add_assign, transaction::SanitizedTransaction, + clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, hash::Hash, saturating_add_assign, + transaction::SanitizedTransaction, }, solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ diff --git a/core/src/consensus.rs b/core/src/consensus.rs index c079dbb7cde51d..417e29254c2d42 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1,4 +1,4 @@ -use {crate::replay_stage::DUPLICATE_THRESHOLD, solana_sdk::feature_set}; +use crate::replay_stage::DUPLICATE_THRESHOLD; pub mod fork_choice; pub mod heaviest_subtree_fork_choice; @@ -594,7 +594,7 @@ impl Tower { bank.slot(), bank.hash(), bank.feature_set - .is_active(&feature_set::enable_tower_sync_ix::id()), + .is_active(&solana_feature_set::enable_tower_sync_ix::id()), ) } diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 012ec93ee964d6..9c3a53b2ad053a 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -1441,6 +1441,7 @@ mod tests { use { super::*, crate::repair::repair_response, + solana_feature_set::FeatureSet, solana_gossip::{contact_info::ContactInfo, socketaddr, socketaddr_any}, solana_ledger::{ blockstore::make_many_slot_entries, @@ -1451,10 +1452,7 @@ mod tests { }, solana_perf::packet::{deserialize_from_with_limit, Packet}, solana_runtime::bank::Bank, - solana_sdk::{ - feature_set::FeatureSet, hash::Hash, 
pubkey::Pubkey, signature::Keypair, - timing::timestamp, - }, + solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, timing::timestamp}, solana_streamer::socket::SocketAddrSpace, std::{io::Cursor, net::Ipv4Addr}, }; diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 723a91fb9f9a15..db0ee5aff30d53 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -67,7 +67,6 @@ use { }, solana_sdk::{ clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, - feature_set, hash::Hash, pubkey::Pubkey, saturating_add_assign, @@ -3585,7 +3584,7 @@ impl ReplayStage { .get_hash(last_voted_slot) .expect("Must exist for us to have frozen descendant"), bank.feature_set - .is_active(&feature_set::enable_tower_sync_ix::id()), + .is_active(&solana_feature_set::enable_tower_sync_ix::id()), ); // Since we are updating our tower we need to update associated caches for previously computed // slots as well. diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 8af40645629c23..0776674a4748c1 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -5,6 +5,7 @@ use { bytes::Bytes, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, itertools::Itertools, + solana_feature_set::{self as feature_set, FeatureSet}, solana_gossip::cluster_info::ClusterInfo, solana_ledger::shred::{should_discard_shred, ShredFetchStats}, solana_perf::packet::{PacketBatch, PacketBatchRecycler, PacketFlags, PACKETS_PER_BATCH}, @@ -12,7 +13,6 @@ use { solana_sdk::{ clock::{Slot, DEFAULT_MS_PER_SLOT}, epoch_schedule::EpochSchedule, - feature_set::{self, FeatureSet}, genesis_config::ClusterType, packet::{Meta, PACKET_DATA_SIZE}, pubkey::Pubkey, diff --git a/core/src/window_service.rs b/core/src/window_service.rs index ff902e414ee017..3056090cf9ba94 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -19,6 +19,7 @@ use { }, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, rayon::{prelude::*, ThreadPool}, + solana_feature_set as feature_set, solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ blockstore::{Blockstore, BlockstoreInsertionMetrics, PossibleDuplicateShred}, @@ -30,10 +31,7 @@ use { solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, solana_runtime::bank_forks::BankForks, - solana_sdk::{ - clock::{Slot, DEFAULT_MS_PER_SLOT}, - feature_set, - }, + solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT}, solana_turbine::cluster_nodes, std::{ cmp::Reverse, diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index b1d7949bf63a05..2339f2e9d3eee4 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -15,6 +15,7 @@ lazy_static = { workspace = true } log = { workspace = true } solana-builtins-default-costs = { workspace = true } solana-compute-budget = { workspace = true } +solana-feature-set = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-metrics = { workspace = true } diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index 68162bfcdc5bbd..4c1cc0df6edbfa 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -12,11 +12,11 @@ use { solana_compute_budget::compute_budget_limits::{ DEFAULT_HEAP_COST, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, }, + solana_feature_set::{self as feature_set, FeatureSet}, 
solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - feature_set::{self, FeatureSet}, fee::FeeStructure, instruction::CompiledInstruction, program_utils::limited_deserialize, diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index 9db5832a114a42..3065162c5ee22b 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -199,8 +199,8 @@ mod tests { use { super::*, crate::cost_model::CostModel, + solana_feature_set::FeatureSet, solana_sdk::{ - feature_set::FeatureSet, hash::Hash, message::SimpleAddressLoader, reserved_account_keys::ReservedAccountKeys, diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 510fb9e75e9be7..679c4a93f91e09 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -32,6 +32,7 @@ solana-clap-utils = { workspace = true } solana-client = { workspace = true } solana-connection-cache = { workspace = true } solana-entry = { workspace = true } +solana-feature-set = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-ledger = { workspace = true } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index b49b728e846957..a26ff3a3560e2e 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -45,6 +45,7 @@ use { rand::{seq::SliceRandom, thread_rng, CryptoRng, Rng}, rayon::{prelude::*, ThreadPool, ThreadPoolBuilder}, serde::ser::Serialize, + solana_feature_set::FeatureSet, solana_ledger::shred::Shred, solana_measure::measure::Measure, solana_net_utils::{ @@ -62,7 +63,6 @@ use { solana_sanitize::{Sanitize, SanitizeError}, solana_sdk::{ clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH}, - feature_set::FeatureSet, hash::Hash, pubkey::Pubkey, quic::QUIC_PORT_OFFSET, diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 663201514932e9..39061de55f2382 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -35,6 +35,7 @@ solana-compute-budget = { workspace = true } solana-core = { workspace = true } solana-cost-model = { workspace = true } solana-entry = { workspace = true } +solana-feature-set = { workspace = true } solana-geyser-plugin-manager = { workspace = true } solana-gossip = { workspace = true } solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index ac9e3fb9f929c2..0c74d53f3e41e2 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -35,6 +35,7 @@ use { validator::BlockVerificationMethod, }, solana_cost_model::{cost_model::CostModel, cost_tracker::CostTracker}, + solana_feature_set::{self as feature_set, FeatureSet}, solana_ledger::{ blockstore::{create_new_ledger, Blockstore}, blockstore_options::{AccessType, LedgerColumnOptions}, @@ -62,7 +63,6 @@ use { account_utils::StateMut, clock::{Epoch, Slot}, feature::{self, Feature}, - feature_set::{self, FeatureSet}, genesis_config::ClusterType, inflation::Inflation, native_token::{lamports_to_sol, sol_to_lamports, Sol}, diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 255efa9c8358dd..44367a30fd5ec7 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -45,6 +45,7 @@ solana-accounts-db = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-cost-model = { workspace = true } solana-entry = { workspace = true } 
+solana-feature-set = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-measure = { workspace = true } diff --git a/ledger/benches/blockstore_processor.rs b/ledger/benches/blockstore_processor.rs index e72f75186ca6f2..65b0ac229978a8 100644 --- a/ledger/benches/blockstore_processor.rs +++ b/ledger/benches/blockstore_processor.rs @@ -6,6 +6,7 @@ use { iter::IndexedParallelIterator, prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, }, + solana_feature_set::apply_cost_tracker_during_replay, solana_ledger::{ blockstore_processor::{execute_batch, TransactionBatchWithIndexes}, genesis_utils::{create_genesis_config, GenesisConfigInfo}, @@ -16,7 +17,6 @@ use { }, solana_sdk::{ account::{Account, ReadableAccount}, - feature_set::apply_cost_tracker_during_replay, signature::Keypair, signer::Signer, stake_history::Epoch, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 5273b4601bfe33..b34bdee591dd9c 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -44,7 +44,6 @@ use { }, solana_sdk::{ clock::{Slot, MAX_PROCESSING_AGE}, - feature_set, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, @@ -173,7 +172,7 @@ pub fn execute_batch( let (check_block_cost_limits_result, check_block_cost_limits_us) = measure_us!(if bank .feature_set - .is_active(&feature_set::apply_cost_tracker_during_replay::id()) + .is_active(&solana_feature_set::apply_cost_tracker_during_replay::id()) { check_block_cost_limits(bank, &commit_results, batch.sanitized_transactions()) } else { diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index 413f7b7665ba42..af276bc12562d0 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -22,6 +22,7 @@ percentage = { workspace = true } rand = { workspace = true } serde = { workspace = true } solana-compute-budget = { workspace = true } +solana-feature-set = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-log-collector = { workspace = true } diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 97d8200053a1a5..c96da3e1da3539 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -8,6 +8,7 @@ use { sysvar_cache::SysvarCache, }, solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::{move_precompile_verification_to_svm, FeatureSet}, solana_log_collector::{ic_msg, LogCollector}, solana_measure::measure::Measure, solana_rbpf::{ @@ -22,7 +23,6 @@ use { bpf_loader_deprecated, clock::Slot, epoch_schedule::EpochSchedule, - feature_set::{self, FeatureSet}, hash::Hash, instruction::{AccountMeta, InstructionError}, native_loader, @@ -482,7 +482,7 @@ impl<'a> InvokeContext<'a> { let feature_set = self.get_feature_set(); let move_precompile_verification_to_svm = - feature_set.is_active(&feature_set::move_precompile_verification_to_svm::id()); + feature_set.is_active(&move_precompile_verification_to_svm::id()); if move_precompile_verification_to_svm { let instruction_datas: Vec<_> = message_instruction_datas_iter.collect(); precompile @@ -706,9 +706,10 @@ macro_rules! 
with_mock_invoke_context { ) => { use { solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::FeatureSet, solana_log_collector::LogCollector, solana_sdk::{ - account::ReadableAccount, feature_set::FeatureSet, hash::Hash, sysvar::rent::Rent, + account::ReadableAccount, hash::Hash, sysvar::rent::Rent, transaction_context::TransactionContext, }, solana_type_overrides::sync::Arc, diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index b31e0330a34f92..c96cb8d28b4ceb 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -23,6 +23,7 @@ solana-banks-interface = { workspace = true } solana-banks-server = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-compute-budget = { workspace = true } +solana-feature-set = { workspace = true } solana-inline-spl = { workspace = true } solana-log-collector = { workspace = true } solana-logger = { workspace = true } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index e6744366ec4f5d..d4d6388436c373 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -16,6 +16,7 @@ use { solana_banks_server::banks_server::start_local_server, solana_bpf_loader_program::serialization::serialize_parameters, solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::FEATURE_NAMES, solana_log_collector::ic_msg, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, stable_log, @@ -33,7 +34,6 @@ use { account_info::AccountInfo, clock::{Epoch, Slot}, entrypoint::{deserialize, ProgramResult, SUCCESS}, - feature_set::FEATURE_NAMES, fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE}, genesis_config::{ClusterType, GenesisConfig}, hash::Hash, diff --git a/programs/address-lookup-table-tests/Cargo.toml b/programs/address-lookup-table-tests/Cargo.toml index 0d64f6b66f535c..328e60169b0d62 100644 --- a/programs/address-lookup-table-tests/Cargo.toml +++ b/programs/address-lookup-table-tests/Cargo.toml @@ -15,6 +15,7 @@ edition = { workspace = true } assert_matches = { workspace = true } bincode = { workspace = true } solana-address-lookup-table-program = { workspace = true } +solana-feature-set = { workspace = true } solana-program-test = { workspace = true } solana-sdk = { workspace = true } diff --git a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs index 39ff9aea6604d5..5a640448fb16d2 100644 --- a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs @@ -9,7 +9,6 @@ use { state::{AddressLookupTable, LOOKUP_TABLE_META_SIZE}, }, clock::Slot, - feature_set, instruction::InstructionError, pubkey::Pubkey, rent::Rent, @@ -28,7 +27,7 @@ pub async fn setup_test_context_without_authority_feature() -> ProgramTestContex Some(solana_address_lookup_table_program::processor::Entrypoint::vm), ); program_test.deactivate_feature( - feature_set::relax_authority_signer_check_for_lookup_table_creation::id(), + solana_feature_set::relax_authority_signer_check_for_lookup_table_creation::id(), ); program_test.start_with_context().await } diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index 3ff68a686a7521..30e1ca5a0f3d7b 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -15,6 +15,7 @@ bytemuck = { workspace = true } log = { workspace = true 
} num-derive = { workspace = true } num-traits = { workspace = true } +solana-feature-set = { workspace = true } solana-program = { workspace = true } thiserror = { workspace = true } diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 6274606228b4bd..0d8abe8bd2e260 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -1,4 +1,5 @@ use { + solana_feature_set as feature_set, solana_log_collector::ic_msg, solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk::{ @@ -11,7 +12,6 @@ use { }, }, clock::Slot, - feature_set, instruction::InstructionError, program_utils::limited_deserialize, pubkey::{Pubkey, PUBKEY_BYTES}, diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 4c085663513300..8fb86df6d28ab3 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -18,6 +18,7 @@ scopeguard = { workspace = true } solana-bn254 = { workspace = true } solana-compute-budget = { workspace = true } solana-curve25519 = { workspace = true } +solana-feature-set = { workspace = true } solana-log-collector = { workspace = true } solana-measure = { workspace = true } solana-poseidon = { workspace = true } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 400806bb4c98bd..5e81062c504e97 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -6,6 +6,9 @@ pub mod syscalls; use { solana_compute_budget::compute_budget::MAX_INSTRUCTION_STACK_DEPTH, + solana_feature_set::{ + bpf_account_data_direct_mapping, enable_bpf_loader_set_authority_checked_ix, + }, solana_log_collector::{ic_logger_msg, ic_msg, LogCollector}, solana_measure::measure::Measure, solana_program_runtime::{ @@ -34,9 +37,6 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::Slot, entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, - feature_set::{ - bpf_account_data_direct_mapping, enable_bpf_loader_set_authority_checked_ix, - }, instruction::{AccountMeta, InstructionError}, loader_upgradeable_instruction::UpgradeableLoaderInstruction, loader_v4, native_loader, diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index 94046f5f741560..d4d626e959d8fe 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -2,6 +2,7 @@ use { super::*, crate::serialization::account_data_region_memory_state, scopeguard::defer, + solana_feature_set::{self as feature_set, enable_bpf_loader_set_authority_checked_ix}, solana_measure::measure::Measure, solana_program_runtime::invoke_context::SerializedAccountMetadata, solana_rbpf::{ @@ -9,7 +10,6 @@ use { memory_region::{MemoryRegion, MemoryState}, }, solana_sdk::{ - feature_set::enable_bpf_loader_set_authority_checked_ix, saturating_add_assign, stable_layout::stable_instruction::StableInstruction, syscalls::{ @@ -1593,6 +1593,7 @@ mod tests { super::*, crate::mock_create_vm, assert_matches::assert_matches, + solana_feature_set::bpf_account_data_direct_mapping, solana_program_runtime::{ invoke_context::SerializedAccountMetadata, with_mock_invoke_context, }, @@ -1602,7 +1603,6 @@ mod tests { solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount}, clock::Epoch, - feature_set::bpf_account_data_direct_mapping, instruction::Instruction, system_program, transaction_context::TransactionAccount, diff --git 
a/programs/bpf_loader/src/syscalls/mem_ops.rs b/programs/bpf_loader/src/syscalls/mem_ops.rs index bbdf0ccc4c479f..0367c03d006ad8 100644 --- a/programs/bpf_loader/src/syscalls/mem_ops.rs +++ b/programs/bpf_loader/src/syscalls/mem_ops.rs @@ -70,7 +70,7 @@ declare_builtin_function!( if invoke_context .get_feature_set() - .is_active(&feature_set::bpf_account_data_direct_mapping::id()) + .is_active(&solana_feature_set::bpf_account_data_direct_mapping::id()) { let cmp_result = translate_type_mut::( memory_mapping, @@ -126,7 +126,7 @@ declare_builtin_function!( if invoke_context .get_feature_set() - .is_active(&feature_set::bpf_account_data_direct_mapping::id()) + .is_active(&solana_feature_set::bpf_account_data_direct_mapping::id()) { memset_non_contiguous(dst_addr, c as u8, n, memory_mapping) } else { @@ -151,7 +151,7 @@ fn memmove( ) -> Result { if invoke_context .get_feature_set() - .is_active(&feature_set::bpf_account_data_direct_mapping::id()) + .is_active(&solana_feature_set::bpf_account_data_direct_mapping::id()) { memmove_non_contiguous(dst_addr, src_addr, n, memory_mapping) } else { diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 7661a000da2938..2c5d8e9feed26d 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -18,6 +18,16 @@ use { ALT_BN128_PAIRING_ELEMENT_LEN, ALT_BN128_PAIRING_OUTPUT_LEN, }, solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::{ + self as feature_set, abort_on_invalid_curve, blake3_syscall_enabled, + bpf_account_data_direct_mapping, curve25519_syscall_enabled, + disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, + enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, enable_big_mod_exp_syscall, + enable_get_epoch_stake_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall, + error_on_syscall_bpf_function_hash_collisions, get_sysvar_syscall_enabled, + last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, reject_callx_r10, + remaining_compute_units_syscall_enabled, FeatureSet, + }, solana_log_collector::{ic_logger_msg, ic_msg}, solana_poseidon as poseidon, solana_program_memory::is_nonoverlapping, @@ -33,18 +43,6 @@ use { big_mod_exp::{big_mod_exp, BigModExpParams}, blake3, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS}, - feature_set::bpf_account_data_direct_mapping, - feature_set::FeatureSet, - feature_set::{ - self, abort_on_invalid_curve, blake3_syscall_enabled, curve25519_syscall_enabled, - disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, - enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, - enable_big_mod_exp_syscall, enable_get_epoch_stake_syscall, - enable_partitioned_epoch_reward, enable_poseidon_syscall, - error_on_syscall_bpf_function_hash_collisions, get_sysvar_syscall_enabled, - last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, reject_callx_r10, - remaining_compute_units_syscall_enabled, - }, hash::{Hash, Hasher}, instruction::{AccountMeta, InstructionError, ProcessedSiblingInstruction}, keccak, native_loader, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a3684236e05d39..ae119446dad773 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4612,6 +4612,7 @@ dependencies = [ "log", "num-derive", "num-traits", + "solana-feature-set", "solana-log-collector", "solana-program", "solana-program-runtime", @@ -4660,6 +4661,7 @@ dependencies 
= [ "futures 0.3.30", "solana-banks-interface", "solana-client", + "solana-feature-set", "solana-runtime", "solana-sdk", "solana-send-transaction-service", @@ -4708,6 +4710,7 @@ dependencies = [ "solana-bn254", "solana-compute-budget", "solana-curve25519", + "solana-feature-set", "solana-log-collector", "solana-measure", "solana-poseidon", @@ -4939,6 +4942,7 @@ dependencies = [ "solana-connection-cache", "solana-cost-model", "solana-entry", + "solana-feature-set", "solana-fee", "solana-geyser-plugin-manager", "solana-gossip", @@ -4989,6 +4993,7 @@ dependencies = [ "log", "solana-builtins-default-costs", "solana-compute-budget", + "solana-feature-set", "solana-metrics", "solana-runtime-transaction", "solana-sdk", @@ -5082,6 +5087,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-feature-set" +version = "2.1.0" +dependencies = [ + "lazy_static", + "solana-program", +] + [[package]] name = "solana-fee" version = "2.1.0" @@ -5152,6 +5165,7 @@ dependencies = [ "solana-client", "solana-connection-cache", "solana-entry", + "solana-feature-set", "solana-ledger", "solana-logger", "solana-measure", @@ -5247,6 +5261,7 @@ dependencies = [ "solana-bpf-loader-program", "solana-cost-model", "solana-entry", + "solana-feature-set", "solana-measure", "solana-metrics", "solana-perf", @@ -5494,6 +5509,7 @@ dependencies = [ "rand 0.8.5", "serde", "solana-compute-budget", + "solana-feature-set", "solana-log-collector", "solana-measure", "solana-metrics", @@ -5523,6 +5539,7 @@ dependencies = [ "solana-banks-server", "solana-bpf-loader-program", "solana-compute-budget", + "solana-feature-set", "solana-inline-spl", "solana-log-collector", "solana-logger", @@ -5638,6 +5655,7 @@ dependencies = [ "solana-client", "solana-entry", "solana-faucet", + "solana-feature-set", "solana-gossip", "solana-inline-spl", "solana-ledger", @@ -5771,6 +5789,7 @@ dependencies = [ "solana-compute-budget-program", "solana-config-program", "solana-cost-model", + "solana-feature-set", "solana-fee", "solana-inline-spl", "solana-loader-v4-program", @@ -5840,6 +5859,7 @@ dependencies = [ "solana-bpf-loader-program", "solana-cli-output", "solana-compute-budget", + "solana-feature-set", "solana-fee", "solana-ledger", "solana-log-collector", @@ -6337,6 +6357,7 @@ dependencies = [ "solana-bn254", "solana-decode-error", "solana-derivation-path", + "solana-feature-set", "solana-program", "solana-program-memory", "solana-sanitize", @@ -6433,6 +6454,7 @@ dependencies = [ "bincode", "log", "solana-config-program", + "solana-feature-set", "solana-log-collector", "solana-program-runtime", "solana-sdk", @@ -6534,6 +6556,7 @@ dependencies = [ "serde_derive", "solana-bpf-loader-program", "solana-compute-budget", + "solana-feature-set", "solana-fee", "solana-loader-v4-program", "solana-log-collector", @@ -6592,6 +6615,7 @@ dependencies = [ "solana-cli-output", "solana-compute-budget", "solana-core", + "solana-feature-set", "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", @@ -6708,6 +6732,7 @@ dependencies = [ "rayon", "rustls", "solana-entry", + "solana-feature-set", "solana-gossip", "solana-ledger", "solana-measure", @@ -6784,6 +6809,7 @@ dependencies = [ "semver", "serde", "serde_derive", + "solana-feature-set", "solana-sanitize", "solana-sdk", "solana-serde-varint", @@ -6811,6 +6837,7 @@ dependencies = [ "num-traits", "serde", "serde_derive", + "solana-feature-set", "solana-metrics", "solana-program", "solana-program-runtime", @@ -6887,6 +6914,7 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + 
"solana-feature-set", "solana-log-collector", "solana-program-runtime", "solana-sdk", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index a82a5efa92fa4e..7a60f3a55d6e3b 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -38,6 +38,7 @@ solana-cli-output = { path = "../../cli-output", version = "=2.1.0" } solana-compute-budget = { path = "../../compute-budget", version = "=2.1.0" } solana-curve25519 = { path = "../../curves/curve25519", version = "=2.1.0" } solana-decode-error = { path = "../../sdk/decode-error", version = "=2.1.0" } +solana-feature-set = { path = "../../sdk/feature-set", version = "=2.1.0" } solana-fee = { path = "../../fee", version = "=2.1.0" } solana-ledger = { path = "../../ledger", version = "=2.1.0" } solana-log-collector = { path = "../../log-collector", version = "=2.1.0" } @@ -108,6 +109,7 @@ solana-accounts-db = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-cli-output = { workspace = true } solana-compute-budget = { workspace = true } +solana-feature-set = { workspace = true } solana-fee = { workspace = true } solana-ledger = { workspace = true } solana-log-collector = { workspace = true } diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index ab5e950faab874..7475e9ea2d0f53 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -8,9 +8,8 @@ )] use { - solana_rbpf::memory_region::MemoryState, - solana_sdk::{feature_set::bpf_account_data_direct_mapping, signer::keypair::Keypair}, - std::slice, + solana_feature_set::bpf_account_data_direct_mapping, solana_rbpf::memory_region::MemoryState, + solana_sdk::signer::keypair::Keypair, std::slice, }; extern crate test; @@ -22,6 +21,7 @@ use { syscalls::create_program_runtime_environment_v1, }, solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::FeatureSet, solana_measure::measure::Measure, solana_program_runtime::invoke_context::InvokeContext, solana_rbpf::{ @@ -39,7 +39,6 @@ use { bpf_loader, client::SyncClient, entrypoint::SUCCESS, - feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, message::Message, native_loader, diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 1b44bacfb55ffa..19f6b943a51769 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -15,6 +15,7 @@ use { parse_bpf_upgradeable_loader, BpfUpgradeableLoaderAccountType, }, solana_compute_budget::compute_budget::ComputeBudget, + solana_feature_set::{self as feature_set, FeatureSet}, solana_ledger::token_balances::collect_token_balances, solana_program_runtime::invoke_context::mock_process_instruction, solana_rbpf::vm::ContextObject, @@ -44,7 +45,6 @@ use { clock::{UnixTimestamp, MAX_PROCESSING_AGE}, compute_budget::ComputeBudgetInstruction, entrypoint::MAX_PERMITTED_DATA_INCREASE, - feature_set::{self, FeatureSet}, fee::{FeeBudgetLimits, FeeStructure}, fee_calculator::FeeRateGovernor, genesis_config::ClusterType, @@ -334,7 +334,7 @@ fn test_program_sbf_loader_deprecated() { } = create_genesis_config(50); genesis_config .accounts - .remove(&solana_sdk::feature_set::disable_deploy_of_alloc_free_syscall::id()) + .remove(&solana_feature_set::disable_deploy_of_alloc_free_syscall::id()) .unwrap(); let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let program_id = create_program(&bank, &bpf_loader_deprecated::id(), program); @@ -2244,9 +2244,7 @@ fn test_program_sbf_disguised_as_sbf_loader() { .. 
} = create_genesis_config(50); let mut bank = Bank::new_for_tests(&genesis_config); - bank.deactivate_feature( - &solana_sdk::feature_set::remove_bpf_loader_incorrect_program_id::id(), - ); + bank.deactivate_feature(&solana_feature_set::remove_bpf_loader_incorrect_program_id::id()); let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank); let authority_keypair = Keypair::new(); diff --git a/programs/sbf/tests/sysvar.rs b/programs/sbf/tests/sysvar.rs index 92213bb36d33bb..c0ab5321c5bf4f 100644 --- a/programs/sbf/tests/sysvar.rs +++ b/programs/sbf/tests/sysvar.rs @@ -1,6 +1,7 @@ #![cfg(feature = "sbf_rust")] use { + solana_feature_set::disable_fees_sysvar, solana_runtime::{ bank::Bank, bank_client::BankClient, @@ -8,7 +9,6 @@ use { loader_utils::load_upgradeable_program_and_advance_slot, }, solana_sdk::{ - feature_set::disable_fees_sysvar, instruction::{AccountMeta, Instruction}, message::Message, pubkey::Pubkey, diff --git a/programs/stake-tests/Cargo.toml b/programs/stake-tests/Cargo.toml index 7e3a461ba09e2b..f73a7a4195dd0a 100644 --- a/programs/stake-tests/Cargo.toml +++ b/programs/stake-tests/Cargo.toml @@ -14,6 +14,7 @@ edition = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } bincode = { workspace = true } +solana-feature-set = { workspace = true } solana-program-test = { workspace = true } solana-sdk = { workspace = true } solana-vote-program = { workspace = true } diff --git a/programs/stake-tests/tests/test_move_stake_and_lamports.rs b/programs/stake-tests/tests/test_move_stake_and_lamports.rs index 0ef36753337c83..8991d3c5fe6bea 100644 --- a/programs/stake-tests/tests/test_move_stake_and_lamports.rs +++ b/programs/stake-tests/tests/test_move_stake_and_lamports.rs @@ -6,11 +6,13 @@ // in other words the utility functions in this file should not be broken out into modules or used elsewhere use { + solana_feature_set::{ + move_stake_and_move_lamports_ixs, stake_raise_minimum_delegation_to_1_sol, + }, solana_program_test::*, solana_sdk::{ account::Account as SolanaAccount, entrypoint::ProgramResult, - feature_set::{move_stake_and_move_lamports_ixs, stake_raise_minimum_delegation_to_1_sol}, instruction::Instruction, program_error::ProgramError, pubkey::Pubkey, diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 16f1e746698e71..d5a14a98d4d0ec 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } bincode = { workspace = true } log = { workspace = true } solana-config-program = { workspace = true } +solana-feature-set = { workspace = true } solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } diff --git a/programs/stake/src/lib.rs b/programs/stake/src/lib.rs index 8a1d888bfc22a2..4ea8df91501a85 100644 --- a/programs/stake/src/lib.rs +++ b/programs/stake/src/lib.rs @@ -5,10 +5,9 @@ note = "Please use `solana_sdk::stake::program::id` or `solana_program::stake::program::id` instead" )] pub use solana_sdk::stake::program::{check_id, id}; -use solana_sdk::{ - feature_set::{self, FeatureSet}, - genesis_config::GenesisConfig, - native_token::LAMPORTS_PER_SOL, +use { + solana_feature_set::{self as feature_set, FeatureSet}, + solana_sdk::{genesis_config::GenesisConfig, native_token::LAMPORTS_PER_SOL}, }; pub mod config; diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 086a1bf044e025..00ecfbf56c4715 
100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -9,7 +9,6 @@ use { declare_process_instruction, sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ - feature_set, instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, @@ -340,7 +339,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| StakeInstruction::MoveStake(lamports) => { if invoke_context .get_feature_set() - .is_active(&feature_set::move_stake_and_move_lamports_ixs::id()) + .is_active(&solana_feature_set::move_stake_and_move_lamports_ixs::id()) { instruction_context.check_number_of_instruction_accounts(3)?; move_stake( @@ -359,7 +358,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| StakeInstruction::MoveLamports(lamports) => { if invoke_context .get_feature_set() - .is_active(&feature_set::move_stake_and_move_lamports_ixs::id()) + .is_active(&solana_feature_set::move_stake_and_move_lamports_ixs::id()) { instruction_context.check_number_of_instruction_accounts(3)?; move_lamports( @@ -391,6 +390,7 @@ mod tests { }, assert_matches::assert_matches, bincode::serialize, + solana_feature_set::FeatureSet, solana_program_runtime::invoke_context::mock_process_instruction, solana_sdk::{ account::{ @@ -400,7 +400,6 @@ mod tests { account_utils::StateMut, clock::{Clock, Epoch, UnixTimestamp}, epoch_schedule::EpochSchedule, - feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, rent::Rent, @@ -437,7 +436,7 @@ mod tests { let mut feature_set = feature_set_all_enabled(); Arc::get_mut(&mut feature_set) .unwrap() - .deactivate(&feature_set::stake_raise_minimum_delegation_to_1_sol::id()); + .deactivate(&solana_feature_set::stake_raise_minimum_delegation_to_1_sol::id()); feature_set } diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 6bb26e288db8f2..8dc1e728a2b94d 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -9,13 +9,13 @@ )] pub use solana_sdk::stake::state::*; use { + solana_feature_set::FeatureSet, solana_log_collector::ic_msg, solana_program_runtime::invoke_context::InvokeContext, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, clock::{Clock, Epoch}, - feature_set::FeatureSet, instruction::{checked_add, InstructionError}, pubkey::Pubkey, rent::Rent, diff --git a/programs/system/Cargo.toml b/programs/system/Cargo.toml index a7e464fe5c3f9a..f1c67e1aa9e393 100644 --- a/programs/system/Cargo.toml +++ b/programs/system/Cargo.toml @@ -22,6 +22,7 @@ solana-type-overrides = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } solana-compute-budget = { workspace = true } +solana-feature-set = { workspace = true } solana-logger = { workspace = true } [lib] diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index 2a8462cd178ecf..d76f5e1980818b 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -16,6 +16,7 @@ num-derive = { workspace = true } num-traits = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } +solana-feature-set = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-metrics = { workspace = true } diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index ea6514420cd0d2..8cb4db468f9b72 100644 --- 
a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -3,13 +3,13 @@ use { crate::vote_state, log::*, + solana_feature_set as feature_set, solana_program::vote::{instruction::VoteInstruction, program::id, state::VoteAuthorize}, solana_program_runtime::{ declare_process_instruction, invoke_context::InvokeContext, sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ - feature_set, instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index c4ea689fc9d3bf..24b480c6198d84 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -4,12 +4,12 @@ pub use solana_program::vote::state::{vote_state_versions::*, *}; use { log::*, serde_derive::{Deserialize, Serialize}, + solana_feature_set::{self as feature_set, FeatureSet}, solana_program::vote::{error::VoteError, program::id}, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, clock::{Epoch, Slot, UnixTimestamp}, epoch_schedule::EpochSchedule, - feature_set::{self, FeatureSet}, hash::Hash, instruction::InstructionError, pubkey::Pubkey, diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml index afbda6f1b8161c..2b8ce98f2ac2f3 100644 --- a/programs/zk-token-proof/Cargo.toml +++ b/programs/zk-token-proof/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } +solana-feature-set = { workspace = true } solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index f1fc5bca28c1fc..15d292b29a4ef4 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -2,10 +2,10 @@ use { bytemuck::Pod, + solana_feature_set as feature_set, solana_log_collector::ic_msg, solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk::{ - feature_set, instruction::{InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT}, system_program, }, diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 2a0c5c480da1b0..f7bc16bd6f9bd9 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -34,6 +34,7 @@ solana-accounts-db = { workspace = true } solana-client = { workspace = true } solana-entry = { workspace = true } solana-faucet = { workspace = true } +solana-feature-set = { workspace = true } solana-gossip = { workspace = true } solana-inline-spl = { workspace = true } solana-ledger = { workspace = true } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index f0294540a5e427..3c8b1c1217ae64 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -22,6 +22,7 @@ use { solana_client::connection_cache::{ConnectionCache, Protocol}, solana_entry::entry::Entry, solana_faucet::faucet::request_airdrop_transaction, + solana_feature_set as feature_set, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_inline_spl::{ token::{SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, @@ -67,7 +68,6 @@ use { epoch_rewards_hasher::EpochRewardsHasher, epoch_schedule::EpochSchedule, exit::Exit, - feature_set, hash::Hash, message::SanitizedMessage, pubkey::{Pubkey, PUBKEY_BYTES}, diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 9afeea8c469f02..5a740f32805aed 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ 
-54,6 +54,7 @@ solana-compute-budget = { workspace = true } solana-compute-budget-program = { workspace = true } solana-config-program = { workspace = true } solana-cost-model = { workspace = true } +solana-feature-set = { workspace = true } solana-fee = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f6f4f8edee9bb5..44d8195c993c5a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -92,6 +92,10 @@ use { }, solana_compute_budget::compute_budget::ComputeBudget, solana_cost_model::cost_tracker::CostTracker, + solana_feature_set::{ + self as feature_set, remove_rounding_in_fee_calculation, reward_full_priority_fee, + FeatureSet, + }, solana_measure::{measure::Measure, measure_time, measure_us}, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, @@ -113,9 +117,6 @@ use { epoch_info::EpochInfo, epoch_schedule::EpochSchedule, feature, - feature_set::{ - self, remove_rounding_in_fee_calculation, reward_full_priority_fee, FeatureSet, - }, fee::{FeeBudgetLimits, FeeDetails, FeeStructure}, fee_calculator::FeeRateGovernor, genesis_config::{ClusterType, GenesisConfig}, diff --git a/runtime/src/bank/builtin_programs.rs b/runtime/src/bank/builtin_programs.rs index 7c12bb23fbd6b0..a0715d0c488336 100644 --- a/runtime/src/bank/builtin_programs.rs +++ b/runtime/src/bank/builtin_programs.rs @@ -2,9 +2,8 @@ mod tests { use { crate::bank::*, - solana_sdk::{ - ed25519_program, feature_set::FeatureSet, genesis_config::create_genesis_config, - }, + solana_feature_set::FeatureSet, + solana_sdk::{ed25519_program, genesis_config::create_genesis_config}, }; #[test] @@ -75,13 +74,13 @@ mod tests_core_bpf_migration { tests::{create_genesis_config, new_bank_from_parent_with_bank_forks}, Bank, }, + solana_feature_set::FeatureSet, solana_program_runtime::loaded_programs::ProgramCacheEntry, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, bpf_loader_upgradeable::{self, get_program_data_address, UpgradeableLoaderState}, epoch_schedule::EpochSchedule, feature::{self, Feature}, - feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, message::Message, native_loader, diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index 6fecbdeba3640e..023f27ec8e0794 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -322,7 +322,7 @@ impl Bank { /// `apply_feature_activations` function, similar to below. 
/// /// ```ignore - /// if new_feature_activations.contains(&feature_set::test_upgrade_program::id()) { + /// if new_feature_activations.contains(&solana_feature_set::test_upgrade_program::id()) { /// self.upgrade_core_bpf_program( /// &core_bpf_program_address, /// &source_buffer_address, diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs index b89a951ffd2cdf..ad565909ccee7b 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs @@ -75,10 +75,11 @@ mod tests { super::*, crate::bank::{tests::create_simple_test_bank, ApplyFeatureActivationsCaller}, assert_matches::assert_matches, + solana_feature_set as feature_set, solana_sdk::{ account::Account, bpf_loader_upgradeable::{UpgradeableLoaderState, ID as BPF_LOADER_UPGRADEABLE_ID}, - feature, feature_set, + feature, }, test_case::test_case, }; diff --git a/runtime/src/bank/builtins/mod.rs b/runtime/src/bank/builtins/mod.rs index 6e1797be11a8cb..99dfa5a0cfa836 100644 --- a/runtime/src/bank/builtins/mod.rs +++ b/runtime/src/bank/builtins/mod.rs @@ -4,7 +4,8 @@ pub mod prototypes; pub use prototypes::{BuiltinPrototype, StatelessBuiltinPrototype}; use { core_bpf_migration::CoreBpfMigrationConfig, - solana_sdk::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, feature_set}, + solana_feature_set as feature_set, + solana_sdk::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable}, }; macro_rules! testable_prototype { @@ -56,7 +57,7 @@ pub static BUILTINS: &[BuiltinPrototype] = &[ core_bpf_migration_config: Some(CoreBpfMigrationConfig { source_buffer_address: buffer_accounts::config_program::id(), upgrade_authority_address: None, - feature_id: solana_sdk::feature_set::migrate_config_program_to_core_bpf::id(), + feature_id: solana_feature_set::migrate_config_program_to_core_bpf::id(), migration_target: core_bpf_migration::CoreBpfMigrationTargetType::Builtin, datapoint_name: "migrate_builtin_to_core_bpf_config_program", }), @@ -97,8 +98,7 @@ pub static BUILTINS: &[BuiltinPrototype] = &[ core_bpf_migration_config: Some(CoreBpfMigrationConfig { source_buffer_address: buffer_accounts::address_lookup_table_program::id(), upgrade_authority_address: None, - feature_id: - solana_sdk::feature_set::migrate_address_lookup_table_program_to_core_bpf::id(), + feature_id: solana_feature_set::migrate_address_lookup_table_program_to_core_bpf::id(), migration_target: core_bpf_migration::CoreBpfMigrationTargetType::Builtin, datapoint_name: "migrate_builtin_to_core_bpf_address_lookup_table_program", }), @@ -134,7 +134,7 @@ pub static STATELESS_BUILTINS: &[StatelessBuiltinPrototype] = &[StatelessBuiltin core_bpf_migration_config: Some(CoreBpfMigrationConfig { source_buffer_address: buffer_accounts::feature_gate_program::id(), upgrade_authority_address: None, - feature_id: solana_sdk::feature_set::migrate_feature_gate_program_to_core_bpf::id(), + feature_id: solana_feature_set::migrate_feature_gate_program_to_core_bpf::id(), migration_target: core_bpf_migration::CoreBpfMigrationTargetType::Stateless, datapoint_name: "migrate_stateless_to_core_bpf_feature_gate_program", }), diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 89d0add35df7b0..e0be18d5e609fc 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -2,9 +2,9 @@ use { super::Bank, crate::bank::CollectorFeeDetails, log::{debug, warn}, + 
solana_feature_set::{remove_rounding_in_fee_calculation, reward_full_priority_fee}, solana_sdk::{ account::{ReadableAccount, WritableAccount}, - feature_set::{remove_rounding_in_fee_calculation, reward_full_priority_fee}, fee::FeeBudgetLimits, pubkey::Pubkey, reward_info::RewardInfo, diff --git a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs index 8a0fa2af2b3086..392d9c51630f50 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs @@ -243,7 +243,6 @@ mod tests { solana_sdk::{ account::from_account, epoch_schedule::EpochSchedule, - feature_set, hash::Hash, native_token::LAMPORTS_PER_SOL, rent::Rent, @@ -349,7 +348,7 @@ mod tests { create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::partitioned_epoch_rewards_superfeature::id()); + bank.activate_feature(&solana_feature_set::partitioned_epoch_rewards_superfeature::id()); // Set up epoch_rewards sysvar with rewards with 1e9 lamports to distribute. let total_rewards = 1_000_000_000; diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index 894b058ca2f8a8..c3208a0e3e4f98 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -10,10 +10,10 @@ use { solana_accounts_db::{ partitioned_rewards::PartitionedEpochRewardsConfig, stake_rewards::StakeReward, }, + solana_feature_set as feature_set, solana_sdk::{ account::AccountSharedData, account_utils::StateMut, - feature_set, pubkey::Pubkey, reward_info::RewardInfo, stake::state::{Delegation, Stake, StakeStateV2}, diff --git a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs index 624630f39712c9..3b3a485679c3ab 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs @@ -133,7 +133,7 @@ mod tests { super::*, crate::bank::tests::create_genesis_config, solana_sdk::{ - account::ReadableAccount, epoch_schedule::EpochSchedule, feature_set, + account::ReadableAccount, epoch_schedule::EpochSchedule, native_token::LAMPORTS_PER_SOL, pubkey::Pubkey, }, std::sync::Arc, @@ -148,7 +148,7 @@ mod tests { create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::partitioned_epoch_rewards_superfeature::id()); + bank.activate_feature(&solana_feature_set::partitioned_epoch_rewards_superfeature::id()); let total_rewards = 1_000_000_000; let num_partitions = 2; // num_partitions is arbitrary and unimportant for this test diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index 1c7cea037d23af..35f23fb9222bd2 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -5,8 +5,9 @@ use super::Bank; mod tests { use { super::*, + solana_feature_set as feature_set, solana_sdk::{ - feature_set, genesis_config::create_genesis_config, pubkey::Pubkey, + genesis_config::create_genesis_config, pubkey::Pubkey, sysvar::epoch_rewards::EpochRewards, }, solana_stake_program::points::PointValue, diff --git a/runtime/src/bank/tests.rs 
b/runtime/src/bank/tests.rs index 56612762743f97..ec62293b28f105 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -38,6 +38,7 @@ use { compute_budget_limits::{self, MAX_COMPUTE_UNIT_LIMIT}, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, + solana_feature_set::{self as feature_set, FeatureSet}, solana_inline_spl::token, solana_logger, solana_program_runtime::{ @@ -63,7 +64,6 @@ use { entrypoint::MAX_PERMITTED_DATA_INCREASE, epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, feature::{self, Feature}, - feature_set::{self, FeatureSet}, fee::FeeStructure, fee_calculator::FeeRateGovernor, genesis_config::{ClusterType, GenesisConfig}, @@ -1645,9 +1645,9 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { solana_logger::setup(); let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); for feature_id in FeatureSet::default().inactive { - if feature_id != solana_sdk::feature_set::skip_rent_rewrites::id() + if feature_id != solana_feature_set::skip_rent_rewrites::id() && (!should_collect_rent - || feature_id != solana_sdk::feature_set::disable_rent_fees_collection::id()) + || feature_id != solana_feature_set::disable_rent_fees_collection::id()) { activate_feature(&mut genesis_config, feature_id); } @@ -6343,7 +6343,7 @@ fn test_bank_hash_consistency() { genesis_config.rent.burn_percent = 100; activate_feature( &mut genesis_config, - solana_sdk::feature_set::set_exempt_rent_epoch_max::id(), + solana_feature_set::set_exempt_rent_epoch_max::id(), ); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); @@ -11576,7 +11576,7 @@ fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { if should_collect_rent { genesis_config .accounts - .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); + .remove(&solana_feature_set::disable_rent_fees_collection::id()); } let bank = Arc::new(Bank::new_for_tests(&genesis_config)); diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index aac8f56a75b699..0e91fe08616acd 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -1,9 +1,9 @@ use { log::*, + solana_feature_set::{FeatureSet, FEATURE_NAMES}, solana_sdk::{ account::{Account, AccountSharedData}, feature::{self, Feature}, - feature_set::{FeatureSet, FEATURE_NAMES}, fee_calculator::FeeRateGovernor, genesis_config::{ClusterType, GenesisConfig}, native_token::sol_to_lamports, diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index eab3e27d1a88de..a14c58db882378 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -40,6 +40,7 @@ frozen-abi = [ "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", + "solana-feature-set/frozen-abi", "solana-program/frozen-abi", "solana-short-vec/frozen-abi", "solana-signature/frozen-abi" @@ -86,6 +87,7 @@ siphasher = { workspace = true } solana-bn254 = { workspace = true } solana-decode-error = { workspace = true } solana-derivation-path = { workspace = true } +solana-feature-set = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-program = { workspace = true } diff --git a/sdk/benches/ed25519_instructions.rs b/sdk/benches/ed25519_instructions.rs index 4dcbbc0e035353..0bd7273357f6cd 100644 --- a/sdk/benches/ed25519_instructions.rs +++ b/sdk/benches/ed25519_instructions.rs @@ -3,9 +3,9 @@ extern crate test; use { rand0_7::{thread_rng, Rng}, + solana_feature_set::FeatureSet, 
solana_sdk::{ ed25519_instruction::new_ed25519_instruction, - feature_set::FeatureSet, hash::Hash, signature::{Keypair, Signer}, transaction::Transaction, diff --git a/sdk/benches/secp256k1_instructions.rs b/sdk/benches/secp256k1_instructions.rs index 339c50dc639aef..8940bda5a99eb0 100644 --- a/sdk/benches/secp256k1_instructions.rs +++ b/sdk/benches/secp256k1_instructions.rs @@ -3,8 +3,8 @@ extern crate test; use { rand0_7::{thread_rng, Rng}, + solana_feature_set::FeatureSet, solana_sdk::{ - feature_set::FeatureSet, hash::Hash, secp256k1_instruction::new_secp256k1_instruction, signature::{Keypair, Signer}, diff --git a/sdk/feature-set/Cargo.toml b/sdk/feature-set/Cargo.toml new file mode 100644 index 00000000000000..02da9d0cc4f2f9 --- /dev/null +++ b/sdk/feature-set/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "solana-feature-set" +description = "Solana runtime features." +documentation = "https://docs.rs/solana-feature-set" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +lazy_static = { workspace = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-program = { workspace = true } + +[build-dependencies] +rustc_version = { workspace = true, optional = true } + +[features] +frozen-abi = [ + "dep:rustc_version", + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/feature-set/build.rs b/sdk/feature-set/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/sdk/feature-set/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/src/feature_set.rs b/sdk/feature-set/src/lib.rs similarity index 69% rename from sdk/src/feature_set.rs rename to sdk/feature-set/src/lib.rs index 02cc2594d30593..58bb290a1e653c 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/feature-set/src/lib.rs @@ -12,853 +12,855 @@ //! through these steps, the PR process will facilitate a keypair holder being picked. That //! person will generate the keypair, provide pubkey for PR, and ultimately enable the feature. //! 2. Add a public module for the feature, specifying keypair pubkey as the id with -//! `solana_sdk::declare_id!()` within the module. +//! `solana_program::declare_id!()` within the module. //! Additionally, add an entry to `FEATURE_NAMES` map. //! 3. Add desired logic to check for and switch on feature availability. //! //! For more information on how features are picked up, see comments for `Feature`. 
+#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] use { lazy_static::lazy_static, - solana_program::{epoch_schedule::EpochSchedule, stake_history::Epoch}, - solana_sdk::{ + solana_program::{ clock::Slot, + epoch_schedule::EpochSchedule, hash::{Hash, Hasher}, pubkey::Pubkey, + stake_history::Epoch, }, std::collections::{HashMap, HashSet}, }; pub mod deprecate_rewards_sysvar { - solana_sdk::declare_id!("GaBtBJvmS4Arjj5W1NmFcyvPjsHN38UGYDq2MDwbs9Qu"); + solana_program::declare_id!("GaBtBJvmS4Arjj5W1NmFcyvPjsHN38UGYDq2MDwbs9Qu"); } pub mod pico_inflation { - solana_sdk::declare_id!("4RWNif6C2WCNiKVW7otP4G7dkmkHGyKQWRpuZ1pxKU5m"); + solana_program::declare_id!("4RWNif6C2WCNiKVW7otP4G7dkmkHGyKQWRpuZ1pxKU5m"); } pub mod full_inflation { pub mod devnet_and_testnet { - solana_sdk::declare_id!("DT4n6ABDqs6w4bnfwrXT9rsprcPf6cdDga1egctaPkLC"); + solana_program::declare_id!("DT4n6ABDqs6w4bnfwrXT9rsprcPf6cdDga1egctaPkLC"); } pub mod mainnet { pub mod certusone { pub mod vote { - solana_sdk::declare_id!("BzBBveUDymEYoYzcMWNQCx3cd4jQs7puaVFHLtsbB6fm"); + solana_program::declare_id!("BzBBveUDymEYoYzcMWNQCx3cd4jQs7puaVFHLtsbB6fm"); } pub mod enable { - solana_sdk::declare_id!("7XRJcS5Ud5vxGB54JbK9N2vBZVwnwdBNeJW1ibRgD9gx"); + solana_program::declare_id!("7XRJcS5Ud5vxGB54JbK9N2vBZVwnwdBNeJW1ibRgD9gx"); } } } } pub mod secp256k1_program_enabled { - solana_sdk::declare_id!("E3PHP7w8kB7np3CTQ1qQ2tW3KCtjRSXBQgW9vM2mWv2Y"); + solana_program::declare_id!("E3PHP7w8kB7np3CTQ1qQ2tW3KCtjRSXBQgW9vM2mWv2Y"); } pub mod spl_token_v2_multisig_fix { - solana_sdk::declare_id!("E5JiFDQCwyC6QfT9REFyMpfK2mHcmv1GUDySU1Ue7TYv"); + solana_program::declare_id!("E5JiFDQCwyC6QfT9REFyMpfK2mHcmv1GUDySU1Ue7TYv"); } pub mod no_overflow_rent_distribution { - solana_sdk::declare_id!("4kpdyrcj5jS47CZb2oJGfVxjYbsMm2Kx97gFyZrxxwXz"); + solana_program::declare_id!("4kpdyrcj5jS47CZb2oJGfVxjYbsMm2Kx97gFyZrxxwXz"); } pub mod filter_stake_delegation_accounts { - solana_sdk::declare_id!("GE7fRxmW46K6EmCD9AMZSbnaJ2e3LfqCZzdHi9hmYAgi"); + solana_program::declare_id!("GE7fRxmW46K6EmCD9AMZSbnaJ2e3LfqCZzdHi9hmYAgi"); } pub mod require_custodian_for_locked_stake_authorize { - solana_sdk::declare_id!("D4jsDcXaqdW8tDAWn8H4R25Cdns2YwLneujSL1zvjW6R"); + solana_program::declare_id!("D4jsDcXaqdW8tDAWn8H4R25Cdns2YwLneujSL1zvjW6R"); } pub mod spl_token_v2_self_transfer_fix { - solana_sdk::declare_id!("BL99GYhdjjcv6ys22C9wPgn2aTVERDbPHHo4NbS3hgp7"); + solana_program::declare_id!("BL99GYhdjjcv6ys22C9wPgn2aTVERDbPHHo4NbS3hgp7"); } pub mod warp_timestamp_again { - solana_sdk::declare_id!("GvDsGDkH5gyzwpDhxNixx8vtx1kwYHH13RiNAPw27zXb"); + solana_program::declare_id!("GvDsGDkH5gyzwpDhxNixx8vtx1kwYHH13RiNAPw27zXb"); } pub mod check_init_vote_data { - solana_sdk::declare_id!("3ccR6QpxGYsAbWyfevEtBNGfWV4xBffxRj2tD6A9i39F"); + solana_program::declare_id!("3ccR6QpxGYsAbWyfevEtBNGfWV4xBffxRj2tD6A9i39F"); } pub mod secp256k1_recover_syscall_enabled { - solana_sdk::declare_id!("6RvdSWHh8oh72Dp7wMTS2DBkf3fRPtChfNrAo3cZZoXJ"); + solana_program::declare_id!("6RvdSWHh8oh72Dp7wMTS2DBkf3fRPtChfNrAo3cZZoXJ"); } pub mod system_transfer_zero_check { - solana_sdk::declare_id!("BrTR9hzw4WBGFP65AJMbpAo64DcA3U6jdPSga9fMV5cS"); + solana_program::declare_id!("BrTR9hzw4WBGFP65AJMbpAo64DcA3U6jdPSga9fMV5cS"); } pub mod blake3_syscall_enabled { - solana_sdk::declare_id!("HTW2pSyErTj4BV6KBM9NZ9VBUJVxt7sacNWcf76wtzb3"); + solana_program::declare_id!("HTW2pSyErTj4BV6KBM9NZ9VBUJVxt7sacNWcf76wtzb3"); } pub mod dedupe_config_program_signers { - 
solana_sdk::declare_id!("8kEuAshXLsgkUEdcFVLqrjCGGHVWFW99ZZpxvAzzMtBp"); + solana_program::declare_id!("8kEuAshXLsgkUEdcFVLqrjCGGHVWFW99ZZpxvAzzMtBp"); } pub mod verify_tx_signatures_len { - solana_sdk::declare_id!("EVW9B5xD9FFK7vw1SBARwMA4s5eRo5eKJdKpsBikzKBz"); + solana_program::declare_id!("EVW9B5xD9FFK7vw1SBARwMA4s5eRo5eKJdKpsBikzKBz"); } pub mod vote_stake_checked_instructions { - solana_sdk::declare_id!("BcWknVcgvonN8sL4HE4XFuEVgfcee5MwxWPAgP6ZV89X"); + solana_program::declare_id!("BcWknVcgvonN8sL4HE4XFuEVgfcee5MwxWPAgP6ZV89X"); } pub mod rent_for_sysvars { - solana_sdk::declare_id!("BKCPBQQBZqggVnFso5nQ8rQ4RwwogYwjuUt9biBjxwNF"); + solana_program::declare_id!("BKCPBQQBZqggVnFso5nQ8rQ4RwwogYwjuUt9biBjxwNF"); } pub mod libsecp256k1_0_5_upgrade_enabled { - solana_sdk::declare_id!("DhsYfRjxfnh2g7HKJYSzT79r74Afa1wbHkAgHndrA1oy"); + solana_program::declare_id!("DhsYfRjxfnh2g7HKJYSzT79r74Afa1wbHkAgHndrA1oy"); } pub mod tx_wide_compute_cap { - solana_sdk::declare_id!("5ekBxc8itEnPv4NzGJtr8BVVQLNMQuLMNQQj7pHoLNZ9"); + solana_program::declare_id!("5ekBxc8itEnPv4NzGJtr8BVVQLNMQuLMNQQj7pHoLNZ9"); } pub mod spl_token_v2_set_authority_fix { - solana_sdk::declare_id!("FToKNBYyiF4ky9s8WsmLBXHCht17Ek7RXaLZGHzzQhJ1"); + solana_program::declare_id!("FToKNBYyiF4ky9s8WsmLBXHCht17Ek7RXaLZGHzzQhJ1"); } pub mod merge_nonce_error_into_system_error { - solana_sdk::declare_id!("21AWDosvp3pBamFW91KB35pNoaoZVTM7ess8nr2nt53B"); + solana_program::declare_id!("21AWDosvp3pBamFW91KB35pNoaoZVTM7ess8nr2nt53B"); } pub mod disable_fees_sysvar { - solana_sdk::declare_id!("JAN1trEUEtZjgXYzNBYHU9DYd7GnThhXfFP7SzPXkPsG"); + solana_program::declare_id!("JAN1trEUEtZjgXYzNBYHU9DYd7GnThhXfFP7SzPXkPsG"); } pub mod stake_merge_with_unmatched_credits_observed { - solana_sdk::declare_id!("meRgp4ArRPhD3KtCY9c5yAf2med7mBLsjKTPeVUHqBL"); + solana_program::declare_id!("meRgp4ArRPhD3KtCY9c5yAf2med7mBLsjKTPeVUHqBL"); } pub mod zk_token_sdk_enabled { - solana_sdk::declare_id!("zk1snxsc6Fh3wsGNbbHAJNHiJoYgF29mMnTSusGx5EJ"); + solana_program::declare_id!("zk1snxsc6Fh3wsGNbbHAJNHiJoYgF29mMnTSusGx5EJ"); } pub mod curve25519_syscall_enabled { - solana_sdk::declare_id!("7rcw5UtqgDTBBv2EcynNfYckgdAaH1MAsCjKgXMkN7Ri"); + solana_program::declare_id!("7rcw5UtqgDTBBv2EcynNfYckgdAaH1MAsCjKgXMkN7Ri"); } pub mod curve25519_restrict_msm_length { - solana_sdk::declare_id!("eca6zf6JJRjQsYYPkBHF3N32MTzur4n2WL4QiiacPCL"); + solana_program::declare_id!("eca6zf6JJRjQsYYPkBHF3N32MTzur4n2WL4QiiacPCL"); } pub mod versioned_tx_message_enabled { - solana_sdk::declare_id!("3KZZ6Ks1885aGBQ45fwRcPXVBCtzUvxhUTkwKMR41Tca"); + solana_program::declare_id!("3KZZ6Ks1885aGBQ45fwRcPXVBCtzUvxhUTkwKMR41Tca"); } pub mod libsecp256k1_fail_on_bad_count { - solana_sdk::declare_id!("8aXvSuopd1PUj7UhehfXJRg6619RHp8ZvwTyyJHdUYsj"); + solana_program::declare_id!("8aXvSuopd1PUj7UhehfXJRg6619RHp8ZvwTyyJHdUYsj"); } pub mod libsecp256k1_fail_on_bad_count2 { - solana_sdk::declare_id!("54KAoNiUERNoWWUhTWWwXgym94gzoXFVnHyQwPA18V9A"); + solana_program::declare_id!("54KAoNiUERNoWWUhTWWwXgym94gzoXFVnHyQwPA18V9A"); } pub mod instructions_sysvar_owned_by_sysvar { - solana_sdk::declare_id!("H3kBSaKdeiUsyHmeHqjJYNc27jesXZ6zWj3zWkowQbkV"); + solana_program::declare_id!("H3kBSaKdeiUsyHmeHqjJYNc27jesXZ6zWj3zWkowQbkV"); } pub mod stake_program_advance_activating_credits_observed { - solana_sdk::declare_id!("SAdVFw3RZvzbo6DvySbSdBnHN4gkzSTH9dSxesyKKPj"); + solana_program::declare_id!("SAdVFw3RZvzbo6DvySbSdBnHN4gkzSTH9dSxesyKKPj"); } pub mod credits_auto_rewind { - 
solana_sdk::declare_id!("BUS12ciZ5gCoFafUHWW8qaFMMtwFQGVxjsDheWLdqBE2"); + solana_program::declare_id!("BUS12ciZ5gCoFafUHWW8qaFMMtwFQGVxjsDheWLdqBE2"); } pub mod demote_program_write_locks { - solana_sdk::declare_id!("3E3jV7v9VcdJL8iYZUMax9DiDno8j7EWUVbhm9RtShj2"); + solana_program::declare_id!("3E3jV7v9VcdJL8iYZUMax9DiDno8j7EWUVbhm9RtShj2"); } pub mod ed25519_program_enabled { - solana_sdk::declare_id!("6ppMXNYLhVd7GcsZ5uV11wQEW7spppiMVfqQv5SXhDpX"); + solana_program::declare_id!("6ppMXNYLhVd7GcsZ5uV11wQEW7spppiMVfqQv5SXhDpX"); } pub mod return_data_syscall_enabled { - solana_sdk::declare_id!("DwScAzPUjuv65TMbDnFY7AgwmotzWy3xpEJMXM3hZFaB"); + solana_program::declare_id!("DwScAzPUjuv65TMbDnFY7AgwmotzWy3xpEJMXM3hZFaB"); } pub mod reduce_required_deploy_balance { - solana_sdk::declare_id!("EBeznQDjcPG8491sFsKZYBi5S5jTVXMpAKNDJMQPS2kq"); + solana_program::declare_id!("EBeznQDjcPG8491sFsKZYBi5S5jTVXMpAKNDJMQPS2kq"); } pub mod sol_log_data_syscall_enabled { - solana_sdk::declare_id!("6uaHcKPGUy4J7emLBgUTeufhJdiwhngW6a1R9B7c2ob9"); + solana_program::declare_id!("6uaHcKPGUy4J7emLBgUTeufhJdiwhngW6a1R9B7c2ob9"); } pub mod stakes_remove_delegation_if_inactive { - solana_sdk::declare_id!("HFpdDDNQjvcXnXKec697HDDsyk6tFoWS2o8fkxuhQZpL"); + solana_program::declare_id!("HFpdDDNQjvcXnXKec697HDDsyk6tFoWS2o8fkxuhQZpL"); } pub mod do_support_realloc { - solana_sdk::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); + solana_program::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); } pub mod prevent_calling_precompiles_as_programs { - solana_sdk::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); + solana_program::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); } pub mod optimize_epoch_boundary_updates { - solana_sdk::declare_id!("265hPS8k8xJ37ot82KEgjRunsUp5w4n4Q4VwwiN9i9ps"); + solana_program::declare_id!("265hPS8k8xJ37ot82KEgjRunsUp5w4n4Q4VwwiN9i9ps"); } pub mod remove_native_loader { - solana_sdk::declare_id!("HTTgmruMYRZEntyL3EdCDdnS6e4D5wRq1FA7kQsb66qq"); + solana_program::declare_id!("HTTgmruMYRZEntyL3EdCDdnS6e4D5wRq1FA7kQsb66qq"); } pub mod send_to_tpu_vote_port { - solana_sdk::declare_id!("C5fh68nJ7uyKAuYZg2x9sEQ5YrVf3dkW6oojNBSc3Jvo"); + solana_program::declare_id!("C5fh68nJ7uyKAuYZg2x9sEQ5YrVf3dkW6oojNBSc3Jvo"); } pub mod requestable_heap_size { - solana_sdk::declare_id!("CCu4boMmfLuqcmfTLPHQiUo22ZdUsXjgzPAURYaWt1Bw"); + solana_program::declare_id!("CCu4boMmfLuqcmfTLPHQiUo22ZdUsXjgzPAURYaWt1Bw"); } pub mod disable_fee_calculator { - solana_sdk::declare_id!("2jXx2yDmGysmBKfKYNgLj2DQyAQv6mMk2BPh4eSbyB4H"); + solana_program::declare_id!("2jXx2yDmGysmBKfKYNgLj2DQyAQv6mMk2BPh4eSbyB4H"); } pub mod add_compute_budget_program { - solana_sdk::declare_id!("4d5AKtxoh93Dwm1vHXUU3iRATuMndx1c431KgT2td52r"); + solana_program::declare_id!("4d5AKtxoh93Dwm1vHXUU3iRATuMndx1c431KgT2td52r"); } pub mod nonce_must_be_writable { - solana_sdk::declare_id!("BiCU7M5w8ZCMykVSyhZ7Q3m2SWoR2qrEQ86ERcDX77ME"); + solana_program::declare_id!("BiCU7M5w8ZCMykVSyhZ7Q3m2SWoR2qrEQ86ERcDX77ME"); } pub mod spl_token_v3_3_0_release { - solana_sdk::declare_id!("Ftok2jhqAqxUWEiCVRrfRs9DPppWP8cgTB7NQNKL88mS"); + solana_program::declare_id!("Ftok2jhqAqxUWEiCVRrfRs9DPppWP8cgTB7NQNKL88mS"); } pub mod leave_nonce_on_success { - solana_sdk::declare_id!("E8MkiWZNNPGU6n55jkGzyj8ghUmjCHRmDFdYYFYHxWhQ"); + solana_program::declare_id!("E8MkiWZNNPGU6n55jkGzyj8ghUmjCHRmDFdYYFYHxWhQ"); } pub mod reject_empty_instruction_without_program { - solana_sdk::declare_id!("9kdtFSrXHQg3hKkbXkQ6trJ3Ja1xpJ22CTFSNAciEwmL"); 
+ solana_program::declare_id!("9kdtFSrXHQg3hKkbXkQ6trJ3Ja1xpJ22CTFSNAciEwmL"); } pub mod fixed_memcpy_nonoverlapping_check { - solana_sdk::declare_id!("36PRUK2Dz6HWYdG9SpjeAsF5F3KxnFCakA2BZMbtMhSb"); + solana_program::declare_id!("36PRUK2Dz6HWYdG9SpjeAsF5F3KxnFCakA2BZMbtMhSb"); } pub mod reject_non_rent_exempt_vote_withdraws { - solana_sdk::declare_id!("7txXZZD6Um59YoLMF7XUNimbMjsqsWhc7g2EniiTrmp1"); + solana_program::declare_id!("7txXZZD6Um59YoLMF7XUNimbMjsqsWhc7g2EniiTrmp1"); } pub mod evict_invalid_stakes_cache_entries { - solana_sdk::declare_id!("EMX9Q7TVFAmQ9V1CggAkhMzhXSg8ECp7fHrWQX2G1chf"); + solana_program::declare_id!("EMX9Q7TVFAmQ9V1CggAkhMzhXSg8ECp7fHrWQX2G1chf"); } pub mod allow_votes_to_directly_update_vote_state { - solana_sdk::declare_id!("Ff8b1fBeB86q8cjq47ZhsQLgv5EkHu3G1C99zjUfAzrq"); + solana_program::declare_id!("Ff8b1fBeB86q8cjq47ZhsQLgv5EkHu3G1C99zjUfAzrq"); } pub mod max_tx_account_locks { - solana_sdk::declare_id!("CBkDroRDqm8HwHe6ak9cguPjUomrASEkfmxEaZ5CNNxz"); + solana_program::declare_id!("CBkDroRDqm8HwHe6ak9cguPjUomrASEkfmxEaZ5CNNxz"); } pub mod require_rent_exempt_accounts { - solana_sdk::declare_id!("BkFDxiJQWZXGTZaJQxH7wVEHkAmwCgSEVkrvswFfRJPD"); + solana_program::declare_id!("BkFDxiJQWZXGTZaJQxH7wVEHkAmwCgSEVkrvswFfRJPD"); } pub mod filter_votes_outside_slot_hashes { - solana_sdk::declare_id!("3gtZPqvPpsbXZVCx6hceMfWxtsmrjMzmg8C7PLKSxS2d"); + solana_program::declare_id!("3gtZPqvPpsbXZVCx6hceMfWxtsmrjMzmg8C7PLKSxS2d"); } pub mod update_syscall_base_costs { - solana_sdk::declare_id!("2h63t332mGCCsWK2nqqqHhN4U9ayyqhLVFvczznHDoTZ"); + solana_program::declare_id!("2h63t332mGCCsWK2nqqqHhN4U9ayyqhLVFvczznHDoTZ"); } pub mod stake_deactivate_delinquent_instruction { - solana_sdk::declare_id!("437r62HoAdUb63amq3D7ENnBLDhHT2xY8eFkLJYVKK4x"); + solana_program::declare_id!("437r62HoAdUb63amq3D7ENnBLDhHT2xY8eFkLJYVKK4x"); } pub mod vote_withdraw_authority_may_change_authorized_voter { - solana_sdk::declare_id!("AVZS3ZsN4gi6Rkx2QUibYuSJG3S6QHib7xCYhG6vGJxU"); + solana_program::declare_id!("AVZS3ZsN4gi6Rkx2QUibYuSJG3S6QHib7xCYhG6vGJxU"); } pub mod spl_associated_token_account_v1_0_4 { - solana_sdk::declare_id!("FaTa4SpiaSNH44PGC4z8bnGVTkSRYaWvrBs3KTu8XQQq"); + solana_program::declare_id!("FaTa4SpiaSNH44PGC4z8bnGVTkSRYaWvrBs3KTu8XQQq"); } pub mod reject_vote_account_close_unless_zero_credit_epoch { - solana_sdk::declare_id!("ALBk3EWdeAg2WAGf6GPDUf1nynyNqCdEVmgouG7rpuCj"); + solana_program::declare_id!("ALBk3EWdeAg2WAGf6GPDUf1nynyNqCdEVmgouG7rpuCj"); } pub mod add_get_processed_sibling_instruction_syscall { - solana_sdk::declare_id!("CFK1hRCNy8JJuAAY8Pb2GjLFNdCThS2qwZNe3izzBMgn"); + solana_program::declare_id!("CFK1hRCNy8JJuAAY8Pb2GjLFNdCThS2qwZNe3izzBMgn"); } pub mod bank_transaction_count_fix { - solana_sdk::declare_id!("Vo5siZ442SaZBKPXNocthiXysNviW4UYPwRFggmbgAp"); + solana_program::declare_id!("Vo5siZ442SaZBKPXNocthiXysNviW4UYPwRFggmbgAp"); } pub mod disable_bpf_deprecated_load_instructions { - solana_sdk::declare_id!("3XgNukcZWf9o3HdA3fpJbm94XFc4qpvTXc8h1wxYwiPi"); + solana_program::declare_id!("3XgNukcZWf9o3HdA3fpJbm94XFc4qpvTXc8h1wxYwiPi"); } pub mod disable_bpf_unresolved_symbols_at_runtime { - solana_sdk::declare_id!("4yuaYAj2jGMGTh1sSmi4G2eFscsDq8qjugJXZoBN6YEa"); + solana_program::declare_id!("4yuaYAj2jGMGTh1sSmi4G2eFscsDq8qjugJXZoBN6YEa"); } pub mod record_instruction_in_transaction_context_push { - solana_sdk::declare_id!("3aJdcZqxoLpSBxgeYGjPwaYS1zzcByxUDqJkbzWAH1Zb"); + solana_program::declare_id!("3aJdcZqxoLpSBxgeYGjPwaYS1zzcByxUDqJkbzWAH1Zb"); } pub mod 
syscall_saturated_math { - solana_sdk::declare_id!("HyrbKftCdJ5CrUfEti6x26Cj7rZLNe32weugk7tLcWb8"); + solana_program::declare_id!("HyrbKftCdJ5CrUfEti6x26Cj7rZLNe32weugk7tLcWb8"); } pub mod check_physical_overlapping { - solana_sdk::declare_id!("nWBqjr3gpETbiaVj3CBJ3HFC5TMdnJDGt21hnvSTvVZ"); + solana_program::declare_id!("nWBqjr3gpETbiaVj3CBJ3HFC5TMdnJDGt21hnvSTvVZ"); } pub mod limit_secp256k1_recovery_id { - solana_sdk::declare_id!("7g9EUwj4j7CS21Yx1wvgWLjSZeh5aPq8x9kpoPwXM8n8"); + solana_program::declare_id!("7g9EUwj4j7CS21Yx1wvgWLjSZeh5aPq8x9kpoPwXM8n8"); } pub mod disable_deprecated_loader { - solana_sdk::declare_id!("GTUMCZ8LTNxVfxdrw7ZsDFTxXb7TutYkzJnFwinpE6dg"); + solana_program::declare_id!("GTUMCZ8LTNxVfxdrw7ZsDFTxXb7TutYkzJnFwinpE6dg"); } pub mod check_slice_translation_size { - solana_sdk::declare_id!("GmC19j9qLn2RFk5NduX6QXaDhVpGncVVBzyM8e9WMz2F"); + solana_program::declare_id!("GmC19j9qLn2RFk5NduX6QXaDhVpGncVVBzyM8e9WMz2F"); } pub mod stake_split_uses_rent_sysvar { - solana_sdk::declare_id!("FQnc7U4koHqWgRvFaBJjZnV8VPg6L6wWK33yJeDp4yvV"); + solana_program::declare_id!("FQnc7U4koHqWgRvFaBJjZnV8VPg6L6wWK33yJeDp4yvV"); } pub mod add_get_minimum_delegation_instruction_to_stake_program { - solana_sdk::declare_id!("St8k9dVXP97xT6faW24YmRSYConLbhsMJA4TJTBLmMT"); + solana_program::declare_id!("St8k9dVXP97xT6faW24YmRSYConLbhsMJA4TJTBLmMT"); } pub mod error_on_syscall_bpf_function_hash_collisions { - solana_sdk::declare_id!("8199Q2gMD2kwgfopK5qqVWuDbegLgpuFUFHCcUJQDN8b"); + solana_program::declare_id!("8199Q2gMD2kwgfopK5qqVWuDbegLgpuFUFHCcUJQDN8b"); } pub mod reject_callx_r10 { - solana_sdk::declare_id!("3NKRSwpySNwD3TvP5pHnRmkAQRsdkXWRr1WaQh8p4PWX"); + solana_program::declare_id!("3NKRSwpySNwD3TvP5pHnRmkAQRsdkXWRr1WaQh8p4PWX"); } pub mod drop_redundant_turbine_path { - solana_sdk::declare_id!("4Di3y24QFLt5QEUPZtbnjyfQKfm6ZMTfa6Dw1psfoMKU"); + solana_program::declare_id!("4Di3y24QFLt5QEUPZtbnjyfQKfm6ZMTfa6Dw1psfoMKU"); } pub mod executables_incur_cpi_data_cost { - solana_sdk::declare_id!("7GUcYgq4tVtaqNCKT3dho9r4665Qp5TxCZ27Qgjx3829"); + solana_program::declare_id!("7GUcYgq4tVtaqNCKT3dho9r4665Qp5TxCZ27Qgjx3829"); } pub mod fix_recent_blockhashes { - solana_sdk::declare_id!("6iyggb5MTcsvdcugX7bEKbHV8c6jdLbpHwkncrgLMhfo"); + solana_program::declare_id!("6iyggb5MTcsvdcugX7bEKbHV8c6jdLbpHwkncrgLMhfo"); } pub mod update_rewards_from_cached_accounts { - solana_sdk::declare_id!("28s7i3htzhahXQKqmS2ExzbEoUypg9krwvtK2M9UWXh9"); + solana_program::declare_id!("28s7i3htzhahXQKqmS2ExzbEoUypg9krwvtK2M9UWXh9"); } pub mod enable_partitioned_epoch_reward { - solana_sdk::declare_id!("9bn2vTJUsUcnpiZWbu2woSKtTGW3ErZC9ERv88SDqQjK"); + solana_program::declare_id!("9bn2vTJUsUcnpiZWbu2woSKtTGW3ErZC9ERv88SDqQjK"); } pub mod partitioned_epoch_rewards_superfeature { - solana_sdk::declare_id!("PERzQrt5gBD1XEe2c9XdFWqwgHY3mr7cYWbm5V772V8"); + solana_program::declare_id!("PERzQrt5gBD1XEe2c9XdFWqwgHY3mr7cYWbm5V772V8"); } pub mod spl_token_v3_4_0 { - solana_sdk::declare_id!("Ftok4njE8b7tDffYkC5bAbCaQv5sL6jispYrprzatUwN"); + solana_program::declare_id!("Ftok4njE8b7tDffYkC5bAbCaQv5sL6jispYrprzatUwN"); } pub mod spl_associated_token_account_v1_1_0 { - solana_sdk::declare_id!("FaTa17gVKoqbh38HcfiQonPsAaQViyDCCSg71AubYZw8"); + solana_program::declare_id!("FaTa17gVKoqbh38HcfiQonPsAaQViyDCCSg71AubYZw8"); } pub mod default_units_per_instruction { - solana_sdk::declare_id!("J2QdYx8crLbTVK8nur1jeLsmc3krDbfjoxoea2V1Uy5Q"); + solana_program::declare_id!("J2QdYx8crLbTVK8nur1jeLsmc3krDbfjoxoea2V1Uy5Q"); } pub mod 
stake_allow_zero_undelegated_amount { - solana_sdk::declare_id!("sTKz343FM8mqtyGvYWvbLpTThw3ixRM4Xk8QvZ985mw"); + solana_program::declare_id!("sTKz343FM8mqtyGvYWvbLpTThw3ixRM4Xk8QvZ985mw"); } pub mod require_static_program_ids_in_transaction { - solana_sdk::declare_id!("8FdwgyHFEjhAdjWfV2vfqk7wA1g9X3fQpKH7SBpEv3kC"); + solana_program::declare_id!("8FdwgyHFEjhAdjWfV2vfqk7wA1g9X3fQpKH7SBpEv3kC"); } pub mod stake_raise_minimum_delegation_to_1_sol { // This is a feature-proposal *feature id*. The feature keypair address is `GQXzC7YiSNkje6FFUk6sc2p53XRvKoaZ9VMktYzUMnpL`. - solana_sdk::declare_id!("9onWzzvCzNC2jfhxxeqRgs5q7nFAAKpCUvkj6T6GJK9i"); + solana_program::declare_id!("9onWzzvCzNC2jfhxxeqRgs5q7nFAAKpCUvkj6T6GJK9i"); } pub mod stake_minimum_delegation_for_rewards { - solana_sdk::declare_id!("G6ANXD6ptCSyNd9znZm7j4dEczAJCfx7Cy43oBx3rKHJ"); + solana_program::declare_id!("G6ANXD6ptCSyNd9znZm7j4dEczAJCfx7Cy43oBx3rKHJ"); } pub mod add_set_compute_unit_price_ix { - solana_sdk::declare_id!("98std1NSHqXi9WYvFShfVepRdCoq1qvsp8fsR2XZtG8g"); + solana_program::declare_id!("98std1NSHqXi9WYvFShfVepRdCoq1qvsp8fsR2XZtG8g"); } pub mod disable_deploy_of_alloc_free_syscall { - solana_sdk::declare_id!("79HWsX9rpnnJBPcdNURVqygpMAfxdrAirzAGAVmf92im"); + solana_program::declare_id!("79HWsX9rpnnJBPcdNURVqygpMAfxdrAirzAGAVmf92im"); } pub mod include_account_index_in_rent_error { - solana_sdk::declare_id!("2R72wpcQ7qV7aTJWUumdn8u5wmmTyXbK7qzEy7YSAgyY"); + solana_program::declare_id!("2R72wpcQ7qV7aTJWUumdn8u5wmmTyXbK7qzEy7YSAgyY"); } pub mod add_shred_type_to_shred_seed { - solana_sdk::declare_id!("Ds87KVeqhbv7Jw8W6avsS1mqz3Mw5J3pRTpPoDQ2QdiJ"); + solana_program::declare_id!("Ds87KVeqhbv7Jw8W6avsS1mqz3Mw5J3pRTpPoDQ2QdiJ"); } pub mod warp_timestamp_with_a_vengeance { - solana_sdk::declare_id!("3BX6SBeEBibHaVQXywdkcgyUk6evfYZkHdztXiDtEpFS"); + solana_program::declare_id!("3BX6SBeEBibHaVQXywdkcgyUk6evfYZkHdztXiDtEpFS"); } pub mod separate_nonce_from_blockhash { - solana_sdk::declare_id!("Gea3ZkK2N4pHuVZVxWcnAtS6UEDdyumdYt4pFcKjA3ar"); + solana_program::declare_id!("Gea3ZkK2N4pHuVZVxWcnAtS6UEDdyumdYt4pFcKjA3ar"); } pub mod enable_durable_nonce { - solana_sdk::declare_id!("4EJQtF2pkRyawwcTVfQutzq4Sa5hRhibF6QAK1QXhtEX"); + solana_program::declare_id!("4EJQtF2pkRyawwcTVfQutzq4Sa5hRhibF6QAK1QXhtEX"); } pub mod vote_state_update_credit_per_dequeue { - solana_sdk::declare_id!("CveezY6FDLVBToHDcvJRmtMouqzsmj4UXYh5ths5G5Uv"); + solana_program::declare_id!("CveezY6FDLVBToHDcvJRmtMouqzsmj4UXYh5ths5G5Uv"); } pub mod quick_bail_on_panic { - solana_sdk::declare_id!("DpJREPyuMZ5nDfU6H3WTqSqUFSXAfw8u7xqmWtEwJDcP"); + solana_program::declare_id!("DpJREPyuMZ5nDfU6H3WTqSqUFSXAfw8u7xqmWtEwJDcP"); } pub mod nonce_must_be_authorized { - solana_sdk::declare_id!("HxrEu1gXuH7iD3Puua1ohd5n4iUKJyFNtNxk9DVJkvgr"); + solana_program::declare_id!("HxrEu1gXuH7iD3Puua1ohd5n4iUKJyFNtNxk9DVJkvgr"); } pub mod nonce_must_be_advanceable { - solana_sdk::declare_id!("3u3Er5Vc2jVcwz4xr2GJeSAXT3fAj6ADHZ4BJMZiScFd"); + solana_program::declare_id!("3u3Er5Vc2jVcwz4xr2GJeSAXT3fAj6ADHZ4BJMZiScFd"); } pub mod vote_authorize_with_seed { - solana_sdk::declare_id!("6tRxEYKuy2L5nnv5bgn7iT28MxUbYxp5h7F3Ncf1exrT"); + solana_program::declare_id!("6tRxEYKuy2L5nnv5bgn7iT28MxUbYxp5h7F3Ncf1exrT"); } pub mod preserve_rent_epoch_for_rent_exempt_accounts { - solana_sdk::declare_id!("HH3MUYReL2BvqqA3oEcAa7txju5GY6G4nxJ51zvsEjEZ"); + solana_program::declare_id!("HH3MUYReL2BvqqA3oEcAa7txju5GY6G4nxJ51zvsEjEZ"); } pub mod enable_bpf_loader_extend_program_ix { - 
solana_sdk::declare_id!("8Zs9W7D9MpSEtUWSQdGniZk2cNmV22y6FLJwCx53asme"); + solana_program::declare_id!("8Zs9W7D9MpSEtUWSQdGniZk2cNmV22y6FLJwCx53asme"); } pub mod enable_early_verification_of_account_modifications { - solana_sdk::declare_id!("7Vced912WrRnfjaiKRiNBcbuFw7RrnLv3E3z95Y4GTNc"); + solana_program::declare_id!("7Vced912WrRnfjaiKRiNBcbuFw7RrnLv3E3z95Y4GTNc"); } pub mod skip_rent_rewrites { - solana_sdk::declare_id!("CGB2jM8pwZkeeiXQ66kBMyBR6Np61mggL7XUsmLjVcrw"); + solana_program::declare_id!("CGB2jM8pwZkeeiXQ66kBMyBR6Np61mggL7XUsmLjVcrw"); } pub mod prevent_crediting_accounts_that_end_rent_paying { - solana_sdk::declare_id!("812kqX67odAp5NFwM8D2N24cku7WTm9CHUTFUXaDkWPn"); + solana_program::declare_id!("812kqX67odAp5NFwM8D2N24cku7WTm9CHUTFUXaDkWPn"); } pub mod cap_bpf_program_instruction_accounts { - solana_sdk::declare_id!("9k5ijzTbYPtjzu8wj2ErH9v45xecHzQ1x4PMYMMxFgdM"); + solana_program::declare_id!("9k5ijzTbYPtjzu8wj2ErH9v45xecHzQ1x4PMYMMxFgdM"); } pub mod loosen_cpi_size_restriction { - solana_sdk::declare_id!("GDH5TVdbTPUpRnXaRyQqiKUa7uZAbZ28Q2N9bhbKoMLm"); + solana_program::declare_id!("GDH5TVdbTPUpRnXaRyQqiKUa7uZAbZ28Q2N9bhbKoMLm"); } pub mod use_default_units_in_fee_calculation { - solana_sdk::declare_id!("8sKQrMQoUHtQSUP83SPG4ta2JDjSAiWs7t5aJ9uEd6To"); + solana_program::declare_id!("8sKQrMQoUHtQSUP83SPG4ta2JDjSAiWs7t5aJ9uEd6To"); } pub mod compact_vote_state_updates { - solana_sdk::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); + solana_program::declare_id!("86HpNqzutEZwLcPxS6EHDcMNYWk6ikhteg9un7Y2PBKE"); } pub mod incremental_snapshot_only_incremental_hash_calculation { - solana_sdk::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); + solana_program::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); } pub mod disable_cpi_setting_executable_and_rent_epoch { - solana_sdk::declare_id!("B9cdB55u4jQsDNsdTK525yE9dmSc5Ga7YBaBrDFvEhM9"); + solana_program::declare_id!("B9cdB55u4jQsDNsdTK525yE9dmSc5Ga7YBaBrDFvEhM9"); } pub mod on_load_preserve_rent_epoch_for_rent_exempt_accounts { - solana_sdk::declare_id!("CpkdQmspsaZZ8FVAouQTtTWZkc8eeQ7V3uj7dWz543rZ"); + solana_program::declare_id!("CpkdQmspsaZZ8FVAouQTtTWZkc8eeQ7V3uj7dWz543rZ"); } pub mod account_hash_ignore_slot { - solana_sdk::declare_id!("SVn36yVApPLYsa8koK3qUcy14zXDnqkNYWyUh1f4oK1"); + solana_program::declare_id!("SVn36yVApPLYsa8koK3qUcy14zXDnqkNYWyUh1f4oK1"); } pub mod set_exempt_rent_epoch_max { - solana_sdk::declare_id!("5wAGiy15X1Jb2hkHnPDCM8oB9V42VNA9ftNVFK84dEgv"); + solana_program::declare_id!("5wAGiy15X1Jb2hkHnPDCM8oB9V42VNA9ftNVFK84dEgv"); } pub mod relax_authority_signer_check_for_lookup_table_creation { - solana_sdk::declare_id!("FKAcEvNgSY79RpqsPNUV5gDyumopH4cEHqUxyfm8b8Ap"); + solana_program::declare_id!("FKAcEvNgSY79RpqsPNUV5gDyumopH4cEHqUxyfm8b8Ap"); } pub mod stop_sibling_instruction_search_at_parent { - solana_sdk::declare_id!("EYVpEP7uzH1CoXzbD6PubGhYmnxRXPeq3PPsm1ba3gpo"); + solana_program::declare_id!("EYVpEP7uzH1CoXzbD6PubGhYmnxRXPeq3PPsm1ba3gpo"); } pub mod vote_state_update_root_fix { - solana_sdk::declare_id!("G74BkWBzmsByZ1kxHy44H3wjwp5hp7JbrGRuDpco22tY"); + solana_program::declare_id!("G74BkWBzmsByZ1kxHy44H3wjwp5hp7JbrGRuDpco22tY"); } pub mod cap_accounts_data_allocations_per_transaction { - solana_sdk::declare_id!("9gxu85LYRAcZL38We8MYJ4A9AwgBBPtVBAqebMcT1241"); + solana_program::declare_id!("9gxu85LYRAcZL38We8MYJ4A9AwgBBPtVBAqebMcT1241"); } pub mod epoch_accounts_hash { - solana_sdk::declare_id!("5GpmAKxaGsWWbPp4bNXFLJxZVvG92ctxf7jQnzTQjF3n"); + 
solana_program::declare_id!("5GpmAKxaGsWWbPp4bNXFLJxZVvG92ctxf7jQnzTQjF3n"); } pub mod remove_deprecated_request_unit_ix { - solana_sdk::declare_id!("EfhYd3SafzGT472tYQDUc4dPd2xdEfKs5fwkowUgVt4W"); + solana_program::declare_id!("EfhYd3SafzGT472tYQDUc4dPd2xdEfKs5fwkowUgVt4W"); } pub mod disable_rehash_for_rent_epoch { - solana_sdk::declare_id!("DTVTkmw3JSofd8CJVJte8PXEbxNQ2yZijvVr3pe2APPj"); + solana_program::declare_id!("DTVTkmw3JSofd8CJVJte8PXEbxNQ2yZijvVr3pe2APPj"); } pub mod increase_tx_account_lock_limit { - solana_sdk::declare_id!("9LZdXeKGeBV6hRLdxS1rHbHoEUsKqesCC2ZAPTPKJAbK"); + solana_program::declare_id!("9LZdXeKGeBV6hRLdxS1rHbHoEUsKqesCC2ZAPTPKJAbK"); } pub mod limit_max_instruction_trace_length { - solana_sdk::declare_id!("GQALDaC48fEhZGWRj9iL5Q889emJKcj3aCvHF7VCbbF4"); + solana_program::declare_id!("GQALDaC48fEhZGWRj9iL5Q889emJKcj3aCvHF7VCbbF4"); } pub mod check_syscall_outputs_do_not_overlap { - solana_sdk::declare_id!("3uRVPBpyEJRo1emLCrq38eLRFGcu6uKSpUXqGvU8T7SZ"); + solana_program::declare_id!("3uRVPBpyEJRo1emLCrq38eLRFGcu6uKSpUXqGvU8T7SZ"); } pub mod enable_bpf_loader_set_authority_checked_ix { - solana_sdk::declare_id!("5x3825XS7M2A3Ekbn5VGGkvFoAg5qrRWkTrY4bARP1GL"); + solana_program::declare_id!("5x3825XS7M2A3Ekbn5VGGkvFoAg5qrRWkTrY4bARP1GL"); } pub mod enable_alt_bn128_syscall { - solana_sdk::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); + solana_program::declare_id!("A16q37opZdQMCbe5qJ6xpBB9usykfv8jZaMkxvZQi4GJ"); } pub mod simplify_alt_bn128_syscall_error_codes { - solana_sdk::declare_id!("JDn5q3GBeqzvUa7z67BbmVHVdE3EbUAjvFep3weR3jxX"); + solana_program::declare_id!("JDn5q3GBeqzvUa7z67BbmVHVdE3EbUAjvFep3weR3jxX"); } pub mod enable_alt_bn128_compression_syscall { - solana_sdk::declare_id!("EJJewYSddEEtSZHiqugnvhQHiWyZKjkFDQASd7oKSagn"); + solana_program::declare_id!("EJJewYSddEEtSZHiqugnvhQHiWyZKjkFDQASd7oKSagn"); } pub mod enable_program_redeployment_cooldown { - solana_sdk::declare_id!("J4HFT8usBxpcF63y46t1upYobJgChmKyZPm5uTBRg25Z"); + solana_program::declare_id!("J4HFT8usBxpcF63y46t1upYobJgChmKyZPm5uTBRg25Z"); } pub mod commission_updates_only_allowed_in_first_half_of_epoch { - solana_sdk::declare_id!("noRuG2kzACwgaY7TVmLRnUNPLKNVQE1fb7X55YWBehp"); + solana_program::declare_id!("noRuG2kzACwgaY7TVmLRnUNPLKNVQE1fb7X55YWBehp"); } pub mod enable_turbine_fanout_experiments { - solana_sdk::declare_id!("D31EFnLgdiysi84Woo3of4JMu7VmasUS3Z7j9HYXCeLY"); + solana_program::declare_id!("D31EFnLgdiysi84Woo3of4JMu7VmasUS3Z7j9HYXCeLY"); } pub mod disable_turbine_fanout_experiments { - solana_sdk::declare_id!("Gz1aLrbeQ4Q6PTSafCZcGWZXz91yVRi7ASFzFEr1U4sa"); + solana_program::declare_id!("Gz1aLrbeQ4Q6PTSafCZcGWZXz91yVRi7ASFzFEr1U4sa"); } pub mod move_serialized_len_ptr_in_cpi { - solana_sdk::declare_id!("74CoWuBmt3rUVUrCb2JiSTvh6nXyBWUsK4SaMj3CtE3T"); + solana_program::declare_id!("74CoWuBmt3rUVUrCb2JiSTvh6nXyBWUsK4SaMj3CtE3T"); } pub mod update_hashes_per_tick { - solana_sdk::declare_id!("3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B"); + solana_program::declare_id!("3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B"); } pub mod enable_big_mod_exp_syscall { - solana_sdk::declare_id!("EBq48m8irRKuE7ZnMTLvLg2UuGSqhe8s8oMqnmja1fJw"); + solana_program::declare_id!("EBq48m8irRKuE7ZnMTLvLg2UuGSqhe8s8oMqnmja1fJw"); } pub mod disable_builtin_loader_ownership_chains { - solana_sdk::declare_id!("4UDcAfQ6EcA6bdcadkeHpkarkhZGJ7Bpq7wTAiRMjkoi"); + solana_program::declare_id!("4UDcAfQ6EcA6bdcadkeHpkarkhZGJ7Bpq7wTAiRMjkoi"); } pub mod cap_transaction_accounts_data_size { - 
solana_sdk::declare_id!("DdLwVYuvDz26JohmgSbA7mjpJFgX5zP2dkp8qsF2C33V"); + solana_program::declare_id!("DdLwVYuvDz26JohmgSbA7mjpJFgX5zP2dkp8qsF2C33V"); } pub mod remove_congestion_multiplier_from_fee_calculation { - solana_sdk::declare_id!("A8xyMHZovGXFkorFqEmVH2PKGLiBip5JD7jt4zsUWo4H"); + solana_program::declare_id!("A8xyMHZovGXFkorFqEmVH2PKGLiBip5JD7jt4zsUWo4H"); } pub mod enable_request_heap_frame_ix { - solana_sdk::declare_id!("Hr1nUA9b7NJ6eChS26o7Vi8gYYDDwWD3YeBfzJkTbU86"); + solana_program::declare_id!("Hr1nUA9b7NJ6eChS26o7Vi8gYYDDwWD3YeBfzJkTbU86"); } pub mod prevent_rent_paying_rent_recipients { - solana_sdk::declare_id!("Fab5oP3DmsLYCiQZXdjyqT3ukFFPrsmqhXU4WU1AWVVF"); + solana_program::declare_id!("Fab5oP3DmsLYCiQZXdjyqT3ukFFPrsmqhXU4WU1AWVVF"); } pub mod delay_visibility_of_program_deployment { - solana_sdk::declare_id!("GmuBvtFb2aHfSfMXpuFeWZGHyDeCLPS79s48fmCWCfM5"); + solana_program::declare_id!("GmuBvtFb2aHfSfMXpuFeWZGHyDeCLPS79s48fmCWCfM5"); } pub mod apply_cost_tracker_during_replay { - solana_sdk::declare_id!("2ry7ygxiYURULZCrypHhveanvP5tzZ4toRwVp89oCNSj"); + solana_program::declare_id!("2ry7ygxiYURULZCrypHhveanvP5tzZ4toRwVp89oCNSj"); } pub mod bpf_account_data_direct_mapping { - solana_sdk::declare_id!("EenyoWx9UMXYKpR8mW5Jmfmy2fRjzUtM7NduYMY8bx33"); + solana_program::declare_id!("EenyoWx9UMXYKpR8mW5Jmfmy2fRjzUtM7NduYMY8bx33"); } pub mod add_set_tx_loaded_accounts_data_size_instruction { - solana_sdk::declare_id!("G6vbf1UBok8MWb8m25ex86aoQHeKTzDKzuZADHkShqm6"); + solana_program::declare_id!("G6vbf1UBok8MWb8m25ex86aoQHeKTzDKzuZADHkShqm6"); } pub mod switch_to_new_elf_parser { - solana_sdk::declare_id!("Cdkc8PPTeTNUPoZEfCY5AyetUrEdkZtNPMgz58nqyaHD"); + solana_program::declare_id!("Cdkc8PPTeTNUPoZEfCY5AyetUrEdkZtNPMgz58nqyaHD"); } pub mod round_up_heap_size { - solana_sdk::declare_id!("CE2et8pqgyQMP2mQRg3CgvX8nJBKUArMu3wfiQiQKY1y"); + solana_program::declare_id!("CE2et8pqgyQMP2mQRg3CgvX8nJBKUArMu3wfiQiQKY1y"); } pub mod remove_bpf_loader_incorrect_program_id { - solana_sdk::declare_id!("2HmTkCj9tXuPE4ueHzdD7jPeMf9JGCoZh5AsyoATiWEe"); + solana_program::declare_id!("2HmTkCj9tXuPE4ueHzdD7jPeMf9JGCoZh5AsyoATiWEe"); } pub mod include_loaded_accounts_data_size_in_fee_calculation { - solana_sdk::declare_id!("EaQpmC6GtRssaZ3PCUM5YksGqUdMLeZ46BQXYtHYakDS"); + solana_program::declare_id!("EaQpmC6GtRssaZ3PCUM5YksGqUdMLeZ46BQXYtHYakDS"); } pub mod native_programs_consume_cu { - solana_sdk::declare_id!("8pgXCMNXC8qyEFypuwpXyRxLXZdpM4Qo72gJ6k87A6wL"); + solana_program::declare_id!("8pgXCMNXC8qyEFypuwpXyRxLXZdpM4Qo72gJ6k87A6wL"); } pub mod simplify_writable_program_account_check { - solana_sdk::declare_id!("5ZCcFAzJ1zsFKe1KSZa9K92jhx7gkcKj97ci2DBo1vwj"); + solana_program::declare_id!("5ZCcFAzJ1zsFKe1KSZa9K92jhx7gkcKj97ci2DBo1vwj"); } pub mod stop_truncating_strings_in_syscalls { - solana_sdk::declare_id!("16FMCmgLzCNNz6eTwGanbyN2ZxvTBSLuQ6DZhgeMshg"); + solana_program::declare_id!("16FMCmgLzCNNz6eTwGanbyN2ZxvTBSLuQ6DZhgeMshg"); } pub mod clean_up_delegation_errors { - solana_sdk::declare_id!("Bj2jmUsM2iRhfdLLDSTkhM5UQRQvQHm57HSmPibPtEyu"); + solana_program::declare_id!("Bj2jmUsM2iRhfdLLDSTkhM5UQRQvQHm57HSmPibPtEyu"); } pub mod vote_state_add_vote_latency { - solana_sdk::declare_id!("7axKe5BTYBDD87ftzWbk5DfzWMGyRvqmWTduuo22Yaqy"); + solana_program::declare_id!("7axKe5BTYBDD87ftzWbk5DfzWMGyRvqmWTduuo22Yaqy"); } pub mod checked_arithmetic_in_fee_validation { - solana_sdk::declare_id!("5Pecy6ie6XGm22pc9d4P9W5c31BugcFBuy6hsP2zkETv"); + 
solana_program::declare_id!("5Pecy6ie6XGm22pc9d4P9W5c31BugcFBuy6hsP2zkETv"); } pub mod last_restart_slot_sysvar { - solana_sdk::declare_id!("HooKD5NC9QNxk25QuzCssB8ecrEzGt6eXEPBUxWp1LaR"); + solana_program::declare_id!("HooKD5NC9QNxk25QuzCssB8ecrEzGt6eXEPBUxWp1LaR"); } pub mod reduce_stake_warmup_cooldown { - solana_sdk::declare_id!("GwtDQBghCTBgmX2cpEGNPxTEBUTQRaDMGTr5qychdGMj"); + solana_program::declare_id!("GwtDQBghCTBgmX2cpEGNPxTEBUTQRaDMGTr5qychdGMj"); } mod revise_turbine_epoch_stakes { - solana_sdk::declare_id!("BTWmtJC8U5ZLMbBUUA1k6As62sYjPEjAiNAT55xYGdJU"); + solana_program::declare_id!("BTWmtJC8U5ZLMbBUUA1k6As62sYjPEjAiNAT55xYGdJU"); } pub mod enable_poseidon_syscall { - solana_sdk::declare_id!("FL9RsQA6TVUoh5xJQ9d936RHSebA1NLQqe3Zv9sXZRpr"); + solana_program::declare_id!("FL9RsQA6TVUoh5xJQ9d936RHSebA1NLQqe3Zv9sXZRpr"); } pub mod timely_vote_credits { - solana_sdk::declare_id!("tvcF6b1TRz353zKuhBjinZkKzjmihXmBAHJdjNYw1sQ"); + solana_program::declare_id!("tvcF6b1TRz353zKuhBjinZkKzjmihXmBAHJdjNYw1sQ"); } pub mod remaining_compute_units_syscall_enabled { - solana_sdk::declare_id!("5TuppMutoyzhUSfuYdhgzD47F92GL1g89KpCZQKqedxP"); + solana_program::declare_id!("5TuppMutoyzhUSfuYdhgzD47F92GL1g89KpCZQKqedxP"); } pub mod enable_program_runtime_v2_and_loader_v4 { - solana_sdk::declare_id!("8oBxsYqnCvUTGzgEpxPcnVf7MLbWWPYddE33PftFeBBd"); + solana_program::declare_id!("8oBxsYqnCvUTGzgEpxPcnVf7MLbWWPYddE33PftFeBBd"); } pub mod require_rent_exempt_split_destination { - solana_sdk::declare_id!("D2aip4BBr8NPWtU9vLrwrBvbuaQ8w1zV38zFLxx4pfBV"); + solana_program::declare_id!("D2aip4BBr8NPWtU9vLrwrBvbuaQ8w1zV38zFLxx4pfBV"); } pub mod better_error_codes_for_tx_lamport_check { - solana_sdk::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); + solana_program::declare_id!("Ffswd3egL3tccB6Rv3XY6oqfdzn913vUcjCSnpvCKpfx"); } pub mod update_hashes_per_tick2 { - solana_sdk::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); + solana_program::declare_id!("EWme9uFqfy1ikK1jhJs8fM5hxWnK336QJpbscNtizkTU"); } pub mod update_hashes_per_tick3 { - solana_sdk::declare_id!("8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5"); + solana_program::declare_id!("8C8MCtsab5SsfammbzvYz65HHauuUYdbY2DZ4sznH6h5"); } pub mod update_hashes_per_tick4 { - solana_sdk::declare_id!("8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg"); + solana_program::declare_id!("8We4E7DPwF2WfAN8tRTtWQNhi98B99Qpuj7JoZ3Aikgg"); } pub mod update_hashes_per_tick5 { - solana_sdk::declare_id!("BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4"); + solana_program::declare_id!("BsKLKAn1WM4HVhPRDsjosmqSg2J8Tq5xP2s2daDS6Ni4"); } pub mod update_hashes_per_tick6 { - solana_sdk::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); + solana_program::declare_id!("FKu1qYwLQSiehz644H6Si65U5ZQ2cp9GxsyFUfYcuADv"); } pub mod validate_fee_collector_account { - solana_sdk::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm"); + solana_program::declare_id!("prpFrMtgNmzaNzkPJg9o753fVvbHKqNrNTm76foJ2wm"); } pub mod disable_rent_fees_collection { - solana_sdk::declare_id!("CJzY83ggJHqPGDq8VisV3U91jDJLuEaALZooBrXtnnLU"); + solana_program::declare_id!("CJzY83ggJHqPGDq8VisV3U91jDJLuEaALZooBrXtnnLU"); } pub mod enable_zk_transfer_with_fee { - solana_sdk::declare_id!("zkNLP7EQALfC1TYeB3biDU7akDckj8iPkvh9y2Mt2K3"); + solana_program::declare_id!("zkNLP7EQALfC1TYeB3biDU7akDckj8iPkvh9y2Mt2K3"); } pub mod drop_legacy_shreds { - solana_sdk::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy"); + 
solana_program::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy"); } pub mod allow_commission_decrease_at_any_time { - solana_sdk::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); + solana_program::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); } pub mod add_new_reserved_account_keys { - solana_sdk::declare_id!("8U4skmMVnF6k2kMvrWbQuRUT3qQSiTYpSjqmhmgfthZu"); + solana_program::declare_id!("8U4skmMVnF6k2kMvrWbQuRUT3qQSiTYpSjqmhmgfthZu"); } pub mod consume_blockstore_duplicate_proofs { - solana_sdk::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ"); + solana_program::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ"); } pub mod index_erasure_conflict_duplicate_proofs { - solana_sdk::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa"); + solana_program::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa"); } pub mod merkle_conflict_duplicate_proofs { - solana_sdk::declare_id!("mrkPjRg79B2oK2ZLgd7S3AfEJaX9B6gAF3H9aEykRUS"); + solana_program::declare_id!("mrkPjRg79B2oK2ZLgd7S3AfEJaX9B6gAF3H9aEykRUS"); } pub mod disable_bpf_loader_instructions { - solana_sdk::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn"); + solana_program::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn"); } pub mod enable_zk_proof_from_account { - solana_sdk::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); + solana_program::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); } pub mod cost_model_requested_write_lock_cost { - solana_sdk::declare_id!("wLckV1a64ngtcKPRGU4S4grVTestXjmNjxBjaKZrAcn"); + solana_program::declare_id!("wLckV1a64ngtcKPRGU4S4grVTestXjmNjxBjaKZrAcn"); } pub mod enable_gossip_duplicate_proof_ingestion { - solana_sdk::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); + solana_program::declare_id!("FNKCMBzYUdjhHyPdsKG2LSmdzH8TCHXn3ytj8RNBS4nG"); } pub mod chained_merkle_conflict_duplicate_proofs { - solana_sdk::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8"); + solana_program::declare_id!("chaie9S2zVfuxJKNRGkyTDokLwWxx6kD2ZLsqQHaDD8"); } pub mod enable_chained_merkle_shreds { - solana_sdk::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); + solana_program::declare_id!("7uZBkJXJ1HkuP6R3MJfZs7mLwymBcDbKdqbF51ZWLier"); } pub mod remove_rounding_in_fee_calculation { - solana_sdk::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); + solana_program::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); } pub mod enable_tower_sync_ix { - solana_sdk::declare_id!("tSynMCspg4xFiCj1v3TDb4c7crMR5tSBhLz4sF7rrNA"); + solana_program::declare_id!("tSynMCspg4xFiCj1v3TDb4c7crMR5tSBhLz4sF7rrNA"); } pub mod deprecate_unused_legacy_vote_plumbing { - solana_sdk::declare_id!("6Uf8S75PVh91MYgPQSHnjRAPQq6an5BDv9vomrCwDqLe"); + solana_program::declare_id!("6Uf8S75PVh91MYgPQSHnjRAPQq6an5BDv9vomrCwDqLe"); } pub mod reward_full_priority_fee { - solana_sdk::declare_id!("3opE3EzAKnUftUDURkzMgwpNgimBAypW1mNDYH4x4Zg7"); + solana_program::declare_id!("3opE3EzAKnUftUDURkzMgwpNgimBAypW1mNDYH4x4Zg7"); } pub mod get_sysvar_syscall_enabled { - solana_sdk::declare_id!("CLCoTADvV64PSrnR6QXty6Fwrt9Xc6EdxSJE4wLRePjq"); + solana_program::declare_id!("CLCoTADvV64PSrnR6QXty6Fwrt9Xc6EdxSJE4wLRePjq"); } pub mod abort_on_invalid_curve { - solana_sdk::declare_id!("FuS3FPfJDKSNot99ECLXtp3rueq36hMNStJkPJwWodLh"); + solana_program::declare_id!("FuS3FPfJDKSNot99ECLXtp3rueq36hMNStJkPJwWodLh"); } pub mod migrate_feature_gate_program_to_core_bpf { - 
solana_sdk::declare_id!("4eohviozzEeivk1y9UbrnekbAFMDQyJz5JjA9Y6gyvky"); + solana_program::declare_id!("4eohviozzEeivk1y9UbrnekbAFMDQyJz5JjA9Y6gyvky"); } pub mod vote_only_full_fec_sets { - solana_sdk::declare_id!("ffecLRhhakKSGhMuc6Fz2Lnfq4uT9q3iu9ZsNaPLxPc"); + solana_program::declare_id!("ffecLRhhakKSGhMuc6Fz2Lnfq4uT9q3iu9ZsNaPLxPc"); } pub mod migrate_config_program_to_core_bpf { - solana_sdk::declare_id!("2Fr57nzzkLYXW695UdDxDeR5fhnZWSttZeZYemrnpGFV"); + solana_program::declare_id!("2Fr57nzzkLYXW695UdDxDeR5fhnZWSttZeZYemrnpGFV"); } pub mod enable_get_epoch_stake_syscall { - solana_sdk::declare_id!("7mScTYkJXsbdrcwTQRs7oeCSXoJm4WjzBsRyf8bCU3Np"); + solana_program::declare_id!("7mScTYkJXsbdrcwTQRs7oeCSXoJm4WjzBsRyf8bCU3Np"); } pub mod migrate_address_lookup_table_program_to_core_bpf { - solana_sdk::declare_id!("C97eKZygrkU4JxJsZdjgbUY7iQR7rKTr4NyDWo2E5pRm"); + solana_program::declare_id!("C97eKZygrkU4JxJsZdjgbUY7iQR7rKTr4NyDWo2E5pRm"); } pub mod zk_elgamal_proof_program_enabled { - solana_sdk::declare_id!("zkhiy5oLowR7HY4zogXjCjeMXyruLqBwSWH21qcFtnv"); + solana_program::declare_id!("zkhiy5oLowR7HY4zogXjCjeMXyruLqBwSWH21qcFtnv"); } pub mod verify_retransmitter_signature { - solana_sdk::declare_id!("BZ5g4hRbu5hLQQBdPyo2z9icGyJ8Khiyj3QS6dhWijTb"); + solana_program::declare_id!("BZ5g4hRbu5hLQQBdPyo2z9icGyJ8Khiyj3QS6dhWijTb"); } pub mod move_stake_and_move_lamports_ixs { - solana_sdk::declare_id!("7bTK6Jis8Xpfrs8ZoUfiMDPazTcdPcTWheZFJTA5Z6X4"); + solana_program::declare_id!("7bTK6Jis8Xpfrs8ZoUfiMDPazTcdPcTWheZFJTA5Z6X4"); } pub mod ed25519_precompile_verify_strict { - solana_sdk::declare_id!("ed9tNscbWLYBooxWA7FE2B5KHWs8A6sxfY8EzezEcoo"); + solana_program::declare_id!("ed9tNscbWLYBooxWA7FE2B5KHWs8A6sxfY8EzezEcoo"); } pub mod vote_only_retransmitter_signed_fec_sets { - solana_sdk::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); + solana_program::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); } pub mod move_precompile_verification_to_svm { - solana_sdk::declare_id!("9ypxGLzkMxi89eDerRKXWDXe44UY2z4hBig4mDhNq5Dp"); + solana_program::declare_id!("9ypxGLzkMxi89eDerRKXWDXe44UY2z4hBig4mDhNq5Dp"); } pub mod enable_transaction_loading_failure_fees { - solana_sdk::declare_id!("PaymEPK2oqwT9TXAVfadjztH2H6KfLEB9Hhd5Q5frvP"); + solana_program::declare_id!("PaymEPK2oqwT9TXAVfadjztH2H6KfLEB9Hhd5Q5frvP"); } pub mod enable_turbine_extended_fanout_experiments { - solana_sdk::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); + solana_program::declare_id!("BZn14Liea52wtBwrXUxTv6vojuTTmfc7XGEDTXrvMD7b"); } pub mod deprecate_legacy_vote_ixs { - solana_sdk::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); + solana_program::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); } lazy_static! { @@ -1109,7 +1111,7 @@ lazy_static! 
 }
 
 /// `FeatureSet` holds the set of currently active/inactive runtime features
-#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
+#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))]
 #[derive(Debug, Clone, Eq, PartialEq)]
 pub struct FeatureSet {
     pub active: HashMap<Pubkey, Slot>,
diff --git a/sdk/src/ed25519_instruction.rs b/sdk/src/ed25519_instruction.rs
index bdc4d4d0c4681b..45a8f1c2c2d932 100644
--- a/sdk/src/ed25519_instruction.rs
+++ b/sdk/src/ed25519_instruction.rs
@@ -5,14 +5,11 @@
 #![cfg(feature = "full")]
 use {
-    crate::{
-        feature_set::{ed25519_precompile_verify_strict, FeatureSet},
-        instruction::Instruction,
-        precompiles::PrecompileError,
-    },
+    crate::{instruction::Instruction, precompiles::PrecompileError},
     bytemuck::bytes_of,
     bytemuck_derive::{Pod, Zeroable},
     ed25519_dalek::{ed25519::signature::Signature, Signer, Verifier},
+    solana_feature_set::{ed25519_precompile_verify_strict, FeatureSet},
 };
 
 pub const PUBKEY_SERIALIZED_SIZE: usize = 32;
@@ -194,13 +191,13 @@ pub mod test {
     use {
         super::*,
         crate::{
             ed25519_instruction::new_ed25519_instruction,
-            feature_set::FeatureSet,
             hash::Hash,
             signature::{Keypair, Signer},
             transaction::Transaction,
         },
         hex,
         rand0_7::{thread_rng, Rng},
+        solana_feature_set::FeatureSet,
     };
 
     pub fn new_ed25519_instruction_raw(
diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs
index 5a1b87b211724b..47f680603c65c3 100644
--- a/sdk/src/lib.rs
+++ b/sdk/src/lib.rs
@@ -72,7 +72,6 @@ pub mod epoch_rewards_hasher;
 pub mod example_mocks;
 pub mod exit;
 pub mod feature;
-pub mod feature_set;
 pub mod fee;
 pub mod genesis_config;
 pub mod hard_forks;
@@ -114,6 +113,8 @@ pub use solana_bn254 as alt_bn128;
 pub use solana_decode_error as decode_error;
 #[deprecated(since = "2.1.0", note = "Use `solana-derivation-path` crate instead")]
 pub use solana_derivation_path as derivation_path;
+#[deprecated(since = "2.1.0", note = "Use `solana-feature-set` crate instead")]
+pub use solana_feature_set as feature_set;
 #[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")]
 pub use solana_program_memory as program_memory;
 #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")]
 pub use solana_sanitize as sanitize;
diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs
index cb16e5ecd86276..087066e1b23be5 100644
--- a/sdk/src/precompiles.rs
+++ b/sdk/src/precompiles.rs
@@ -3,11 +3,11 @@
 #![cfg(feature = "full")]
 
 use {
-    crate::{feature_set::FeatureSet, pubkey::Pubkey},
     lazy_static::lazy_static,
     num_derive::{FromPrimitive, ToPrimitive},
     solana_decode_error::DecodeError,
-    solana_program::instruction::CompiledInstruction,
+    solana_feature_set::FeatureSet,
+    solana_program::{instruction::CompiledInstruction, pubkey::Pubkey},
     thiserror::Error,
 };
diff --git a/sdk/src/reserved_account_keys.rs b/sdk/src/reserved_account_keys.rs
index e3c6bed5c973be..0ce4ac632bb250 100644
--- a/sdk/src/reserved_account_keys.rs
+++ b/sdk/src/reserved_account_keys.rs
@@ -7,13 +7,11 @@
 use {
     crate::{
         address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable,
-        compute_budget, config, ed25519_program, feature,
-        feature_set::{self, FeatureSet},
-        loader_v4, native_loader,
-        pubkey::Pubkey,
+        compute_budget, config, ed25519_program, feature, loader_v4, native_loader, pubkey::Pubkey,
         secp256k1_program, stake, system_program, sysvar, vote,
     },
     lazy_static::lazy_static,
+    solana_feature_set::{self as feature_set, FeatureSet},
     std::collections::{HashMap, HashSet},
 };
diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs
index c694065483d9c5..825e9bdbd03d5d 100644
--- a/sdk/src/secp256k1_instruction.rs
+++ b/sdk/src/secp256k1_instruction.rs
@@ -788,15 +788,12 @@
 #![cfg(feature = "full")]
 
 use {
-    crate::{
-        feature_set::{
-            libsecp256k1_fail_on_bad_count, libsecp256k1_fail_on_bad_count2, FeatureSet,
-        },
-        instruction::Instruction,
-        precompiles::PrecompileError,
-    },
+    crate::{instruction::Instruction, precompiles::PrecompileError},
     digest::Digest,
     serde_derive::{Deserialize, Serialize},
+    solana_feature_set::{
+        libsecp256k1_fail_on_bad_count, libsecp256k1_fail_on_bad_count2, FeatureSet,
+    },
 };
 
 pub const HASHED_PUBKEY_SERIALIZED_SIZE: usize = 20;
@@ -1040,7 +1037,6 @@ pub mod test {
     use {
         super::*,
         crate::{
-            feature_set,
             hash::Hash,
             keccak,
             secp256k1_instruction::{
@@ -1224,7 +1220,7 @@ pub mod test {
         let message_arr = b"hello";
         let mut secp_instruction = new_secp256k1_instruction(&secp_privkey, message_arr);
         let mint_keypair = Keypair::new();
-        let feature_set = feature_set::FeatureSet::all_enabled();
+        let feature_set = solana_feature_set::FeatureSet::all_enabled();
 
         let tx = Transaction::new_signed_with_payer(
             &[secp_instruction.clone()],
diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs
index b4dd1aee955874..7007a592732217 100644
--- a/sdk/src/transaction/mod.rs
+++ b/sdk/src/transaction/mod.rs
@@ -126,9 +126,9 @@ use {
         signers::Signers,
     },
     serde::Serialize,
+    solana_feature_set as feature_set,
     solana_program::{system_instruction::SystemInstruction, system_program},
     solana_sanitize::{Sanitize, SanitizeError},
-    solana_sdk::feature_set,
     solana_short_vec as short_vec,
     std::result,
 };
diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs
index 5384388987e299..3027578918e1dd 100644
--- a/sdk/src/transaction/sanitized.rs
+++ b/sdk/src/transaction/sanitized.rs
@@ -15,9 +15,9 @@ use {
         reserved_account_keys::ReservedAccountKeys,
         signature::Signature,
         simple_vote_transaction_checker::is_simple_vote_transaction,
-        solana_sdk::feature_set,
         transaction::{Result, Transaction, TransactionError, VersionedTransaction},
     },
+    solana_feature_set as feature_set,
     solana_program::message::SanitizedVersionedMessage,
     solana_sanitize::Sanitize,
     std::collections::HashSet,
diff --git a/svm/Cargo.toml b/svm/Cargo.toml
index 9f0d80c4d1f5ad..2b2a44646e7604 100644
--- a/svm/Cargo.toml
+++ b/svm/Cargo.toml
@@ -18,6 +18,7 @@ serde = { workspace = true, features = ["rc"] }
 serde_derive = { workspace = true }
 solana-bpf-loader-program = { workspace = true }
 solana-compute-budget = { workspace = true }
+solana-feature-set = { workspace = true }
 solana-fee = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true }
 solana-frozen-abi-macro = { workspace = true, optional = true }
diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs
index 0799ac8ba3cec7..c139ea15130319 100644
--- a/svm/src/account_loader.rs
+++ b/svm/src/account_loader.rs
@@ -8,10 +8,10 @@ use {
     },
     itertools::Itertools,
     solana_compute_budget::compute_budget_limits::ComputeBudgetLimits,
+    solana_feature_set::{self as feature_set, FeatureSet},
     solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch},
     solana_sdk::{
         account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
-        feature_set::{self, FeatureSet},
         fee::FeeDetails,
         native_loader,
         nonce::State as NonceState,
@@ -583,12 +583,12 @@ mod tests {
         },
         nonce::state::Versions as NonceVersions,
         solana_compute_budget::{compute_budget::ComputeBudget, compute_budget_limits},
+        solana_feature_set::FeatureSet,
         solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch},
         solana_sdk::{
             account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
             bpf_loader_upgradeable,
             epoch_schedule::EpochSchedule,
-            feature_set::FeatureSet,
             hash::Hash,
             instruction::CompiledInstruction,
             message::{
diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs
index 39a1b0ad1b9ee0..7b9d248e5b981f 100644
--- a/svm/src/message_processor.rs
+++ b/svm/src/message_processor.rs
@@ -138,6 +138,7 @@ mod tests {
     use {
         super::*,
         solana_compute_budget::compute_budget::ComputeBudget,
+        solana_feature_set::FeatureSet,
         solana_program_runtime::{
             declare_process_instruction,
             invoke_context::EnvironmentConfig,
@@ -146,7 +147,6 @@ mod tests {
         },
         solana_sdk::{
             account::{AccountSharedData, ReadableAccount},
-            feature_set::FeatureSet,
             hash::Hash,
             instruction::{AccountMeta, Instruction, InstructionError},
             message::{AccountKeys, Message, SanitizedMessage},
diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs
index c6a5e43149949e..76ececd6ec5cd0 100644
--- a/svm/src/transaction_processor.rs
+++ b/svm/src/transaction_processor.rs
@@ -24,6 +24,9 @@ use {
         create_program_runtime_environment_v1, create_program_runtime_environment_v2,
     },
     solana_compute_budget::compute_budget::ComputeBudget,
+    solana_feature_set::{
+        enable_transaction_loading_failure_fees, remove_rounding_in_fee_calculation, FeatureSet,
+    },
     solana_log_collector::LogCollector,
     solana_measure::{measure::Measure, measure_us},
     solana_program_runtime::{
@@ -38,7 +41,6 @@ use {
     solana_sdk::{
         account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS},
         clock::{Epoch, Slot},
-        feature_set::{self, remove_rounding_in_fee_calculation, FeatureSet},
         fee::{FeeBudgetLimits, FeeStructure},
         hash::Hash,
         inner_instruction::{InnerInstruction, InnerInstructionsList},
@@ -298,7 +300,7 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
         let enable_transaction_loading_failure_fees = environment
             .feature_set
-            .is_active(&feature_set::enable_transaction_loading_failure_fees::id());
+            .is_active(&enable_transaction_loading_failure_fees::id());
         let (processing_results, execution_us): (Vec<TransactionProcessingResult>, u64) =
             measure_us!(loaded_transactions
                 .into_iter()
@@ -999,13 +1001,13 @@ mod tests {
             rollback_accounts::RollbackAccounts,
         },
         solana_compute_budget::compute_budget_limits::ComputeBudgetLimits,
+        solana_feature_set::FeatureSet,
         solana_program_runtime::loaded_programs::{BlockRelation, ProgramCacheEntryType},
         solana_sdk::{
             account::{create_account_shared_data_for_test, WritableAccount},
             bpf_loader,
             compute_budget::ComputeBudgetInstruction,
             epoch_schedule::EpochSchedule,
-            feature_set::FeatureSet,
             fee::FeeDetails,
             fee_calculator::FeeCalculator,
             hash::Hash,
diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs
index 5015e1fadd9d0c..dc521bc36eee15 100644
--- a/svm/tests/conformance.rs
+++ b/svm/tests/conformance.rs
@@ -7,6 +7,7 @@ use {
     prost::Message,
     solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1,
     solana_compute_budget::compute_budget::ComputeBudget,
+    solana_feature_set::{FeatureSet, FEATURE_NAMES},
     solana_log_collector::LogCollector,
     solana_program_runtime::{
         invoke_context::{EnvironmentConfig, InvokeContext},
@@ -19,7 +20,6 @@ use {
     solana_sdk::{
         account::{AccountSharedData, ReadableAccount, WritableAccount},
         bpf_loader_upgradeable,
-        feature_set::{FeatureSet, FEATURE_NAMES},
         hash::Hash,
         instruction::AccountMeta,
         message::SanitizedMessage,
diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs
index 6ece85bfcc1385..57f00360e91bfb 100644
--- a/svm/tests/mock_bank.rs
+++ b/svm/tests/mock_bank.rs
@@ -4,6 +4,7 @@ use {
         SyscallMemset, SyscallSetReturnData,
     },
     solana_compute_budget::compute_budget::ComputeBudget,
+    solana_feature_set::FeatureSet,
     solana_program_runtime::{
         invoke_context::InvokeContext,
         loaded_programs::{
@@ -18,7 +19,6 @@ use {
         account::{AccountSharedData, ReadableAccount, WritableAccount},
         bpf_loader_upgradeable::{self, UpgradeableLoaderState},
         clock::{Clock, UnixTimestamp},
-        feature_set::FeatureSet,
         native_loader,
         pubkey::Pubkey,
         slot_hashes::Slot,
diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml
index 1d73cf10c097a9..4fa4330f90be00 100644
--- a/test-validator/Cargo.toml
+++ b/test-validator/Cargo.toml
@@ -20,6 +20,7 @@ solana-accounts-db = { workspace = true }
 solana-cli-output = { workspace = true }
 solana-compute-budget = { workspace = true }
 solana-core = { workspace = true }
+solana-feature-set = { workspace = true }
 solana-geyser-plugin-manager = { workspace = true }
 solana-gossip = { workspace = true }
 solana-ledger = { workspace = true }
diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs
index a86b5c79d87070..199c61237ed403 100644
--- a/test-validator/src/lib.rs
+++ b/test-validator/src/lib.rs
@@ -15,6 +15,7 @@ use {
         consensus::tower_storage::TowerStorage,
         validator::{Validator, ValidatorConfig, ValidatorStartProgress},
     },
+    solana_feature_set::FEATURE_NAMES,
     solana_geyser_plugin_manager::{
         geyser_plugin_manager::GeyserPluginManager, GeyserPluginManagerRequest,
     },
@@ -43,8 +44,6 @@ use {
         commitment_config::CommitmentConfig,
         epoch_schedule::EpochSchedule,
         exit::Exit,
-        feature,
-        feature_set::FEATURE_NAMES,
         fee_calculator::FeeRateGovernor,
         instruction::{AccountMeta, Instruction},
         message::Message,
@@ -444,7 +443,7 @@ impl TestValidatorGenesis {
             .for_each(|(maybe_account, feature_id)| {
                 if maybe_account
                     .as_ref()
-                    .and_then(feature::from_account)
+                    .and_then(solana_sdk::feature::from_account)
                     .and_then(|feature| feature.activated_at)
                     .is_none()
                 {
diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml
index a03d384d94516c..0addbaf6866f74 100644
--- a/turbine/Cargo.toml
+++ b/turbine/Cargo.toml
@@ -24,6 +24,7 @@ rand_chacha = { workspace = true }
 rayon = { workspace = true }
 rustls = { workspace = true }
 solana-entry = { workspace = true }
+solana-feature-set = { workspace = true }
 solana-gossip = { workspace = true }
 solana-ledger = { workspace = true }
 solana-measure = { workspace = true }
diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs
index 3ee66011e58a9c..be3bca47458f30 100644
--- a/turbine/src/cluster_nodes.rs
+++ b/turbine/src/cluster_nodes.rs
@@ -4,6 +4,7 @@ use {
     lazy_lru::LruCache,
     rand::{seq::SliceRandom, Rng, SeedableRng},
     rand_chacha::ChaChaRng,
+    solana_feature_set as feature_set,
     solana_gossip::{
         cluster_info::ClusterInfo,
         contact_info::{ContactInfo, Protocol},
@@ -16,7 +17,6 @@ use {
     solana_runtime::bank::Bank,
     solana_sdk::{
         clock::{Epoch, Slot},
-        feature_set,
         genesis_config::ClusterType,
         native_token::LAMPORTS_PER_SOL,
         pubkey::Pubkey,
diff --git a/turbine/src/sigverify_shreds.rs b/turbine/src/sigverify_shreds.rs
index 320b95ac24188e..b1b4530b666e72 100644
--- a/turbine/src/sigverify_shreds.rs
+++ b/turbine/src/sigverify_shreds.rs
@@ -5,6 +5,7 @@ use {
     },
     crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender},
     rayon::{prelude::*, ThreadPool, ThreadPoolBuilder},
+    solana_feature_set as feature_set,
     solana_gossip::cluster_info::ClusterInfo,
     solana_ledger::{
         leader_schedule_cache::LeaderScheduleCache,
@@ -19,7 +20,6 @@ use {
     },
     solana_sdk::{
         clock::Slot,
-        feature_set,
         pubkey::Pubkey,
         signature::{Keypair, Signer},
     },
diff --git a/version/Cargo.toml b/version/Cargo.toml
index e2f26ddfa760e7..8fd935b5ae9b25 100644
--- a/version/Cargo.toml
+++ b/version/Cargo.toml
@@ -14,6 +14,7 @@ log = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_derive = { workspace = true }
+solana-feature-set = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true }
 solana-frozen-abi-macro = { workspace = true, optional = true }
 solana-sanitize = { workspace = true }
diff --git a/version/src/legacy.rs b/version/src/legacy.rs
index 32d3dcd245c349..23629b7c247cde 100644
--- a/version/src/legacy.rs
+++ b/version/src/legacy.rs
@@ -41,11 +41,8 @@ impl From<LegacyVersion1> for LegacyVersion2 {
 impl Default for LegacyVersion2 {
     fn default() -> Self {
-        let feature_set = u32::from_le_bytes(
-            solana_sdk::feature_set::ID.as_ref()[..4]
-                .try_into()
-                .unwrap(),
-        );
+        let feature_set =
+            u32::from_le_bytes(solana_feature_set::ID.as_ref()[..4].try_into().unwrap());
         Self {
             major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
             minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
diff --git a/version/src/lib.rs b/version/src/lib.rs
index 5c6443f80c6d96..2a09817ccfbcd6 100644
--- a/version/src/lib.rs
+++ b/version/src/lib.rs
@@ -68,11 +68,8 @@ impl From<LegacyVersion2> for Version {
 impl Default for Version {
     fn default() -> Self {
-        let feature_set = u32::from_le_bytes(
-            solana_sdk::feature_set::ID.as_ref()[..4]
-                .try_into()
-                .unwrap(),
-        );
+        let feature_set =
+            u32::from_le_bytes(solana_feature_set::ID.as_ref()[..4].try_into().unwrap());
         Self {
             major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
             minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),

From 9e7637a9b6f20201a746007e9dadfddbc43285dc Mon Sep 17 00:00:00 2001
From: Kevin Heavey
Date: Tue, 10 Sep 2024 21:46:48 +0400
Subject: [PATCH 330/529] Extract pubkey crate (#2394)

* extract solana-pubkey from solana-program
* make curve25519_dalek an optional dep
* make sha2 optional in solana-pubkey
* move pubkey wasm code to solana-pubkey
* make serde optional in solana-pubkey
* make bytemuck optional in solana-pubkey
* put syscalls behind target_os = "solana"
* move rand to dev deps
* remove thiserror
* remove num_derive
* make std optional
* use std when target_arch = "wasm32"
* fix frozen-abi support
* update digests
* update digests
* update nits.sh
* update lock file
* make some doc examples text-only because order-crates-for-publishing.py is wrong
* add dev-context-only-utils to appease ci
* fmt
* fix unused import when target_os = "solana"
* fix imports in wasm target
* fix import issue
* activate std feat when borsh feat is activated
* fix a conditional import
* fix more feature issues
* add default-features = false (otherwise we can't disable default features anywhere in the workspace)
* activate std feature explicitly
* clean up imports
* fix test features
* fix lints
* post-rebase fixes
* make FromStr, Display and Debug no_std
* fmt
* update digest
* fix duplicate line post-rebase
* stop avoiding circular dev dep since CI now accommodates this
* make rustc_version optional
* fix doc link
* fix frozen-abi deps
* update digests
* fmt
* don't require std for FromStr
* simplify some imports
* use as_ref instead of to_bytes

Co-authored-by: Jon C

* use as_ref instead of to_bytes

Co-authored-by: Jon C

* remove unnecessary test criterion in #[cfg()]

Co-authored-by: Jon C

* remove unnecessary test criterion in #[cfg()]

Co-authored-by: Jon C

* remove unrelated change
* but don't remove #[cfg(test)]
* call out doc tests in explanation of circular dev dep
* add missing conversion
* remove unnecessary #[cfg(feature = "std")]
* use strum to check that FromPrimitive impls are exhaustive
* sort deps
* add test for From

---------

Co-authored-by: Jon C
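An illustrative sketch (not part of the diff below) of a hypothetical downstream
crate consuming the extracted solana-pubkey; the crate and feature names are the
ones introduced in sdk/pubkey/Cargo.toml, and the APIs shown carry over from the
old solana_program::pubkey module:

    // Cargo.toml of the hypothetical consumer:
    // solana-pubkey = { version = "2.1.0", default-features = false, features = ["curve25519", "std"] }
    use {solana_pubkey::Pubkey, std::str::FromStr};

    fn main() {
        // FromStr decodes base58 and rejects inputs that are not 32 bytes.
        let program_id =
            Pubkey::from_str("11111111111111111111111111111111").expect("valid base58");
        // find_program_address walks bump seeds until the derived address
        // falls off the ed25519 curve; off-chain this needs `curve25519`.
        let (pda, bump) = Pubkey::find_program_address(&[b"state".as_ref()], &program_id);
        assert!(!pda.is_on_curve());
        println!("{pda} bump={bump}");
    }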
---
 Cargo.lock                               |  34 ++
 Cargo.toml                               |   2 +
 ci/nits.sh                               |   1 +
 core/src/consensus.rs                    |   2 +-
 core/src/consensus/tower1_14_11.rs       |   2 +-
 core/src/consensus/tower1_7_14.rs        |   2 +-
 core/src/repair/serve_repair.rs          |   6 +-
 gossip/src/cluster_info.rs               |   2 +-
 programs/sbf/Cargo.lock                  |  24 +
 runtime/src/bank/serde_snapshot.rs       |   2 +-
 sdk/program/Cargo.toml                   |   5 +-
 sdk/program/src/lib.rs                   |   6 +-
 sdk/program/src/message/legacy.rs        |  10 +-
 sdk/program/src/message/versions/mod.rs  |   2 +-
 sdk/program/src/syscalls/definitions.rs  |   7 +-
 sdk/program/src/system_instruction.rs    |   2 +-
 sdk/program/src/vote/state/mod.rs        |   2 +-
 .../src/vote/state/vote_state_1_14_11.rs |   2 +-
 sdk/program/src/wasm/mod.rs              |   1 -
 sdk/program/src/wasm/pubkey.rs           | 123 ------
 sdk/pubkey/Cargo.toml                    |  80 ++++
 sdk/pubkey/build.rs                      |   1 +
 .../src/pubkey.rs => pubkey/src/lib.rs}  | 418 +++++++++++++++---
 sdk/pubkey/src/syscalls.rs               |   6 +
 sdk/src/account.rs                       |   4 +-
 sdk/src/genesis_config.rs                |   2 +-
 sdk/src/transaction/mod.rs               |   4 +-
 27 files changed, 541 insertions(+), 211 deletions(-)
 delete mode 100644 sdk/program/src/wasm/pubkey.rs
 create mode 100644 sdk/pubkey/Cargo.toml
 create mode 120000 sdk/pubkey/build.rs
 rename sdk/{program/src/pubkey.rs => pubkey/src/lib.rs} (71%)
 create mode 100644 sdk/pubkey/src/syscalls.rs

diff --git a/Cargo.lock b/Cargo.lock
index 18bae8a7e97ac0..83992bc32a7586 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7082,6 +7082,7 @@ dependencies = [
  "solana-msg",
  "solana-program-memory",
  "solana-program-option",
+ "solana-pubkey",
  "solana-sanitize",
  "solana-sdk-macro",
  "solana-secp256k1-recover",
@@ -7175,6 +7176,39 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "solana-pubkey"
+version = "2.1.0"
+dependencies = [
+ "anyhow",
+ "arbitrary",
+ "borsh 0.10.3",
+ "borsh 1.5.1",
+ "bs58",
+ "bytemuck",
+ "bytemuck_derive",
+ "curve25519-dalek 4.1.3",
+ "getrandom 0.2.10",
+ "js-sys",
+ "num-traits",
+ "rand 0.8.5",
+ "rustc_version 0.4.1",
+ "serde",
+ "serde_derive",
+ "solana-atomic-u64",
+ "solana-decode-error",
+ "solana-define-syscall",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-program",
+ "solana-pubkey",
+ "solana-sanitize",
+ "solana-sha256-hasher",
+ "strum",
+ "strum_macros",
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "solana-pubsub-client"
 version = "2.1.0"
diff --git a/Cargo.toml b/Cargo.toml
index a31088169447ba..c0e93451fbaa6e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -116,6 +116,7 @@ members = [
     "sdk/program",
     "sdk/program-memory",
     "sdk/program-option",
+    "sdk/pubkey",
     "sdk/serde-varint",
     "sdk/sha256-hasher",
     "sdk/signature",
@@ -423,6 +424,7 @@ solana-program-memory = { path = "sdk/program-memory", version = "=2.1.0" }
 solana-program-option = { path = "sdk/program-option", version = "=2.1.0" }
 solana-program-runtime = { path = "program-runtime", version = "=2.1.0" }
 solana-program-test = { path = "program-test", version = "=2.1.0" }
+solana-pubkey = { path = "sdk/pubkey", version = "=2.1.0", default-features = false }
 solana-pubsub-client = { path = "pubsub-client", version = "=2.1.0" }
 solana-quic-client = { path = "quic-client", version = "=2.1.0" }
 solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" }
diff --git a/ci/nits.sh b/ci/nits.sh
index 764881a475dcad..963315cf7a2b17 100755
--- a/ci/nits.sh
+++ b/ci/nits.sh
@@ -31,6 +31,7 @@ declare print_free_tree=(
   ':^sdk/msg/src/lib.rs'
   ':^sdk/program-option/src/lib.rs'
   ':^sdk/program/src/program_stubs.rs'
+  ':^sdk/pubkey/src/lib.rs'
   ':programs/**.rs'
   ':^**bin**.rs'
   ':^**bench**.rs'
diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index 417e29254c2d42..cb4beca4ae80ed 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -239,7 +239,7 @@ pub(crate) enum BlockhashStatus {
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample),
-    frozen_abi(digest = "5BUswzvu7Qe44HbR4eBwPX4Jn9GSfhjmg8eijnBjoKUd")
+    frozen_abi(digest = "9ziHa1vA7WG5RCvXiE3g1f2qjSTNa47FB7e2czo7en7a")
 )]
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
 pub struct Tower {
diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs
index 7dfcd0bd340fac..2c90bd1fd62926 100644
--- a/core/src/consensus/tower1_14_11.rs
+++ b/core/src/consensus/tower1_14_11.rs
@@ -9,7 +9,7 @@ use {
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample),
-    frozen_abi(digest = "9P6J8ZtVLR5zbUxWT83q1iUsJMH6B7SwcomSqcoomPmg")
+    frozen_abi(digest = "6VhLW7DSHNzrcswtxbNo4cb47oGrKLcKuDmCWVpUMLLM")
 )]
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
 pub struct Tower1_14_11 {
diff --git a/core/src/consensus/tower1_7_14.rs b/core/src/consensus/tower1_7_14.rs
index ff7f7024eba77f..fb87a475c520b0 100644
--- a/core/src/consensus/tower1_7_14.rs
+++ b/core/src/consensus/tower1_7_14.rs
@@ -11,7 +11,7 @@ use {
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample),
-    frozen_abi(digest = "DJVvkk4EFFCbA37vsKcFPGuwEULh2wEvMUESsTyvABzU")
+    frozen_abi(digest = "5bwSGBqA1BVmgNtnTenfYtEt123cciEzpfqt6bUX1dJo")
 )]
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
 pub struct Tower1_7_14 {
diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs
index 9c3a53b2ad053a..a1887cf689bb09 100644
--- a/core/src/repair/serve_repair.rs
+++ b/core/src/repair/serve_repair.rs
@@ -143,7 +143,7 @@ impl AncestorHashesRepairType {
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiEnumVisitor, AbiExample),
-    frozen_abi(digest = "H7S44V9G9cjKeZdtSNZnRivsMrKaThkazF3k3c63TxP4")
+    frozen_abi(digest = "9SdneX58ekpqLJBzUwfwJsK2fZc9mN4vTcaS4temEjkP")
 )]
 #[derive(Debug, Deserialize, Serialize)]
 pub enum AncestorHashesResponse {
@@ -224,7 +224,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>;
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiEnumVisitor, AbiExample),
-    frozen_abi(digest = "CYguF3KopGoM48XFJJS9pw9Z4TDZ2eUTqPPqbm3L4mFr")
+    frozen_abi(digest = "3E2R8jiSt9QfVHdX3MgW3UdeNWfor7zNjJcLJLz2K1JY")
 )]
 #[derive(Debug, Deserialize, Serialize)]
 pub enum RepairProtocol {
@@ -272,7 +272,7 @@ fn discard_malformed_repair_requests(
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiEnumVisitor, AbiExample),
-    frozen_abi(digest = "8TYqNDnUGbE5duZgbCJAyZ2nZDSx39ueYo9PLLZCsiVy")
+    frozen_abi(digest = "CpKVYghdpMDRMiGjZpa71dcnB7rCVHLVogZbB3AGDKAK")
 )]
 #[derive(Debug, Deserialize, Serialize)]
 pub(crate) enum RepairResponse {
diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index a26ff3a3560e2e..f2e93765560ece 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -311,7 +311,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>;
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample, AbiEnumVisitor),
-    frozen_abi(digest = "ANgFTZHXSMbjYEuvf9YphECo47tWWrqKdPDD6B9D1YGB")
+    frozen_abi(digest = "GfVFxfPfYcFLCaa29uxQxyKJAuTZ1cYqcRKhVrEKwDK7")
 )]
 #[derive(Serialize, Deserialize, Debug)]
 #[allow(clippy::large_enum_variant)]
diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock
index ae119446dad773..7ec3d180fb55c9 100644
--- a/programs/sbf/Cargo.lock
+++ b/programs/sbf/Cargo.lock
@@ -5471,6 +5471,7 @@ dependencies = [
  "solana-msg",
  "solana-program-memory",
  "solana-program-option",
+ "solana-pubkey",
  "solana-sanitize",
  "solana-sdk-macro",
  "solana-secp256k1-recover",
@@ -5554,6 +5555,29 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "solana-pubkey"
+version = "2.1.0"
+dependencies = [
+ "borsh 0.10.3",
+ "borsh 1.5.1",
+ "bs58",
+ "bytemuck",
+ "bytemuck_derive",
+ "curve25519-dalek 4.1.3",
+ "getrandom 0.2.10",
+ "js-sys",
+ "num-traits",
+ "serde",
+ "serde_derive",
+ "solana-atomic-u64",
+ "solana-decode-error",
+ "solana-define-syscall",
+ "solana-sanitize",
+ "solana-sha256-hasher",
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "solana-pubsub-client"
 version = "2.1.0"
diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs
index c628fe7c0e1360..69de472175978e 100644
--- a/runtime/src/bank/serde_snapshot.rs
+++ b/runtime/src/bank/serde_snapshot.rs
@@ -535,7 +535,7 @@ mod tests {
     #[cfg_attr(
         feature = "frozen-abi",
         derive(AbiExample),
-        frozen_abi(digest = "FuFBQtx7rGruVC3cyh4zvZ3uN4RUtBiwh1pXJRwUCcoS")
+        frozen_abi(digest = "6d4H7gw1hSrspdTew8dAXZ5dZT1mwFc6VZdXnkuggJ8E")
     )]
     #[derive(Serialize)]
     pub struct BankAbiTestWrapper {
diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml
index 3b8eeba0d567fc..1a25c1fde1352d 100644
--- a/sdk/program/Cargo.toml
+++ b/sdk/program/Cargo.toml
@@ -44,6 +44,7 @@ solana-hash = { workspace = true, features = [
 solana-msg = { workspace = true }
 solana-program-memory = { workspace = true }
 solana-program-option = { workspace = true }
+solana-pubkey = { workspace = true, features = ["bytemuck", "curve25519", "serde", "std"] }
 solana-sanitize = { workspace = true }
 solana-sdk-macro = { workspace = true }
 solana-secp256k1-recover = { workspace = true }
@@ -91,6 +92,7 @@ assert_matches = { workspace = true }
 itertools = { workspace = true }
 serde_json = { workspace = true }
 serial_test = { workspace = true }
+solana-pubkey = { workspace = true, features = ["dev-context-only-utils"] }
 static_assertions = { workspace = true }
 test-case = { workspace = true }
@@ -105,13 +107,14 @@ crate-type = ["cdylib", "rlib"]
 
 [features]
 default = ["borsh"]
-borsh = ["dep:borsh", "dep:borsh0-10", "solana-hash/borsh"]
+borsh = ["dep:borsh", "dep:borsh0-10", "solana-hash/borsh", "solana-pubkey/borsh"]
 dev-context-only-utils = ["dep:qualifier_attr"]
 frozen-abi = [
     "dep:rustc_version",
     "dep:solana-frozen-abi",
     "dep:solana-frozen-abi-macro",
     "solana-hash/frozen-abi",
+    "solana-pubkey/frozen-abi",
     "solana-short-vec/frozen-abi"
 ]
diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs
index f1ab379034df89..7fb630240861de 100644
--- a/sdk/program/src/lib.rs
+++ b/sdk/program/src/lib.rs
@@ -511,7 +511,6 @@ pub mod program_error;
 pub mod program_pack;
 pub mod program_stubs;
 pub mod program_utils;
-pub mod pubkey;
 pub mod rent;
 pub mod secp256k1_program;
 pub mod serialize_utils;
@@ -539,7 +538,10 @@ pub use solana_serde_varint as serde_varint;
 pub use solana_short_vec as short_vec;
 #[cfg(target_arch = "wasm32")]
 pub use wasm_bindgen::prelude::wasm_bindgen;
-pub use {solana_clock as clock, solana_msg::msg, solana_program_option as program_option};
+pub use {
+    solana_clock as clock, solana_msg::msg, solana_program_option as program_option,
+    solana_pubkey as pubkey,
+};
 
 /// The [config native program][np].
 ///
diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs
index 68d5dfb2e25588..f225e5a646b654 100644
--- a/sdk/program/src/message/legacy.rs
+++ b/sdk/program/src/message/legacy.rs
@@ -63,8 +63,8 @@ mod builtins {
     // the value is "false"), or might be in one of these lists (if the value is "true")
     pub static ref MAYBE_BUILTIN_KEY_OR_SYSVAR: [bool; 256] = {
         let mut temp_table: [bool; 256] = [false; 256];
-        BUILTIN_PROGRAMS_KEYS.iter().for_each(|key| temp_table[key.0[0] as usize] = true);
-        sysvar::ALL_IDS.iter().for_each(|key| temp_table[key.0[0] as usize] = true);
+        BUILTIN_PROGRAMS_KEYS.iter().for_each(|key| temp_table[key.as_ref()[0] as usize] = true);
+        sysvar::ALL_IDS.iter().for_each(|key| temp_table[key.as_ref()[0] as usize] = true);
         temp_table
     };
 }
@@ -76,7 +76,7 @@ mod builtins {
 )]
 #[allow(deprecated)]
 pub fn is_builtin_key_or_sysvar(key: &Pubkey) -> bool {
-    if MAYBE_BUILTIN_KEY_OR_SYSVAR[key.0[0] as usize] {
+    if MAYBE_BUILTIN_KEY_OR_SYSVAR[key.as_ref()[0] as usize] {
         return sysvar::is_sysvar_id(key) || BUILTIN_PROGRAMS_KEYS.contains(key);
     }
     false
@@ -123,7 +123,7 @@ fn compile_instructions(ixs: &[Instruction], keys: &[Pubkey]) -> Vec<CompiledInstruction>
diff --git a/sdk/program/src/syscalls/definitions.rs b/sdk/program/src/syscalls/definitions.rs
-define_syscall!(fn sol_create_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8) -> u64);
-define_syscall!(fn sol_try_find_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8, bump_seed_addr: *const u8) -> u64);
 define_syscall!(fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64);
 define_syscall!(fn sol_blake3(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64);
 define_syscall!(fn sol_invoke_signed_c(instruction_addr: *const u8, account_infos_addr: *const u8, account_infos_len: u64, signers_seeds_addr: *const u8, signers_seeds_len: u64) -> u64);
diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs
index a1c301b85cdab9..bd0fcb0b37e0a6 100644
--- a/sdk/program/src/system_instruction.rs
+++ b/sdk/program/src/system_instruction.rs
@@ -103,7 +103,7 @@ static_assertions::const_assert_eq!(MAX_PERMITTED_DATA_LENGTH, 10_485_760);
 
 /// An instruction to the system program.
 #[cfg_attr(
     feature = "frozen-abi",
-    frozen_abi(digest = "5e22s2kFu9Do77hdcCyxyhuKHD8ThAB6Q6dNaLTCjL5M"),
+    frozen_abi(digest = "2LnVTnJg7LxB1FawNZLoQEY8yiYx3MT3paTdx4s5kAXU"),
     derive(AbiExample, AbiEnumVisitor)
 )]
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs
index 40f3268fcfd3d4..73a6221eadf354 100644
--- a/sdk/program/src/vote/state/mod.rs
+++ b/sdk/program/src/vote/state/mod.rs
@@ -406,7 +406,7 @@ impl CircBuf {
 #[cfg_attr(
     feature = "frozen-abi",
-    frozen_abi(digest = "EeenjJaSrm9hRM39gK6raRNtzG61hnk7GciUCJJRDUSQ"),
+    frozen_abi(digest = "87ULMjjHnMsPmCTEyzj4KPn2u5gdX1rmgtSdycpbSaLs"),
     derive(AbiExample)
 )]
 #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone)]
diff --git a/sdk/program/src/vote/state/vote_state_1_14_11.rs b/sdk/program/src/vote/state/vote_state_1_14_11.rs
index 285272a3ab646f..2e0877ab3924eb 100644
--- a/sdk/program/src/vote/state/vote_state_1_14_11.rs
+++ b/sdk/program/src/vote/state/vote_state_1_14_11.rs
@@ -7,7 +7,7 @@ const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82;
 #[cfg_attr(
     feature = "frozen-abi",
-    frozen_abi(digest = "CZTgLymuevXjAx6tM8X8T5J3MCx9AkEsFSmu4FJrEpkG"),
+    frozen_abi(digest = "64duaG8iUgwmgMM9y1Pdi8S9jBGoPcjS5HrE8RRfsxJJ"),
     derive(AbiExample)
 )]
 #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone)]
diff --git a/sdk/program/src/wasm/mod.rs b/sdk/program/src/wasm/mod.rs
index c390efed559ab0..19a18d76dc3bd8 100644
--- a/sdk/program/src/wasm/mod.rs
+++ b/sdk/program/src/wasm/mod.rs
@@ -3,7 +3,6 @@
 use wasm_bindgen::prelude::*;
 
 pub mod instructions;
-pub mod pubkey;
 pub mod system_instruction;
 
 /// Initialize Javascript logging and panic handler
diff --git a/sdk/program/src/wasm/pubkey.rs b/sdk/program/src/wasm/pubkey.rs
deleted file mode 100644
index 5f8733b88a6eac..00000000000000
--- a/sdk/program/src/wasm/pubkey.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-//! `Pubkey` Javascript interface
-#![cfg(target_arch = "wasm32")]
-#![allow(non_snake_case)]
-use {
-    crate::{pubkey::*, wasm::display_to_jsvalue},
-    js_sys::{Array, Uint8Array},
-    wasm_bindgen::{prelude::*, JsCast},
-};
-
-fn js_value_to_seeds_vec(array_of_uint8_arrays: &[JsValue]) -> Result<Vec<Vec<u8>>, JsValue> {
-    let vec_vec_u8 = array_of_uint8_arrays
-        .iter()
-        .filter_map(|u8_array| {
-            u8_array
-                .dyn_ref::<Uint8Array>()
-                .map(|u8_array| u8_array.to_vec())
-        })
-        .collect::<Vec<_>>();
-
-    if vec_vec_u8.len() != array_of_uint8_arrays.len() {
-        Err("Invalid Array of Uint8Arrays".into())
-    } else {
-        Ok(vec_vec_u8)
-    }
-}
-
-#[wasm_bindgen]
-impl Pubkey {
-    /// Create a new Pubkey object
-    ///
-    /// * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]`
-    #[wasm_bindgen(constructor)]
-    pub fn constructor(value: JsValue) -> Result<Pubkey, JsValue> {
-        if let Some(base58_str) = value.as_string() {
-            base58_str.parse::<Pubkey>().map_err(display_to_jsvalue)
-        } else if let Some(uint8_array) = value.dyn_ref::<Uint8Array>() {
-            Pubkey::try_from(uint8_array.to_vec())
-                .map_err(|err| JsValue::from(format!("Invalid Uint8Array pubkey: {err:?}")))
-        } else if let Some(array) = value.dyn_ref::<Array>() {
-            let mut bytes = vec![];
-            let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable");
-            for x in iterator {
-                let x = x?;
-
-                if let Some(n) = x.as_f64() {
-                    if n >= 0. && n <= 255. {
{ - bytes.push(n as u8); - continue; - } - } - return Err(format!("Invalid array argument: {:?}", x).into()); - } - Pubkey::try_from(bytes) - .map_err(|err| JsValue::from(format!("Invalid Array pubkey: {err:?}"))) - } else if value.is_undefined() { - Ok(Pubkey::default()) - } else { - Err("Unsupported argument".into()) - } - } - - /// Return the base58 string representation of the public key - pub fn toString(&self) -> String { - self.to_string() - } - - /// Check if a `Pubkey` is on the ed25519 curve. - pub fn isOnCurve(&self) -> bool { - self.is_on_curve() - } - - /// Checks if two `Pubkey`s are equal - pub fn equals(&self, other: &Pubkey) -> bool { - self == other - } - - /// Return the `Uint8Array` representation of the public key - pub fn toBytes(&self) -> Box<[u8]> { - self.0.clone().into() - } - - /// Derive a Pubkey from another Pubkey, string seed, and a program id - pub fn createWithSeed(base: &Pubkey, seed: &str, owner: &Pubkey) -> Result { - Pubkey::create_with_seed(base, seed, owner).map_err(display_to_jsvalue) - } - - /// Derive a program address from seeds and a program id - pub fn createProgramAddress( - seeds: Box<[JsValue]>, - program_id: &Pubkey, - ) -> Result { - let seeds_vec = js_value_to_seeds_vec(&seeds)?; - let seeds_slice = seeds_vec - .iter() - .map(|seed| seed.as_slice()) - .collect::>(); - - Pubkey::create_program_address(seeds_slice.as_slice(), program_id) - .map_err(display_to_jsvalue) - } - - /// Find a valid program address - /// - /// Returns: - /// * `[PubKey, number]` - the program address and bump seed - pub fn findProgramAddress( - seeds: Box<[JsValue]>, - program_id: &Pubkey, - ) -> Result { - let seeds_vec = js_value_to_seeds_vec(&seeds)?; - let seeds_slice = seeds_vec - .iter() - .map(|seed| seed.as_slice()) - .collect::>(); - - let (address, bump_seed) = Pubkey::find_program_address(seeds_slice.as_slice(), program_id); - - let result = Array::new_with_length(2); - result.set(0, address.into()); - result.set(1, bump_seed.into()); - Ok(result.into()) - } -} diff --git a/sdk/pubkey/Cargo.toml b/sdk/pubkey/Cargo.toml new file mode 100644 index 00000000000000..b7c9434e7e9ca9 --- /dev/null +++ b/sdk/pubkey/Cargo.toml @@ -0,0 +1,80 @@ +[package] +name = "solana-pubkey" +description = "Solana account addresses" +documentation = "https://docs.rs/solana-pubkey" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +arbitrary = { workspace = true, features = ["derive"], optional = true } +borsh = { workspace = true, optional = true } +borsh0-10 = { package = "borsh", version = "0.10.3", optional = true } +bs58 = { workspace = true } +bytemuck = { workspace = true, optional = true } +bytemuck_derive = { workspace = true, optional = true } +num-traits = { workspace = true } +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-atomic-u64 = { workspace = true } +solana-decode-error = { workspace = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-sanitize = { workspace = true } + +[target.'cfg(target_os = "solana")'.dependencies] +solana-define-syscall = { workspace = true } +solana-sha256-hasher = { workspace = true } + +[target.'cfg(not(target_os = "solana"))'.dependencies] +curve25519-dalek = { workspace = true, optional = true } +solana-sha256-hasher = { 
workspace = true, optional = true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom = { workspace = true, features = ["js", "wasm-bindgen"] } +js-sys = { workspace = true } +wasm-bindgen = { workspace = true } + +[dev-dependencies] +anyhow = { workspace = true } +arbitrary = { workspace = true, features = ["derive"] } +bs58 = { workspace = true, features = ["alloc"] } +rand = { workspace = true } +# circular dev deps need to be path deps for `cargo publish` to be happy, +# and for now the doc tests need solana-program +solana-program = { path = "../program" } +solana-pubkey = { path = ".", features = [ + "borsh", + "curve25519", + "dev-context-only-utils", + "std", +] } +strum = { workspace = true } +strum_macros = { workspace = true } + +[build-dependencies] +rustc_version = { workspace = true, optional = true } + +[features] +borsh = ["dep:borsh", "dep:borsh0-10", "std"] +bytemuck = ["dep:bytemuck", "dep:bytemuck_derive"] +curve25519 = ["dep:curve25519-dalek", "sha2"] +default = ["std"] +dev-context-only-utils = ["dep:arbitrary", "std"] +frozen-abi = [ + "dep:rustc_version", + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro" +] +serde = ["dep:serde", "dep:serde_derive"] +sha2 = ["dep:solana-sha256-hasher", "solana-sha256-hasher/sha2"] +std = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/pubkey/build.rs b/sdk/pubkey/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/sdk/pubkey/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/program/src/pubkey.rs b/sdk/pubkey/src/lib.rs similarity index 71% rename from sdk/program/src/pubkey.rs rename to sdk/pubkey/src/lib.rs index 1c1dcaed092544..82a9753df8f6da 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/pubkey/src/lib.rs @@ -1,25 +1,40 @@ //! Solana account addresses. 
- +#![no_std] +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] -#[cfg(target_arch = "wasm32")] -use crate::wasm_bindgen; -#[cfg(test)] +#[cfg(any(feature = "std", target_arch = "wasm32"))] +extern crate std; +#[cfg(feature = "dev-context-only-utils")] use arbitrary::Arbitrary; +#[cfg(feature = "bytemuck")] +use bytemuck_derive::{Pod, Zeroable}; +#[cfg(feature = "serde")] +use serde_derive::{Deserialize, Serialize}; +#[cfg(any(feature = "std", target_arch = "wasm32"))] +use std::vec::Vec; #[cfg(feature = "borsh")] -use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use { - crate::hash::hashv, - bytemuck_derive::{Pod, Zeroable}, - num_derive::{FromPrimitive, ToPrimitive}, - solana_decode_error::DecodeError, - std::{ + borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, + std::string::ToString, +}; +use { + core::{ + array, convert::{Infallible, TryFrom}, fmt, mem, - str::FromStr, + str::{from_utf8, FromStr}, }, - thiserror::Error, + num_traits::{FromPrimitive, ToPrimitive}, + solana_decode_error::DecodeError, }; +#[cfg(target_arch = "wasm32")] +use { + js_sys::{Array, Uint8Array}, + wasm_bindgen::{prelude::wasm_bindgen, JsCast, JsValue}, +}; +#[cfg(target_os = "solana")] +pub mod syscalls; /// Number of bytes in a pubkey pub const PUBKEY_BYTES: usize = 32; @@ -30,18 +45,77 @@ pub const MAX_SEEDS: usize = 16; /// Maximum string length of a base58 encoded pubkey const MAX_BASE58_LEN: usize = 44; +#[cfg(any(target_os = "solana", feature = "sha2", feature = "curve25519"))] const PDA_MARKER: &[u8; 21] = b"ProgramDerivedAddress"; -#[derive(Error, Debug, Serialize, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)] +/// Copied from `solana_program::entrypoint::SUCCESS` +/// to avoid a `solana_program` dependency +#[cfg(target_os = "solana")] +const SUCCESS: u64 = 0; + +// Use strum when testing to ensure our FromPrimitive +// impl is exhaustive +#[cfg_attr(test, derive(strum_macros::FromRepr, strum_macros::EnumIter))] +#[cfg_attr(feature = "serde", derive(Serialize))] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum PubkeyError { /// Length of the seed is too long for address generation - #[error("Length of the seed is too long for address generation")] MaxSeedLengthExceeded, - #[error("Provided seeds do not result in a valid address")] InvalidSeeds, - #[error("Provided owner is not allowed")] IllegalOwner, } + +impl ToPrimitive for PubkeyError { + #[inline] + fn to_i64(&self) -> Option { + Some(match *self { + PubkeyError::MaxSeedLengthExceeded => PubkeyError::MaxSeedLengthExceeded as i64, + PubkeyError::InvalidSeeds => PubkeyError::InvalidSeeds as i64, + PubkeyError::IllegalOwner => PubkeyError::IllegalOwner as i64, + }) + } + #[inline] + fn to_u64(&self) -> Option { + self.to_i64().map(|x| x as u64) + } +} + +impl FromPrimitive for PubkeyError { + #[inline] + fn from_i64(n: i64) -> Option { + if n == PubkeyError::MaxSeedLengthExceeded as i64 { + Some(PubkeyError::MaxSeedLengthExceeded) + } else if n == PubkeyError::InvalidSeeds as i64 { + Some(PubkeyError::InvalidSeeds) + } else if n == PubkeyError::IllegalOwner as i64 { + Some(PubkeyError::IllegalOwner) + } else { + None + } + } + #[inline] + fn from_u64(n: u64) -> Option { + Self::from_i64(n as i64) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for PubkeyError {} + +impl fmt::Display for PubkeyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + PubkeyError::MaxSeedLengthExceeded => { + f.write_str("Length of the seed is too 
long for address generation") + } + PubkeyError::InvalidSeeds => { + f.write_str("Provided seeds do not result in a valid address") + } + PubkeyError::IllegalOwner => f.write_str("Provided owner is not allowed"), + } + } +} + impl DecodeError for PubkeyError { fn type_of() -> &'static str { "PubkeyError" @@ -52,6 +126,7 @@ impl From for PubkeyError { match error { 0 => PubkeyError::MaxSeedLengthExceeded, 1 => PubkeyError::InvalidSeeds, + 2 => PubkeyError::IllegalOwner, _ => panic!("Unsupported PubkeyError"), } } @@ -73,39 +148,74 @@ impl From for PubkeyError { /// [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html #[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[repr(transparent)] -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] #[cfg_attr( feature = "borsh", - derive(BorshSerialize, BorshDeserialize, BorshSchema), + derive(BorshSerialize, BorshDeserialize), borsh(crate = "borsh") )] -#[derive( - Clone, - Copy, - Default, - Deserialize, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - Pod, - Serialize, - Zeroable, -)] -#[cfg_attr(test, derive(Arbitrary))] +#[cfg_attr(all(feature = "borsh", feature = "std"), derive(BorshSchema))] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "bytemuck", derive(Pod, Zeroable))] +#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "dev-context-only-utils", derive(Arbitrary))] pub struct Pubkey(pub(crate) [u8; 32]); impl solana_sanitize::Sanitize for Pubkey {} -#[derive(Error, Debug, Serialize, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)] +// Use strum when testing to ensure our FromPrimitive +// impl is exhaustive +#[cfg_attr(test, derive(strum_macros::FromRepr, strum_macros::EnumIter))] +#[cfg_attr(feature = "serde", derive(Serialize))] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum ParsePubkeyError { - #[error("String is the wrong size")] WrongSize, - #[error("Invalid Base58 string")] Invalid, } +impl ToPrimitive for ParsePubkeyError { + #[inline] + fn to_i64(&self) -> Option { + Some(match *self { + ParsePubkeyError::WrongSize => ParsePubkeyError::WrongSize as i64, + ParsePubkeyError::Invalid => ParsePubkeyError::Invalid as i64, + }) + } + #[inline] + fn to_u64(&self) -> Option { + self.to_i64().map(|x| x as u64) + } +} + +impl FromPrimitive for ParsePubkeyError { + #[inline] + fn from_i64(n: i64) -> Option { + if n == ParsePubkeyError::WrongSize as i64 { + Some(ParsePubkeyError::WrongSize) + } else if n == ParsePubkeyError::Invalid as i64 { + Some(ParsePubkeyError::Invalid) + } else { + None + } + } + #[inline] + fn from_u64(n: u64) -> Option { + Self::from_i64(n as i64) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for ParsePubkeyError {} + +impl fmt::Display for ParsePubkeyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ParsePubkeyError::WrongSize => f.write_str("String is the wrong size"), + ParsePubkeyError::Invalid => f.write_str("Invalid Base58 string"), + } + } +} + impl From for ParsePubkeyError { fn from(_: Infallible) -> Self { unreachable!("Infallible uninhabited"); @@ -125,13 +235,14 @@ impl FromStr for Pubkey { if s.len() > MAX_BASE58_LEN { return Err(ParsePubkeyError::WrongSize); } - let pubkey_vec = bs58::decode(s) - .into_vec() + let mut bytes = [0; PUBKEY_BYTES]; + let decoded_size = bs58::decode(s) + .onto(&mut bytes) .map_err(|_| ParsePubkeyError::Invalid)?; - if 
pubkey_vec.len() != mem::size_of::() { + if decoded_size != mem::size_of::() { Err(ParsePubkeyError::WrongSize) } else { - Pubkey::try_from(pubkey_vec).map_err(|_| ParsePubkeyError::Invalid) + Ok(Pubkey(bytes)) } } } @@ -144,7 +255,7 @@ impl From<[u8; 32]> for Pubkey { } impl TryFrom<&[u8]> for Pubkey { - type Error = std::array::TryFromSliceError; + type Error = array::TryFromSliceError; #[inline] fn try_from(pubkey: &[u8]) -> Result { @@ -152,6 +263,7 @@ impl TryFrom<&[u8]> for Pubkey { } } +#[cfg(any(feature = "std", target_arch = "wasm32"))] impl TryFrom> for Pubkey { type Error = Vec; @@ -168,6 +280,10 @@ impl TryFrom<&str> for Pubkey { } } +// If target_os = "solana", then this panics so there are no dependencies. +// When target_os != "solana", this should be opt-in so users +// don't need the curve25519 dependency. +#[cfg(any(target_os = "solana", feature = "curve25519"))] #[allow(clippy::used_underscore_binding)] pub fn bytes_are_curve_point>(_bytes: T) -> bool { #[cfg(not(target_os = "solana"))] @@ -201,6 +317,11 @@ impl Pubkey { Self::from(b) } + // If target_os = "solana", then the solana_sha256_hasher crate will use + // syscalls which bring no dependencies. + // When target_os != "solana", this should be opt-in so users + // don't need the sha2 dependency. + #[cfg(any(target_os = "solana", feature = "sha2"))] pub fn create_with_seed( base: &Pubkey, seed: &str, @@ -217,7 +338,7 @@ impl Pubkey { return Err(PubkeyError::IllegalOwner); } } - let hash = hashv(&[base.as_ref(), seed.as_ref(), owner]); + let hash = solana_sha256_hasher::hashv(&[base.as_ref(), seed.as_ref(), owner]); Ok(Pubkey::from(hash.to_bytes())) } @@ -236,7 +357,7 @@ impl Pubkey { /// program associated with this address is the caller and thus authorized /// to be the signer. /// - /// [`invoke_signed`]: crate::program::invoke_signed + /// [`invoke_signed`]: https://docs.rs/solana-program/latest/solana_program/program/fn.invoke_signed.html /// /// The `seeds` are application-specific, and must be carefully selected to /// uniquely derive accounts per application requirements. It is common to @@ -310,8 +431,8 @@ impl Pubkey { /// /// ``` /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # use solana_pubkey::Pubkey; /// # use solana_program::{ - /// # pubkey::Pubkey, /// # entrypoint::ProgramResult, /// # program::invoke_signed, /// # system_instruction, @@ -391,8 +512,8 @@ impl Pubkey { /// ``` /// # use borsh::{BorshSerialize, BorshDeserialize}; /// # use solana_program::example_mocks::{solana_sdk, solana_rpc_client}; + /// # use solana_pubkey::Pubkey; /// # use solana_program::{ - /// # pubkey::Pubkey, /// # instruction::Instruction, /// # hash::Hash, /// # instruction::AccountMeta, @@ -474,6 +595,11 @@ impl Pubkey { /// # /// # Ok::<(), anyhow::Error>(()) /// ``` + // If target_os = "solana", then the function will use + // syscalls which bring no dependencies. + // When target_os != "solana", this should be opt-in so users + // don't need the curve25519 dependency. + #[cfg(any(target_os = "solana", feature = "curve25519"))] pub fn find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> (Pubkey, u8) { Self::try_find_program_address(seeds, program_id) .unwrap_or_else(|| panic!("Unable to find a viable program address bump seed")) @@ -491,6 +617,11 @@ impl Pubkey { /// See the documentation for [`find_program_address`] for a full description. 
/// /// [`find_program_address`]: Pubkey::find_program_address + // If target_os = "solana", then the function will use + // syscalls which bring no dependencies. + // When target_os != "solana", this should be opt-in so users + // don't need the curve25519 dependency. + #[cfg(any(target_os = "solana", feature = "curve25519"))] #[allow(clippy::same_item_push)] pub fn try_find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> Option<(Pubkey, u8)> { // Perform the calculation inline, calling this from within a program is @@ -527,7 +658,7 @@ impl Pubkey { ) }; match result { - crate::entrypoint::SUCCESS => Some((Pubkey::from(bytes), bump_seed)), + SUCCESS => Some((Pubkey::from(bytes), bump_seed)), _ => None, } } @@ -568,13 +699,18 @@ impl Pubkey { /// that the returned `Pubkey` has the expected value. /// /// ``` - /// # use solana_program::pubkey::Pubkey; + /// # use solana_pubkey::Pubkey; /// # let program_id = Pubkey::new_unique(); /// let (expected_pda, bump_seed) = Pubkey::find_program_address(&[b"vault"], &program_id); /// let actual_pda = Pubkey::create_program_address(&[b"vault", &[bump_seed]], &program_id)?; /// assert_eq!(expected_pda, actual_pda); /// # Ok::<(), anyhow::Error>(()) /// ``` + // If target_os = "solana", then the function will use + // syscalls which bring no dependencies. + // When target_os != "solana", this should be opt-in so users + // don't need the curve225519 dep. + #[cfg(any(target_os = "solana", feature = "curve25519"))] pub fn create_program_address( seeds: &[&[u8]], program_id: &Pubkey, @@ -592,7 +728,7 @@ impl Pubkey { // not supported #[cfg(not(target_os = "solana"))] { - let mut hasher = crate::hash::Hasher::default(); + let mut hasher = solana_sha256_hasher::Hasher::default(); for seed in seeds.iter() { hasher.hash(seed); } @@ -618,7 +754,7 @@ impl Pubkey { ) }; match result { - crate::entrypoint::SUCCESS => Ok(Pubkey::from(bytes)), + SUCCESS => Ok(Pubkey::from(bytes)), _ => Err(result.into()), } } @@ -628,6 +764,10 @@ impl Pubkey { self.0 } + // If target_os = "solana", then this panics so there are no dependencies. + // When target_os != "solana", this should be opt-in so users + // don't need the curve25519 dependency. + #[cfg(any(target_os = "solana", feature = "curve25519"))] pub fn is_on_curve(&self) -> bool { bytes_are_curve_point(self) } @@ -639,8 +779,8 @@ impl Pubkey { crate::syscalls::sol_log_pubkey(self.as_ref() as *const _ as *const u8) }; - #[cfg(not(target_os = "solana"))] - crate::program_stubs::sol_log(&self.to_string()); + #[cfg(all(not(target_os = "solana"), feature = "std"))] + std::println!("{}", std::string::ToString::to_string(&self)); } } @@ -656,15 +796,25 @@ impl AsMut<[u8]> for Pubkey { } } +fn write_as_base58(f: &mut fmt::Formatter, p: &Pubkey) -> fmt::Result { + let mut out = [0u8; MAX_BASE58_LEN]; + let out_slice: &mut [u8] = &mut out; + // This will never fail because the only possible error is BufferTooSmall, + // and we will never call it with too small a buffer. 
+ let len = bs58::encode(p.0).onto(out_slice).unwrap(); + let as_str = from_utf8(&out[..len]).unwrap(); + f.write_str(as_str) +} + impl fmt::Debug for Pubkey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", bs58::encode(self.0).into_string()) + write_as_base58(f, self) } } impl fmt::Display for Pubkey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", bs58::encode(self.0).into_string()) + write_as_base58(f, self) } } @@ -672,7 +822,7 @@ impl fmt::Display for Pubkey { impl borsh0_10::de::BorshDeserialize for Pubkey { fn deserialize_reader( reader: &mut R, - ) -> ::core::result::Result { + ) -> Result { Ok(Self(borsh0_10::BorshDeserialize::deserialize_reader( reader, )?)) @@ -687,7 +837,7 @@ macro_rules! impl_borsh_schema { [u8; 32]: $borsh::BorshSchema, { fn declaration() -> $borsh::schema::Declaration { - "Pubkey".to_string() + std::string::String::from("Pubkey") } fn add_definitions_recursively( definitions: &mut $borsh::maybestd::collections::HashMap< @@ -731,9 +881,136 @@ macro_rules! impl_borsh_serialize { #[cfg(feature = "borsh")] impl_borsh_serialize!(borsh0_10); +#[cfg(all(target_arch = "wasm32", feature = "curve25519"))] +fn js_value_to_seeds_vec(array_of_uint8_arrays: &[JsValue]) -> Result>, JsValue> { + let vec_vec_u8 = array_of_uint8_arrays + .iter() + .filter_map(|u8_array| { + u8_array + .dyn_ref::() + .map(|u8_array| u8_array.to_vec()) + }) + .collect::>(); + + if vec_vec_u8.len() != array_of_uint8_arrays.len() { + Err("Invalid Array of Uint8Arrays".into()) + } else { + Ok(vec_vec_u8) + } +} + +#[cfg(target_arch = "wasm32")] +fn display_to_jsvalue(display: T) -> JsValue { + std::string::ToString::to_string(&display).into() +} + +#[allow(non_snake_case)] +#[cfg(target_arch = "wasm32")] +#[wasm_bindgen] +impl Pubkey { + /// Create a new Pubkey object + /// + /// * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result { + if let Some(base58_str) = value.as_string() { + base58_str.parse::().map_err(display_to_jsvalue) + } else if let Some(uint8_array) = value.dyn_ref::() { + Pubkey::try_from(uint8_array.to_vec()) + .map_err(|err| JsValue::from(std::format!("Invalid Uint8Array pubkey: {err:?}"))) + } else if let Some(array) = value.dyn_ref::() { + let mut bytes = std::vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if n >= 0. && n <= 255. { + bytes.push(n as u8); + continue; + } + } + return Err(std::format!("Invalid array argument: {:?}", x).into()); + } + Pubkey::try_from(bytes) + .map_err(|err| JsValue::from(std::format!("Invalid Array pubkey: {err:?}"))) + } else if value.is_undefined() { + Ok(Pubkey::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base58 string representation of the public key + pub fn toString(&self) -> std::string::String { + std::string::ToString::to_string(self) + } + + /// Check if a `Pubkey` is on the ed25519 curve. 
+ #[cfg(feature = "curve25519")] + pub fn isOnCurve(&self) -> bool { + self.is_on_curve() + } + + /// Checks if two `Pubkey`s are equal + pub fn equals(&self, other: &Pubkey) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the public key + pub fn toBytes(&self) -> std::boxed::Box<[u8]> { + self.0.clone().into() + } + + /// Derive a Pubkey from another Pubkey, string seed, and a program id + #[cfg(feature = "sha2")] + pub fn createWithSeed(base: &Pubkey, seed: &str, owner: &Pubkey) -> Result { + Pubkey::create_with_seed(base, seed, owner).map_err(display_to_jsvalue) + } + + /// Derive a program address from seeds and a program id + #[cfg(feature = "curve25519")] + pub fn createProgramAddress( + seeds: std::boxed::Box<[JsValue]>, + program_id: &Pubkey, + ) -> Result { + let seeds_vec = js_value_to_seeds_vec(&seeds)?; + let seeds_slice = seeds_vec + .iter() + .map(|seed| seed.as_slice()) + .collect::>(); + + Pubkey::create_program_address(seeds_slice.as_slice(), program_id) + .map_err(display_to_jsvalue) + } + + /// Find a valid program address + /// + /// Returns: + /// * `[PubKey, number]` - the program address and bump seed + #[cfg(feature = "curve25519")] + pub fn findProgramAddress( + seeds: std::boxed::Box<[JsValue]>, + program_id: &Pubkey, + ) -> Result { + let seeds_vec = js_value_to_seeds_vec(&seeds)?; + let seeds_slice = seeds_vec + .iter() + .map(|seed| seed.as_slice()) + .collect::>(); + + let (address, bump_seed) = Pubkey::find_program_address(seeds_slice.as_slice(), program_id); + + let result = Array::new_with_length(2); + result.set(0, address.into()); + result.set(1, bump_seed.into()); + Ok(result.into()) + } +} + #[cfg(test)] mod tests { - use {super::*, std::str::from_utf8}; + use {super::*, strum::IntoEnumIterator}; #[test] fn test_new_unique() { @@ -815,7 +1092,7 @@ mod tests { assert!(Pubkey::create_with_seed( &Pubkey::new_unique(), - std::str::from_utf8(&[0; MAX_SEED_LEN]).unwrap(), + from_utf8(&[0; MAX_SEED_LEN]).unwrap(), &Pubkey::new_unique(), ) .is_ok()); @@ -928,7 +1205,7 @@ mod tests { fn test_pubkey_off_curve() { // try a bunch of random input, all successful generated program // addresses must land off the curve and be unique - let mut addresses = vec![]; + let mut addresses = std::vec![]; for _ in 0..1_000 { let program_id = Pubkey::new_unique(); let bytes1 = rand::random::<[u8; 10]>(); @@ -964,7 +1241,7 @@ mod tests { let mut to_fake = owner.to_bytes().to_vec(); to_fake.extend_from_slice(marker); - let seed = &String::from_utf8(to_fake[..to_fake.len() - 32].to_vec()).expect("not utf8"); + let seed = from_utf8(&to_fake[..to_fake.len() - 32]).expect("not utf8"); let base = &Pubkey::try_from(&to_fake[to_fake.len() - 32..]).unwrap(); Pubkey::create_with_seed(&key, seed, base) @@ -978,4 +1255,27 @@ mod tests { ); assert!(pubkey_from_seed_by_marker(&PDA_MARKER[1..]).is_ok()); } + + #[test] + fn test_pubkey_error_from_primitive_exhaustive() { + for variant in PubkeyError::iter() { + let variant_i64 = variant.clone() as i64; + assert_eq!( + PubkeyError::from_repr(variant_i64 as usize), + PubkeyError::from_i64(variant_i64) + ); + assert_eq!(PubkeyError::from(variant_i64 as u64), variant); + } + } + + #[test] + fn test_parse_pubkey_error_from_primitive_exhaustive() { + for variant in ParsePubkeyError::iter() { + let variant_i64 = variant as i64; + assert_eq!( + ParsePubkeyError::from_repr(variant_i64 as usize), + ParsePubkeyError::from_i64(variant_i64) + ); + } + } } diff --git a/sdk/pubkey/src/syscalls.rs b/sdk/pubkey/src/syscalls.rs new 
file mode 100644 index 00000000000000..e539b902e9f7c2 --- /dev/null +++ b/sdk/pubkey/src/syscalls.rs @@ -0,0 +1,6 @@ +/// Syscall definitions used by `solana_pubkey`. +use solana_define_syscall::define_syscall; + +define_syscall!(fn sol_log_pubkey(pubkey_addr: *const u8)); +define_syscall!(fn sol_create_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8) -> u64); +define_syscall!(fn sol_try_find_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8, bump_seed_addr: *const u8) -> u64); diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 0541e1f8d9ce44..cb7dfbc9638c3e 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -30,7 +30,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "HawRVHh7t4d3H3bitWHFt25WhhoDmbJMCfWdESQQoYEy") + frozen_abi(digest = "2SUJNHbXMPWrsSXmDTFc4VHx2XQ85fT5Leabefh5Nwe7") )] #[derive(Deserialize, PartialEq, Eq, Clone, Default)] #[serde(rename_all = "camelCase")] @@ -58,7 +58,7 @@ mod account_serialize { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "HawRVHh7t4d3H3bitWHFt25WhhoDmbJMCfWdESQQoYEy") + frozen_abi(digest = "2SUJNHbXMPWrsSXmDTFc4VHx2XQ85fT5Leabefh5Nwe7") )] #[derive(Serialize)] #[serde(rename_all = "camelCase")] diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index c27793c5e20eec..ba58d5bd8811f8 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -87,7 +87,7 @@ impl FromStr for ClusterType { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "3V3ZVRyzNhRfe8RJwDeGpeTP8xBWGGFBEbwTkvKKVjEa") + frozen_abi(digest = "z6vuQfrTaknTiRs1giPFzG1Jcw8eReidFTNDTmaX6GN") )] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct GenesisConfig { diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 7007a592732217..ec4ff6a161a007 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -172,7 +172,7 @@ pub type Result<T> = result::Result<T, TransactionError>; #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "GNfV7vYLggBbde9n1xYKE8koExFLdr3yti7zDp7xShJR") + frozen_abi(digest = "686AAhRhjXpqKidmJEdHHcJCL9XxCxebu8Xmku9shp83") )] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] pub struct Transaction { @@ -200,7 +200,7 @@ pub struct Transaction { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "sGWhrQNiMNnUjPSG5cZvxujYaxHaiU5ggbvp46hKZSN") + frozen_abi(digest = "5mA54x7skHmXUoVfvwNSDrSo4F8kXJSrDrKrLMcUkAib") )] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] pub struct Transaction { From edda97e5df7db68e78070160cbda8dcaa5508ba4 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 10 Sep 2024 16:23:33 -0400 Subject: [PATCH 331/529] Removes unnecessary Arc in shrink function parameters (#2883) --- accounts-db/src/accounts_db.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 02449fcc021c33..169e3d498f3fa3 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4059,7 +4059,7 @@ impl AccountsDb { /// for duplicate pubkeys, the account with the highest write_value is returned pub fn get_unique_accounts_from_storage( &self, - store: &Arc<AccountStorageEntry>, + store: &AccountStorageEntry, ) -> GetUniqueAccountsResult { let capacity = store.capacity(); let mut stored_accounts =
Vec::with_capacity(store.count()); @@ -4120,7 +4120,7 @@ impl AccountsDb { pub(crate) fn get_unique_accounts_from_storage_for_shrink( &self, - store: &Arc<AccountStorageEntry>, + store: &AccountStorageEntry, stats: &ShrinkStats, ) -> GetUniqueAccountsResult { let (result, storage_read_elapsed_us) = @@ -4138,7 +4138,7 @@ impl AccountsDb { /// note 'unique_accounts' is passed by ref so we can return references to data within it, avoiding self-references pub(crate) fn shrink_collect<'a: 'b, 'b, T: ShrinkCollectRefs<'b>>( &self, - store: &'a Arc<AccountStorageEntry>, + store: &'a AccountStorageEntry, unique_accounts: &'b GetUniqueAccountsResult, stats: &ShrinkStats, ) -> ShrinkCollect<'b, T> { @@ -4353,7 +4353,7 @@ impl AccountsDb { ); } - fn do_shrink_slot_store(&self, slot: Slot, store: &Arc<AccountStorageEntry>) { + fn do_shrink_slot_store(&self, slot: Slot, store: &AccountStorageEntry) { if self.accounts_cache.contains(slot) { // It is not correct to shrink a slot while it is in the write cache until flush is complete and the slot is removed from the write cache. // There can exist a window after a slot is made a root and before the write cache flushing for that slot begins and then completes. From 37671df9fd654e9da453cb094ce32e3ea181c536 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Tue, 10 Sep 2024 19:10:38 -0700 Subject: [PATCH 332/529] Fix flaky unittest, should use timestamp of the Gossip message. (#2893) --- wen-restart/src/last_voted_fork_slots_aggregate.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs index fade1354a93b39..970cc706d56f34 100644 --- a/wen-restart/src/last_voted_fork_slots_aggregate.rs +++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs @@ -490,9 +490,10 @@ mod tests { [initial_num_active_validators + 2] .node_keypair .pubkey(); + let now = timestamp(); let super_old_validator_last_voted_slots = RestartLastVotedForkSlots::new( super_old_validator, - timestamp(), + now, &[root_slot - 1], Hash::default(), SHRED_VERSION, @@ -506,7 +507,7 @@ mod tests { last_voted_fork_slots: vec![], last_vote_bankhash: Hash::default().to_string(), shred_version: SHRED_VERSION as u32, - wallclock: timestamp(), + wallclock: now, }), ); assert_eq!( From f0a77e94bf0f4fbe2f423689c153cf38294d2992 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Wed, 11 Sep 2024 02:23:09 -0700 Subject: [PATCH 333/529] svm: advance nonce for fee-only transactions sooner (#2741) * write integration tests for nonce transactions; these pass against master as-written * advance nonce when creating rollback accounts; previously rollback accounts carried the fee payer that was to be committed, but an untouched nonce; now the account saver can use the nonce and fee payer from rollback accounts identically, possibly merging them for a fee-paying nonce * fix nonce-related unit tests * remove nonce hack from integration test * support four tx states in svm integration * advance nonce in check transactions * fix tests for new interfaces * remove last blockhash timing and tx result mutator * change copy mut to proper accountshareddata function * address feedback * please clippy * fix merge issues * get lamports_per_signature from blockhash * revert niche lps change * fix merge issues again --- core/src/banking_stage/committer.rs | 7 +- core/src/banking_stage/consume_worker.rs | 10 - core/src/banking_stage/consumer.rs | 13 - .../leader_slot_timing_metrics.rs | 3 -
runtime/src/account_saver.rs | 100 +--- runtime/src/bank.rs | 22 +- runtime/src/bank/check_transactions.rs | 117 ++++- runtime/src/bank/tests.rs | 4 +- svm/src/rollback_accounts.rs | 8 +- svm/src/transaction_processing_result.rs | 8 - svm/src/transaction_processor.rs | 9 +- svm/tests/integration_test.rs | 474 +++++++++++++++--- svm/tests/mock_bank.rs | 38 +- 13 files changed, 584 insertions(+), 229 deletions(-) diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 4c5bf8bbf5382b..ff27104251759e 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -12,7 +12,7 @@ use { transaction_batch::TransactionBatch, vote_sender_types::ReplayVoteSender, }, - solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign}, + solana_sdk::{pubkey::Pubkey, saturating_add_assign}, solana_svm::{ transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, transaction_processing_result::{ @@ -65,13 +65,10 @@ impl Committer { self.transaction_status_sender.is_some() } - #[allow(clippy::too_many_arguments)] pub(super) fn commit_transactions( &self, batch: &TransactionBatch, processing_results: Vec, - last_blockhash: Hash, - lamports_per_signature: u64, starting_transaction_index: Option, bank: &Arc, pre_balance_info: &mut PreBalanceInfo, @@ -87,8 +84,6 @@ impl Committer { let (commit_results, commit_time_us) = measure_us!(bank.commit_transactions( batch.sanitized_transactions(), processing_results, - last_blockhash, - lamports_per_signature, processed_counts, &mut execute_and_commit_timings.execute_timings, )); diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 3902ce8829f163..b676168bb04d4d 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -275,7 +275,6 @@ impl ConsumeWorkerMetrics { collect_balances_us, load_execute_us, freeze_lock_us, - last_blockhash_us, record_us, commit_us, find_and_send_votes_us, @@ -291,9 +290,6 @@ impl ConsumeWorkerMetrics { self.timing_metrics .freeze_lock_us .fetch_add(*freeze_lock_us, Ordering::Relaxed); - self.timing_metrics - .last_blockhash_us - .fetch_add(*last_blockhash_us, Ordering::Relaxed); self.timing_metrics .record_us .fetch_add(*record_us, Ordering::Relaxed); @@ -494,7 +490,6 @@ struct ConsumeWorkerTimingMetrics { collect_balances_us: AtomicU64, load_execute_us: AtomicU64, freeze_lock_us: AtomicU64, - last_blockhash_us: AtomicU64, record_us: AtomicU64, commit_us: AtomicU64, find_and_send_votes_us: AtomicU64, @@ -527,11 +522,6 @@ impl ConsumeWorkerTimingMetrics { self.freeze_lock_us.swap(0, Ordering::Relaxed), i64 ), - ( - "last_blockhash_us", - self.last_blockhash_us.swap(0, Ordering::Relaxed), - i64 - ), ("record_us", self.record_us.swap(0, Ordering::Relaxed), i64), ("commit_us", self.commit_us.swap(0, Ordering::Relaxed), i64), ( diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 25942d11c1f914..b8cd383a896634 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -666,17 +666,6 @@ impl Consumer { let (freeze_lock, freeze_lock_us) = measure_us!(bank.freeze_lock()); execute_and_commit_timings.freeze_lock_us = freeze_lock_us; - // In order to avoid a race condition, leaders must get the last - // blockhash *before* recording transactions because recording - // transactions will only succeed if the block max tick height hasn't - // been reached yet. 
If they get the last blockhash *after* recording - // transactions, the block max tick height could have already been - // reached and the blockhash queue could have already been updated with - // a new blockhash. - let ((last_blockhash, lamports_per_signature), last_blockhash_us) = - measure_us!(bank.last_blockhash_and_lamports_per_signature()); - execute_and_commit_timings.last_blockhash_us = last_blockhash_us; - let (record_transactions_summary, record_us) = measure_us!(self .transaction_recorder .record_transactions(bank.slot(), processed_transactions)); @@ -713,8 +702,6 @@ impl Consumer { self.committer.commit_transactions( batch, processing_results, - last_blockhash, - lamports_per_signature, starting_transaction_index, bank, &mut pre_balance_info, diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 0de9296ce91aac..31b3dc0a24e7ca 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -10,7 +10,6 @@ pub struct LeaderExecuteAndCommitTimings { pub collect_balances_us: u64, pub load_execute_us: u64, pub freeze_lock_us: u64, - pub last_blockhash_us: u64, pub record_us: u64, pub commit_us: u64, pub find_and_send_votes_us: u64, @@ -23,7 +22,6 @@ impl LeaderExecuteAndCommitTimings { saturating_add_assign!(self.collect_balances_us, other.collect_balances_us); saturating_add_assign!(self.load_execute_us, other.load_execute_us); saturating_add_assign!(self.freeze_lock_us, other.freeze_lock_us); - saturating_add_assign!(self.last_blockhash_us, other.last_blockhash_us); saturating_add_assign!(self.record_us, other.record_us); saturating_add_assign!(self.commit_us, other.commit_us); saturating_add_assign!(self.find_and_send_votes_us, other.find_and_send_votes_us); @@ -40,7 +38,6 @@ impl LeaderExecuteAndCommitTimings { ("collect_balances_us", self.collect_balances_us as i64, i64), ("load_execute_us", self.load_execute_us as i64, i64), ("freeze_lock_us", self.freeze_lock_us as i64, i64), - ("last_blockhash_us", self.last_blockhash_us as i64, i64), ("record_us", self.record_us as i64, i64), ("commit_us", self.commit_us as i64, i64), ( diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index da4188b87f441c..941e4934175af3 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -1,8 +1,8 @@ use { core::borrow::Borrow, solana_sdk::{ - account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey, - transaction::SanitizedTransaction, transaction_context::TransactionAccount, + account::AccountSharedData, pubkey::Pubkey, transaction::SanitizedTransaction, + transaction_context::TransactionAccount, }, solana_svm::{ rollback_accounts::RollbackAccounts, @@ -51,9 +51,7 @@ fn max_number_of_accounts_to_collect( pub fn collect_accounts_to_store<'a, T: SVMMessage>( txs: &'a [T], txs_refs: &'a Option>>, - processing_results: &'a mut [TransactionProcessingResult], - durable_nonce: &DurableNonce, - lamports_per_signature: u64, + processing_results: &'a [TransactionProcessingResult], ) -> ( Vec<(&'a Pubkey, &'a AccountSharedData)>, Option>, @@ -63,10 +61,9 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( let mut transactions = txs_refs .is_some() .then(|| Vec::with_capacity(collect_capacity)); - for (index, (processing_result, transaction)) in - processing_results.iter_mut().zip(txs).enumerate() + for (index, (processing_result, transaction)) in processing_results.iter().zip(txs).enumerate() { - let 
Some(processed_tx) = processing_result.processed_transaction_mut() else { + let Some(processed_tx) = processing_result.processed_transaction() else { // Don't store any accounts if tx wasn't executed continue; }; @@ -88,9 +85,7 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( &mut transactions, transaction, transaction_ref, - &mut executed_tx.loaded_transaction.rollback_accounts, - durable_nonce, - lamports_per_signature, + &executed_tx.loaded_transaction.rollback_accounts, ); } } @@ -100,9 +95,7 @@ pub fn collect_accounts_to_store<'a, T: SVMMessage>( &mut transactions, transaction, transaction_ref, - &mut fees_only_tx.rollback_accounts, - durable_nonce, - lamports_per_signature, + &fees_only_tx.rollback_accounts, ); } } @@ -142,25 +135,18 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( collected_account_transactions: &mut Option>, transaction: &'a T, transaction_ref: Option<&'a SanitizedTransaction>, - rollback_accounts: &'a mut RollbackAccounts, - durable_nonce: &DurableNonce, - lamports_per_signature: u64, + rollback_accounts: &'a RollbackAccounts, ) { let fee_payer_address = transaction.fee_payer(); match rollback_accounts { RollbackAccounts::FeePayerOnly { fee_payer_account } => { - collected_accounts.push((fee_payer_address, &*fee_payer_account)); + collected_accounts.push((fee_payer_address, fee_payer_account)); if let Some(collected_account_transactions) = collected_account_transactions { collected_account_transactions .push(transaction_ref.expect("transaction ref must exist if collecting")); } } RollbackAccounts::SameNonceAndFeePayer { nonce } => { - // Since we know we are dealing with a valid nonce account, - // unwrap is safe here - nonce - .try_advance_nonce(*durable_nonce, lamports_per_signature) - .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); if let Some(collected_account_transactions) = collected_account_transactions { collected_account_transactions @@ -171,17 +157,12 @@ fn collect_accounts_for_failed_tx<'a, T: SVMMessage>( nonce, fee_payer_account, } => { - collected_accounts.push((fee_payer_address, &*fee_payer_account)); + collected_accounts.push((fee_payer_address, fee_payer_account)); if let Some(collected_account_transactions) = collected_account_transactions { collected_account_transactions .push(transaction_ref.expect("transaction ref must exist if collecting")); } - // Since we know we are dealing with a valid nonce account, - // unwrap is safe here - nonce - .try_advance_nonce(*durable_nonce, lamports_per_signature) - .unwrap(); collected_accounts.push((nonce.address(), nonce.account())); if let Some(collected_account_transactions) = collected_account_transactions { collected_account_transactions @@ -204,7 +185,7 @@ mod tests { message::Message, native_loader, nonce::{ - state::{Data as NonceData, Versions as NonceVersions}, + state::{Data as NonceData, DurableNonce, Versions as NonceVersions}, State as NonceState, }, nonce_account, @@ -315,7 +296,7 @@ mod tests { }; let txs = vec![tx0.clone(), tx1.clone()]; - let mut processing_results = vec![ + let processing_results = vec![ new_executed_processing_result(Ok(()), loaded0), new_executed_processing_result(Ok(()), loaded1), ]; @@ -324,13 +305,8 @@ mod tests { for collect_transactions in [false, true] { let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); - let (collected_accounts, transactions) = collect_accounts_to_store( - &txs, - &transaction_refs, - &mut processing_results, - &DurableNonce::default(), - 0, - ); + let (collected_accounts, 
transactions) = + collect_accounts_to_store(&txs, &transaction_refs, &processing_results); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts .iter() @@ -383,7 +359,7 @@ mod tests { }; let txs = vec![tx]; - let mut processing_results = vec![new_executed_processing_result( + let processing_results = vec![new_executed_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, @@ -392,17 +368,11 @@ mod tests { )]; let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); for collect_transactions in [false, true] { let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); - let (collected_accounts, transactions) = collect_accounts_to_store( - &txs, - &transaction_refs, - &mut processing_results, - &durable_nonce, - 0, - ); + let (collected_accounts, transactions) = + collect_accounts_to_store(&txs, &transaction_refs, &processing_results); assert_eq!(collected_accounts.len(), 1); assert_eq!( collected_accounts @@ -483,9 +453,8 @@ mod tests { loaded_accounts_data_size: 0, }; - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let txs = vec![tx]; - let mut processing_results = vec![new_executed_processing_result( + let processing_results = vec![new_executed_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, @@ -497,13 +466,8 @@ mod tests { for collect_transactions in [false, true] { let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); - let (collected_accounts, transactions) = collect_accounts_to_store( - &txs, - &transaction_refs, - &mut processing_results, - &durable_nonce, - 0, - ); + let (collected_accounts, transactions) = + collect_accounts_to_store(&txs, &transaction_refs, &processing_results); assert_eq!(collected_accounts.len(), 2); assert_eq!( collected_accounts @@ -597,9 +561,8 @@ mod tests { loaded_accounts_data_size: 0, }; - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); let txs = vec![tx]; - let mut processing_results = vec![new_executed_processing_result( + let processing_results = vec![new_executed_processing_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, @@ -611,13 +574,8 @@ mod tests { for collect_transactions in [false, true] { let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); - let (collected_accounts, transactions) = collect_accounts_to_store( - &txs, - &transaction_refs, - &mut processing_results, - &durable_nonce, - 0, - ); + let (collected_accounts, transactions) = + collect_accounts_to_store(&txs, &transaction_refs, &processing_results); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts .iter() @@ -658,7 +616,7 @@ mod tests { let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); let txs = vec![tx]; - let mut processing_results = vec![Ok(ProcessedTransaction::FeesOnly(Box::new( + let processing_results = vec![Ok(ProcessedTransaction::FeesOnly(Box::new( FeesOnlyTransaction { load_error: TransactionError::InvalidProgramForExecution, fee_details: FeeDetails::default(), @@ -669,17 +627,11 @@ mod tests { )))]; let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &processing_results); assert_eq!(max_collected_accounts, 1); - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); for 
collect_transactions in [false, true] { let transaction_refs = collect_transactions.then(|| txs.iter().collect::>()); - let (collected_accounts, transactions) = collect_accounts_to_store( - &txs, - &transaction_refs, - &mut processing_results, - &durable_nonce, - 0, - ); + let (collected_accounts, transactions) = + collect_accounts_to_store(&txs, &transaction_refs, &processing_results); assert_eq!(collected_accounts.len(), 1); assert_eq!( collected_accounts diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 44d8195c993c5a..e0072749a4c078 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -128,7 +128,6 @@ use { message::{AccountKeys, SanitizedMessage}, native_loader, native_token::LAMPORTS_PER_SOL, - nonce::state::DurableNonce, packet::PACKET_DATA_SIZE, precompiles::get_precompiles, pubkey::Pubkey, @@ -3017,8 +3016,11 @@ impl Bank { blockhash_queue.get_lamports_per_signature(message.recent_blockhash()) } .or_else(|| { - self.load_message_nonce_account(message) - .map(|(_nonce, nonce_data)| nonce_data.get_lamports_per_signature()) + self.load_message_nonce_account(message).map( + |(_nonce_address, _nonce_account, nonce_data)| { + nonce_data.get_lamports_per_signature() + }, + ) })?; Some(self.get_fee_for_message_with_lamports_per_signature(message, lamports_per_signature)) } @@ -3762,9 +3764,7 @@ impl Bank { pub fn commit_transactions( &self, sanitized_txs: &[SanitizedTransaction], - mut processing_results: Vec, - last_blockhash: Hash, - lamports_per_signature: u64, + processing_results: Vec, processed_counts: &ProcessedTransactionCounts, timings: &mut ExecuteTimings, ) -> Vec { @@ -3799,8 +3799,6 @@ impl Bank { } let ((), store_accounts_us) = measure_us!({ - let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); - // If geyser is present, we must collect `SanitizedTransaction` // references in order to comply with that interface - until it // is changed. 
@@ -3813,9 +3811,7 @@ impl Bank { let (accounts_to_store, transactions) = collect_accounts_to_store( sanitized_txs, &maybe_transaction_refs, - &mut processing_results, - &durable_nonce, - lamports_per_signature, + &processing_results, ); self.rc.accounts.store_cached( (self.slot(), accounts_to_store.as_slice()), @@ -4634,13 +4630,9 @@ impl Bank { }, ); - let (last_blockhash, lamports_per_signature) = - self.last_blockhash_and_lamports_per_signature(); let commit_results = self.commit_transactions( batch.sanitized_transactions(), processing_results, - last_blockhash, - lamports_per_signature, &processed_counts, timings, ); diff --git a/runtime/src/bank/check_transactions.rs b/runtime/src/bank/check_transactions.rs index d966d986fb8305..0f0d70f15b07ab 100644 --- a/runtime/src/bank/check_transactions.rs +++ b/runtime/src/bank/check_transactions.rs @@ -3,13 +3,21 @@ use { solana_accounts_db::blockhash_queue::BlockhashQueue, solana_perf::perf_libs, solana_sdk::{ + account::AccountSharedData, + account_utils::StateMut, clock::{ MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU, }, message::SanitizedMessage, - nonce::{self, state::DurableNonce, NONCED_TX_MARKER_IX_INDEX}, + nonce::{ + state::{ + Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, + }, + NONCED_TX_MARKER_IX_INDEX, + }, nonce_account, + pubkey::Pubkey, transaction::{Result as TransactionResult, SanitizedTransaction, TransactionError}, }, solana_svm::{ @@ -71,6 +79,10 @@ impl Bank { let hash_queue = self.blockhash_queue.read().unwrap(); let last_blockhash = hash_queue.last_hash(); let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + // safe so long as the BlockhashQueue is consistent + let next_lamports_per_signature = hash_queue + .get_lamports_per_signature(&last_blockhash) + .unwrap(); sanitized_txs .iter() @@ -81,6 +93,7 @@ impl Bank { max_age, &next_durable_nonce, &hash_queue, + next_lamports_per_signature, error_counters, ), Err(e) => Err(e.clone()), @@ -94,6 +107,7 @@ impl Bank { max_age: usize, next_durable_nonce: &DurableNonce, hash_queue: &BlockhashQueue, + next_lamports_per_signature: u64, error_counters: &mut TransactionErrorMetrics, ) -> TransactionCheckResult { let recent_blockhash = tx.message().recent_blockhash(); @@ -102,12 +116,16 @@ impl Bank { nonce: None, lamports_per_signature: hash_info.lamports_per_signature(), }) - } else if let Some((nonce, nonce_data)) = - self.check_and_load_message_nonce_account(tx.message(), next_durable_nonce) + } else if let Some((nonce, previous_lamports_per_signature)) = self + .check_load_and_advance_message_nonce_account( + tx.message(), + next_durable_nonce, + next_lamports_per_signature, + ) { Ok(CheckedTransactionDetails { nonce: Some(nonce), - lamports_per_signature: nonce_data.get_lamports_per_signature(), + lamports_per_signature: previous_lamports_per_signature, }) } else { error_counters.blockhash_not_found += 1; @@ -115,23 +133,40 @@ impl Bank { } } - pub(super) fn check_and_load_message_nonce_account( + pub(super) fn check_load_and_advance_message_nonce_account( &self, message: &SanitizedMessage, next_durable_nonce: &DurableNonce, - ) -> Option<(NonceInfo, nonce::state::Data)> { + next_lamports_per_signature: u64, + ) -> Option<(NonceInfo, u64)> { let nonce_is_advanceable = message.recent_blockhash() != next_durable_nonce.as_hash(); - if nonce_is_advanceable { - self.load_message_nonce_account(message) - } else { - None + if !nonce_is_advanceable { + return None; } + + let 
(nonce_address, mut nonce_account, nonce_data) = + self.load_message_nonce_account(message)?; + + let previous_lamports_per_signature = nonce_data.get_lamports_per_signature(); + let next_nonce_state = NonceState::new_initialized( + &nonce_data.authority, + *next_durable_nonce, + next_lamports_per_signature, + ); + nonce_account + .set_state(&NonceVersions::new(next_nonce_state)) + .ok()?; + + Some(( + NonceInfo::new(nonce_address, nonce_account), + previous_lamports_per_signature, + )) } pub(super) fn load_message_nonce_account( &self, message: &SanitizedMessage, - ) -> Option<(NonceInfo, nonce::state::Data)> { + ) -> Option<(Pubkey, AccountSharedData, NonceData)> { let nonce_address = message.get_durable_nonce()?; let nonce_account = self.get_account_with_fixed_root(nonce_address)?; let nonce_data = @@ -144,7 +179,7 @@ impl Bank { return None; } - Some((NonceInfo::new(*nonce_address, nonce_account), nonce_data)) + Some((*nonce_address, nonce_account, nonce_data)) } fn check_status_cache( @@ -200,6 +235,7 @@ mod tests { #[test] fn test_check_and_load_message_nonce_account_ok() { + const STALE_LAMPORTS_PER_SIGNATURE: u64 = 42; let (bank, _mint_keypair, custodian_keypair, nonce_keypair, _) = setup_nonce_with_bank( 10_000_000, |_| {}, @@ -221,11 +257,37 @@ mod tests { Some(&custodian_pubkey), &nonce_hash, )); - let nonce_account = bank.get_account(&nonce_pubkey).unwrap(); + + // set a spurious lamports_per_signature value + let mut nonce_account = bank.get_account(&nonce_pubkey).unwrap(); let nonce_data = get_nonce_data_from_account(&nonce_account).unwrap(); + nonce_account + .set_state(&NonceVersions::new(NonceState::new_initialized( + &nonce_data.authority, + nonce_data.durable_nonce, + STALE_LAMPORTS_PER_SIGNATURE, + ))) + .unwrap(); + bank.store_account(&nonce_pubkey, &nonce_account); + + let nonce_account = bank.get_account(&nonce_pubkey).unwrap(); + let (_, next_lamports_per_signature) = bank.last_blockhash_and_lamports_per_signature(); + let mut expected_nonce_info = NonceInfo::new(nonce_pubkey, nonce_account); + expected_nonce_info + .try_advance_nonce(bank.next_durable_nonce(), next_lamports_per_signature) + .unwrap(); + + // we now expect to: + // * advance the nonce account to the current durable nonce value + // * set the blockhash queue's last blockhash's lamports_per_signature value in the nonce data + // * retrieve the previous lamports_per_signature value set on the nonce data for transaction fee checks assert_eq!( - bank.check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()), - Some((NonceInfo::new(nonce_pubkey, nonce_account), nonce_data)) + bank.check_load_and_advance_message_nonce_account( + &message, + &bank.next_durable_nonce(), + next_lamports_per_signature + ), + Some((expected_nonce_info, STALE_LAMPORTS_PER_SIGNATURE)), ); } @@ -252,8 +314,13 @@ mod tests { Some(&custodian_pubkey), &nonce_hash, )); + let (_, lamports_per_signature) = bank.last_blockhash_and_lamports_per_signature(); assert!(bank - .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) + .check_load_and_advance_message_nonce_account( + &message, + &bank.next_durable_nonce(), + lamports_per_signature + ) .is_none()); } @@ -281,10 +348,12 @@ mod tests { &nonce_hash, ); message.instructions[0].accounts.clear(); + let (_, lamports_per_signature) = bank.last_blockhash_and_lamports_per_signature(); assert!(bank - .check_and_load_message_nonce_account( + .check_load_and_advance_message_nonce_account( &new_sanitized_message(message), &bank.next_durable_nonce(), + 
lamports_per_signature, ) .is_none()); } @@ -314,8 +383,13 @@ mod tests { Some(&custodian_pubkey), &nonce_hash, )); + let (_, lamports_per_signature) = bank.last_blockhash_and_lamports_per_signature(); assert!(bank - .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) + .check_load_and_advance_message_nonce_account( + &message, + &bank.next_durable_nonce(), + lamports_per_signature + ) .is_none()); } @@ -341,8 +415,13 @@ mod tests { Some(&custodian_pubkey), &Hash::default(), )); + let (_, lamports_per_signature) = bank.last_blockhash_and_lamports_per_signature(); assert!(bank - .check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()) + .check_load_and_advance_message_nonce_account( + &message, + &bank.next_durable_nonce(), + lamports_per_signature + ) .is_none()); } } diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index ec62293b28f105..4df464cdfea6ff 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -5732,10 +5732,12 @@ fn test_check_ro_durable_nonce_fails() { bank.process_transaction(&tx), Err(TransactionError::BlockhashNotFound) ); + let (_, lamports_per_signature) = bank.last_blockhash_and_lamports_per_signature(); assert_eq!( - bank.check_and_load_message_nonce_account( + bank.check_load_and_advance_message_nonce_account( &new_sanitized_message(tx.message().clone()), &bank.next_durable_nonce(), + lamports_per_signature, ), None ); diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs index c2c02f2f80bd43..1a9a764e131186 100644 --- a/svm/src/rollback_accounts.rs +++ b/svm/src/rollback_accounts.rs @@ -51,6 +51,12 @@ impl RollbackAccounts { if let Some(nonce) = nonce { if &fee_payer_address == nonce.address() { + // `nonce` contains an AccountSharedData which has already been advanced to the current DurableNonce + // `fee_payer_account` is an AccountSharedData as it currently exists on-chain + // thus if the nonce account is being used as the fee payer, we need to update that data here + // so we capture both the data change for the nonce and the lamports/rent epoch change for the fee payer + fee_payer_account.set_data_from_slice(nonce.account().data()); + RollbackAccounts::SameNonceAndFeePayer { nonce: NonceInfo::new(fee_payer_address, fee_payer_account), } @@ -63,7 +69,7 @@ impl RollbackAccounts { } else { // When rolling back failed transactions which don't use nonces, the // runtime should not update the fee payer's rent epoch so reset the - // rollback fee payer acocunt's rent epoch to its originally loaded + // rollback fee payer account's rent epoch to its originally loaded // rent epoch value. In the future, a feature gate could be used to // alter this behavior such that rent epoch updates are handled the // same for both nonce and non-nonce failed transactions. 
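To make the rollback merge above concrete, here is a minimal, self-contained Rust sketch. It is illustrative only: `Account`, `Rollback`, and `new_rollback` are simplified stand-ins for the SVM's `AccountSharedData`, `RollbackAccounts`, and `RollbackAccounts::new`, and plain integers stand in for `Pubkey` addresses; none of these names exist in the patch itself.

#[derive(Clone, Debug, PartialEq)]
struct Account {
    lamports: u64,
    data: Vec<u8>,
}

#[derive(Debug, PartialEq)]
enum Rollback {
    FeePayerOnly { fee_payer: Account },
    SameNonceAndFeePayer { nonce: Account },
    SeparateNonceAndFeePayer { nonce: Account, fee_payer: Account },
}

// `nonce` pairs a nonce address with its already-advanced account state;
// `fee_payer` is the fee payer as loaded on-chain, with the fee already deducted.
fn new_rollback(
    nonce: Option<(u64, Account)>,
    fee_payer_address: u64,
    mut fee_payer: Account,
) -> Rollback {
    match nonce {
        Some((nonce_address, nonce_account)) if nonce_address == fee_payer_address => {
            // The nonce account pays the fee: keep the fee-deducted lamports
            // from `fee_payer`, but take the advanced data from the nonce.
            fee_payer.data = nonce_account.data;
            Rollback::SameNonceAndFeePayer { nonce: fee_payer }
        }
        Some((_, nonce_account)) => Rollback::SeparateNonceAndFeePayer {
            nonce: nonce_account,
            fee_payer,
        },
        None => Rollback::FeePayerOnly { fee_payer },
    }
}

fn main() {
    let advanced_nonce = Account { lamports: 1_000_000, data: vec![7; 80] };
    let fee_payer = Account { lamports: 995_000, data: vec![0; 80] };
    // Address 42 is both the durable nonce and the fee payer.
    match new_rollback(Some((42, advanced_nonce)), 42, fee_payer) {
        Rollback::SameNonceAndFeePayer { nonce } => {
            assert_eq!(nonce.lamports, 995_000); // fee-deducted balance wins
            assert_eq!(nonce.data, vec![7; 80]); // advanced nonce data wins
        }
        _ => unreachable!(),
    }
}

The property this sketch exercises, matching the diff above, is that a single committed rollback account keeps the fee payer's fee-deducted lamports while carrying the advanced nonce data, so a failed or fee-only transaction still consumes the durable nonce.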
diff --git a/svm/src/transaction_processing_result.rs b/svm/src/transaction_processing_result.rs index 7802b9ac213808..0658b5035fda0f 100644 --- a/svm/src/transaction_processing_result.rs +++ b/svm/src/transaction_processing_result.rs @@ -15,7 +15,6 @@ pub trait TransactionProcessingResultExtensions { fn was_processed(&self) -> bool; fn was_processed_with_successful_result(&self) -> bool; fn processed_transaction(&self) -> Option<&ProcessedTransaction>; - fn processed_transaction_mut(&mut self) -> Option<&mut ProcessedTransaction>; fn flattened_result(&self) -> TransactionResult<()>; } @@ -48,13 +47,6 @@ impl TransactionProcessingResultExtensions for TransactionProcessingResult { } } - fn processed_transaction_mut(&mut self) -> Option<&mut ProcessedTransaction> { - match self { - Ok(processed_tx) => Some(processed_tx), - Err(_) => None, - } - } - fn flattened_result(&self) -> TransactionResult<()> { self.as_ref() .map_err(|err| err.clone()) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 76ececd6ec5cd0..f0c9f681a74955 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -475,8 +475,8 @@ impl TransactionBatchProcessor { fee_details.total_fee(), )?; - // Capture fee-subtracted fee payer account and original nonce account state - // to rollback to if transaction execution fails. + // Capture fee-subtracted fee payer account and next nonce account state + // to commit if transaction execution fails. let rollback_accounts = RollbackAccounts::new( nonce, *fee_payer_address, @@ -2180,13 +2180,14 @@ mod tests { let lamports_per_signature = 5000; let rent_collector = RentCollector::default(); let compute_unit_limit = 2 * solana_compute_budget_program::DEFAULT_COMPUTE_UNITS; + let last_blockhash = Hash::new_unique(); let message = new_unchecked_sanitized_message(Message::new_with_blockhash( &[ ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit as u32), ComputeBudgetInstruction::set_compute_unit_price(1_000_000), ], Some(&Pubkey::new_unique()), - &Hash::new_unique(), + &last_blockhash, )); let compute_budget_limits = process_compute_budget_instructions(SVMMessage::program_instructions_iter(&message)) @@ -2216,10 +2217,12 @@ mod tests { let mut error_counters = TransactionErrorMetrics::default(); let batch_processor = TransactionBatchProcessor::::default(); + let nonce = Some(NonceInfo::new( *fee_payer_address, fee_payer_account.clone(), )); + let result = batch_processor.validate_transaction_fee_payer( &mock_bank, None, diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 08f6db09101e04..53d9d04f445183 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -6,20 +6,25 @@ use { MockBankCallback, MockForkGraph, WALLCLOCK_TIME, }, solana_sdk::{ - account::{AccountSharedData, WritableAccount}, + account::{AccountSharedData, ReadableAccount, WritableAccount}, clock::Slot, + feature_set::{self, FeatureSet}, hash::Hash, instruction::{AccountMeta, Instruction}, native_token::LAMPORTS_PER_SOL, + nonce::{self, state::DurableNonce}, pubkey::Pubkey, signature::Signer, signer::keypair::Keypair, system_instruction, system_program, system_transaction, + sysvar::rent::Rent, transaction::{SanitizedTransaction, Transaction, TransactionError}, transaction_context::TransactionReturnData, }, solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, + nonce_info::NonceInfo, + rollback_accounts::RollbackAccounts, 
transaction_execution_result::TransactionExecutionDetails, transaction_processing_result::ProcessedTransaction, transaction_processor::{ @@ -27,6 +32,7 @@ use { TransactionProcessingEnvironment, }, }, + solana_svm_transaction::svm_message::SVMMessage, solana_type_overrides::sync::{Arc, RwLock}, std::collections::{HashMap, HashSet}, test_case::test_case, @@ -39,23 +45,27 @@ const DEPLOYMENT_SLOT: u64 = 0; const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch const LAMPORTS_PER_SIGNATURE: u64 = 5000; +const LAST_BLOCKHASH: Hash = Hash::new_from_array([7; 32]); // Arbitrary constant hash for advancing nonces -pub type AccountMap = HashMap; +pub type AccountsMap = HashMap; // container for a transaction batch and all data needed to run and verify it against svm #[derive(Debug, Default)] pub struct SvmTestEntry { + // features are disabled by default; these will be enabled + pub enabled_features: Vec, + // programs to deploy to the new svm before transaction execution pub initial_programs: Vec<(String, Slot)>, // accounts to deploy to the new svm before transaction execution - pub initial_accounts: AccountMap, + pub initial_accounts: AccountsMap, // transactions to execute and transaction-specific checks to perform on the results from svm pub transaction_batch: Vec, // expected final account states, checked after transaction execution - pub final_accounts: AccountMap, + pub final_accounts: AccountsMap, } impl SvmTestEntry { @@ -108,18 +118,53 @@ impl SvmTestEntry { // convenience function that adds a transaction that is expected to succeed pub fn push_transaction(&mut self, transaction: Transaction) { + self.push_transaction_with_status(transaction, ExecutionStatus::Succeeded) + } + + // convenience function that adds a transaction with an expected execution status + pub fn push_transaction_with_status( + &mut self, + transaction: Transaction, + status: ExecutionStatus, + ) { self.transaction_batch.push(TransactionBatchItem { transaction, + asserts: TransactionBatchItemAsserts { + status, + ..TransactionBatchItemAsserts::default() + }, ..TransactionBatchItem::default() }); } - // convenience function that adds a transaction that is expected to execute but fail - pub fn push_failed_transaction(&mut self, transaction: Transaction) { + // convenience function that adds a nonce transaction that is expected to succeed + // we accept the prior nonce state and advance it for the check status, since this happens before svm + pub fn push_nonce_transaction(&mut self, transaction: Transaction, nonce_info: NonceInfo) { + self.push_nonce_transaction_with_status(transaction, nonce_info, ExecutionStatus::Succeeded) + } + + // convenience function that adds a nonce transaction with an expected execution status + // we accept the prior nonce state and advance it for the check status, since this happens before svm + pub fn push_nonce_transaction_with_status( + &mut self, + transaction: Transaction, + mut nonce_info: NonceInfo, + status: ExecutionStatus, + ) { + nonce_info + .try_advance_nonce( + DurableNonce::from_blockhash(&LAST_BLOCKHASH), + LAMPORTS_PER_SIGNATURE, + ) + .unwrap(); + self.transaction_batch.push(TransactionBatchItem { transaction, - asserts: TransactionBatchItemAsserts::failed(), - ..TransactionBatchItem::default() + asserts: TransactionBatchItemAsserts { + status, + ..TransactionBatchItemAsserts::default() + }, + ..TransactionBatchItem::with_nonce(nonce_info) 
}); } @@ -155,6 +200,18 @@ pub struct TransactionBatchItem { pub asserts: TransactionBatchItemAsserts, } +impl TransactionBatchItem { + fn with_nonce(nonce_info: NonceInfo) -> Self { + Self { + check_result: Ok(CheckedTransactionDetails { + nonce: Some(nonce_info), + lamports_per_signature: LAMPORTS_PER_SIGNATURE, + }), + ..Self::default() + } + } +} + impl Default for TransactionBatchItem { fn default() -> Self { Self { @@ -163,7 +220,7 @@ impl Default for TransactionBatchItem { nonce: None, lamports_per_signature: LAMPORTS_PER_SIGNATURE, }), - asserts: TransactionBatchItemAsserts::succeeded(), + asserts: TransactionBatchItemAsserts::default(), } } } @@ -171,45 +228,33 @@ impl Default for TransactionBatchItem { // asserts for a given transaction in a batch // we can automatically check whether it executed, whether it succeeded // log items we expect to see (exect match only), and rodata -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct TransactionBatchItemAsserts { - pub executed: bool, - pub succeeded: bool, + pub status: ExecutionStatus, pub logs: Vec, pub return_data: ReturnDataAssert, } impl TransactionBatchItemAsserts { - pub fn succeeded() -> Self { - Self { - executed: true, - succeeded: true, - logs: vec![], - return_data: ReturnDataAssert::Skip, - } + pub fn succeeded(&self) -> bool { + self.status.succeeded() } - pub fn failed() -> Self { - Self { - executed: true, - succeeded: false, - logs: vec![], - return_data: ReturnDataAssert::Skip, - } + pub fn executed(&self) -> bool { + self.status.executed() } - pub fn not_executed() -> Self { - Self { - executed: false, - succeeded: false, - logs: vec![], - return_data: ReturnDataAssert::Skip, - } + pub fn processed(&self) -> bool { + self.status.processed() + } + + pub fn discarded(&self) -> bool { + self.status.discarded() } pub fn check_executed_transaction(&self, execution_details: &TransactionExecutionDetails) { - assert!(self.executed); - assert_eq!(self.succeeded, execution_details.status.is_ok()); + assert!(self.executed()); + assert_eq!(self.succeeded(), execution_details.status.is_ok()); if !self.logs.is_empty() { let actual_logs = execution_details.log_messages.as_ref().unwrap(); @@ -227,7 +272,51 @@ impl TransactionBatchItemAsserts { } } -#[derive(Clone, Debug, Default, PartialEq)] +impl From for TransactionBatchItemAsserts { + fn from(status: ExecutionStatus) -> Self { + Self { + status, + ..Self::default() + } + } +} + +// states a transaction can end in after a trip through the batch processor: +// * discarded: no-op. not even processed. a flawed transaction excluded from the entry +// * processed-failed: aka fee (and nonce) only. charged and added to an entry but not executed, would have failed invariably +// * executed-failed: failed during execution. 
as above, fees charged and nonce advanced +// * succeeded: what we all aspire to be in our transaction processing lifecycles +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)] +pub enum ExecutionStatus { + Discarded, + ProcessedFailed, + ExecutedFailed, + #[default] + Succeeded, +} + +// note we avoid the word "failed" because it is confusing +// the batch processor uses it to mean "executed and not succeeded" +// but intuitively (and from the point of a user) it could just as likely mean "any state other than succeeded" +impl ExecutionStatus { + pub fn succeeded(self) -> bool { + self == Self::Succeeded + } + + pub fn executed(self) -> bool { + self > Self::ProcessedFailed + } + + pub fn processed(self) -> bool { + self != Self::Discarded + } + + pub fn discarded(self) -> bool { + self == Self::Discarded + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] pub enum ReturnDataAssert { Some(TransactionReturnData), None, @@ -397,12 +486,15 @@ fn program_medley() -> Vec { ], ); - test_entry.push_failed_transaction(Transaction::new_signed_with_payer( - &[instruction], - Some(&fee_payer), - &[&fee_payer_keypair, &sender_keypair], - Hash::default(), - )); + test_entry.push_transaction_with_status( + Transaction::new_signed_with_payer( + &[instruction], + Some(&fee_payer), + &[&fee_payer_keypair, &sender_keypair], + Hash::default(), + ), + ExecutionStatus::ExecutedFailed, + ); test_entry.transaction_batch[3] .asserts @@ -425,7 +517,7 @@ fn program_medley() -> Vec { Hash::default(), ), check_result: Err(TransactionError::BlockhashNotFound), - asserts: TransactionBatchItemAsserts::not_executed(), + asserts: ExecutionStatus::Discarded.into(), }); } @@ -473,12 +565,15 @@ fn simple_transfer() -> Vec { source_data.set_lamports(transfer_amount - 1); test_entry.add_initial_account(source, &source_data); - test_entry.push_failed_transaction(system_transaction::transfer( - &source_keypair, - &Pubkey::new_unique(), - transfer_amount, - Hash::default(), - )); + test_entry.push_transaction_with_status( + system_transaction::transfer( + &source_keypair, + &Pubkey::new_unique(), + transfer_amount, + Hash::default(), + ), + ExecutionStatus::ExecutedFailed, + ); test_entry.decrease_expected_lamports(&source, LAMPORTS_PER_SIGNATURE); } @@ -493,23 +588,22 @@ fn simple_transfer() -> Vec { Hash::default(), ), check_result: Err(TransactionError::BlockhashNotFound), - asserts: TransactionBatchItemAsserts::not_executed(), + asserts: ExecutionStatus::Discarded.into(), }); } // 3: a non-executable transfer that fails loading the fee-payer // NOTE when we support the processed/executed distinction, this is NOT processed { - test_entry.transaction_batch.push(TransactionBatchItem { - transaction: system_transaction::transfer( + test_entry.push_transaction_with_status( + system_transaction::transfer( &Keypair::new(), &Pubkey::new_unique(), transfer_amount, Hash::default(), ), - asserts: TransactionBatchItemAsserts::not_executed(), - ..TransactionBatchItem::default() - }); + ExecutionStatus::Discarded, + ); } // 4: a non-executable transfer that fails loading the program @@ -531,16 +625,217 @@ fn simple_transfer() -> Vec { system_instruction::transfer(&source, &Pubkey::new_unique(), transfer_amount); instruction.program_id = Pubkey::new_unique(); - test_entry.transaction_batch.push(TransactionBatchItem { - transaction: Transaction::new_signed_with_payer( + test_entry.push_transaction_with_status( + Transaction::new_signed_with_payer( &[instruction], Some(&source), &[&source_keypair], Hash::default(), 
), - asserts: TransactionBatchItemAsserts::not_executed(), - ..TransactionBatchItem::default() - }); + ExecutionStatus::Discarded, + ); + } + + vec![test_entry] +} + +fn simple_nonce_fee_only( + enable_fee_only_transactions: bool, + fee_paying_nonce: bool, +) -> Vec { + let mut test_entry = SvmTestEntry::default(); + if enable_fee_only_transactions { + test_entry + .enabled_features + .push(feature_set::enable_transaction_loading_failure_fees::id()); + } + + let program_name = "hello-solana".to_string(); + let real_program_id = program_address(&program_name); + test_entry + .initial_programs + .push((program_name, DEPLOYMENT_SLOT)); + + // create and return a transaction, fee payer, and nonce info + // sets up initial account states but not final ones + // there are four cases of fee_paying_nonce and fake_fee_payer: + // * false/false: normal nonce account with rent minimum, normal fee payer account with 1sol + // * true/false: normal nonce account used to pay fees with rent minimum plus 1sol + // * false/true: normal nonce account with rent minimum, fee payer doesnt exist + // * true/true: same account for both which does not exist + let mk_nonce_transaction = |test_entry: &mut SvmTestEntry, program_id, fake_fee_payer: bool| { + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + let nonce_pubkey = if fee_paying_nonce { + fee_payer + } else { + Pubkey::new_unique() + }; + + let nonce_size = nonce::State::size(); + let mut nonce_balance = Rent::default().minimum_balance(nonce_size); + + if !fake_fee_payer && !fee_paying_nonce { + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + } else if fee_paying_nonce { + nonce_balance = nonce_balance.saturating_add(LAMPORTS_PER_SOL); + } + + let nonce_initial_hash = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_data = + nonce::state::Data::new(fee_payer, nonce_initial_hash, LAMPORTS_PER_SIGNATURE); + let nonce_account = AccountSharedData::new_data( + nonce_balance, + &nonce::state::Versions::new(nonce::State::Initialized(nonce_data.clone())), + &system_program::id(), + ) + .unwrap(); + let nonce_info = NonceInfo::new(nonce_pubkey, nonce_account.clone()); + + if !(fake_fee_payer && fee_paying_nonce) { + test_entry.add_initial_account(nonce_pubkey, &nonce_account); + } + + let instructions = vec![ + system_instruction::advance_nonce_account(&nonce_pubkey, &fee_payer), + Instruction::new_with_bytes(program_id, &[], vec![]), + ]; + + let transaction = Transaction::new_signed_with_payer( + &instructions, + Some(&fee_payer), + &[&fee_payer_keypair], + nonce_data.blockhash(), + ); + + (transaction, fee_payer, nonce_info) + }; + + // successful nonce transaction, regardless of features + { + let (transaction, fee_payer, mut nonce_info) = + mk_nonce_transaction(&mut test_entry, real_program_id, false); + test_entry.push_nonce_transaction(transaction, nonce_info.clone()); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + nonce_info + .try_advance_nonce( + DurableNonce::from_blockhash(&LAST_BLOCKHASH), + LAMPORTS_PER_SIGNATURE, + ) + .unwrap(); + + test_entry + .final_accounts + .get_mut(nonce_info.address()) + .unwrap() + .data_as_mut_slice() + .copy_from_slice(nonce_info.account().data()); + } + + // non-executing nonce transaction (fee payer doesnt exist) regardless of features + { + let (transaction, _fee_payer, nonce_info) = + mk_nonce_transaction(&mut 
test_entry, real_program_id, true); + + test_entry + .final_accounts + .entry(*nonce_info.address()) + .and_modify(|account| account.set_rent_epoch(0)); + + test_entry.push_nonce_transaction_with_status( + transaction, + nonce_info, + ExecutionStatus::Discarded, + ); + } + + // failing nonce transaction (bad system instruction) regardless of features + { + let (transaction, fee_payer, mut nonce_info) = + mk_nonce_transaction(&mut test_entry, system_program::id(), false); + test_entry.push_nonce_transaction_with_status( + transaction, + nonce_info.clone(), + ExecutionStatus::ExecutedFailed, + ); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + nonce_info + .try_advance_nonce( + DurableNonce::from_blockhash(&LAST_BLOCKHASH), + LAMPORTS_PER_SIGNATURE, + ) + .unwrap(); + + test_entry + .final_accounts + .get_mut(nonce_info.address()) + .unwrap() + .data_as_mut_slice() + .copy_from_slice(nonce_info.account().data()); + } + + // and this (program doesnt exist) will be a non-executing transaction without the feature + // or a fee-only transaction with it. which is identical to failed *except* rent is not updated + { + let (transaction, fee_payer, mut nonce_info) = + mk_nonce_transaction(&mut test_entry, Pubkey::new_unique(), false); + + if enable_fee_only_transactions { + test_entry.push_nonce_transaction_with_status( + transaction, + nonce_info.clone(), + ExecutionStatus::ProcessedFailed, + ); + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + nonce_info + .try_advance_nonce( + DurableNonce::from_blockhash(&LAST_BLOCKHASH), + LAMPORTS_PER_SIGNATURE, + ) + .unwrap(); + + test_entry + .final_accounts + .get_mut(nonce_info.address()) + .unwrap() + .data_as_mut_slice() + .copy_from_slice(nonce_info.account().data()); + + // if the nonce account pays fees, it keeps its new rent epoch, otherwise it resets + if !fee_paying_nonce { + test_entry + .final_accounts + .get_mut(nonce_info.address()) + .unwrap() + .set_rent_epoch(0); + } + } else { + test_entry + .final_accounts + .get_mut(&fee_payer) + .unwrap() + .set_rent_epoch(0); + + test_entry + .final_accounts + .get_mut(nonce_info.address()) + .unwrap() + .set_rent_epoch(0); + + test_entry.push_nonce_transaction_with_status( + transaction, + nonce_info, + ExecutionStatus::Discarded, + ); + } } vec![test_entry] @@ -548,6 +843,10 @@ fn simple_transfer() -> Vec { #[test_case(program_medley())] #[test_case(simple_transfer())] +#[test_case(simple_nonce_fee_only(false, false))] +#[test_case(simple_nonce_fee_only(true, false))] +#[test_case(simple_nonce_fee_only(false, true))] +#[test_case(simple_nonce_fee_only(true, true))] fn svm_integration(test_entries: Vec) { for test_entry in test_entries { execute_test_entry(test_entry); @@ -596,37 +895,61 @@ fn execute_test_entry(test_entry: SvmTestEntry) { ..Default::default() }; + let mut feature_set = FeatureSet::default(); + for feature_id in &test_entry.enabled_features { + feature_set.activate(feature_id, 0); + } + + let processing_environment = TransactionProcessingEnvironment { + blockhash: LAST_BLOCKHASH, + feature_set: feature_set.into(), + lamports_per_signature: LAMPORTS_PER_SIGNATURE, + ..TransactionProcessingEnvironment::default() + }; + // execute transaction batch let (transactions, check_results) = test_entry.prepare_transactions(); let batch_output = batch_processor.load_and_execute_sanitized_transactions( &mock_bank, &transactions, check_results, - &TransactionProcessingEnvironment::default(), + &processing_environment, 
&processing_config, ); // build a hashmap of final account states incrementally, starting with all initial states, updating to all final states // NOTE with SIMD-83 an account may appear multiple times in the same batch let mut final_accounts_actual = test_entry.initial_accounts.clone(); - for processed_transaction in batch_output - .processing_results - .iter() - .filter_map(|r| r.as_ref().ok()) - { + + for (index, processed_transaction) in batch_output.processing_results.iter().enumerate() { match processed_transaction { - ProcessedTransaction::Executed(executed_transaction) => { + Ok(ProcessedTransaction::Executed(executed_transaction)) => { for (pubkey, account_data) in executed_transaction.loaded_transaction.accounts.clone() { final_accounts_actual.insert(pubkey, account_data); } } - // NOTE this is a possible state with `feature_set::enable_transaction_loading_failure_fees` enabled - // by using `TransactionProcessingEnvironment::default()` we have all features disabled - // in other words, this will be unreachable until we are ready to test fee-only transactions - // (or the feature is activated on mainnet and removed... but we should do it before then!) - ProcessedTransaction::FeesOnly(_) => unreachable!(), + Ok(ProcessedTransaction::FeesOnly(fees_only_transaction)) => { + let fee_payer = transactions[index].fee_payer(); + + match fees_only_transaction.rollback_accounts.clone() { + RollbackAccounts::FeePayerOnly { fee_payer_account } => { + final_accounts_actual.insert(*fee_payer, fee_payer_account); + } + RollbackAccounts::SameNonceAndFeePayer { nonce } => { + final_accounts_actual.insert(*nonce.address(), nonce.account().clone()); + } + RollbackAccounts::SeparateNonceAndFeePayer { + nonce, + fee_payer_account, + } => { + final_accounts_actual.insert(*fee_payer, fee_payer_account); + final_accounts_actual.insert(*nonce.address(), nonce.account().clone()); + } + } + } + Err(_) => {} } } @@ -650,8 +973,11 @@ fn execute_test_entry(test_entry: SvmTestEntry) { match processing_result { Ok(ProcessedTransaction::Executed(executed_transaction)) => test_item_asserts .check_executed_transaction(&executed_transaction.execution_details), - Ok(ProcessedTransaction::FeesOnly(_)) => unreachable!(), - Err(_) => assert!(!test_item_asserts.executed), + Ok(ProcessedTransaction::FeesOnly(_)) => { + assert!(test_item_asserts.processed()); + assert!(!test_item_asserts.executed()); + } + Err(_) => assert!(test_item_asserts.discarded()), } } } diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 57f00360e91bfb..5797d514888201 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -1,7 +1,9 @@ +#[allow(deprecated)] +use solana_sdk::sysvar::recent_blockhashes::{Entry as BlockhashesEntry, RecentBlockhashes}; use { solana_bpf_loader_program::syscalls::{ - SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy, - SyscallMemset, SyscallSetReturnData, + SyscallAbort, SyscallGetClockSysvar, SyscallGetRentSysvar, SyscallInvokeSignedRust, + SyscallLog, SyscallMemcpy, SyscallMemset, SyscallSetReturnData, }, solana_compute_budget::compute_budget::ComputeBudget, solana_feature_set::FeatureSet, @@ -21,6 +23,7 @@ use { clock::{Clock, UnixTimestamp}, native_loader, pubkey::Pubkey, + rent::Rent, slot_hashes::Slot, sysvar::SysvarId, }, @@ -201,6 +204,8 @@ pub fn create_executable_environment( program_cache.fork_graph = Some(Arc::downgrade(&fork_graph)); // We must fill in the sysvar cache entries + + // clock contents are important because we use them for a 
sysvar loading test
     let clock = Clock {
         slot: DEPLOYMENT_SLOT,
         epoch_start_timestamp: WALLCLOCK_TIME.saturating_sub(10) as UnixTimestamp,
@@ -216,6 +221,31 @@ pub fn create_executable_environment(
         .write()
         .unwrap()
         .insert(Clock::id(), account_data);
+
+    // default rent is fine
+    let rent = Rent::default();
+
+    let mut account_data = AccountSharedData::default();
+    account_data.set_data(bincode::serialize(&rent).unwrap());
+    mock_bank
+        .account_shared_data
+        .write()
+        .unwrap()
+        .insert(Rent::id(), account_data);
+
+    // SystemInstruction::AdvanceNonceAccount asserts RecentBlockhashes is non-empty
+    // but then just gets the blockhash from InvokeContext, so the sysvar doesn't need real entries
+    #[allow(deprecated)]
+    let recent_blockhashes = vec![BlockhashesEntry::default()];
+
+    let mut account_data = AccountSharedData::default();
+    account_data.set_data(bincode::serialize(&recent_blockhashes).unwrap());
+    #[allow(deprecated)]
+    mock_bank
+        .account_shared_data
+        .write()
+        .unwrap()
+        .insert(RecentBlockhashes::id(), account_data);
 }
 
 #[allow(unused)]
@@ -304,5 +334,9 @@ fn create_custom_environment<'a>() -> BuiltinProgram<InvokeContext<'a>> {
         .register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm)
         .expect("Registration failed");
 
+    function_registry
+        .register_function_hashed(*b"sol_get_rent_sysvar", SyscallGetRentSysvar::vm)
+        .expect("Registration failed");
+
     BuiltinProgram::new_loader(vm_config, function_registry)
 }

From 34e9932c7ca10d6f4becd7edf9ce59460f880f56 Mon Sep 17 00:00:00 2001
From: Ryo Onodera
Date: Wed, 11 Sep 2024 21:30:29 +0900
Subject: [PATCH 334/529] Introduce ledger-tool simulate-block-production
 (#2733)

* Introduce ledger-tool simulate-block-production

* Move counting code out of time-sensitive loop

* Avoid misleading ::clone() altogether

* Use while instead of loop+break

* Add comment of using BTreeMap

* Reduce simulation jitter due to mem deallocs

* Rename to CostTracker::new_from_parent_limits()

* Make ::load() take a slice

* Clean up retracer code a bit

* Add comment about BankingTracer even inside sim

* Remove redundant dcou dev-dependencies

* Apply suggestions from code review

Co-authored-by: Andrew Fitzgerald

* Fix up and promote to doc comments

* Make warm-up code and doc simpler

* Further clean up timed_batches_to_send

* Fix wrong units...

* Replace new_with_dummy_keypair() with traits

* Tweak --no-block-cost-limits description

* Remove redundant dev-dependencies

* Use RwLock to mimic real ClusterInfo

* Fix typo

* Refactor too long BankingSimulator::start()

* Reduce indent

* Calculate required_duration in advance

* Use correct format specifier instead of cast

* Align formatting by using ::*

* Make envs overridable

* Add comment for SOLANA_VALIDATOR_EXIT_TIMEOUT

* Clarify comment a bit

* Fix typos

* Fix typos

Co-authored-by: Andrew Fitzgerald

* Use correct variant name: DeserializeError

* Remove SimulatorLoopLogger::new()

* Fix typos more

* Add explicit _batch in field names

* Avoid unneeded events: Vec<_> buffering

* Manually adjust logging code styles

* Align name: spawn_sender_loop/enter_simulator_loop

* Refactor by introducing {Sender,Simulator}Loop

* Fix out-of-sync sim due to timed preprocessing

* Fix too-early base_simulation_time creation

* Don't log confusing info! after leader slots

* Add justification comment of BroadcastStage

* Align timeout values

* Comment about snapshot_slot=50

* Don't squash all errors unconditionally

* Remove repetitive existence check

* Promote no_block_cost_limits logging level

* Make ci/run-sanity.sh more robust

* Improve wording of --enable-hash-overrides

* Remove marker-file based abortion mechanism

* Remove needless touch

---------

Co-authored-by: Andrew Fitzgerald
---
 ci/run-sanity.sh                         |  42 +-
 core/Cargo.toml                          |   5 +-
 core/benches/forwarder.rs                |   2 +-
 core/src/banking_simulation.rs           | 920 ++++++++++++++++++
 core/src/banking_stage.rs                |  46 +-
 core/src/banking_stage/forward_worker.rs |  12 +-
 core/src/banking_stage/forwarder.rs      |  15 +-
 .../scheduler_controller.rs              |  17 +-
 core/src/banking_trace.rs                |  10 +-
 core/src/lib.rs                          |   1 +
 core/src/next_leader.rs                  |   5 +-
 cost-model/src/cost_tracker.rs           |  14 +
 ledger-tool/Cargo.toml                   |   2 +-
 ledger-tool/src/args.rs                  |   4 +
 ledger-tool/src/main.rs                  | 188 +++-
 ledger/src/blockstore.rs                 |  14 +-
 ledger/src/blockstore_processor.rs       |  38 +-
 runtime/src/bank.rs                      | 113 ++-
 validator/src/admin_rpc_service.rs       |   9 +-
 19 files changed, 1401 insertions(+), 56 deletions(-)
 create mode 100644 core/src/banking_simulation.rs

diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh
index 88a6f40b1adf28..17c47a7a956de8 100755
--- a/ci/run-sanity.sh
+++ b/ci/run-sanity.sh
@@ -5,9 +5,27 @@ cd "$(dirname "$0")/.."
 # shellcheck source=multinode-demo/common.sh
 source multinode-demo/common.sh
 
+if [[ -z $CI ]]; then
+  # Build eagerly if needed for local development. Otherwise, an odd timing error occurs...
+  $solana_keygen --version
+  $solana_genesis --version
+  $solana_faucet --version
+  $solana_cli --version
+  $agave_validator --version
+  $solana_ledger_tool --version
+fi
+
 rm -rf config/run/init-completed config/ledger config/snapshot-ledger
 
-SOLANA_RUN_SH_VALIDATOR_ARGS="--full-snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
+# Sanity-check that agave-validator can successfully terminate itself without relying on
+# process::exit() by extending the timeout...
+# Also the banking_tracer thread needs some extra time to flush due to
+# unsynchronized and buffered IO.
+validator_timeout="${SOLANA_VALIDATOR_EXIT_TIMEOUT:-120}"
+SOLANA_RUN_SH_VALIDATOR_ARGS="${SOLANA_RUN_SH_VALIDATOR_ARGS} --full-snapshot-interval-slots 200" \
+  SOLANA_VALIDATOR_EXIT_TIMEOUT="$validator_timeout" \
+  timeout "$validator_timeout" ./scripts/run.sh &
+
 pid=$!
 
 attempts=20
@@ -21,7 +39,10 @@ while [[ ! -f config/run/init-completed ]]; do
   fi
 done
 
-snapshot_slot=1
+# Needs a bunch of slots for simulate-block-production.
+# Better yet, run ~20 secs so it runs longer than its warm-up.
+# As a bonus, this works as a sanity test of general slot-rooting behavior.
+snapshot_slot=50 latest_slot=0 # wait a bit longer than snapshot_slot @@ -39,5 +60,18 @@ $solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" conf cp config/ledger/genesis.tar.bz2 config/snapshot-ledger $solana_ledger_tool copy --ledger config/ledger \ --target-db config/snapshot-ledger --starting-slot "$snapshot_slot" --ending-slot "$latest_slot" -$solana_ledger_tool verify --ledger config/snapshot-ledger --block-verification-method blockstore-processor -$solana_ledger_tool verify --ledger config/snapshot-ledger --block-verification-method unified-scheduler +$solana_ledger_tool verify --abort-on-invalid-block \ + --ledger config/snapshot-ledger --block-verification-method blockstore-processor +$solana_ledger_tool verify --abort-on-invalid-block \ + --ledger config/snapshot-ledger --block-verification-method unified-scheduler + +first_simulated_slot=$((latest_slot / 2)) +purge_slot=$((first_simulated_slot + latest_slot / 4)) +echo "First simulated slot: ${first_simulated_slot}" +# Purge some slots so that later verify fails if sim is broken +$solana_ledger_tool purge --ledger config/ledger "$purge_slot" +$solana_ledger_tool simulate-block-production --ledger config/ledger \ + --first-simulated-slot $first_simulated_slot +# Slots should be available and correctly replayable upto snapshot_slot at least. +$solana_ledger_tool verify --abort-on-invalid-block \ + --ledger config/ledger --enable-hash-overrides --halt-at-slot "$snapshot_slot" diff --git a/core/Cargo.toml b/core/Cargo.toml index bde6144e142ae0..bbceb94dbc659e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -105,7 +105,6 @@ solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } solana-program-runtime = { workspace = true } -solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } solana-unified-scheduler-pool = { workspace = true, features = [ @@ -123,7 +122,9 @@ sysctl = { workspace = true } rustc_version = { workspace = true, optional = true } [features] -dev-context-only-utils = [] +dev-context-only-utils = [ + "solana-runtime/dev-context-only-utils", +] frozen-abi = [ "dep:rustc_version", "dep:solana-frozen-abi", diff --git a/core/benches/forwarder.rs b/core/benches/forwarder.rs index bf40e1be7ac17e..10a050f3d97d4b 100644 --- a/core/benches/forwarder.rs +++ b/core/benches/forwarder.rs @@ -34,7 +34,7 @@ use { struct BenchSetup { exit: Arc, poh_service: PohService, - forwarder: Forwarder, + forwarder: Forwarder>, unprocessed_packet_batches: UnprocessedTransactionStorage, tracker: LeaderSlotMetricsTracker, stats: BankingStageStats, diff --git a/core/src/banking_simulation.rs b/core/src/banking_simulation.rs new file mode 100644 index 00000000000000..a8a67b3e5653b2 --- /dev/null +++ b/core/src/banking_simulation.rs @@ -0,0 +1,920 @@ +#![cfg(feature = "dev-context-only-utils")] +use { + crate::{ + banking_stage::{BankingStage, LikeClusterInfo}, + banking_trace::{ + BankingPacketBatch, BankingTracer, ChannelLabel, TimedTracedEvent, TracedEvent, + TracedSender, TracerThread, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, BASENAME, + }, + validator::BlockProductionMethod, + }, + bincode::deserialize_from, + crossbeam_channel::{unbounded, Sender}, + itertools::Itertools, + log::*, + solana_client::connection_cache::ConnectionCache, + solana_gossip::{ + 
cluster_info::{ClusterInfo, Node},
+        contact_info::ContactInfo,
+    },
+    solana_ledger::{
+        blockstore::{Blockstore, PurgeType},
+        leader_schedule_cache::LeaderScheduleCache,
+    },
+    solana_poh::{
+        poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
+        poh_service::{PohService, DEFAULT_HASHES_PER_BATCH, DEFAULT_PINNED_CPU_CORE},
+    },
+    solana_runtime::{
+        bank::{Bank, HashOverrides},
+        bank_forks::BankForks,
+        installed_scheduler_pool::BankWithScheduler,
+        prioritization_fee_cache::PrioritizationFeeCache,
+    },
+    solana_sdk::{
+        clock::{Slot, DEFAULT_MS_PER_SLOT, HOLD_TRANSACTIONS_SLOT_OFFSET},
+        genesis_config::GenesisConfig,
+        pubkey::Pubkey,
+        shred_version::compute_shred_version,
+        signature::Signer,
+        signer::keypair::Keypair,
+    },
+    solana_streamer::socket::SocketAddrSpace,
+    solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType},
+    std::{
+        collections::BTreeMap,
+        fmt::Display,
+        fs::File,
+        io::{self, BufRead, BufReader},
+        net::{Ipv4Addr, UdpSocket},
+        path::PathBuf,
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc, RwLock,
+        },
+        thread::{self, sleep, JoinHandle},
+        time::{Duration, SystemTime},
+    },
+    thiserror::Error,
+};
+
+/// This creates a simulated environment around `BankingStage` to produce leader's blocks based on
+/// recorded banking trace events (`TimedTracedEvent`).
+///
+/// At a high level, the task of `BankingStage` is to pack transactions into assigned,
+/// fixed-duration, leader blocks. So, there are 3 abstract inputs to simulate: blocks, time, and
+/// transactions.
+///
+/// In the context of simulation, the first two are simple; both are well defined.
+///
+/// For ancestor blocks, we first replay a certain number of blocks immediately up to the target
+/// simulation leader's slot with the `halt_at_slot` mechanism, ultimately freezing the ancestor
+/// block with expected and deterministic hashes. This has the added possible benefit of warming
+/// caches that may be used during simulation.
+///
+/// After replay, a minor tweak is applied during simulation: we forcibly override leader's hashes
+/// as the simulated `BankingStage` creates them, using recorded `BlockAndBankHash` events. This is
+/// to provide indistinguishable sysvars to TX execution and identical TX age resolution as the
+/// simulation goes on. Otherwise, the vast majority of TX processing would differ because the
+/// simulated block's hashes would differ from the recorded ones, as differences in block
+/// composition are inevitable.
+///
+/// As in the real environment, for PoH time we use the `PohRecorder`. This is simply a 400ms
+/// timer, external to `BankingStage` and thus mostly irrelevant to `BankingStage` performance. For
+/// wall time, we use the first `BankStatus::BlockAndBankHash` and `SystemTime::now()` to define
+/// T=0 for simulation. Then, simulation progress is timed accordingly. For context, this syncing
+/// is necessary because all trace events are recorded in UTC, not relative to poh nor to leader
+/// schedule, for simplicity at recording.
+///
+/// Lastly, the most complicated input to simulate: transactions.
+///
+/// A closer look at the transaction load profile is below, regardless of internal banking
+/// implementation and simulation:
+///
+/// Due to Solana's general tx broadcast strategy of client's submission and optional node
+/// forwarding, many transactions often arrive before the first leader slot begins. Thus, the
+/// initial leader block creation typically starts with a rather large number of schedule-able
+/// transactions. Also, note that additional transactions arrive during the 4 leader slot window
+/// (roughly ~1.6 seconds).
+///
+/// Simulation must mimic this load pattern while being agnostic to internal banking impl as much
+/// as possible. For that agnostic objective, `TracedSender`s were introduced into the `SigVerify`
+/// stage and gossip subsystem by `BankingTracer` to trace **all** `BankingPacketBatch`s' exact
+/// payload and _sender_'s timing with `SystemTime::now()` for all `ChannelLabel`s. This deliberate
+/// tracing placement is not to be affected by any `BankingStage`'s internal capacity (if any) nor
+/// by its channel consumption pattern.
+///
+/// BankingSimulator consists of 2 phases chronologically: warm-up and on-the-fly. The 2 phases are
+/// segregated by the aforementioned T=0.
+///
+/// Both phases just send `BankingPacketBatch` in the same fashion, pretending to be
+/// `SigVerifyStage`/gossip from a single thread that busy-loops for precise T=N at ~1us
+/// granularity.
+///
+/// Warm-up starts at T=-WARMUP_DURATION (~ 13 secs). As soon as warm-up is initiated, we invoke
+/// `BankingStage::new_num_threads()` as well to simulate the pre-leader slot's tx-buffering time.
+pub struct BankingSimulator {
+    banking_trace_events: BankingTraceEvents,
+    first_simulated_slot: Slot,
+}
+
+#[derive(Error, Debug)]
+pub enum SimulateError {
+    #[error("IO Error: {0}")]
+    IoError(#[from] io::Error),
+
+    #[error("Deserialization Error: {0}")]
+    DeserializeError(#[from] bincode::Error),
+}
+
+// Defined to be enough to cover the holding phase prior to leader slots with some idling (+5 secs)
+const WARMUP_DURATION: Duration =
+    Duration::from_millis(HOLD_TRANSACTIONS_SLOT_OFFSET * DEFAULT_MS_PER_SLOT + 5000);
+
+/// BTreeMap is intentional because events could be slightly unordered due to tracing jitter.
+type PacketBatchesByTime = BTreeMap<SystemTime, (ChannelLabel, BankingPacketBatch)>;
+
+type FreezeTimeBySlot = BTreeMap<Slot, SystemTime>;
+
+type TimedBatchesToSend = Vec<(
+    (Duration, (ChannelLabel, BankingPacketBatch)),
+    (usize, usize),
+)>;
+
+type EventSenderThread = JoinHandle<(TracedSender, TracedSender, TracedSender)>;
+
+#[derive(Default)]
+pub struct BankingTraceEvents {
+    packet_batches_by_time: PacketBatchesByTime,
+    freeze_time_by_slot: FreezeTimeBySlot,
+    hash_overrides: HashOverrides,
+}
+
+impl BankingTraceEvents {
+    fn read_event_file(
+        event_file_path: &PathBuf,
+        mut callback: impl FnMut(TimedTracedEvent),
+    ) -> Result<(), SimulateError> {
+        let mut reader = BufReader::new(File::open(event_file_path)?);
+
+        // EOF is reached at a correct deserialization boundary, or the file is just empty.
+        // We want to look ahead in the buf, so NOT calling reader.consume(..) is correct.
+        while !reader.fill_buf()?.is_empty() {
+            callback(deserialize_from(&mut reader)?);
+        }
+
+        Ok(())
+    }
+
+    pub fn load(event_file_paths: &[PathBuf]) -> Result<Self, SimulateError> {
+        let mut event_count = 0;
+        let mut events = Self::default();
+        for event_file_path in event_file_paths {
+            let old_event_count = event_count;
+            let read_result = Self::read_event_file(event_file_path, |event| {
+                event_count += 1;
+                events.load_event(event);
+            });
+            info!(
+                "Read {} events from {:?}",
+                event_count - old_event_count,
+                event_file_path,
+            );
+
+            if matches!(
+                read_result,
+                Err(SimulateError::DeserializeError(ref deser_err))
+                    if matches!(
+                        &**deser_err,
+                        bincode::ErrorKind::Io(io_err)
+                            if io_err.kind() == std::io::ErrorKind::UnexpectedEof
+                    )
+            ) {
+                // Silence errors here as this can happen under normal operation...
+ warn!( + "Reading {:?} failed {:?} due to file corruption or unclean validator shutdown", + event_file_path, read_result, + ); + } else { + read_result? + } + } + + Ok(events) + } + + fn load_event(&mut self, TimedTracedEvent(event_time, event): TimedTracedEvent) { + match event { + TracedEvent::PacketBatch(label, batch) => { + // Deserialized PacketBatches will mostly be ordered by event_time, but this + // isn't guaranteed when traced, because time are measured by multiple _sender_ + // threads without synchronization among them to avoid overhead. + // + // Also, there's a possibility of system clock change. In this case, + // the simulation is meaningless, though... + // + // Somewhat naively assume that event_times (nanosecond resolution) won't + // collide. + let is_new = self + .packet_batches_by_time + .insert(event_time, (label, batch)) + .is_none(); + assert!(is_new); + } + TracedEvent::BlockAndBankHash(slot, blockhash, bank_hash) => { + let is_new = self.freeze_time_by_slot.insert(slot, event_time).is_none(); + self.hash_overrides.add_override(slot, blockhash, bank_hash); + assert!(is_new); + } + } + } + + pub fn hash_overrides(&self) -> &HashOverrides { + &self.hash_overrides + } +} + +struct DummyClusterInfo { + // Artificially wrap Pubkey with RwLock to induce lock contention if any to mimic the real + // ClusterInfo + id: RwLock, +} + +impl LikeClusterInfo for Arc { + fn id(&self) -> Pubkey { + *self.id.read().unwrap() + } + + fn lookup_contact_info(&self, _id: &Pubkey, _map: F) -> Option + where + F: FnOnce(&ContactInfo) -> Y, + { + None + } +} + +struct SimulatorLoopLogger { + simulated_leader: Pubkey, + freeze_time_by_slot: FreezeTimeBySlot, + base_event_time: SystemTime, + base_simulation_time: SystemTime, +} + +impl SimulatorLoopLogger { + fn bank_costs(bank: &Bank) -> (u64, u64) { + bank.read_cost_tracker() + .map(|t| (t.block_cost(), t.vote_cost())) + .unwrap() + } + + fn log_frozen_bank_cost(&self, bank: &Bank) { + info!( + "bank cost: slot: {} {:?} (frozen)", + bank.slot(), + Self::bank_costs(bank), + ); + } + + fn log_ongoing_bank_cost(&self, bank: &Bank) { + debug!( + "bank cost: slot: {} {:?} (ongoing)", + bank.slot(), + Self::bank_costs(bank), + ); + } + + fn log_jitter(&self, bank: &Bank) { + let old_slot = bank.slot(); + if let Some(event_time) = self.freeze_time_by_slot.get(&old_slot) { + if log_enabled!(log::Level::Info) { + let current_simulation_time = SystemTime::now(); + let elapsed_simulation_time = current_simulation_time + .duration_since(self.base_simulation_time) + .unwrap(); + let elapsed_event_time = event_time.duration_since(self.base_event_time).unwrap(); + info!( + "jitter(parent_slot: {}): {}{:?} (sim: {:?} event: {:?})", + old_slot, + if elapsed_simulation_time > elapsed_event_time { + "+" + } else { + "-" + }, + if elapsed_simulation_time > elapsed_event_time { + elapsed_simulation_time - elapsed_event_time + } else { + elapsed_event_time - elapsed_simulation_time + }, + elapsed_simulation_time, + elapsed_event_time, + ); + } + } + } + + fn on_new_leader(&self, bank: &Bank, new_slot: Slot, new_leader: Pubkey) { + self.log_frozen_bank_cost(bank); + info!( + "{} isn't leader anymore at slot {}; new leader: {}", + self.simulated_leader, new_slot, new_leader + ); + } +} + +struct SenderLoop { + parent_slot: Slot, + first_simulated_slot: Slot, + non_vote_sender: TracedSender, + tpu_vote_sender: TracedSender, + gossip_vote_sender: TracedSender, + exit: Arc, + raw_base_event_time: SystemTime, + total_batch_count: usize, + timed_batches_to_send: 
TimedBatchesToSend, +} + +impl SenderLoop { + fn log_starting(&self) { + info!( + "simulating events: {} (out of {}), starting at slot {} (based on {} from traced event slot: {}) (warmup: -{:?})", + self.timed_batches_to_send.len(), self.total_batch_count, self.first_simulated_slot, + SenderLoopLogger::format_as_timestamp(self.raw_base_event_time), + self.parent_slot, WARMUP_DURATION, + ); + } + + fn spawn(self, base_simulation_time: SystemTime) -> Result { + let handle = thread::Builder::new() + .name("solSimSender".into()) + .spawn(move || self.start(base_simulation_time))?; + Ok(handle) + } + + fn start( + mut self, + base_simulation_time: SystemTime, + ) -> (TracedSender, TracedSender, TracedSender) { + let mut logger = SenderLoopLogger::new( + &self.non_vote_sender, + &self.tpu_vote_sender, + &self.gossip_vote_sender, + ); + let mut simulation_duration = Duration::default(); + for ((required_duration, (label, batches_with_stats)), (batch_count, tx_count)) in + self.timed_batches_to_send.drain(..) + { + // Busy loop for most accurate sending timings + while simulation_duration < required_duration { + let current_simulation_time = SystemTime::now(); + simulation_duration = current_simulation_time + .duration_since(base_simulation_time) + .unwrap(); + } + + let sender = match label { + ChannelLabel::NonVote => &self.non_vote_sender, + ChannelLabel::TpuVote => &self.tpu_vote_sender, + ChannelLabel::GossipVote => &self.gossip_vote_sender, + ChannelLabel::Dummy => unreachable!(), + }; + sender.send(batches_with_stats).unwrap(); + + logger.on_sending_batches(&simulation_duration, label, batch_count, tx_count); + if self.exit.load(Ordering::Relaxed) { + break; + } + } + logger.on_terminating(); + drop(self.timed_batches_to_send); + // hold these senders in join_handle to control banking stage termination! 
+ ( + self.non_vote_sender, + self.tpu_vote_sender, + self.gossip_vote_sender, + ) + } +} + +struct SimulatorLoop { + bank: BankWithScheduler, + parent_slot: Slot, + first_simulated_slot: Slot, + freeze_time_by_slot: FreezeTimeBySlot, + base_event_time: SystemTime, + poh_recorder: Arc>, + simulated_leader: Pubkey, + bank_forks: Arc>, + blockstore: Arc, + leader_schedule_cache: Arc, + retransmit_slots_sender: Sender, + retracer: Arc, +} + +impl SimulatorLoop { + fn enter( + self, + base_simulation_time: SystemTime, + sender_thread: EventSenderThread, + ) -> (EventSenderThread, Sender) { + sleep(WARMUP_DURATION); + info!("warmup done!"); + self.start(base_simulation_time, sender_thread) + } + + fn start( + self, + base_simulation_time: SystemTime, + sender_thread: EventSenderThread, + ) -> (EventSenderThread, Sender) { + let logger = SimulatorLoopLogger { + simulated_leader: self.simulated_leader, + base_event_time: self.base_event_time, + base_simulation_time, + freeze_time_by_slot: self.freeze_time_by_slot, + }; + let mut bank = self.bank; + loop { + if self.poh_recorder.read().unwrap().bank().is_none() { + let next_leader_slot = self.leader_schedule_cache.next_leader_slot( + &self.simulated_leader, + bank.slot(), + &bank, + Some(&self.blockstore), + GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, + ); + debug!("{next_leader_slot:?}"); + self.poh_recorder + .write() + .unwrap() + .reset(bank.clone_without_scheduler(), next_leader_slot); + info!("Bank::new_from_parent()!"); + + logger.log_jitter(&bank); + bank.freeze(); + let new_slot = if bank.slot() == self.parent_slot { + info!("initial leader block!"); + self.first_simulated_slot + } else { + info!("next leader block!"); + bank.slot() + 1 + }; + let new_leader = self + .leader_schedule_cache + .slot_leader_at(new_slot, None) + .unwrap(); + if new_leader != self.simulated_leader { + logger.on_new_leader(&bank, new_slot, new_leader); + break; + } else if sender_thread.is_finished() { + warn!("sender thread existed maybe due to completion of sending traced events"); + break; + } else { + info!("new leader bank slot: {new_slot}"); + } + let new_bank = Bank::new_from_parent( + bank.clone_without_scheduler(), + &self.simulated_leader, + new_slot, + ); + // make sure parent is frozen for finalized hashes via the above + // new()-ing of its child bank + self.retracer + .hash_event(bank.slot(), &bank.last_blockhash(), &bank.hash()); + if *bank.collector_id() == self.simulated_leader { + logger.log_frozen_bank_cost(&bank); + } + self.retransmit_slots_sender.send(bank.slot()).unwrap(); + self.bank_forks.write().unwrap().insert(new_bank); + bank = self + .bank_forks + .read() + .unwrap() + .working_bank_with_scheduler() + .clone_with_scheduler(); + self.poh_recorder + .write() + .unwrap() + .set_bank(bank.clone_with_scheduler(), false); + } else { + logger.log_ongoing_bank_cost(&bank); + } + + sleep(Duration::from_millis(10)); + } + + (sender_thread, self.retransmit_slots_sender) + } +} + +struct SimulatorThreads { + poh_service: PohService, + banking_stage: BankingStage, + broadcast_stage: BroadcastStage, + retracer_thread: TracerThread, + exit: Arc, +} + +impl SimulatorThreads { + fn finish(self, sender_thread: EventSenderThread, retransmit_slots_sender: Sender) { + info!("Sleeping a bit before signaling exit"); + sleep(Duration::from_millis(100)); + self.exit.store(true, Ordering::Relaxed); + + // The order is important. Consuming sender_thread by joining will drop some channels. 
That
+        // triggers termination of banking_stage; in turn, the retracer thread will be terminated.
+        sender_thread.join().unwrap();
+        self.banking_stage.join().unwrap();
+        self.poh_service.join().unwrap();
+        if let Some(retracer_thread) = self.retracer_thread {
+            retracer_thread.join().unwrap().unwrap();
+        }
+
+        info!("Joining broadcast stage...");
+        drop(retransmit_slots_sender);
+        self.broadcast_stage.join().unwrap();
+    }
+}
+
+struct SenderLoopLogger<'a> {
+    non_vote_sender: &'a TracedSender,
+    tpu_vote_sender: &'a TracedSender,
+    gossip_vote_sender: &'a TracedSender,
+    last_log_duration: Duration,
+    last_tx_count: usize,
+    last_non_vote_tx_count: usize,
+    last_tpu_vote_tx_count: usize,
+    last_gossip_vote_tx_count: usize,
+    non_vote_batch_count: usize,
+    non_vote_tx_count: usize,
+    tpu_vote_batch_count: usize,
+    tpu_vote_tx_count: usize,
+    gossip_vote_batch_count: usize,
+    gossip_vote_tx_count: usize,
+}
+
+impl<'a> SenderLoopLogger<'a> {
+    fn new(
+        non_vote_sender: &'a TracedSender,
+        tpu_vote_sender: &'a TracedSender,
+        gossip_vote_sender: &'a TracedSender,
+    ) -> Self {
+        Self {
+            non_vote_sender,
+            tpu_vote_sender,
+            gossip_vote_sender,
+            last_log_duration: Duration::default(),
+            last_tx_count: 0,
+            last_non_vote_tx_count: 0,
+            last_tpu_vote_tx_count: 0,
+            last_gossip_vote_tx_count: 0,
+            non_vote_batch_count: 0,
+            non_vote_tx_count: 0,
+            tpu_vote_batch_count: 0,
+            tpu_vote_tx_count: 0,
+            gossip_vote_batch_count: 0,
+            gossip_vote_tx_count: 0,
+        }
+    }
+
+    fn on_sending_batches(
+        &mut self,
+        &simulation_duration: &Duration,
+        label: ChannelLabel,
+        batch_count: usize,
+        tx_count: usize,
+    ) {
+        debug!(
+            "sent {:?} {} batches ({} txes)",
+            label, batch_count, tx_count
+        );
+
+        use ChannelLabel::*;
+        let (total_batch_count, total_tx_count) = match label {
+            NonVote => (&mut self.non_vote_batch_count, &mut self.non_vote_tx_count),
+            TpuVote => (&mut self.tpu_vote_batch_count, &mut self.tpu_vote_tx_count),
+            GossipVote => (
+                &mut self.gossip_vote_batch_count,
+                &mut self.gossip_vote_tx_count,
+            ),
+            Dummy => unreachable!(),
+        };
+        *total_batch_count += batch_count;
+        *total_tx_count += tx_count;
+
+        let log_interval = simulation_duration - self.last_log_duration;
+        if log_interval > Duration::from_millis(100) {
+            let current_tx_count =
+                self.non_vote_tx_count + self.tpu_vote_tx_count + self.gossip_vote_tx_count;
+            let duration = log_interval.as_secs_f64();
+            let tps = (current_tx_count - self.last_tx_count) as f64 / duration;
+            let non_vote_tps =
+                (self.non_vote_tx_count - self.last_non_vote_tx_count) as f64 / duration;
+            let tpu_vote_tps =
+                (self.tpu_vote_tx_count - self.last_tpu_vote_tx_count) as f64 / duration;
+            let gossip_vote_tps =
+                (self.gossip_vote_tx_count - self.last_gossip_vote_tx_count) as f64 / duration;
+            info!(
+                "senders(non-,tpu-,gossip-vote): tps: {:.0} (={:.0}+{:.0}+{:.0}) over {:?} not-recved: ({}+{}+{})",
+                tps, non_vote_tps, tpu_vote_tps, gossip_vote_tps, log_interval,
+                self.non_vote_sender.len(), self.tpu_vote_sender.len(), self.gossip_vote_sender.len(),
+            );
+            self.last_log_duration = simulation_duration;
+            self.last_tx_count = current_tx_count;
+            (
+                self.last_non_vote_tx_count,
+                self.last_tpu_vote_tx_count,
+                self.last_gossip_vote_tx_count,
+            ) = (
+                self.non_vote_tx_count,
+                self.tpu_vote_tx_count,
+                self.gossip_vote_tx_count,
+            );
+        }
+    }
+
+    fn on_terminating(self) {
+        info!(
+            "terminating to send...: non_vote: {} ({}), tpu_vote: {} ({}), gossip_vote: {} ({})",
+            self.non_vote_batch_count,
+            self.non_vote_tx_count,
+
self.tpu_vote_batch_count, + self.tpu_vote_tx_count, + self.gossip_vote_batch_count, + self.gossip_vote_tx_count, + ); + } + + fn format_as_timestamp(time: SystemTime) -> impl Display { + let time: chrono::DateTime = time.into(); + time.format("%Y-%m-%d %H:%M:%S.%f") + } +} + +impl BankingSimulator { + pub fn new(banking_trace_events: BankingTraceEvents, first_simulated_slot: Slot) -> Self { + Self { + banking_trace_events, + first_simulated_slot, + } + } + + pub fn parent_slot(&self) -> Option { + self.banking_trace_events + .freeze_time_by_slot + .range(..self.first_simulated_slot) + .last() + .map(|(slot, _time)| slot) + .copied() + } + + fn prepare_simulation( + self, + genesis_config: GenesisConfig, + bank_forks: Arc>, + blockstore: Arc, + block_production_method: BlockProductionMethod, + ) -> (SenderLoop, SimulatorLoop, SimulatorThreads) { + let parent_slot = self.parent_slot().unwrap(); + let mut packet_batches_by_time = self.banking_trace_events.packet_batches_by_time; + let freeze_time_by_slot = self.banking_trace_events.freeze_time_by_slot; + let bank = bank_forks + .read() + .unwrap() + .working_bank_with_scheduler() + .clone_with_scheduler(); + + let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + assert_eq!(parent_slot, bank.slot()); + + let simulated_leader = leader_schedule_cache + .slot_leader_at(self.first_simulated_slot, None) + .unwrap(); + info!( + "Simulated leader and slot: {}, {}", + simulated_leader, self.first_simulated_slot, + ); + + let exit = Arc::new(AtomicBool::default()); + + if let Some(end_slot) = blockstore + .slot_meta_iterator(self.first_simulated_slot) + .unwrap() + .map(|(s, _)| s) + .last() + { + info!("purging slots {}, {}", self.first_simulated_slot, end_slot); + blockstore.purge_from_next_slots(self.first_simulated_slot, end_slot); + blockstore.purge_slots(self.first_simulated_slot, end_slot, PurgeType::Exact); + info!("done: purging"); + } else { + info!("skipping purging..."); + } + + info!("Poh is starting!"); + + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new_with_clear_signal( + bank.tick_height(), + bank.last_blockhash(), + bank.clone(), + None, + bank.ticks_per_slot(), + false, + blockstore.clone(), + blockstore.get_new_shred_signal(0), + &leader_schedule_cache, + &genesis_config.poh_config, + None, + exit.clone(), + ); + let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let poh_service = PohService::new( + poh_recorder.clone(), + &genesis_config.poh_config, + exit.clone(), + bank.ticks_per_slot(), + DEFAULT_PINNED_CPU_CORE, + DEFAULT_HASHES_PER_BATCH, + record_receiver, + ); + + // Enable BankingTracer to approximate the real environment as close as possible because + // it's not expected to disable BankingTracer on production environments. + // + // It's not likely for it to affect the banking stage performance noticeably. So, make sure + // that assumption is held here. That said, it incurs additional channel sending, + // SystemTime::now() and buffered seq IO, and indirectly functions as a background dropper + // of `BankingPacketBatch`. + // + // Lastly, the actual retraced events can be used to evaluate simulation timing accuracy in + // the future. 
+        let (retracer, retracer_thread) = BankingTracer::new(Some((
+            &blockstore.banking_retracer_path(),
+            exit.clone(),
+            BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT,
+        )))
+        .unwrap();
+        assert!(retracer.is_enabled());
+        info!(
+            "Enabled banking retracer (dir_byte_limit: {})",
+            BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT,
+        );
+
+        let (non_vote_sender, non_vote_receiver) = retracer.create_channel_non_vote();
+        let (tpu_vote_sender, tpu_vote_receiver) = retracer.create_channel_tpu_vote();
+        let (gossip_vote_sender, gossip_vote_receiver) = retracer.create_channel_gossip_vote();
+
+        let connection_cache = Arc::new(ConnectionCache::new("connection_cache_sim"));
+        let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+        let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
+        let shred_version = compute_shred_version(
+            &genesis_config.hash(),
+            Some(&bank_forks.read().unwrap().root_bank().hard_forks()),
+        );
+        let (sender, _receiver) = tokio::sync::mpsc::channel(1);
+
+        // Create a completely-dummy ClusterInfo for the broadcast stage.
+        // We only need it to write shreds into the blockstore, and it seems the given ClusterInfo
+        // is irrelevant to the necessary minimum work for this simulation.
+        let random_keypair = Arc::new(Keypair::new());
+        let cluster_info = Arc::new(ClusterInfo::new(
+            Node::new_localhost_with_pubkey(&random_keypair.pubkey()).info,
+            random_keypair,
+            SocketAddrSpace::Unspecified,
+        ));
+        // Broadcast stage is needed to save the simulated blocks for post-run analysis by
+        // inserting produced shreds into the blockstore.
+        let broadcast_stage = BroadcastStageType::Standard.new_broadcast_stage(
+            vec![UdpSocket::bind((Ipv4Addr::LOCALHOST, 0)).unwrap()],
+            cluster_info.clone(),
+            entry_receiver,
+            retransmit_slots_receiver,
+            exit.clone(),
+            blockstore.clone(),
+            bank_forks.clone(),
+            shred_version,
+            sender,
+        );
+
+        info!("Start banking stage!...");
+        // Create a partially-dummy ClusterInfo for the banking stage.
+        let cluster_info = Arc::new(DummyClusterInfo {
+            id: simulated_leader.into(),
+        });
+        let prioritization_fee_cache = &Arc::new(PrioritizationFeeCache::new(0u64));
+        let banking_stage = BankingStage::new_num_threads(
+            block_production_method.clone(),
+            &cluster_info,
+            &poh_recorder,
+            non_vote_receiver,
+            tpu_vote_receiver,
+            gossip_vote_receiver,
+            BankingStage::num_threads(),
+            None,
+            replay_vote_sender,
+            None,
+            connection_cache,
+            bank_forks.clone(),
+            prioritization_fee_cache,
+            false,
+        );
+
+        let (&_slot, &raw_base_event_time) = freeze_time_by_slot
+            .range(parent_slot..)
+            .next()
+            .expect("timed hashes");
+        let base_event_time = raw_base_event_time - WARMUP_DURATION;
+
+        let total_batch_count = packet_batches_by_time.len();
+        let timed_batches_to_send = packet_batches_by_time.split_off(&base_event_time);
+        let batch_and_tx_counts = timed_batches_to_send
+            .values()
+            .map(|(_label, batches_with_stats)| {
+                let batches = &batches_with_stats.0;
+                (
+                    batches.len(),
+                    batches.iter().map(|batch| batch.len()).sum::<usize>(),
+                )
+            })
+            .collect::<Vec<_>>();
+        // Convert to a large plain old Vec and drain on it, finally dropping it outside
+        // the simulation loop to avoid jitter due to interleaved deallocs of BTreeMap.
+ let timed_batches_to_send = timed_batches_to_send + .into_iter() + .map(|(event_time, batches)| { + (event_time.duration_since(base_event_time).unwrap(), batches) + }) + .zip_eq(batch_and_tx_counts) + .collect::>(); + + let sender_loop = SenderLoop { + parent_slot, + first_simulated_slot: self.first_simulated_slot, + non_vote_sender, + tpu_vote_sender, + gossip_vote_sender, + exit: exit.clone(), + raw_base_event_time, + total_batch_count, + timed_batches_to_send, + }; + + let simulator_loop = SimulatorLoop { + bank, + parent_slot, + first_simulated_slot: self.first_simulated_slot, + freeze_time_by_slot, + base_event_time, + poh_recorder, + simulated_leader, + bank_forks, + blockstore, + leader_schedule_cache, + retransmit_slots_sender, + retracer, + }; + + let simulator_threads = SimulatorThreads { + poh_service, + banking_stage, + broadcast_stage, + retracer_thread, + exit, + }; + + (sender_loop, simulator_loop, simulator_threads) + } + + pub fn start( + self, + genesis_config: GenesisConfig, + bank_forks: Arc>, + blockstore: Arc, + block_production_method: BlockProductionMethod, + ) -> Result<(), SimulateError> { + let (sender_loop, simulator_loop, simulator_threads) = self.prepare_simulation( + genesis_config, + bank_forks, + blockstore, + block_production_method, + ); + + sender_loop.log_starting(); + let base_simulation_time = SystemTime::now(); + // Spawning and entering these two loops must be done at the same time as they're timed. + // So, all the mundane setup must be done in advance. + let sender_thread = sender_loop.spawn(base_simulation_time)?; + let (sender_thread, retransmit_slots_sender) = + simulator_loop.enter(base_simulation_time, sender_thread); + + simulator_threads.finish(sender_thread, retransmit_slots_sender); + + Ok(()) + } + + pub fn event_file_name(index: usize) -> String { + if index == 0 { + BASENAME.to_string() + } else { + format!("{BASENAME}.{index}") + } + } +} diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 5c9768aa0bc215..8dff75832106a9 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -31,7 +31,7 @@ use { crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, histogram::Histogram, solana_client::connection_cache::ConnectionCache, - solana_gossip::cluster_info::ClusterInfo, + solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::blockstore_processor::TransactionStatusSender, solana_measure::measure_us, solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH}, @@ -40,9 +40,10 @@ use { bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, vote_sender_types::ReplayVoteSender, }, - solana_sdk::timing::AtomicInterval, + solana_sdk::{pubkey::Pubkey, timing::AtomicInterval}, std::{ cmp, env, + ops::Deref, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, RwLock, @@ -323,12 +324,33 @@ pub struct FilterForwardingResults { pub(crate) total_filter_packets_us: u64, } +pub trait LikeClusterInfo: Send + Sync + 'static + Clone { + fn id(&self) -> Pubkey; + + fn lookup_contact_info(&self, id: &Pubkey, map: F) -> Option + where + F: FnOnce(&ContactInfo) -> Y; +} + +impl LikeClusterInfo for Arc { + fn id(&self) -> Pubkey { + self.deref().id() + } + + fn lookup_contact_info(&self, id: &Pubkey, map: F) -> Option + where + F: FnOnce(&ContactInfo) -> Y, + { + self.deref().lookup_contact_info(id, map) + } +} + impl BankingStage { /// Create the stage using `bank`. Exit when `verified_receiver` is dropped. 
#[allow(clippy::too_many_arguments)] pub fn new( block_production_method: BlockProductionMethod, - cluster_info: &Arc, + cluster_info: &impl LikeClusterInfo, poh_recorder: &Arc>, non_vote_receiver: BankingPacketReceiver, tpu_vote_receiver: BankingPacketReceiver, @@ -362,7 +384,7 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] pub fn new_num_threads( block_production_method: BlockProductionMethod, - cluster_info: &Arc, + cluster_info: &impl LikeClusterInfo, poh_recorder: &Arc>, non_vote_receiver: BankingPacketReceiver, tpu_vote_receiver: BankingPacketReceiver, @@ -413,7 +435,7 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] pub fn new_thread_local_multi_iterator( - cluster_info: &Arc, + cluster_info: &impl LikeClusterInfo, poh_recorder: &Arc>, non_vote_receiver: BankingPacketReceiver, tpu_vote_receiver: BankingPacketReceiver, @@ -497,7 +519,7 @@ impl BankingStage { #[allow(clippy::too_many_arguments)] pub fn new_central_scheduler( - cluster_info: &Arc, + cluster_info: &impl LikeClusterInfo, poh_recorder: &Arc>, non_vote_receiver: BankingPacketReceiver, tpu_vote_receiver: BankingPacketReceiver, @@ -629,7 +651,7 @@ impl BankingStage { Self { bank_thread_hdls } } - fn spawn_thread_local_multi_iterator_thread( + fn spawn_thread_local_multi_iterator_thread( id: u32, packet_receiver: BankingPacketReceiver, bank_forks: Arc>, @@ -637,7 +659,7 @@ impl BankingStage { committer: Committer, transaction_recorder: TransactionRecorder, log_messages_bytes_limit: Option, - mut forwarder: Forwarder, + mut forwarder: Forwarder, unprocessed_transaction_storage: UnprocessedTransactionStorage, ) -> JoinHandle<()> { let mut packet_receiver = PacketReceiver::new(id, packet_receiver, bank_forks); @@ -664,9 +686,9 @@ impl BankingStage { } #[allow(clippy::too_many_arguments)] - fn process_buffered_packets( + fn process_buffered_packets( decision_maker: &DecisionMaker, - forwarder: &mut Forwarder, + forwarder: &mut Forwarder, consumer: &Consumer, unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, banking_stage_stats: &BankingStageStats, @@ -730,10 +752,10 @@ impl BankingStage { } } - fn process_loop( + fn process_loop( packet_receiver: &mut PacketReceiver, decision_maker: &DecisionMaker, - forwarder: &mut Forwarder, + forwarder: &mut Forwarder, consumer: &Consumer, id: u32, mut unprocessed_transaction_storage: UnprocessedTransactionStorage, diff --git a/core/src/banking_stage/forward_worker.rs b/core/src/banking_stage/forward_worker.rs index 6c9fd45e029c2f..61cf311f0a8cf8 100644 --- a/core/src/banking_stage/forward_worker.rs +++ b/core/src/banking_stage/forward_worker.rs @@ -4,6 +4,7 @@ use { scheduler_messages::{FinishedForwardWork, ForwardWork}, ForwardOption, }, + crate::banking_stage::LikeClusterInfo, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, thiserror::Error, }; @@ -16,19 +17,19 @@ pub enum ForwardWorkerError { Send(#[from] SendError), } -pub(crate) struct ForwardWorker { +pub(crate) struct ForwardWorker { forward_receiver: Receiver, forward_option: ForwardOption, - forwarder: Forwarder, + forwarder: Forwarder, forwarded_sender: Sender, } #[allow(dead_code)] -impl ForwardWorker { +impl ForwardWorker { pub fn new( forward_receiver: Receiver, forward_option: ForwardOption, - forwarder: Forwarder, + forwarder: Forwarder, forwarded_sender: Sender, ) -> Self { Self { @@ -90,6 +91,7 @@ mod tests { }, crossbeam_channel::unbounded, solana_client::connection_cache::ConnectionCache, + solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ 
blockstore::Blockstore, genesis_utils::GenesisConfigInfo, get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, @@ -121,7 +123,7 @@ mod tests { forwarded_receiver: Receiver, } - fn setup_test_frame() -> (TestFrame, ForwardWorker) { + fn setup_test_frame() -> (TestFrame, ForwardWorker>) { let GenesisConfigInfo { genesis_config, mint_keypair, diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 563c93861cd30e..82af221842dd0b 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -6,14 +6,15 @@ use { ForwardOption, }, crate::{ - banking_stage::immutable_deserialized_packet::ImmutableDeserializedPacket, + banking_stage::{ + immutable_deserialized_packet::ImmutableDeserializedPacket, LikeClusterInfo, + }, next_leader::{next_leader, next_leader_tpu_vote}, tracer_packet_stats::TracerPacketStats, }, solana_client::connection_cache::ConnectionCache, solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_feature_set::FeatureSet, - solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_perf::{data_budget::DataBudget, packet::Packet}, solana_poh::poh_recorder::PohRecorder, @@ -27,21 +28,21 @@ use { }, }; -pub struct Forwarder { +pub struct Forwarder { poh_recorder: Arc>, bank_forks: Arc>, socket: UdpSocket, - cluster_info: Arc, + cluster_info: T, connection_cache: Arc, data_budget: Arc, forward_packet_batches_by_accounts: ForwardPacketBatchesByAccounts, } -impl Forwarder { +impl Forwarder { pub fn new( poh_recorder: Arc>, bank_forks: Arc>, - cluster_info: Arc, + cluster_info: T, connection_cache: Arc, data_budget: Arc, ) -> Self { @@ -307,7 +308,7 @@ mod tests { unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, unprocessed_transaction_storage::ThreadType, }, - solana_gossip::cluster_info::Node, + solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{blockstore::Blockstore, genesis_utils::GenesisConfigInfo}, solana_perf::packet::PacketFlags, solana_poh::{poh_recorder::create_test_recorder, poh_service::PohService}, diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index b576fd1576511d..9966a0527d0286 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -19,7 +19,7 @@ use { forwarder::Forwarder, immutable_deserialized_packet::ImmutableDeserializedPacket, packet_deserializer::PacketDeserializer, - ForwardOption, TOTAL_BUFFERED_PACKETS, + ForwardOption, LikeClusterInfo, TOTAL_BUFFERED_PACKETS, }, arrayvec::ArrayVec, crossbeam_channel::RecvTimeoutError, @@ -44,7 +44,7 @@ use { }; /// Controls packet and transaction flow into scheduler, and scheduling execution. -pub(crate) struct SchedulerController { +pub(crate) struct SchedulerController { /// Decision maker for determining what should be done with transactions. decision_maker: DecisionMaker, /// Packet/Transaction ingress. @@ -68,17 +68,17 @@ pub(crate) struct SchedulerController { /// Metric report handles for the worker threads. worker_metrics: Vec>, /// State for forwarding packets to the leader, if enabled. 
- forwarder: Option, + forwarder: Option>, } -impl SchedulerController { +impl SchedulerController { pub fn new( decision_maker: DecisionMaker, packet_deserializer: PacketDeserializer, bank_forks: Arc>, scheduler: PrioGraphScheduler, worker_metrics: Vec>, - forwarder: Option, + forwarder: Option>, ) -> Self { Self { decision_maker, @@ -670,6 +670,7 @@ mod tests { }, crossbeam_channel::{unbounded, Receiver, Sender}, itertools::Itertools, + solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ blockstore::Blockstore, genesis_utils::GenesisConfigInfo, get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, @@ -705,7 +706,7 @@ mod tests { finished_consume_work_sender: Sender, } - fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController) { + fn create_test_frame(num_threads: usize) -> (TestFrame, SchedulerController>) { let GenesisConfigInfo { mut genesis_config, mint_keypair, @@ -800,7 +801,9 @@ mod tests { // in order to keep the decision as recent as possible for processing. // In the tests, the decision will not become stale, so it is more convenient // to receive first and then schedule. - fn test_receive_then_schedule(scheduler_controller: &mut SchedulerController) { + fn test_receive_then_schedule( + scheduler_controller: &mut SchedulerController>, + ) { let decision = scheduler_controller .decision_maker .make_consume_or_forward_decision(); diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs index cc077dfa2c2755..150e9a33e1940f 100644 --- a/core/src/banking_trace.rs +++ b/core/src/banking_trace.rs @@ -42,7 +42,7 @@ pub enum TraceError { TooSmallDirByteLimit(DirByteLimit, DirByteLimit), } -const BASENAME: &str = "events"; +pub(crate) const BASENAME: &str = "events"; const TRACE_FILE_ROTATE_COUNT: u64 = 14; // target 2 weeks retention under normal load const TRACE_FILE_WRITE_INTERVAL_MS: u64 = 100; const BUF_WRITER_CAPACITY: usize = 10 * 1024 * 1024; @@ -359,6 +359,14 @@ impl TracedSender { } self.sender.send(batch) } + + pub fn len(&self) -> usize { + self.sender.len() + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } } #[cfg(any(test, feature = "dev-context-only-utils"))] diff --git a/core/src/lib.rs b/core/src/lib.rs index da9d69ed508875..2ba671ca62b580 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -10,6 +10,7 @@ pub mod accounts_hash_verifier; pub mod admin_rpc_post_init; +pub mod banking_simulation; pub mod banking_stage; pub mod banking_trace; pub mod cache_block_meta_service; diff --git a/core/src/next_leader.rs b/core/src/next_leader.rs index 7e77ecd869e4a1..738e728dcc4a30 100644 --- a/core/src/next_leader.rs +++ b/core/src/next_leader.rs @@ -1,4 +1,5 @@ use { + crate::banking_stage::LikeClusterInfo, itertools::Itertools, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_poh::poh_recorder::PohRecorder, @@ -34,14 +35,14 @@ pub(crate) fn upcoming_leader_tpu_vote_sockets( } pub(crate) fn next_leader_tpu_vote( - cluster_info: &ClusterInfo, + cluster_info: &impl LikeClusterInfo, poh_recorder: &RwLock, ) -> Option<(Pubkey, SocketAddr)> { next_leader(cluster_info, poh_recorder, ContactInfo::tpu_vote) } pub(crate) fn next_leader( - cluster_info: &ClusterInfo, + cluster_info: &impl LikeClusterInfo, poh_recorder: &RwLock, port_selector: F, ) -> Option<(Pubkey, SocketAddr)> diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 23583068fb13b3..55d905047990a5 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -106,6 +106,16 
@@ impl Default for CostTracker { } impl CostTracker { + pub fn new_from_parent_limits(&self) -> Self { + let mut new = Self::default(); + new.set_limits( + self.account_cost_limit, + self.block_cost_limit, + self.vote_cost_limit, + ); + new + } + pub fn reset(&mut self) { self.cost_by_writable_accounts.clear(); self.block_cost = 0; @@ -192,6 +202,10 @@ impl CostTracker { self.block_cost } + pub fn vote_cost(&self) -> u64 { + self.vote_cost + } + pub fn transaction_count(&self) -> u64 { self.transaction_count } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 39061de55f2382..abfa07ade49d38 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -32,7 +32,7 @@ solana-bpf-loader-program = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-output = { workspace = true } solana-compute-budget = { workspace = true } -solana-core = { workspace = true } +solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-cost-model = { workspace = true } solana-entry = { workspace = true } solana-feature-set = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index d2bc0f691e130f..d01c542465256f 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -214,6 +214,8 @@ pub fn parse_process_options(ledger_path: &Path, arg_matches: &ArgMatches<'_>) - let debug_keys = pubkeys_of(arg_matches, "debug_key") .map(|pubkeys| Arc::new(pubkeys.into_iter().collect::>())); let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let abort_on_invalid_block = arg_matches.is_present("abort_on_invalid_block"); + let no_block_cost_limits = arg_matches.is_present("no_block_cost_limits"); ProcessOptions { new_hard_forks, @@ -230,6 +232,8 @@ pub fn parse_process_options(ledger_path: &Path, arg_matches: &ArgMatches<'_>) - allow_dead_slots, halt_at_slot, use_snapshot_archives_at_startup, + abort_on_invalid_block, + no_block_cost_limits, ..ProcessOptions::default() } } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 0c74d53f3e41e2..a9b9a864bd9b42 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -31,13 +31,14 @@ use { }, solana_cli_output::OutputFormat, solana_core::{ + banking_simulation::{BankingSimulator, BankingTraceEvents}, system_monitor_service::{SystemMonitorService, SystemMonitorStatsReportConfig}, - validator::BlockVerificationMethod, + validator::{BlockProductionMethod, BlockVerificationMethod}, }, solana_cost_model::{cost_model::CostModel, cost_tracker::CostTracker}, solana_feature_set::{self as feature_set, FeatureSet}, solana_ledger::{ - blockstore::{create_new_ledger, Blockstore}, + blockstore::{banking_trace_path, create_new_ledger, Blockstore}, blockstore_options::{AccessType, LedgerColumnOptions}, blockstore_processor::{ ProcessSlotCallback, TransactionStatusMessage, TransactionStatusSender, @@ -83,8 +84,8 @@ use { }, std::{ collections::{HashMap, HashSet}, - ffi::OsStr, - fs::File, + ffi::{OsStr, OsString}, + fs::{read_dir, File}, io::{self, Write}, mem::swap, path::{Path, PathBuf}, @@ -536,6 +537,70 @@ fn assert_capitalization(bank: &Bank) { assert!(bank.calculate_and_verify_capitalization(debug_verify)); } +fn load_banking_trace_events_or_exit(ledger_path: &Path) -> BankingTraceEvents { + let file_paths = read_banking_trace_event_file_paths_or_exit(banking_trace_path(ledger_path)); + + info!("Using: banking trace event files: {file_paths:?}"); + match BankingTraceEvents::load(&file_paths) { + Ok(banking_trace_events) => 
banking_trace_events, + Err(error) => { + eprintln!("Failed to load banking trace events: {error:?}"); + exit(1) + } + } +} + +fn read_banking_trace_event_file_paths_or_exit(banking_trace_path: PathBuf) -> Vec { + info!("Using: banking trace events dir: {banking_trace_path:?}"); + + let entries = match read_dir(&banking_trace_path) { + Ok(entries) => entries, + Err(error) => { + eprintln!("Error: failed to open banking_trace_path: {error:?}"); + exit(1); + } + }; + + let mut entry_names = entries + .flat_map(|entry| entry.ok().map(|entry| entry.file_name())) + .collect::>(); + + let mut event_file_paths = vec![]; + + if entry_names.is_empty() { + warn!("banking_trace_path dir is empty."); + return event_file_paths; + } + + for index in 0.. { + let event_file_name: OsString = BankingSimulator::event_file_name(index).into(); + if entry_names.remove(&event_file_name) { + event_file_paths.push(banking_trace_path.join(event_file_name)); + } else { + break; + } + } + + if event_file_paths.is_empty() { + warn!("Error: no event files found"); + } + + if !entry_names.is_empty() { + let full_names = entry_names + .into_iter() + .map(|name| banking_trace_path.join(name)) + .collect::>(); + warn!( + "Some files in {banking_trace_path:?} are ignored due to gapped events file rotation \ + or unrecognized names: {full_names:?}" + ); + } + + // Reverse to load in the chronological order (note that this isn't strictly needed) + event_file_paths.reverse(); + event_file_paths +} + struct SlotRecorderConfig { transaction_recorder: Option>, transaction_status_sender: Option, @@ -1146,6 +1211,30 @@ fn main() { "geyser_plugin_config", ]) .help("In addition to the bank hash, optionally include accounts and/or transactions details for the slot"), + ) + .arg( + Arg::with_name("abort_on_invalid_block") + .long("abort-on-invalid-block") + .takes_value(false) + .help( + "Exits with failed status early as soon as any bad block is detected", + ), + ) + .arg( + Arg::with_name("no_block_cost_limits") + .long("no-block-cost-limits") + .takes_value(false) + .help("Disable block cost limits effectively by setting them to the max"), + ) + .arg( + Arg::with_name("enable_hash_overrides") + .long("enable-hash-overrides") + .takes_value(false) + .help( + "Enable override of blockhashes and bank hashes from banking trace \ + event files to correctly verify blocks produced by \ + the simulate-block-production subcommand", + ), + ), + ) .subcommand( @@ -1388,6 +1477,36 @@ fn main() { .help("If snapshot creation should succeed with a capitalization delta."), ), ) + .subcommand( + SubCommand::with_name("simulate-block-production") + .about("Simulate producing blocks with banking trace event files in the ledger") + .arg(&load_genesis_config_arg) + .args(&accounts_db_config_args) + .args(&snapshot_config_args) + .arg( + Arg::with_name("block_production_method") + .long("block-production-method") + .value_name("METHOD") + .takes_value(true) + .possible_values(BlockProductionMethod::cli_names()) + .help(BlockProductionMethod::cli_message()), + ) + .arg( + Arg::with_name("first_simulated_slot") + .long("first-simulated-slot") + .value_name("SLOT") + .validator(is_slot) + .takes_value(true) + .required(true) + .help("Start simulation at the given slot") + ) + .arg( + Arg::with_name("no_block_cost_limits") + .long("no-block-cost-limits") + .takes_value(false) + .help("Disable block cost limits effectively by setting them to the max"), + ), + ) .subcommand( SubCommand::with_name("accounts") .about("Print account stats and contents after processing the
ledger") @@ -1662,6 +1781,12 @@ fn main() { ); let mut process_options = parse_process_options(&ledger_path, arg_matches); + if arg_matches.is_present("enable_hash_overrides") { + let banking_trace_events = load_banking_trace_events_or_exit(&ledger_path); + process_options.hash_overrides = + Some(banking_trace_events.hash_overrides().clone()); + } + let (slot_callback, slot_recorder_config) = setup_slot_recording(arg_matches); process_options.slot_callback = slot_callback; let transaction_status_sender = slot_recorder_config @@ -2350,6 +2475,61 @@ fn main() { system_monitor_service.join().unwrap(); } } + ("simulate-block-production", Some(arg_matches)) => { + let mut process_options = parse_process_options(&ledger_path, arg_matches); + + let banking_trace_events = load_banking_trace_events_or_exit(&ledger_path); + process_options.hash_overrides = + Some(banking_trace_events.hash_overrides().clone()); + + let slot = value_t!(arg_matches, "first_simulated_slot", Slot).unwrap(); + let simulator = BankingSimulator::new(banking_trace_events, slot); + let Some(parent_slot) = simulator.parent_slot() else { + eprintln!( + "Couldn't determine parent_slot of first_simulated_slot: {slot} \ + due to missing banking_trace_event data." + ); + exit(1); + }; + process_options.halt_at_slot = Some(parent_slot); + + let blockstore = Arc::new(open_blockstore( + &ledger_path, + arg_matches, + AccessType::Primary, // needed for purging already existing simulated block shreds... + )); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let LoadAndProcessLedgerOutput { bank_forks, .. } = + load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + blockstore.clone(), + process_options, + None, // transaction status sender + ); + + let block_production_method = value_t!( + arg_matches, + "block_production_method", + BlockProductionMethod + ) + .unwrap_or_default(); + + info!("Using: block-production-method: {block_production_method}"); + + match simulator.start( + genesis_config, + bank_forks, + blockstore, + block_production_method, + ) { + Ok(()) => println!("Ok"), + Err(error) => { + eprintln!("{error:?}"); + exit(1); + } + }; + } ("accounts", Some(arg_matches)) => { let process_options = parse_process_options(&ledger_path, arg_matches); let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 2101896d9a0558..54f612483be958 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -299,6 +299,14 @@ impl SlotMetaWorkingSetEntry { } } +pub fn banking_trace_path(path: &Path) -> PathBuf { + path.join("banking_trace") +} + +pub fn banking_retrace_path(path: &Path) -> PathBuf { + path.join("banking_retrace") +} + impl Blockstore { pub fn db(self) -> Arc { self.db @@ -309,7 +317,11 @@ impl Blockstore { } pub fn banking_trace_path(&self) -> PathBuf { - self.ledger_path.join("banking_trace") + banking_trace_path(&self.ledger_path) + } + + pub fn banking_retracer_path(&self) -> PathBuf { + banking_retrace_path(&self.ledger_path) } /// Opens a Ledger in directory, provides "infinite" window of shreds diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index b34bdee591dd9c..89a013531f407c 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "dev-context-only-utils")] -use qualifier_attr::qualifiers; use { crate::{ block_error::BlockError, @@ -76,6 +74,8 @@ use { thiserror::Error, 
ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen}, }; +#[cfg(feature = "dev-context-only-utils")] +use {qualifier_attr::qualifiers, solana_runtime::bank::HashOverrides}; pub struct TransactionBatchWithIndexes<'a, 'b> { pub batch: TransactionBatch<'a, 'b>, @@ -768,6 +768,10 @@ pub struct ProcessOptions { /// This is useful for debugging. pub run_final_accounts_hash_calc: bool, pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, + #[cfg(feature = "dev-context-only-utils")] + pub hash_overrides: Option, + pub abort_on_invalid_block: bool, + pub no_block_cost_limits: bool, } pub fn test_process_blockstore( @@ -903,6 +907,20 @@ pub fn process_blockstore_from_root( // Starting slot must be a root, and thus has no parents assert_eq!(bank_forks.read().unwrap().banks().len(), 1); let bank = bank_forks.read().unwrap().root_bank(); + #[cfg(feature = "dev-context-only-utils")] + if let Some(hash_overrides) = &opts.hash_overrides { + info!( + "Will override following slots' hashes: {:#?}", + hash_overrides + ); + bank.set_hash_overrides(hash_overrides.clone()); + } + if opts.no_block_cost_limits { + warn!("setting block cost limits to MAX"); + bank.write_cost_tracker() + .unwrap() + .set_limits(u64::MAX, u64::MAX, u64::MAX); + } assert!(bank.parent().is_none()); (bank.slot(), bank.hash()) }; @@ -1837,7 +1855,7 @@ fn load_frozen_forks( let mut progress = ConfirmationProgress::new(last_entry_hash); let mut m = Measure::start("process_single_slot"); let bank = bank_forks.write().unwrap().insert_from_ledger(bank); - if process_single_slot( + if let Err(error) = process_single_slot( blockstore, &bank, replay_tx_thread_pool, @@ -1849,10 +1867,11 @@ fn load_frozen_forks( entry_notification_sender, None, timing, - ) - .is_err() - { + ) { assert!(bank_forks.write().unwrap().remove(bank.slot()).is_some()); + if opts.abort_on_invalid_block { + Err(error)? + } continue; } txs += progress.num_txs; @@ -2055,6 +2074,13 @@ pub fn process_single_slot( replay_vote_sender, timing, ) + .and_then(|()| { + if let Some((result, completed_timings)) = bank.wait_for_completed_scheduler() { + timing.accumulate(&completed_timings); + result? + } + Ok(()) + }) .map_err(|err| { let slot = bank.slot(); warn!("slot {} failed to verify: {}", slot, err); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e0072749a4c078..ee6e785a1e6b68 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -506,6 +506,8 @@ impl PartialEq for Bank { if std::ptr::eq(self, other) { return true; } + // Suppress rustfmt until https://github.com/rust-lang/rustfmt/issues/5920 is fixed ... + #[rustfmt::skip] let Self { skipped_rewrites: _, rc: _, @@ -544,6 +546,8 @@ impl PartialEq for Bank { stakes_cache, epoch_stakes, is_delta, + #[cfg(feature = "dev-context-only-utils")] + hash_overrides, // TODO: Confirm if all these fields are intentionally ignored! rewards: _, cluster_type: _, @@ -601,6 +605,10 @@ impl PartialEq for Bank { && *stakes_cache.stakes() == *other.stakes_cache.stakes() && epoch_stakes == &other.epoch_stakes && is_delta.load(Relaxed) == other.is_delta.load(Relaxed) + // No deadlock is possible when Arc::ptr_eq() returns false, because they are + // different Mutexes.
+ && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) || + *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap()) } } @@ -669,6 +677,50 @@ pub trait DropCallback: fmt::Debug { #[derive(Debug, Default)] pub struct OptionalDropCallback(Option>); +#[derive(Default, Debug, Clone, PartialEq)] +#[cfg(feature = "dev-context-only-utils")] +pub struct HashOverrides { + hashes: HashMap, +} + +#[cfg(feature = "dev-context-only-utils")] +impl HashOverrides { + fn get_hash_override(&self, slot: Slot) -> Option<&HashOverride> { + self.hashes.get(&slot) + } + + fn get_blockhash_override(&self, slot: Slot) -> Option<&Hash> { + self.get_hash_override(slot) + .map(|hash_override| &hash_override.blockhash) + } + + fn get_bank_hash_override(&self, slot: Slot) -> Option<&Hash> { + self.get_hash_override(slot) + .map(|hash_override| &hash_override.bank_hash) + } + + pub fn add_override(&mut self, slot: Slot, blockhash: Hash, bank_hash: Hash) { + let is_new = self + .hashes + .insert( + slot, + HashOverride { + blockhash, + bank_hash, + }, + ) + .is_none(); + assert!(is_new); + } +} + +#[derive(Debug, Clone, PartialEq)] +#[cfg(feature = "dev-context-only-utils")] +struct HashOverride { + blockhash: Hash, + bank_hash: Hash, +} + /// Manager for the state of all accounts and programs after processing its entries. #[derive(Debug)] pub struct Bank { @@ -845,6 +897,11 @@ pub struct Bank { /// Fee structure to use for assessing transaction fees. fee_structure: FeeStructure, + + /// blockhash and bank_hash overrides keyed by slot for simulated block production. + /// This _field_ needed to be DCOU-ed to avoid taking 2 locks per bank freezing... + #[cfg(feature = "dev-context-only-utils")] + hash_overrides: Arc>, } struct VoteWithStakeDelegations { @@ -963,6 +1020,8 @@ impl Bank { compute_budget: None, transaction_account_lock_limit: None, fee_structure: FeeStructure::default(), + #[cfg(feature = "dev-context-only-utils")] + hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), }; bank.transaction_processor = @@ -1204,7 +1263,7 @@ impl Bank { .map(|drop_callback| drop_callback.clone_box()), )), freeze_started: AtomicBool::new(false), - cost_tracker: RwLock::new(CostTracker::default()), + cost_tracker: RwLock::new(parent.read_cost_tracker().unwrap().new_from_parent_limits()), accounts_data_size_initial, accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), @@ -1215,6 +1274,8 @@ impl Bank { compute_budget: parent.compute_budget, transaction_account_lock_limit: parent.transaction_account_lock_limit, fee_structure: parent.fee_structure.clone(), + #[cfg(feature = "dev-context-only-utils")] + hash_overrides: parent.hash_overrides.clone(), }; let (_, ancestors_time_us) = measure_us!({ @@ -1591,6 +1652,8 @@ impl Bank { compute_budget: runtime_config.compute_budget, transaction_account_lock_limit: runtime_config.transaction_account_lock_limit, fee_structure: FeeStructure::default(), + #[cfg(feature = "dev-context-only-utils")] + hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), }; bank.transaction_processor = @@ -3139,6 +3202,27 @@ impl Bank { // readers can starve this write lock acquisition and ticks would be slowed down too // much if the write lock is acquired for each tick.
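The overrides consulted by the freezing path below are installed through set_hash_overrides() (added further down); ledger-tool assembles them from banking trace events. A hedged sketch of populating them — it assumes the agave workspace crates and the `dev-context-only-utils` feature, and the slot number and hashes are placeholders:

    use solana_runtime::bank::{Bank, HashOverrides};
    use solana_sdk::hash::Hash;

    // Sketch only: mirrors the HashOverrides API defined above.
    fn install_placeholder_overrides(bank: &Bank) {
        let mut hash_overrides = HashOverrides::default();
        // add_override() asserts that each slot is inserted at most once.
        hash_overrides.add_override(100, Hash::new_unique(), Hash::new_unique());
        bank.set_hash_overrides(hash_overrides);
        // Freezing slot 100 now registers the overridden blockhash and reports
        // the overridden bank hash, per the code that follows.
    }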
let mut w_blockhash_queue = self.blockhash_queue.write().unwrap(); + + #[cfg(feature = "dev-context-only-utils")] + let blockhash_override = self + .hash_overrides + .lock() + .unwrap() + .get_blockhash_override(self.slot()) + .copied() + .inspect(|blockhash_override| { + if blockhash_override != blockhash { + info!( + "bank: slot: {}: overrode blockhash: {} with {}", + self.slot(), + blockhash, + blockhash_override + ); + } + }); + #[cfg(feature = "dev-context-only-utils")] + let blockhash = blockhash_override.as_ref().unwrap_or(blockhash); + w_blockhash_queue.register_hash(blockhash, self.fee_rate_governor.lamports_per_signature); self.update_recent_blockhashes_locked(&w_blockhash_queue); } @@ -5264,6 +5348,29 @@ impl Bank { hash = hard_forked_hash; } + #[cfg(feature = "dev-context-only-utils")] + let hash_override = self + .hash_overrides + .lock() + .unwrap() + .get_bank_hash_override(slot) + .copied() + .inspect(|&hash_override| { + if hash_override != hash { + info!( + "bank: slot: {}: overrode bank hash: {} with {}", + self.slot(), + hash, + hash_override + ); + } + }); + // Avoid optimizing out `hash` along with the whole computation by the super smart rustc. + // hash_override is used by ledger-tool's simulate-block-production, which prefers + // the actual bank freezing processing for accurate simulation. + #[cfg(feature = "dev-context-only-utils")] + let hash = hash_override.unwrap_or(std::hint::black_box(hash)); + let bank_hash_stats = self .rc .accounts @@ -6821,6 +6928,10 @@ impl Bank { None => Err(TransactionError::AccountNotFound), } } + + pub fn set_hash_overrides(&self, hash_overrides: HashOverrides) { + *self.hash_overrides.lock().unwrap() = hash_overrides; + } } /// Compute how much an account has changed size. This function is useful when the data size delta diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index 99ef4b53a0b94d..45af1f26dfd183 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -26,7 +26,7 @@ use { }, std::{ collections::{HashMap, HashSet}, - error, + env, error, fmt::{self, Display}, net::SocketAddr, path::{Path, PathBuf}, @@ -266,7 +266,12 @@ impl AdminRpc for AdminRpcImpl { // (rocksdb background processing or some other stuck thread perhaps?). // // If the process is still alive after five seconds, exit harder - thread::sleep(Duration::from_secs(5)); + thread::sleep(Duration::from_secs( + env::var("SOLANA_VALIDATOR_EXIT_TIMEOUT") + .ok() + .and_then(|x| x.parse().ok()) + .unwrap_or(5), + )); warn!("validator exit timeout"); std::process::exit(0); }) From 383b49fdc898ff637c2a01f886fed4b0989ea743 Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Wed, 11 Sep 2024 10:27:15 -0500 Subject: [PATCH 335/529] Publish Agave docs (#2887) * Add publish-docs.sh to build.sh * Add debugging echos * Fake being in CI * Update vercel doc project names * Remove debugging echos. Remove CI var. --- docs/build.sh | 11 +++++++++++ docs/publish-docs.sh | 8 ++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/docs/build.sh b/docs/build.sh index 4a122678a19b09..cc84d82c68bf05 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -15,3 +15,14 @@ source ../ci/rust-version.sh # Build from /src into /build npm run build echo $?
+ +# Publish only from merge commits and beta release tags +if [[ -n $CI ]]; then + if [[ -z $CI_PULL_REQUEST ]]; then + if [[ -n $CI_TAG ]] && [[ $CI_TAG != $BETA_CHANNEL* ]]; then + echo "not a beta tag" + exit 0 + fi + ./publish-docs.sh + fi +fi diff --git a/docs/publish-docs.sh b/docs/publish-docs.sh index 0cbedcf882001d..c14e9bc9dc3dea 100755 --- a/docs/publish-docs.sh +++ b/docs/publish-docs.sh @@ -9,15 +9,15 @@ fi CONFIG_FILE=vercel.json if [[ -n $CI_TAG ]]; then - PROJECT_NAME=docs-solana-com + PROJECT_NAME=docs-anza-xyz else eval "$(../ci/channel-info.sh)" case $CHANNEL in edge) - PROJECT_NAME=edge-docs-solana-com + PROJECT_NAME=edge-docs-anza-xyz ;; beta) - PROJECT_NAME=beta-docs-solana-com + PROJECT_NAME=beta-docs-anza-xyz ;; *) PROJECT_NAME=docs @@ -151,4 +151,4 @@ EOF echo "VERCEL_TOKEN is undefined. Needed for Vercel authentication." exit 1 } -vercel deploy . --local-config="$CONFIG_FILE" --confirm --token "$VERCEL_TOKEN" --prod +vercel deploy . --local-config="$CONFIG_FILE" --yes --token "$VERCEL_TOKEN" --prod From 1184bc8c9acb9d5a3ad6a02db7a96113547ea3cc Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 11 Sep 2024 14:07:37 -0400 Subject: [PATCH 336/529] clippy: Fixes blocks_in_conditions warning (#2900) --- cli/src/cluster_query.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 3792a4689f2d3c..f5c53f0fdce359 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -1436,9 +1436,10 @@ pub fn process_ping( rpc_client: &RpcClient, ) -> ProcessResult { let (signal_sender, signal_receiver) = unbounded(); - match ctrlc::try_set_handler(move || { + let handler = move || { let _ = signal_sender.send(()); - }) { + }; + match ctrlc::try_set_handler(handler) { // It's possible to set the ctrl-c handler more than once in testing // situations, so let that case through Err(ctrlc::Error::MultipleHandlers) => {} From 1334fb5248390dfdd193feeec5fa8f86763668fc Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 11 Sep 2024 15:28:06 -0400 Subject: [PATCH 337/529] banking_stage: do not insert legacy vote ixs, refactor & unstaked (#2888) * banking_stage: do not insert legacy vote ixs, refactor & unstaked * pr feedback: use matches instead of separate fn --- core/src/banking_stage.rs | 10 +- core/src/banking_stage/forwarder.rs | 3 + .../banking_stage/latest_unprocessed_votes.rs | 296 +++++++++++++++--- .../unprocessed_transaction_storage.rs | 46 ++- 4 files changed, 297 insertions(+), 58 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 8dff75832106a9..6f22db2d41a87b 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -456,7 +456,10 @@ impl BankingStage { let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - NUM_VOTE_PROCESSING_THREADS) as usize); // Keeps track of extraneous vote transactions for the vote threads - let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + let latest_unprocessed_votes = { + let bank = bank_forks.read().unwrap().working_bank(); + Arc::new(LatestUnprocessedVotes::new(&bank)) + }; let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); let committer = Committer::new( @@ -539,7 +542,10 @@ impl BankingStage { // Once an entry has been recorded, its blockhash is registered with the bank. 
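Both constructor call sites in this commit (the hunk above and its twin just below) seed the vote storage from the working bank so that epoch stakes are available from the start. A short sketch of the intent, assuming the agave workspace crates (the diff itself is the authoritative wiring):

    // Stake-aware vote storage, mirroring the change in this hunk.
    let bank = bank_forks.read().unwrap().working_bank();
    let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new(&bank));
    // insert_batch() drops votes whose pubkey has zero stake in the cached
    // epoch stakes; cache_epoch_boundary_info(&bank) refreshes that cache once
    // the TPU vote thread observes a bank from a later epoch.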
let data_budget = Arc::new(DataBudget::default()); // Keeps track of extraneous vote transactions for the vote threads - let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + let latest_unprocessed_votes = { + let bank = bank_forks.read().unwrap().working_bank(); + Arc::new(LatestUnprocessedVotes::new(&bank)) + }; let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone()); let committer = Committer::new( diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 82af221842dd0b..41e8e09fb372d2 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -102,6 +102,9 @@ impl Forwarder { // load all accounts from address loader; let current_bank = self.bank_forks.read().unwrap().working_bank(); + // if we have crossed an epoch boundary, recache any state + unprocessed_transaction_storage.cache_epoch_boundary_info(¤t_bank); + // sanitize and filter packets that are no longer valid (could be too old, a duplicate of something // already processed), then add to forwarding buffer. let filter_forwarding_result = unprocessed_transaction_storage diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 0ddaaeafa4ac7e..b586a973cc3cd0 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -10,6 +10,7 @@ use { solana_sdk::{ account::from_account, clock::{Slot, UnixTimestamp}, + feature_set::{self}, hash::Hash, program_utils::limited_deserialize, pubkey::Pubkey, @@ -22,7 +23,7 @@ use { collections::HashMap, ops::DerefMut, sync::{ - atomic::{AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, RwLock, }, }, @@ -47,18 +48,23 @@ pub struct LatestValidatorVotePacket { } impl LatestValidatorVotePacket { - pub fn new(packet: Packet, vote_source: VoteSource) -> Result { + pub fn new( + packet: Packet, + vote_source: VoteSource, + deprecate_legacy_vote_ixs: bool, + ) -> Result { if !packet.meta().is_simple_vote_tx() { return Err(DeserializedPacketError::VoteTransactionError); } let vote = Arc::new(ImmutableDeserializedPacket::new(packet)?); - Self::new_from_immutable(vote, vote_source) + Self::new_from_immutable(vote, vote_source, deprecate_legacy_vote_ixs) } pub fn new_from_immutable( vote: Arc, vote_source: VoteSource, + deprecate_legacy_vote_ixs: bool, ) -> Result { let message = vote.transaction().get_message(); let (_, instruction) = message @@ -66,9 +72,20 @@ impl LatestValidatorVotePacket { .next() .ok_or(DeserializedPacketError::VoteTransactionError)?; + let instruction_filter = |ix: &VoteInstruction| { + if deprecate_legacy_vote_ixs { + matches!( + ix, + VoteInstruction::TowerSync(_) | VoteInstruction::TowerSyncSwitch(_, _), + ) + } else { + ix.is_single_vote_state_update() + } + }; + match limited_deserialize::(&instruction.data) { Ok(vote_state_update_instruction) - if vote_state_update_instruction.is_single_vote_state_update() => + if instruction_filter(&vote_state_update_instruction) => { let &pubkey = message .message @@ -127,26 +144,6 @@ impl LatestValidatorVotePacket { } } -pub(crate) fn weighted_random_order_by_stake<'a>( - bank: &Bank, - pubkeys: impl Iterator, -) -> impl Iterator + 'static { - // Efraimidis and Spirakis algo for weighted random sample without replacement - let staked_nodes = bank.current_epoch_staked_nodes(); - let mut pubkey_with_weight: Vec<(f64, Pubkey)> = pubkeys - .filter_map(|&pubkey| { - let stake 
= staked_nodes.get(&pubkey).copied().unwrap_or(0); - if stake == 0 { - None // Ignore votes from unstaked validators - } else { - Some((thread_rng().gen::().powf(1.0 / (stake as f64)), pubkey)) - } - }) - .collect::>(); - pubkey_with_weight.sort_by(|(w1, _), (w2, _)| w1.partial_cmp(w2).unwrap()); - pubkey_with_weight.into_iter().map(|(_, pubkey)| pubkey) -} - #[derive(Default, Debug)] pub(crate) struct VoteBatchInsertionMetrics { pub(crate) num_dropped_gossip: usize, @@ -157,11 +154,23 @@ pub(crate) struct VoteBatchInsertionMetrics { pub struct LatestUnprocessedVotes { latest_votes_per_pubkey: RwLock>>>, num_unprocessed_votes: AtomicUsize, + // These are only ever written to by the tpu vote thread + cached_staked_nodes: RwLock>>, + deprecate_legacy_vote_ixs: AtomicBool, + current_epoch: AtomicU64, } impl LatestUnprocessedVotes { - pub fn new() -> Self { - Self::default() + pub fn new(bank: &Bank) -> Self { + let deprecate_legacy_vote_ixs = bank + .feature_set + .is_active(&feature_set::deprecate_legacy_vote_ixs::id()); + Self { + cached_staked_nodes: RwLock::new(bank.current_epoch_staked_nodes().clone()), + current_epoch: AtomicU64::new(bank.epoch()), + deprecate_legacy_vote_ixs: AtomicBool::new(deprecate_legacy_vote_ixs), + ..Self::default() + } } pub fn len(&self) -> usize { @@ -172,6 +181,17 @@ impl LatestUnprocessedVotes { self.len() == 0 } + fn filter_unstaked_votes<'a>( + &'a self, + votes: impl Iterator + 'a, + ) -> impl Iterator + 'a { + let staked_nodes = self.cached_staked_nodes.read().unwrap(); + votes.filter(move |vote| { + let stake = staked_nodes.get(&vote.pubkey()).copied().unwrap_or(0); + stake > 0 + }) + } + pub(crate) fn insert_batch( &self, votes: impl Iterator, @@ -180,7 +200,7 @@ impl LatestUnprocessedVotes { let mut num_dropped_gossip = 0; let mut num_dropped_tpu = 0; - for vote in votes { + for vote in self.filter_unstaked_votes(votes) { if let Some(vote) = self.update_latest_vote(vote, should_replenish_taken_votes) { match vote.vote_source { VoteSource::Gossip => num_dropped_gossip += 1, @@ -291,6 +311,48 @@ impl LatestUnprocessedVotes { .and_then(|l| l.read().unwrap().timestamp()) } + #[cfg(test)] + pub(crate) fn set_staked_nodes(&self, staked_nodes: &[Pubkey]) { + let staked_nodes: HashMap = + staked_nodes.iter().map(|pk| (*pk, 1u64)).collect(); + *self.cached_staked_nodes.write().unwrap() = Arc::new(staked_nodes); + } + + fn weighted_random_order_by_stake(&self) -> impl Iterator { + // Efraimidis and Spirakis algo for weighted random sample without replacement + let staked_nodes = self.cached_staked_nodes.read().unwrap(); + let latest_votes_per_pubkey = self.latest_votes_per_pubkey.read().unwrap(); + let mut pubkey_with_weight: Vec<(f64, Pubkey)> = latest_votes_per_pubkey + .keys() + .filter_map(|&pubkey| { + let stake = staked_nodes.get(&pubkey).copied().unwrap_or(0); + if stake == 0 { + None // Ignore votes from unstaked validators + } else { + Some((thread_rng().gen::().powf(1.0 / (stake as f64)), pubkey)) + } + }) + .collect::>(); + pubkey_with_weight.sort_by(|(w1, _), (w2, _)| w1.partial_cmp(w2).unwrap()); + pubkey_with_weight.into_iter().map(|(_, pubkey)| pubkey) + } + + /// Recache the staked nodes based on a bank from the new epoch. 
+ /// This should only be run by the TPU vote thread + pub(super) fn cache_epoch_boundary_info(&self, bank: &Bank) { + if bank.epoch() <= self.current_epoch.load(Ordering::Relaxed) { + return; + } + let mut staked_nodes = self.cached_staked_nodes.write().unwrap(); + *staked_nodes = bank.current_epoch_staked_nodes().clone(); + self.current_epoch.store(bank.epoch(), Ordering::Relaxed); + self.deprecate_legacy_vote_ixs.store( + bank.feature_set + .is_active(&feature_set::deprecate_legacy_vote_ixs::id()), + Ordering::Relaxed, + ); + } + /// Returns how many packets were forwardable /// Performs a weighted random order based on stake and stops forwarding at the first error /// Votes from validators with 0 stakes are ignored @@ -299,11 +361,7 @@ impl LatestUnprocessedVotes { bank: Arc, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> usize { - let pubkeys_by_stake = { - let binding = self.latest_votes_per_pubkey.read().unwrap(); - weighted_random_order_by_stake(&bank, binding.keys()) - }; - + let pubkeys_by_stake = self.weighted_random_order_by_stake(); let mut forwarded_count: usize = 0; for pubkey in pubkeys_by_stake { let Some(vote) = self.get_entry(pubkey) else { @@ -361,11 +419,7 @@ impl LatestUnprocessedVotes { ); } - let pubkeys_by_stake = { - let binding = self.latest_votes_per_pubkey.read().unwrap(); - weighted_random_order_by_stake(&bank, binding.keys()) - }; - pubkeys_by_stake + self.weighted_random_order_by_stake() .filter_map(|pubkey| { self.get_entry(pubkey).and_then(|lock| { let mut latest_vote = lock.write().unwrap(); @@ -410,6 +464,10 @@ impl LatestUnprocessedVotes { } }); } + + pub(super) fn should_deprecate_legacy_vote_ixs(&self) -> bool { + self.deprecate_legacy_vote_ixs.load(Ordering::Relaxed) + } } #[cfg(test)] @@ -424,7 +482,10 @@ mod tests { epoch_stakes::EpochStakes, genesis_utils::{self, ValidatorVoteKeypairs}, }, - solana_sdk::{hash::Hash, signature::Signer, system_transaction::transfer}, + solana_sdk::{ + epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, genesis_config::GenesisConfig, hash::Hash, + signature::Signer, system_transaction::transfer, + }, solana_vote_program::{ vote_state::TowerSync, vote_transaction::new_tower_sync_transaction, }, @@ -452,7 +513,7 @@ mod tests { .meta_mut() .flags .set(PacketFlags::SIMPLE_VOTE_TX, true); - LatestValidatorVotePacket::new(packet, vote_source).unwrap() + LatestValidatorVotePacket::new(packet, vote_source, true).unwrap() } fn deserialize_packets<'a>( @@ -461,7 +522,8 @@ mod tests { vote_source: VoteSource, ) -> impl Iterator + 'a { packet_indexes.iter().filter_map(move |packet_index| { - LatestValidatorVotePacket::new(packet_batch[*packet_index].clone(), vote_source).ok() + LatestValidatorVotePacket::new(packet_batch[*packet_index].clone(), vote_source, true) + .ok() }) } @@ -541,9 +603,13 @@ mod tests { #[test] fn test_update_latest_vote() { - let latest_unprocessed_votes = LatestUnprocessedVotes::new(); + let latest_unprocessed_votes = LatestUnprocessedVotes::default(); let keypair_a = ValidatorVoteKeypairs::new_rand(); let keypair_b = ValidatorVoteKeypairs::new_rand(); + latest_unprocessed_votes.set_staked_nodes(&[ + keypair_a.node_keypair.pubkey(), + keypair_b.node_keypair.pubkey(), + ]); let vote_a = from_slots(vec![(0, 2), (1, 1)], VoteSource::Gossip, &keypair_a, None); let vote_b = from_slots( @@ -744,7 +810,7 @@ mod tests { fn test_update_latest_vote_race() { // There was a race condition in updating the same pubkey in the hashmap // when the entry does not initially exist. 
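The Efraimidis-Spirakis keying that weighted_random_order_by_stake() applies above is easy to try on its own. A self-contained sketch with made-up stakes, using the same rand calls as the real code:

    use rand::{thread_rng, Rng};

    fn main() {
        // Made-up (label, stake) pairs; zero-stake entries are skipped, as in
        // the banking stage code.
        let stakes = [("validator_a", 10_u64), ("validator_b", 1_000), ("validator_c", 0)];
        let mut rng = thread_rng();
        let mut keyed: Vec<(f64, &str)> = stakes
            .into_iter()
            .filter(|(_, stake)| *stake > 0) // ignore unstaked validators
            .map(|(name, stake)| (rng.gen::<f64>().powf(1.0 / (stake as f64)), name))
            .collect();
        // Sorting by the random key u^(1/stake) yields a stake-weighted random
        // permutation without replacement (Efraimidis & Spirakis).
        keyed.sort_by(|(w1, _), (w2, _)| w1.partial_cmp(w2).unwrap());
        for (_key, name) in keyed {
            println!("{name}");
        }
    }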
- let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::default()); const NUM_VOTES: usize = 100; let keypairs = Arc::new( @@ -752,6 +818,11 @@ mod tests { .map(|_| ValidatorVoteKeypairs::new_rand()) .collect_vec(), ); + let staked_nodes = keypairs + .iter() + .map(|kp| kp.node_keypair.pubkey()) + .collect_vec(); + latest_unprocessed_votes.set_staked_nodes(&staked_nodes); // Insert votes in parallel let insert_vote = |latest_unprocessed_votes: &LatestUnprocessedVotes, @@ -783,7 +854,7 @@ mod tests { #[test] fn test_simulate_threads() { - let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::new()); + let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::default()); let latest_unprocessed_votes_tpu = latest_unprocessed_votes.clone(); let keypairs = Arc::new( (0..10) @@ -791,6 +862,11 @@ mod tests { .collect_vec(), ); let keypairs_tpu = keypairs.clone(); + let staked_nodes = keypairs + .iter() + .map(|kp| kp.node_keypair.pubkey()) + .collect_vec(); + latest_unprocessed_votes.set_staked_nodes(&staked_nodes); let vote_limit = 1000; let gossip = Builder::new() @@ -846,11 +922,17 @@ mod tests { #[test] fn test_forwardable_packets() { - let latest_unprocessed_votes = LatestUnprocessedVotes::new(); - let mut bank = Bank::default_for_tests(); + let latest_unprocessed_votes = LatestUnprocessedVotes::default(); + let bank_0 = Bank::new_for_tests(&GenesisConfig::default()); + let mut bank = Bank::new_from_parent( + Arc::new(bank_0), + &Pubkey::new_unique(), + MINIMUM_SLOTS_PER_EPOCH, + ); + assert_eq!(bank.epoch(), 1); bank.set_epoch_stakes_for_test( - bank.epoch().saturating_add(1), - EpochStakes::new_for_tests(HashMap::new(), bank.epoch().saturating_add(1)), + bank.epoch().saturating_add(2), + EpochStakes::new_for_tests(HashMap::new(), bank.epoch().saturating_add(2)), ); let bank = Arc::new(bank); let mut forward_packet_batches_by_accounts = @@ -864,7 +946,8 @@ mod tests { latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); - // Don't forward 0 stake accounts + // Recache on epoch boundary and don't forward 0 stake accounts + latest_unprocessed_votes.cache_epoch_boundary_info(&bank); let forwarded = latest_unprocessed_votes .get_and_insert_forwardable_packets(bank, &mut forward_packet_batches_by_accounts); assert_eq!(0, forwarded); @@ -882,11 +965,17 @@ mod tests { 200, ) .genesis_config; - let bank = Bank::new_for_tests(&config); + let bank_0 = Bank::new_for_tests(&config); + let bank = Bank::new_from_parent( + Arc::new(bank_0), + &Pubkey::new_unique(), + 2 * MINIMUM_SLOTS_PER_EPOCH, + ); let mut forward_packet_batches_by_accounts = ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); // Don't forward votes from gossip + latest_unprocessed_votes.cache_epoch_boundary_info(&bank); let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( Arc::new(bank), &mut forward_packet_batches_by_accounts, @@ -907,11 +996,17 @@ mod tests { 200, ) .genesis_config; - let bank = Arc::new(Bank::new_for_tests(&config)); + let bank_0 = Bank::new_for_tests(&config); + let bank = Arc::new(Bank::new_from_parent( + Arc::new(bank_0), + &Pubkey::new_unique(), + 3 * MINIMUM_SLOTS_PER_EPOCH, + )); let mut forward_packet_batches_by_accounts = ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); // Forward from TPU + 
latest_unprocessed_votes.cache_epoch_boundary_info(&bank); let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( bank.clone(), &mut forward_packet_batches_by_accounts, @@ -944,11 +1039,17 @@ mod tests { #[test] fn test_clear_forwarded_packets() { - let latest_unprocessed_votes = LatestUnprocessedVotes::new(); + let latest_unprocessed_votes = LatestUnprocessedVotes::default(); let keypair_a = ValidatorVoteKeypairs::new_rand(); let keypair_b = ValidatorVoteKeypairs::new_rand(); let keypair_c = ValidatorVoteKeypairs::new_rand(); let keypair_d = ValidatorVoteKeypairs::new_rand(); + latest_unprocessed_votes.set_staked_nodes(&[ + keypair_a.node_keypair.pubkey(), + keypair_b.node_keypair.pubkey(), + keypair_c.node_keypair.pubkey(), + keypair_d.node_keypair.pubkey(), + ]); let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None); let mut vote_b = from_slots(vec![(2, 1)], VoteSource::Tpu, &keypair_b, None); @@ -982,4 +1083,97 @@ mod tests { latest_unprocessed_votes.get_latest_vote_slot(keypair_d.node_keypair.pubkey()) ); } + + #[test] + fn test_insert_batch_unstaked() { + let keypair_a = ValidatorVoteKeypairs::new_rand(); + let keypair_b = ValidatorVoteKeypairs::new_rand(); + let keypair_c = ValidatorVoteKeypairs::new_rand(); + let keypair_d = ValidatorVoteKeypairs::new_rand(); + + let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None); + let vote_b = from_slots(vec![(2, 1)], VoteSource::Tpu, &keypair_b, None); + let vote_c = from_slots(vec![(3, 1)], VoteSource::Tpu, &keypair_c, None); + let vote_d = from_slots(vec![(4, 1)], VoteSource::Gossip, &keypair_d, None); + let votes = [ + vote_a.clone(), + vote_b.clone(), + vote_c.clone(), + vote_d.clone(), + ] + .into_iter(); + + let bank_0 = Bank::new_for_tests(&GenesisConfig::default()); + let latest_unprocessed_votes = LatestUnprocessedVotes::new(&bank_0); + + // Insert batch should filter out all votes as they are unstaked + latest_unprocessed_votes.insert_batch(votes.clone(), true); + assert!(latest_unprocessed_votes.is_empty()); + + // Bank in same epoch should not update stakes + let config = genesis_utils::create_genesis_config_with_leader( + 100, + &keypair_a.node_keypair.pubkey(), + 200, + ) + .genesis_config; + let bank_0 = Bank::new_for_tests(&config); + let bank = Bank::new_from_parent( + Arc::new(bank_0), + &Pubkey::new_unique(), + MINIMUM_SLOTS_PER_EPOCH - 1, + ); + assert_eq!(bank.epoch(), 0); + latest_unprocessed_votes.cache_epoch_boundary_info(&bank); + latest_unprocessed_votes.insert_batch(votes.clone(), true); + assert!(latest_unprocessed_votes.is_empty()); + + // Bank in next epoch should update stakes + let config = genesis_utils::create_genesis_config_with_leader( + 100, + &keypair_b.node_keypair.pubkey(), + 200, + ) + .genesis_config; + let bank_0 = Bank::new_for_tests(&config); + let bank = Bank::new_from_parent( + Arc::new(bank_0), + &Pubkey::new_unique(), + MINIMUM_SLOTS_PER_EPOCH, + ); + assert_eq!(bank.epoch(), 1); + latest_unprocessed_votes.cache_epoch_boundary_info(&bank); + latest_unprocessed_votes.insert_batch(votes.clone(), true); + assert_eq!(latest_unprocessed_votes.len(), 1); + assert_eq!( + latest_unprocessed_votes.get_latest_vote_slot(keypair_b.node_keypair.pubkey()), + Some(vote_b.slot()) + ); + + // Previously unstaked votes are not (yet) removed + let config = genesis_utils::create_genesis_config_with_leader( + 100, + &keypair_c.node_keypair.pubkey(), + 200, + ) + .genesis_config; + let bank_0 = Bank::new_for_tests(&config); + let bank = 
Bank::new_from_parent( + Arc::new(bank_0), + &Pubkey::new_unique(), + 3 * MINIMUM_SLOTS_PER_EPOCH, + ); + assert_eq!(bank.epoch(), 2); + latest_unprocessed_votes.cache_epoch_boundary_info(&bank); + latest_unprocessed_votes.insert_batch(votes.clone(), true); + assert_eq!(latest_unprocessed_votes.len(), 2); + assert_eq!( + latest_unprocessed_votes.get_latest_vote_slot(keypair_b.node_keypair.pubkey()), + Some(vote_b.slot()) + ); + assert_eq!( + latest_unprocessed_votes.get_latest_vote_slot(keypair_c.node_keypair.pubkey()), + Some(vote_c.slot()) + ); + } } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 35bc04a2997995..1ee7363e0d1924 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -414,6 +414,13 @@ impl UnprocessedTransactionStorage { ), } } + + pub(crate) fn cache_epoch_boundary_info(&mut self, bank: &Bank) { + match self { + Self::LocalTransactionStorage(_) => (), + Self::VoteStorage(vote_storage) => vote_storage.cache_epoch_boundary_info(bank), + } + } } impl VoteStorage { @@ -451,6 +458,8 @@ impl VoteStorage { LatestValidatorVotePacket::new_from_immutable( Arc::new(deserialized_packet), self.vote_source, + self.latest_unprocessed_votes + .should_deprecate_legacy_vote_ixs(), ) .ok() }), @@ -514,6 +523,10 @@ impl VoteStorage { should_process_packet, ); + let deprecate_legacy_vote_ixs = self + .latest_unprocessed_votes + .should_deprecate_legacy_vote_ixs(); + while let Some((packets, payload)) = scanner.iterate() { let vote_packets = packets.iter().map(|p| (*p).clone()).collect_vec(); @@ -523,6 +536,7 @@ impl VoteStorage { LatestValidatorVotePacket::new_from_immutable( vote_packets[*i].clone(), self.vote_source, + deprecate_legacy_vote_ixs, ) .ok() }), @@ -531,7 +545,12 @@ impl VoteStorage { } else { self.latest_unprocessed_votes.insert_batch( vote_packets.into_iter().filter_map(|packet| { - LatestValidatorVotePacket::new_from_immutable(packet, self.vote_source).ok() + LatestValidatorVotePacket::new_from_immutable( + packet, + self.vote_source, + deprecate_legacy_vote_ixs, + ) + .ok() }), true, // should_replenish_taken_votes ); @@ -540,6 +559,14 @@ impl VoteStorage { scanner.finalize().payload.reached_end_of_slot } + + fn cache_epoch_boundary_info(&mut self, bank: &Bank) { + if matches!(self.vote_source, VoteSource::Gossip) { + panic!("Gossip vote thread should not be checking epoch boundary"); + } + self.latest_unprocessed_votes + .cache_epoch_boundary_info(bank); + } } impl ThreadLocalUnprocessedPackets { @@ -1246,9 +1273,16 @@ mod tests { assert!(deserialized_packets.contains(&big_transfer)); } - for vote_source in [VoteSource::Gossip, VoteSource::Tpu] { + for (vote_source, staked) in [VoteSource::Gossip, VoteSource::Tpu] + .into_iter() + .flat_map(|vs| [(vs, true), (vs, false)]) + { + let latest_unprocessed_votes = LatestUnprocessedVotes::default(); + if staked { + latest_unprocessed_votes.set_staked_nodes(&[keypair.pubkey()]); + } let mut transaction_storage = UnprocessedTransactionStorage::new_vote_storage( - Arc::new(LatestUnprocessedVotes::new()), + Arc::new(latest_unprocessed_votes), vote_source, ); transaction_storage.insert_batch(vec![ @@ -1256,7 +1290,7 @@ mod tests { ImmutableDeserializedPacket::new(vote.clone())?, ImmutableDeserializedPacket::new(big_transfer.clone())?, ]); - assert_eq!(1, transaction_storage.len()); + assert_eq!(if staked { 1 } else { 0 }, transaction_storage.len()); } Ok(()) } @@ 
-1282,8 +1316,10 @@ mod tests { )?; vote.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true); + let latest_unprocessed_votes = LatestUnprocessedVotes::default(); + latest_unprocessed_votes.set_staked_nodes(&[node_keypair.pubkey()]); let mut transaction_storage = UnprocessedTransactionStorage::new_vote_storage( - Arc::new(LatestUnprocessedVotes::new()), + Arc::new(latest_unprocessed_votes), VoteSource::Tpu, ); From 19a177dee6fa7bbadcbe4d37d4b97f26862814d1 Mon Sep 17 00:00:00 2001 From: Gaurav Dhiman Date: Thu, 12 Sep 2024 06:10:36 +0200 Subject: [PATCH 338/529] docs: fix tonic crates.io url (#2904) --- docs/src/implemented-proposals/rpc-transaction-history.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/implemented-proposals/rpc-transaction-history.md b/docs/src/implemented-proposals/rpc-transaction-history.md index 522b9160fb9883..87926ef27a22b1 100644 --- a/docs/src/implemented-proposals/rpc-transaction-history.md +++ b/docs/src/implemented-proposals/rpc-transaction-history.md @@ -61,7 +61,7 @@ all transactions to build up the necessary metadata. ## Accessing BigTable BigTable has a gRPC endpoint that can be accessed using the -[tonic](https://crates.io/crates/crate) and the raw protobuf API, as currently +[tonic](https://crates.io/crates/tonic) and the raw protobuf API, as currently no higher-level Rust crate for BigTable exists. Practically this makes parsing the results of BigTable queries more complicated but is not a significant issue. From fc4069a8ae55e2d3ba7b345c53f15279e7ba5923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 12 Sep 2024 14:27:05 +0200 Subject: [PATCH 339/529] Feature - `disable_sbpf_v1_execution` for tests (#2884) * Adds the feature * Removes the feature reject_callx_r10 from tests * Cleans up reject_callx_r10. 
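Note on the gating this patch introduces: `disable_sbpf_v1_execution` is paired with a `reenable_sbpf_v1_execution` override, and the VM flag is computed from both features (see the `create_program_runtime_environment_v1` hunk below). A minimal sketch of that predicate, using a stand-in `FeatureSet` rather than the real `solana_feature_set` type (the stand-in and its string ids are illustrative assumptions, only the boolean expression comes from the diff):

```rust
// Sketch of the two-flag gate used below; the HashSet-backed FeatureSet
// is a stand-in for the real solana_feature_set::FeatureSet.
use std::collections::HashSet;

#[derive(Default)]
struct FeatureSet {
    active: HashSet<&'static str>,
}

impl FeatureSet {
    fn is_active(&self, id: &str) -> bool {
        self.active.contains(id)
    }
}

fn enable_sbpf_v1(features: &FeatureSet) -> bool {
    // v1 stays enabled until `disable_sbpf_v1_execution` activates, and
    // `reenable_sbpf_v1_execution` wins even when the disable flag is active.
    !features.is_active("disable_sbpf_v1_execution")
        || features.is_active("reenable_sbpf_v1_execution")
}

fn main() {
    let mut features = FeatureSet::default();
    assert!(enable_sbpf_v1(&features)); // neither flag active: v1 allowed

    features.active.insert("disable_sbpf_v1_execution");
    assert!(!enable_sbpf_v1(&features)); // disable flag active: v1 rejected

    features.active.insert("reenable_sbpf_v1_execution");
    assert!(enable_sbpf_v1(&features)); // override re-enables v1
}
```

Both feature ids are declared with placeholder `TestFeature...` keys further down in this patch, which is why the pair currently only matters for tests.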
--- programs/bpf_loader/src/syscalls/mod.rs | 11 +++++----- runtime/src/bank/tests.rs | 29 ++++++++++++------------- sdk/feature-set/src/lib.rs | 10 +++++++++ svm/tests/mock_bank.rs | 2 +- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 2c5d8e9feed26d..65cc88d753a149 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -21,12 +21,12 @@ use { solana_feature_set::{ self as feature_set, abort_on_invalid_curve, blake3_syscall_enabled, bpf_account_data_direct_mapping, curve25519_syscall_enabled, - disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, + disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, disable_sbpf_v1_execution, enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, enable_big_mod_exp_syscall, enable_get_epoch_stake_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall, error_on_syscall_bpf_function_hash_collisions, get_sysvar_syscall_enabled, - last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, reject_callx_r10, - remaining_compute_units_syscall_enabled, FeatureSet, + last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, + reenable_sbpf_v1_execution, remaining_compute_units_syscall_enabled, FeatureSet, }, solana_log_collector::{ic_logger_msg, ic_msg}, solana_poseidon as poseidon, @@ -299,8 +299,9 @@ pub fn create_program_runtime_environment_v1<'a>( sanitize_user_provided_values: true, external_internal_function_hash_collision: feature_set .is_active(&error_on_syscall_bpf_function_hash_collisions::id()), - reject_callx_r10: feature_set.is_active(&reject_callx_r10::id()), - enable_sbpf_v1: true, + reject_callx_r10: true, + enable_sbpf_v1: !feature_set.is_active(&disable_sbpf_v1_execution::id()) + || feature_set.is_active(&reenable_sbpf_v1_execution::id()), enable_sbpf_v2: false, optimize_rodata: false, aligned_memory_mapping: !feature_set.is_active(&bpf_account_data_direct_mapping::id()), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 4df464cdfea6ff..915dfb65bf5e00 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11917,13 +11917,15 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase() { let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config .accounts - .remove(&feature_set::reject_callx_r10::id()); + .remove(&feature_set::disable_sbpf_v1_execution::id()); + genesis_config + .accounts + .remove(&feature_set::reenable_sbpf_v1_execution::id()); let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Program Setup let program_keypair = Keypair::new(); - let program_data = - include_bytes!("../../../programs/bpf_loader/test_elfs/out/callx-r10-sbfv1.so"); + let program_data = include_bytes!("../../../programs/bpf_loader/test_elfs/out/noop_aligned.so"); let program_account = AccountSharedData::from(Account { lamports: Rent::default().minimum_balance(program_data.len()).min(1), data: program_data.to_vec(), @@ -11946,19 +11948,13 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase() { // Load the program with the old environment. 
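// With both disable_sbpf_v1_execution and reenable_sbpf_v1_execution removed from genesis above, SBPFv1 execution is still enabled in this environment, so processing the v1 noop program below is now expected to succeed (the assertion changes from ProgramFailedToComplete to Ok(())).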
let transaction = Transaction::new(&signers, message.clone(), bank.last_blockhash()); let result_without_feature_enabled = bank.process_transaction(&transaction); - assert_eq!( - result_without_feature_enabled, - Err(TransactionError::InstructionError( - 0, - InstructionError::ProgramFailedToComplete - )) - ); + assert_eq!(result_without_feature_enabled, Ok(())); // Schedule feature activation to trigger a change of environment at the epoch boundary. let feature_account_balance = std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); bank.store_account( - &feature_set::reject_callx_r10::id(), + &feature_set::disable_sbpf_v1_execution::id(), &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); @@ -12030,7 +12026,10 @@ fn test_feature_activation_loaded_programs_epoch_transition() { let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config .accounts - .remove(&feature_set::reject_callx_r10::id()); + .remove(&feature_set::disable_fees_sysvar::id()); + genesis_config + .accounts + .remove(&feature_set::reenable_sbpf_v1_execution::id()); let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Program Setup @@ -12063,7 +12062,7 @@ fn test_feature_activation_loaded_programs_epoch_transition() { let feature_account_balance = std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); bank.store_account( - &feature_set::reject_callx_r10::id(), + &feature_set::disable_fees_sysvar::id(), &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); @@ -12999,9 +12998,9 @@ fn test_deploy_last_epoch_slot() { let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config .accounts - .remove(&feature_set::reject_callx_r10::id()); + .remove(&feature_set::disable_fees_sysvar::id()); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::reject_callx_r10::id()); + bank.activate_feature(&feature_set::disable_fees_sysvar::id()); // go to the last slot in the epoch let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); diff --git a/sdk/feature-set/src/lib.rs b/sdk/feature-set/src/lib.rs index 58bb290a1e653c..f2b094e90850dd 100644 --- a/sdk/feature-set/src/lib.rs +++ b/sdk/feature-set/src/lib.rs @@ -863,6 +863,14 @@ pub mod deprecate_legacy_vote_ixs { solana_program::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); } +pub mod disable_sbpf_v1_execution { + solana_program::declare_id!("TestFeature11111111111111111111111111111111"); +} + +pub mod reenable_sbpf_v1_execution { + solana_program::declare_id!("TestFeature21111111111111111111111111111111"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -1073,6 +1081,8 @@ lazy_static! 
{ (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), (partitioned_epoch_rewards_superfeature::id(), "replaces enable_partitioned_epoch_reward to enable partitioned rewards at epoch boundary SIMD-0118"), + (disable_sbpf_v1_execution::id(), "Disables execution of SBPFv1 programs"), + (reenable_sbpf_v1_execution::id(), "Re-enables execution of SBPFv1 programs"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 5797d514888201..8adbd2a55bf13e 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -299,7 +299,7 @@ fn create_custom_environment<'a>() -> BuiltinProgram<InvokeContext<'a>> { noop_instruction_rate: 256, sanitize_user_provided_values: true, external_internal_function_hash_collision: false, - reject_callx_r10: false, + reject_callx_r10: true, enable_sbpf_v1: true, enable_sbpf_v2: false, optimize_rodata: false, From 3f2a4aecfeda5ba70c4be5191bd79eefafafab55 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:32:17 -0500 Subject: [PATCH 340/529] metric: report estimated_mem_bytes for in-mem index (#2898) * report estimated_mem_bytes * pr feedback * Update accounts-db/src/accounts_index/in_mem_accounts_index.rs Co-authored-by: Brooks --------- Co-authored-by: HaoranYi Co-authored-by: Brooks --- accounts-db/src/accounts_index/in_mem_accounts_index.rs | 7 +++++++ accounts-db/src/bucket_map_holder_stats.rs | 6 +++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 5566ab4420cfd8..eb1bc71d0f6583 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -980,6 +980,13 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T, U> { + pub(crate) fn approx_size_of_one_entry() -> usize { + std::mem::size_of::<T>() + + std::mem::size_of::<Pubkey>() + + std::mem::size_of::<AccountMapEntry<T>>() + } + fn should_evict_based_on_age( current_age: Age, entry: &AccountMapEntry<T>, diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index 35a80c228d7278..7ebcf2d034ee53 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -1,6 +1,6 @@ use { crate::{ - accounts_index::{DiskIndexValue, IndexValue}, + accounts_index::{in_mem_accounts_index::InMemAccountsIndex, DiskIndexValue, IndexValue}, bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, }, solana_sdk::timing::AtomicInterval, @@ -61,7 +61,6 @@ pub struct BucketMapHolderStats { last_was_startup: AtomicBool, last_time: AtomicInterval, bins: u64, - pub estimate_mem: AtomicU64, pub flush_should_evict_us: AtomicU64, } @@ -263,7 +262,8 @@ impl BucketMapHolderStats { }, ( "estimate_mem_bytes", - self.estimate_mem.load(Ordering::Relaxed), + self.count_in_mem.load(Ordering::Relaxed) + * InMemAccountsIndex::<T, U>::approx_size_of_one_entry(), i64 ), ( From 43b5830cb4fa118b26a39f36c593041c49ad3d1a Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 12 Sep 2024 18:54:33 +0400 Subject: [PATCH 341/529] Extract transaction-status-client-types (#2870) * extract transaction-status-client-types and transform associated methods to free funcs: UiInstruction::parse -> parse_ui_instruction; UiInnerInstructions::parse => parse_ui_inner_instructions; UiTransactionStatusMeta::parse -> parse_ui_transaction_status_meta * replace
transaction_status with transaction_status_client_types in rpc_client_api * update description * also rip out types used by rpc-client * unused imports * fix feature activation after workspace change * move tests * sort deps --- Cargo.lock | 21 +- Cargo.toml | 4 +- ledger-tool/src/main.rs | 4 +- programs/sbf/Cargo.lock | 21 +- rpc-client-api/Cargo.toml | 2 +- rpc-client-api/src/config.rs | 2 +- rpc-client-api/src/custom_error.rs | 2 +- rpc-client-api/src/response.rs | 2 +- rpc-client/Cargo.toml | 2 +- rpc-client/src/mock_sender.rs | 2 +- rpc-client/src/nonblocking/rpc_client.rs | 10 +- rpc-client/src/rpc_client.rs | 10 +- rpc/src/rpc.rs | 4 +- sdk/Cargo.toml | 7 +- transaction-status-client-types/Cargo.toml | 25 + transaction-status-client-types/src/lib.rs | 810 +++++++++++++ .../src/option_serializer.rs | 0 transaction-status/Cargo.toml | 1 + transaction-status/src/lib.rs | 1027 +++-------------- transaction-status/src/parse_accounts.rs | 17 +- transaction-status/src/parse_instruction.rs | 10 +- 21 files changed, 1045 insertions(+), 938 deletions(-) create mode 100644 transaction-status-client-types/Cargo.toml create mode 100644 transaction-status-client-types/src/lib.rs rename {transaction-status => transaction-status-client-types}/src/option_serializer.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 83992bc32a7586..187aba8daa5d97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7374,7 +7374,7 @@ dependencies = [ "solana-account-decoder", "solana-rpc-client-api", "solana-sdk", - "solana-transaction-status", + "solana-transaction-status-client-types", "solana-version", "solana-vote-program", "tokio", @@ -7398,7 +7398,7 @@ dependencies = [ "solana-account-decoder", "solana-inline-spl", "solana-sdk", - "solana-transaction-status", + "solana-transaction-status-client-types", "solana-version", "thiserror", ] @@ -8154,6 +8154,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-sdk", + "solana-transaction-status-client-types", "spl-associated-token-account", "spl-memo", "spl-token", @@ -8163,6 +8164,22 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-transaction-status-client-types" +version = "2.1.0" +dependencies = [ + "base64 0.22.1", + "bincode", + "bs58", + "serde", + "serde_derive", + "serde_json", + "solana-account-decoder", + "solana-sdk", + "solana-signature", + "thiserror", +] + [[package]] name = "solana-turbine" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index c0e93451fbaa6e..aa59267b158ccd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,6 +141,7 @@ members = [ "transaction-dos", "transaction-metrics-tracker", "transaction-status", + "transaction-status-client-types", "transaction-view", "turbine", "type-overrides", @@ -432,7 +433,7 @@ solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-fea solana-sanitize = { path = "sanitize", version = "=2.1.0" } solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } solana-sha256-hasher = { path = "sdk/sha256-hasher", version = "=2.1.0" } -solana-signature = { path = "sdk/signature", version = "=2.1.0" } +solana-signature = { path = "sdk/signature", version = "=2.1.0", default-features = false } solana-timings = { path = "timings", version = "=2.1.0" } solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.1.0" } solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.1.0" } @@ -461,6 +462,7 @@ solana-test-validator = { path = "test-validator", version = "=2.1.0" } solana-thin-client = { path = 
"thin-client", version = "=2.1.0" } solana-tpu-client = { path = "tpu-client", version = "=2.1.0", default-features = false } solana-transaction-status = { path = "transaction-status", version = "=2.1.0" } +solana-transaction-status-client-types = { path = "transaction-status-client-types", version = "=2.1.0" } solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=2.1.0" } solana-turbine = { path = "turbine", version = "=2.1.0" } solana-type-overrides = { path = "type-overrides", version = "=2.1.0" } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index a9b9a864bd9b42..682f2bf8a1aa0f 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -76,7 +76,7 @@ use { transaction::{MessageHash, SanitizedTransaction, SimpleAddressLoader}, }, solana_stake_program::{points::PointValue, stake_state}, - solana_transaction_status::UiInstruction, + solana_transaction_status::parse_ui_instruction, solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::{ self, @@ -759,7 +759,7 @@ fn record_transactions( let instructions = message .instructions() .iter() - .map(|ix| UiInstruction::parse(ix, &message.account_keys(), None)) + .map(|ix| parse_ui_instruction(ix, &message.account_keys(), None)) .collect(); let is_simple_vote_tx = tx.is_simple_vote_transaction(); diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7ec3d180fb55c9..622c921d0512ea 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5728,7 +5728,7 @@ dependencies = [ "solana-account-decoder", "solana-rpc-client-api", "solana-sdk", - "solana-transaction-status", + "solana-transaction-status-client-types", "solana-version", "solana-vote-program", "tokio", @@ -5751,7 +5751,7 @@ dependencies = [ "solana-account-decoder", "solana-inline-spl", "solana-sdk", - "solana-transaction-status", + "solana-transaction-status-client-types", "solana-version", "thiserror", ] @@ -6729,6 +6729,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-sdk", + "solana-transaction-status-client-types", "spl-associated-token-account", "spl-memo", "spl-token", @@ -6738,6 +6739,22 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-transaction-status-client-types" +version = "2.1.0" +dependencies = [ + "base64 0.22.1", + "bincode", + "bs58", + "serde", + "serde_derive", + "serde_json", + "solana-account-decoder", + "solana-sdk", + "solana-signature", + "thiserror", +] + [[package]] name = "solana-turbine" version = "2.1.0" diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml index 021e069960e39e..d0eb73608dea5e 100644 --- a/rpc-client-api/Cargo.toml +++ b/rpc-client-api/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { workspace = true } solana-account-decoder = { workspace = true } solana-inline-spl = { workspace = true } solana-sdk = { workspace = true } -solana-transaction-status = { workspace = true } +solana-transaction-status-client-types = { workspace = true } solana-version = { workspace = true } thiserror = { workspace = true } diff --git a/rpc-client-api/src/config.rs b/rpc-client-api/src/config.rs index db13ea1280d829..9eea7fb6508514 100644 --- a/rpc-client-api/src/config.rs +++ b/rpc-client-api/src/config.rs @@ -5,7 +5,7 @@ use { clock::{Epoch, Slot}, commitment_config::{CommitmentConfig, CommitmentLevel}, }, - solana_transaction_status::{TransactionDetails, UiTransactionEncoding}, + solana_transaction_status_client_types::{TransactionDetails, UiTransactionEncoding}, }; #[derive(Debug, Clone, PartialEq, Eq, 
Serialize, Deserialize)] diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index 8ef3bfae0faa33..d1e9c84f15a231 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -3,7 +3,7 @@ use { crate::response::RpcSimulateTransactionResult, jsonrpc_core::{Error, ErrorCode}, solana_sdk::clock::Slot, - solana_transaction_status::EncodeError, + solana_transaction_status_client_types::EncodeError, thiserror::Error, }; diff --git a/rpc-client-api/src/response.rs b/rpc-client-api/src/response.rs index fcb330103057e4..ee38c15d921f52 100644 --- a/rpc-client-api/src/response.rs +++ b/rpc-client-api/src/response.rs @@ -8,7 +8,7 @@ use { inflation::Inflation, transaction::{Result, TransactionError}, }, - solana_transaction_status::{ + solana_transaction_status_client_types::{ ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock, UiInnerInstructions, UiTransactionReturnData, }, diff --git a/rpc-client/Cargo.toml b/rpc-client/Cargo.toml index 26b0ed8322ce91..3970f9c2b945b6 100644 --- a/rpc-client/Cargo.toml +++ b/rpc-client/Cargo.toml @@ -25,7 +25,7 @@ serde_json = { workspace = true } solana-account-decoder = { workspace = true } solana-rpc-client-api = { workspace = true } solana-sdk = { workspace = true } -solana-transaction-status = { workspace = true } +solana-transaction-status-client-types = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs index 6654804cc32c25..b355c1e0be684f 100644 --- a/rpc-client/src/mock_sender.rs +++ b/rpc-client/src/mock_sender.rs @@ -30,7 +30,7 @@ use { sysvar::epoch_schedule::EpochSchedule, transaction::{self, Transaction, TransactionError, TransactionVersion}, }, - solana_transaction_status::{ + solana_transaction_status_client_types::{ option_serializer::OptionSerializer, EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, EncodedTransaction, EncodedTransactionWithStatusMeta, Rewards, TransactionBinaryEncoding, diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index 0ca5f76a49f829..8c580b58efb716 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -46,7 +46,7 @@ use { signature::Signature, transaction, }, - solana_transaction_status::{ + solana_transaction_status_client_types::{ EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, }, @@ -2277,7 +2277,7 @@ impl RpcClient { /// # Examples /// /// ``` - /// # use solana_transaction_status::UiTransactionEncoding; + /// # use solana_transaction_status_client_types::UiTransactionEncoding; /// # use solana_rpc_client_api::client_error::Error; /// # use solana_rpc_client::nonblocking::rpc_client::RpcClient; /// # futures::executor::block_on(async { @@ -2312,7 +2312,7 @@ impl RpcClient { /// # Examples /// /// ``` - /// # use solana_transaction_status::{ + /// # use solana_transaction_status_client_types::{ /// # TransactionDetails, /// # UiTransactionEncoding, /// # }; @@ -2709,7 +2709,7 @@ impl RpcClient { /// # signer::keypair::Keypair, /// # system_transaction, /// # }; - /// # use solana_transaction_status::UiTransactionEncoding; + /// # use solana_transaction_status_client_types::UiTransactionEncoding; /// # futures::executor::block_on(async { /// # let rpc_client = 
RpcClient::new_mock("succeeds".to_string()); /// # let alice = Keypair::new(); @@ -2769,7 +2769,7 @@ impl RpcClient { /// # system_transaction, /// # commitment_config::CommitmentConfig, /// # }; - /// # use solana_transaction_status::UiTransactionEncoding; + /// # use solana_transaction_status_client_types::UiTransactionEncoding; /// # futures::executor::block_on(async { /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); /// # let alice = Keypair::new(); diff --git a/rpc-client/src/rpc_client.rs b/rpc-client/src/rpc_client.rs index 32bd08cef49f03..baf9ba92eca42d 100644 --- a/rpc-client/src/rpc_client.rs +++ b/rpc-client/src/rpc_client.rs @@ -42,7 +42,7 @@ use { signature::Signature, transaction::{self, uses_durable_nonce, Transaction, VersionedTransaction}, }, - solana_transaction_status::{ + solana_transaction_status_client_types::{ EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, }, @@ -1968,7 +1968,7 @@ impl RpcClient { /// ``` /// # use solana_rpc_client_api::client_error::Error; /// # use solana_rpc_client::rpc_client::RpcClient; - /// # use solana_transaction_status::UiTransactionEncoding; + /// # use solana_transaction_status_client_types::UiTransactionEncoding; /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); /// # let slot = rpc_client.get_slot()?; /// let encoding = UiTransactionEncoding::Base58; @@ -2002,7 +2002,7 @@ impl RpcClient { /// # client_error::Error, /// # }; /// # use solana_rpc_client::rpc_client::RpcClient; - /// # use solana_transaction_status::{ + /// # use solana_transaction_status_client_types::{ /// # TransactionDetails, /// # UiTransactionEncoding, /// # }; @@ -2344,7 +2344,7 @@ impl RpcClient { /// # signer::keypair::Keypair, /// # system_transaction, /// # }; - /// # use solana_transaction_status::UiTransactionEncoding; + /// # use solana_transaction_status_client_types::UiTransactionEncoding; /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); /// # let alice = Keypair::new(); /// # let bob = Keypair::new(); @@ -2397,7 +2397,7 @@ impl RpcClient { /// # system_transaction, /// # commitment_config::CommitmentConfig, /// # }; - /// # use solana_transaction_status::UiTransactionEncoding; + /// # use solana_transaction_status_client_types::UiTransactionEncoding; /// # let rpc_client = RpcClient::new_mock("succeeds".to_string()); /// # let alice = Keypair::new(); /// # let bob = Keypair::new(); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 3c8b1c1217ae64..08fe5f3355f7af 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -3293,7 +3293,7 @@ pub mod rpc_full { use { super::*, solana_sdk::message::{SanitizedVersionedMessage, VersionedMessage}, - solana_transaction_status::UiInnerInstructions, + solana_transaction_status::parse_ui_inner_instructions, }; #[rpc] pub trait Full { @@ -3884,7 +3884,7 @@ pub mod rpc_full { let inner_instructions = inner_instructions.map(|info| { map_inner_instructions(info) - .map(|converted| UiInnerInstructions::parse(converted, &account_keys)) + .map(|converted| parse_ui_inner_instructions(converted, &account_keys)) .collect() }); diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index a14c58db882378..afc2d7e6c584cf 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -97,7 +97,12 @@ solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } -solana-signature = { workspace = true, features = ["rand", 
"serde", "verify"], optional = true } +solana-signature = { workspace = true, features = [ + "rand", + "serde", + "std", + "verify", +], optional = true } thiserror = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/transaction-status-client-types/Cargo.toml b/transaction-status-client-types/Cargo.toml new file mode 100644 index 00000000000000..4298bf10b68a00 --- /dev/null +++ b/transaction-status-client-types/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "solana-transaction-status-client-types" +description = "Core RPC client types for solana-transaction-status" +documentation = "https://docs.rs/solana-transaction-status-client-types" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +base64 = { workspace = true } +bincode = { workspace = true } +bs58 = { workspace = true } +serde = { workspace = true } +serde_derive = { workspace = true } +serde_json = { workspace = true } +solana-account-decoder = { workspace = true } +solana-sdk = { workspace = true } +solana-signature = { workspace = true, default-features = false } +thiserror = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/transaction-status-client-types/src/lib.rs b/transaction-status-client-types/src/lib.rs new file mode 100644 index 00000000000000..ee471e9ae914b1 --- /dev/null +++ b/transaction-status-client-types/src/lib.rs @@ -0,0 +1,810 @@ +//! Core types for solana-transaction-status +use { + crate::option_serializer::OptionSerializer, + base64::{prelude::BASE64_STANDARD, Engine}, + core::fmt, + serde_derive::{Deserialize, Serialize}, + serde_json::Value, + solana_account_decoder::parse_token::UiTokenAmount, + solana_sdk::{ + commitment_config::CommitmentConfig, + instruction::CompiledInstruction, + message::{ + v0::{LoadedAddresses, MessageAddressTableLookup}, + MessageHeader, + }, + reward_type::RewardType, + transaction::{ + Result as TransactionResult, TransactionError, TransactionVersion, VersionedTransaction, + }, + transaction_context::TransactionReturnData, + }, + solana_signature::Signature, + thiserror::Error, +}; +pub mod option_serializer; + +#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[serde(rename_all = "camelCase")] +pub enum TransactionBinaryEncoding { + Base58, + Base64, +} + +#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[serde(rename_all = "camelCase")] +pub enum UiTransactionEncoding { + Binary, // Legacy. 
Retained for RPC backwards compatibility + Base64, + Base58, + Json, + JsonParsed, +} + +impl UiTransactionEncoding { + pub fn into_binary_encoding(&self) -> Option<TransactionBinaryEncoding> { + match self { + Self::Binary | Self::Base58 => Some(TransactionBinaryEncoding::Base58), + Self::Base64 => Some(TransactionBinaryEncoding::Base64), + _ => None, + } + } +} + +impl fmt::Display for UiTransactionEncoding { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let v = serde_json::to_value(self).map_err(|_| fmt::Error)?; + let s = v.as_str().ok_or(fmt::Error)?; + write!(f, "{s}") + } +} + +#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionDetails { + Full, + Signatures, + None, + Accounts, +} + +impl Default for TransactionDetails { + fn default() -> Self { + Self::Full + } +} + +#[derive(Error, Debug, PartialEq, Eq, Clone)] +pub enum EncodeError { + #[error("Encoding does not support transaction version {0}")] + UnsupportedTransactionVersion(u8), +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ConfirmedTransactionStatusWithSignature { + pub signature: Signature, + pub slot: u64, + pub err: Option<TransactionError>, + pub memo: Option<String>, + pub block_time: Option<i64>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionConfirmationStatus { + Processed, + Confirmed, + Finalized, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct UiConfirmedBlock { + pub previous_blockhash: String, + pub blockhash: String, + pub parent_slot: u64, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub transactions: Option<Vec<EncodedTransactionWithStatusMeta>>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub signatures: Option<Vec<String>>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub rewards: Option<Rewards>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub num_reward_partitions: Option<u64>, + pub block_time: Option<i64>, + pub block_height: Option<u64>, +} + +/// A duplicate representation of a Transaction for pretty JSON serialization +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiTransaction { + pub signatures: Vec<String>, + pub message: UiMessage, +} + +/// A duplicate representation of a Message, in parsed format, for pretty JSON serialization +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiParsedMessage { + pub account_keys: Vec<ParsedAccount>, + pub recent_blockhash: String, + pub instructions: Vec<UiParsedInstruction>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub address_table_lookups: Option<Vec<UiAddressTableLookup>>, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct ParsedAccount { + pub pubkey: String, + pub writable: bool, + pub signer: bool, + pub source: Option<ParsedAccountSource>, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub enum ParsedAccountSource { + Transaction, + LookupTable, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum UiMessage { + Parsed(UiParsedMessage), + Raw(UiRawMessage), +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum EncodedTransaction { + LegacyBinary(String), // Old way of expressing base-58, retained for RPC backwards compatibility + Binary(String,
TransactionBinaryEncoding), + Json(UiTransaction), + Accounts(UiAccountsList), +} + +impl EncodedTransaction { + pub fn decode(&self) -> Option<VersionedTransaction> { + let (blob, encoding) = match self { + Self::Json(_) | Self::Accounts(_) => return None, + Self::LegacyBinary(blob) => (blob, TransactionBinaryEncoding::Base58), + Self::Binary(blob, encoding) => (blob, *encoding), + }; + + let transaction: Option<VersionedTransaction> = match encoding { + TransactionBinaryEncoding::Base58 => bs58::decode(blob) + .into_vec() + .ok() + .and_then(|bytes| bincode::deserialize(&bytes).ok()), + TransactionBinaryEncoding::Base64 => BASE64_STANDARD + .decode(blob) + .ok() + .and_then(|bytes| bincode::deserialize(&bytes).ok()), + }; + + transaction.filter(|transaction| transaction.sanitize().is_ok()) + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EncodedTransactionWithStatusMeta { + pub transaction: EncodedTransaction, + pub meta: Option<UiTransactionStatusMeta>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option<TransactionVersion>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Reward { + pub pubkey: String, + pub lamports: i64, + pub post_balance: u64, // Account balance in lamports after `lamports` was applied + pub reward_type: Option<RewardType>, + pub commission: Option<u8>, // Vote account commission when the reward was credited, only present for voting and staking rewards +} + +pub type Rewards = Vec<Reward>; + +/// A duplicate representation of a MessageAddressTableLookup, in raw format, for pretty JSON serialization +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiAddressTableLookup { + pub account_key: String, + pub writable_indexes: Vec<u8>, + pub readonly_indexes: Vec<u8>, +} + +impl From<&MessageAddressTableLookup> for UiAddressTableLookup { + fn from(lookup: &MessageAddressTableLookup) -> Self { + Self { + account_key: lookup.account_key.to_string(), + writable_indexes: lookup.writable_indexes.clone(), + readonly_indexes: lookup.readonly_indexes.clone(), + } + } +} + +/// A duplicate representation of TransactionStatusMeta with `err` field +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiTransactionStatusMeta { + pub err: Option<TransactionError>, + pub status: TransactionResult<()>, // This field is deprecated.
See https://github.com/solana-labs/solana/issues/9302 + pub fee: u64, + pub pre_balances: Vec<u64>, + pub post_balances: Vec<u64>, + #[serde( + default = "OptionSerializer::none", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub inner_instructions: OptionSerializer<Vec<UiInnerInstructions>>, + #[serde( + default = "OptionSerializer::none", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub log_messages: OptionSerializer<Vec<String>>, + #[serde( + default = "OptionSerializer::none", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub pre_token_balances: OptionSerializer<Vec<UiTransactionTokenBalance>>, + #[serde( + default = "OptionSerializer::none", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub post_token_balances: OptionSerializer<Vec<UiTransactionTokenBalance>>, + #[serde( + default = "OptionSerializer::none", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub rewards: OptionSerializer<Rewards>, + #[serde( + default = "OptionSerializer::skip", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub loaded_addresses: OptionSerializer<UiLoadedAddresses>, + #[serde( + default = "OptionSerializer::skip", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub return_data: OptionSerializer<UiTransactionReturnData>, + #[serde( + default = "OptionSerializer::skip", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub compute_units_consumed: OptionSerializer<u64>, +} + +impl From<TransactionStatusMeta> for UiTransactionStatusMeta { + fn from(meta: TransactionStatusMeta) -> Self { + Self { + err: meta.status.clone().err(), + status: meta.status, + fee: meta.fee, + pre_balances: meta.pre_balances, + post_balances: meta.post_balances, + inner_instructions: meta + .inner_instructions + .map(|ixs| ixs.into_iter().map(Into::into).collect()) + .into(), + log_messages: meta.log_messages.into(), + pre_token_balances: meta + .pre_token_balances + .map(|balance| balance.into_iter().map(Into::into).collect()) + .into(), + post_token_balances: meta + .post_token_balances + .map(|balance| balance.into_iter().map(Into::into).collect()) + .into(), + rewards: meta.rewards.into(), + loaded_addresses: Some(UiLoadedAddresses::from(&meta.loaded_addresses)).into(), + return_data: OptionSerializer::or_skip( + meta.return_data.map(|return_data| return_data.into()), + ), + compute_units_consumed: OptionSerializer::or_skip(meta.compute_units_consumed), + } + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct UiTransactionReturnData { + pub program_id: String, + pub data: (String, UiReturnDataEncoding), +} + +impl Default for UiTransactionReturnData { + fn default() -> Self { + Self { + program_id: String::default(), + data: (String::default(), UiReturnDataEncoding::Base64), + } + } +} + +impl From<TransactionReturnData> for UiTransactionReturnData { + fn from(return_data: TransactionReturnData) -> Self { + Self { + program_id: return_data.program_id.to_string(), + data: ( + BASE64_STANDARD.encode(return_data.data), + UiReturnDataEncoding::Base64, + ), + } + } +} + +#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[serde(rename_all = "camelCase")] +pub enum UiReturnDataEncoding { + Base64, +} + +/// A duplicate representation of LoadedAddresses +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiLoadedAddresses { + pub writable: Vec<String>, + pub readonly: Vec<String>, +} + +impl From<&LoadedAddresses> for UiLoadedAddresses { + fn from(loaded_addresses: &LoadedAddresses) -> Self { + Self { + writable: loaded_addresses + .writable + .iter() +
.map(ToString::to_string) + .collect(), + readonly: loaded_addresses + .readonly + .iter() + .map(ToString::to_string) + .collect(), + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct TransactionTokenBalance { + pub account_index: u8, + pub mint: String, + pub ui_token_amount: UiTokenAmount, + pub owner: String, + pub program_id: String, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiTransactionTokenBalance { + pub account_index: u8, + pub mint: String, + pub ui_token_amount: UiTokenAmount, + #[serde( + default = "OptionSerializer::skip", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub owner: OptionSerializer<String>, + #[serde( + default = "OptionSerializer::skip", + skip_serializing_if = "OptionSerializer::should_skip" + )] + pub program_id: OptionSerializer<String>, +} + +impl From<TransactionTokenBalance> for UiTransactionTokenBalance { + fn from(token_balance: TransactionTokenBalance) -> Self { + Self { + account_index: token_balance.account_index, + mint: token_balance.mint, + ui_token_amount: token_balance.ui_token_amount, + owner: if !token_balance.owner.is_empty() { + OptionSerializer::Some(token_balance.owner) + } else { + OptionSerializer::Skip + }, + program_id: if !token_balance.program_id.is_empty() { + OptionSerializer::Some(token_balance.program_id) + } else { + OptionSerializer::Skip + }, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiAccountsList { + pub signatures: Vec<String>, + pub account_keys: Vec<ParsedAccount>, +} + +/// A duplicate representation of a Message, in raw format, for pretty JSON serialization +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiRawMessage { + pub header: MessageHeader, + pub account_keys: Vec<String>, + pub recent_blockhash: String, + pub instructions: Vec<UiCompiledInstruction>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub address_table_lookups: Option<Vec<UiAddressTableLookup>>, +} + +/// A duplicate representation of a CompiledInstruction for pretty JSON serialization +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiCompiledInstruction { + pub program_id_index: u8, + pub accounts: Vec<u8>, + pub data: String, + pub stack_height: Option<u32>, +} + +impl UiCompiledInstruction { + pub fn from(instruction: &CompiledInstruction, stack_height: Option<u32>) -> Self { + Self { + program_id_index: instruction.program_id_index, + accounts: instruction.accounts.clone(), + data: bs58::encode(&instruction.data).into_string(), + stack_height, + } + } +} + +/// A duplicate representation of an Instruction for pretty JSON serialization +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum UiInstruction { + Compiled(UiCompiledInstruction), + Parsed(UiParsedInstruction), +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase", untagged)] +pub enum UiParsedInstruction { + Parsed(ParsedInstruction), + PartiallyDecoded(UiPartiallyDecodedInstruction), +} + +/// A partially decoded CompiledInstruction that includes explicit account addresses +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiPartiallyDecodedInstruction { + pub program_id: String, + pub accounts: Vec<String>, + pub data: String, + pub stack_height: Option<u32>, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")] +pub struct ParsedInstruction { + pub program: String, + pub program_id: String, + pub parsed: Value, + pub stack_height: Option<u32>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UiInnerInstructions { + /// Transaction instruction index + pub index: u8, + /// List of inner instructions + pub instructions: Vec<UiInstruction>, +} + +impl From<InnerInstructions> for UiInnerInstructions { + fn from(inner_instructions: InnerInstructions) -> Self { + Self { + index: inner_instructions.index, + instructions: inner_instructions + .instructions + .iter() + .map( + |InnerInstruction { + instruction: ix, + stack_height, + }| { + UiInstruction::Compiled(UiCompiledInstruction::from(ix, *stack_height)) + }, + ) + .collect(), + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct InnerInstructions { + /// Transaction instruction index + pub index: u8, + /// List of inner instructions + pub instructions: Vec<InnerInstruction>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct InnerInstruction { + /// Compiled instruction + pub instruction: CompiledInstruction, + /// Invocation stack height of the instruction, + pub stack_height: Option<u32>, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct TransactionStatusMeta { + pub status: TransactionResult<()>, + pub fee: u64, + pub pre_balances: Vec<u64>, + pub post_balances: Vec<u64>, + pub inner_instructions: Option<Vec<InnerInstructions>>, + pub log_messages: Option<Vec<String>>, + pub pre_token_balances: Option<Vec<TransactionTokenBalance>>, + pub post_token_balances: Option<Vec<TransactionTokenBalance>>, + pub rewards: Option<Rewards>, + pub loaded_addresses: LoadedAddresses, + pub return_data: Option<TransactionReturnData>, + pub compute_units_consumed: Option<u64>, +} + +impl Default for TransactionStatusMeta { + fn default() -> Self { + Self { + status: Ok(()), + fee: 0, + pre_balances: vec![], + post_balances: vec![], + inner_instructions: None, + log_messages: None, + pre_token_balances: None, + post_token_balances: None, + rewards: None, + loaded_addresses: LoadedAddresses::default(), + return_data: None, + compute_units_consumed: None, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EncodedConfirmedBlock { + pub previous_blockhash: String, + pub blockhash: String, + pub parent_slot: u64, + pub transactions: Vec<EncodedTransactionWithStatusMeta>, + pub rewards: Rewards, + pub num_partitions: Option<u64>, + pub block_time: Option<i64>, + pub block_height: Option<u64>, +} + +impl From<UiConfirmedBlock> for EncodedConfirmedBlock { + fn from(block: UiConfirmedBlock) -> Self { + Self { + previous_blockhash: block.previous_blockhash, + blockhash: block.blockhash, + parent_slot: block.parent_slot, + transactions: block.transactions.unwrap_or_default(), + rewards: block.rewards.unwrap_or_default(), + num_partitions: block.num_reward_partitions, + block_time: block.block_time, + block_height: block.block_height, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EncodedConfirmedTransactionWithStatusMeta { + pub slot: u64, + #[serde(flatten)] + pub transaction: EncodedTransactionWithStatusMeta, + pub block_time: Option<i64>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionStatus { + pub slot: u64, + pub confirmations: Option<usize>, // None = rooted + pub status: TransactionResult<()>, // legacy field + pub err: Option<TransactionError>, + pub confirmation_status: Option<TransactionConfirmationStatus>, +} + +impl TransactionStatus { + pub fn satisfies_commitment(&self, commitment_config: CommitmentConfig) ->
bool { + if commitment_config.is_finalized() { + self.confirmations.is_none() + } else if commitment_config.is_confirmed() { + if let Some(status) = &self.confirmation_status { + *status != TransactionConfirmationStatus::Processed + } else { + // These fallback cases handle TransactionStatus RPC responses from older software + self.confirmations.is_some() && self.confirmations.unwrap() > 1 + || self.confirmations.is_none() + } + } else { + true + } + } + + // Returns `confirmation_status`, or if is_none, determines the status from confirmations. + // Facilitates querying nodes on older software + pub fn confirmation_status(&self) -> TransactionConfirmationStatus { + match &self.confirmation_status { + Some(status) => status.clone(), + None => { + if self.confirmations.is_none() { + TransactionConfirmationStatus::Finalized + } else if self.confirmations.unwrap() > 0 { + TransactionConfirmationStatus::Confirmed + } else { + TransactionConfirmationStatus::Processed + } + } + } + } +} + +#[cfg(test)] +mod test { + use {super::*, serde_json::json}; + + #[test] + fn test_decode_invalid_transaction() { + // This transaction will not pass sanitization + let unsanitary_transaction = EncodedTransaction::Binary( + "ju9xZWuDBX4pRxX2oZkTjxU5jB4SSTgEGhX8bQ8PURNzyzqKMPPpNvWihx8zUe\ + FfrbVNoAaEsNKZvGzAnTDy5bhNT9kt6KFCTBixpvrLCzg4M5UdFUQYrn1gdgjX\ + pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\ + hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK" + .to_string(), + TransactionBinaryEncoding::Base58, + ); + assert!(unsanitary_transaction.decode().is_none()); + } + + #[test] + fn test_satisfies_commitment() { + let status = TransactionStatus { + slot: 0, + confirmations: None, + status: Ok(()), + err: None, + confirmation_status: Some(TransactionConfirmationStatus::Finalized), + }; + + assert!(status.satisfies_commitment(CommitmentConfig::finalized())); + assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); + assert!(status.satisfies_commitment(CommitmentConfig::processed())); + + let status = TransactionStatus { + slot: 0, + confirmations: Some(10), + status: Ok(()), + err: None, + confirmation_status: Some(TransactionConfirmationStatus::Confirmed), + }; + + assert!(!status.satisfies_commitment(CommitmentConfig::finalized())); + assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); + assert!(status.satisfies_commitment(CommitmentConfig::processed())); + + let status = TransactionStatus { + slot: 0, + confirmations: Some(1), + status: Ok(()), + err: None, + confirmation_status: Some(TransactionConfirmationStatus::Processed), + }; + + assert!(!status.satisfies_commitment(CommitmentConfig::finalized())); + assert!(!status.satisfies_commitment(CommitmentConfig::confirmed())); + assert!(status.satisfies_commitment(CommitmentConfig::processed())); + + let status = TransactionStatus { + slot: 0, + confirmations: Some(0), + status: Ok(()), + err: None, + confirmation_status: None, + }; + + assert!(!status.satisfies_commitment(CommitmentConfig::finalized())); + assert!(!status.satisfies_commitment(CommitmentConfig::confirmed())); + assert!(status.satisfies_commitment(CommitmentConfig::processed())); + + // Test single_gossip fallback cases + let status = TransactionStatus { + slot: 0, + confirmations: Some(1), + status: Ok(()), + err: None, + confirmation_status: None, + }; + assert!(!status.satisfies_commitment(CommitmentConfig::confirmed())); + + let status = TransactionStatus { + slot: 0, + confirmations: Some(2), + status: Ok(()), + err: None, + 
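// confirmation_status is deliberately left as None below; satisfies_commitment() then falls back to the confirmations count, and a count greater than 1 (here Some(2)) is treated as confirmed.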
confirmation_status: None, + }; + assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); + + let status = TransactionStatus { + slot: 0, + confirmations: None, + status: Ok(()), + err: None, + confirmation_status: None, + }; + assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); + } + + #[test] + fn test_serde_empty_fields() { + fn test_serde<'de, T: serde::Serialize + serde::Deserialize<'de>>( + json_input: &'de str, + expected_json_output: &str, + ) { + let typed_meta: T = serde_json::from_str(json_input).unwrap(); + let reserialized_value = json!(typed_meta); + + let expected_json_output_value: serde_json::Value = + serde_json::from_str(expected_json_output).unwrap(); + assert_eq!(reserialized_value, expected_json_output_value); + } + + let json_input = "{\ + \"err\":null,\ + \"status\":{\"Ok\":null},\ + \"fee\":1234,\ + \"preBalances\":[1,2,3],\ + \"postBalances\":[4,5,6]\ + }"; + let expected_json_output = "{\ + \"err\":null,\ + \"status\":{\"Ok\":null},\ + \"fee\":1234,\ + \"preBalances\":[1,2,3],\ + \"postBalances\":[4,5,6],\ + \"innerInstructions\":null,\ + \"logMessages\":null,\ + \"preTokenBalances\":null,\ + \"postTokenBalances\":null,\ + \"rewards\":null\ + }"; + test_serde::(json_input, expected_json_output); + + let json_input = "{\ + \"accountIndex\":5,\ + \"mint\":\"DXM2yVSouSg1twmQgHLKoSReqXhtUroehWxrTgPmmfWi\",\ + \"uiTokenAmount\": { + \"amount\": \"1\",\ + \"decimals\": 0,\ + \"uiAmount\": 1.0,\ + \"uiAmountString\": \"1\"\ + }\ + }"; + let expected_json_output = "{\ + \"accountIndex\":5,\ + \"mint\":\"DXM2yVSouSg1twmQgHLKoSReqXhtUroehWxrTgPmmfWi\",\ + \"uiTokenAmount\": { + \"amount\": \"1\",\ + \"decimals\": 0,\ + \"uiAmount\": 1.0,\ + \"uiAmountString\": \"1\"\ + }\ + }"; + test_serde::(json_input, expected_json_output); + } +} diff --git a/transaction-status/src/option_serializer.rs b/transaction-status-client-types/src/option_serializer.rs similarity index 100% rename from transaction-status/src/option_serializer.rs rename to transaction-status-client-types/src/option_serializer.rs diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 24a8434ab4ad31..3d913df0864d48 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -22,6 +22,7 @@ serde_derive = { workspace = true } serde_json = { workspace = true } solana-account-decoder = { workspace = true } solana-sdk = { workspace = true } +solana-transaction-status-client-types = { workspace = true } spl-associated-token-account = { workspace = true, features = ["no-entrypoint"] } spl-memo = { workspace = true, features = ["no-entrypoint"] } spl-token = { workspace = true, features = ["no-entrypoint"] } diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 76f7e277c1571c..e7d0719e0f26e4 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -1,33 +1,42 @@ #![allow(clippy::arithmetic_side_effects)] -pub use {crate::extract_memos::extract_and_fmt_memos, solana_sdk::reward_type::RewardType}; +pub use { + crate::extract_memos::extract_and_fmt_memos, + solana_sdk::reward_type::RewardType, + solana_transaction_status_client_types::{ + option_serializer, ConfirmedTransactionStatusWithSignature, EncodeError, + EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, EncodedTransaction, + EncodedTransactionWithStatusMeta, InnerInstruction, InnerInstructions, Reward, Rewards, + TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionDetails, + TransactionStatus, 
TransactionStatusMeta, TransactionTokenBalance, UiAccountsList, + UiAddressTableLookup, UiCompiledInstruction, UiConfirmedBlock, UiInnerInstructions, + UiInstruction, UiLoadedAddresses, UiMessage, UiParsedInstruction, UiParsedMessage, + UiPartiallyDecodedInstruction, UiRawMessage, UiReturnDataEncoding, UiTransaction, + UiTransactionEncoding, UiTransactionReturnData, UiTransactionStatusMeta, + UiTransactionTokenBalance, + }, +}; use { crate::{ option_serializer::OptionSerializer, - parse_accounts::{parse_legacy_message_accounts, parse_v0_message_accounts, ParsedAccount}, - parse_instruction::{parse, ParsedInstruction}, + parse_accounts::{parse_legacy_message_accounts, parse_v0_message_accounts}, + parse_instruction::parse, }, base64::{prelude::BASE64_STANDARD, Engine}, - solana_account_decoder::parse_token::UiTokenAmount, solana_sdk::{ clock::{Slot, UnixTimestamp}, - commitment_config::CommitmentConfig, hash::Hash, instruction::CompiledInstruction, message::{ - v0::{self, LoadedAddresses, LoadedMessage, MessageAddressTableLookup}, - AccountKeys, Message, MessageHeader, VersionedMessage, + v0::{self, LoadedAddresses, LoadedMessage}, + AccountKeys, Message, VersionedMessage, }, pubkey::Pubkey, reserved_account_keys::ReservedAccountKeys, signature::Signature, - transaction::{ - Result as TransactionResult, Transaction, TransactionError, TransactionVersion, - VersionedTransaction, - }, - transaction_context::TransactionReturnData, + transaction::{Transaction, TransactionError, TransactionVersion, VersionedTransaction}, }, - std::{collections::HashSet, fmt}, + std::collections::HashSet, thiserror::Error, }; @@ -37,7 +46,6 @@ extern crate lazy_static; extern crate serde_derive; pub mod extract_memos; -pub mod option_serializer; pub mod parse_accounts; pub mod parse_address_lookup_table; pub mod parse_associated_token; @@ -55,12 +63,6 @@ pub struct BlockEncodingOptions { pub max_supported_transaction_version: Option, } -#[derive(Error, Debug, PartialEq, Eq, Clone)] -pub enum EncodeError { - #[error("Encoding does not support transaction version {0}")] - UnsupportedTransactionVersion(u8), -} - /// Represents types that can be encoded into one of several encoding formats pub trait Encodable { type Encoded; @@ -83,154 +85,38 @@ trait JsonAccounts { fn build_json_accounts(&self) -> Self::Encoded; } -#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[serde(rename_all = "camelCase")] -pub enum TransactionBinaryEncoding { - Base58, - Base64, -} - -#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[serde(rename_all = "camelCase")] -pub enum UiTransactionEncoding { - Binary, // Legacy. 
Retained for RPC backwards compatibility - Base64, - Base58, - Json, - JsonParsed, -} - -impl UiTransactionEncoding { - pub fn into_binary_encoding(&self) -> Option { - match self { - Self::Binary | Self::Base58 => Some(TransactionBinaryEncoding::Base58), - Self::Base64 => Some(TransactionBinaryEncoding::Base64), - _ => None, - } - } -} - -impl fmt::Display for UiTransactionEncoding { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let v = serde_json::to_value(self).map_err(|_| fmt::Error)?; - let s = v.as_str().ok_or(fmt::Error)?; - write!(f, "{s}") - } -} - -#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum TransactionDetails { - Full, - Signatures, - None, - Accounts, -} - -impl Default for TransactionDetails { - fn default() -> Self { - Self::Full - } -} - -/// A duplicate representation of an Instruction for pretty JSON serialization -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] -pub enum UiInstruction { - Compiled(UiCompiledInstruction), - Parsed(UiParsedInstruction), -} - -impl UiInstruction { - pub fn parse( - instruction: &CompiledInstruction, - account_keys: &AccountKeys, - stack_height: Option, - ) -> Self { - let program_id = &account_keys[instruction.program_id_index as usize]; - if let Ok(parsed_instruction) = parse(program_id, instruction, account_keys, stack_height) { - UiInstruction::Parsed(UiParsedInstruction::Parsed(parsed_instruction)) - } else { - UiInstruction::Parsed(UiParsedInstruction::PartiallyDecoded( - UiPartiallyDecodedInstruction::from(instruction, account_keys, stack_height), - )) - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] -pub enum UiParsedInstruction { - Parsed(ParsedInstruction), - PartiallyDecoded(UiPartiallyDecodedInstruction), -} - -/// A duplicate representation of a CompiledInstruction for pretty JSON serialization -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiCompiledInstruction { - pub program_id_index: u8, - pub accounts: Vec, - pub data: String, - pub stack_height: Option, -} - -impl UiCompiledInstruction { - fn from(instruction: &CompiledInstruction, stack_height: Option) -> Self { - Self { - program_id_index: instruction.program_id_index, - accounts: instruction.accounts.clone(), - data: bs58::encode(&instruction.data).into_string(), - stack_height, - } +fn make_ui_partially_decoded_instruction( + instruction: &CompiledInstruction, + account_keys: &AccountKeys, + stack_height: Option, +) -> UiPartiallyDecodedInstruction { + UiPartiallyDecodedInstruction { + program_id: account_keys[instruction.program_id_index as usize].to_string(), + accounts: instruction + .accounts + .iter() + .map(|&i| account_keys[i as usize].to_string()) + .collect(), + data: bs58::encode(instruction.data.clone()).into_string(), + stack_height, } } -/// A partially decoded CompiledInstruction that includes explicit account addresses -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiPartiallyDecodedInstruction { - pub program_id: String, - pub accounts: Vec, - pub data: String, - pub stack_height: Option, -} - -impl UiPartiallyDecodedInstruction { - fn from( - instruction: &CompiledInstruction, - account_keys: &AccountKeys, - stack_height: Option, - ) -> Self { - Self { - program_id: 
account_keys[instruction.program_id_index as usize].to_string(), - accounts: instruction - .accounts - .iter() - .map(|&i| account_keys[i as usize].to_string()) - .collect(), - data: bs58::encode(instruction.data.clone()).into_string(), - stack_height, - } +pub fn parse_ui_instruction( + instruction: &CompiledInstruction, + account_keys: &AccountKeys, + stack_height: Option, +) -> UiInstruction { + let program_id = &account_keys[instruction.program_id_index as usize]; + if let Ok(parsed_instruction) = parse(program_id, instruction, account_keys, stack_height) { + UiInstruction::Parsed(UiParsedInstruction::Parsed(parsed_instruction)) + } else { + UiInstruction::Parsed(UiParsedInstruction::PartiallyDecoded( + make_ui_partially_decoded_instruction(instruction, account_keys, stack_height), + )) } } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct InnerInstructions { - /// Transaction instruction index - pub index: u8, - /// List of inner instructions - pub instructions: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct InnerInstruction { - /// Compiled instruction - pub instruction: CompiledInstruction, - /// Invocation stack height of the instruction, - pub stack_height: Option, -} - /// Maps a list of inner instructions from `solana_sdk` into a list of this /// crate's representation of inner instructions (with instruction indices). pub fn map_inner_instructions( @@ -252,382 +138,94 @@ pub fn map_inner_instructions( .filter(|i| !i.instructions.is_empty()) } -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiInnerInstructions { - /// Transaction instruction index - pub index: u8, - /// List of inner instructions - pub instructions: Vec, -} - -impl UiInnerInstructions { - pub fn parse(inner_instructions: InnerInstructions, account_keys: &AccountKeys) -> Self { - Self { - index: inner_instructions.index, - instructions: inner_instructions - .instructions - .iter() - .map( - |InnerInstruction { - instruction: ix, - stack_height, - }| { - UiInstruction::parse(ix, account_keys, *stack_height) - }, - ) - .collect(), - } - } -} - -impl From for UiInnerInstructions { - fn from(inner_instructions: InnerInstructions) -> Self { - Self { - index: inner_instructions.index, - instructions: inner_instructions - .instructions - .iter() - .map( - |InnerInstruction { - instruction: ix, - stack_height, - }| { - UiInstruction::Compiled(UiCompiledInstruction::from(ix, *stack_height)) - }, - ) - .collect(), - } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct TransactionTokenBalance { - pub account_index: u8, - pub mint: String, - pub ui_token_amount: UiTokenAmount, - pub owner: String, - pub program_id: String, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiTransactionTokenBalance { - pub account_index: u8, - pub mint: String, - pub ui_token_amount: UiTokenAmount, - #[serde( - default = "OptionSerializer::skip", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub owner: OptionSerializer, - #[serde( - default = "OptionSerializer::skip", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub program_id: OptionSerializer, -} - -impl From for UiTransactionTokenBalance { - fn from(token_balance: TransactionTokenBalance) -> Self { - Self { - account_index: token_balance.account_index, - mint: token_balance.mint, - ui_token_amount: token_balance.ui_token_amount, - owner: if 
!token_balance.owner.is_empty() { - OptionSerializer::Some(token_balance.owner) - } else { - OptionSerializer::Skip - }, - program_id: if !token_balance.program_id.is_empty() { - OptionSerializer::Some(token_balance.program_id) - } else { - OptionSerializer::Skip - }, - } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct TransactionStatusMeta { - pub status: TransactionResult<()>, - pub fee: u64, - pub pre_balances: Vec, - pub post_balances: Vec, - pub inner_instructions: Option>, - pub log_messages: Option>, - pub pre_token_balances: Option>, - pub post_token_balances: Option>, - pub rewards: Option, - pub loaded_addresses: LoadedAddresses, - pub return_data: Option, - pub compute_units_consumed: Option, -} - -impl Default for TransactionStatusMeta { - fn default() -> Self { - Self { - status: Ok(()), - fee: 0, - pre_balances: vec![], - post_balances: vec![], - inner_instructions: None, - log_messages: None, - pre_token_balances: None, - post_token_balances: None, - rewards: None, - loaded_addresses: LoadedAddresses::default(), - return_data: None, - compute_units_consumed: None, - } - } -} - -/// A duplicate representation of TransactionStatusMeta with `err` field -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiTransactionStatusMeta { - pub err: Option, - pub status: TransactionResult<()>, // This field is deprecated. See https://github.com/solana-labs/solana/issues/9302 - pub fee: u64, - pub pre_balances: Vec, - pub post_balances: Vec, - #[serde( - default = "OptionSerializer::none", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub inner_instructions: OptionSerializer>, - #[serde( - default = "OptionSerializer::none", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub log_messages: OptionSerializer>, - #[serde( - default = "OptionSerializer::none", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub pre_token_balances: OptionSerializer>, - #[serde( - default = "OptionSerializer::none", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub post_token_balances: OptionSerializer>, - #[serde( - default = "OptionSerializer::none", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub rewards: OptionSerializer, - #[serde( - default = "OptionSerializer::skip", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub loaded_addresses: OptionSerializer, - #[serde( - default = "OptionSerializer::skip", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub return_data: OptionSerializer, - #[serde( - default = "OptionSerializer::skip", - skip_serializing_if = "OptionSerializer::should_skip" - )] - pub compute_units_consumed: OptionSerializer, -} - -/// A duplicate representation of LoadedAddresses -#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiLoadedAddresses { - pub writable: Vec, - pub readonly: Vec, -} - -impl From<&LoadedAddresses> for UiLoadedAddresses { - fn from(loaded_addresses: &LoadedAddresses) -> Self { - Self { - writable: loaded_addresses - .writable - .iter() - .map(ToString::to_string) - .collect(), - readonly: loaded_addresses - .readonly - .iter() - .map(ToString::to_string) - .collect(), - } - } -} - -impl UiTransactionStatusMeta { - fn parse(meta: TransactionStatusMeta, static_keys: &[Pubkey], show_rewards: bool) -> Self { - let account_keys = AccountKeys::new(static_keys, Some(&meta.loaded_addresses)); - Self { - err: 
meta.status.clone().err(), - status: meta.status, - fee: meta.fee, - pre_balances: meta.pre_balances, - post_balances: meta.post_balances, - inner_instructions: meta - .inner_instructions - .map(|ixs| { - ixs.into_iter() - .map(|ix| UiInnerInstructions::parse(ix, &account_keys)) - .collect() - }) - .into(), - log_messages: meta.log_messages.into(), - pre_token_balances: meta - .pre_token_balances - .map(|balance| balance.into_iter().map(Into::into).collect()) - .into(), - post_token_balances: meta - .post_token_balances - .map(|balance| balance.into_iter().map(Into::into).collect()) - .into(), - rewards: if show_rewards { meta.rewards } else { None }.into(), - loaded_addresses: OptionSerializer::Skip, - return_data: OptionSerializer::or_skip( - meta.return_data.map(|return_data| return_data.into()), - ), - compute_units_consumed: OptionSerializer::or_skip(meta.compute_units_consumed), - } - } - - fn build_simple(meta: TransactionStatusMeta, show_rewards: bool) -> Self { - Self { - err: meta.status.clone().err(), - status: meta.status, - fee: meta.fee, - pre_balances: meta.pre_balances, - post_balances: meta.post_balances, - inner_instructions: OptionSerializer::Skip, - log_messages: OptionSerializer::Skip, - pre_token_balances: meta - .pre_token_balances - .map(|balance| balance.into_iter().map(Into::into).collect()) - .into(), - post_token_balances: meta - .post_token_balances - .map(|balance| balance.into_iter().map(Into::into).collect()) - .into(), - rewards: if show_rewards { - meta.rewards.into() - } else { - OptionSerializer::Skip - }, - loaded_addresses: OptionSerializer::Skip, - return_data: OptionSerializer::Skip, - compute_units_consumed: OptionSerializer::Skip, - } - } -} - -impl From for UiTransactionStatusMeta { - fn from(meta: TransactionStatusMeta) -> Self { - Self { - err: meta.status.clone().err(), - status: meta.status, - fee: meta.fee, - pre_balances: meta.pre_balances, - post_balances: meta.post_balances, - inner_instructions: meta - .inner_instructions - .map(|ixs| ixs.into_iter().map(Into::into).collect()) - .into(), - log_messages: meta.log_messages.into(), - pre_token_balances: meta - .pre_token_balances - .map(|balance| balance.into_iter().map(Into::into).collect()) - .into(), - post_token_balances: meta - .post_token_balances - .map(|balance| balance.into_iter().map(Into::into).collect()) - .into(), - rewards: meta.rewards.into(), - loaded_addresses: Some(UiLoadedAddresses::from(&meta.loaded_addresses)).into(), - return_data: OptionSerializer::or_skip( - meta.return_data.map(|return_data| return_data.into()), - ), - compute_units_consumed: OptionSerializer::or_skip(meta.compute_units_consumed), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum TransactionConfirmationStatus { - Processed, - Confirmed, - Finalized, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionStatus { - pub slot: Slot, - pub confirmations: Option, // None = rooted - pub status: TransactionResult<()>, // legacy field - pub err: Option, - pub confirmation_status: Option, -} - -impl TransactionStatus { - pub fn satisfies_commitment(&self, commitment_config: CommitmentConfig) -> bool { - if commitment_config.is_finalized() { - self.confirmations.is_none() - } else if commitment_config.is_confirmed() { - if let Some(status) = &self.confirmation_status { - *status != TransactionConfirmationStatus::Processed - } else { - // These fallback cases handle 
TransactionStatus RPC responses from older software - self.confirmations.is_some() && self.confirmations.unwrap() > 1 - || self.confirmations.is_none() - } +pub fn parse_ui_inner_instructions( + inner_instructions: InnerInstructions, + account_keys: &AccountKeys, +) -> UiInnerInstructions { + UiInnerInstructions { + index: inner_instructions.index, + instructions: inner_instructions + .instructions + .iter() + .map( + |InnerInstruction { + instruction: ix, + stack_height, + }| { parse_ui_instruction(ix, account_keys, *stack_height) }, + ) + .collect(), + } +} + +fn build_simple_ui_transaction_status_meta( + meta: TransactionStatusMeta, + show_rewards: bool, +) -> UiTransactionStatusMeta { + UiTransactionStatusMeta { + err: meta.status.clone().err(), + status: meta.status, + fee: meta.fee, + pre_balances: meta.pre_balances, + post_balances: meta.post_balances, + inner_instructions: OptionSerializer::Skip, + log_messages: OptionSerializer::Skip, + pre_token_balances: meta + .pre_token_balances + .map(|balance| balance.into_iter().map(Into::into).collect()) + .into(), + post_token_balances: meta + .post_token_balances + .map(|balance| balance.into_iter().map(Into::into).collect()) + .into(), + rewards: if show_rewards { + meta.rewards.into() } else { - true - } - } - - // Returns `confirmation_status`, or if is_none, determines the status from confirmations. - // Facilitates querying nodes on older software - pub fn confirmation_status(&self) -> TransactionConfirmationStatus { - match &self.confirmation_status { - Some(status) => status.clone(), - None => { - if self.confirmations.is_none() { - TransactionConfirmationStatus::Finalized - } else if self.confirmations.unwrap() > 0 { - TransactionConfirmationStatus::Confirmed - } else { - TransactionConfirmationStatus::Processed - } - } - } + OptionSerializer::Skip + }, + loaded_addresses: OptionSerializer::Skip, + return_data: OptionSerializer::Skip, + compute_units_consumed: OptionSerializer::Skip, + } +} + +fn parse_ui_transaction_status_meta( + meta: TransactionStatusMeta, + static_keys: &[Pubkey], + show_rewards: bool, +) -> UiTransactionStatusMeta { + let account_keys = AccountKeys::new(static_keys, Some(&meta.loaded_addresses)); + UiTransactionStatusMeta { + err: meta.status.clone().err(), + status: meta.status, + fee: meta.fee, + pre_balances: meta.pre_balances, + post_balances: meta.post_balances, + inner_instructions: meta + .inner_instructions + .map(|ixs| { + ixs.into_iter() + .map(|ix| parse_ui_inner_instructions(ix, &account_keys)) + .collect() + }) + .into(), + log_messages: meta.log_messages.into(), + pre_token_balances: meta + .pre_token_balances + .map(|balance| balance.into_iter().map(Into::into).collect()) + .into(), + post_token_balances: meta + .post_token_balances + .map(|balance| balance.into_iter().map(Into::into).collect()) + .into(), + rewards: if show_rewards { meta.rewards } else { None }.into(), + loaded_addresses: OptionSerializer::Skip, + return_data: OptionSerializer::or_skip( + meta.return_data.map(|return_data| return_data.into()), + ), + compute_units_consumed: OptionSerializer::or_skip(meta.compute_units_consumed), } } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ConfirmedTransactionStatusWithSignature { - pub signature: Signature, - pub slot: Slot, - pub err: Option, - pub memo: Option, - pub block_time: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Reward { - pub pubkey: String, - pub lamports: i64, - pub post_balance: u64, 
// Account balance in lamports after `lamports` was applied - pub reward_type: Option, - pub commission: Option, // Vote account commission when the reward was credited, only present for voting and staking rewards -} - -pub type Rewards = Vec; - #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RewardsAndNumPartitions { pub rewards: Rewards, @@ -785,52 +383,6 @@ impl ConfirmedBlock { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EncodedConfirmedBlock { - pub previous_blockhash: String, - pub blockhash: String, - pub parent_slot: Slot, - pub transactions: Vec, - pub rewards: Rewards, - pub num_partitions: Option, - pub block_time: Option, - pub block_height: Option, -} - -impl From for EncodedConfirmedBlock { - fn from(block: UiConfirmedBlock) -> Self { - Self { - previous_blockhash: block.previous_blockhash, - blockhash: block.blockhash, - parent_slot: block.parent_slot, - transactions: block.transactions.unwrap_or_default(), - rewards: block.rewards.unwrap_or_default(), - num_partitions: block.num_reward_partitions, - block_time: block.block_time, - block_height: block.block_height, - } - } -} - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -#[serde(rename_all = "camelCase")] -pub struct UiConfirmedBlock { - pub previous_blockhash: String, - pub blockhash: String, - pub parent_slot: Slot, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub transactions: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub signatures: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub rewards: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub num_reward_partitions: Option, - pub block_time: Option, - pub block_height: Option, -} - // Confirmed block with type guarantees that transaction metadata is always // present, as well as a list of the entry data needed to cryptographically // verify the block. Used for uploading to BigTable. 
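Taken together, the hunks above and below replace inherent constructors on the UI types with free functions of the same shape. A minimal sketch of how an in-crate call site migrates (variable names are illustrative; the old and new signatures are taken from the hunks in this diff):

    // Before: constructors hung off the UI types
    let ui_ix = UiInstruction::parse(&instruction, &account_keys, stack_height);
    let ui_inner = UiInnerInstructions::parse(inner_instructions, &account_keys);

    // After: free functions with the same arguments
    let ui_ix = parse_ui_instruction(&instruction, &account_keys, stack_height);
    let ui_inner = parse_ui_inner_instructions(inner_instructions, &account_keys);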
@@ -966,7 +518,7 @@ impl VersionedTransactionWithStatusMeta { Ok(EncodedTransactionWithStatusMeta { transaction: self.transaction.encode_with_meta(encoding, &self.meta), meta: Some(match encoding { - UiTransactionEncoding::JsonParsed => UiTransactionStatusMeta::parse( + UiTransactionEncoding::JsonParsed => parse_ui_transaction_status_meta( self.meta, self.transaction.message.static_account_keys(), show_rewards, @@ -1020,7 +572,7 @@ impl VersionedTransactionWithStatusMeta { .collect(), account_keys, }), - meta: Some(UiTransactionStatusMeta::build_simple( + meta: Some(build_simple_ui_transaction_status_meta( self.meta, show_rewards, )), @@ -1029,15 +581,6 @@ impl VersionedTransactionWithStatusMeta { } } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EncodedTransactionWithStatusMeta { - pub transaction: EncodedTransaction, - pub meta: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub version: Option, -} - #[derive(Debug, Clone, PartialEq)] pub struct ConfirmedTransactionWithStatusMeta { pub slot: Slot, @@ -1074,24 +617,6 @@ impl ConfirmedTransactionWithStatusMeta { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EncodedConfirmedTransactionWithStatusMeta { - pub slot: Slot, - #[serde(flatten)] - pub transaction: EncodedTransactionWithStatusMeta, - pub block_time: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] -pub enum EncodedTransaction { - LegacyBinary(String), // Old way of expressing base-58, retained for RPC backwards compatibility - Binary(String, TransactionBinaryEncoding), - Json(UiTransaction), - Accounts(UiAccountsList), -} - impl EncodableWithMeta for VersionedTransaction { type Encoded = EncodedTransaction; fn encode_with_meta( @@ -1203,44 +728,6 @@ impl JsonAccounts for Transaction { } } -impl EncodedTransaction { - pub fn decode(&self) -> Option { - let (blob, encoding) = match self { - Self::Json(_) | Self::Accounts(_) => return None, - Self::LegacyBinary(blob) => (blob, TransactionBinaryEncoding::Base58), - Self::Binary(blob, encoding) => (blob, *encoding), - }; - - let transaction: Option = match encoding { - TransactionBinaryEncoding::Base58 => bs58::decode(blob) - .into_vec() - .ok() - .and_then(|bytes| bincode::deserialize(&bytes).ok()), - TransactionBinaryEncoding::Base64 => BASE64_STANDARD - .decode(blob) - .ok() - .and_then(|bytes| bincode::deserialize(&bytes).ok()), - }; - - transaction.filter(|transaction| transaction.sanitize().is_ok()) - } -} - -/// A duplicate representation of a Transaction for pretty JSON serialization -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiTransaction { - pub signatures: Vec, - pub message: UiMessage, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] -pub enum UiMessage { - Parsed(UiParsedMessage), - Raw(UiRawMessage), -} - impl Encodable for Message { type Encoded = UiMessage; fn encode(&self, encoding: UiTransactionEncoding) -> Self::Encoded { @@ -1252,7 +739,7 @@ impl Encodable for Message { instructions: self .instructions .iter() - .map(|instruction| UiInstruction::parse(instruction, &account_keys, None)) + .map(|instruction| parse_ui_instruction(instruction, &account_keys, None)) .collect(), address_table_lookups: None, }) @@ -1286,7 +773,7 @@ impl Encodable for v0::Message { 
instructions: self .instructions .iter() - .map(|instruction| UiInstruction::parse(instruction, &account_keys, None)) + .map(|instruction| parse_ui_instruction(instruction, &account_keys, None)) .collect(), address_table_lookups: None, }) @@ -1327,7 +814,7 @@ impl EncodableWithMeta for v0::Message { instructions: self .instructions .iter() - .map(|instruction| UiInstruction::parse(instruction, &account_keys, None)) + .map(|instruction| parse_ui_instruction(instruction, &account_keys, None)) .collect(), address_table_lookups: Some( self.address_table_lookups.iter().map(Into::into).collect(), @@ -1354,55 +841,6 @@ impl EncodableWithMeta for v0::Message { } } -/// A duplicate representation of a Message, in raw format, for pretty JSON serialization -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiRawMessage { - pub header: MessageHeader, - pub account_keys: Vec, - pub recent_blockhash: String, - pub instructions: Vec, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub address_table_lookups: Option>, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiAccountsList { - pub signatures: Vec, - pub account_keys: Vec, -} - -/// A duplicate representation of a MessageAddressTableLookup, in raw format, for pretty JSON serialization -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiAddressTableLookup { - pub account_key: String, - pub writable_indexes: Vec, - pub readonly_indexes: Vec, -} - -impl From<&MessageAddressTableLookup> for UiAddressTableLookup { - fn from(lookup: &MessageAddressTableLookup) -> Self { - Self { - account_key: lookup.account_key.to_string(), - writable_indexes: lookup.writable_indexes.clone(), - readonly_indexes: lookup.readonly_indexes.clone(), - } - } -} - -/// A duplicate representation of a Message, in parsed format, for pretty JSON serialization -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UiParsedMessage { - pub account_keys: Vec, - pub recent_blockhash: String, - pub instructions: Vec, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub address_table_lookups: Option>, -} - // A serialized `Vec` is stored in the `tx-by-addr` table. 
The row keys are // the one's compliment of the slot so that rows may be listed in reverse order #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -1414,194 +852,9 @@ pub struct TransactionByAddrInfo { pub block_time: Option, } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct UiTransactionReturnData { - pub program_id: String, - pub data: (String, UiReturnDataEncoding), -} - -impl Default for UiTransactionReturnData { - fn default() -> Self { - Self { - program_id: String::default(), - data: (String::default(), UiReturnDataEncoding::Base64), - } - } -} - -impl From for UiTransactionReturnData { - fn from(return_data: TransactionReturnData) -> Self { - Self { - program_id: return_data.program_id.to_string(), - data: ( - BASE64_STANDARD.encode(return_data.data), - UiReturnDataEncoding::Base64, - ), - } - } -} - -#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[serde(rename_all = "camelCase")] -pub enum UiReturnDataEncoding { - Base64, -} - #[cfg(test)] mod test { - use {super::*, serde_json::json}; - - #[test] - fn test_decode_invalid_transaction() { - // This transaction will not pass sanitization - let unsanitary_transaction = EncodedTransaction::Binary( - "ju9xZWuDBX4pRxX2oZkTjxU5jB4SSTgEGhX8bQ8PURNzyzqKMPPpNvWihx8zUe\ - FfrbVNoAaEsNKZvGzAnTDy5bhNT9kt6KFCTBixpvrLCzg4M5UdFUQYrn1gdgjX\ - pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\ - hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK" - .to_string(), - TransactionBinaryEncoding::Base58, - ); - assert!(unsanitary_transaction.decode().is_none()); - } - - #[test] - fn test_satisfies_commitment() { - let status = TransactionStatus { - slot: 0, - confirmations: None, - status: Ok(()), - err: None, - confirmation_status: Some(TransactionConfirmationStatus::Finalized), - }; - - assert!(status.satisfies_commitment(CommitmentConfig::finalized())); - assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); - assert!(status.satisfies_commitment(CommitmentConfig::processed())); - - let status = TransactionStatus { - slot: 0, - confirmations: Some(10), - status: Ok(()), - err: None, - confirmation_status: Some(TransactionConfirmationStatus::Confirmed), - }; - - assert!(!status.satisfies_commitment(CommitmentConfig::finalized())); - assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); - assert!(status.satisfies_commitment(CommitmentConfig::processed())); - - let status = TransactionStatus { - slot: 0, - confirmations: Some(1), - status: Ok(()), - err: None, - confirmation_status: Some(TransactionConfirmationStatus::Processed), - }; - - assert!(!status.satisfies_commitment(CommitmentConfig::finalized())); - assert!(!status.satisfies_commitment(CommitmentConfig::confirmed())); - assert!(status.satisfies_commitment(CommitmentConfig::processed())); - - let status = TransactionStatus { - slot: 0, - confirmations: Some(0), - status: Ok(()), - err: None, - confirmation_status: None, - }; - - assert!(!status.satisfies_commitment(CommitmentConfig::finalized())); - assert!(!status.satisfies_commitment(CommitmentConfig::confirmed())); - assert!(status.satisfies_commitment(CommitmentConfig::processed())); - - // Test single_gossip fallback cases - let status = TransactionStatus { - slot: 0, - confirmations: Some(1), - status: Ok(()), - err: None, - confirmation_status: None, - }; - assert!(!status.satisfies_commitment(CommitmentConfig::confirmed())); - - let status = TransactionStatus { - slot: 0, - confirmations: 
Some(2), - status: Ok(()), - err: None, - confirmation_status: None, - }; - assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); - - let status = TransactionStatus { - slot: 0, - confirmations: None, - status: Ok(()), - err: None, - confirmation_status: None, - }; - assert!(status.satisfies_commitment(CommitmentConfig::confirmed())); - } - - #[test] - fn test_serde_empty_fields() { - fn test_serde<'de, T: serde::Serialize + serde::Deserialize<'de>>( - json_input: &'de str, - expected_json_output: &str, - ) { - let typed_meta: T = serde_json::from_str(json_input).unwrap(); - let reserialized_value = json!(typed_meta); - - let expected_json_output_value: serde_json::Value = - serde_json::from_str(expected_json_output).unwrap(); - assert_eq!(reserialized_value, expected_json_output_value); - } - - let json_input = "{\ - \"err\":null,\ - \"status\":{\"Ok\":null},\ - \"fee\":1234,\ - \"preBalances\":[1,2,3],\ - \"postBalances\":[4,5,6]\ - }"; - let expected_json_output = "{\ - \"err\":null,\ - \"status\":{\"Ok\":null},\ - \"fee\":1234,\ - \"preBalances\":[1,2,3],\ - \"postBalances\":[4,5,6],\ - \"innerInstructions\":null,\ - \"logMessages\":null,\ - \"preTokenBalances\":null,\ - \"postTokenBalances\":null,\ - \"rewards\":null\ - }"; - test_serde::(json_input, expected_json_output); - - let json_input = "{\ - \"accountIndex\":5,\ - \"mint\":\"DXM2yVSouSg1twmQgHLKoSReqXhtUroehWxrTgPmmfWi\",\ - \"uiTokenAmount\": { - \"amount\": \"1\",\ - \"decimals\": 0,\ - \"uiAmount\": 1.0,\ - \"uiAmountString\": \"1\"\ - }\ - }"; - let expected_json_output = "{\ - \"accountIndex\":5,\ - \"mint\":\"DXM2yVSouSg1twmQgHLKoSReqXhtUroehWxrTgPmmfWi\",\ - \"uiTokenAmount\": { - \"amount\": \"1\",\ - \"decimals\": 0,\ - \"uiAmount\": 1.0,\ - \"uiAmountString\": \"1\"\ - }\ - }"; - test_serde::(json_input, expected_json_output); - } + use super::*; #[test] fn test_ui_transaction_status_meta_ctors_serialization() { @@ -1662,13 +915,13 @@ mod test { }", ) .unwrap(); - let ui_meta_parse_with_rewards = UiTransactionStatusMeta::parse(meta.clone(), &[], true); + let ui_meta_parse_with_rewards = parse_ui_transaction_status_meta(meta.clone(), &[], true); assert_eq!( serde_json::to_value(ui_meta_parse_with_rewards).unwrap(), expected_json_output_value ); - let ui_meta_parse_no_rewards = UiTransactionStatusMeta::parse(meta, &[], false); + let ui_meta_parse_no_rewards = parse_ui_transaction_status_meta(meta, &[], false); assert_eq!( serde_json::to_value(ui_meta_parse_no_rewards).unwrap(), expected_json_output_value diff --git a/transaction-status/src/parse_accounts.rs b/transaction-status/src/parse_accounts.rs index 22e1ada6c77ac5..b8edbab4f298ac 100644 --- a/transaction-status/src/parse_accounts.rs +++ b/transaction-status/src/parse_accounts.rs @@ -2,22 +2,7 @@ use solana_sdk::{ message::{v0::LoadedMessage, Message}, reserved_account_keys::ReservedAccountKeys, }; - -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct ParsedAccount { - pub pubkey: String, - pub writable: bool, - pub signer: bool, - pub source: Option, -} - -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub enum ParsedAccountSource { - Transaction, - LookupTable, -} +pub use solana_transaction_status_client_types::{ParsedAccount, ParsedAccountSource}; pub fn parse_legacy_message_accounts(message: &Message) -> Vec { let reserved_account_keys = ReservedAccountKeys::new_all_activated().active; diff --git 
a/transaction-status/src/parse_instruction.rs b/transaction-status/src/parse_instruction.rs
index 0f53c79b57df33..f5f9100b971e6c 100644
--- a/transaction-status/src/parse_instruction.rs
+++ b/transaction-status/src/parse_instruction.rs
@@ -1,3 +1,4 @@
+pub use solana_transaction_status_client_types::ParsedInstruction;
 use {
     crate::{
         extract_memos::{spl_memo_id_v1, spl_memo_id_v3},
@@ -75,15 +76,6 @@ pub enum ParseInstructionError {
     SerdeJsonError(#[from] serde_json::error::Error),
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct ParsedInstruction {
-    pub program: String,
-    pub program_id: String,
-    pub parsed: Value,
-    pub stack_height: Option<u32>,
-}
-
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct ParsedInstructionEnum {

From d7011b5c69810af61cc13d6b057f30926f053199 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Thu, 12 Sep 2024 14:33:54 -0400
Subject: [PATCH 342/529] Removes slot param from shrink_storage() (#2886)

---
 accounts-db/src/accounts_db.rs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 169e3d498f3fa3..06a6235654c3fd 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -4353,7 +4353,9 @@ impl AccountsDb {
         );
     }
 
-    fn do_shrink_slot_store(&self, slot: Slot, store: &AccountStorageEntry) {
+    /// Shrinks `store` by rewriting the alive accounts to a new storage
+    fn shrink_storage(&self, store: &AccountStorageEntry) {
+        let slot = store.slot();
         if self.accounts_cache.contains(slot) {
             // It is not correct to shrink a slot while it is in the write cache until flush is complete and the slot is removed from the write cache.
             // There can exist a window after a slot is made a root and before the write cache flushing for that slot begins and then completes.
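Note that besides the rename, the new signature drops the slot parameter: an AccountStorageEntry already knows its own slot. Call sites (updated in the hunks below) simplify roughly as in this sketch (the receiver name is illustrative):

    // Before: callers passed a slot that had to match the storage entry
    self.do_shrink_slot_store(slot, &store);

    // After: the storage entry is the single source of truth for its slot
    self.shrink_storage(&store); // internally: let slot = store.slot();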
@@ -4555,10 +4557,9 @@ impl AccountsDb { .storage .get_slot_storage_entry_shrinking_in_progress_ok(slot) { - if !Self::is_shrinking_productive(slot, &store) { - return; + if Self::is_shrinking_productive(slot, &store) { + self.shrink_storage(&store) } - self.do_shrink_slot_store(slot, &store) } } @@ -5104,7 +5105,7 @@ impl AccountsDb { .fetch_add(1, Ordering::Relaxed); } let mut measure = Measure::start("shrink_candidate_slots-ms"); - self.do_shrink_slot_store(slot, &slot_shrink_candidate); + self.shrink_storage(&slot_shrink_candidate); measure.stop(); inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize); }); From c8c6d2a641373de3c7a76ca20bc5e8a424e82f5f Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 12 Sep 2024 14:34:07 -0400 Subject: [PATCH 343/529] Uses IntSet in AccountSlots (#2892) --- accounts-db/src/accounts_db.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 06a6235654c3fd..85cd47cad19446 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -821,7 +821,7 @@ impl<'a> MultiThreadProgress<'a> { pub type AtomicAccountsFileId = AtomicU32; pub type AccountsFileId = u32; -type AccountSlots = HashMap>; +type AccountSlots = HashMap>; type SlotOffsets = HashMap>; type ReclaimResult = (AccountSlots, SlotOffsets); type PubkeysRemovedFromAccountsIndex = HashSet; @@ -15444,7 +15444,7 @@ pub mod tests { &pubkeys_removed_from_accounts_index, ); assert_eq!( - vec![(pk1, vec![slot1].into_iter().collect::>())], + vec![(pk1, vec![slot1].into_iter().collect::>())], purged_stored_account_slots.into_iter().collect::>() ); let expected = u64::from(already_removed); @@ -15498,7 +15498,7 @@ pub mod tests { &pubkeys_removed_from_accounts_index, ); assert_eq!( - vec![(pk1, vec![slot1].into_iter().collect::>())], + vec![(pk1, vec![slot1].into_iter().collect::>())], purged_stored_account_slots.into_iter().collect::>() ); assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0); @@ -15536,7 +15536,7 @@ pub mod tests { ); for (pk, slots) in [(pk1, vec![slot1, slot2]), (pk2, vec![slot1])] { let result = purged_stored_account_slots.remove(&pk).unwrap(); - assert_eq!(result, slots.into_iter().collect::>()); + assert_eq!(result, slots.into_iter().collect::>()); } assert!(purged_stored_account_slots.is_empty()); assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0); From 2f8f910a5cae585417a3f01f63542f5a8c4320e9 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 12 Sep 2024 13:59:39 -0500 Subject: [PATCH 344/529] TransactionBatch: generic over transaction type (#2836) --- Cargo.lock | 1 + core/src/banking_stage/committer.rs | 6 +++--- core/src/banking_stage/consumer.rs | 2 +- ledger/Cargo.toml | 1 + ledger/src/blockstore_processor.rs | 21 ++++++++++---------- ledger/src/token_balances.rs | 4 ++-- programs/sbf/Cargo.lock | 1 + runtime/src/bank.rs | 30 ++++++++++++++++++++--------- runtime/src/transaction_batch.rs | 23 ++++++++++++---------- 9 files changed, 54 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 187aba8daa5d97..6c06b529821ce7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6729,6 +6729,7 @@ dependencies = [ "solana-storage-bigtable", "solana-storage-proto", "solana-svm", + "solana-svm-transaction", "solana-timings", "solana-transaction-status", "solana-vote", diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index ff27104251759e..ed718bbdee95fd 100644 --- 
a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -12,7 +12,7 @@ use { transaction_batch::TransactionBatch, vote_sender_types::ReplayVoteSender, }, - solana_sdk::{pubkey::Pubkey, saturating_add_assign}, + solana_sdk::{pubkey::Pubkey, saturating_add_assign, transaction::SanitizedTransaction}, solana_svm::{ transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, transaction_processing_result::{ @@ -67,7 +67,7 @@ impl Committer { pub(super) fn commit_transactions( &self, - batch: &TransactionBatch, + batch: &TransactionBatch, processing_results: Vec, starting_transaction_index: Option, bank: &Arc, @@ -129,7 +129,7 @@ impl Committer { &self, commit_results: Vec, bank: &Arc, - batch: &TransactionBatch, + batch: &TransactionBatch, pre_balance_info: &mut PreBalanceInfo, starting_transaction_index: Option, ) { diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index b8cd383a896634..4bd6f50cd18b65 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -550,7 +550,7 @@ impl Consumer { fn execute_and_commit_transactions_locked( &self, bank: &Arc, - batch: &TransactionBatch, + batch: &TransactionBatch, ) -> ExecuteAndCommitTransactionsOutput { let transaction_status_sender_enabled = self.committer.transaction_status_sender_enabled(); let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default(); diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 44367a30fd5ec7..b12fcddf2b9b41 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -59,6 +59,7 @@ solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-storage-proto = { workspace = true } solana-svm = { workspace = true } +solana-svm-transaction = { workspace = true } solana-timings = { workspace = true } solana-transaction-status = { workspace = true } solana-vote = { workspace = true } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 89a013531f407c..28b1b09b025dea 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -56,6 +56,7 @@ use { transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, transaction_processor::ExecutionRecordingConfig, }, + solana_svm_transaction::svm_transaction::SVMTransaction, solana_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings}, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::vote_account::VoteAccountsHashMap, @@ -77,8 +78,8 @@ use { #[cfg(feature = "dev-context-only-utils")] use {qualifier_attr::qualifiers, solana_runtime::bank::HashOverrides}; -pub struct TransactionBatchWithIndexes<'a, 'b> { - pub batch: TransactionBatch<'a, 'b>, +pub struct TransactionBatchWithIndexes<'a, 'b, Tx: SVMTransaction + Clone> { + pub batch: TransactionBatch<'a, 'b, Tx>, pub transaction_indexes: Vec, } @@ -98,7 +99,7 @@ fn first_err(results: &[Result<()>]) -> Result<()> { // Includes transaction signature for unit-testing fn get_first_error( - batch: &TransactionBatch, + batch: &TransactionBatch, commit_results: &[TransactionCommitResult], ) -> Option<(Result<()>, Signature)> { let mut first_err = None; @@ -133,7 +134,7 @@ fn create_thread_pool(num_threads: usize) -> ThreadPool { } pub fn execute_batch( - batch: &TransactionBatchWithIndexes, + batch: &TransactionBatchWithIndexes, bank: &Arc, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: 
Option<&ReplayVoteSender>, @@ -282,7 +283,7 @@ impl ExecuteBatchesInternalMetrics { fn execute_batches_internal( bank: &Arc, replay_tx_thread_pool: &ThreadPool, - batches: &[TransactionBatchWithIndexes], + batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, log_messages_bytes_limit: Option, @@ -360,7 +361,7 @@ fn execute_batches_internal( fn process_batches( bank: &BankWithScheduler, replay_tx_thread_pool: &ThreadPool, - batches: &[TransactionBatchWithIndexes], + batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, batch_execution_timing: &mut BatchExecutionTiming, @@ -415,7 +416,7 @@ fn process_batches( fn schedule_batches_for_execution( bank: &BankWithScheduler, - batches: &[TransactionBatchWithIndexes], + batches: &[TransactionBatchWithIndexes], ) -> Result<()> { for TransactionBatchWithIndexes { batch, @@ -439,7 +440,7 @@ fn rebatch_transactions<'a>( start: usize, end: usize, transaction_indexes: &'a [usize], -) -> TransactionBatchWithIndexes<'a, 'a> { +) -> TransactionBatchWithIndexes<'a, 'a, SanitizedTransaction> { let txs = &sanitized_txs[start..=end]; let results = &lock_results[start..=end]; let mut tx_batch = TransactionBatch::new(results.to_vec(), bank, Cow::from(txs)); @@ -455,7 +456,7 @@ fn rebatch_transactions<'a>( fn rebatch_and_execute_batches( bank: &Arc, replay_tx_thread_pool: &ThreadPool, - batches: &[TransactionBatchWithIndexes], + batches: &[TransactionBatchWithIndexes], transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timing: &mut BatchExecutionTiming, @@ -494,7 +495,7 @@ fn rebatch_and_execute_batches( let target_batch_count = get_thread_count() as u64; - let mut tx_batches: Vec = vec![]; + let mut tx_batches: Vec> = vec![]; let rebatched_txs = if total_cost > target_batch_count.saturating_mul(minimal_tx_cost) { let target_batch_cost = total_cost / target_batch_count; let mut batch_cost: u64 = 0; diff --git a/ledger/src/token_balances.rs b/ledger/src/token_balances.rs index cc074dfcc0da0a..6190f8f1d20423 100644 --- a/ledger/src/token_balances.rs +++ b/ledger/src/token_balances.rs @@ -6,7 +6,7 @@ use { solana_measure::measure::Measure, solana_metrics::datapoint_debug, solana_runtime::{bank::Bank, transaction_batch::TransactionBatch}, - solana_sdk::{account::ReadableAccount, pubkey::Pubkey}, + solana_sdk::{account::ReadableAccount, pubkey::Pubkey, transaction::SanitizedTransaction}, solana_transaction_status::{ token_balances::TransactionTokenBalances, TransactionTokenBalance, }, @@ -37,7 +37,7 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option { pub fn collect_token_balances( bank: &Bank, - batch: &TransactionBatch, + batch: &TransactionBatch, mint_decimals: &mut HashMap, ) -> TransactionTokenBalances { let mut balances: TransactionTokenBalances = vec![]; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 622c921d0512ea..c6c664274fa9a0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5273,6 +5273,7 @@ dependencies = [ "solana-storage-bigtable", "solana-storage-proto", "solana-svm", + "solana-svm-transaction", "solana-timings", "solana-transaction-status", "solana-vote", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ee6e785a1e6b68..d4e78bf5180ae4 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3317,7 +3317,10 @@ impl Bank { /// Prepare a 
transaction batch from a list of versioned transactions from /// an entry. Used for tests only. - pub fn prepare_entry_batch(&self, txs: Vec) -> Result { + pub fn prepare_entry_batch( + &self, + txs: Vec, + ) -> Result> { let sanitized_txs = txs .into_iter() .map(|tx| { @@ -3346,7 +3349,7 @@ impl Bank { pub fn prepare_sanitized_batch<'a, 'b>( &'a self, txs: &'b [SanitizedTransaction], - ) -> TransactionBatch<'a, 'b> { + ) -> TransactionBatch<'a, 'b, SanitizedTransaction> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_results = self .rc @@ -3361,7 +3364,7 @@ impl Bank { &'a self, transactions: &'b [SanitizedTransaction], transaction_results: impl Iterator>, - ) -> TransactionBatch<'a, 'b> { + ) -> TransactionBatch<'a, 'b, SanitizedTransaction> { // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_results = self.rc.accounts.lock_accounts_with_results( @@ -3376,7 +3379,7 @@ impl Bank { pub fn prepare_unlocked_batch_from_single_tx<'a>( &'a self, transaction: &'a SanitizedTransaction, - ) -> TransactionBatch<'_, '_> { + ) -> TransactionBatch<'_, '_, SanitizedTransaction> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_result = validate_account_locks(transaction.message().account_keys(), tx_account_lock_limit); @@ -3534,7 +3537,10 @@ impl Bank { .is_hash_valid_for_age(hash, max_age) } - pub fn collect_balances(&self, batch: &TransactionBatch) -> TransactionBalances { + pub fn collect_balances( + &self, + batch: &TransactionBatch, + ) -> TransactionBalances { let mut balances: TransactionBalances = vec![]; for transaction in batch.sanitized_transactions() { let mut transaction_balances: Vec = vec![]; @@ -3548,7 +3554,7 @@ impl Bank { pub fn load_and_execute_transactions( &self, - batch: &TransactionBatch, + batch: &TransactionBatch, max_age: usize, timings: &mut ExecuteTimings, error_counters: &mut TransactionErrorMetrics, @@ -4682,7 +4688,7 @@ impl Bank { #[must_use] pub fn load_execute_and_commit_transactions( &self, - batch: &TransactionBatch, + batch: &TransactionBatch, max_age: usize, collect_balances: bool, recording_config: ExecutionRecordingConfig, @@ -4788,7 +4794,10 @@ impl Bank { } #[must_use] - fn process_transaction_batch(&self, batch: &TransactionBatch) -> Vec> { + fn process_transaction_batch( + &self, + batch: &TransactionBatch, + ) -> Vec> { self.load_execute_and_commit_transactions( batch, MAX_PROCESSING_AGE, @@ -6796,7 +6805,10 @@ impl Bank { } /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. 
- pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { + pub fn prepare_batch_for_tests( + &self, + txs: Vec, + ) -> TransactionBatch { let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); let sanitized_txs = txs .into_iter() diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs index ecec27e02e93aa..1d33be2c57c591 100644 --- a/runtime/src/transaction_batch.rs +++ b/runtime/src/transaction_batch.rs @@ -1,22 +1,21 @@ use { - crate::bank::Bank, - solana_sdk::transaction::{Result, SanitizedTransaction}, - std::borrow::Cow, + crate::bank::Bank, solana_sdk::transaction::Result, + solana_svm_transaction::svm_transaction::SVMTransaction, std::borrow::Cow, }; // Represents the results of trying to lock a set of accounts -pub struct TransactionBatch<'a, 'b> { +pub struct TransactionBatch<'a, 'b, Tx: SVMTransaction + Clone> { lock_results: Vec>, bank: &'a Bank, - sanitized_txs: Cow<'b, [SanitizedTransaction]>, + sanitized_txs: Cow<'b, [Tx]>, needs_unlock: bool, } -impl<'a, 'b> TransactionBatch<'a, 'b> { +impl<'a, 'b, Tx: SVMTransaction + Clone> TransactionBatch<'a, 'b, Tx> { pub fn new( lock_results: Vec>, bank: &'a Bank, - sanitized_txs: Cow<'b, [SanitizedTransaction]>, + sanitized_txs: Cow<'b, [Tx]>, ) -> Self { assert_eq!(lock_results.len(), sanitized_txs.len()); Self { @@ -31,7 +30,7 @@ impl<'a, 'b> TransactionBatch<'a, 'b> { &self.lock_results } - pub fn sanitized_transactions(&self) -> &[SanitizedTransaction] { + pub fn sanitized_transactions(&self) -> &[Tx] { &self.sanitized_txs } @@ -82,7 +81,7 @@ impl<'a, 'b> TransactionBatch<'a, 'b> { } // Unlock all locked accounts in destructor. -impl<'a, 'b> Drop for TransactionBatch<'a, 'b> { +impl<'a, 'b, Tx: SVMTransaction + Clone> Drop for TransactionBatch<'a, 'b, Tx> { fn drop(&mut self) { if self.needs_unlock() { self.set_needs_unlock(false); @@ -100,7 +99,11 @@ mod tests { use { super::*, crate::genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - solana_sdk::{signature::Keypair, system_transaction, transaction::TransactionError}, + solana_sdk::{ + signature::Keypair, + system_transaction, + transaction::{SanitizedTransaction, TransactionError}, + }, }; #[test] From 8e95604619eec11ed884034116de151047c021e6 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 12 Sep 2024 18:49:55 -0400 Subject: [PATCH 345/529] Removes slot param from is_shrinking_productive() (#2889) --- accounts-db/src/accounts_db.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 85cd47cad19446..5860a3431db8c9 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4557,7 +4557,7 @@ impl AccountsDb { .storage .get_slot_storage_entry_shrinking_in_progress_ok(slot) { - if Self::is_shrinking_productive(slot, &store) { + if Self::is_shrinking_productive(&store) { self.shrink_storage(&store) } } @@ -8029,7 +8029,7 @@ impl AccountsDb { alive_bytes >= total_bytes } - fn is_shrinking_productive(slot: Slot, store: &AccountStorageEntry) -> bool { + fn is_shrinking_productive(store: &AccountStorageEntry) -> bool { let alive_count = store.count(); let stored_count = store.approx_stored_count(); let alive_bytes = store.alive_bytes() as u64; @@ -8038,7 +8038,7 @@ impl AccountsDb { if Self::should_not_shrink(alive_bytes, total_bytes) { trace!( "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {}/{} ({}b / {}b) save: {}", - slot, + store.slot(), alive_count, 
stored_count, alive_bytes, @@ -8130,7 +8130,7 @@ impl AccountsDb { offsets.sort_unstable(); let dead_bytes = store.accounts.get_account_sizes(&offsets).iter().sum(); store.remove_accounts(dead_bytes, reset_accounts, offsets.len()); - if Self::is_shrinking_productive(*slot, &store) + if Self::is_shrinking_productive(&store) && self.is_candidate_for_shrink(&store) { // Checking that this single storage entry is ready for shrinking, @@ -14930,7 +14930,7 @@ pub mod tests { AccountsFileProvider::AppendVec, )); store.add_account(file_size as usize); - assert!(!AccountsDb::is_shrinking_productive(slot, &store)); + assert!(!AccountsDb::is_shrinking_productive(&store)); let store = Arc::new(AccountStorageEntry::new( path, @@ -14942,10 +14942,10 @@ pub mod tests { store.add_account(file_size as usize / 2); store.add_account(file_size as usize / 4); store.remove_accounts(file_size as usize / 4, false, 1); - assert!(AccountsDb::is_shrinking_productive(slot, &store)); + assert!(AccountsDb::is_shrinking_productive(&store)); store.add_account(file_size as usize / 2); - assert!(!AccountsDb::is_shrinking_productive(slot, &store)); + assert!(!AccountsDb::is_shrinking_productive(&store)); } #[test] From 1b3eb3df5e244cdbdedb7eff8978823ef0611669 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 12 Sep 2024 18:50:31 -0400 Subject: [PATCH 346/529] Uses IntMap in SlotOffsets (#2891) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 5860a3431db8c9..dcff28d0cfbb32 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -822,7 +822,7 @@ pub type AtomicAccountsFileId = AtomicU32; pub type AccountsFileId = u32; type AccountSlots = HashMap>; -type SlotOffsets = HashMap>; +type SlotOffsets = IntMap>; type ReclaimResult = (AccountSlots, SlotOffsets); type PubkeysRemovedFromAccountsIndex = HashSet; type ShrinkCandidates = IntSet; From 5155158296ca25f55591249a3281627b886f4c5f Mon Sep 17 00:00:00 2001 From: John Vandenberg Date: Fri, 13 Sep 2024 21:40:08 +0800 Subject: [PATCH 347/529] Replace async-mutex with async-lock (#2869) --- Cargo.lock | 45 ++++++++++++++++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 41 +++++++++++++++++--- quic-client/Cargo.toml | 2 +- quic-client/src/nonblocking/quic_client.rs | 2 +- 5 files changed, 75 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c06b529821ce7..a65a517821bbde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -677,7 +677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.2", "futures-core", ] @@ -696,12 +696,14 @@ dependencies = [ ] [[package]] -name = "async-mutex" -version = "1.4.0" +name = "async-lock" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener", + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", ] [[package]] @@ -1470,9 +1472,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = 
"4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -2178,6 +2180,27 @@ version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "fake-simd" version = "0.1.2" @@ -3945,6 +3968,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.11.2" @@ -7238,7 +7267,7 @@ dependencies = [ name = "solana-quic-client" version = "2.1.0" dependencies = [ - "async-mutex", + "async-lock", "async-trait", "crossbeam-channel", "futures 0.3.30", diff --git a/Cargo.toml b/Cargo.toml index aa59267b158ccd..ed0b1b0504ee4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -198,7 +198,7 @@ arrayvec = "0.7.6" assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" -async-mutex = "1.4.0" +async-lock = "3.4.0" async-trait = "0.1.82" atty = "0.2.11" backoff = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c6c664274fa9a0..0a49b92d94e2d8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -452,7 +452,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] @@ -471,12 +471,14 @@ dependencies = [ ] [[package]] -name = "async-mutex" -version = "1.4.0" +name = "async-lock" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener", + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", ] [[package]] @@ -1628,6 +1630,27 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "fast-math" version = "0.1.1" @@ -3282,6 +3305,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "parking" +version = "2.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.11.2" @@ -5606,7 +5635,7 @@ dependencies = [ name = "solana-quic-client" version = "2.1.0" dependencies = [ - "async-mutex", + "async-lock", "async-trait", "futures 0.3.30", "itertools 0.12.1", diff --git a/quic-client/Cargo.toml b/quic-client/Cargo.toml index 811b5b8a80a961..05800896c30f80 100644 --- a/quic-client/Cargo.toml +++ b/quic-client/Cargo.toml @@ -10,7 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -async-mutex = { workspace = true } +async-lock = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } itertools = { workspace = true } diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 1195211189a63a..b40e89ec216c9d 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -2,7 +2,7 @@ //! and provides an interface for sending data which is restricted by the //! server's flow control. use { - async_mutex::Mutex, + async_lock::Mutex, async_trait::async_trait, futures::future::{join_all, TryFutureExt}, itertools::Itertools, From 7c82cfd9814f7391ff2933d7d12fca1a75561832 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 13 Sep 2024 23:18:37 +0900 Subject: [PATCH 348/529] Rework the validator scan-for-incorrect-shred-version function (#2851) This scan is important for ensuring that invalidated blocks are cleared from the Blockstore around cluster outage/restart situations. The scan previously relied on --wait-for-supermajority being set in addition to --expected-shred-version. After a cluster had restarted, nodes could restart by downloading a new snapshot, but still having invalid state in their blockstore that disrupted normal operation. 
The key modifications from this change are: - The blockstore is always examined, even with wfsm arg - The blockstore bounds are compared to hard forks to evaluate whether or not the blockstore should be scanned for invalid shreds - The blockstore scan is done with computed shred version to avoid any opportunity for an operator setting a bad expected shred version and wiping their ledger out --- core/src/validator.rs | 264 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 232 insertions(+), 32 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 555b1221df6e1c..7915c2117f4f8b 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -113,6 +113,7 @@ use { epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, exit::Exit, genesis_config::{ClusterType, GenesisConfig}, + hard_forks::HardForks, hash::Hash, pubkey::Pubkey, shred_version::compute_shred_version, @@ -596,25 +597,8 @@ impl Validator { )); } let genesis_config = load_genesis(config, ledger_path)?; - metrics_config_sanity_check(genesis_config.cluster_type)?; - if let Some(expected_shred_version) = config.expected_shred_version { - if let Some(wait_for_supermajority_slot) = config.wait_for_supermajority { - *start_progress.write().unwrap() = ValidatorStartProgress::CleaningBlockStore; - backup_and_clear_blockstore( - ledger_path, - config, - wait_for_supermajority_slot + 1, - expected_shred_version, - ) - .context( - "Failed to backup and clear shreds with incorrect shred version from \ - blockstore", - )?; - } - } - info!("Cleaning accounts paths.."); *start_progress.write().unwrap() = ValidatorStartProgress::CleaningAccounts; let mut timer = Measure::start("clean_accounts_paths"); @@ -741,7 +725,10 @@ impl Validator { check_poh_speed(&bank_forks.read().unwrap().root_bank(), None)?; } - let hard_forks = bank_forks.read().unwrap().root_bank().hard_forks(); + let (root_slot, hard_forks) = { + let root_bank = bank_forks.read().unwrap().root_bank(); + (root_bank.slot(), root_bank.hard_forks()) + }; let shred_version = compute_shred_version(&genesis_config.hash(), Some(&hard_forks)); info!( "shred version: {shred_version}, hard forks: {:?}", @@ -758,6 +745,23 @@ impl Validator { } } + if let Some(start_slot) = should_cleanup_blockstore_incorrect_shred_versions( + config, + &blockstore, + root_slot, + &hard_forks, + )? 
{ + *start_progress.write().unwrap() = ValidatorStartProgress::CleaningBlockStore; + cleanup_blockstore_incorrect_shred_versions( + &blockstore, + config, + start_slot, + shred_version, + )?; + } else { + info!("Skipping the blockstore check for shreds with incorrect version"); + } + node.info.set_shred_version(shred_version); node.info.set_wallclock(timestamp()); Self::print_node_info(&node); @@ -2181,9 +2185,71 @@ fn maybe_warp_slot( Ok(()) } +/// Returns the starting slot at which the blockstore should be scanned for +/// shreds with an incorrect shred version, or None if the check is unnecessary +fn should_cleanup_blockstore_incorrect_shred_versions( + config: &ValidatorConfig, + blockstore: &Blockstore, + root_slot: Slot, + hard_forks: &HardForks, +) -> Result, BlockstoreError> { + // Perform the check if we are booting as part of a cluster restart at slot root_slot + let maybe_cluster_restart_slot = maybe_cluster_restart_with_hard_fork(config, root_slot); + if maybe_cluster_restart_slot.is_some() { + return Ok(Some(root_slot + 1)); + } + + // If there are no hard forks, the shred version cannot have changed + let Some(latest_hard_fork) = hard_forks.iter().last().map(|(slot, _)| *slot) else { + return Ok(None); + }; + + // If the blockstore is empty, there are certainly no shreds with an incorrect version + let Some(blockstore_max_slot) = blockstore.highest_slot()? else { + return Ok(None); + }; + let blockstore_min_slot = blockstore.lowest_slot(); + info!( + "Blockstore contains data from slot {blockstore_min_slot} to {blockstore_max_slot}, the \ + latest hard fork is {latest_hard_fork}" + ); + + if latest_hard_fork < blockstore_min_slot { + // latest_hard_fork < blockstore_min_slot <= blockstore_max_slot + // + // All slots in the blockstore are newer than the latest hard fork, and only shreds with + // the correct shred version should have been inserted since the latest hard fork + // + // This is the normal case where the last cluster restart & hard fork was a while ago; we + // can skip the check for this case + Ok(None) + } else if latest_hard_fork < blockstore_max_slot { + // blockstore_min_slot < latest_hard_fork < blockstore_max_slot + // + // This could be a case where there was a cluster restart, but this node was not part of + // the supermajority that actually restarted the cluster. Rather, this node likely + // downloaded a new snapshot while retaining the blockstore, including slots beyond the + // chosen restart slot. We need to perform the blockstore check for this case + // + // Note that the downloaded snapshot slot (root_slot) could be greater than the latest hard + // fork slot. Even though this node will only replay slots after root_slot, start the check + // at latest_hard_fork + 1 to check (and possibly purge) any invalid state. + Ok(Some(latest_hard_fork + 1)) + } else { + // blockstore_min_slot <= blockstore_max_slot <= latest_hard_fork + // + // All slots in the blockstore are older than the latest hard fork. 
The blockstore check + // would start from latest_hard_fork + 1; skip the check as there are no slots to check + // + // This is kind of an unusual case to hit, maybe a node has been offline for a long time + // and just restarted with a new downloaded snapshot but the old blockstore + Ok(None) + } +} + /// Searches the blockstore for data shreds with a shred version that differs /// from the passed `expected_shred_version` -fn blockstore_contains_incorrect_shred_version( +fn scan_blockstore_for_incorrect_shred_version( blockstore: &Blockstore, start_slot: Slot, expected_shred_version: u16, @@ -2193,7 +2259,7 @@ fn blockstore_contains_incorrect_shred_version( // Search for shreds with incompatible version in blockstore let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot)?; - info!("Searching blockstore for shred with incorrect version.."); + info!("Searching blockstore for shred with incorrect version from slot {start_slot}"); for (slot, _meta) in slot_meta_iterator { let shreds = blockstore.get_data_shreds_for_slot(slot, 0)?; for shred in &shreds { @@ -2211,16 +2277,14 @@ fn blockstore_contains_incorrect_shred_version( /// If the blockstore contains any shreds with the incorrect shred version, /// copy them to a backup blockstore and purge them from the actual blockstore. -fn backup_and_clear_blockstore( - ledger_path: &Path, +fn cleanup_blockstore_incorrect_shred_versions( + blockstore: &Blockstore, config: &ValidatorConfig, start_slot: Slot, expected_shred_version: u16, ) -> Result<(), BlockstoreError> { - let blockstore = - Blockstore::open_with_options(ledger_path, blockstore_options_from_config(config))?; - let incorrect_shred_version = blockstore_contains_incorrect_shred_version( - &blockstore, + let incorrect_shred_version = scan_blockstore_for_incorrect_shred_version( + blockstore, start_slot, expected_shred_version, )?; @@ -2245,7 +2309,7 @@ fn backup_and_clear_blockstore( end_slot ); match Blockstore::open_with_options( - &ledger_path.join(backup_folder), + &blockstore.ledger_path().join(backup_folder), blockstore_options_from_config(config), ) { Ok(backup_blockstore) => { @@ -2341,6 +2405,9 @@ pub enum ValidatorError { #[error("Bad expected bank hash")] BadExpectedBankHash, + #[error("blockstore error: {0}")] + Blockstore(#[source] BlockstoreError), + #[error("genesis hash mismatch: actual={0}, expected={1}")] GenesisHashMismatch(Hash, Hash), @@ -2661,7 +2728,143 @@ mod tests { } #[test] - fn test_backup_and_clear_blockstore() { + fn test_should_cleanup_blockstore_incorrect_shred_versions() { + solana_logger::setup(); + + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let mut validator_config = ValidatorConfig::default_for_test(); + let mut hard_forks = HardForks::default(); + let mut root_slot; + + // Do check from root_slot + 1 if wait_for_supermajority (10) == root_slot (10) + root_slot = 10; + validator_config.wait_for_supermajority = Some(root_slot); + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + Some(root_slot + 1) + ); + + // No check if wait_for_supermajority (10) < root_slot (15) (no hard forks) + // Arguably operator error to pass a value for wait_for_supermajority in this case + root_slot = 15; + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + None, + ); + + // Emulate cluster 
restart at slot 10 + // No check if wait_for_supermajority (10) < root_slot (15) (empty blockstore) + hard_forks.register(10); + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + None, + ); + + // Insert some shreds at newer slots than hard fork + let entries = entry::create_ticks(1, 0, Hash::default()); + for i in 20..35 { + let shreds = blockstore::entries_to_test_shreds( + &entries, + i, // slot + i - 1, // parent_slot + true, // is_full_slot + 1, // version + true, // merkle_variant + ); + blockstore.insert_shreds(shreds, None, true).unwrap(); + } + + // No check as all blockstore data is newer than latest hard fork + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + None, + ); + + // Emulate cluster restart at slot 25 + // Do check from root_slot + 1 regardless of whether wait_for_supermajority set correctly + root_slot = 25; + hard_forks.register(root_slot); + validator_config.wait_for_supermajority = Some(root_slot); + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + Some(root_slot + 1), + ); + validator_config.wait_for_supermajority = None; + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + Some(root_slot + 1), + ); + + // Do check with advanced root slot, even without wait_for_supermajority set correctly + // Check starts from latest hard fork + 1 + root_slot = 30; + let latest_hard_fork = hard_forks.iter().last().unwrap().0; + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + Some(latest_hard_fork + 1), + ); + + // Purge blockstore up to latest hard fork + // No check since all blockstore data newer than latest hard fork + blockstore.purge_slots(0, latest_hard_fork, PurgeType::Exact); + assert_eq!( + should_cleanup_blockstore_incorrect_shred_versions( + &validator_config, + &blockstore, + root_slot, + &hard_forks + ) + .unwrap(), + None, + ); + } + + #[test] + fn test_cleanup_blockstore_incorrect_shred_versions() { solana_logger::setup(); let validator_config = ValidatorConfig::default_for_test(); @@ -2680,12 +2883,9 @@ mod tests { ); blockstore.insert_shreds(shreds, None, true).unwrap(); } - drop(blockstore); // this purges and compacts all slots greater than or equal to 5 - backup_and_clear_blockstore(ledger_path.path(), &validator_config, 5, 2).unwrap(); - - let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + cleanup_blockstore_incorrect_shred_versions(&blockstore, &validator_config, 5, 2).unwrap(); // assert that slots less than 5 aren't affected assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty()); for i in 5..10 { From 031b1eb5be0600c9cd28ea70976b650605ab8a1c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 13 Sep 2024 10:19:22 -0400 Subject: [PATCH 349/529] Uses Offset in SlotOffsets (#2890) --- accounts-db/src/accounts_db.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index dcff28d0cfbb32..9842a6d9d4d0db 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -25,7 +25,7 @@ mod scan_account_storage; use qualifier_attr::qualifiers; use { crate::{ 
- account_info::{AccountInfo, StorageLocation}, + account_info::{AccountInfo, Offset, StorageLocation}, account_storage::{ meta::StoredAccountMeta, AccountStorage, AccountStorageStatus, ShrinkInProgress, }, @@ -822,7 +822,7 @@ pub type AtomicAccountsFileId = AtomicU32; pub type AccountsFileId = u32; type AccountSlots = HashMap>; -type SlotOffsets = IntMap>; +type SlotOffsets = IntMap>; type ReclaimResult = (AccountSlots, SlotOffsets); type PubkeysRemovedFromAccountsIndex = HashSet; type ShrinkCandidates = IntSet; From 875f8b432c57dc5bc488d03d7075620efed4bbc2 Mon Sep 17 00:00:00 2001 From: Ata Tekeli Date: Fri, 13 Sep 2024 17:30:25 +0300 Subject: [PATCH 350/529] revert dockerfile and only have shell script changes (apt to apt-get) (#2917) * Update Dockerfile with apt-get * create multistage build for ci/docker and add apt-get to some of the shell scripts * revert the dockerfile to its original place --- .github/scripts/downstream-project-spl-install-deps.sh | 4 ++-- ci/docker/Dockerfile | 2 +- ci/setup-new-buildkite-agent/setup-new-machine.sh | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/scripts/downstream-project-spl-install-deps.sh b/.github/scripts/downstream-project-spl-install-deps.sh index b2daa79d498f2b..69d5e512e83f68 100755 --- a/.github/scripts/downstream-project-spl-install-deps.sh +++ b/.github/scripts/downstream-project-spl-install-deps.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash set -e -sudo apt update -sudo apt install libudev-dev binutils-dev libunwind-dev protobuf-compiler -y +sudo apt-get update +sudo apt-get install libudev-dev binutils-dev libunwind-dev protobuf-compiler -y diff --git a/ci/docker/Dockerfile b/ci/docker/Dockerfile index 622b017146f817..7f7b69c30ce68f 100644 --- a/ci/docker/Dockerfile +++ b/ci/docker/Dockerfile @@ -124,4 +124,4 @@ RUN \ chmod +x codecov && \ mv codecov /usr/bin && \ # clean lists - rm -rf /var/lib/apt/lists/* + rm -rf /var/lib/apt/lists/* \ No newline at end of file diff --git a/ci/setup-new-buildkite-agent/setup-new-machine.sh b/ci/setup-new-buildkite-agent/setup-new-machine.sh index cbaedd2d0a9dcc..5f309279336c73 100755 --- a/ci/setup-new-buildkite-agent/setup-new-machine.sh +++ b/ci/setup-new-buildkite-agent/setup-new-machine.sh @@ -11,15 +11,15 @@ check_ssh_authorized_keys || exit 1 set -ex -apt update -apt upgrade -y +apt-get update +apt-get upgrade -y cat >/etc/apt/apt.conf.d/99-solana <<'EOF' // Set and persist extra caps on iftop binary Dpkg::Post-Invoke { "which iftop 2>&1 >/dev/null && setcap cap_net_raw=eip $(which iftop) || true"; }; EOF -apt install -y build-essential pkg-config clang cmake sysstat linux-tools-common \ +apt-get install -y build-essential pkg-config clang cmake sysstat linux-tools-common \ linux-generic-hwe-18.04-edge linux-tools-generic-hwe-18.04-edge \ iftop heaptrack jq ruby python3-venv gcc-multilib libudev-dev From b4ed7a4a2d545c6cd484d2c1805a762dea964d43 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Fri, 13 Sep 2024 20:30:08 +0400 Subject: [PATCH 351/529] Extract instruction crate (#2405) * extract instruction crate * remove thiserror * make bincode optional in solana-instruction * make serde optional in solana-instruction * fix doc tests * fmt * harmonize features * fmt Cargo.toml * clippy * fix syscalls.rs imports * update digests * update digest * unused imports * post-rebase fmt * fix doc links * use workspace lints * fmt * make rustc_version dep optional * update digest * update digest * update import * fmt * fix dup import * remove dev-context-only-utils (no longer 
needed) * fmt * remove unnecessary allow() * fix overwriting instruction syscall stubs * fmt * move get_processed_sibling_instruction and get_stack_height back to solana-program to avoid breaking change * fix path * fix typo * move the checked_add utility function back to solana-program * move AccountMeta to its own file * fix bad import * move ProcessedSiblingInstruction to syscalls.rs * move CompiledInstruction back to solana-program * move ProcessedSiblingInstruction back into lib.rs * make std optional in solana-instruction * fix required features for frozen-abi * update digest * update digest * missing import * update digest * don't assume std is imported in frozen-abi macros * fix import warnings * simplify cfg usage --- Cargo.lock | 22 + Cargo.toml | 2 + frozen-abi/macro/src/lib.rs | 4 +- program-test/Cargo.toml | 1 + program-test/src/lib.rs | 2 +- programs/sbf/Cargo.lock | 18 + runtime/src/bank.rs | 2 +- sdk/instruction/Cargo.toml | 58 +++ sdk/instruction/build.rs | 1 + sdk/instruction/src/account_meta.rs | 102 ++++ sdk/instruction/src/error.rs | 434 ++++++++++++++++ sdk/instruction/src/lib.rs | 292 +++++++++++ sdk/instruction/src/syscalls.rs | 8 + sdk/program/Cargo.toml | 14 +- sdk/program/src/instruction.rs | 664 +----------------------- sdk/program/src/program_error.rs | 99 +--- sdk/program/src/program_stubs.rs | 5 +- sdk/program/src/syscalls/definitions.rs | 14 +- sdk/src/transaction/mod.rs | 2 +- 19 files changed, 996 insertions(+), 748 deletions(-) create mode 100644 sdk/instruction/Cargo.toml create mode 120000 sdk/instruction/build.rs create mode 100644 sdk/instruction/src/account_meta.rs create mode 100644 sdk/instruction/src/error.rs create mode 100644 sdk/instruction/src/lib.rs create mode 100644 sdk/instruction/src/syscalls.rs diff --git a/Cargo.lock b/Cargo.lock index a65a517821bbde..fb0f87b78b7640 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6671,6 +6671,26 @@ dependencies = [ "solana-program", ] +[[package]] +name = "solana-instruction" +version = "2.1.0" +dependencies = [ + "bincode", + "borsh 1.5.1", + "getrandom 0.2.10", + "js-sys", + "num-traits", + "rustc_version 0.4.1", + "serde", + "serde_derive", + "solana-define-syscall", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-instruction", + "solana-pubkey", + "wasm-bindgen", +] + [[package]] name = "solana-keygen" version = "2.1.0" @@ -7108,6 +7128,7 @@ dependencies = [ "solana-frozen-abi", "solana-frozen-abi-macro", "solana-hash", + "solana-instruction", "solana-logger", "solana-msg", "solana-program-memory", @@ -7191,6 +7212,7 @@ dependencies = [ "solana-compute-budget", "solana-feature-set", "solana-inline-spl", + "solana-instruction", "solana-log-collector", "solana-logger", "solana-program-runtime", diff --git a/Cargo.toml b/Cargo.toml index ed0b1b0504ee4c..2b8cbd137f4e12 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,7 @@ members = [ "sdk/feature-set", "sdk/gen-headers", "sdk/hash", + "sdk/instruction", "sdk/macro", "sdk/msg", "sdk/package-metadata-macro", @@ -403,6 +404,7 @@ solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.1 solana-gossip = { path = "gossip", version = "=2.1.0" } solana-hash = { path = "sdk/hash", version = "=2.1.0" } solana-inline-spl = { path = "inline-spl", version = "=2.1.0" } +solana-instruction = { path = "sdk/instruction", version = "=2.1.0", default-features = false } solana-lattice-hash = { path = "lattice-hash", version = "=2.1.0" } solana-ledger = { path = "ledger", version = "=2.1.0" } solana-loader-v4-program = { path = 
"programs/loader-v4", version = "=2.1.0" } diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs index 9a735e2c5f7185..bb3f7886169111 100644 --- a/frozen-abi/macro/src/lib.rs +++ b/frozen-abi/macro/src/lib.rs @@ -256,7 +256,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { let enum_name = #type_str; use ::serde::ser::Serialize; use ::solana_frozen_abi::abi_example::AbiExample; - digester.update_with_string(format!("enum {} (variants = {})", enum_name, #variant_count)); + digester.update_with_string(::std::format!("enum {} (variants = {})", enum_name, #variant_count)); #serialized_variants digester.create_child() } @@ -303,7 +303,7 @@ fn quote_for_test( ::solana_frozen_abi::__private::log::error!("digest error: {:#?}", result); } result.unwrap(); - let actual_digest = format!("{}", hash); + let actual_digest = ::std::format!("{}", hash); if ::std::env::var("SOLANA_ABI_BULK_UPDATE").is_ok() { if #expected_digest != actual_digest { #p!("sed -i -e 's/{}/{}/g' $(git grep --files-with-matches frozen_abi)", #expected_digest, hash); diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index c96cb8d28b4ceb..d3df42d38abe44 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -25,6 +25,7 @@ solana-bpf-loader-program = { workspace = true } solana-compute-budget = { workspace = true } solana-feature-set = { workspace = true } solana-inline-spl = { workspace = true } +solana-instruction = { workspace = true } solana-log-collector = { workspace = true } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index d4d6388436c373..38199b270f1d52 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -17,6 +17,7 @@ use { solana_bpf_loader_program::serialization::serialize_parameters, solana_compute_budget::compute_budget::ComputeBudget, solana_feature_set::FEATURE_NAMES, + solana_instruction::{error::InstructionError, Instruction}, solana_log_collector::ic_msg, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, stable_log, @@ -37,7 +38,6 @@ use { fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE}, genesis_config::{ClusterType, GenesisConfig}, hash::Hash, - instruction::{Instruction, InstructionError}, native_token::sol_to_lamports, poh_config::PohConfig, program_error::{ProgramError, UNSUPPORTED_SYSVAR}, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 0a49b92d94e2d8..cbe090c6449fe3 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5241,6 +5241,22 @@ dependencies = [ "solana-program", ] +[[package]] +name = "solana-instruction" +version = "2.1.0" +dependencies = [ + "bincode", + "borsh 1.5.1", + "getrandom 0.2.10", + "js-sys", + "num-traits", + "serde", + "serde_derive", + "solana-define-syscall", + "solana-pubkey", + "wasm-bindgen", +] + [[package]] name = "solana-lattice-hash" version = "2.1.0" @@ -5498,6 +5514,7 @@ dependencies = [ "solana-decode-error", "solana-define-syscall", "solana-hash", + "solana-instruction", "solana-msg", "solana-program-memory", "solana-program-option", @@ -5572,6 +5589,7 @@ dependencies = [ "solana-compute-budget", "solana-feature-set", "solana-inline-spl", + "solana-instruction", "solana-log-collector", "solana-logger", "solana-program-runtime", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d4e78bf5180ae4..78121d7bcf453a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs 
@@ -247,7 +247,7 @@ struct RentMetrics { pub type BankStatusCache = StatusCache>; #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "EQwW6Ym6ECKaAREnAgkhXYisBQovuraBKSALdJ8koZzq") + frozen_abi(digest = "BswQL6n7kKwgHFKcwMCQcrWjt8h59Vh6KkNb75iaqG2B") )] pub type BankSlotDelta = SlotDelta>; diff --git a/sdk/instruction/Cargo.toml b/sdk/instruction/Cargo.toml new file mode 100644 index 00000000000000..4fd6a8d3c7947f --- /dev/null +++ b/sdk/instruction/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "solana-instruction" +description = "Types for directing the execution of Solana programs." +documentation = "https://docs.rs/solana-instruction" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bincode = { workspace = true, optional = true } +borsh = { workspace = true, optional = true } +num-traits = { workspace = true } +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-pubkey = { workspace = true, default-features = false } + +[target.'cfg(target_os = "solana")'.dependencies] +solana-define-syscall = { workspace = true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom = { workspace = true, features = ["js", "wasm-bindgen"] } +js-sys = { workspace = true } +wasm-bindgen = { workspace = true } + +[dev-dependencies] +solana-instruction = { path = ".", features = ["borsh"] } + +[build-dependencies] +rustc_version = { workspace = true, optional = true } + +[features] +bincode = ["dep:bincode", "dep:serde"] +borsh = ["dep:borsh"] +default = ["std"] +frozen-abi = [ + "dep:rustc_version", + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", + "serde", + "std", +] +serde = [ + "dep:serde", + "dep:serde_derive", + "solana-pubkey/serde", +] +std = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/instruction/build.rs b/sdk/instruction/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/sdk/instruction/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/instruction/src/account_meta.rs b/sdk/instruction/src/account_meta.rs new file mode 100644 index 00000000000000..7f9f9a3dbc2974 --- /dev/null +++ b/sdk/instruction/src/account_meta.rs @@ -0,0 +1,102 @@ +use solana_pubkey::Pubkey; + +/// Describes a single account read or written by a program during instruction +/// execution. +/// +/// When constructing an [`Instruction`], a list of all accounts that may be +/// read or written during the execution of that instruction must be supplied. +/// Any account that may be mutated by the program during execution, either its +/// data or metadata such as held lamports, must be writable. +/// +/// Note that because the Solana runtime schedules parallel transaction +/// execution around which accounts are writable, care should be taken that only +/// accounts which actually may be mutated are specified as writable. As the +/// default [`AccountMeta::new`] constructor creates writable accounts, this is +/// a minor hazard: use [`AccountMeta::new_readonly`] to specify that an account +/// is not writable. 
+#[repr(C)] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Serialize, serde_derive::Deserialize) +)] +#[derive(Debug, Default, PartialEq, Eq, Clone)] +pub struct AccountMeta { + /// An account's public key. + pub pubkey: Pubkey, + /// True if an `Instruction` requires a `Transaction` signature matching `pubkey`. + pub is_signer: bool, + /// True if the account data or metadata may be mutated during program execution. + pub is_writable: bool, +} + +impl AccountMeta { + /// Construct metadata for a writable account. + /// + /// # Examples + /// + /// ``` + /// # use solana_pubkey::Pubkey; + /// # use solana_instruction::{AccountMeta, Instruction}; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # + /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] + /// # pub struct MyInstruction; + /// # + /// # let instruction = MyInstruction; + /// # let from = Pubkey::new_unique(); + /// # let to = Pubkey::new_unique(); + /// # let program_id = Pubkey::new_unique(); + /// let instr = Instruction::new_with_borsh( + /// program_id, + /// &instruction, + /// vec![ + /// AccountMeta::new(from, true), + /// AccountMeta::new(to, false), + /// ], + /// ); + /// ``` + pub fn new(pubkey: Pubkey, is_signer: bool) -> Self { + Self { + pubkey, + is_signer, + is_writable: true, + } + } + + /// Construct metadata for a read-only account. + /// + /// # Examples + /// + /// ``` + /// # use solana_pubkey::Pubkey; + /// # use solana_instruction::{AccountMeta, Instruction}; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # + /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] + /// # pub struct MyInstruction; + /// # + /// # let instruction = MyInstruction; + /// # let from = Pubkey::new_unique(); + /// # let to = Pubkey::new_unique(); + /// # let from_account_storage = Pubkey::new_unique(); + /// # let program_id = Pubkey::new_unique(); + /// let instr = Instruction::new_with_borsh( + /// program_id, + /// &instruction, + /// vec![ + /// AccountMeta::new(from, true), + /// AccountMeta::new(to, false), + /// AccountMeta::new_readonly(from_account_storage, false), + /// ], + /// ); + /// ``` + pub fn new_readonly(pubkey: Pubkey, is_signer: bool) -> Self { + Self { + pubkey, + is_signer, + is_writable: false, + } + } +} diff --git a/sdk/instruction/src/error.rs b/sdk/instruction/src/error.rs new file mode 100644 index 00000000000000..5cef257b3f31cb --- /dev/null +++ b/sdk/instruction/src/error.rs @@ -0,0 +1,434 @@ +#[cfg(feature = "frozen-abi")] +use solana_frozen_abi_macro::{AbiEnumVisitor, AbiExample}; +#[cfg(feature = "std")] +use { + core::fmt, + num_traits::ToPrimitive, + std::string::{String, ToString}, +}; + +/// Builtin return values occupy the upper 32 bits +const BUILTIN_BIT_SHIFT: usize = 32; +macro_rules! 
to_builtin { + ($error:expr) => { + ($error as u64) << BUILTIN_BIT_SHIFT + }; +} + +pub const CUSTOM_ZERO: u64 = to_builtin!(1); +pub const INVALID_ARGUMENT: u64 = to_builtin!(2); +pub const INVALID_INSTRUCTION_DATA: u64 = to_builtin!(3); +pub const INVALID_ACCOUNT_DATA: u64 = to_builtin!(4); +pub const ACCOUNT_DATA_TOO_SMALL: u64 = to_builtin!(5); +pub const INSUFFICIENT_FUNDS: u64 = to_builtin!(6); +pub const INCORRECT_PROGRAM_ID: u64 = to_builtin!(7); +pub const MISSING_REQUIRED_SIGNATURES: u64 = to_builtin!(8); +pub const ACCOUNT_ALREADY_INITIALIZED: u64 = to_builtin!(9); +pub const UNINITIALIZED_ACCOUNT: u64 = to_builtin!(10); +pub const NOT_ENOUGH_ACCOUNT_KEYS: u64 = to_builtin!(11); +pub const ACCOUNT_BORROW_FAILED: u64 = to_builtin!(12); +pub const MAX_SEED_LENGTH_EXCEEDED: u64 = to_builtin!(13); +pub const INVALID_SEEDS: u64 = to_builtin!(14); +pub const BORSH_IO_ERROR: u64 = to_builtin!(15); +pub const ACCOUNT_NOT_RENT_EXEMPT: u64 = to_builtin!(16); +pub const UNSUPPORTED_SYSVAR: u64 = to_builtin!(17); +pub const ILLEGAL_OWNER: u64 = to_builtin!(18); +pub const MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED: u64 = to_builtin!(19); +pub const INVALID_ACCOUNT_DATA_REALLOC: u64 = to_builtin!(20); +pub const MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED: u64 = to_builtin!(21); +pub const BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS: u64 = to_builtin!(22); +pub const INVALID_ACCOUNT_OWNER: u64 = to_builtin!(23); +pub const ARITHMETIC_OVERFLOW: u64 = to_builtin!(24); +pub const IMMUTABLE: u64 = to_builtin!(25); +pub const INCORRECT_AUTHORITY: u64 = to_builtin!(26); +// Warning: Any new error codes added here must also be: +// - Added to the below conversions +// - Added as an equivalent to ProgramError and InstructionError +// - Be featurized in the BPF loader to return `InstructionError::InvalidError` +// until the feature is activated + +/// Reasons the runtime might have rejected an instruction. +/// +/// Members of this enum must not be removed, but new ones can be added. +/// Also, it is crucial that meta-information if any that comes along with +/// an error be consistent across software versions. For example, it is +/// dangerous to include error strings from 3rd party crates because they could +/// change at any time and changes to them are difficult to detect. +#[cfg(feature = "std")] +#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Serialize, serde_derive::Deserialize) +)] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum InstructionError { + /// Deprecated! Use CustomError instead! + /// The program instruction returned an error + GenericError, + + /// The arguments provided to a program were invalid + InvalidArgument, + + /// An instruction's data contents were invalid + InvalidInstructionData, + + /// An account's data contents was invalid + InvalidAccountData, + + /// An account's data was too small + AccountDataTooSmall, + + /// An account's balance was too small to complete the instruction + InsufficientFunds, + + /// The account did not have the expected program id + IncorrectProgramId, + + /// A signature was required but not found + MissingRequiredSignature, + + /// An initialize instruction was sent to an account that has already been initialized. + AccountAlreadyInitialized, + + /// An attempt to operate on an account that hasn't been initialized. 
+ UninitializedAccount, + + /// Program's instruction lamport balance does not equal the balance after the instruction + UnbalancedInstruction, + + /// Program illegally modified an account's program id + ModifiedProgramId, + + /// Program spent the lamports of an account that doesn't belong to it + ExternalAccountLamportSpend, + + /// Program modified the data of an account that doesn't belong to it + ExternalAccountDataModified, + + /// Read-only account's lamports modified + ReadonlyLamportChange, + + /// Read-only account's data was modified + ReadonlyDataModified, + + /// An account was referenced more than once in a single instruction + // Deprecated, instructions can now contain duplicate accounts + DuplicateAccountIndex, + + /// Executable bit on account changed, but shouldn't have + ExecutableModified, + + /// Rent_epoch account changed, but shouldn't have + RentEpochModified, + + /// The instruction expected additional account keys + NotEnoughAccountKeys, + + /// Program other than the account's owner changed the size of the account data + AccountDataSizeChanged, + + /// The instruction expected an executable account + AccountNotExecutable, + + /// Failed to borrow a reference to account data, already borrowed + AccountBorrowFailed, + + /// Account data has an outstanding reference after a program's execution + AccountBorrowOutstanding, + + /// The same account was multiply passed to an on-chain program's entrypoint, but the program + /// modified them differently. A program can only modify one instance of the account because + /// the runtime cannot determine which changes to pick or how to merge them if both are modified + DuplicateAccountOutOfSync, + + /// Allows on-chain programs to implement program-specific error types and see them returned + /// by the Solana runtime. A program-specific error may be any type that is represented as + /// or serialized to a u32 integer. + Custom(u32), + + /// The return value from the program was invalid. Valid errors are either a defined builtin + /// error value or a user-defined error in the lower 32 bits. + InvalidError, + + /// Executable account's data was modified + ExecutableDataModified, + + /// Executable account's lamports modified + ExecutableLamportChange, + + /// Executable accounts must be rent exempt + ExecutableAccountNotRentExempt, + + /// Unsupported program id + UnsupportedProgramId, + + /// Cross-program invocation call depth too deep + CallDepth, + + /// An account required by the instruction is missing + MissingAccount, + + /// Cross-program invocation reentrancy not allowed for this instruction + ReentrancyNotAllowed, + + /// Length of the seed is too long for address generation + MaxSeedLengthExceeded, + + /// Provided seeds do not result in a valid address + InvalidSeeds, + + /// Failed to reallocate account data of this length + InvalidRealloc, + + /// Computational budget exceeded + ComputationalBudgetExceeded, + + /// Cross-program invocation with unauthorized signer or writable account + PrivilegeEscalation, + + /// Failed to create program execution environment + ProgramEnvironmentSetupFailure, + + /// Program failed to complete + ProgramFailedToComplete, + + /// Program failed to compile + ProgramFailedToCompile, + + /// Account is immutable + Immutable, + + /// Incorrect authority provided + IncorrectAuthority, + + /// Failed to serialize or deserialize account data + /// + /// Warning: This error should never be emitted by the runtime. 
+ /// + /// This error includes strings from the underlying 3rd party Borsh crate + /// which can be dangerous because the error strings could change across + /// Borsh versions. Only programs can use this error because they are + /// consistent across Solana software versions. + /// + BorshIoError(String), + + /// An account does not have enough lamports to be rent-exempt + AccountNotRentExempt, + + /// Invalid account owner + InvalidAccountOwner, + + /// Program arithmetic overflowed + ArithmeticOverflow, + + /// Unsupported sysvar + UnsupportedSysvar, + + /// Illegal account owner + IllegalOwner, + + /// Accounts data allocations exceeded the maximum allowed per transaction + MaxAccountsDataAllocationsExceeded, + + /// Max accounts exceeded + MaxAccountsExceeded, + + /// Max instruction trace length exceeded + MaxInstructionTraceLengthExceeded, + + /// Builtin programs must consume compute units + BuiltinProgramsMustConsumeComputeUnits, + // Note: For any new error added here an equivalent ProgramError and its + // conversions must also be added +} + +#[cfg(feature = "std")] +impl std::error::Error for InstructionError {} + +#[cfg(feature = "std")] +impl fmt::Display for InstructionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + InstructionError::GenericError => f.write_str("generic instruction error"), + InstructionError::InvalidArgument => f.write_str("invalid program argument"), + InstructionError::InvalidInstructionData => f.write_str("invalid instruction data"), + InstructionError::InvalidAccountData => { + f.write_str("invalid account data for instruction") + } + InstructionError::AccountDataTooSmall => { + f.write_str("account data too small for instruction") + } + InstructionError::InsufficientFunds => { + f.write_str("insufficient funds for instruction") + } + InstructionError::IncorrectProgramId => { + f.write_str("incorrect program id for instruction") + } + InstructionError::MissingRequiredSignature => { + f.write_str("missing required signature for instruction") + } + InstructionError::AccountAlreadyInitialized => { + f.write_str("instruction requires an uninitialized account") + } + InstructionError::UninitializedAccount => { + f.write_str("instruction requires an initialized account") + } + InstructionError::UnbalancedInstruction => { + f.write_str("sum of account balances before and after instruction do not match") + } + InstructionError::ModifiedProgramId => { + f.write_str("instruction illegally modified the program id of an account") + } + InstructionError::ExternalAccountLamportSpend => { + f.write_str("instruction spent from the balance of an account it does not own") + } + InstructionError::ExternalAccountDataModified => { + f.write_str("instruction modified data of an account it does not own") + } + InstructionError::ReadonlyLamportChange => { + f.write_str("instruction changed the balance of a read-only account") + } + InstructionError::ReadonlyDataModified => { + f.write_str("instruction modified data of a read-only account") + } + InstructionError::DuplicateAccountIndex => { + f.write_str("instruction contains duplicate accounts") + } + InstructionError::ExecutableModified => { + f.write_str("instruction changed executable bit of an account") + } + InstructionError::RentEpochModified => { + f.write_str("instruction modified rent epoch of an account") + } + InstructionError::NotEnoughAccountKeys => { + f.write_str("insufficient account keys for instruction") + } + InstructionError::AccountDataSizeChanged => f.write_str( + 
"program other than the account's owner changed the size of the account data", + ), + InstructionError::AccountNotExecutable => { + f.write_str("instruction expected an executable account") + } + InstructionError::AccountBorrowFailed => f.write_str( + "instruction tries to borrow reference for an account which is already borrowed", + ), + InstructionError::AccountBorrowOutstanding => { + f.write_str("instruction left account with an outstanding borrowed reference") + } + InstructionError::DuplicateAccountOutOfSync => { + f.write_str("instruction modifications of multiply-passed account differ") + } + InstructionError::Custom(num) => { + write!(f, "custom program error: {num:#x}") + } + InstructionError::InvalidError => f.write_str("program returned invalid error code"), + InstructionError::ExecutableDataModified => { + f.write_str("instruction changed executable accounts data") + } + InstructionError::ExecutableLamportChange => { + f.write_str("instruction changed the balance of an executable account") + } + InstructionError::ExecutableAccountNotRentExempt => { + f.write_str("executable accounts must be rent exempt") + } + InstructionError::UnsupportedProgramId => f.write_str("Unsupported program id"), + InstructionError::CallDepth => { + f.write_str("Cross-program invocation call depth too deep") + } + InstructionError::MissingAccount => { + f.write_str("An account required by the instruction is missing") + } + InstructionError::ReentrancyNotAllowed => { + f.write_str("Cross-program invocation reentrancy not allowed for this instruction") + } + InstructionError::MaxSeedLengthExceeded => { + f.write_str("Length of the seed is too long for address generation") + } + InstructionError::InvalidSeeds => { + f.write_str("Provided seeds do not result in a valid address") + } + InstructionError::InvalidRealloc => f.write_str("Failed to reallocate account data"), + InstructionError::ComputationalBudgetExceeded => { + f.write_str("Computational budget exceeded") + } + InstructionError::PrivilegeEscalation => { + f.write_str("Cross-program invocation with unauthorized signer or writable account") + } + InstructionError::ProgramEnvironmentSetupFailure => { + f.write_str("Failed to create program execution environment") + } + InstructionError::ProgramFailedToComplete => f.write_str("Program failed to complete"), + InstructionError::ProgramFailedToCompile => f.write_str("Program failed to compile"), + InstructionError::Immutable => f.write_str("Account is immutable"), + InstructionError::IncorrectAuthority => f.write_str("Incorrect authority provided"), + InstructionError::BorshIoError(s) => { + write!(f, "Failed to serialize or deserialize account data: {s}",) + } + InstructionError::AccountNotRentExempt => { + f.write_str("An account does not have enough lamports to be rent-exempt") + } + InstructionError::InvalidAccountOwner => f.write_str("Invalid account owner"), + InstructionError::ArithmeticOverflow => f.write_str("Program arithmetic overflowed"), + InstructionError::UnsupportedSysvar => f.write_str("Unsupported sysvar"), + InstructionError::IllegalOwner => f.write_str("Provided owner is not allowed"), + InstructionError::MaxAccountsDataAllocationsExceeded => f.write_str( + "Accounts data allocations exceeded the maximum allowed per transaction", + ), + InstructionError::MaxAccountsExceeded => f.write_str("Max accounts exceeded"), + InstructionError::MaxInstructionTraceLengthExceeded => { + f.write_str("Max instruction trace length exceeded") + } + 
InstructionError::BuiltinProgramsMustConsumeComputeUnits => { + f.write_str("Builtin programs must consume compute units") + } + } + } +} + +#[cfg(feature = "std")] +impl From for InstructionError +where + T: ToPrimitive, +{ + fn from(error: T) -> Self { + let error = error.to_u64().unwrap_or(0xbad_c0de); + match error { + CUSTOM_ZERO => Self::Custom(0), + INVALID_ARGUMENT => Self::InvalidArgument, + INVALID_INSTRUCTION_DATA => Self::InvalidInstructionData, + INVALID_ACCOUNT_DATA => Self::InvalidAccountData, + ACCOUNT_DATA_TOO_SMALL => Self::AccountDataTooSmall, + INSUFFICIENT_FUNDS => Self::InsufficientFunds, + INCORRECT_PROGRAM_ID => Self::IncorrectProgramId, + MISSING_REQUIRED_SIGNATURES => Self::MissingRequiredSignature, + ACCOUNT_ALREADY_INITIALIZED => Self::AccountAlreadyInitialized, + UNINITIALIZED_ACCOUNT => Self::UninitializedAccount, + NOT_ENOUGH_ACCOUNT_KEYS => Self::NotEnoughAccountKeys, + ACCOUNT_BORROW_FAILED => Self::AccountBorrowFailed, + MAX_SEED_LENGTH_EXCEEDED => Self::MaxSeedLengthExceeded, + INVALID_SEEDS => Self::InvalidSeeds, + BORSH_IO_ERROR => Self::BorshIoError("Unknown".to_string()), + ACCOUNT_NOT_RENT_EXEMPT => Self::AccountNotRentExempt, + UNSUPPORTED_SYSVAR => Self::UnsupportedSysvar, + ILLEGAL_OWNER => Self::IllegalOwner, + MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED => Self::MaxAccountsDataAllocationsExceeded, + INVALID_ACCOUNT_DATA_REALLOC => Self::InvalidRealloc, + MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED => Self::MaxInstructionTraceLengthExceeded, + BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS => { + Self::BuiltinProgramsMustConsumeComputeUnits + } + INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, + ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, + IMMUTABLE => Self::Immutable, + INCORRECT_AUTHORITY => Self::IncorrectAuthority, + _ => { + // A valid custom error has no bits set in the upper 32 + if error >> BUILTIN_BIT_SHIFT == 0 { + Self::Custom(error as u32) + } else { + Self::InvalidError + } + } + } + } +} diff --git a/sdk/instruction/src/lib.rs b/sdk/instruction/src/lib.rs new file mode 100644 index 00000000000000..1b3aa31855ad98 --- /dev/null +++ b/sdk/instruction/src/lib.rs @@ -0,0 +1,292 @@ +//! Types for directing the execution of Solana programs. +//! +//! Every invocation of a Solana program executes a single instruction, as +//! defined by the [`Instruction`] type. An instruction is primarily a vector of +//! bytes, the contents of which are program-specific, and not interpreted by +//! the Solana runtime. This allows flexibility in how programs behave, how they +//! are controlled by client software, and what data encodings they use. +//! +//! Besides the instruction data, every account a program may read or write +//! while executing a given instruction is also included in `Instruction`, as +//! [`AccountMeta`] values. The runtime uses this information to efficiently +//! schedule execution of transactions. +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![allow(clippy::arithmetic_side_effects)] +#![no_std] + +#[cfg(feature = "std")] +extern crate std; +#[cfg(feature = "std")] +use {solana_pubkey::Pubkey, std::vec::Vec}; +pub mod account_meta; +#[cfg(feature = "std")] +pub use account_meta::AccountMeta; +pub mod error; +#[cfg(target_os = "solana")] +pub mod syscalls; + +/// A directive for a single invocation of a Solana program. +/// +/// An instruction specifies which program it is calling, which accounts it may +/// read or modify, and additional data that serves as input to the program. 
One +/// or more instructions are included in transactions submitted by Solana +/// clients. Instructions are also used to describe [cross-program +/// invocations][cpi]. +/// +/// [cpi]: https://solana.com/docs/core/cpi +/// +/// During execution, a program will receive a list of account data as one of +/// its arguments, in the same order as specified during `Instruction` +/// construction. +/// +/// While Solana is agnostic to the format of the instruction data, it has +/// built-in support for serialization via [`borsh`] and [`bincode`]. +/// +/// [`borsh`]: https://docs.rs/borsh/latest/borsh/ +/// [`bincode`]: https://docs.rs/bincode/latest/bincode/ +/// +/// # Specifying account metadata +/// +/// When constructing an [`Instruction`], a list of all accounts that may be +/// read or written during the execution of that instruction must be supplied as +/// [`AccountMeta`] values. +/// +/// Any account whose data may be mutated by the program during execution must +/// be specified as writable. During execution, writing to an account that was +/// not specified as writable will cause the transaction to fail. Writing to an +/// account that is not owned by the program will cause the transaction to fail. +/// +/// Any account whose lamport balance may be mutated by the program during +/// execution must be specified as writable. During execution, mutating the +/// lamports of an account that was not specified as writable will cause the +/// transaction to fail. While _subtracting_ lamports from an account not owned +/// by the program will cause the transaction to fail, _adding_ lamports to any +/// account is allowed, as long is it is mutable. +/// +/// Accounts that are not read or written by the program may still be specified +/// in an `Instruction`'s account list. These will affect scheduling of program +/// execution by the runtime, but will otherwise be ignored. +/// +/// When building a transaction, the Solana runtime coalesces all accounts used +/// by all instructions in that transaction, along with accounts and permissions +/// required by the runtime, into a single account list. Some accounts and +/// account permissions required by the runtime to process a transaction are +/// _not_ required to be included in an `Instruction`s account list. These +/// include: +/// +/// - The program ID — it is a separate field of `Instruction` +/// - The transaction's fee-paying account — it is added during [`Message`] +/// construction. A program may still require the fee payer as part of the +/// account list if it directly references it. +/// +/// [`Message`]: https://docs.rs/solana-program/latest/solana_program/message/legacy/struct.Message.html +/// +/// Programs may require signatures from some accounts, in which case they +/// should be specified as signers during `Instruction` construction. The +/// program must still validate during execution that the account is a signer. +#[cfg(all(feature = "std", not(target_arch = "wasm32")))] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Serialize, serde_derive::Deserialize) +)] +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Instruction { + /// Pubkey of the program that executes this instruction. + pub program_id: Pubkey, + /// Metadata describing accounts that should be passed to the program. + pub accounts: Vec, + /// Opaque data passed to the program for its own interpretation. + pub data: Vec, +} + +/// wasm-bindgen version of the Instruction struct. 
+/// This duplication is required until https://github.com/rustwasm/wasm-bindgen/issues/3671 +/// is fixed. This must not diverge from the regular non-wasm Instruction struct. +#[cfg(all(feature = "std", target_arch = "wasm32"))] +#[wasm_bindgen::prelude::wasm_bindgen] +pub struct Instruction { + #[wasm_bindgen(skip)] + pub program_id: Pubkey, + #[wasm_bindgen(skip)] + pub accounts: Vec, + #[wasm_bindgen(skip)] + pub data: Vec, +} + +#[cfg(feature = "std")] +impl Instruction { + #[cfg(feature = "borsh")] + /// Create a new instruction from a value, encoded with [`borsh`]. + /// + /// [`borsh`]: https://docs.rs/borsh/latest/borsh/ + /// + /// `program_id` is the address of the program that will execute the instruction. + /// `accounts` contains a description of all accounts that may be accessed by the program. + /// + /// Borsh serialization is often preferred over bincode as it has a stable + /// [specification] and an [implementation in JavaScript][jsb], neither of + /// which are true of bincode. + /// + /// [specification]: https://borsh.io/ + /// [jsb]: https://github.com/near/borsh-js + /// + /// # Examples + /// + /// ``` + /// # use solana_pubkey::Pubkey; + /// # use solana_instruction::{AccountMeta, Instruction}; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # + /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] + /// pub struct MyInstruction { + /// pub lamports: u64, + /// } + /// + /// pub fn create_instruction( + /// program_id: &Pubkey, + /// from: &Pubkey, + /// to: &Pubkey, + /// lamports: u64, + /// ) -> Instruction { + /// let instr = MyInstruction { lamports }; + /// + /// Instruction::new_with_borsh( + /// *program_id, + /// &instr, + /// vec![ + /// AccountMeta::new(*from, true), + /// AccountMeta::new(*to, false), + /// ], + /// ) + /// } + /// ``` + pub fn new_with_borsh( + program_id: Pubkey, + data: &T, + accounts: Vec, + ) -> Self { + let data = borsh::to_vec(data).unwrap(); + Self { + program_id, + accounts, + data, + } + } + + #[cfg(feature = "bincode")] + /// Create a new instruction from a value, encoded with [`bincode`]. + /// + /// [`bincode`]: https://docs.rs/bincode/latest/bincode/ + /// + /// `program_id` is the address of the program that will execute the instruction. + /// `accounts` contains a description of all accounts that may be accessed by the program. + /// + /// # Examples + /// + /// ``` + /// # use solana_pubkey::Pubkey; + /// # use solana_instruction::{AccountMeta, Instruction}; + /// # use serde::{Serialize, Deserialize}; + /// # + /// #[derive(Serialize, Deserialize)] + /// pub struct MyInstruction { + /// pub lamports: u64, + /// } + /// + /// pub fn create_instruction( + /// program_id: &Pubkey, + /// from: &Pubkey, + /// to: &Pubkey, + /// lamports: u64, + /// ) -> Instruction { + /// let instr = MyInstruction { lamports }; + /// + /// Instruction::new_with_bincode( + /// *program_id, + /// &instr, + /// vec![ + /// AccountMeta::new(*from, true), + /// AccountMeta::new(*to, false), + /// ], + /// ) + /// } + /// ``` + pub fn new_with_bincode( + program_id: Pubkey, + data: &T, + accounts: Vec, + ) -> Self { + let data = bincode::serialize(data).unwrap(); + Self { + program_id, + accounts, + data, + } + } + + /// Create a new instruction from a byte slice. + /// + /// `program_id` is the address of the program that will execute the instruction. + /// `accounts` contains a description of all accounts that may be accessed by the program. 
+ /// + /// The caller is responsible for ensuring the correct encoding of `data` as expected + /// by the callee program. + /// + /// # Examples + /// + /// ``` + /// # use solana_pubkey::Pubkey; + /// # use solana_instruction::{AccountMeta, Instruction}; + /// # + /// # use borsh::{io::Error, BorshSerialize, BorshDeserialize}; + /// # + /// #[derive(BorshSerialize, BorshDeserialize)] + /// # #[borsh(crate = "borsh")] + /// pub struct MyInstruction { + /// pub lamports: u64, + /// } + /// + /// pub fn create_instruction( + /// program_id: &Pubkey, + /// from: &Pubkey, + /// to: &Pubkey, + /// lamports: u64, + /// ) -> Result { + /// let instr = MyInstruction { lamports }; + /// + /// let mut instr_in_bytes: Vec = Vec::new(); + /// instr.serialize(&mut instr_in_bytes)?; + /// + /// Ok(Instruction::new_with_bytes( + /// *program_id, + /// &instr_in_bytes, + /// vec![ + /// AccountMeta::new(*from, true), + /// AccountMeta::new(*to, false), + /// ], + /// )) + /// } + /// ``` + pub fn new_with_bytes(program_id: Pubkey, data: &[u8], accounts: Vec) -> Self { + Self { + program_id, + accounts, + data: data.to_vec(), + } + } +} + +// Stack height when processing transaction-level instructions +pub const TRANSACTION_LEVEL_STACK_HEIGHT: usize = 1; + +/// Use to query and convey information about the sibling instruction components +/// when calling the `sol_get_processed_sibling_instruction` syscall. +#[repr(C)] +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq)] +pub struct ProcessedSiblingInstruction { + /// Length of the instruction data + pub data_len: u64, + /// Number of AccountMeta structures + pub accounts_len: u64, +} diff --git a/sdk/instruction/src/syscalls.rs b/sdk/instruction/src/syscalls.rs new file mode 100644 index 00000000000000..854fea7de5f190 --- /dev/null +++ b/sdk/instruction/src/syscalls.rs @@ -0,0 +1,8 @@ +use { + crate::{AccountMeta, ProcessedSiblingInstruction}, + solana_define_syscall::define_syscall, + solana_pubkey::Pubkey, +}; + +define_syscall!(fn sol_get_processed_sibling_instruction(index: u64, meta: *mut ProcessedSiblingInstruction, program_id: *mut Pubkey, data: *mut u8, accounts: *mut AccountMeta) -> u64); +define_syscall!(fn sol_get_stack_height() -> u64); diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 1a25c1fde1352d..2f2c6feaebad69 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -41,6 +41,11 @@ solana-hash = { workspace = true, features = [ "serde", "std", ] } +solana-instruction = { workspace = true, default-features = false, features = [ + "bincode", + "serde", + "std", +] } solana-msg = { workspace = true } solana-program-memory = { workspace = true } solana-program-option = { workspace = true } @@ -107,13 +112,20 @@ crate-type = ["cdylib", "rlib"] [features] default = ["borsh"] -borsh = ["dep:borsh", "dep:borsh0-10", "solana-hash/borsh", "solana-pubkey/borsh"] +borsh = [ + "dep:borsh", + "dep:borsh0-10", + "solana-hash/borsh", + "solana-instruction/borsh", + "solana-pubkey/borsh" +] dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-hash/frozen-abi", + "solana-instruction/frozen-abi", "solana-pubkey/frozen-abi", "solana-short-vec/frozen-abi" ] diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index 2a686c75dec2e6..c380ecdbcde9c3 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -1,631 +1,14 @@ -//! Types for directing the execution of Solana programs. -//! -//! 
Every invocation of a Solana program executes a single instruction, as -//! defined by the [`Instruction`] type. An instruction is primarily a vector of -//! bytes, the contents of which are program-specific, and not interpreted by -//! the Solana runtime. This allows flexibility in how programs behave, how they -//! are controlled by client software, and what data encodings they use. -//! -//! Besides the instruction data, every account a program may read or write -//! while executing a given instruction is also included in `Instruction`, as -//! [`AccountMeta`] values. The runtime uses this information to efficiently -//! schedule execution of transactions. - -#![allow(clippy::arithmetic_side_effects)] - -#[cfg(target_arch = "wasm32")] -use crate::wasm_bindgen; -#[cfg(feature = "borsh")] -use borsh::BorshSerialize; +#[cfg(feature = "frozen-abi")] +use solana_frozen_abi_macro::AbiExample; +pub use solana_instruction::{ + error::InstructionError, AccountMeta, Instruction, ProcessedSiblingInstruction, + TRANSACTION_LEVEL_STACK_HEIGHT, +}; use { - crate::pubkey::Pubkey, bincode::serialize, serde::Serialize, solana_sanitize::Sanitize, - solana_short_vec as short_vec, thiserror::Error, + bincode::serialize, serde::Serialize, solana_pubkey::Pubkey, solana_sanitize::Sanitize, + solana_short_vec as short_vec, }; -/// Reasons the runtime might have rejected an instruction. -/// -/// Members of this enum must not be removed, but new ones can be added. -/// Also, it is crucial that meta-information if any that comes along with -/// an error be consistent across software versions. For example, it is -/// dangerous to include error strings from 3rd party crates because they could -/// change at any time and changes to them are difficult to detect. -#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] -#[derive(Serialize, Deserialize, Debug, Error, PartialEq, Eq, Clone)] -pub enum InstructionError { - /// Deprecated! Use CustomError instead! - /// The program instruction returned an error - #[error("generic instruction error")] - GenericError, - - /// The arguments provided to a program were invalid - #[error("invalid program argument")] - InvalidArgument, - - /// An instruction's data contents were invalid - #[error("invalid instruction data")] - InvalidInstructionData, - - /// An account's data contents was invalid - #[error("invalid account data for instruction")] - InvalidAccountData, - - /// An account's data was too small - #[error("account data too small for instruction")] - AccountDataTooSmall, - - /// An account's balance was too small to complete the instruction - #[error("insufficient funds for instruction")] - InsufficientFunds, - - /// The account did not have the expected program id - #[error("incorrect program id for instruction")] - IncorrectProgramId, - - /// A signature was required but not found - #[error("missing required signature for instruction")] - MissingRequiredSignature, - - /// An initialize instruction was sent to an account that has already been initialized. - #[error("instruction requires an uninitialized account")] - AccountAlreadyInitialized, - - /// An attempt to operate on an account that hasn't been initialized. 
- #[error("instruction requires an initialized account")] - UninitializedAccount, - - /// Program's instruction lamport balance does not equal the balance after the instruction - #[error("sum of account balances before and after instruction do not match")] - UnbalancedInstruction, - - /// Program illegally modified an account's program id - #[error("instruction illegally modified the program id of an account")] - ModifiedProgramId, - - /// Program spent the lamports of an account that doesn't belong to it - #[error("instruction spent from the balance of an account it does not own")] - ExternalAccountLamportSpend, - - /// Program modified the data of an account that doesn't belong to it - #[error("instruction modified data of an account it does not own")] - ExternalAccountDataModified, - - /// Read-only account's lamports modified - #[error("instruction changed the balance of a read-only account")] - ReadonlyLamportChange, - - /// Read-only account's data was modified - #[error("instruction modified data of a read-only account")] - ReadonlyDataModified, - - /// An account was referenced more than once in a single instruction - // Deprecated, instructions can now contain duplicate accounts - #[error("instruction contains duplicate accounts")] - DuplicateAccountIndex, - - /// Executable bit on account changed, but shouldn't have - #[error("instruction changed executable bit of an account")] - ExecutableModified, - - /// Rent_epoch account changed, but shouldn't have - #[error("instruction modified rent epoch of an account")] - RentEpochModified, - - /// The instruction expected additional account keys - #[error("insufficient account keys for instruction")] - NotEnoughAccountKeys, - - /// Program other than the account's owner changed the size of the account data - #[error("program other than the account's owner changed the size of the account data")] - AccountDataSizeChanged, - - /// The instruction expected an executable account - #[error("instruction expected an executable account")] - AccountNotExecutable, - - /// Failed to borrow a reference to account data, already borrowed - #[error("instruction tries to borrow reference for an account which is already borrowed")] - AccountBorrowFailed, - - /// Account data has an outstanding reference after a program's execution - #[error("instruction left account with an outstanding borrowed reference")] - AccountBorrowOutstanding, - - /// The same account was multiply passed to an on-chain program's entrypoint, but the program - /// modified them differently. A program can only modify one instance of the account because - /// the runtime cannot determine which changes to pick or how to merge them if both are modified - #[error("instruction modifications of multiply-passed account differ")] - DuplicateAccountOutOfSync, - - /// Allows on-chain programs to implement program-specific error types and see them returned - /// by the Solana runtime. A program-specific error may be any type that is represented as - /// or serialized to a u32 integer. - #[error("custom program error: {0:#x}")] - Custom(u32), - - /// The return value from the program was invalid. Valid errors are either a defined builtin - /// error value or a user-defined error in the lower 32 bits. 
- #[error("program returned invalid error code")] - InvalidError, - - /// Executable account's data was modified - #[error("instruction changed executable accounts data")] - ExecutableDataModified, - - /// Executable account's lamports modified - #[error("instruction changed the balance of an executable account")] - ExecutableLamportChange, - - /// Executable accounts must be rent exempt - #[error("executable accounts must be rent exempt")] - ExecutableAccountNotRentExempt, - - /// Unsupported program id - #[error("Unsupported program id")] - UnsupportedProgramId, - - /// Cross-program invocation call depth too deep - #[error("Cross-program invocation call depth too deep")] - CallDepth, - - /// An account required by the instruction is missing - #[error("An account required by the instruction is missing")] - MissingAccount, - - /// Cross-program invocation reentrancy not allowed for this instruction - #[error("Cross-program invocation reentrancy not allowed for this instruction")] - ReentrancyNotAllowed, - - /// Length of the seed is too long for address generation - #[error("Length of the seed is too long for address generation")] - MaxSeedLengthExceeded, - - /// Provided seeds do not result in a valid address - #[error("Provided seeds do not result in a valid address")] - InvalidSeeds, - - /// Failed to reallocate account data of this length - #[error("Failed to reallocate account data")] - InvalidRealloc, - - /// Computational budget exceeded - #[error("Computational budget exceeded")] - ComputationalBudgetExceeded, - - /// Cross-program invocation with unauthorized signer or writable account - #[error("Cross-program invocation with unauthorized signer or writable account")] - PrivilegeEscalation, - - /// Failed to create program execution environment - #[error("Failed to create program execution environment")] - ProgramEnvironmentSetupFailure, - - /// Program failed to complete - #[error("Program failed to complete")] - ProgramFailedToComplete, - - /// Program failed to compile - #[error("Program failed to compile")] - ProgramFailedToCompile, - - /// Account is immutable - #[error("Account is immutable")] - Immutable, - - /// Incorrect authority provided - #[error("Incorrect authority provided")] - IncorrectAuthority, - - /// Failed to serialize or deserialize account data - /// - /// Warning: This error should never be emitted by the runtime. - /// - /// This error includes strings from the underlying 3rd party Borsh crate - /// which can be dangerous because the error strings could change across - /// Borsh versions. Only programs can use this error because they are - /// consistent across Solana software versions. 
- /// - #[error("Failed to serialize or deserialize account data: {0}")] - BorshIoError(String), - - /// An account does not have enough lamports to be rent-exempt - #[error("An account does not have enough lamports to be rent-exempt")] - AccountNotRentExempt, - - /// Invalid account owner - #[error("Invalid account owner")] - InvalidAccountOwner, - - /// Program arithmetic overflowed - #[error("Program arithmetic overflowed")] - ArithmeticOverflow, - - /// Unsupported sysvar - #[error("Unsupported sysvar")] - UnsupportedSysvar, - - /// Illegal account owner - #[error("Provided owner is not allowed")] - IllegalOwner, - - /// Accounts data allocations exceeded the maximum allowed per transaction - #[error("Accounts data allocations exceeded the maximum allowed per transaction")] - MaxAccountsDataAllocationsExceeded, - - /// Max accounts exceeded - #[error("Max accounts exceeded")] - MaxAccountsExceeded, - - /// Max instruction trace length exceeded - #[error("Max instruction trace length exceeded")] - MaxInstructionTraceLengthExceeded, - - /// Builtin programs must consume compute units - #[error("Builtin programs must consume compute units")] - BuiltinProgramsMustConsumeComputeUnits, - // Note: For any new error added here an equivalent ProgramError and its - // conversions must also be added -} - -/// A directive for a single invocation of a Solana program. -/// -/// An instruction specifies which program it is calling, which accounts it may -/// read or modify, and additional data that serves as input to the program. One -/// or more instructions are included in transactions submitted by Solana -/// clients. Instructions are also used to describe [cross-program -/// invocations][cpi]. -/// -/// [cpi]: https://solana.com/docs/core/cpi -/// -/// During execution, a program will receive a list of account data as one of -/// its arguments, in the same order as specified during `Instruction` -/// construction. -/// -/// While Solana is agnostic to the format of the instruction data, it has -/// built-in support for serialization via [`borsh`] and [`bincode`]. -/// -/// [`borsh`]: https://docs.rs/borsh/latest/borsh/ -/// [`bincode`]: https://docs.rs/bincode/latest/bincode/ -/// -/// # Specifying account metadata -/// -/// When constructing an [`Instruction`], a list of all accounts that may be -/// read or written during the execution of that instruction must be supplied as -/// [`AccountMeta`] values. -/// -/// Any account whose data may be mutated by the program during execution must -/// be specified as writable. During execution, writing to an account that was -/// not specified as writable will cause the transaction to fail. Writing to an -/// account that is not owned by the program will cause the transaction to fail. -/// -/// Any account whose lamport balance may be mutated by the program during -/// execution must be specified as writable. During execution, mutating the -/// lamports of an account that was not specified as writable will cause the -/// transaction to fail. While _subtracting_ lamports from an account not owned -/// by the program will cause the transaction to fail, _adding_ lamports to any -/// account is allowed, as long is it is mutable. -/// -/// Accounts that are not read or written by the program may still be specified -/// in an `Instruction`'s account list. These will affect scheduling of program -/// execution by the runtime, but will otherwise be ignored. 
-/// -/// When building a transaction, the Solana runtime coalesces all accounts used -/// by all instructions in that transaction, along with accounts and permissions -/// required by the runtime, into a single account list. Some accounts and -/// account permissions required by the runtime to process a transaction are -/// _not_ required to be included in an `Instruction`s account list. These -/// include: -/// -/// - The program ID — it is a separate field of `Instruction` -/// - The transaction's fee-paying account — it is added during [`Message`] -/// construction. A program may still require the fee payer as part of the -/// account list if it directly references it. -/// -/// [`Message`]: crate::message::Message -/// -/// Programs may require signatures from some accounts, in which case they -/// should be specified as signers during `Instruction` construction. The -/// program must still validate during execution that the account is a signer. -#[cfg(not(target_arch = "wasm32"))] -#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] -pub struct Instruction { - /// Pubkey of the program that executes this instruction. - pub program_id: Pubkey, - /// Metadata describing accounts that should be passed to the program. - pub accounts: Vec, - /// Opaque data passed to the program for its own interpretation. - pub data: Vec, -} - -/// wasm-bindgen version of the Instruction struct. -/// This duplication is required until https://github.com/rustwasm/wasm-bindgen/issues/3671 -/// is fixed. This must not diverge from the regular non-wasm Instruction struct. -#[cfg(target_arch = "wasm32")] -#[wasm_bindgen] -pub struct Instruction { - #[wasm_bindgen(skip)] - pub program_id: Pubkey, - #[wasm_bindgen(skip)] - pub accounts: Vec, - #[wasm_bindgen(skip)] - pub data: Vec, -} - -impl Instruction { - #[cfg(feature = "borsh")] - /// Create a new instruction from a value, encoded with [`borsh`]. - /// - /// [`borsh`]: https://docs.rs/borsh/latest/borsh/ - /// - /// `program_id` is the address of the program that will execute the instruction. - /// `accounts` contains a description of all accounts that may be accessed by the program. - /// - /// Borsh serialization is often preferred over bincode as it has a stable - /// [specification] and an [implementation in JavaScript][jsb], neither of - /// which are true of bincode. - /// - /// [specification]: https://borsh.io/ - /// [jsb]: https://github.com/near/borsh-js - /// - /// # Examples - /// - /// ``` - /// # use solana_program::{ - /// # pubkey::Pubkey, - /// # instruction::{AccountMeta, Instruction}, - /// # }; - /// # use borsh::{BorshSerialize, BorshDeserialize}; - /// # - /// #[derive(BorshSerialize, BorshDeserialize)] - /// # #[borsh(crate = "borsh")] - /// pub struct MyInstruction { - /// pub lamports: u64, - /// } - /// - /// pub fn create_instruction( - /// program_id: &Pubkey, - /// from: &Pubkey, - /// to: &Pubkey, - /// lamports: u64, - /// ) -> Instruction { - /// let instr = MyInstruction { lamports }; - /// - /// Instruction::new_with_borsh( - /// *program_id, - /// &instr, - /// vec![ - /// AccountMeta::new(*from, true), - /// AccountMeta::new(*to, false), - /// ], - /// ) - /// } - /// ``` - pub fn new_with_borsh( - program_id: Pubkey, - data: &T, - accounts: Vec, - ) -> Self { - let data = borsh::to_vec(data).unwrap(); - Self { - program_id, - accounts, - data, - } - } - - /// Create a new instruction from a value, encoded with [`bincode`]. 
- /// - /// [`bincode`]: https://docs.rs/bincode/latest/bincode/ - /// - /// `program_id` is the address of the program that will execute the instruction. - /// `accounts` contains a description of all accounts that may be accessed by the program. - /// - /// # Examples - /// - /// ``` - /// # use solana_program::{ - /// # pubkey::Pubkey, - /// # instruction::{AccountMeta, Instruction}, - /// # }; - /// # use serde::{Serialize, Deserialize}; - /// # - /// #[derive(Serialize, Deserialize)] - /// pub struct MyInstruction { - /// pub lamports: u64, - /// } - /// - /// pub fn create_instruction( - /// program_id: &Pubkey, - /// from: &Pubkey, - /// to: &Pubkey, - /// lamports: u64, - /// ) -> Instruction { - /// let instr = MyInstruction { lamports }; - /// - /// Instruction::new_with_bincode( - /// *program_id, - /// &instr, - /// vec![ - /// AccountMeta::new(*from, true), - /// AccountMeta::new(*to, false), - /// ], - /// ) - /// } - /// ``` - pub fn new_with_bincode( - program_id: Pubkey, - data: &T, - accounts: Vec, - ) -> Self { - let data = serialize(data).unwrap(); - Self { - program_id, - accounts, - data, - } - } - - /// Create a new instruction from a byte slice. - /// - /// `program_id` is the address of the program that will execute the instruction. - /// `accounts` contains a description of all accounts that may be accessed by the program. - /// - /// The caller is responsible for ensuring the correct encoding of `data` as expected - /// by the callee program. - /// - /// # Examples - /// - /// ``` - /// # use solana_program::{ - /// # pubkey::Pubkey, - /// # instruction::{AccountMeta, Instruction}, - /// # }; - /// # use borsh::{io::Error, BorshSerialize, BorshDeserialize}; - /// # - /// #[derive(BorshSerialize, BorshDeserialize)] - /// # #[borsh(crate = "borsh")] - /// pub struct MyInstruction { - /// pub lamports: u64, - /// } - /// - /// pub fn create_instruction( - /// program_id: &Pubkey, - /// from: &Pubkey, - /// to: &Pubkey, - /// lamports: u64, - /// ) -> Result { - /// let instr = MyInstruction { lamports }; - /// - /// let mut instr_in_bytes: Vec = Vec::new(); - /// instr.serialize(&mut instr_in_bytes)?; - /// - /// Ok(Instruction::new_with_bytes( - /// *program_id, - /// &instr_in_bytes, - /// vec![ - /// AccountMeta::new(*from, true), - /// AccountMeta::new(*to, false), - /// ], - /// )) - /// } - /// ``` - pub fn new_with_bytes(program_id: Pubkey, data: &[u8], accounts: Vec) -> Self { - Self { - program_id, - accounts, - data: data.to_vec(), - } - } -} - -/// Addition that returns [`InstructionError::InsufficientFunds`] on overflow. -/// -/// This is an internal utility function. -#[doc(hidden)] -pub fn checked_add(a: u64, b: u64) -> Result { - a.checked_add(b).ok_or(InstructionError::InsufficientFunds) -} - -/// Describes a single account read or written by a program during instruction -/// execution. -/// -/// When constructing an [`Instruction`], a list of all accounts that may be -/// read or written during the execution of that instruction must be supplied. -/// Any account that may be mutated by the program during execution, either its -/// data or metadata such as held lamports, must be writable. -/// -/// Note that because the Solana runtime schedules parallel transaction -/// execution around which accounts are writable, care should be taken that only -/// accounts which actually may be mutated are specified as writable. 
As the -/// default [`AccountMeta::new`] constructor creates writable accounts, this is -/// a minor hazard: use [`AccountMeta::new_readonly`] to specify that an account -/// is not writable. -#[repr(C)] -#[derive(Debug, Default, PartialEq, Eq, Clone, Serialize, Deserialize)] -pub struct AccountMeta { - /// An account's public key. - pub pubkey: Pubkey, - /// True if an `Instruction` requires a `Transaction` signature matching `pubkey`. - pub is_signer: bool, - /// True if the account data or metadata may be mutated during program execution. - pub is_writable: bool, -} - -impl AccountMeta { - /// Construct metadata for a writable account. - /// - /// # Examples - /// - /// ``` - /// # use solana_program::{ - /// # pubkey::Pubkey, - /// # instruction::{AccountMeta, Instruction}, - /// # }; - /// # use borsh::{BorshSerialize, BorshDeserialize}; - /// # - /// # #[derive(BorshSerialize, BorshDeserialize)] - /// # #[borsh(crate = "borsh")] - /// # pub struct MyInstruction; - /// # - /// # let instruction = MyInstruction; - /// # let from = Pubkey::new_unique(); - /// # let to = Pubkey::new_unique(); - /// # let program_id = Pubkey::new_unique(); - /// let instr = Instruction::new_with_borsh( - /// program_id, - /// &instruction, - /// vec![ - /// AccountMeta::new(from, true), - /// AccountMeta::new(to, false), - /// ], - /// ); - /// ``` - pub fn new(pubkey: Pubkey, is_signer: bool) -> Self { - Self { - pubkey, - is_signer, - is_writable: true, - } - } - - /// Construct metadata for a read-only account. - /// - /// # Examples - /// - /// ``` - /// # use solana_program::{ - /// # pubkey::Pubkey, - /// # instruction::{AccountMeta, Instruction}, - /// # }; - /// # use borsh::{BorshSerialize, BorshDeserialize}; - /// # - /// # #[derive(BorshSerialize, BorshDeserialize)] - /// # #[borsh(crate = "borsh")] - /// # pub struct MyInstruction; - /// # - /// # let instruction = MyInstruction; - /// # let from = Pubkey::new_unique(); - /// # let to = Pubkey::new_unique(); - /// # let from_account_storage = Pubkey::new_unique(); - /// # let program_id = Pubkey::new_unique(); - /// let instr = Instruction::new_with_borsh( - /// program_id, - /// &instruction, - /// vec![ - /// AccountMeta::new(from, true), - /// AccountMeta::new(to, false), - /// AccountMeta::new_readonly(from_account_storage, false), - /// ], - /// ); - /// ``` - pub fn new_readonly(pubkey: Pubkey, is_signer: bool) -> Self { - Self { - pubkey, - is_signer, - is_writable: false, - } - } -} - /// A compact encoding of an instruction. /// /// A `CompiledInstruction` is a component of a multi-instruction [`Message`], @@ -672,17 +55,6 @@ impl CompiledInstruction { } } -/// Use to query and convey information about the sibling instruction components -/// when calling the `sol_get_processed_sibling_instruction` syscall. -#[repr(C)] -#[derive(Default, Debug, Clone, Copy, Eq, PartialEq)] -pub struct ProcessedSiblingInstruction { - /// Length of the instruction data - pub data_len: u64, - /// Number of AccountMeta structures - pub accounts_len: u64, -} - /// Returns a sibling instruction from the processed sibling instruction list. 
///
 /// The processed sibling instruction list is a reverse-ordered list of
@@ -699,10 +71,10 @@ pub fn get_processed_sibling_instruction(index: usize) -> Option<Instruction> {
     #[cfg(target_os = "solana")]
     {
         let mut meta = ProcessedSiblingInstruction::default();
-        let mut program_id = Pubkey::default();
 
         if 1 == unsafe {
-            crate::syscalls::sol_get_processed_sibling_instruction(
+            solana_instruction::syscalls::sol_get_processed_sibling_instruction(
                 index as u64,
                 &mut meta,
                 &mut program_id,
@@ -716,7 +88,7 @@ pub fn get_processed_sibling_instruction(index: usize) -> Option<Instruction> {
             accounts.resize_with(meta.accounts_len as usize, AccountMeta::default);
 
             let _ = unsafe {
-                crate::syscalls::sol_get_processed_sibling_instruction(
+                solana_instruction::syscalls::sol_get_processed_sibling_instruction(
                     index as u64,
                     &mut meta,
                     &mut program_id,
@@ -735,16 +107,13 @@ pub fn get_processed_sibling_instruction(index: usize) -> Option<Instruction> {
     crate::program_stubs::sol_get_processed_sibling_instruction(index)
 }
 
-// Stack height when processing transaction-level instructions
-pub const TRANSACTION_LEVEL_STACK_HEIGHT: usize = 1;
-
 /// Get the current stack height, transaction-level instructions are height
 /// TRANSACTION_LEVEL_STACK_HEIGHT, first invoked inner instruction is height
 /// TRANSACTION_LEVEL_STACK_HEIGHT + 1, etc...
 pub fn get_stack_height() -> usize {
     #[cfg(target_os = "solana")]
     unsafe {
-        crate::syscalls::sol_get_stack_height() as usize
+        solana_instruction::syscalls::sol_get_stack_height() as usize
     }
 
     #[cfg(not(target_os = "solana"))]
@@ -752,3 +121,12 @@ pub fn get_stack_height() -> usize {
         crate::program_stubs::sol_get_stack_height() as usize
     }
 }
+
+// TODO: remove this.
+/// Addition that returns [`InstructionError::InsufficientFunds`] on overflow.
+///
+/// This is an internal utility function.
+#[doc(hidden)]
+pub fn checked_add(a: u64, b: u64) -> Result<u64, InstructionError> {
+    a.checked_add(b).ok_or(InstructionError::InsufficientFunds)
+}
diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs
index f225561a52d389..9bf25ae7d0ac28 100644
--- a/sdk/program/src/program_error.rs
+++ b/sdk/program/src/program_error.rs
@@ -3,9 +3,19 @@
 #![allow(clippy::arithmetic_side_effects)]
 #[cfg(feature = "borsh")]
 use borsh::io::Error as BorshIoError;
+pub use solana_instruction::error::{
+    ACCOUNT_ALREADY_INITIALIZED, ACCOUNT_BORROW_FAILED, ACCOUNT_DATA_TOO_SMALL,
+    ACCOUNT_NOT_RENT_EXEMPT, ARITHMETIC_OVERFLOW, BORSH_IO_ERROR,
+    BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS, CUSTOM_ZERO, ILLEGAL_OWNER, IMMUTABLE,
+    INCORRECT_AUTHORITY, INCORRECT_PROGRAM_ID, INSUFFICIENT_FUNDS, INVALID_ACCOUNT_DATA,
+    INVALID_ACCOUNT_DATA_REALLOC, INVALID_ACCOUNT_OWNER, INVALID_ARGUMENT,
+    INVALID_INSTRUCTION_DATA, INVALID_SEEDS, MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED,
+    MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED, MAX_SEED_LENGTH_EXCEEDED, MISSING_REQUIRED_SIGNATURES,
+    NOT_ENOUGH_ACCOUNT_KEYS, UNINITIALIZED_ACCOUNT, UNSUPPORTED_SYSVAR,
+};
 use {
     crate::{instruction::InstructionError, msg, pubkey::PubkeyError},
-    num_traits::{FromPrimitive, ToPrimitive},
+    num_traits::FromPrimitive,
     solana_decode_error::DecodeError,
     std::convert::TryFrom,
     thiserror::Error,
@@ -125,46 +135,6 @@ impl PrintProgramError for ProgramError {
     }
 }
 
-/// Builtin return values occupy the upper 32 bits
-const BUILTIN_BIT_SHIFT: usize = 32;
-macro_rules!
to_builtin { - ($error:expr) => { - ($error as u64) << BUILTIN_BIT_SHIFT - }; -} - -pub const CUSTOM_ZERO: u64 = to_builtin!(1); -pub const INVALID_ARGUMENT: u64 = to_builtin!(2); -pub const INVALID_INSTRUCTION_DATA: u64 = to_builtin!(3); -pub const INVALID_ACCOUNT_DATA: u64 = to_builtin!(4); -pub const ACCOUNT_DATA_TOO_SMALL: u64 = to_builtin!(5); -pub const INSUFFICIENT_FUNDS: u64 = to_builtin!(6); -pub const INCORRECT_PROGRAM_ID: u64 = to_builtin!(7); -pub const MISSING_REQUIRED_SIGNATURES: u64 = to_builtin!(8); -pub const ACCOUNT_ALREADY_INITIALIZED: u64 = to_builtin!(9); -pub const UNINITIALIZED_ACCOUNT: u64 = to_builtin!(10); -pub const NOT_ENOUGH_ACCOUNT_KEYS: u64 = to_builtin!(11); -pub const ACCOUNT_BORROW_FAILED: u64 = to_builtin!(12); -pub const MAX_SEED_LENGTH_EXCEEDED: u64 = to_builtin!(13); -pub const INVALID_SEEDS: u64 = to_builtin!(14); -pub const BORSH_IO_ERROR: u64 = to_builtin!(15); -pub const ACCOUNT_NOT_RENT_EXEMPT: u64 = to_builtin!(16); -pub const UNSUPPORTED_SYSVAR: u64 = to_builtin!(17); -pub const ILLEGAL_OWNER: u64 = to_builtin!(18); -pub const MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED: u64 = to_builtin!(19); -pub const INVALID_ACCOUNT_DATA_REALLOC: u64 = to_builtin!(20); -pub const MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED: u64 = to_builtin!(21); -pub const BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS: u64 = to_builtin!(22); -pub const INVALID_ACCOUNT_OWNER: u64 = to_builtin!(23); -pub const ARITHMETIC_OVERFLOW: u64 = to_builtin!(24); -pub const IMMUTABLE: u64 = to_builtin!(25); -pub const INCORRECT_AUTHORITY: u64 = to_builtin!(26); -// Warning: Any new program errors added here must also be: -// - Added to the below conversions -// - Added as an equivalent to InstructionError -// - Be featureized in the BPF loader to return `InstructionError::InvalidError` -// until the feature is activated - impl From for u64 { fn from(error: ProgramError) -> Self { match error { @@ -288,53 +258,6 @@ impl TryFrom for ProgramError { } } -impl From for InstructionError -where - T: ToPrimitive, -{ - fn from(error: T) -> Self { - let error = error.to_u64().unwrap_or(0xbad_c0de); - match error { - CUSTOM_ZERO => Self::Custom(0), - INVALID_ARGUMENT => Self::InvalidArgument, - INVALID_INSTRUCTION_DATA => Self::InvalidInstructionData, - INVALID_ACCOUNT_DATA => Self::InvalidAccountData, - ACCOUNT_DATA_TOO_SMALL => Self::AccountDataTooSmall, - INSUFFICIENT_FUNDS => Self::InsufficientFunds, - INCORRECT_PROGRAM_ID => Self::IncorrectProgramId, - MISSING_REQUIRED_SIGNATURES => Self::MissingRequiredSignature, - ACCOUNT_ALREADY_INITIALIZED => Self::AccountAlreadyInitialized, - UNINITIALIZED_ACCOUNT => Self::UninitializedAccount, - NOT_ENOUGH_ACCOUNT_KEYS => Self::NotEnoughAccountKeys, - ACCOUNT_BORROW_FAILED => Self::AccountBorrowFailed, - MAX_SEED_LENGTH_EXCEEDED => Self::MaxSeedLengthExceeded, - INVALID_SEEDS => Self::InvalidSeeds, - BORSH_IO_ERROR => Self::BorshIoError("Unknown".to_string()), - ACCOUNT_NOT_RENT_EXEMPT => Self::AccountNotRentExempt, - UNSUPPORTED_SYSVAR => Self::UnsupportedSysvar, - ILLEGAL_OWNER => Self::IllegalOwner, - MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED => Self::MaxAccountsDataAllocationsExceeded, - INVALID_ACCOUNT_DATA_REALLOC => Self::InvalidRealloc, - MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED => Self::MaxInstructionTraceLengthExceeded, - BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS => { - Self::BuiltinProgramsMustConsumeComputeUnits - } - INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, - ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, - IMMUTABLE => Self::Immutable, - 
INCORRECT_AUTHORITY => Self::IncorrectAuthority, - _ => { - // A valid custom error has no bits set in the upper 32 - if error >> BUILTIN_BIT_SHIFT == 0 { - Self::Custom(error as u32) - } else { - Self::InvalidError - } - } - } - } -} - impl From for ProgramError { fn from(error: PubkeyError) -> Self { match error { diff --git a/sdk/program/src/program_stubs.rs b/sdk/program/src/program_stubs.rs index 77dabd37c3418c..140d235d3b6861 100644 --- a/sdk/program/src/program_stubs.rs +++ b/sdk/program/src/program_stubs.rs @@ -4,10 +4,11 @@ use { crate::{ - account_info::AccountInfo, entrypoint::ProgramResult, instruction::Instruction, - program_error::UNSUPPORTED_SYSVAR, pubkey::Pubkey, + account_info::AccountInfo, entrypoint::ProgramResult, program_error::UNSUPPORTED_SYSVAR, + pubkey::Pubkey, }, base64::{prelude::BASE64_STANDARD, Engine}, + solana_instruction::Instruction, solana_program_memory::stubs, std::sync::{Arc, RwLock}, }; diff --git a/sdk/program/src/syscalls/definitions.rs b/sdk/program/src/syscalls/definitions.rs index 3bf812f6470bbf..221a3a3c652edb 100644 --- a/sdk/program/src/syscalls/definitions.rs +++ b/sdk/program/src/syscalls/definitions.rs @@ -1,5 +1,9 @@ #[cfg(target_feature = "static-syscalls")] pub use solana_define_syscall::sys_hash; +#[deprecated(since = "2.1.0", note = "Use `solana_instruction::syscalls` instead")] +pub use solana_instruction::syscalls::{ + sol_get_processed_sibling_instruction, sol_get_stack_height, +}; #[deprecated(since = "2.1.0", note = "Use `solana_msg::sol_log` instead.")] pub use solana_msg::sol_log; #[deprecated( @@ -18,13 +22,7 @@ pub use solana_pubkey::syscalls::{ pub use solana_secp256k1_recover::sol_secp256k1_recover; #[deprecated(since = "2.1.0", note = "Use solana_sha256_hasher::sol_sha256 instead")] pub use solana_sha256_hasher::sol_sha256; -use { - crate::{ - instruction::{AccountMeta, ProcessedSiblingInstruction}, - pubkey::Pubkey, - }, - solana_define_syscall::define_syscall, -}; +use {crate::pubkey::Pubkey, solana_define_syscall::define_syscall}; define_syscall!(fn sol_log_64_(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64)); define_syscall!(fn sol_log_compute_units_()); define_syscall!(fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); @@ -34,8 +32,6 @@ define_syscall!(fn sol_invoke_signed_rust(instruction_addr: *const u8, account_i define_syscall!(fn sol_set_return_data(data: *const u8, length: u64)); define_syscall!(fn sol_get_return_data(data: *mut u8, length: u64, program_id: *mut Pubkey) -> u64); define_syscall!(fn sol_log_data(data: *const u8, data_len: u64)); -define_syscall!(fn sol_get_processed_sibling_instruction(index: u64, meta: *mut ProcessedSiblingInstruction, program_id: *mut Pubkey, data: *mut u8, accounts: *mut AccountMeta) -> u64); -define_syscall!(fn sol_get_stack_height() -> u64); define_syscall!(fn sol_curve_validate_point(curve_id: u64, point_addr: *const u8, result: *mut u8) -> u64); define_syscall!(fn sol_curve_group_op(curve_id: u64, group_op: u64, left_input_addr: *const u8, right_input_addr: *const u8, result_point_addr: *mut u8) -> u64); define_syscall!(fn sol_curve_multiscalar_mul(curve_id: u64, scalars_addr: *const u8, points_addr: *const u8, points_len: u64, result_point_addr: *mut u8) -> u64); diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index ec4ff6a161a007..18a8d1022708d6 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -200,7 +200,7 @@ pub struct Transaction { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - 
frozen_abi(digest = "5mA54x7skHmXUoVfvwNSDrSo4F8kXJSrDrKrLMcUkAib") + frozen_abi(digest = "H7xQFcd1MtMv9QKZWGatBAXwhg28tpeX59P3s8ZZLAY4") )] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] pub struct Transaction { From 83453226852901f6f7bc0524419449c8fee29a2d Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Fri, 13 Sep 2024 18:42:11 +0200 Subject: [PATCH 352/529] resolve `cargo check` problems with tpu-client (#2913) resolve cargo check problems with tpu-client --- tpu-client/src/nonblocking/tpu_client.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index 8c21ea4f510fc0..1b63059a6b8c86 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -2,10 +2,7 @@ pub use crate::tpu_client::Result; use { crate::tpu_client::{RecentLeaderSlots, TpuClientConfig, MAX_FANOUT_SLOTS}, bincode::serialize, - futures_util::{ - future::{join_all, FutureExt}, - stream::StreamExt, - }, + futures_util::{future::join_all, stream::StreamExt}, log::*, solana_connection_cache::{ connection_cache::{ @@ -33,8 +30,6 @@ use { }, std::{ collections::{HashMap, HashSet}, - future::Future, - iter, net::SocketAddr, str::FromStr, sync::{ @@ -51,10 +46,12 @@ use { #[cfg(feature = "spinner")] use { crate::tpu_client::{SEND_TRANSACTION_INTERVAL, TRANSACTION_RESEND_INTERVAL}, + futures_util::FutureExt, indicatif::ProgressBar, solana_rpc_client::spinner::{self, SendTransactionProgress}, solana_rpc_client_api::request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, solana_sdk::{message::Message, signers::Signers, transaction::TransactionError}, + std::{future::Future, iter}, }; #[derive(Error, Debug)] @@ -308,6 +305,7 @@ where // // Useful for end-users who don't need a persistent connection to each validator, // and want to abort more quickly. +#[cfg(feature = "spinner")] async fn timeout_future>>( timeout_duration: Duration, future: Fut, @@ -333,6 +331,7 @@ async fn sleep_and_set_message( Ok(()) } +#[cfg(feature = "spinner")] async fn sleep_and_send_wire_transaction_to_addr( sleep_duration: Duration, connection_cache: &ConnectionCache, From d7de94c6c1003da15281b5c8268b93087d1bc7fb Mon Sep 17 00:00:00 2001 From: Iaroslav Mazur Date: Fri, 13 Sep 2024 19:57:20 +0300 Subject: [PATCH 353/529] docs: programs.md (#2908) * docs: programs.md Enhance the documentation of the Native Programs Problem Poor documentation and wrong grammar (sometimes) * chore: pr review * Update docs/src/runtime/programs.md * Update docs/src/runtime/programs.md * Update docs/src/runtime/programs.md --------- Co-authored-by: Jon C --- docs/src/runtime/programs.md | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/docs/src/runtime/programs.md b/docs/src/runtime/programs.md index 018169ee1c68f9..bcb43b1ad51070 100644 --- a/docs/src/runtime/programs.md +++ b/docs/src/runtime/programs.md @@ -13,9 +13,9 @@ change is needed, new instructions are added and previous ones are marked deprecated. Apps can upgrade on their own timeline without concern of breakages across upgrades. -For each native program the program id and description each supported -instruction is provided. A transaction can mix and match instructions from different -programs, as well include instructions from on-chain programs. +For each native program, the program id and the description of each supported +instruction are provided. 
A transaction can mix and match instructions from different +programs, as well as include instructions from on-chain programs. ## System Program @@ -27,15 +27,16 @@ transfer lamports from System Program owned accounts and pay transaction fees. ## Config Program -Add configuration data to the chain and the list of public keys that are permitted to modify it +Add configuration data to the chain, followed by the list of public keys that +are allowed to modify it - Program id: `Config1111111111111111111111111111111111111` - Instructions: [config_instruction](https://docs.rs/solana-config-program/VERSION_FOR_DOCS_RS/solana_config_program/config_instruction/index.html) Unlike the other programs, the Config program does not define any individual -instructions. It has just one implicit instruction, a "store" instruction. Its -instruction data is a set of keys that gate access to the account, and the -data to store in it. +instructions. It has just one implicit instruction: "store". Its +instruction data is a set of keys that gate access to the account and the +data to store inside of it. ## Stake Program @@ -64,21 +65,21 @@ Deploys, upgrades, and executes programs on the chain. - Program id: `BPFLoaderUpgradeab1e11111111111111111111111` - Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/loader_upgradeable_instruction/enum.UpgradeableLoaderInstruction.html) -The BPF Upgradeable Loader marks itself as "owner" of the executable and +The BPF Upgradeable Loader marks itself as the "owner" of the executable and program-data accounts it creates to store your program. When a user invokes an -instruction via a program id, the Solana runtime will load both your the program +instruction via a program id, the Solana runtime loads both your program and its owner, the BPF Upgradeable Loader. The runtime then passes your program -to the BPF Upgradeable Loader to process the instruction. +to the BPF Upgradeable Loader for it to process the instruction. [More information about deployment](../cli/examples/deploy-a-program.md) ## Ed25519 Program -Verify ed25519 signature program. This program takes an ed25519 signature, public key, and message. +The program for verifying ed25519 signatures. It takes an ed25519 signature, a public key, and a message. Multiple signatures can be verified. If any of the signatures fail to verify, an error is returned. - Program id: `Ed25519SigVerify111111111111111111111111111` -- Instructions: [new_ed25519_instruction](https://github.com/solana-labs/solana/blob/master/sdk/src/ed25519_instruction.rs#L45) +- Instructions: [ed25519_instruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/ed25519_instruction/index.html) The ed25519 program processes an instruction. The first `u8` is a count of the number of signatures to check, which is followed by a single byte padding. After that, the @@ -96,7 +97,7 @@ struct Ed25519SignatureOffsets { } ``` -Pseudo code of the operation: +The pseudo code of the signature verification: ``` process_instruction() { @@ -139,7 +140,7 @@ struct Secp256k1SignatureOffsets { } ``` -Pseudo code of the operation: +The pseudo code of the recovery verification: ``` process_instruction() { @@ -170,6 +171,6 @@ by the signature cost verify multiplier. 
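Both precompiles use the same offsets-based layout, so it can help to see the bytes packed by hand. The sketch below is illustrative only, not the recommended API (in practice the `solana-sdk` helper referenced above builds this buffer for you): it assumes the 16-byte header implied by the structs above (a one-byte signature count, one byte of padding, then one `Ed25519SignatureOffsets` of seven little-endian `u16` fields) and that an instruction index of `u16::MAX` refers to the instruction currently being processed. The function name `build_ed25519_verify_data` is hypothetical.

```
fn build_ed25519_verify_data(pubkey: &[u8; 32], signature: &[u8; 64], message: &[u8]) -> Vec<u8> {
    // count (u8) + padding (u8) + the 7 u16 fields of Ed25519SignatureOffsets
    const HEADER_LEN: usize = 2 + 7 * 2;
    let public_key_offset = HEADER_LEN as u16;
    let signature_offset = public_key_offset + 32;
    let message_data_offset = signature_offset + 64;

    let mut data = Vec::with_capacity(HEADER_LEN + 32 + 64 + message.len());
    data.push(1); // number of signatures to check
    data.push(0); // single byte of padding
    for field in [
        signature_offset,
        u16::MAX, // signature_instruction_index: assumed to mean "this instruction"
        public_key_offset,
        u16::MAX, // public_key_instruction_index
        message_data_offset,
        message.len() as u16,
        u16::MAX, // message_instruction_index
    ] {
        data.extend_from_slice(&field.to_le_bytes());
    }
    data.extend_from_slice(pubkey);
    data.extend_from_slice(signature);
    data.extend_from_slice(message);
    data
}
```

The resulting bytes become the `data` of an instruction whose program id is `Ed25519SigVerify111111111111111111111111111`; the offsets walk in the pseudo code above then locates the public key, signature, and message within the same instruction.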
### Optimization notes

The operation will have to take place after (at least partial) deserialization,
-but all inputs come from the transaction data itself, this allows it to be
+but all inputs come from the transaction data itself, which allows it to be
 relatively easy to execute in parallel to transaction processing and PoH
 verification.

From fc0183d73477fc24ee4b7c46b80efdd44d4b9cf1 Mon Sep 17 00:00:00 2001
From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
Date: Sat, 14 Sep 2024 01:41:16 -0700
Subject: [PATCH 354/529] Use quinn 0.11.x (#1641)

1. An incoming connection can now be ignored when we do rate limiting, which is more efficient since it does not require queueing outgoing packets.
2. The rustls `ServerCertVerifier` interface changes are applied to the `SkipServerVerification` implementations. Changes in Cargo.toml handle the curve25519-dalek 3.2.1 patching, required by the zeroize version constraint of the newer rustls; the workaround is also applied to downstream tests.
3. Quinn 0.11.x introduced new error codes, which we need to handle.
4. `finish` on a stream is no longer an async function.
---
 .github/scripts/downstream-project-spl-common.sh |   4 +
 Cargo.lock                                       | 214 ++++++++++++++----
 Cargo.toml                                       |   8 +-
 core/src/repair/quic_endpoint.rs                 | 116 ++++++----
 programs/sbf/Cargo.lock                          | 209 +++++++++++++----
 quic-client/Cargo.toml                           |   2 +-
 quic-client/src/nonblocking/quic_client.rs       |  69 ++++--
 scripts/build-downstream-anchor-projects.sh      |   1 +
 streamer/Cargo.toml                              |   2 +-
 streamer/src/nonblocking/quic.rs                 |  81 ++++---
 streamer/src/nonblocking/testing_utilities.rs    |  65 ++++--
 streamer/src/quic.rs                             |  89 ++++++--
 streamer/src/tls_certificates.rs                 |  15 +-
 turbine/src/quic_endpoint.rs                     |  91 +++++---
 14 files changed, 708 insertions(+), 258 deletions(-)

diff --git a/.github/scripts/downstream-project-spl-common.sh b/.github/scripts/downstream-project-spl-common.sh
index e70c10d070b7e9..f5e0a7e448f729 100644
--- a/.github/scripts/downstream-project-spl-common.sh
+++ b/.github/scripts/downstream-project-spl-common.sh
@@ -30,3 +30,7 @@ sed -i 's/solana-geyser-plugin-interface/agave-geyser-plugin-interface/g' ./Carg
 # should be removed when spl bump their curve25519-dalek
 sed -i "s/^curve25519-dalek =.*/curve25519-dalek = \"4.1.3\"/" token/confidential-transfer/proof-generation/Cargo.toml
+
+# fix curve25519-dalek
+
+sed -i '/\[patch.crates-io\]/a curve25519-dalek = { git = "https://github.com/anza-xyz/curve25519-dalek.git", rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464" }' ./Cargo.toml
diff --git a/Cargo.lock b/Cargo.lock
index fb0f87b78b7640..0ce95129d77bea 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -886,7 +886,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex",
- "rustc-hash",
+ "rustc-hash 1.1.0",
  "shlex",
  "syn 2.0.77",
 ]
@@ -1279,6 +1279,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "cesu8"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
+
 [[package]]
 name = "cexpr"
 version = "0.6.0"
@@ -1470,6 +1476,16 @@ dependencies = [
  "unreachable",
 ]
 
+[[package]]
+name = "combine"
+version = "4.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
+dependencies = [
+ "bytes",
+ "memchr",
+]
+
 [[package]]
 name = "concurrent-queue"
 version = "2.5.0"
@@ -1749,9 +1765,9 @@ dependencies = [
 
 [[package]]
 name = "curve25519-dalek"
-version = "3.2.1"
+version = "3.2.0"
 source =
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest 0.9.0", @@ -2043,7 +2059,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek 3.2.1", + "curve25519-dalek 3.2.0", "ed25519", "rand 0.7.3", "serde", @@ -2877,7 +2893,7 @@ dependencies = [ "futures-util", "http", "hyper", - "rustls", + "rustls 0.21.12", "tokio", "tokio-rustls", ] @@ -3087,6 +3103,26 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine 4.6.7", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.24" @@ -3847,9 +3883,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -4466,16 +4502,17 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.10.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash", - "rustls", + "rustc-hash 2.0.0", + "rustls 0.23.12", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -4483,16 +4520,16 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand 0.8.5", - "ring 0.16.20", - "rustc-hash", - "rustls", - "rustls-native-certs", + "ring 0.17.3", + "rustc-hash 2.0.0", + "rustls 0.23.12", + "rustls-platform-verifier", "slab", "thiserror", "tinyvec", @@ -4501,15 +4538,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.4.0" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ - "bytes", "libc", + "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -4800,7 +4837,7 @@ dependencies = [ "once_cell", "percent-encoding 2.3.1", "pin-project-lite", - "rustls", + "rustls 0.21.12", "rustls-pemfile 1.0.0", 
"serde", "serde_json", @@ -4916,6 +4953,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.3.3" @@ -4964,40 +5007,89 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.3", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.23.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +dependencies = [ + "once_cell", + "ring 0.17.3", + "rustls-pki-types", + "rustls-webpki 0.102.7", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" -version = "0.6.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 0.2.1", + "rustls-pemfile 2.1.3", + "rustls-pki-types", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" -version = "0.2.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" dependencies = [ "base64 0.13.1", ] [[package]] name = "rustls-pemfile" -version = "1.0.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" + +[[package]] +name = "rustls-platform-verifier" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "034f511ae2823d6a657925d5f7cf821d7458367ce0a54fbe743c471fdddd8111" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.12", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki 0.102.7", + "security-framework", + "security-framework-sys", + "webpki-roots 0.26.5", + "winapi 0.3.9", ] +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -5008,6 +5100,17 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +dependencies = [ + "ring 0.17.3", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustversion" version = "1.0.17" @@ -5081,22 
+5184,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.4.2" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" dependencies = [ "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", + "num-bigint 0.4.6", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -5442,9 +5546,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" @@ -6250,7 +6357,7 @@ dependencies = [ "rayon", "rolling-file", "rustc_version 0.4.1", - "rustls", + "rustls 0.23.12", "serde", "serde_bytes", "serde_derive", @@ -7298,7 +7405,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls", + "rustls 0.23.12", "solana-connection-cache", "solana-logger", "solana-measure", @@ -7902,7 +8009,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls", + "rustls 0.23.12", "smallvec", "socket2 0.5.7", "solana-logger", @@ -8249,7 +8356,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls", + "rustls 0.23.12", "solana-entry", "solana-feature-set", "solana-gossip", @@ -8546,7 +8653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" dependencies = [ "byteorder", - "combine", + "combine 3.8.1", "gdbstub", "hash32", "libc", @@ -9194,7 +9301,7 @@ dependencies = [ "once_cell", "pbkdf2 0.4.0", "rand 0.7.3", - "rustc-hash", + "rustc-hash 1.1.0", "sha2 0.9.9", "thiserror", "unicode-normalization", @@ -9282,7 +9389,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.12", "tokio", ] @@ -9321,7 +9428,7 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls", + "rustls 0.21.12", "tokio", "tokio-rustls", "tungstenite", @@ -9568,7 +9675,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls", + "rustls 0.21.12", "sha1", "thiserror", "url 2.5.2", @@ -9878,7 +9985,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.101.7", ] [[package]] @@ -9887,6 +9994,15 @@ version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +[[package]] +name = "webpki-roots" +version = "0.26.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +dependencies = [ + 
"rustls-pki-types", +] + [[package]] name = "which" version = "4.2.2" @@ -10215,9 +10331,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.3.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 2b8cbd137f4e12..6b4eac9eff9fe8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -325,8 +325,8 @@ prost-types = "0.11.9" protobuf-src = "1.1.0" qstring = "0.7.2" qualifier_attr = { version = "0.2.2", default-features = false } -quinn = "0.10.2" -quinn-proto = "0.10.6" +quinn = "0.11.4" +quinn-proto = "0.11.7" quote = "1.0" rand = "0.8.5" rand_chacha = "0.3.1" @@ -338,7 +338,7 @@ reqwest-middleware = "0.2.5" rolling-file = "0.2.0" rpassword = "7.3" rustc_version = "0.4" -rustls = { version = "0.21.12", default-features = false, features = ["quic"] } +rustls = { version = "0.23.9", default-features = false } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" @@ -522,7 +522,7 @@ winapi = "0.3.8" winreg = "0.50" x509-parser = "0.14.0" # See "zeroize versioning issues" below if you are updating this version. -zeroize = { version = "1.3", default-features = false } +zeroize = { version = "1.7", default-features = false } zstd = "0.13.2" [patch.crates-io] diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index abaf6d03483f10..5f4fea3f637e73 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -5,11 +5,15 @@ use { itertools::Itertools, log::error, quinn::{ - ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, - EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, + crypto::rustls::{QuicClientConfig, QuicServerConfig}, + ClientConfig, ClosedStream, ConnectError, Connecting, Connection, ConnectionError, + Endpoint, EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, ServerConfig, TokioRuntime, TransportConfig, VarInt, WriteError, }, - rustls::{Certificate, KeyLogFile, PrivateKey}, + rustls::{ + pki_types::{CertificateDer, PrivateKeyDer}, + CertificateError, KeyLogFile, + }, serde_bytes::ByteBuf, solana_quic_client::nonblocking::quic_client::SkipServerVerification, solana_runtime::bank_forks::BankForks, @@ -105,6 +109,8 @@ pub(crate) enum Error { TlsError(#[from] rustls::Error), #[error(transparent)] WriteError(#[from] WriteError), + #[error(transparent)] + ClosedStream(#[from] ClosedStream), } macro_rules! 
add_metric { @@ -122,7 +128,7 @@ pub(crate) fn new_quic_endpoint( bank_forks: Arc>, ) -> Result<(Endpoint, AsyncSender, AsyncTryJoinHandle), Error> { let (cert, key) = new_dummy_x509_certificate(keypair); - let server_config = new_server_config(cert.clone(), key.clone())?; + let server_config = new_server_config(cert.clone(), key.clone_key())?; let client_config = new_client_config(cert, key)?; let mut endpoint = { // Endpoint::new requires entering the runtime context, @@ -168,29 +174,36 @@ pub(crate) fn close_quic_endpoint(endpoint: &Endpoint) { ); } -fn new_server_config(cert: Certificate, key: PrivateKey) -> Result { +fn new_server_config( + cert: CertificateDer<'static>, + key: PrivateKeyDer<'static>, +) -> Result { let mut config = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_client_cert_verifier(Arc::new(SkipClientVerification {})) + .with_client_cert_verifier(SkipClientVerification::new()) .with_single_cert(vec![cert], key)?; config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; config.key_log = Arc::new(KeyLogFile::new()); - let mut config = ServerConfig::with_crypto(Arc::new(config)); + let quic_server_config = QuicServerConfig::try_from(config) + .map_err(|_err| rustls::Error::InvalidCertificate(CertificateError::BadSignature))?; + + let mut config = ServerConfig::with_crypto(Arc::new(quic_server_config)); config .transport_config(Arc::new(new_transport_config())) - .use_retry(true) .migration(false); Ok(config) } -fn new_client_config(cert: Certificate, key: PrivateKey) -> Result { +fn new_client_config( + cert: CertificateDer<'static>, + key: PrivateKeyDer<'static>, +) -> Result { let mut config = rustls::ClientConfig::builder() - .with_safe_defaults() - .with_custom_certificate_verifier(Arc::new(SkipServerVerification {})) + .dangerous() + .with_custom_certificate_verifier(SkipServerVerification::new()) .with_client_auth_cert(vec![cert], key)?; config.enable_early_data = true; config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; - let mut config = ClientConfig::new(Arc::new(config)); + let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(config).unwrap())); config.transport_config(Arc::new(new_transport_config())); Ok(config) } @@ -219,17 +232,26 @@ async fn run_server( let stats = Arc::::default(); let report_metrics_task = tokio::task::spawn(report_metrics_task("repair_quic_server", stats.clone())); - while let Some(connecting) = endpoint.accept().await { - tokio::task::spawn(handle_connecting_task( - endpoint.clone(), - connecting, - remote_request_sender.clone(), - bank_forks.clone(), - prune_cache_pending.clone(), - router.clone(), - cache.clone(), - stats.clone(), - )); + while let Some(incoming) = endpoint.accept().await { + let remote_addr: SocketAddr = incoming.remote_address(); + let connecting = incoming.accept(); + match connecting { + Ok(connecting) => { + tokio::task::spawn(handle_connecting_task( + endpoint.clone(), + connecting, + remote_request_sender.clone(), + bank_forks.clone(), + prune_cache_pending.clone(), + router.clone(), + cache.clone(), + stats.clone(), + )); + } + Err(error) => { + debug!("Error while accepting incoming connection: {error:?} from {remote_addr}"); + } + } } report_metrics_task.abort(); } @@ -504,7 +526,7 @@ async fn handle_streams( send_stream.write_all(&size.to_le_bytes()).await?; send_stream.write_all(&chunk).await?; } - send_stream.finish().await.map_err(Error::from) + send_stream.finish().map_err(Error::from) } async fn send_requests_task( @@ -565,7 +587,7 @@ async fn 
send_request( const READ_TIMEOUT_DURATION: Duration = Duration::from_secs(10); let (mut send_stream, mut recv_stream) = connection.open_bi().await?; send_stream.write_all(&bytes).await?; - send_stream.finish().await?; + send_stream.finish()?; // Each response is at most PACKET_DATA_SIZE bytes and requires // an additional 8 bytes to encode its length. let size = PACKET_DATA_SIZE @@ -778,6 +800,7 @@ struct RepairQuicStats { connection_error_timed_out: AtomicU64, connection_error_transport_error: AtomicU64, connection_error_version_mismatch: AtomicU64, + connection_error_connection_limit_exceeded: AtomicU64, invalid_identity: AtomicU64, no_response_received: AtomicU64, read_to_end_error_connection_lost: AtomicU64, @@ -792,6 +815,12 @@ struct RepairQuicStats { write_error_stopped: AtomicU64, write_error_unknown_stream: AtomicU64, write_error_zero_rtt_rejected: AtomicU64, + connect_error_cids_exhausted: AtomicU64, + connect_error_invalid_server_name: AtomicU64, + connection_error_cids_exhausted: AtomicU64, + closed_streams: AtomicU64, + read_to_end_error_closed_stream: AtomicU64, + write_error_closed_stream: AtomicU64, } async fn report_metrics_task(name: &'static str, stats: Arc) { @@ -808,12 +837,6 @@ fn record_error(err: &Error, stats: &RepairQuicStats) { Error::ConnectError(ConnectError::EndpointStopping) => { add_metric!(stats.connect_error_other) } - Error::ConnectError(ConnectError::TooManyConnections) => { - add_metric!(stats.connect_error_too_many_connections) - } - Error::ConnectError(ConnectError::InvalidDnsName(_)) => { - add_metric!(stats.connect_error_other) - } Error::ConnectError(ConnectError::InvalidRemoteAddress(_)) => { add_metric!(stats.connect_error_invalid_remote_address) } @@ -851,9 +874,6 @@ fn record_error(err: &Error, stats: &RepairQuicStats) { Error::ReadToEndError(ReadToEndError::Read(ReadError::ConnectionLost(_))) => { add_metric!(stats.read_to_end_error_connection_lost) } - Error::ReadToEndError(ReadToEndError::Read(ReadError::UnknownStream)) => { - add_metric!(stats.read_to_end_error_unknown_stream) - } Error::ReadToEndError(ReadToEndError::Read(ReadError::IllegalOrderedRead)) => { add_metric!(stats.read_to_end_error_illegal_ordered_read) } @@ -869,12 +889,27 @@ fn record_error(err: &Error, stats: &RepairQuicStats) { Error::WriteError(WriteError::ConnectionLost(_)) => { add_metric!(stats.write_error_connection_lost) } - Error::WriteError(WriteError::UnknownStream) => { - add_metric!(stats.write_error_unknown_stream) - } Error::WriteError(WriteError::ZeroRttRejected) => { add_metric!(stats.write_error_zero_rtt_rejected) } + Error::ConnectError(ConnectError::CidsExhausted) => { + add_metric!(stats.connect_error_cids_exhausted) + } + Error::ConnectError(ConnectError::InvalidServerName(_)) => { + add_metric!(stats.connect_error_invalid_server_name) + } + Error::ConnectionError(ConnectionError::CidsExhausted) => { + add_metric!(stats.connection_error_cids_exhausted) + } + Error::ClosedStream(_) => { + add_metric!(stats.closed_streams) + } + Error::ReadToEndError(ReadToEndError::Read(ReadError::ClosedStream)) => { + add_metric!(stats.read_to_end_error_closed_stream) + } + Error::WriteError(WriteError::ClosedStream) => { + add_metric!(stats.write_error_closed_stream) + } } } @@ -936,6 +971,11 @@ fn report_metrics(name: &'static str, stats: &RepairQuicStats) { reset_metric!(stats.connection_error_version_mismatch), i64 ), + ( + "connection_error_connection_limit_exceeded", + reset_metric!(stats.connection_error_connection_limit_exceeded), + i64 + ), ( "invalid_identity", 
reset_metric!(stats.invalid_identity), diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cbe090c6449fe3..c275a7fe049f48 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -662,7 +662,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", "syn 2.0.58", ] @@ -927,6 +927,12 @@ dependencies = [ "libc", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -1027,6 +1033,16 @@ dependencies = [ "unreachable", ] +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1207,9 +1223,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder 1.5.0", "digest 0.9.0", @@ -1484,7 +1500,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek 3.2.1", + "curve25519-dalek 3.2.0", "ed25519", "rand 0.7.3", "serde", @@ -2209,7 +2225,7 @@ dependencies = [ "futures-util", "http", "hyper", - "rustls", + "rustls 0.21.12", "tokio", "tokio-rustls", ] @@ -2408,6 +2424,26 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine 4.6.7", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.21" @@ -3208,9 +3244,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" @@ -3739,16 +3775,17 @@ dependencies = [ [[package]] name = "quinn" -version = "0.10.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash", - "rustls", + "rustc-hash 2.0.0", + "rustls 0.23.12", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -3756,16 +3793,16 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.6" +version = "0.11.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand 0.8.5", - "ring 0.16.20", - "rustc-hash", - "rustls", - "rustls-native-certs", + "ring 0.17.3", + "rustc-hash 2.0.0", + "rustls 0.23.12", + "rustls-platform-verifier", "slab", "thiserror", "tinyvec", @@ -3774,15 +3811,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.4.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" dependencies = [ - "bytes", "libc", + "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -4008,8 +4045,8 @@ dependencies = [ "once_cell", "percent-encoding 2.3.1", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.12", + "rustls-pemfile 1.0.0", "serde", "serde_json", "serde_urlencoded", @@ -4124,6 +4161,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.4.0" @@ -4163,18 +4206,33 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.3", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.23.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +dependencies = [ + "once_cell", + "ring 0.17.3", + "rustls-pki-types", + "rustls-webpki 0.102.7", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.1.3", + "rustls-pki-types", "schannel", "security-framework", ] @@ -4188,6 +4246,49 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "rustls-pemfile" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +dependencies = [ + "base64 0.22.1", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" + +[[package]] +name = "rustls-platform-verifier" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.12", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki 0.102.7", + "security-framework", + "security-framework-sys", + "webpki-roots 
0.26.5", + "winapi 0.3.9", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -4198,6 +4299,17 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +dependencies = [ + "ring 0.17.3", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustversion" version = "1.0.17" @@ -4260,6 +4372,7 @@ dependencies = [ "core-foundation", "core-foundation-sys", "libc", + "num-bigint 0.4.6", "security-framework-sys", ] @@ -4504,9 +4617,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.2" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" @@ -4959,7 +5075,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls", + "rustls 0.23.12", "serde", "serde_bytes", "serde_derive", @@ -5661,7 +5777,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls", + "rustls 0.23.12", "solana-connection-cache", "solana-measure", "solana-metrics", @@ -6603,7 +6719,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls", + "rustls 0.23.12", "smallvec", "socket2 0.5.7", "solana-measure", @@ -6819,7 +6935,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls", + "rustls 0.23.12", "solana-entry", "solana-feature-set", "solana-gossip", @@ -7047,7 +7163,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" dependencies = [ "byteorder 1.5.0", - "combine", + "combine 3.8.1", "hash32", "libc", "log", @@ -7630,7 +7746,7 @@ dependencies = [ "once_cell", "pbkdf2 0.4.0", "rand 0.7.3", - "rustc-hash", + "rustc-hash 1.1.0", "sha2 0.9.9", "thiserror", "unicode-normalization", @@ -7710,7 +7826,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.12", "tokio", ] @@ -7749,7 +7865,7 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls", + "rustls 0.21.12", "tokio", "tokio-rustls", "tungstenite", @@ -7833,7 +7949,7 @@ dependencies = [ "percent-encoding 2.3.1", "pin-project", "prost", - "rustls-pemfile", + "rustls-pemfile 1.0.0", "tokio", "tokio-rustls", "tokio-stream", @@ -7971,7 +8087,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls", + "rustls 0.21.12", "sha1", "thiserror", "url 2.5.2", @@ -8260,7 +8376,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" dependencies = [ - "rustls-webpki", + "rustls-webpki 0.101.7", ] [[package]] @@ -8269,6 +8385,15 @@ version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +[[package]] +name = 
"webpki-roots" +version = "0.26.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "which" version = "4.2.5" @@ -8525,9 +8650,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.3.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] diff --git a/quic-client/Cargo.toml b/quic-client/Cargo.toml index 05800896c30f80..428b03cb0a6892 100644 --- a/quic-client/Cargo.toml +++ b/quic-client/Cargo.toml @@ -18,7 +18,7 @@ lazy_static = { workspace = true } log = { workspace = true } quinn = { workspace = true } quinn-proto = { workspace = true } -rustls = { workspace = true, features = ["dangerous_configuration"] } +rustls = { workspace = true } solana-connection-cache = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index b40e89ec216c9d..c6a66fe56bcd10 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -8,8 +8,9 @@ use { itertools::Itertools, log::*, quinn::{ - ClientConfig, ConnectError, Connection, ConnectionError, Endpoint, EndpointConfig, - IdleTimeout, TokioRuntime, TransportConfig, WriteError, + crypto::rustls::QuicClientConfig, ClientConfig, ClosedStream, ConnectError, Connection, + ConnectionError, Endpoint, EndpointConfig, IdleTimeout, TokioRuntime, TransportConfig, + WriteError, }, solana_connection_cache::{ client_connection::ClientStats, connection_cache_stats::ConnectionCacheStats, @@ -38,31 +39,63 @@ use { tokio::{sync::OnceCell, time::timeout}, }; -pub struct SkipServerVerification; +#[derive(Debug)] +pub struct SkipServerVerification(Arc); impl SkipServerVerification { pub fn new() -> Arc { - Arc::new(Self) + Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) } } -impl rustls::client::ServerCertVerifier for SkipServerVerification { +impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &rustls::pki_types::CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + rustls::crypto::verify_tls12_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &rustls::pki_types::CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + rustls::crypto::verify_tls13_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + self.0.signature_verification_algorithms.supported_schemes() + } + fn verify_server_cert( &self, - _end_entity: &rustls::Certificate, - _intermediates: &[rustls::Certificate], - _server_name: &rustls::ServerName, - _scts: &mut dyn Iterator, + _end_entity: &rustls::pki_types::CertificateDer<'_>, + _intermediates: &[rustls::pki_types::CertificateDer<'_>], + _server_name: &rustls::pki_types::ServerName<'_>, _ocsp_response: &[u8], - _now: std::time::SystemTime, - ) -> Result { - Ok(rustls::client::ServerCertVerified::assertion()) + _now: rustls::pki_types::UnixTime, + ) 
-> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) } } pub struct QuicClientCertificate { - pub certificate: rustls::Certificate, - pub key: rustls::PrivateKey, + pub certificate: rustls::pki_types::CertificateDer<'static>, + pub key: rustls::pki_types::PrivateKeyDer<'static>, } /// A lazy-initialized Quic Endpoint @@ -80,6 +113,8 @@ pub enum QuicError { ConnectionError(#[from] ConnectionError), #[error(transparent)] ConnectError(#[from] ConnectError), + #[error(transparent)] + ClosedStream(#[from] ClosedStream), } impl From for ClientErrorKind { @@ -115,17 +150,17 @@ impl QuicLazyInitializedEndpoint { }; let mut crypto = rustls::ClientConfig::builder() - .with_safe_defaults() + .dangerous() .with_custom_certificate_verifier(SkipServerVerification::new()) .with_client_auth_cert( vec![self.client_certificate.certificate.clone()], - self.client_certificate.key.clone(), + self.client_certificate.key.clone_key(), ) .expect("Failed to set QUIC client certificates"); crypto.enable_early_data = true; crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()]; - let mut config = ClientConfig::new(Arc::new(crypto)); + let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(crypto).unwrap())); let mut transport_config = TransportConfig::default(); let timeout = IdleTimeout::try_from(QUIC_MAX_TIMEOUT).unwrap(); diff --git a/scripts/build-downstream-anchor-projects.sh b/scripts/build-downstream-anchor-projects.sh index e282a446c7c79a..fe2f5e813ee613 100755 --- a/scripts/build-downstream-anchor-projects.sh +++ b/scripts/build-downstream-anchor-projects.sh @@ -69,6 +69,7 @@ anchor() { patch_crates_io_solana Cargo.toml "$solana_dir" patch_spl_crates . Cargo.toml "$spl_dir" + sed -i '/\[patch.crates-io\]/a curve25519-dalek = { git = "https://github.com/anza-xyz/curve25519-dalek.git", rev = "b500cdc2a920cd5bff9e2dd974d7b97349d61464" }' ./Cargo.toml $cargo test # serum_dex and mpl-token-metadata are using caret versions of solana and SPL dependencies # rather pull and patch those as well, ignore for now diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 89ce80c910c80f..873cb4459e3327 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -28,7 +28,7 @@ percentage = { workspace = true } quinn = { workspace = true } quinn-proto = { workspace = true } rand = { workspace = true } -rustls = { workspace = true, features = ["dangerous_configuration"] } +rustls = { workspace = true } smallvec = { workspace = true } socket2 = { workspace = true } solana-measure = { workspace = true } diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index f1b0a5a7efd5ed..760902a6aab06b 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -198,7 +198,7 @@ pub fn spawn_server_multi( let concurrent_connections = (max_staked_connections + max_unstaked_connections) / sockets.len(); let max_concurrent_connections = concurrent_connections + concurrent_connections / 4; - let (config, _cert) = configure_server(keypair, max_concurrent_connections)?; + let (config, _) = configure_server(keypair)?; let endpoints = sockets .into_iter() @@ -239,7 +239,7 @@ pub fn spawn_server_multi( #[allow(clippy::too_many_arguments)] async fn run_server( name: &'static str, - incoming: Vec, + endpoints: Vec, packet_sender: Sender, exit: Arc, max_connections_per_peer: usize, @@ -268,7 +268,7 @@ async fn run_server( )); stats .quic_endpoints_count - .store(incoming.len(), Ordering::Relaxed); + .store(endpoints.len(), Ordering::Relaxed); let 
staked_connection_table: Arc> = Arc::new(Mutex::new(ConnectionTable::new())); let (sender, receiver) = async_unbounded(); @@ -280,7 +280,7 @@ async fn run_server( coalesce, )); - let mut accepts = incoming + let mut accepts = endpoints .iter() .enumerate() .map(|(i, incoming)| { @@ -296,7 +296,7 @@ async fn run_server( if let Some((connecting, i)) = ready { accepts.push( Box::pin(EndpointAccept { - accept: incoming[i].accept(), + accept: endpoints[i].accept(), endpoint: i, } )); @@ -316,11 +316,11 @@ async fn run_server( last_datapoint = Instant::now(); } - if let Ok(Some(connection)) = timeout_connection { + if let Ok(Some(incoming)) = timeout_connection { stats .total_incoming_connection_attempts .fetch_add(1, Ordering::Relaxed); - let remote_address = connection.remote_address(); + let remote_address = incoming.remote_address(); // first check overall connection rate limit: if !overall_connection_rate_limiter.is_allowed() { @@ -331,6 +331,7 @@ async fn run_server( stats .connection_rate_limited_across_all .fetch_add(1, Ordering::Relaxed); + incoming.ignore(); continue; } @@ -349,26 +350,35 @@ async fn run_server( stats .connection_rate_limited_per_ipaddr .fetch_add(1, Ordering::Relaxed); + incoming.ignore(); continue; } stats .outstanding_incoming_connection_attempts .fetch_add(1, Ordering::Relaxed); - tokio::spawn(setup_connection( - connection, - unstaked_connection_table.clone(), - staked_connection_table.clone(), - sender.clone(), - max_connections_per_peer, - staked_nodes.clone(), - max_staked_connections, - max_unstaked_connections, - max_streams_per_ms, - stats.clone(), - wait_for_chunk_timeout, - stream_load_ema.clone(), - )); + let connecting = incoming.accept(); + match connecting { + Ok(connecting) => { + tokio::spawn(setup_connection( + connecting, + unstaked_connection_table.clone(), + staked_connection_table.clone(), + sender.clone(), + max_connections_per_peer, + staked_nodes.clone(), + max_staked_connections, + max_unstaked_connections, + max_streams_per_ms, + stats.clone(), + wait_for_chunk_timeout, + stream_load_ema.clone(), + )); + } + Err(err) => { + debug!("Incoming::accept(): error {:?}", err); + } + } } else { debug!("accept(): Timed out waiting for connection"); } @@ -394,7 +404,7 @@ pub fn get_remote_pubkey(connection: &Connection) -> Option { // Use the client cert only if it is self signed and the chain length is 1. connection .peer_identity()? - .downcast::>() + .downcast::>() .ok() .filter(|certs| certs.len() == 1)? .first() @@ -1263,7 +1273,7 @@ impl Drop for ConnectionEntry { } } -#[derive(Copy, Clone, Eq, Hash, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] enum ConnectionTableKey { IP(IpAddr), Pubkey(Pubkey), @@ -1422,7 +1432,7 @@ struct EndpointAccept<'a> { } impl<'a> Future for EndpointAccept<'a> { - type Output = (Option, usize); + type Output = (Option, usize); fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { let i = self.endpoint; @@ -1463,7 +1473,7 @@ pub mod test { for i in 0..total { let mut s1 = conn1.open_uni().await.unwrap(); s1.write_all(&[0u8]).await.unwrap(); - s1.finish().await.unwrap(); + s1.finish().unwrap(); info!("done {}", i); sleep(Duration::from_millis(1000)).await; } @@ -1488,7 +1498,7 @@ pub mod test { let s2 = conn2.open_uni().await; if let Ok(mut s2) = s2 { s1.write_all(&[0u8]).await.unwrap(); - s1.finish().await.unwrap(); + s1.finish().unwrap(); // Send enough data to create more than 1 chunks. // The first will try to open the connection (which should fail). 
// The following chunks will enable the detection of connection failure. @@ -1496,9 +1506,6 @@ pub mod test { s2.write_all(&data) .await .expect_err("shouldn't be able to open 2 connections"); - s2.finish() - .await - .expect_err("shouldn't be able to open 2 connections"); } else { // It has been noticed if there is already connection open against the server, this open_uni can fail // with ApplicationClosed(ApplicationClose) error due to CONNECTION_CLOSE_CODE_TOO_MANY before writing to @@ -1521,9 +1528,9 @@ pub mod test { let mut s1 = c1.open_uni().await.unwrap(); let mut s2 = c2.open_uni().await.unwrap(); s1.write_all(&[0u8]).await.unwrap(); - s1.finish().await.unwrap(); + s1.finish().unwrap(); s2.write_all(&[0u8]).await.unwrap(); - s2.finish().await.unwrap(); + s2.finish().unwrap(); num_expected_packets += 2; sleep(Duration::from_millis(200)).await; } @@ -1563,7 +1570,7 @@ pub mod test { for _ in 0..num_bytes { s1.write_all(&[0u8]).await.unwrap(); } - s1.finish().await.unwrap(); + s1.finish().unwrap(); let mut all_packets = vec![]; let now = Instant::now(); @@ -1598,7 +1605,8 @@ pub mod test { // Ignoring any errors here. s1.finish() will test the error condition s1.write_all(&[0u8]).await.unwrap_or_default(); } - s1.finish().await.unwrap_err(); + s1.finish().unwrap_or_default(); + s1.stopped().await.unwrap_err(); } } @@ -1712,7 +1720,6 @@ pub mod test { // Test that more writes to the stream will fail (i.e. the stream is no longer writable // after the timeouts) assert!(s1.write_all(&[0u8]).await.is_err()); - assert!(s1.finish().await.is_err()); exit.store(true, Ordering::Relaxed); join_handle.await.unwrap(); @@ -1775,7 +1782,7 @@ pub mod test { let mut s1 = conn1.open_uni().await.unwrap(); s1.write_all(&[0u8]).await.unwrap(); - s1.finish().await.unwrap(); + s1.finish().unwrap(); let mut s2 = conn2.open_uni().await.unwrap(); conn1.close( @@ -1789,7 +1796,7 @@ pub mod test { assert_eq!(stats.connection_removed.load(Ordering::Relaxed), 1); s2.write_all(&[0u8]).await.unwrap(); - s2.finish().await.unwrap(); + s2.finish().unwrap(); conn2.close( CONNECTION_CLOSE_CODE_DROPPED_ENTRY.into(), @@ -2308,7 +2315,7 @@ pub mod test { let mut send_stream = client_connection.open_uni().await.unwrap(); let data = format!("{i}").into_bytes(); send_stream.write_all(&data).await.unwrap(); - send_stream.finish().await.unwrap(); + send_stream.finish().unwrap(); } let elapsed_sending: f64 = start_time.elapsed().as_secs_f64(); info!("Elapsed sending: {elapsed_sending}"); diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index d0a1fa98d6d182..4a63458e7c6d74 100644 --- a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -10,7 +10,10 @@ use { tls_certificates::new_dummy_x509_certificate, }, crossbeam_channel::unbounded, - quinn::{ClientConfig, Connection, EndpointConfig, IdleTimeout, TokioRuntime, TransportConfig}, + quinn::{ + crypto::rustls::QuicClientConfig, ClientConfig, Connection, EndpointConfig, IdleTimeout, + TokioRuntime, TransportConfig, + }, solana_perf::packet::PacketBatch, solana_sdk::{ net::DEFAULT_TPU_COALESCE, @@ -25,25 +28,57 @@ use { tokio::task::JoinHandle, }; -struct SkipServerVerification; +#[derive(Debug)] +pub struct SkipServerVerification(Arc); impl SkipServerVerification { - fn new() -> Arc { - Arc::new(Self) + pub fn new() -> Arc { + Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) } } -impl rustls::client::ServerCertVerifier for SkipServerVerification { +impl 
rustls::client::danger::ServerCertVerifier for SkipServerVerification { + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &rustls::pki_types::CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + rustls::crypto::verify_tls12_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &rustls::pki_types::CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + rustls::crypto::verify_tls13_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + self.0.signature_verification_algorithms.supported_schemes() + } + fn verify_server_cert( &self, - _end_entity: &rustls::Certificate, - _intermediates: &[rustls::Certificate], - _server_name: &rustls::ServerName, - _scts: &mut dyn Iterator, + _end_entity: &rustls::pki_types::CertificateDer<'_>, + _intermediates: &[rustls::pki_types::CertificateDer<'_>], + _server_name: &rustls::pki_types::ServerName<'_>, _ocsp_response: &[u8], - _now: std::time::SystemTime, - ) -> Result { - Ok(rustls::client::ServerCertVerified::assertion()) + _now: rustls::pki_types::UnixTime, + ) -> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) } } @@ -51,15 +86,15 @@ pub fn get_client_config(keypair: &Keypair) -> ClientConfig { let (cert, key) = new_dummy_x509_certificate(keypair); let mut crypto = rustls::ClientConfig::builder() - .with_safe_defaults() + .dangerous() .with_custom_certificate_verifier(SkipServerVerification::new()) .with_client_auth_cert(vec![cert], key) - .expect("Provided key should be correctly set."); + .expect("Failed to use client certificate"); crypto.enable_early_data = true; crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()]; - let mut config = ClientConfig::new(Arc::new(crypto)); + let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(crypto).unwrap())); let mut transport_config = TransportConfig::default(); let timeout = IdleTimeout::try_from(QUIC_MAX_TIMEOUT).unwrap(); diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index a3c294236ae773..e9ca06a10bb133 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -5,8 +5,15 @@ use { }, crossbeam_channel::Sender, pem::Pem, - quinn::{Endpoint, IdleTimeout, ServerConfig}, - rustls::{server::ClientCertVerified, Certificate, DistinguishedName, KeyLogFile}, + quinn::{ + crypto::rustls::{NoInitialCipherSuite, QuicServerConfig}, + Endpoint, IdleTimeout, ServerConfig, + }, + rustls::{ + pki_types::{CertificateDer, UnixTime}, + server::danger::ClientCertVerified, + DistinguishedName, KeyLogFile, + }, solana_perf::packet::PacketBatch, solana_sdk::{ packet::PACKET_DATA_SIZE, @@ -20,7 +27,7 @@ use { Arc, Mutex, RwLock, }, thread, - time::{Duration, SystemTime}, + time::Duration, }, tokio::runtime::Runtime, }; @@ -31,11 +38,12 @@ pub const MAX_UNSTAKED_CONNECTIONS: usize = 500; // This will be adjusted and parameterized in follow-on PRs. 
pub const DEFAULT_QUIC_ENDPOINTS: usize = 1; -pub struct SkipClientVerification; +#[derive(Debug)] +pub struct SkipClientVerification(Arc); impl SkipClientVerification { pub fn new() -> Arc { - Arc::new(Self) + Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) } } @@ -45,18 +53,58 @@ pub struct SpawnServerResult { pub key_updater: Arc, } -impl rustls::server::ClientCertVerifier for SkipClientVerification { - fn client_auth_root_subjects(&self) -> &[DistinguishedName] { +impl rustls::server::danger::ClientCertVerifier for SkipClientVerification { + fn verify_client_cert( + &self, + _end_entity: &CertificateDer, + _intermediates: &[CertificateDer], + _now: UnixTime, + ) -> Result { + Ok(rustls::server::danger::ClientCertVerified::assertion()) + } + + fn root_hint_subjects(&self) -> &[DistinguishedName] { &[] } - fn verify_client_cert( + fn verify_tls12_signature( &self, - _end_entity: &Certificate, - _intermediates: &[Certificate], - _now: SystemTime, - ) -> Result { - Ok(rustls::server::ClientCertVerified::assertion()) + message: &[u8], + cert: &rustls::pki_types::CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + rustls::crypto::verify_tls12_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &rustls::pki_types::CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + rustls::crypto::verify_tls13_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + self.0.signature_verification_algorithms.supported_schemes() + } + + fn offer_client_auth(&self) -> bool { + true + } + + fn client_auth_mandatory(&self) -> bool { + self.offer_client_auth() } } @@ -64,25 +112,22 @@ impl rustls::server::ClientCertVerifier for SkipClientVerification { #[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527 pub(crate) fn configure_server( identity_keypair: &Keypair, - max_concurrent_connections: usize, ) -> Result<(ServerConfig, String), QuicServerError> { let (cert, priv_key) = new_dummy_x509_certificate(identity_keypair); let cert_chain_pem_parts = vec![Pem { tag: "CERTIFICATE".to_string(), - contents: cert.0.clone(), + contents: cert.as_ref().to_vec(), }]; let cert_chain_pem = pem::encode_many(&cert_chain_pem_parts); let mut server_tls_config = rustls::ServerConfig::builder() - .with_safe_defaults() .with_client_cert_verifier(SkipClientVerification::new()) .with_single_cert(vec![cert], priv_key)?; server_tls_config.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()]; server_tls_config.key_log = Arc::new(KeyLogFile::new()); + let quic_server_config = QuicServerConfig::try_from(server_tls_config)?; - let mut server_config = ServerConfig::with_crypto(Arc::new(server_tls_config)); - server_config.concurrent_connections(max_concurrent_connections as u32); - server_config.use_retry(true); + let mut server_config = ServerConfig::with_crypto(Arc::new(quic_server_config)); let config = Arc::get_mut(&mut server_config.transport).unwrap(); // QUIC_MAX_CONCURRENT_STREAMS doubled, which was found to improve reliability @@ -122,16 +167,17 @@ pub enum QuicServerError { EndpointFailed(std::io::Error), #[error("TLS error: {0}")] TlsError(#[from] rustls::Error), + #[error("No initial cipher suite")] + NoInitialCipherSuite(#[from] NoInitialCipherSuite), } pub struct EndpointKeyUpdater { endpoints: Vec, - max_concurrent_connections: 
usize, } impl NotifyKeyUpdate for EndpointKeyUpdater { fn update_key(&self, key: &Keypair) -> Result<(), Box> { - let (config, _) = configure_server(key, self.max_concurrent_connections)?; + let (config, _) = configure_server(key)?; for endpoint in &self.endpoints { endpoint.set_server_config(Some(config.clone())); } @@ -632,7 +678,6 @@ pub fn spawn_server_multi( .unwrap(); let updater = EndpointKeyUpdater { endpoints: result.endpoints.clone(), - max_concurrent_connections: result.max_concurrent_connections, }; Ok(SpawnServerResult { endpoints: result.endpoints, diff --git a/streamer/src/tls_certificates.rs b/streamer/src/tls_certificates.rs index 866f6155abe3f6..fba1441de88a86 100644 --- a/streamer/src/tls_certificates.rs +++ b/streamer/src/tls_certificates.rs @@ -3,7 +3,12 @@ use { x509_parser::{prelude::*, public_key::PublicKey}, }; -pub fn new_dummy_x509_certificate(keypair: &Keypair) -> (rustls::Certificate, rustls::PrivateKey) { +pub fn new_dummy_x509_certificate( + keypair: &Keypair, +) -> ( + rustls::pki_types::CertificateDer<'static>, + rustls::pki_types::PrivateKeyDer<'static>, +) { // Unfortunately, rustls does not accept a "raw" Ed25519 key. // We have to convert it to DER and pass it to the library. @@ -91,12 +96,14 @@ pub fn new_dummy_x509_certificate(keypair: &Keypair) -> (rustls::Certificate, ru ]); ( - rustls::Certificate(cert_der), - rustls::PrivateKey(key_pkcs8_der), + rustls::pki_types::CertificateDer::from(cert_der), + rustls::pki_types::PrivateKeyDer::try_from(key_pkcs8_der).unwrap(), ) } -pub fn get_pubkey_from_tls_certificate(der_cert: &rustls::Certificate) -> Option { +pub fn get_pubkey_from_tls_certificate( + der_cert: &rustls::pki_types::CertificateDer, +) -> Option { let (_, cert) = X509Certificate::from_der(der_cert.as_ref()).ok()?; match cert.public_key().parsed().ok()? 
{ PublicKey::Unknown(key) => Pubkey::try_from(key).ok(), diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index 40559b1ae8ef7c..47fb173838a5f1 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -4,11 +4,15 @@ use { futures::future::TryJoin, log::error, quinn::{ + crypto::rustls::{QuicClientConfig, QuicServerConfig}, ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, EndpointConfig, IdleTimeout, SendDatagramError, ServerConfig, TokioRuntime, TransportConfig, VarInt, }, - rustls::{Certificate, KeyLogFile, PrivateKey}, + rustls::{ + pki_types::{CertificateDer, PrivateKeyDer}, + CertificateError, KeyLogFile, + }, solana_quic_client::nonblocking::quic_client::SkipServerVerification, solana_runtime::bank_forks::BankForks, solana_sdk::{pubkey::Pubkey, signature::Keypair}, @@ -102,7 +106,7 @@ pub fn new_quic_endpoint( Error, > { let (cert, key) = new_dummy_x509_certificate(keypair); - let server_config = new_server_config(cert.clone(), key.clone())?; + let server_config = new_server_config(cert.clone(), key.clone_key())?; let client_config = new_client_config(cert, key)?; let mut endpoint = { // Endpoint::new requires entering the runtime context, @@ -148,29 +152,36 @@ pub fn close_quic_endpoint(endpoint: &Endpoint) { ); } -fn new_server_config(cert: Certificate, key: PrivateKey) -> Result { +fn new_server_config( + cert: CertificateDer<'static>, + key: PrivateKeyDer<'static>, +) -> Result { let mut config = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_client_cert_verifier(Arc::new(SkipClientVerification {})) + .with_client_cert_verifier(SkipClientVerification::new()) .with_single_cert(vec![cert], key)?; config.alpn_protocols = vec![ALPN_TURBINE_PROTOCOL_ID.to_vec()]; config.key_log = Arc::new(KeyLogFile::new()); - let mut config = ServerConfig::with_crypto(Arc::new(config)); + let quic_server_config = QuicServerConfig::try_from(config) + .map_err(|_err| rustls::Error::InvalidCertificate(CertificateError::BadSignature))?; + + let mut config = ServerConfig::with_crypto(Arc::new(quic_server_config)); config .transport_config(Arc::new(new_transport_config())) - .use_retry(true) .migration(false); Ok(config) } -fn new_client_config(cert: Certificate, key: PrivateKey) -> Result { +fn new_client_config( + cert: CertificateDer<'static>, + key: PrivateKeyDer<'static>, +) -> Result { let mut config = rustls::ClientConfig::builder() - .with_safe_defaults() - .with_custom_certificate_verifier(Arc::new(SkipServerVerification {})) + .dangerous() + .with_custom_certificate_verifier(SkipServerVerification::new()) .with_client_auth_cert(vec![cert], key)?; config.enable_early_data = true; config.alpn_protocols = vec![ALPN_TURBINE_PROTOCOL_ID.to_vec()]; - let mut config = ClientConfig::new(Arc::new(config)); + let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(config).unwrap())); config.transport_config(Arc::new(new_transport_config())); Ok(config) } @@ -202,17 +213,29 @@ async fn run_server( let stats = Arc::::default(); let report_metrics_task = tokio::task::spawn(report_metrics_task("turbine_quic_server", stats.clone())); - while let Some(connecting) = endpoint.accept().await { - tokio::task::spawn(handle_connecting_task( - endpoint.clone(), - connecting, - sender.clone(), - bank_forks.clone(), - prune_cache_pending.clone(), - router.clone(), - cache.clone(), - stats.clone(), - )); + while let Some(incoming) = endpoint.accept().await { + let remote_addr: SocketAddr = incoming.remote_address(); 
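+ // quinn 0.11: Endpoint::accept() now yields an Incoming handshake; accept() turns it into a Connecting future, while ignore()/refuse() drop it without establishing a connection.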
+ let connecting = incoming.accept(); + match connecting { + Ok(connecting) => { + tokio::task::spawn(handle_connecting_task( + endpoint.clone(), + connecting, + sender.clone(), + bank_forks.clone(), + prune_cache_pending.clone(), + router.clone(), + cache.clone(), + stats.clone(), + )); + } + Err(error) => { + debug!( + "Error while accepting incoming connection: {error:?} from {}", + remote_addr + ); + } + } } report_metrics_task.abort(); } @@ -629,11 +652,15 @@ struct TurbineQuicStats { connection_error_timed_out: AtomicU64, connection_error_transport_error: AtomicU64, connection_error_version_mismatch: AtomicU64, + connection_error_connection_limit_exceeded: AtomicU64, invalid_identity: AtomicU64, router_try_send_error_full: AtomicU64, send_datagram_error_connection_lost: AtomicU64, send_datagram_error_too_large: AtomicU64, send_datagram_error_unsupported_by_peer: AtomicU64, + connect_error_cids_exhausted: AtomicU64, + connect_error_invalid_server_name: AtomicU64, + connection_error_cids_exhausted: AtomicU64, } async fn report_metrics_task(name: &'static str, stats: Arc) { @@ -649,12 +676,6 @@ fn record_error(err: &Error, stats: &TurbineQuicStats) { Error::ConnectError(ConnectError::EndpointStopping) => { add_metric!(stats.connect_error_other) } - Error::ConnectError(ConnectError::TooManyConnections) => { - add_metric!(stats.connect_error_too_many_connections) - } - Error::ConnectError(ConnectError::InvalidDnsName(_)) => { - add_metric!(stats.connect_error_other) - } Error::ConnectError(ConnectError::InvalidRemoteAddress(_)) => { add_metric!(stats.connect_error_invalid_remote_address) } @@ -696,6 +717,15 @@ fn record_error(err: &Error, stats: &TurbineQuicStats) { add_metric!(stats.send_datagram_error_connection_lost) } Error::TlsError(_) => (), + Error::ConnectError(ConnectError::CidsExhausted) => { + add_metric!(stats.connect_error_cids_exhausted) + } + Error::ConnectError(ConnectError::InvalidServerName(_)) => { + add_metric!(stats.connect_error_invalid_server_name) + } + Error::ConnectionError(ConnectionError::CidsExhausted) => { + add_metric!(stats.connection_error_cids_exhausted) + } } } @@ -757,6 +787,11 @@ fn report_metrics(name: &'static str, stats: &TurbineQuicStats) { reset_metric!(stats.connection_error_version_mismatch), i64 ), + ( + "connection_error_connection_limit_exceeded", + reset_metric!(stats.connection_error_connection_limit_exceeded), + i64 + ), ( "invalid_identity", reset_metric!(stats.invalid_identity), From 9b8dd0eb7a8e22135eff55fce3c69e211064bc76 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 15 Sep 2024 14:15:09 +0800 Subject: [PATCH 355/529] ci: bump buildkite/trigger-pipeline-action to v2.2.0 (#2932) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 78a6d029d4c063..73b8b0d25399d6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Trigger a Buildkite Build - uses: "buildkite/trigger-pipeline-action@v2.0.0" + uses: "buildkite/trigger-pipeline-action@v2.2.0" with: buildkite_api_access_token: ${{ secrets.TRIGGER_BK_BUILD_TOKEN }} pipeline: "anza/agave-secondary" From d65ad982c45fd45489299634204c67f3423f1893 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 15 Sep 2024 14:15:29 +0800 Subject: [PATCH 356/529] ci: bump mozilla-actions/sccache-action to v0.0.5 (#2931) --- .github/workflows/cargo.yml | 2 +- 
.github/workflows/downstream-project-anchor.yml | 2 +- .github/workflows/downstream-project-spl.yml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml index e64852e42835e6..01db66c1a1bd4a 100644 --- a/.github/workflows/cargo.yml +++ b/.github/workflows/cargo.yml @@ -36,7 +36,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.5 with: version: "v0.8.1" diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index 89a7acd101942c..9e6b1591b6c23e 100644 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -50,7 +50,7 @@ jobs: run: | .github/scripts/purge-ubuntu-runner.sh - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.5 with: version: "v0.8.1" diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index d2065f178fd5a5..a3f45a29b30b0d 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -45,7 +45,7 @@ jobs: run: | .github/scripts/purge-ubuntu-runner.sh - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.5 with: version: "v0.8.1" @@ -97,7 +97,7 @@ jobs: run: | .github/scripts/purge-ubuntu-runner.sh - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.5 with: version: "v0.8.1" @@ -151,7 +151,7 @@ jobs: run: | .github/scripts/purge-ubuntu-runner.sh - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.5 with: version: "v0.8.1" From 699981fc9aa53098bb400e046f65c87b2f6136c3 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sun, 15 Sep 2024 14:15:43 +0800 Subject: [PATCH 357/529] ci: bump softprops/action-gh-release to v2 (#2933) --- .github/workflows/release-artifacts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 4dca7118c11348..8b1da1d7e28426 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -102,7 +102,7 @@ jobs: path: ./windows-release/ - name: Release - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 with: tag_name: ${{ needs.windows-build.outputs.tag }} draft: true From 65c89965621ce4f619c345c6940c461ea127b745 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 16 Sep 2024 11:30:43 -0500 Subject: [PATCH 358/529] OwnedOrBorrowed for TransactionBatch (#2837) * OwnedOrBorrowed * relax trait bound to SVMMessage * update batch with indexes trait bound * import --- ledger/benches/blockstore_processor.rs | 18 +++++++++------- ledger/src/blockstore_processor.rs | 10 ++++----- runtime/src/bank.rs | 13 ++++++----- runtime/src/transaction_batch.rs | 30 ++++++++++++++++++++------ 4 files changed, 44 insertions(+), 27 deletions(-) diff --git a/ledger/benches/blockstore_processor.rs b/ledger/benches/blockstore_processor.rs index 65b0ac229978a8..1fd9b4f937ce6c 100644 --- a/ledger/benches/blockstore_processor.rs +++ b/ledger/benches/blockstore_processor.rs @@ -12,8 +12,10 @@ use { genesis_utils::{create_genesis_config, GenesisConfigInfo}, }, solana_runtime::{ - bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, - 
transaction_batch::TransactionBatch, + bank::Bank, + bank_forks::BankForks, + prioritization_fee_cache::PrioritizationFeeCache, + transaction_batch::{OwnedOrBorrowed, TransactionBatch}, }, solana_sdk::{ account::{Account, ReadableAccount}, @@ -24,10 +26,7 @@ use { transaction::SanitizedTransaction, }, solana_timings::ExecuteTimings, - std::{ - borrow::Cow, - sync::{Arc, RwLock}, - }, + std::sync::{Arc, RwLock}, test::Bencher, }; @@ -136,8 +135,11 @@ fn bench_execute_batch( let batches: Vec<_> = transactions .chunks(batch_size) .map(|txs| { - let mut batch = - TransactionBatch::new(vec![Ok(()); txs.len()], &bank, Cow::Borrowed(txs)); + let mut batch = TransactionBatch::new( + vec![Ok(()); txs.len()], + &bank, + OwnedOrBorrowed::Borrowed(txs), + ); batch.set_needs_unlock(false); TransactionBatchWithIndexes { batch, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 28b1b09b025dea..986e1a2549b13b 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -37,7 +37,7 @@ use { installed_scheduler_pool::BankWithScheduler, prioritization_fee_cache::PrioritizationFeeCache, runtime_config::RuntimeConfig, - transaction_batch::TransactionBatch, + transaction_batch::{OwnedOrBorrowed, TransactionBatch}, vote_sender_types::ReplayVoteSender, }, solana_sdk::{ @@ -56,12 +56,11 @@ use { transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, transaction_processor::ExecutionRecordingConfig, }, - solana_svm_transaction::svm_transaction::SVMTransaction, + solana_svm_transaction::svm_message::SVMMessage, solana_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings}, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::vote_account::VoteAccountsHashMap, std::{ - borrow::Cow, collections::{HashMap, HashSet}, ops::Index, path::PathBuf, @@ -78,7 +77,7 @@ use { #[cfg(feature = "dev-context-only-utils")] use {qualifier_attr::qualifiers, solana_runtime::bank::HashOverrides}; -pub struct TransactionBatchWithIndexes<'a, 'b, Tx: SVMTransaction + Clone> { +pub struct TransactionBatchWithIndexes<'a, 'b, Tx: SVMMessage> { pub batch: TransactionBatch<'a, 'b, Tx>, pub transaction_indexes: Vec, } @@ -443,7 +442,8 @@ fn rebatch_transactions<'a>( ) -> TransactionBatchWithIndexes<'a, 'a, SanitizedTransaction> { let txs = &sanitized_txs[start..=end]; let results = &lock_results[start..=end]; - let mut tx_batch = TransactionBatch::new(results.to_vec(), bank, Cow::from(txs)); + let mut tx_batch = + TransactionBatch::new(results.to_vec(), bank, OwnedOrBorrowed::Borrowed(txs)); tx_batch.set_needs_unlock(false); let transaction_indexes = transaction_indexes[start..=end].to_vec(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 78121d7bcf453a..43884d107a065e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -56,7 +56,7 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, - transaction_batch::TransactionBatch, + transaction_batch::{OwnedOrBorrowed, TransactionBatch}, }, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, @@ -175,7 +175,6 @@ use { solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, std::{ - borrow::Cow, collections::{HashMap, HashSet}, convert::TryFrom, fmt, @@ -3341,7 +3340,7 @@ impl Bank { Ok(TransactionBatch::new( lock_results, self, - Cow::Owned(sanitized_txs), + OwnedOrBorrowed::Owned(sanitized_txs), )) } @@ 
-3355,7 +3354,7 @@ impl Bank { .rc .accounts .lock_accounts(txs.iter(), tx_account_lock_limit); - TransactionBatch::new(lock_results, self, Cow::Borrowed(txs)) + TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Borrowed(txs)) } /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost @@ -3372,7 +3371,7 @@ impl Bank { transaction_results, tx_account_lock_limit, ); - TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) + TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Borrowed(transactions)) } /// Prepare a transaction batch from a single transaction without locking accounts @@ -3386,7 +3385,7 @@ impl Bank { let mut batch = TransactionBatch::new( vec![lock_result], self, - Cow::Borrowed(slice::from_ref(transaction)), + OwnedOrBorrowed::Borrowed(slice::from_ref(transaction)), ); batch.set_needs_unlock(false); batch @@ -6818,7 +6817,7 @@ impl Bank { .rc .accounts .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit); - TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) + TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Owned(sanitized_txs)) } /// Set the initial accounts data size diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs index 1d33be2c57c591..4fbb23523910fd 100644 --- a/runtime/src/transaction_batch.rs +++ b/runtime/src/transaction_batch.rs @@ -1,21 +1,37 @@ use { - crate::bank::Bank, solana_sdk::transaction::Result, - solana_svm_transaction::svm_transaction::SVMTransaction, std::borrow::Cow, + crate::bank::Bank, core::ops::Deref, solana_sdk::transaction::Result, + solana_svm_transaction::svm_message::SVMMessage, }; +pub enum OwnedOrBorrowed<'a, T> { + Owned(Vec), + Borrowed(&'a [T]), +} + +impl Deref for OwnedOrBorrowed<'_, T> { + type Target = [T]; + + fn deref(&self) -> &Self::Target { + match self { + OwnedOrBorrowed::Owned(v) => v, + OwnedOrBorrowed::Borrowed(v) => v, + } + } +} + // Represents the results of trying to lock a set of accounts -pub struct TransactionBatch<'a, 'b, Tx: SVMTransaction + Clone> { +pub struct TransactionBatch<'a, 'b, Tx: SVMMessage> { lock_results: Vec>, bank: &'a Bank, - sanitized_txs: Cow<'b, [Tx]>, + sanitized_txs: OwnedOrBorrowed<'b, Tx>, needs_unlock: bool, } -impl<'a, 'b, Tx: SVMTransaction + Clone> TransactionBatch<'a, 'b, Tx> { +impl<'a, 'b, Tx: SVMMessage> TransactionBatch<'a, 'b, Tx> { pub fn new( lock_results: Vec>, bank: &'a Bank, - sanitized_txs: Cow<'b, [Tx]>, + sanitized_txs: OwnedOrBorrowed<'b, Tx>, ) -> Self { assert_eq!(lock_results.len(), sanitized_txs.len()); Self { @@ -81,7 +97,7 @@ impl<'a, 'b, Tx: SVMTransaction + Clone> TransactionBatch<'a, 'b, Tx> { } // Unlock all locked accounts in destructor. 
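// (Both variants deref to a [Tx] slice, so the batch's transactions are accessed uniformly whether owned or borrowed.)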
-impl<'a, 'b, Tx: SVMTransaction + Clone> Drop for TransactionBatch<'a, 'b, Tx> { +impl<'a, 'b, Tx: SVMMessage> Drop for TransactionBatch<'a, 'b, Tx> { fn drop(&mut self) { if self.needs_unlock() { self.set_needs_unlock(false); From 8aae27131ac41b839709c30aea9e132f826e82ce Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Mon, 16 Sep 2024 11:42:49 -0500 Subject: [PATCH 359/529] ancient shrink bug fix - remove skip slots (#2927) * remove skip slots * fix tests * add comments to explain assert condition --------- Co-authored-by: HaoranYi --- accounts-db/src/ancient_append_vecs.rs | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 0eb3309ac5133a..68d4f0b365e9fd 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -851,6 +851,12 @@ impl AccountsDb { } } let unpackable_slots_count = remove.len(); + + // Remove skipped slots + for i in remove.iter().rev() { + accounts_to_combine.remove(*i); + } + target_slots_sorted.sort_unstable(); self.shrink_ancient_stats .slots_cannot_move_count @@ -1755,7 +1761,20 @@ pub mod tests { &tuning, many_ref_slots, ); - let expected_accounts_to_combine = num_slots; + let expected_accounts_to_combine = if num_slots >= 3 + && two_refs + && many_ref_slots == IncludeManyRefSlots::Skip + { + // In this test setup, 2.5 regular slots fits into 1 ancient slot. + // When there are two_refs and when slots < 3, all regular slots can fit into one ancient slots. + // Therefore, we should have all slots that can be combined for slots < 3. + // However, when slots >=3, we need more than one ancient slots. The pack algorithm will need to first + // find at least [ceiling(num_slots/2.5) - 1] slots that's doesn't have many_refs before we can pack slots with many_refs. + // Since all the slots have many_refs, we can't find any eligible slot to combine. + 0 + } else { + num_slots + }; (0..accounts_to_combine .target_slots_sorted .len() @@ -1858,7 +1877,8 @@ pub mod tests { assert_eq!( accounts_to_combine.accounts_to_combine.len(), // if we are only trying to pack a single slot of multi-refs, it will succeed - if !two_refs || many_ref_slots == IncludeManyRefSlots::Include || num_slots == 1 || num_slots == 2 {num_slots} else {0}, + // if num_slots = 2 and skip multi-ref slots, accounts_to_combine should be empty. 
+ if !two_refs || many_ref_slots == IncludeManyRefSlots::Include || num_slots == 1 || (num_slots == 2 && many_ref_slots != IncludeManyRefSlots::Skip) {num_slots} else {0}, "method: {method:?}, num_slots: {num_slots}, two_refs: {two_refs}, many_refs: {many_ref_slots:?}" ); From 2a962c93ea3e0b607909a9b34f0bb9fedbd30396 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Mon, 16 Sep 2024 20:44:53 +0400 Subject: [PATCH 360/529] Extract program-error crate (#2413) * extract program-error crate * remove `default-features = false` that doesn't do anything * make serde optional in program-error crate * update lock file * remove thiserror from program-error crate * update lock file * move ProgramResult definition to program-error crate * unused import * fix features * missing whitespace Co-authored-by: Jon C --------- Co-authored-by: Jon C --- Cargo.lock | 15 ++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 15 ++ sdk/program-error/Cargo.toml | 29 +++ sdk/program-error/src/lib.rs | 316 +++++++++++++++++++++++++++++++ sdk/program/Cargo.toml | 2 + sdk/program/src/entrypoint.rs | 6 +- sdk/program/src/program_error.rs | 288 ++-------------------------- 8 files changed, 394 insertions(+), 279 deletions(-) create mode 100644 sdk/program-error/Cargo.toml create mode 100644 sdk/program-error/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 0ce95129d77bea..522e1fe4f5e549 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7238,6 +7238,7 @@ dependencies = [ "solana-instruction", "solana-logger", "solana-msg", + "solana-program-error", "solana-program-memory", "solana-program-option", "solana-pubkey", @@ -7253,6 +7254,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "solana-program-error" +version = "2.1.0" +dependencies = [ + "borsh 1.5.1", + "num-traits", + "serde", + "serde_derive", + "solana-decode-error", + "solana-instruction", + "solana-msg", + "solana-pubkey", +] + [[package]] name = "solana-program-memory" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 6b4eac9eff9fe8..8229865fef7a7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,6 +115,7 @@ members = [ "sdk/msg", "sdk/package-metadata-macro", "sdk/program", + "sdk/program-error", "sdk/program-memory", "sdk/program-option", "sdk/pubkey", @@ -423,6 +424,7 @@ solana-perf = { path = "perf", version = "=2.1.0" } solana-poh = { path = "poh", version = "=2.1.0" } solana-poseidon = { path = "poseidon", version = "=2.1.0" } solana-program = { path = "sdk/program", version = "=2.1.0", default-features = false } +solana-program-error = { path = "sdk/program-error", version = "=2.1.0" } solana-program-memory = { path = "sdk/program-memory", version = "=2.1.0" } solana-program-option = { path = "sdk/program-option", version = "=2.1.0" } solana-program-runtime = { path = "program-runtime", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c275a7fe049f48..729ffb286a593b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5632,6 +5632,7 @@ dependencies = [ "solana-hash", "solana-instruction", "solana-msg", + "solana-program-error", "solana-program-memory", "solana-program-option", "solana-pubkey", @@ -5645,6 +5646,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "solana-program-error" +version = "2.1.0" +dependencies = [ + "borsh 1.5.1", + "num-traits", + "serde", + "serde_derive", + "solana-decode-error", + "solana-instruction", + "solana-msg", + "solana-pubkey", +] + [[package]] name = "solana-program-memory" version = "2.1.0" diff --git a/sdk/program-error/Cargo.toml 
b/sdk/program-error/Cargo.toml new file mode 100644 index 00000000000000..64860dee0a0331 --- /dev/null +++ b/sdk/program-error/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "solana-program-error" +description = "Solana ProgramError type and related definitions." +documentation = "https://docs.rs/solana-program-error" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +borsh = { workspace = true, optional = true } +num-traits = { workspace = true } +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-decode-error = { workspace = true } +solana-instruction = { workspace = true, default-features = false, features = [ + "std", +] } +solana-msg = { workspace = true } +solana-pubkey = { workspace = true, default-features = false } + +[features] +borsh = ["dep:borsh"] +serde = ["dep:serde", "dep:serde_derive"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program-error/src/lib.rs b/sdk/program-error/src/lib.rs new file mode 100644 index 00000000000000..87c79595bd688a --- /dev/null +++ b/sdk/program-error/src/lib.rs @@ -0,0 +1,316 @@ +//! The [`ProgramError`] type and related definitions. + +#![allow(clippy::arithmetic_side_effects)] +#[cfg(feature = "borsh")] +use borsh::io::Error as BorshIoError; +#[cfg(feature = "serde")] +use serde_derive::{Deserialize, Serialize}; +use { + core::fmt, + num_traits::FromPrimitive, + solana_decode_error::DecodeError, + solana_instruction::error::{ + InstructionError, ACCOUNT_ALREADY_INITIALIZED, ACCOUNT_BORROW_FAILED, + ACCOUNT_DATA_TOO_SMALL, ACCOUNT_NOT_RENT_EXEMPT, ARITHMETIC_OVERFLOW, BORSH_IO_ERROR, + BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS, CUSTOM_ZERO, ILLEGAL_OWNER, IMMUTABLE, + INCORRECT_AUTHORITY, INCORRECT_PROGRAM_ID, INSUFFICIENT_FUNDS, INVALID_ACCOUNT_DATA, + INVALID_ACCOUNT_DATA_REALLOC, INVALID_ACCOUNT_OWNER, INVALID_ARGUMENT, + INVALID_INSTRUCTION_DATA, INVALID_SEEDS, MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED, + MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED, MAX_SEED_LENGTH_EXCEEDED, + MISSING_REQUIRED_SIGNATURES, NOT_ENOUGH_ACCOUNT_KEYS, UNINITIALIZED_ACCOUNT, + UNSUPPORTED_SYSVAR, + }, + solana_msg::msg, + solana_pubkey::PubkeyError, + std::convert::TryFrom, +}; + +pub type ProgramResult = std::result::Result<(), ProgramError>; + +/// Reasons the program may fail +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ProgramError { + /// Allows on-chain programs to implement program-specific error types and see them returned + /// by the Solana runtime. A program-specific error may be any type that is represented as + /// or serialized to a u32 integer. 
+ Custom(u32), + InvalidArgument, + InvalidInstructionData, + InvalidAccountData, + AccountDataTooSmall, + InsufficientFunds, + IncorrectProgramId, + MissingRequiredSignature, + AccountAlreadyInitialized, + UninitializedAccount, + NotEnoughAccountKeys, + AccountBorrowFailed, + MaxSeedLengthExceeded, + InvalidSeeds, + BorshIoError(String), + AccountNotRentExempt, + UnsupportedSysvar, + IllegalOwner, + MaxAccountsDataAllocationsExceeded, + InvalidRealloc, + MaxInstructionTraceLengthExceeded, + BuiltinProgramsMustConsumeComputeUnits, + InvalidAccountOwner, + ArithmeticOverflow, + Immutable, + IncorrectAuthority, +} + +impl std::error::Error for ProgramError {} + +impl fmt::Display for ProgramError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ProgramError::Custom(num) => write!(f,"Custom program error: {num:#x}"), + ProgramError::InvalidArgument + => f.write_str("The arguments provided to a program instruction were invalid"), + ProgramError::InvalidInstructionData + => f.write_str("An instruction's data contents was invalid"), + ProgramError::InvalidAccountData + => f.write_str("An account's data contents was invalid"), + ProgramError::AccountDataTooSmall + => f.write_str("An account's data was too small"), + ProgramError::InsufficientFunds + => f.write_str("An account's balance was too small to complete the instruction"), + ProgramError::IncorrectProgramId + => f.write_str("The account did not have the expected program id"), + ProgramError::MissingRequiredSignature + => f.write_str("A signature was required but not found"), + ProgramError::AccountAlreadyInitialized + => f.write_str("An initialize instruction was sent to an account that has already been initialized"), + ProgramError::UninitializedAccount + => f.write_str("An attempt to operate on an account that hasn't been initialized"), + ProgramError::NotEnoughAccountKeys + => f.write_str("The instruction expected additional account keys"), + ProgramError::AccountBorrowFailed + => f.write_str("Failed to borrow a reference to account data, already borrowed"), + ProgramError::MaxSeedLengthExceeded + => f.write_str("Length of the seed is too long for address generation"), + ProgramError::InvalidSeeds + => f.write_str("Provided seeds do not result in a valid address"), + ProgramError::BorshIoError(s) => write!(f, "IO Error: {s}"), + ProgramError::AccountNotRentExempt + => f.write_str("An account does not have enough lamports to be rent-exempt"), + ProgramError::UnsupportedSysvar + => f.write_str("Unsupported sysvar"), + ProgramError::IllegalOwner + => f.write_str("Provided owner is not allowed"), + ProgramError::MaxAccountsDataAllocationsExceeded + => f.write_str("Accounts data allocations exceeded the maximum allowed per transaction"), + ProgramError::InvalidRealloc + => f.write_str("Account data reallocation was invalid"), + ProgramError::MaxInstructionTraceLengthExceeded + => f.write_str("Instruction trace length exceeded the maximum allowed per transaction"), + ProgramError::BuiltinProgramsMustConsumeComputeUnits + => f.write_str("Builtin programs must consume compute units"), + ProgramError::InvalidAccountOwner + => f.write_str("Invalid account owner"), + ProgramError::ArithmeticOverflow + => f.write_str("Program arithmetic overflowed"), + ProgramError::Immutable + => f.write_str("Account is immutable"), + ProgramError::IncorrectAuthority + => f.write_str("Incorrect authority provided"), + } + } +} + +pub trait PrintProgramError { + fn print<E>(&self) + where + E: 'static + std::error::Error + DecodeError<E> + PrintProgramError + FromPrimitive; +} + +impl PrintProgramError for ProgramError { + fn print<E>(&self) + where + E: 'static + std::error::Error + DecodeError<E> + PrintProgramError + FromPrimitive, + { + match self { + Self::Custom(error) => { + if let Some(custom_error) = E::decode_custom_error_to_enum(*error) { + custom_error.print::<E>(); + } else { + msg!("Error: Unknown"); + } + } + Self::InvalidArgument => msg!("Error: InvalidArgument"), + Self::InvalidInstructionData => msg!("Error: InvalidInstructionData"), + Self::InvalidAccountData => msg!("Error: InvalidAccountData"), + Self::AccountDataTooSmall => msg!("Error: AccountDataTooSmall"), + Self::InsufficientFunds => msg!("Error: InsufficientFunds"), + Self::IncorrectProgramId => msg!("Error: IncorrectProgramId"), + Self::MissingRequiredSignature => msg!("Error: MissingRequiredSignature"), + Self::AccountAlreadyInitialized => msg!("Error: AccountAlreadyInitialized"), + Self::UninitializedAccount => msg!("Error: UninitializedAccount"), + Self::NotEnoughAccountKeys => msg!("Error: NotEnoughAccountKeys"), + Self::AccountBorrowFailed => msg!("Error: AccountBorrowFailed"), + Self::MaxSeedLengthExceeded => msg!("Error: MaxSeedLengthExceeded"), + Self::InvalidSeeds => msg!("Error: InvalidSeeds"), + Self::BorshIoError(_) => msg!("Error: BorshIoError"), + Self::AccountNotRentExempt => msg!("Error: AccountNotRentExempt"), + Self::UnsupportedSysvar => msg!("Error: UnsupportedSysvar"), + Self::IllegalOwner => msg!("Error: IllegalOwner"), + Self::MaxAccountsDataAllocationsExceeded => { + msg!("Error: MaxAccountsDataAllocationsExceeded") + } + Self::InvalidRealloc => msg!("Error: InvalidRealloc"), + Self::MaxInstructionTraceLengthExceeded => { + msg!("Error: MaxInstructionTraceLengthExceeded") + } + Self::BuiltinProgramsMustConsumeComputeUnits => { + msg!("Error: BuiltinProgramsMustConsumeComputeUnits") + } + Self::InvalidAccountOwner => msg!("Error: InvalidAccountOwner"), + Self::ArithmeticOverflow => msg!("Error: ArithmeticOverflow"), + Self::Immutable => msg!("Error: Immutable"), + Self::IncorrectAuthority => msg!("Error: IncorrectAuthority"), + } + } +} + +impl From<ProgramError> for u64 { + fn from(error: ProgramError) -> Self { + match error { + ProgramError::InvalidArgument => INVALID_ARGUMENT, + ProgramError::InvalidInstructionData => INVALID_INSTRUCTION_DATA, + ProgramError::InvalidAccountData => INVALID_ACCOUNT_DATA, + ProgramError::AccountDataTooSmall => ACCOUNT_DATA_TOO_SMALL, + ProgramError::InsufficientFunds => INSUFFICIENT_FUNDS, + ProgramError::IncorrectProgramId => INCORRECT_PROGRAM_ID, + ProgramError::MissingRequiredSignature => MISSING_REQUIRED_SIGNATURES, + ProgramError::AccountAlreadyInitialized => ACCOUNT_ALREADY_INITIALIZED, + ProgramError::UninitializedAccount => UNINITIALIZED_ACCOUNT, + ProgramError::NotEnoughAccountKeys => NOT_ENOUGH_ACCOUNT_KEYS, + ProgramError::AccountBorrowFailed => ACCOUNT_BORROW_FAILED, + ProgramError::MaxSeedLengthExceeded => MAX_SEED_LENGTH_EXCEEDED, + ProgramError::InvalidSeeds => INVALID_SEEDS, + ProgramError::BorshIoError(_) => BORSH_IO_ERROR, + ProgramError::AccountNotRentExempt => ACCOUNT_NOT_RENT_EXEMPT, + ProgramError::UnsupportedSysvar => UNSUPPORTED_SYSVAR, + ProgramError::IllegalOwner => ILLEGAL_OWNER, + ProgramError::MaxAccountsDataAllocationsExceeded => { + MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED + } + ProgramError::InvalidRealloc => INVALID_ACCOUNT_DATA_REALLOC, + ProgramError::MaxInstructionTraceLengthExceeded => { + MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED + } + ProgramError::BuiltinProgramsMustConsumeComputeUnits => { + BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS + } + ProgramError::InvalidAccountOwner => INVALID_ACCOUNT_OWNER, + ProgramError::ArithmeticOverflow => ARITHMETIC_OVERFLOW, + ProgramError::Immutable => IMMUTABLE, + ProgramError::IncorrectAuthority => INCORRECT_AUTHORITY, + ProgramError::Custom(error) => { + if error == 0 { + CUSTOM_ZERO + } else { + error as u64 + } + } + } + } +} + +impl From<u64> for ProgramError { + fn from(error: u64) -> Self { + match error { + CUSTOM_ZERO => Self::Custom(0), + INVALID_ARGUMENT => Self::InvalidArgument, + INVALID_INSTRUCTION_DATA => Self::InvalidInstructionData, + INVALID_ACCOUNT_DATA => Self::InvalidAccountData, + ACCOUNT_DATA_TOO_SMALL => Self::AccountDataTooSmall, + INSUFFICIENT_FUNDS => Self::InsufficientFunds, + INCORRECT_PROGRAM_ID => Self::IncorrectProgramId, + MISSING_REQUIRED_SIGNATURES => Self::MissingRequiredSignature, + ACCOUNT_ALREADY_INITIALIZED => Self::AccountAlreadyInitialized, + UNINITIALIZED_ACCOUNT => Self::UninitializedAccount, + NOT_ENOUGH_ACCOUNT_KEYS => Self::NotEnoughAccountKeys, + ACCOUNT_BORROW_FAILED => Self::AccountBorrowFailed, + MAX_SEED_LENGTH_EXCEEDED => Self::MaxSeedLengthExceeded, + INVALID_SEEDS => Self::InvalidSeeds, + BORSH_IO_ERROR => Self::BorshIoError("Unknown".to_string()), + ACCOUNT_NOT_RENT_EXEMPT => Self::AccountNotRentExempt, + UNSUPPORTED_SYSVAR => Self::UnsupportedSysvar, + ILLEGAL_OWNER => Self::IllegalOwner, + MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED => Self::MaxAccountsDataAllocationsExceeded, + INVALID_ACCOUNT_DATA_REALLOC => Self::InvalidRealloc, + MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED => Self::MaxInstructionTraceLengthExceeded, + BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS => { + Self::BuiltinProgramsMustConsumeComputeUnits + } + INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, + ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, + IMMUTABLE => Self::Immutable, + INCORRECT_AUTHORITY => Self::IncorrectAuthority, + _ => Self::Custom(error as u32), + } + } +} + +impl TryFrom<InstructionError> for ProgramError { + type Error = InstructionError; + + fn try_from(error: InstructionError) -> Result<Self, Self::Error> { + match error { + Self::Error::Custom(err) => Ok(Self::Custom(err)), + Self::Error::InvalidArgument => Ok(Self::InvalidArgument), + Self::Error::InvalidInstructionData => Ok(Self::InvalidInstructionData), + Self::Error::InvalidAccountData => Ok(Self::InvalidAccountData), + Self::Error::AccountDataTooSmall => Ok(Self::AccountDataTooSmall), + Self::Error::InsufficientFunds => Ok(Self::InsufficientFunds), + Self::Error::IncorrectProgramId => Ok(Self::IncorrectProgramId), + Self::Error::MissingRequiredSignature => Ok(Self::MissingRequiredSignature), + Self::Error::AccountAlreadyInitialized => Ok(Self::AccountAlreadyInitialized), + Self::Error::UninitializedAccount => Ok(Self::UninitializedAccount), + Self::Error::NotEnoughAccountKeys => Ok(Self::NotEnoughAccountKeys), + Self::Error::AccountBorrowFailed => Ok(Self::AccountBorrowFailed), + Self::Error::MaxSeedLengthExceeded => Ok(Self::MaxSeedLengthExceeded), + Self::Error::InvalidSeeds => Ok(Self::InvalidSeeds), + Self::Error::BorshIoError(err) => Ok(Self::BorshIoError(err)), + Self::Error::AccountNotRentExempt => Ok(Self::AccountNotRentExempt), + Self::Error::UnsupportedSysvar => Ok(Self::UnsupportedSysvar), + Self::Error::IllegalOwner => Ok(Self::IllegalOwner), + Self::Error::MaxAccountsDataAllocationsExceeded => { + Ok(Self::MaxAccountsDataAllocationsExceeded) + } + Self::Error::InvalidRealloc => Ok(Self::InvalidRealloc), + Self::Error::MaxInstructionTraceLengthExceeded => { + Ok(Self::MaxInstructionTraceLengthExceeded) + } + Self::Error::BuiltinProgramsMustConsumeComputeUnits => { + Ok(Self::BuiltinProgramsMustConsumeComputeUnits) + } + Self::Error::InvalidAccountOwner => Ok(Self::InvalidAccountOwner), + Self::Error::ArithmeticOverflow => Ok(Self::ArithmeticOverflow), + Self::Error::Immutable => Ok(Self::Immutable), + Self::Error::IncorrectAuthority => Ok(Self::IncorrectAuthority), + _ => Err(error), + } + } +} + +impl From<PubkeyError> for ProgramError { + fn from(error: PubkeyError) -> Self { + match error { + PubkeyError::MaxSeedLengthExceeded => Self::MaxSeedLengthExceeded, + PubkeyError::InvalidSeeds => Self::InvalidSeeds, + PubkeyError::IllegalOwner => Self::IllegalOwner, + } + } +} + +#[cfg(feature = "borsh")] +impl From<BorshIoError> for ProgramError { + fn from(error: BorshIoError) -> Self { + Self::BorshIoError(format!("{error}")) + } +}
diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 2f2c6feaebad69..0d4d0afae6a1fd 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -47,6 +47,7 @@ solana-instruction = { workspace = true, default-features = false, features = [ "std", ] } solana-msg = { workspace = true } +solana-program-error = { workspace = true, features = ["serde"] } solana-program-memory = { workspace = true } solana-program-option = { workspace = true } solana-pubkey = { workspace = true, features = ["bytemuck", "curve25519", "serde", "std"] } @@ -117,6 +118,7 @@ borsh = [ "dep:borsh0-10", "solana-hash/borsh", "solana-instruction/borsh", + "solana-program-error/borsh", "solana-pubkey/borsh" ] dev-context-only-utils = ["dep:qualifier_attr"]
diff --git a/sdk/program/src/entrypoint.rs b/sdk/program/src/entrypoint.rs index dbb6d9ab753651..c0353a8dc94b05 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program/src/entrypoint.rs @@ -5,8 +5,9 @@ //! [`bpf_loader`]: crate::bpf_loader extern crate alloc; +pub use solana_program_error::ProgramResult; use { - crate::{account_info::AccountInfo, program_error::ProgramError, pubkey::Pubkey}, + crate::{account_info::AccountInfo, pubkey::Pubkey}, alloc::vec::Vec, std::{ alloc::Layout, @@ -14,13 +15,10 @@ use { mem::{size_of, MaybeUninit}, ptr::null_mut, rc::Rc, - result::Result as ResultGeneric, slice::{from_raw_parts, from_raw_parts_mut}, }, }; -pub type ProgramResult = ResultGeneric<(), ProgramError>; - /// User implemented function to process an instruction /// /// program_id: Program ID of the currently executing program accounts: Accounts
diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index 9bf25ae7d0ac28..cfa46328571d96 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -1,276 +1,14 @@ -//! The [`ProgramError`] type and related definitions.
- -#![allow(clippy::arithmetic_side_effects)] -#[cfg(feature = "borsh")] -use borsh::io::Error as BorshIoError; -pub use solana_instruction::error::{ - ACCOUNT_ALREADY_INITIALIZED, ACCOUNT_BORROW_FAILED, ACCOUNT_DATA_TOO_SMALL, - ACCOUNT_NOT_RENT_EXEMPT, ARITHMETIC_OVERFLOW, BORSH_IO_ERROR, - BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS, CUSTOM_ZERO, ILLEGAL_OWNER, IMMUTABLE, - INCORRECT_AUTHORITY, INCORRECT_PROGRAM_ID, INSUFFICIENT_FUNDS, INVALID_ACCOUNT_DATA, - INVALID_ACCOUNT_DATA_REALLOC, INVALID_ACCOUNT_OWNER, INVALID_ARGUMENT, - INVALID_INSTRUCTION_DATA, INVALID_SEEDS, MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED, - MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED, MAX_SEED_LENGTH_EXCEEDED, MISSING_REQUIRED_SIGNATURES, - NOT_ENOUGH_ACCOUNT_KEYS, UNINITIALIZED_ACCOUNT, UNSUPPORTED_SYSVAR, +pub use { + solana_instruction::error::{ + ACCOUNT_ALREADY_INITIALIZED, ACCOUNT_BORROW_FAILED, ACCOUNT_DATA_TOO_SMALL, + ACCOUNT_NOT_RENT_EXEMPT, ARITHMETIC_OVERFLOW, BORSH_IO_ERROR, + BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS, CUSTOM_ZERO, ILLEGAL_OWNER, IMMUTABLE, + INCORRECT_AUTHORITY, INCORRECT_PROGRAM_ID, INSUFFICIENT_FUNDS, INVALID_ACCOUNT_DATA, + INVALID_ACCOUNT_DATA_REALLOC, INVALID_ACCOUNT_OWNER, INVALID_ARGUMENT, + INVALID_INSTRUCTION_DATA, INVALID_SEEDS, MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED, + MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED, MAX_SEED_LENGTH_EXCEEDED, + MISSING_REQUIRED_SIGNATURES, NOT_ENOUGH_ACCOUNT_KEYS, UNINITIALIZED_ACCOUNT, + UNSUPPORTED_SYSVAR, + }, + solana_program_error::{PrintProgramError, ProgramError}, }; -use { - crate::{instruction::InstructionError, msg, pubkey::PubkeyError}, - num_traits::FromPrimitive, - solana_decode_error::DecodeError, - std::convert::TryFrom, - thiserror::Error, -}; - -/// Reasons the program may fail -#[derive(Clone, Debug, Deserialize, Eq, Error, PartialEq, Serialize)] -pub enum ProgramError { - /// Allows on-chain programs to implement program-specific error types and see them returned - /// by the Solana runtime. A program-specific error may be any type that is represented as - /// or serialized to a u32 integer. 
- #[error("Custom program error: {0:#x}")] - Custom(u32), - #[error("The arguments provided to a program instruction were invalid")] - InvalidArgument, - #[error("An instruction's data contents was invalid")] - InvalidInstructionData, - #[error("An account's data contents was invalid")] - InvalidAccountData, - #[error("An account's data was too small")] - AccountDataTooSmall, - #[error("An account's balance was too small to complete the instruction")] - InsufficientFunds, - #[error("The account did not have the expected program id")] - IncorrectProgramId, - #[error("A signature was required but not found")] - MissingRequiredSignature, - #[error("An initialize instruction was sent to an account that has already been initialized")] - AccountAlreadyInitialized, - #[error("An attempt to operate on an account that hasn't been initialized")] - UninitializedAccount, - #[error("The instruction expected additional account keys")] - NotEnoughAccountKeys, - #[error("Failed to borrow a reference to account data, already borrowed")] - AccountBorrowFailed, - #[error("Length of the seed is too long for address generation")] - MaxSeedLengthExceeded, - #[error("Provided seeds do not result in a valid address")] - InvalidSeeds, - #[error("IO Error: {0}")] - BorshIoError(String), - #[error("An account does not have enough lamports to be rent-exempt")] - AccountNotRentExempt, - #[error("Unsupported sysvar")] - UnsupportedSysvar, - #[error("Provided owner is not allowed")] - IllegalOwner, - #[error("Accounts data allocations exceeded the maximum allowed per transaction")] - MaxAccountsDataAllocationsExceeded, - #[error("Account data reallocation was invalid")] - InvalidRealloc, - #[error("Instruction trace length exceeded the maximum allowed per transaction")] - MaxInstructionTraceLengthExceeded, - #[error("Builtin programs must consume compute units")] - BuiltinProgramsMustConsumeComputeUnits, - #[error("Invalid account owner")] - InvalidAccountOwner, - #[error("Program arithmetic overflowed")] - ArithmeticOverflow, - #[error("Account is immutable")] - Immutable, - #[error("Incorrect authority provided")] - IncorrectAuthority, -} - -pub trait PrintProgramError { - fn print(&self) - where - E: 'static + std::error::Error + DecodeError + PrintProgramError + FromPrimitive; -} - -impl PrintProgramError for ProgramError { - fn print(&self) - where - E: 'static + std::error::Error + DecodeError + PrintProgramError + FromPrimitive, - { - match self { - Self::Custom(error) => { - if let Some(custom_error) = E::decode_custom_error_to_enum(*error) { - custom_error.print::(); - } else { - msg!("Error: Unknown"); - } - } - Self::InvalidArgument => msg!("Error: InvalidArgument"), - Self::InvalidInstructionData => msg!("Error: InvalidInstructionData"), - Self::InvalidAccountData => msg!("Error: InvalidAccountData"), - Self::AccountDataTooSmall => msg!("Error: AccountDataTooSmall"), - Self::InsufficientFunds => msg!("Error: InsufficientFunds"), - Self::IncorrectProgramId => msg!("Error: IncorrectProgramId"), - Self::MissingRequiredSignature => msg!("Error: MissingRequiredSignature"), - Self::AccountAlreadyInitialized => msg!("Error: AccountAlreadyInitialized"), - Self::UninitializedAccount => msg!("Error: UninitializedAccount"), - Self::NotEnoughAccountKeys => msg!("Error: NotEnoughAccountKeys"), - Self::AccountBorrowFailed => msg!("Error: AccountBorrowFailed"), - Self::MaxSeedLengthExceeded => msg!("Error: MaxSeedLengthExceeded"), - Self::InvalidSeeds => msg!("Error: InvalidSeeds"), - Self::BorshIoError(_) => msg!("Error: 
BorshIoError"), - Self::AccountNotRentExempt => msg!("Error: AccountNotRentExempt"), - Self::UnsupportedSysvar => msg!("Error: UnsupportedSysvar"), - Self::IllegalOwner => msg!("Error: IllegalOwner"), - Self::MaxAccountsDataAllocationsExceeded => { - msg!("Error: MaxAccountsDataAllocationsExceeded") - } - Self::InvalidRealloc => msg!("Error: InvalidRealloc"), - Self::MaxInstructionTraceLengthExceeded => { - msg!("Error: MaxInstructionTraceLengthExceeded") - } - Self::BuiltinProgramsMustConsumeComputeUnits => { - msg!("Error: BuiltinProgramsMustConsumeComputeUnits") - } - Self::InvalidAccountOwner => msg!("Error: InvalidAccountOwner"), - Self::ArithmeticOverflow => msg!("Error: ArithmeticOverflow"), - Self::Immutable => msg!("Error: Immutable"), - Self::IncorrectAuthority => msg!("Error: IncorrectAuthority"), - } - } -} - -impl From for u64 { - fn from(error: ProgramError) -> Self { - match error { - ProgramError::InvalidArgument => INVALID_ARGUMENT, - ProgramError::InvalidInstructionData => INVALID_INSTRUCTION_DATA, - ProgramError::InvalidAccountData => INVALID_ACCOUNT_DATA, - ProgramError::AccountDataTooSmall => ACCOUNT_DATA_TOO_SMALL, - ProgramError::InsufficientFunds => INSUFFICIENT_FUNDS, - ProgramError::IncorrectProgramId => INCORRECT_PROGRAM_ID, - ProgramError::MissingRequiredSignature => MISSING_REQUIRED_SIGNATURES, - ProgramError::AccountAlreadyInitialized => ACCOUNT_ALREADY_INITIALIZED, - ProgramError::UninitializedAccount => UNINITIALIZED_ACCOUNT, - ProgramError::NotEnoughAccountKeys => NOT_ENOUGH_ACCOUNT_KEYS, - ProgramError::AccountBorrowFailed => ACCOUNT_BORROW_FAILED, - ProgramError::MaxSeedLengthExceeded => MAX_SEED_LENGTH_EXCEEDED, - ProgramError::InvalidSeeds => INVALID_SEEDS, - ProgramError::BorshIoError(_) => BORSH_IO_ERROR, - ProgramError::AccountNotRentExempt => ACCOUNT_NOT_RENT_EXEMPT, - ProgramError::UnsupportedSysvar => UNSUPPORTED_SYSVAR, - ProgramError::IllegalOwner => ILLEGAL_OWNER, - ProgramError::MaxAccountsDataAllocationsExceeded => { - MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED - } - ProgramError::InvalidRealloc => INVALID_ACCOUNT_DATA_REALLOC, - ProgramError::MaxInstructionTraceLengthExceeded => { - MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED - } - ProgramError::BuiltinProgramsMustConsumeComputeUnits => { - BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS - } - ProgramError::InvalidAccountOwner => INVALID_ACCOUNT_OWNER, - ProgramError::ArithmeticOverflow => ARITHMETIC_OVERFLOW, - ProgramError::Immutable => IMMUTABLE, - ProgramError::IncorrectAuthority => INCORRECT_AUTHORITY, - ProgramError::Custom(error) => { - if error == 0 { - CUSTOM_ZERO - } else { - error as u64 - } - } - } - } -} - -impl From for ProgramError { - fn from(error: u64) -> Self { - match error { - CUSTOM_ZERO => Self::Custom(0), - INVALID_ARGUMENT => Self::InvalidArgument, - INVALID_INSTRUCTION_DATA => Self::InvalidInstructionData, - INVALID_ACCOUNT_DATA => Self::InvalidAccountData, - ACCOUNT_DATA_TOO_SMALL => Self::AccountDataTooSmall, - INSUFFICIENT_FUNDS => Self::InsufficientFunds, - INCORRECT_PROGRAM_ID => Self::IncorrectProgramId, - MISSING_REQUIRED_SIGNATURES => Self::MissingRequiredSignature, - ACCOUNT_ALREADY_INITIALIZED => Self::AccountAlreadyInitialized, - UNINITIALIZED_ACCOUNT => Self::UninitializedAccount, - NOT_ENOUGH_ACCOUNT_KEYS => Self::NotEnoughAccountKeys, - ACCOUNT_BORROW_FAILED => Self::AccountBorrowFailed, - MAX_SEED_LENGTH_EXCEEDED => Self::MaxSeedLengthExceeded, - INVALID_SEEDS => Self::InvalidSeeds, - BORSH_IO_ERROR => Self::BorshIoError("Unknown".to_string()), - 
ACCOUNT_NOT_RENT_EXEMPT => Self::AccountNotRentExempt, - UNSUPPORTED_SYSVAR => Self::UnsupportedSysvar, - ILLEGAL_OWNER => Self::IllegalOwner, - MAX_ACCOUNTS_DATA_ALLOCATIONS_EXCEEDED => Self::MaxAccountsDataAllocationsExceeded, - INVALID_ACCOUNT_DATA_REALLOC => Self::InvalidRealloc, - MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED => Self::MaxInstructionTraceLengthExceeded, - BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS => { - Self::BuiltinProgramsMustConsumeComputeUnits - } - INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, - ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, - IMMUTABLE => Self::Immutable, - INCORRECT_AUTHORITY => Self::IncorrectAuthority, - _ => Self::Custom(error as u32), - } - } -} - -impl TryFrom for ProgramError { - type Error = InstructionError; - - fn try_from(error: InstructionError) -> Result { - match error { - Self::Error::Custom(err) => Ok(Self::Custom(err)), - Self::Error::InvalidArgument => Ok(Self::InvalidArgument), - Self::Error::InvalidInstructionData => Ok(Self::InvalidInstructionData), - Self::Error::InvalidAccountData => Ok(Self::InvalidAccountData), - Self::Error::AccountDataTooSmall => Ok(Self::AccountDataTooSmall), - Self::Error::InsufficientFunds => Ok(Self::InsufficientFunds), - Self::Error::IncorrectProgramId => Ok(Self::IncorrectProgramId), - Self::Error::MissingRequiredSignature => Ok(Self::MissingRequiredSignature), - Self::Error::AccountAlreadyInitialized => Ok(Self::AccountAlreadyInitialized), - Self::Error::UninitializedAccount => Ok(Self::UninitializedAccount), - Self::Error::NotEnoughAccountKeys => Ok(Self::NotEnoughAccountKeys), - Self::Error::AccountBorrowFailed => Ok(Self::AccountBorrowFailed), - Self::Error::MaxSeedLengthExceeded => Ok(Self::MaxSeedLengthExceeded), - Self::Error::InvalidSeeds => Ok(Self::InvalidSeeds), - Self::Error::BorshIoError(err) => Ok(Self::BorshIoError(err)), - Self::Error::AccountNotRentExempt => Ok(Self::AccountNotRentExempt), - Self::Error::UnsupportedSysvar => Ok(Self::UnsupportedSysvar), - Self::Error::IllegalOwner => Ok(Self::IllegalOwner), - Self::Error::MaxAccountsDataAllocationsExceeded => { - Ok(Self::MaxAccountsDataAllocationsExceeded) - } - Self::Error::InvalidRealloc => Ok(Self::InvalidRealloc), - Self::Error::MaxInstructionTraceLengthExceeded => { - Ok(Self::MaxInstructionTraceLengthExceeded) - } - Self::Error::BuiltinProgramsMustConsumeComputeUnits => { - Ok(Self::BuiltinProgramsMustConsumeComputeUnits) - } - Self::Error::InvalidAccountOwner => Ok(Self::InvalidAccountOwner), - Self::Error::ArithmeticOverflow => Ok(Self::ArithmeticOverflow), - Self::Error::Immutable => Ok(Self::Immutable), - Self::Error::IncorrectAuthority => Ok(Self::IncorrectAuthority), - _ => Err(error), - } - } -} - -impl From for ProgramError { - fn from(error: PubkeyError) -> Self { - match error { - PubkeyError::MaxSeedLengthExceeded => Self::MaxSeedLengthExceeded, - PubkeyError::InvalidSeeds => Self::InvalidSeeds, - PubkeyError::IllegalOwner => Self::IllegalOwner, - } - } -} - -#[cfg(feature = "borsh")] -impl From for ProgramError { - fn from(error: BorshIoError) -> Self { - Self::BorshIoError(format!("{error}")) - } -} From 2a41dbbce4acc294939f113092a2d0b1714a4a28 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 01:30:34 +0800 Subject: [PATCH 361/529] build(deps): bump rustls from 0.23.12 to 0.23.13 (#2941) * build(deps): bump rustls from 0.23.12 to 0.23.13 Bumps [rustls](https://github.com/rustls/rustls) from 0.23.12 to 0.23.13. 
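As context for the `solana-program-error` extraction in the previous patch: downstream programs keep defining their own error types on top of `ProgramError` in the usual way. A minimal sketch, assuming a hypothetical `MyError` enum and error code; the paths go through the `solana_program` re-exports, which the patch keeps intact:

```rust
use {
    num_derive::FromPrimitive,
    solana_program::{decode_error::DecodeError, program_error::ProgramError},
    thiserror::Error,
};

// Hypothetical program-specific error; the variant and its code are
// illustrative, not taken from the patch.
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum MyError {
    #[error("escrow balance too low")]
    EscrowBalanceTooLow, // crosses the runtime boundary as Custom(0)
}

// Program-specific errors are surfaced as ProgramError::Custom(u32).
impl From<MyError> for ProgramError {
    fn from(e: MyError) -> Self {
        ProgramError::Custom(e as u32)
    }
}

// Lets PrintProgramError::print::<MyError>() recover the variant name
// from the custom code via decode_custom_error_to_enum.
impl<T> DecodeError<T> for MyError {
    fn type_of() -> &'static str {
        "MyError"
    }
}
```

An instruction handler would then return `Err(MyError::EscrowBalanceTooLow.into())`, which the runtime receives as `Custom(0)`.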
- [Release notes](https://github.com/rustls/rustls/releases) - [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustls/rustls/compare/v/0.23.12...v/0.23.13) --- updated-dependencies: - dependency-name: rustls dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 26 +++++++++++++------------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 522e1fe4f5e549..18ac97403d7bc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4511,7 +4511,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -4528,7 +4528,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.3", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-platform-verifier", "slab", "thiserror", @@ -5013,14 +5013,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.3", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -5074,10 +5074,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", "webpki-roots 0.26.5", @@ -5102,9 +5102,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.3", "rustls-pki-types", @@ -6357,7 +6357,7 @@ dependencies = [ "rayon", "rolling-file", "rustc_version 0.4.1", - "rustls 0.23.12", + "rustls 0.23.13", "serde", "serde_bytes", "serde_derive", @@ -7420,7 +7420,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.12", + "rustls 0.23.13", "solana-connection-cache", "solana-logger", "solana-measure", @@ -8024,7 +8024,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.12", + "rustls 0.23.13", "smallvec", "socket2 0.5.7", "solana-logger", @@ -8371,7 +8371,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.12", + "rustls 0.23.13", "solana-entry", "solana-feature-set", "solana-gossip", diff --git a/Cargo.toml b/Cargo.toml index 8229865fef7a7f..fc48a46f0c1602 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -339,7 +339,7 @@ reqwest-middleware = "0.2.5" rolling-file = "0.2.0" rpassword = "7.3" rustc_version = "0.4" -rustls = { version = "0.23.9", default-features = false } +rustls = { version = "0.23.13", default-features = false } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 729ffb286a593b..8e05dbe5e36e65 100644 --- a/programs/sbf/Cargo.lock +++ 
b/programs/sbf/Cargo.lock @@ -3784,7 +3784,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -3801,7 +3801,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.3", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-platform-verifier", "slab", "thiserror", @@ -4212,14 +4212,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.3", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -4273,10 +4273,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", "webpki-roots 0.26.5", @@ -4301,9 +4301,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.3", "rustls-pki-types", @@ -5075,7 +5075,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.12", + "rustls 0.23.13", "serde", "serde_bytes", "serde_derive", @@ -5792,7 +5792,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.12", + "rustls 0.23.13", "solana-connection-cache", "solana-measure", "solana-metrics", @@ -6734,7 +6734,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.12", + "rustls 0.23.13", "smallvec", "socket2 0.5.7", "solana-measure", @@ -6950,7 +6950,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.12", + "rustls 0.23.13", "solana-entry", "solana-feature-set", "solana-gossip", From a02d9272ef75737be01bf5d88eb23631e9f4a543 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 01:30:51 +0800 Subject: [PATCH 362/529] build(deps): bump tempfile from 3.10.1 to 3.12.0 (#2940) * build(deps): bump tempfile from 3.10.1 to 3.12.0 Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.10.1 to 3.12.0. - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/commits) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 7 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 75 +++++++++++++++++++++++++---------------- 3 files changed, 51 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18ac97403d7bc4..0d2a3e1d6adcca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9156,14 +9156,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if 1.0.0", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index fc48a46f0c1602..53f100e20f9114 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -501,7 +501,7 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.41" tarpc = "0.29.0" -tempfile = "3.10.1" +tempfile = "3.12.0" test-case = "3.3.1" thiserror = "1.0.63" tiny-bip39 = "0.8.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8e05dbe5e36e65..df93e8abb7f184 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -972,7 +972,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] [[package]] @@ -2656,7 +2656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if 1.0.0", - "windows-targets 0.52.0", + "windows-targets 0.48.0", ] [[package]] @@ -7634,14 +7634,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if 1.0.0", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -8478,7 +8479,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -8498,17 +8508,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + 
"windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -8519,9 +8530,9 @@ checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -8531,9 +8542,9 @@ checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -8543,9 +8554,15 @@ checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -8555,9 +8572,9 @@ checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -8567,9 +8584,9 @@ checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -8579,9 +8596,9 @@ checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -8591,9 +8608,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" From c4b42ab045860d7b13b3912eafb30e6d2f4e593f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 01:32:28 +0800 Subject: [PATCH 363/529] build(deps): bump lz4 from 1.26.0 to 1.27.0 (#2939) * build(deps): bump lz4 from 1.26.0 to 1.27.0 Bumps [lz4](https://github.com/10xGenomics/lz4-rs) from 1.26.0 to 1.27.0. - [Release notes](https://github.com/10xGenomics/lz4-rs/releases) - [Changelog](https://github.com/10XGenomics/lz4-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/10xGenomics/lz4-rs/commits) --- updated-dependencies: - dependency-name: lz4 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 18 +++++++++--------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d2a3e1d6adcca..6fdfa5f8df553c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1271,12 +1271,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.94" +version = "1.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" +checksum = "2d74707dde2ba56f86ae90effb3b43ddd369504387e718014de010cec7959800" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -3125,9 +3126,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -3459,19 +3460,18 @@ dependencies = [ [[package]] name = "lz4" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958b4caa893816eea05507c20cfe47574a43d9a697138a7872990bba8a0ece68" +checksum = "a231296ca742e418c43660cb68e082486ff2538e8db432bc818580f3965025ed" dependencies = [ - "libc", "lz4-sys", ] [[package]] name = "lz4-sys" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" +checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 53f100e20f9114..18e6c3e879fbea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -296,7 +296,7 @@ libsecp256k1 = { version = "0.6.0", default-features = false, features = [ light-poseidon = "0.2.0" log = "0.4.22" lru = "0.7.7" -lz4 = "1.26.0" +lz4 = "1.27.0" memmap2 = "0.5.10" memoffset = "0.9" merlin = "3" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index df93e8abb7f184..274f0c53586722 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -919,12 +919,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.94" +version = "1.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" +checksum = "2d74707dde2ba56f86ae90effb3b43ddd369504387e718014de010cec7959800" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -2446,9 
+2447,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.21" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -2829,19 +2830,18 @@ dependencies = [ [[package]] name = "lz4" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958b4caa893816eea05507c20cfe47574a43d9a697138a7872990bba8a0ece68" +checksum = "a231296ca742e418c43660cb68e082486ff2538e8db432bc818580f3965025ed" dependencies = [ - "libc", "lz4-sys", ] [[package]] name = "lz4-sys" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" +checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" dependencies = [ "cc", "libc", From 6082e8434dfe1fa34be9bc38dfa76b863c1819ea Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 17 Sep 2024 10:08:48 +0400 Subject: [PATCH 364/529] Remove frozen-abi build script (#2911) * replace cfg(RUSTC_WITH_SPECIALIZATION) with cfg(feature = "frozen-abi") * remove the build scripts for the two frozen-abi crates * remove all rustc_version deps * remove a rustc_version dep that I missed * fix duplicate lines in Cargo.toml files * remove build.rs from instruction crate * remove rustc_version from instruction crate * remove no-longer-needed check-cfg entries * update lock file after rebase --- Cargo.lock | 31 ---------------- Cargo.toml | 3 -- accounts-db/Cargo.toml | 12 +++---- accounts-db/build.rs | 1 - accounts-db/src/accounts_db.rs | 2 +- accounts-db/src/lib.rs | 2 +- bloom/Cargo.toml | 12 +++---- bloom/build.rs | 1 - bloom/src/lib.rs | 2 +- builtins-default-costs/Cargo.toml | 8 ++--- builtins-default-costs/build.rs | 1 - builtins-default-costs/src/lib.rs | 2 +- cargo-registry/Cargo.toml | 3 -- compute-budget/Cargo.toml | 8 ++--- compute-budget/build.rs | 1 - compute-budget/src/compute_budget.rs | 2 +- compute-budget/src/lib.rs | 2 +- core/Cargo.toml | 12 +++---- core/build.rs | 1 - core/src/lib.rs | 2 +- cost-model/Cargo.toml | 12 +++---- cost-model/build.rs | 1 - cost-model/src/lib.rs | 2 +- curves/secp256k1-recover/Cargo.toml | 13 +++---- curves/secp256k1-recover/build.rs | 1 - curves/secp256k1-recover/src/lib.rs | 2 +- frozen-abi/Cargo.toml | 8 ++--- frozen-abi/build.rs | 25 ------------- frozen-abi/macro/Cargo.toml | 8 ++--- frozen-abi/macro/build.rs | 1 - frozen-abi/macro/src/lib.rs | 45 +++++++++++------------- frozen-abi/src/lib.rs | 10 +++--- gossip/Cargo.toml | 12 +++---- gossip/build.rs | 1 - gossip/src/contact_info.rs | 2 +- gossip/src/lib.rs | 2 +- ledger/Cargo.toml | 12 +++---- ledger/build.rs | 1 - ledger/src/lib.rs | 2 +- perf/Cargo.toml | 14 ++++---- perf/build.rs | 24 ------------- perf/src/lib.rs | 2 +- perf/src/recycler.rs | 2 +- program-runtime/Cargo.toml | 12 +++---- program-runtime/build.rs | 1 - program-runtime/src/lib.rs | 2 +- program-runtime/src/loaded_programs.rs | 4 +-- program-runtime/src/sysvar_cache.rs | 2 +- programs/address-lookup-table/Cargo.toml | 3 -- programs/address-lookup-table/build.rs | 1 - programs/address-lookup-table/src/lib.rs | 2 +- programs/stake-tests/Cargo.toml | 3 -- programs/stake/Cargo.toml | 3 -- programs/stake/build.rs | 1 - 
programs/stake/src/lib.rs | 2 +- programs/vote/Cargo.toml | 12 +++---- programs/vote/build.rs | 1 - programs/vote/src/lib.rs | 2 +- runtime-transaction/Cargo.toml | 3 -- runtime-transaction/build.rs | 1 - runtime-transaction/src/lib.rs | 2 +- runtime/Cargo.toml | 12 +++---- runtime/build.rs | 1 - runtime/src/bank/serde_snapshot.rs | 2 +- runtime/src/lib.rs | 2 +- runtime/src/serde_snapshot.rs | 4 +-- runtime/src/serde_snapshot/storage.rs | 2 +- runtime/src/serde_snapshot/utils.rs | 8 ++--- runtime/src/stake_account.rs | 4 +-- sdk/Cargo.toml | 12 +++---- sdk/build.rs | 1 - sdk/feature-set/Cargo.toml | 12 +++---- sdk/feature-set/build.rs | 1 - sdk/feature-set/src/lib.rs | 2 +- sdk/hash/Cargo.toml | 12 +++---- sdk/hash/build.rs | 1 - sdk/hash/src/lib.rs | 2 +- sdk/instruction/Cargo.toml | 4 --- sdk/instruction/build.rs | 1 - sdk/instruction/src/lib.rs | 2 +- sdk/program/Cargo.toml | 4 --- sdk/program/build.rs | 1 - sdk/program/src/lib.rs | 2 +- sdk/pubkey/Cargo.toml | 12 +++---- sdk/pubkey/build.rs | 1 - sdk/pubkey/src/lib.rs | 2 +- sdk/signature/Cargo.toml | 12 +++---- sdk/signature/build.rs | 1 - sdk/signature/src/lib.rs | 2 +- sdk/src/fee.rs | 2 +- sdk/src/lib.rs | 2 +- sdk/src/packet.rs | 6 ++-- sdk/src/reserved_account_keys.rs | 2 +- short-vec/Cargo.toml | 13 +++---- short-vec/build.rs | 1 - short-vec/src/lib.rs | 2 +- svm/Cargo.toml | 12 +++---- svm/build.rs | 1 - svm/src/lib.rs | 2 +- svm/src/message_processor.rs | 2 +- svm/src/runtime_config.rs | 2 +- version/Cargo.toml | 12 +++---- version/build.rs | 1 - version/src/lib.rs | 2 +- vote/Cargo.toml | 12 +++---- vote/build.rs | 1 - vote/src/lib.rs | 2 +- 107 files changed, 210 insertions(+), 356 deletions(-) delete mode 120000 accounts-db/build.rs delete mode 120000 bloom/build.rs delete mode 120000 builtins-default-costs/build.rs delete mode 120000 compute-budget/build.rs delete mode 120000 core/build.rs delete mode 120000 cost-model/build.rs delete mode 120000 curves/secp256k1-recover/build.rs delete mode 100644 frozen-abi/build.rs delete mode 120000 frozen-abi/macro/build.rs delete mode 120000 gossip/build.rs delete mode 120000 ledger/build.rs delete mode 120000 program-runtime/build.rs delete mode 120000 programs/address-lookup-table/build.rs delete mode 120000 programs/stake/build.rs delete mode 120000 programs/vote/build.rs delete mode 120000 runtime-transaction/build.rs delete mode 120000 runtime/build.rs delete mode 120000 sdk/build.rs delete mode 120000 sdk/feature-set/build.rs delete mode 120000 sdk/hash/build.rs delete mode 120000 sdk/instruction/build.rs delete mode 120000 sdk/program/build.rs delete mode 120000 sdk/pubkey/build.rs delete mode 120000 sdk/signature/build.rs delete mode 120000 short-vec/build.rs delete mode 120000 svm/build.rs delete mode 120000 version/build.rs delete mode 120000 vote/build.rs diff --git a/Cargo.lock b/Cargo.lock index 6fdfa5f8df553c..cc21ffa28ef13e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -87,7 +87,6 @@ dependencies = [ "hex", "hyper", "log", - "rustc_version 0.4.1", "serde", "serde_derive", "serde_json", @@ -5713,7 +5712,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version 0.4.1", "seqlock", "serde", "serde_bytes", @@ -5753,7 +5751,6 @@ dependencies = [ "log", "num-derive", "num-traits", - "rustc_version 0.4.1", "solana-feature-set", "solana-log-collector", "solana-program", @@ -5918,7 +5915,6 @@ dependencies = [ "log", "rand 0.8.5", "rayon", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-frozen-abi", @@ -6013,7 +6009,6 @@ dependencies = [ 
"lazy_static", "log", "rand 0.8.5", - "rustc_version 0.4.1", "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-compute-budget-program", @@ -6273,7 +6268,6 @@ dependencies = [ name = "solana-compute-budget" version = "2.1.0" dependencies = [ - "rustc_version 0.4.1", "solana-frozen-abi", "solana-sdk", ] @@ -6356,7 +6350,6 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustc_version 0.4.1", "rustls 0.23.13", "serde", "serde_bytes", @@ -6431,7 +6424,6 @@ dependencies = [ "lazy_static", "log", "rand 0.8.5", - "rustc_version 0.4.1", "solana-builtins-default-costs", "solana-compute-budget", "solana-feature-set", @@ -6587,7 +6579,6 @@ name = "solana-feature-set" version = "2.1.0" dependencies = [ "lazy_static", - "rustc_version 0.4.1", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-program", @@ -6612,7 +6603,6 @@ dependencies = [ "im", "log", "memmap2", - "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -6628,7 +6618,6 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "rustc_version 0.4.1", "syn 2.0.77", ] @@ -6714,7 +6703,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -6759,7 +6747,6 @@ dependencies = [ "bytemuck", "bytemuck_derive", "js-sys", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-atomic-u64", @@ -6787,7 +6774,6 @@ dependencies = [ "getrandom 0.2.10", "js-sys", "num-traits", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-define-syscall", @@ -6860,7 +6846,6 @@ dependencies = [ "rayon", "reed-solomon-erasure", "rocksdb", - "rustc_version 0.4.1", "scopeguard", "serde", "serde_bytes", @@ -7125,7 +7110,6 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustc_version 0.4.1", "serde", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -7220,7 +7204,6 @@ dependencies = [ "parking_lot 0.12.3", "qualifier_attr", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -7295,7 +7278,6 @@ dependencies = [ "num-traits", "percentage", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "solana-compute-budget", "solana-feature-set", @@ -7366,7 +7348,6 @@ dependencies = [ "js-sys", "num-traits", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-atomic-u64", @@ -7665,7 +7646,6 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "regex", - "rustc_version 0.4.1", "serde", "serde_derive", "serde_json", @@ -7726,7 +7706,6 @@ dependencies = [ "criterion", "log", "rand 0.8.5", - "rustc_version 0.4.1", "solana-builtins-default-costs", "solana-compute-budget", "solana-program", @@ -7774,7 +7753,6 @@ dependencies = [ "qualifier_attr", "rand 0.7.3", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "serde_bytes", "serde_derive", @@ -7822,7 +7800,6 @@ dependencies = [ "anyhow", "borsh 1.5.1", "libsecp256k1", - "rustc_version 0.4.1", "solana-define-syscall", "solana-frozen-abi", "solana-frozen-abi-macro", @@ -7877,7 +7854,6 @@ version = "2.1.0" dependencies = [ "assert_matches", "bincode", - "rustc_version 0.4.1", "serde", "serde_json", "solana-frozen-abi", @@ -7893,7 +7869,6 @@ dependencies = [ "ed25519-dalek", "generic-array 0.14.7", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-frozen-abi", @@ -7926,7 +7901,6 @@ dependencies = [ "bincode", "log", "proptest", - "rustc_version 0.4.1", "solana-compute-budget", "solana-config-program", "solana-feature-set", @@ -7945,7 +7919,6 @@ version = "2.1.0" dependencies = [ "assert_matches", "bincode", 
- "rustc_version 0.4.1", "solana-feature-set", "solana-program-test", "solana-sdk", @@ -8052,7 +8025,6 @@ dependencies = [ "prost", "qualifier_attr", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "serde_derive", "shuttle", @@ -8461,7 +8433,6 @@ name = "solana-version" version = "2.1.0" dependencies = [ "log", - "rustc_version 0.4.1", "semver 1.0.23", "serde", "serde_derive", @@ -8481,7 +8452,6 @@ dependencies = [ "itertools 0.12.1", "log", "rand 0.8.5", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-frozen-abi", @@ -8499,7 +8469,6 @@ dependencies = [ "log", "num-derive", "num-traits", - "rustc_version 0.4.1", "serde", "serde_derive", "solana-feature-set", diff --git a/Cargo.toml b/Cargo.toml index 18e6c3e879fbea..9605e91f04d90a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -178,8 +178,6 @@ level = "warn" check-cfg = [ 'cfg(target_os, values("solana"))', 'cfg(feature, values("frozen-abi", "no-entrypoint"))', - 'cfg(RUSTC_WITH_SPECIALIZATION)', - 'cfg(RUSTC_WITHOUT_SPECIALIZATION)', ] [workspace.dependencies] @@ -338,7 +336,6 @@ reqwest = { version = "0.11.27", default-features = false } reqwest-middleware = "0.2.5" rolling-file = "0.2.0" rpassword = "7.3" -rustc_version = "0.4" rustls = { version = "0.23.13", default-features = false } scopeguard = "1.2.0" semver = "1.0.23" diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 2eb876219da96a..4d84e51daa0fbb 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -37,8 +37,12 @@ serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } smallvec = { workspace = true, features = ["const_generics"] } solana-bucket-map = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-inline-spl = { workspace = true } solana-lattice-hash = { workspace = true } solana-measure = { workspace = true } @@ -79,9 +83,6 @@ test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = [ "dep:qualifier_attr", @@ -89,7 +90,6 @@ dev-context-only-utils = [ "dep:solana-vote-program", ] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/accounts-db/build.rs b/accounts-db/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/accounts-db/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9842a6d9d4d0db..f863c81007b110 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -2415,7 +2415,7 @@ pub fn make_min_priority_thread_pool() -> ThreadPool { .unwrap() } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl solana_frozen_abi::abi_example::AbiExample for AccountsDb { fn example() -> Self { let accounts_db = AccountsDb::new_single_for_tests(); diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 68ffe068bec6f2..8e7b4faf926b75 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, 
feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #[macro_use] diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index 4c9d88893fbacf..f9d82460ec11c8 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -16,8 +16,12 @@ log = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sanitize = { workspace = true } solana-sdk = { workspace = true } @@ -31,12 +35,8 @@ name = "solana_bloom" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/bloom/build.rs b/bloom/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/bloom/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/bloom/src/lib.rs b/bloom/src/lib.rs index e188a9c2a1873a..944643883dd978 100644 --- a/bloom/src/lib.rs +++ b/bloom/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] pub mod bloom; #[cfg_attr(feature = "frozen-abi", macro_use)] diff --git a/builtins-default-costs/Cargo.toml b/builtins-default-costs/Cargo.toml index f2173aabed588f..bdd29a267280b5 100644 --- a/builtins-default-costs/Cargo.toml +++ b/builtins-default-costs/Cargo.toml @@ -17,7 +17,9 @@ solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-compute-budget-program = { workspace = true } solana-config-program = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-loader-v4-program = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } @@ -35,12 +37,8 @@ rand = "0.8.5" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "solana-vote-program/frozen-abi", ] diff --git a/builtins-default-costs/build.rs b/builtins-default-costs/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/builtins-default-costs/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/builtins-default-costs/src/lib.rs b/builtins-default-costs/src/lib.rs index 43c5c3043fcfc0..915064b4b79e35 100644 --- a/builtins-default-costs/src/lib.rs +++ b/builtins-default-costs/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] use { ahash::AHashMap, diff --git a/cargo-registry/Cargo.toml b/cargo-registry/Cargo.toml index 3224845067a87a..fa876b944bca60 100644 --- a/cargo-registry/Cargo.toml +++ 
b/cargo-registry/Cargo.toml @@ -39,8 +39,5 @@ toml = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = [] diff --git a/compute-budget/Cargo.toml b/compute-budget/Cargo.toml index bbcaa27b8d57d5..07ba01cfdc785f 100644 --- a/compute-budget/Cargo.toml +++ b/compute-budget/Cargo.toml @@ -10,15 +10,13 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sdk = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "solana-sdk/frozen-abi", ] diff --git a/compute-budget/build.rs b/compute-budget/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/compute-budget/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/compute-budget/src/compute_budget.rs b/compute-budget/src/compute_budget.rs index 5539e812645349..577a000b7c6799 100644 --- a/compute-budget/src/compute_budget.rs +++ b/compute-budget/src/compute_budget.rs @@ -1,6 +1,6 @@ use crate::compute_budget_limits::{self, ComputeBudgetLimits, DEFAULT_HEAP_COST}; -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { fn example() -> Self { // ComputeBudget is not Serialize so just rely on Default. diff --git a/compute-budget/src/lib.rs b/compute-budget/src/lib.rs index f6ff865be67185..46a8312dcffd27 100644 --- a/compute-budget/src/lib.rs +++ b/compute-budget/src/lib.rs @@ -1,5 +1,5 @@ //! Solana compute budget types and default configurations. 
-#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] pub mod compute_budget; pub mod compute_budget_limits; diff --git a/core/Cargo.toml b/core/Cargo.toml index bbceb94dbc659e..4d0797908627f5 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -54,8 +54,12 @@ solana-cost-model = { workspace = true } solana-entry = { workspace = true } solana-feature-set = { workspace = true } solana-fee = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-geyser-plugin-manager = { workspace = true } solana-gossip = { workspace = true } solana-ledger = { workspace = true } @@ -118,15 +122,11 @@ test-case = { workspace = true } [target."cfg(unix)".dependencies] sysctl = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = [ "solana-runtime/dev-context-only-utils", ] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-accounts-db/frozen-abi", diff --git a/core/build.rs b/core/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/core/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/core/src/lib.rs b/core/src/lib.rs index 2ba671ca62b580..c88488f0876667 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #![recursion_limit = "2048"] //! The `solana` library implements the Solana high-performance blockchain architecture. 
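Aside: every hunk so far applies the same two-part change: the nightly-only specialization gate moves from a build-script-emitted cfg (RUSTC_WITH_SPECIALIZATION) to the ordinary Cargo feature `frozen-abi`, and the per-crate `rustc_version` build dependency and `build.rs` symlink disappear with it. A minimal sketch of what a migrated crate looks like; `ExampleCache` is a hypothetical type used only for illustration, not one from the patch:

    // lib.rs of a hypothetical crate with an optional `frozen-abi` feature.
    // With the feature off this builds on stable Rust; with it on, nightly
    // is required because min_specialization is enabled.
    #![cfg_attr(feature = "frozen-abi", feature(min_specialization))]

    #[derive(Default)]
    pub struct ExampleCache {
        entries: Vec<u64>,
    }

    // The old gate was cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"));
    // since enabling the feature now implies a nightly toolchain, the channel
    // probe is redundant and a single cfg suffices. Types that are not
    // Serialize just rely on Default, as the ComputeBudget impl above does.
    #[cfg(feature = "frozen-abi")]
    impl solana_frozen_abi::abi_example::AbiExample for ExampleCache {
        fn example() -> Self {
            Self::default()
        }
    }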
diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 2339f2e9d3eee4..54564e1e0dd9ca 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -16,8 +16,12 @@ log = { workspace = true } solana-builtins-default-costs = { workspace = true } solana-compute-budget = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-metrics = { workspace = true } solana-runtime-transaction = { workspace = true } solana-sdk = { workspace = true } @@ -40,12 +44,8 @@ test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-compute-budget/frozen-abi", diff --git a/cost-model/build.rs b/cost-model/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/cost-model/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/cost-model/src/lib.rs b/cost-model/src/lib.rs index d0e043c9ad77ed..f408a18de3b377 100644 --- a/cost-model/src/lib.rs +++ b/cost-model/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] pub mod block_cost_limits; diff --git a/curves/secp256k1-recover/Cargo.toml b/curves/secp256k1-recover/Cargo.toml index 6f983ef6d713c4..daa138f76e18e6 100644 --- a/curves/secp256k1-recover/Cargo.toml +++ b/curves/secp256k1-recover/Cargo.toml @@ -11,8 +11,12 @@ edition = { workspace = true } [dependencies] borsh = { workspace = true, optional = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } thiserror = { workspace = true } [target.'cfg(target_os = "solana")'.dependencies] @@ -28,12 +32,9 @@ borsh = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dev-dependencies] libsecp256k1 = { workspace = true, features = ["hmac"] } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] borsh = ["dep:borsh"] -frozen-abi = ["dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/curves/secp256k1-recover/build.rs b/curves/secp256k1-recover/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/curves/secp256k1-recover/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/curves/secp256k1-recover/src/lib.rs b/curves/secp256k1-recover/src/lib.rs index 837f2b8cf3aed3..55c764b214a1e3 100644 --- a/curves/secp256k1-recover/src/lib.rs +++ b/curves/secp256k1-recover/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = 
"frozen-abi", feature(min_specialization))] //! Public key recovery from [secp256k1] ECDSA signatures. //! //! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1 diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 18382a28b3b1bd..78f0c6a67e5f83 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -29,12 +29,10 @@ bitflags = { workspace = true, features = ["serde"] } serde_bytes = { workspace = true } solana-logger = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true } - [features] -default = ["frozen-abi"] -# no reason to deactivate this. It's needed because the build.rs is reused elsewhere +default = [] +# activate the frozen-abi feature when we actually want to do frozen-abi testing, +# otherwise leave it off because it requires nightly Rust frozen-abi = [] [lints] diff --git a/frozen-abi/build.rs b/frozen-abi/build.rs deleted file mode 100644 index a95ef31ad70f65..00000000000000 --- a/frozen-abi/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -#[cfg(feature = "frozen-abi")] -extern crate rustc_version; -#[cfg(feature = "frozen-abi")] -use rustc_version::{version_meta, Channel}; - -fn main() { - #[cfg(feature = "frozen-abi")] - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - } -} diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index f5cb98c1287e45..e0f61637913331 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -17,12 +17,10 @@ proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true, features = ["full", "extra-traits"] } -[build-dependencies] -rustc_version = { workspace = true } - [features] -default = ["frozen-abi"] -# no reason to deactivate this. It's needed because the build.rs is reused elsewhere +default = [] +# activate the frozen-abi feature when we actually want to do frozen-abi testing, +# otherwise leave it off because it requires nightly Rust frozen-abi = [] [lints] diff --git a/frozen-abi/macro/build.rs b/frozen-abi/macro/build.rs deleted file mode 120000 index 10238032f5f6ec..00000000000000 --- a/frozen-abi/macro/build.rs +++ /dev/null @@ -1 +0,0 @@ -../build.rs \ No newline at end of file diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs index bb3f7886169111..abdb0908f8901a 100644 --- a/frozen-abi/macro/src/lib.rs +++ b/frozen-abi/macro/src/lib.rs @@ -1,43 +1,38 @@ extern crate proc_macro; -// This file littered with these essential cfgs so ensure them. 
-#[cfg(not(any(RUSTC_WITH_SPECIALIZATION, RUSTC_WITHOUT_SPECIALIZATION)))] -compile_error!("rustc_version is missing in build dependency and build.rs is not specified"); - -#[cfg(any(RUSTC_WITH_SPECIALIZATION, RUSTC_WITHOUT_SPECIALIZATION))] use proc_macro::TokenStream; // Define dummy macro_attribute and macro_derive for stable rustc -#[cfg(RUSTC_WITHOUT_SPECIALIZATION)] +#[cfg(not(feature = "frozen-abi"))] #[proc_macro_attribute] pub fn frozen_abi(_attrs: TokenStream, item: TokenStream) -> TokenStream { item } -#[cfg(RUSTC_WITHOUT_SPECIALIZATION)] +#[cfg(not(feature = "frozen-abi"))] #[proc_macro_derive(AbiExample)] pub fn derive_abi_sample(_item: TokenStream) -> TokenStream { "".parse().unwrap() } -#[cfg(RUSTC_WITHOUT_SPECIALIZATION)] +#[cfg(not(feature = "frozen-abi"))] #[proc_macro_derive(AbiEnumVisitor)] pub fn derive_abi_enum_visitor(_item: TokenStream) -> TokenStream { "".parse().unwrap() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] use proc_macro2::{Span, TokenStream as TokenStream2, TokenTree}; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] use quote::{quote, ToTokens}; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] use syn::{ parse_macro_input, Attribute, Error, Fields, Ident, Item, ItemEnum, ItemStruct, ItemType, LitStr, Variant, }; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn filter_serde_attrs(attrs: &[Attribute]) -> bool { fn contains_skip(tokens: TokenStream2) -> bool { for token in tokens.into_iter() { @@ -72,7 +67,7 @@ fn filter_serde_attrs(attrs: &[Attribute]) -> bool { false } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn filter_allow_attrs(attrs: &mut Vec<Attribute>) { attrs.retain(|attr| { let ss = &attr.path().segments.first().unwrap().ident.to_string(); @@ -80,7 +75,7 @@ fn filter_allow_attrs(attrs: &mut Vec<Attribute>) { }); } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn derive_abi_sample_enum_type(input: ItemEnum) -> TokenStream { let type_name = &input.ident; @@ -157,7 +152,7 @@ fn derive_abi_sample_enum_type(input: ItemEnum) -> TokenStream { result.into() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn derive_abi_sample_struct_type(input: ItemStruct) -> TokenStream { let type_name = &input.ident; let mut sample_fields = quote! {}; @@ -212,7 +207,7 @@ fn derive_abi_sample_struct_type(input: ItemStruct) -> TokenStream { result.into() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] #[proc_macro_derive(AbiExample)] pub fn derive_abi_sample(item: TokenStream) -> TokenStream { let item = parse_macro_input!(item as Item); @@ -226,7 +221,7 @@ pub fn derive_abi_sample(item: TokenStream) -> TokenStream { } } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { let type_name = &input.ident; let mut serialized_variants = quote! {}; @@ -264,7 +259,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { }).into() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] #[proc_macro_derive(AbiEnumVisitor)] pub fn derive_abi_enum_visitor(item: TokenStream) -> TokenStream { let item = parse_macro_input!(item as Item); @@ -277,7 +272,7 @@ pub fn derive_abi_enum_visitor(item: TokenStream) -> TokenStream { } } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn quote_for_test( test_mod_ident: &Ident, type_name: &Ident, @@ -321,12 +316,12 @@ fn quote_for_test( } } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn test_mod_name(type_name: &Ident) -> Ident { Ident::new(&format!("{type_name}_frozen_abi"), Span::call_site()) } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream { let type_name = &input.ident; let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); @@ -337,7 +332,7 @@ fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream result.into() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn frozen_abi_struct_type(input: ItemStruct, expected_digest: &str) -> TokenStream { let type_name = &input.ident; let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); @@ -348,7 +343,7 @@ fn frozen_abi_struct_type(input: ItemStruct, expected_digest: &str) -> TokenStre result.into() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn quote_sample_variant( type_name: &Ident, ty_generics: &syn::TypeGenerics, variant: &Variant, @@ -394,7 +389,7 @@ fn quote_sample_variant( } } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] fn frozen_abi_enum_type(input: ItemEnum, expected_digest: &str) -> TokenStream { let type_name = &input.ident; let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); @@ -405,7 +400,7 @@ fn frozen_abi_enum_type(input: ItemEnum, expected_digest: &str) -> TokenStream { result.into() } -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] #[proc_macro_attribute] pub fn frozen_abi(attrs: TokenStream, item: TokenStream) -> TokenStream { let mut expected_digest: Option<String> = None; diff --git a/frozen-abi/src/lib.rs b/frozen-abi/src/lib.rs index d9007738a68f16..4c6819e1c297a6 100644 --- a/frozen-abi/src/lib.rs +++ b/frozen-abi/src/lib.rs @@ -1,17 +1,17 @@ #![allow(incomplete_features)] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr(feature = "frozen-abi", feature(specialization))] // Allows macro expansion of `use ::solana_frozen_abi::*` to work within this crate extern crate self as solana_frozen_abi; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] pub mod abi_digester; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] pub mod abi_example; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] mod hash; -#[cfg(RUSTC_WITH_SPECIALIZATION)] +#[cfg(feature = "frozen-abi")] #[macro_use] extern crate solana_frozen_abi_macro; diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 679c4a93f91e09..48449a5d078809 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -33,8 +33,12 @@ solana-client = { workspace = true } solana-connection-cache = { workspace = true } solana-entry = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = 
true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-ledger = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } @@ -62,12 +66,8 @@ serial_test = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-bloom/frozen-abi", diff --git a/gossip/build.rs b/gossip/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/gossip/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 3b05a0fd5fbc81..4a39ae34a6cbd3 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -625,7 +625,7 @@ pub(crate) fn get_quic_socket(socket: &SocketAddr) -> Result )) } -#[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(all(test, feature = "frozen-abi"))] impl solana_frozen_abi::abi_example::AbiExample for ContactInfo { fn example() -> Self { Self { diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index c164a4cd3fde96..b1c90c335edb4b 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] pub mod cluster_info; diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index b12fcddf2b9b41..15c56d07c94c33 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -46,8 +46,12 @@ solana-bpf-loader-program = { workspace = true } solana-cost-model = { workspace = true } solana-entry = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-perf = { workspace = true } @@ -90,13 +94,9 @@ solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } spl-pod = { workspace = true } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = [] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-runtime/frozen-abi", diff --git a/ledger/build.rs b/ledger/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/ledger/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index a7007b49fa4223..c47d6317fc08e6 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #![recursion_limit = "2048"] diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 
0e90fd7ccf231a..cc0354a2677e9e 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -21,8 +21,12 @@ log = { workspace = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-metrics = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } @@ -43,12 +47,8 @@ rand_chacha = { workspace = true } solana-logger = { workspace = true } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", @@ -70,6 +70,4 @@ level = "warn" check-cfg = [ 'cfg(build_target_feature_avx)', 'cfg(build_target_feature_avx2)', - 'cfg(RUSTC_WITH_SPECIALIZATION)', - 'cfg(RUSTC_WITHOUT_SPECIALIZATION)', ] diff --git a/perf/build.rs b/perf/build.rs index a38f6c73307cd7..025c71008f092b 100644 --- a/perf/build.rs +++ b/perf/build.rs @@ -1,8 +1,3 @@ -#[cfg(feature = "frozen-abi")] -extern crate rustc_version; -#[cfg(feature = "frozen-abi")] -use rustc_version::{version_meta, Channel}; - fn main() { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { @@ -13,23 +8,4 @@ fn main() { println!("cargo:rustc-cfg=build_target_feature_avx2"); } } - - // Copied and adapted from - // https://github.com/Kimundi/rustc-version-rs/blob/1d692a965f4e48a8cb72e82cda953107c0d22f47/README.md#example - // Licensed under Apache-2.0 + MIT - #[cfg(feature = "frozen-abi")] - match version_meta().unwrap().channel { - Channel::Stable => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Beta => { - println!("cargo:rustc-cfg=RUSTC_WITHOUT_SPECIALIZATION"); - } - Channel::Nightly => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - Channel::Dev => { - println!("cargo:rustc-cfg=RUSTC_WITH_SPECIALIZATION"); - } - } } diff --git a/perf/src/lib.rs b/perf/src/lib.rs index b3d09c91eb99d0..c70b3afbb0d49a 100644 --- a/perf/src/lib.rs +++ b/perf/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] pub mod cuda_runtime; pub mod data_budget; pub mod deduper; diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 3053d5717a913c..3af4e554f870eb 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -57,7 +57,7 @@ impl Default for RecyclerX { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl solana_frozen_abi::abi_example::AbiExample for RecyclerX> { diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index af276bc12562d0..4e764de35b5854 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -23,8 +23,12 @@ rand = { workspace = true } serde = { workspace = true } solana-compute-budget = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, 
features = [ + "frozen-abi", +] } solana-log-collector = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } @@ -48,12 +52,8 @@ name = "solana_program_runtime" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-compute-budget/frozen-abi", diff --git a/program-runtime/build.rs b/program-runtime/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/program-runtime/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index 7033b96aa875e1..3a690d1d0824e9 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![deny(clippy::arithmetic_side_effects)] #![deny(clippy::indexing_slicing)] diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index e7b49a38951623..181f5280db71d6 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -1347,7 +1347,7 @@ impl<FG: ForkGraph> ProgramCache<FG> { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl solana_frozen_abi::abi_example::AbiExample for ProgramCacheEntry { fn example() -> Self { // ProgramCacheEntry isn't serializable by definition. @@ -1355,7 +1355,7 @@ impl solana_frozen_abi::abi_example::AbiExample for ProgramCacheEntry { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl<FG: ForkGraph> solana_frozen_abi::abi_example::AbiExample for ProgramCache<FG> { fn example() -> Self { // ProgramCache isn't serializable by definition. diff --git a/program-runtime/src/sysvar_cache.rs b/program-runtime/src/sysvar_cache.rs index 79124bd93f379e..8b4bc614375b64 100644 --- a/program-runtime/src/sysvar_cache.rs +++ b/program-runtime/src/sysvar_cache.rs @@ -16,7 +16,7 @@ use { solana_type_overrides::sync::Arc, }; -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for SysvarCache { fn example() -> Self { // SysvarCache is not Serialize so just rely on Default. 
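Aside: the frozen-abi macro hunks above explain why every downstream crate keeps compiling on stable: with the feature off, `frozen_abi` becomes a pass-through attribute and `derive(AbiExample)` expands to nothing, while with the feature on the attribute generates a test module (named `{type_name}_frozen_abi`, per `test_mod_name`) that compares the type's computed ABI digest against the `digest` argument. A sketch of a hypothetical consumer; the type and the digest literal are illustrative only, not taken from the patch:

    use solana_frozen_abi_macro::{frozen_abi, AbiExample};

    // On stable (feature off) the dummy macros leave this struct untouched.
    // On nightly with `frozen-abi` enabled, a `GossipPing_frozen_abi` test
    // module is generated that fails if the type's ABI digest drifts from
    // the value pinned here.
    #[frozen_abi(digest = "11111111111111111111111111111111")]
    #[derive(AbiExample, Default)]
    pub struct GossipPing {
        pub token: u64,
    }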
diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index 30e1ca5a0f3d7b..7c0836219e8508 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -24,9 +24,6 @@ solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [lib] crate-type = ["lib"] name = "solana_address_lookup_table_program" diff --git a/programs/address-lookup-table/build.rs b/programs/address-lookup-table/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/programs/address-lookup-table/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index e146dd184b5385..65dae7585777c1 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -1,5 +1,5 @@ #![allow(incomplete_features)] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr(feature = "frozen-abi", feature(specialization))] #[cfg(not(target_os = "solana"))] pub mod processor; diff --git a/programs/stake-tests/Cargo.toml b/programs/stake-tests/Cargo.toml index f73a7a4195dd0a..e38ae57f62b791 100644 --- a/programs/stake-tests/Cargo.toml +++ b/programs/stake-tests/Cargo.toml @@ -20,8 +20,5 @@ solana-sdk = { workspace = true } solana-vote-program = { workspace = true } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index d5a14a98d4d0ec..5d5dbd442afa3c 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -27,9 +27,6 @@ solana-compute-budget = { workspace = true } solana-logger = { workspace = true } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [lib] crate-type = ["lib"] name = "solana_stake_program" diff --git a/programs/stake/build.rs b/programs/stake/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/programs/stake/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/programs/stake/src/lib.rs b/programs/stake/src/lib.rs index 4ea8df91501a85..e56d9d4b996676 100644 --- a/programs/stake/src/lib.rs +++ b/programs/stake/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #[deprecated( since = "1.8.0", diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index d76f5e1980818b..80d4cbe34d4987 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -17,8 +17,12 @@ num-traits = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-metrics = { workspace = true } solana-program = { workspace = true } 
solana-program-runtime = { workspace = true } @@ -30,9 +34,6 @@ assert_matches = { workspace = true } solana-logger = { workspace = true } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [lib] crate-type = ["lib"] name = "solana_vote_program" @@ -42,7 +43,6 @@ targets = ["x86_64-unknown-linux-gnu"] [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-program/frozen-abi", diff --git a/programs/vote/build.rs b/programs/vote/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/programs/vote/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/programs/vote/src/lib.rs b/programs/vote/src/lib.rs index b63eaa1c5eb643..978d42f6d98282 100644 --- a/programs/vote/src/lib.rs +++ b/programs/vote/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] pub mod vote_processor; pub mod vote_state; diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index 69d172ec112c7c..772a9e28af93df 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -31,9 +31,6 @@ solana-program = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [[bench]] name = "process_compute_budget_instructions" harness = false diff --git a/runtime-transaction/build.rs b/runtime-transaction/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/runtime-transaction/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/runtime-transaction/src/lib.rs b/runtime-transaction/src/lib.rs index 28d54b4eb3b6b8..9b79a97d40e874 100644 --- a/runtime-transaction/src/lib.rs +++ b/runtime-transaction/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] mod compute_budget_instruction_details; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 5a740f32805aed..583a79d0359081 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -56,8 +56,12 @@ solana-config-program = { workspace = true } solana-cost-model = { workspace = true } solana-feature-set = { workspace = true } solana-fee = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-inline-spl = { workspace = true } solana-loader-v4-program = { workspace = true } solana-measure = { workspace = true } @@ -113,13 +117,9 @@ test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = ["solana-svm/dev-context-only-utils"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-accounts-db/frozen-abi", diff --git a/runtime/build.rs b/runtime/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/runtime/build.rs 
+++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 69de472175978e..d0a422e1ad95d0 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -502,7 +502,7 @@ mod tests { assert_eq!(dbank.epoch_reward_status, EpochRewardStatus::Inactive); } - #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] + #[cfg(feature = "frozen-abi")] mod test_bank_serialize { use { super::*, diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d066628c717b34..d4c78936f1b517 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #[macro_use] diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index b49e336ac4aa27..7dd6e7bf7f21e8 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -283,7 +283,7 @@ impl From<BankFieldsToSerialize> for SerializableVersionedBank { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableVersionedBank {} /// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a @@ -817,7 +817,7 @@ impl<'a> Serialize for SerializableAccountsDb<'a> { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl<'a> solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountsDb<'a> {} #[allow(clippy::too_many_arguments)] diff --git a/runtime/src/serde_snapshot/storage.rs b/runtime/src/serde_snapshot/storage.rs index 7308e24c6e025a..c079a6f34d39cc 100644 --- a/runtime/src/serde_snapshot/storage.rs +++ b/runtime/src/serde_snapshot/storage.rs @@ -36,5 +36,5 @@ impl From<&AccountStorageEntry> for SerializableAccountStorageEntry { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountStorageEntry {} diff --git a/runtime/src/serde_snapshot/utils.rs b/runtime/src/serde_snapshot/utils.rs index a9b953ba4851bf..3484d255b79951 100644 --- a/runtime/src/serde_snapshot/utils.rs +++ b/runtime/src/serde_snapshot/utils.rs @@ -2,7 +2,7 @@ use serde::{ ser::{SerializeSeq, SerializeTuple}, Serialize, Serializer, }; -#[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(all(test, feature = "frozen-abi"))] use solana_frozen_abi::abi_example::TransparentAsHelper; // consumes an iterator and returns an object that will serialize as a serde seq @@ -17,7 +17,7 @@ where iter: std::cell::RefCell<Option<I>>, } - #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] + #[cfg(all(test, feature = "frozen-abi"))] impl<I> TransparentAsHelper for SerializableSequencedIterator<I> {} impl<I> Serialize for SerializableSequencedIterator<I> @@ -56,7 +56,7 @@ where iter: std::cell::RefCell<Option<I>>, } - #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] + #[cfg(all(test, feature = "frozen-abi"))] impl<I> TransparentAsHelper for SerializableSequencedIterator<I> {} impl<I> Serialize for SerializableSequencedIterator<I> @@ -95,7 +95,7 @@ where iter: std::cell::RefCell<Option<I>>, } - #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] + #[cfg(all(test, feature = "frozen-abi"))] impl<I> TransparentAsHelper for SerializableMappedIterator<I> {} impl<I> Serialize for SerializableMappedIterator<I> diff --git a/runtime/src/stake_account.rs b/runtime/src/stake_account.rs index d4fe0d65784f30..85cbb9e852ae54 100644 --- a/runtime/src/stake_account.rs +++ b/runtime/src/stake_account.rs @@ -1,4 +1,4 @@ -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] use solana_frozen_abi::abi_example::AbiExample; use { solana_sdk::{ @@ -98,7 +98,7 @@ impl<T> PartialEq<StakeAccount<T>> for StakeAccount<T> { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl AbiExample for StakeAccount { fn example() -> Self { use solana_sdk::{ diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index afc2d7e6c584cf..8793f1e58bd1ab 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -37,7 +37,6 @@ full = [ borsh = ["dep:borsh", "solana-program/borsh", "solana-secp256k1-recover/borsh"] dev-context-only-utils = ["qualifier_attr"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-feature-set/frozen-abi", @@ -88,8 +87,12 @@ solana-bn254 = { workspace = true } solana-decode-error = { workspace = true } solana-derivation-path = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-program = { workspace = true } solana-program-memory = { workspace = true } solana-sanitize = { workspace = true } @@ -121,9 +124,6 @@ solana-sdk = { path = ".", features = ["dev-context-only-utils"] } static_assertions = { workspace = true } tiny-bip39 = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/build.rs b/sdk/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/sdk/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/feature-set/Cargo.toml b/sdk/feature-set/Cargo.toml index 02da9d0cc4f2f9..d6f2824b08982e 100644 --- a/sdk/feature-set/Cargo.toml +++ b/sdk/feature-set/Cargo.toml @@ -11,16 +11,16 @@ edition = { workspace = true } [dependencies] lazy_static = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-program = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", ] diff --git a/sdk/feature-set/build.rs b/sdk/feature-set/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/sdk/feature-set/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/feature-set/src/lib.rs b/sdk/feature-set/src/lib.rs index f2b094e90850dd..6567404a947db5 100644 --- a/sdk/feature-set/src/lib.rs +++ b/sdk/feature-set/src/lib.rs @@ -17,7 +17,7 @@ //! 3. Add desired logic to check for and switch on feature availability. //! //! 
For more information on how features are picked up, see comments for `Feature`. -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] use { lazy_static::lazy_static, diff --git a/sdk/hash/Cargo.toml b/sdk/hash/Cargo.toml index de0dafd76226da..ecf502e0ba243d 100644 --- a/sdk/hash/Cargo.toml +++ b/sdk/hash/Cargo.toml @@ -20,8 +20,12 @@ bytemuck_derive = { workspace = true, optional = true } serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } solana-atomic-u64 = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sanitize = { workspace = true } [dev-dependencies] @@ -31,16 +35,12 @@ solana-hash = { path = ".", features = ["dev-context-only-utils"] } js-sys = { workspace = true } wasm-bindgen = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] borsh = ["dep:borsh", "std"] bytemuck = ["dep:bytemuck", "dep:bytemuck_derive"] default = ["std"] dev-context-only-utils = ["bs58/alloc"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "std" diff --git a/sdk/hash/build.rs b/sdk/hash/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/sdk/hash/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/hash/src/lib.rs b/sdk/hash/src/lib.rs index dfc2e3efce8cf3..c5cad6f58a5861 100644 --- a/sdk/hash/src/lib.rs +++ b/sdk/hash/src/lib.rs @@ -1,5 +1,5 @@ #![no_std] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #[cfg(feature = "borsh")] use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; #[cfg(any(feature = "std", target_arch = "wasm32"))] diff --git a/sdk/instruction/Cargo.toml b/sdk/instruction/Cargo.toml index 4fd6a8d3c7947f..0f96abda001702 100644 --- a/sdk/instruction/Cargo.toml +++ b/sdk/instruction/Cargo.toml @@ -30,15 +30,11 @@ wasm-bindgen = { workspace = true } [dev-dependencies] solana-instruction = { path = ".", features = ["borsh"] } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] bincode = ["dep:bincode", "dep:serde"] borsh = ["dep:borsh"] default = ["std"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "serde", diff --git a/sdk/instruction/build.rs b/sdk/instruction/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/sdk/instruction/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/instruction/src/lib.rs b/sdk/instruction/src/lib.rs index 1b3aa31855ad98..8627a204bb41d8 100644 --- a/sdk/instruction/src/lib.rs +++ b/sdk/instruction/src/lib.rs @@ -10,7 +10,7 @@ //! while executing a given instruction is also included in `Instruction`, as //! [`AccountMeta`] values. The runtime uses this information to efficiently //! schedule execution of transactions. 
-#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #![no_std] diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 0d4d0afae6a1fd..51a8367dfa83f0 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -102,9 +102,6 @@ solana-pubkey = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } test-case = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -123,7 +120,6 @@ borsh = [ ] dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-hash/frozen-abi", diff --git a/sdk/program/build.rs b/sdk/program/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/sdk/program/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 7fb630240861de..8488c00daf937f 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -464,7 +464,7 @@ //! [lut]: https://docs.solanalabs.com/proposals/versioned-transactions #![allow(incomplete_features)] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr(feature = "frozen-abi", feature(specialization))] // Allows macro expansion of `use ::solana_program::*` to work within this crate extern crate self as solana_program; diff --git a/sdk/pubkey/Cargo.toml b/sdk/pubkey/Cargo.toml index b7c9434e7e9ca9..64260e6f085191 100644 --- a/sdk/pubkey/Cargo.toml +++ b/sdk/pubkey/Cargo.toml @@ -21,8 +21,12 @@ serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } solana-atomic-u64 = { workspace = true } solana-decode-error = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sanitize = { workspace = true } [target.'cfg(target_os = "solana")'.dependencies] @@ -55,9 +59,6 @@ solana-pubkey = { path = ".", features = [ strum = { workspace = true } strum_macros = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] borsh = ["dep:borsh", "dep:borsh0-10", "std"] bytemuck = ["dep:bytemuck", "dep:bytemuck_derive"] @@ -65,7 +66,6 @@ curve25519 = ["dep:curve25519-dalek", "sha2"] default = ["std"] dev-context-only-utils = ["dep:arbitrary", "std"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro" ] diff --git a/sdk/pubkey/build.rs b/sdk/pubkey/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/sdk/pubkey/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/pubkey/src/lib.rs b/sdk/pubkey/src/lib.rs index 82a9753df8f6da..c29e3654c396e7 100644 --- a/sdk/pubkey/src/lib.rs +++ b/sdk/pubkey/src/lib.rs @@ -1,6 +1,6 @@ //! Solana account addresses. 
#![no_std] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #[cfg(any(feature = "std", target_arch = "wasm32"))] diff --git a/sdk/signature/Cargo.toml b/sdk/signature/Cargo.toml index e6ac2a0af65554..989200b9281b61 100644 --- a/sdk/signature/Cargo.toml +++ b/sdk/signature/Cargo.toml @@ -16,8 +16,12 @@ generic-array = { workspace = true, features = ["more_lengths"] } rand = { workspace = true, optional = true } serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sanitize = { workspace = true } [dev-dependencies] @@ -25,13 +29,9 @@ curve25519-dalek = { workspace = true } ed25519-dalek = { workspace = true } solana-program = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] default = ["std"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "std" diff --git a/sdk/signature/build.rs b/sdk/signature/build.rs deleted file mode 120000 index 84539eddaa6ded..00000000000000 --- a/sdk/signature/build.rs +++ /dev/null @@ -1 +0,0 @@ -../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/signature/src/lib.rs b/sdk/signature/src/lib.rs index 8d2434c2d3fa14..1521391824a8da 100644 --- a/sdk/signature/src/lib.rs +++ b/sdk/signature/src/lib.rs @@ -1,6 +1,6 @@ //! 64-byte signature type. #![no_std] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #[cfg(any(test, feature = "verify"))] use core::convert::TryInto; #[cfg(feature = "serde")] diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index 24e61e1d14848f..22d04812fa0d76 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -215,7 +215,7 @@ impl Default for FeeStructure { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for FeeStructure { fn example() -> Self { FeeStructure::default() diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 47f680603c65c3..07c92473cbb46a 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -30,7 +30,7 @@ //! 
[`clap`]: https://docs.rs/clap #![allow(incomplete_features)] -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr(feature = "frozen-abi", feature(specialization))] // Allows macro expansion of `use ::solana_sdk::*` to work within this crate extern crate self as solana_sdk; diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index 8d3ef8b3e539cf..1aa93308820d47 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -50,17 +50,17 @@ pub struct Meta { pub flags: PacketFlags, } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for PacketFlags { fn example() -> Self { Self::empty() } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::TransparentAsHelper for PacketFlags {} -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::EvenAsOpaque for PacketFlags { const TYPE_NAME_MATCHER: &'static str = "::_::InternalBitFlags"; } diff --git a/sdk/src/reserved_account_keys.rs b/sdk/src/reserved_account_keys.rs index 0ce4ac632bb250..49cde7fd9bcd79 100644 --- a/sdk/src/reserved_account_keys.rs +++ b/sdk/src/reserved_account_keys.rs @@ -27,7 +27,7 @@ mod zk_elgamal_proof_program { // ReservedAccountKeys is not serialized into or deserialized from bank // snapshots but the bank requires this trait to be implemented anyways. -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for ReservedAccountKeys { fn example() -> Self { // ReservedAccountKeys is not Serialize so just rely on Default. diff --git a/short-vec/Cargo.toml b/short-vec/Cargo.toml index 2ab13eaad53a34..3d1a772f50ae7b 100644 --- a/short-vec/Cargo.toml +++ b/short-vec/Cargo.toml @@ -9,13 +9,14 @@ homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [dependencies] serde = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } [dev-dependencies] assert_matches = { workspace = true } @@ -23,7 +24,7 @@ bincode = { workspace = true } serde_json = { workspace = true } [features] -frozen-abi = ["dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/short-vec/build.rs b/short-vec/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/short-vec/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/short-vec/src/lib.rs b/short-vec/src/lib.rs index f1f9f554e28882..138d3d79dd4ba2 100644 --- a/short-vec/src/lib.rs +++ b/short-vec/src/lib.rs @@ -1,5 +1,5 @@ //! Compact serde-encoding of vectors with small length. 
-#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #[cfg(feature = "frozen-abi")] use solana_frozen_abi_macro::AbiExample; diff --git a/svm/Cargo.toml b/svm/Cargo.toml index 2b2a44646e7604..be94989d892121 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -20,8 +20,12 @@ solana-bpf-loader-program = { workspace = true } solana-compute-budget = { workspace = true } solana-feature-set = { workspace = true } solana-fee = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-loader-v4-program = { workspace = true } solana-log-collector = { workspace = true } solana-measure = { workspace = true } @@ -60,13 +64,9 @@ test-case = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-compute-budget/frozen-abi", diff --git a/svm/build.rs b/svm/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/svm/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/svm/src/lib.rs b/svm/src/lib.rs index f3cbbaa0f9cb18..7a42e3ce4b10f8 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] pub mod account_loader; diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs index 7b9d248e5b981f..589e1d1362a6bf 100644 --- a/svm/src/message_processor.rs +++ b/svm/src/message_processor.rs @@ -16,7 +16,7 @@ use { #[derive(Debug, Default, Clone, serde_derive::Deserialize, serde_derive::Serialize)] pub struct MessageProcessor {} -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for MessageProcessor { fn example() -> Self { // MessageProcessor's fields are #[serde(skip)]-ed and not Serialize diff --git a/svm/src/runtime_config.rs b/svm/src/runtime_config.rs index 7e063bc30e5b5d..e7ee19a1115873 100644 --- a/svm/src/runtime_config.rs +++ b/svm/src/runtime_config.rs @@ -1,6 +1,6 @@ use solana_compute_budget::compute_budget::ComputeBudget; -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] +#[cfg(feature = "frozen-abi")] impl ::solana_frozen_abi::abi_example::AbiExample for RuntimeConfig { fn example() -> Self { // RuntimeConfig is not Serialize so just rely on Default. 
diff --git a/version/Cargo.toml b/version/Cargo.toml index 8fd935b5ae9b25..3372a5cabbbcba 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -15,8 +15,12 @@ semver = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } solana-feature-set = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sanitize = { workspace = true } solana-sdk = { workspace = true } solana-serde-varint = { workspace = true } @@ -24,7 +28,6 @@ solana-serde-varint = { workspace = true } [features] dummy-for-ci-check = [] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", @@ -36,8 +39,5 @@ name = "solana_version" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [lints] workspace = true diff --git a/version/build.rs b/version/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/version/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/version/src/lib.rs b/version/src/lib.rs index 2a09817ccfbcd6..64e25775fbcba9 100644 --- a/version/src/lib.rs +++ b/version/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] extern crate serde_derive; pub use self::legacy::{LegacyVersion1, LegacyVersion2}; diff --git a/vote/Cargo.toml b/vote/Cargo.toml index 89f3b5e433f49f..ae27fd04c781fb 100644 --- a/vote/Cargo.toml +++ b/vote/Cargo.toml @@ -15,8 +15,12 @@ log = { workspace = true } rand = { workspace = true, optional = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } -solana-frozen-abi = { workspace = true, optional = true } -solana-frozen-abi-macro = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } solana-sdk = { workspace = true } thiserror = { workspace = true } @@ -31,13 +35,9 @@ rand = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -rustc_version = { workspace = true, optional = true } - [features] dev-context-only-utils = ["dep:rand"] frozen-abi = [ - "dep:rustc_version", "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", diff --git a/vote/build.rs b/vote/build.rs deleted file mode 120000 index ae66c237c5f4fd..00000000000000 --- a/vote/build.rs +++ /dev/null @@ -1 +0,0 @@ -../frozen-abi/build.rs \ No newline at end of file diff --git a/vote/src/lib.rs b/vote/src/lib.rs index 5f2fa1a6d5be92..53d8fdc37e06a7 100644 --- a/vote/src/lib.rs +++ b/vote/src/lib.rs @@ -1,4 +1,4 @@ -#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] pub mod vote_account; From cfd393654f84c36a3c49f15dbe25e16a0269008d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 13:44:52 +0800 
Subject: [PATCH 365/529] build(deps): bump anyhow from 1.0.87 to 1.0.89 (#2948) * build(deps): bump anyhow from 1.0.87 to 1.0.89 Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.87 to 1.0.89. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.87...1.0.89) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cc21ffa28ef13e..b36880247950b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -430,9 +430,9 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "aquamarine" diff --git a/Cargo.toml b/Cargo.toml index 9605e91f04d90a..98d2508c5e9548 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -186,7 +186,7 @@ agave-transaction-view = { path = "transaction-view", version = "=2.1.0" } aquamarine = "0.3.3" aes-gcm-siv = "0.11.1" ahash = "0.8.10" -anyhow = "1.0.87" +anyhow = "1.0.89" arbitrary = "1.3.2" ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" ark-serialize = "0.4.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 274f0c53586722..7798d62c39aead 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -235,9 +235,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "aquamarine" @@ -2657,7 +2657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if 1.0.0", - "windows-targets 0.48.0", + "windows-targets 0.52.6", ] [[package]] From 92eca1192b055d896558a78759d4e79ab4721ff1 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Wed, 18 Sep 2024 09:32:11 -0500 Subject: [PATCH 366/529] store-histogram: skip not found file meta (#2906) * add error message to show the filename when the file is not found * Update accounts-db/store-histogram/src/main.rs Co-authored-by: Brooks * fmt * skip when metadata fails --------- Co-authored-by: HaoranYi Co-authored-by: Brooks --- accounts-db/store-histogram/src/main.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/accounts-db/store-histogram/src/main.rs b/accounts-db/store-histogram/src/main.rs index 9d0b87327de9d8..f1fea98a8f79c8 100644 --- a/accounts-db/store-histogram/src/main.rs +++ b/accounts-db/store-histogram/src/main.rs @@ -249,8 +249,16 @@ fn main() { for entry in dir.flatten() { if let Some(name) = entry.path().file_name() { let name = name.to_str().unwrap().split_once(".").unwrap().0; - let len = fs::metadata(entry.path()).unwrap().len(); - info.push((name.parse::().unwrap(), len as
usize)); + match fs::metadata(entry.path()) { + Ok(meta) => { + info.push((name.parse::().unwrap(), meta.len() as usize)); + } + Err(_) => { + // skip when metadata fails. This can happen when you are running this tool while a validator is running. + // It could clean something away and delete it after getting the dir but before opening the file. + continue; + } + } // eprintln!("{name}, {len}"); } } From 4d999b0648c9b83e87de871fc1485cc0ecf45d05 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Sep 2024 18:08:11 -0400 Subject: [PATCH 367/529] Uses actual number of items in generate_index_for_slot() (#2919) --- accounts-db/src/accounts_db.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index f863c81007b110..758a0a56b46c05 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -8860,6 +8860,7 @@ impl AccountsDb { } items_local.push(info.index_info); }); + let items_len = items_local.len(); let items = items_local.into_iter().map(|info| { if let Some(amount_to_top_off_rent_this_account) = Self::stats_for_rent_payers( &info.pubkey, @@ -8884,11 +8885,7 @@ impl AccountsDb { ) }); self.accounts_index - .insert_new_if_missing_into_primary_index( - slot, - storage.approx_stored_count(), - items, - ) + .insert_new_if_missing_into_primary_index(slot, items_len, items) }; if secondary { // scan storage a second time to update the secondary index From a1962851bf1f3024ad7a964b6936d41e4a2d18a1 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Sep 2024 18:08:26 -0400 Subject: [PATCH 368/529] Uses scan_pubkeys() in accounts_count() (#2943) --- accounts-db/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 758a0a56b46c05..7a96392b0a1825 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9472,7 +9472,7 @@ pub(crate) enum UpdateIndexThreadSelection { impl AccountStorageEntry { fn accounts_count(&self) -> usize { let mut count = 0; - self.accounts.scan_accounts(|_| { + self.accounts.scan_pubkeys(|_| { count += 1; }); count From 18f68ffbfb33f39c52903e901c71767fdbdcb424 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 18 Sep 2024 18:08:43 -0400 Subject: [PATCH 369/529] Uses real stored count in tests (#2945) --- accounts-db/src/accounts_db.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7a96392b0a1825..b46db6bcc278f6 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9932,7 +9932,7 @@ pub mod tests { assert_eq!(append_vec.alive_bytes(), expected_alive_bytes); } // total # accounts in append vec - assert_eq!(append_vec.approx_stored_count(), 2); + assert_eq!(append_vec.accounts_count(), 2); // # alive accounts assert_eq!(append_vec.count(), 1); // all account data alive @@ -10772,8 +10772,8 @@ pub mod tests { let slot_1_store = &db.storage.get_slot_storage_entry(1).unwrap(); assert_eq!(slot_0_store.count(), 2); assert_eq!(slot_1_store.count(), 2); - assert_eq!(slot_0_store.approx_stored_count(), 2); - assert_eq!(slot_1_store.approx_stored_count(), 2); + assert_eq!(slot_0_store.accounts_count(), 2); + assert_eq!(slot_1_store.accounts_count(), 2); } // overwrite old rooted account version; only the r_slot_0_stores.count() should be @@ -10786,8 +10786,8 @@ pub mod tests { let slot_1_store = 
&db.storage.get_slot_storage_entry(1).unwrap(); assert_eq!(slot_0_store.count(), 1); assert_eq!(slot_1_store.count(), 2); - assert_eq!(slot_0_store.approx_stored_count(), 2); - assert_eq!(slot_1_store.approx_stored_count(), 2); + assert_eq!(slot_0_store.accounts_count(), 2); + assert_eq!(slot_1_store.accounts_count(), 2); } }); From 1c49101f37e29a633d71082324320175a93c72b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:02:54 +0800 Subject: [PATCH 370/529] build(deps): bump bytes from 1.7.1 to 1.7.2 (#2949) * build(deps): bump bytes from 1.7.1 to 1.7.2 Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.7.1 to 1.7.2. - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.7.1...v1.7.2) --- updated-dependencies: - dependency-name: bytes dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- programs/sbf/Cargo.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b36880247950b7..e7917ab5f8f0d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1180,9 +1180,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bytesize" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 7798d62c39aead..2a1ab65ff5edda 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -882,9 +882,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2" From 7c54e92b02bdfb92a492b7ccd3e4e20288766d97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:03:03 +0800 Subject: [PATCH 371/529] build(deps): bump arrayref from 0.3.8 to 0.3.9 (#2947) * build(deps): bump arrayref from 0.3.8 to 0.3.9 Bumps [arrayref](https://github.com/droundy/arrayref) from 0.3.8 to 0.3.9. - [Commits](https://github.com/droundy/arrayref/commits) --- updated-dependencies: - dependency-name: arrayref dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7917ab5f8f0d3..29a42caa9b6828 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -588,9 +588,9 @@ checksum = "9ad284aeb45c13f2fb4f084de4a420ebf447423bdf9386c0540ce33cb3ef4b8c" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" diff --git a/Cargo.toml b/Cargo.toml index 98d2508c5e9548..20ca674d61856c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -193,7 +193,7 @@ ark-ec = "0.4.0" ark-ff = "0.4.0" ark-serialize = "0.4.0" array-bytes = "=1.4.1" -arrayref = "0.3.8" +arrayref = "0.3.9" arrayvec = "0.7.6" assert_cmd = "2.0" assert_matches = "1.5.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2a1ab65ff5edda..656c3afa2b15f6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -384,9 +384,9 @@ checksum = "9ad284aeb45c13f2fb4f084de4a420ebf447423bdf9386c0540ce33cb3ef4b8c" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" From c1b465dcb9e990d6f3aaeb6200540ac34321b278 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 19 Sep 2024 04:19:25 -0400 Subject: [PATCH 372/529] Fixes rent collection when skipping rewrites (#2910) --- runtime/src/bank.rs | 38 ++++++++++--------- runtime/src/bank/tests.rs | 78 ++++++++++++++++++--------------------- 2 files changed, 55 insertions(+), 61 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 43884d107a065e..ee764f92233116 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4304,14 +4304,14 @@ impl Bank { let mut time_collecting_rent_us = 0; let mut time_storing_accounts_us = 0; let can_skip_rewrites = self.bank_hash_skips_rent_rewrites(); - let test_skip_rewrites_but_include_hash_in_bank_hash = !can_skip_rewrites - && self - .rc - .accounts - .accounts_db - .test_skip_rewrites_but_include_in_bank_hash; + let test_skip_rewrites_but_include_in_bank_hash = self + .rc + .accounts + .accounts_db + .test_skip_rewrites_but_include_in_bank_hash; let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { + let rent_epoch_pre = account.rent_epoch(); let (rent_collected_info, collect_rent_us) = measure_us!(collect_rent_from_account( &self.feature_set, &self.rent_collector, @@ -4319,15 +4319,22 @@ impl Bank { account )); time_collecting_rent_us += collect_rent_us; + let rent_epoch_post = account.rent_epoch(); + + // did the account change in any way due to rent collection? 
+ let account_changed = + rent_collected_info.rent_amount != 0 || rent_epoch_post != rent_epoch_pre; + + // always store the account, regardless if it changed or not + let always_store_accounts = + !can_skip_rewrites && !test_skip_rewrites_but_include_in_bank_hash; // only store accounts where we collected rent // but get the hash for all these accounts even if collected rent is 0 (= not updated). // Also, there's another subtle side-effect from rewrites: this // ensures we verify the whole on-chain state (= all accounts) // via the bank delta hash slowly once per an epoch. - if (!can_skip_rewrites && !test_skip_rewrites_but_include_hash_in_bank_hash) - || !Self::skip_rewrite(rent_collected_info.rent_amount, account) - { + if account_changed || always_store_accounts { if rent_collected_info.rent_amount > 0 { if let Some(rent_paying_pubkeys) = rent_paying_pubkeys { if !rent_paying_pubkeys.contains(pubkey) { @@ -4357,7 +4364,10 @@ impl Bank { } total_rent_collected_info += rent_collected_info; accounts_to_store.push((pubkey, account)); - } else if test_skip_rewrites_but_include_hash_in_bank_hash { + } else if !account_changed + && !can_skip_rewrites + && test_skip_rewrites_but_include_in_bank_hash + { // include rewrites that we skipped in the accounts delta hash. // This is what consensus requires prior to activation of bank_hash_skips_rent_rewrites. // This code path exists to allow us to test the long term effects on validators when the skipped rewrites @@ -4513,14 +4523,6 @@ impl Bank { }); } - /// return true iff storing this account is just a rewrite and can be skipped - fn skip_rewrite(rent_amount: u64, account: &AccountSharedData) -> bool { - // if rent was != 0 - // or special case for default rent value - // these cannot be skipped and must be written - rent_amount == 0 && account.rent_epoch() != 0 - } - pub(crate) fn fixed_cycle_partitions_between_slots( &self, starting_slot: Slot, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 915dfb65bf5e00..13fb573bc8afad 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1762,7 +1762,9 @@ fn test_collect_rent_from_accounts() { solana_logger::setup(); for skip_rewrites in [false, true] { - let zero_lamport_pubkey = Pubkey::from([0; 32]); + let address1 = Pubkey::new_unique(); + let address2 = Pubkey::new_unique(); + let address3 = Pubkey::new_unique(); let (genesis_bank, bank_forks) = create_simple_test_arc_bank(100000); let mut first_bank = new_from_parent(genesis_bank.clone()); @@ -1784,12 +1786,20 @@ fn test_collect_rent_from_accounts() { let data_size = 0; // make sure we're rent exempt let lamports = later_bank.get_minimum_balance_for_rent_exemption(data_size); // cannot be 0 or we zero out rent_epoch in rent collection and we need to be rent exempt - let mut account = AccountSharedData::new(lamports, data_size, &Pubkey::default()); - account.set_rent_epoch(later_bank.epoch() - 1); // non-zero, but less than later_bank's epoch + let mut account1 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); + let mut account2 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); + let mut account3 = AccountSharedData::new(lamports, data_size, &Pubkey::default()); + account1.set_rent_epoch(later_bank.epoch() - 1); // non-zero, but less than later_bank's epoch + account2.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); // already marked as rent exempt + account3.set_rent_epoch(0); // stake accounts in genesis have a rent epoch of 0 // loaded from previous slot, so we skip rent 
collection on it let _result = later_bank.collect_rent_from_accounts( - vec![(zero_lamport_pubkey, account, later_slot - 1)], + vec![ + (address1, account1, later_slot - 1), + (address2, account2, later_slot - 1), + (address3, account3, later_slot - 1), + ], None, PartitionIndex::default(), ); @@ -1800,12 +1810,22 @@ fn test_collect_rent_from_accounts() { .accounts_db .get_pubkey_hash_for_slot(later_slot) .0; + + // ensure account1 *is* stored because the account *did* change + // (its rent epoch must be updated to RENT_EXEMPT_RENT_EPOCH) + assert!(deltas.iter().map(|(pubkey, _)| pubkey).contains(&address1)); + + // if doing rewrites, ensure account2 *is* stored + // if skipping rewrites, ensure account2 is *not* stored + // (because the account did *not* change) assert_eq!( - !deltas - .iter() - .any(|(pubkey, _)| pubkey == &zero_lamport_pubkey), - skip_rewrites + deltas.iter().map(|(pubkey, _)| pubkey).contains(&address2), + !skip_rewrites, ); + + // ensure account3 *is* stored because the account *did* change + // (same as account1 above) + assert!(deltas.iter().map(|(pubkey, _)| pubkey).contains(&address3)); } } @@ -11044,39 +11064,6 @@ fn test_update_accounts_data_size() { } } -#[test] -fn test_skip_rewrite() { - solana_logger::setup(); - let mut account = AccountSharedData::default(); - let bank_slot = 10; - for account_rent_epoch in 0..3 { - account.set_rent_epoch(account_rent_epoch); - for rent_amount in [0, 1] { - for loaded_slot in (bank_slot - 1)..=bank_slot { - for old_rent_epoch in account_rent_epoch.saturating_sub(1)..=account_rent_epoch { - let skip = Bank::skip_rewrite(rent_amount, &account); - let mut should_skip = true; - if rent_amount != 0 || account_rent_epoch == 0 { - should_skip = false; - } - assert_eq!( - skip, - should_skip, - "{:?}", - ( - account_rent_epoch, - old_rent_epoch, - rent_amount, - loaded_slot, - old_rent_epoch - ) - ); - } - } - } - } -} - #[derive(Serialize, Deserialize)] enum MockReallocInstruction { Realloc(usize, u64, Pubkey), @@ -12266,6 +12253,11 @@ where .transfer_and_confirm(mint_lamports, &mint_keypair, &alice_pubkey) .unwrap(); + // create and freeze a bank a few epochs in the future to trigger rent + // collection to visit (and rewrite) all accounts + let bank = new_from_parent_next_epoch(bank, &bank_forks, 2); + bank.freeze(); // trigger rent collection + // create zero-lamports account to be cleaned let account = AccountSharedData::new(0, len1, &program); let slot = bank.slot() + 1; @@ -12313,9 +12305,9 @@ fn test_create_zero_lamport_with_clean() { bank.squash(); bank.force_flush_accounts_cache(); // do clean and assert that it actually did its job - assert_eq!(4, bank.get_snapshot_storages(None).len()); + assert_eq!(5, bank.get_snapshot_storages(None).len()); bank.clean_accounts(); - assert_eq!(3, bank.get_snapshot_storages(None).len()); + assert_eq!(4, bank.get_snapshot_storages(None).len()); }); } From 33167a32a3090ad22aa0f7449ab6ede67a0e114f Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 19 Sep 2024 06:40:11 -0400 Subject: [PATCH 373/529] Adds datapoint if rent collection changes only the rent epoch (#2922) --- runtime/src/bank.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ee764f92233116..a61f86adef5063 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -4322,8 +4322,8 @@ impl Bank { let rent_epoch_post = account.rent_epoch(); // did the account change in any way due to rent collection? 
- let account_changed = - rent_collected_info.rent_amount != 0 || rent_epoch_post != rent_epoch_pre; + let rent_epoch_changed = rent_epoch_post != rent_epoch_pre; + let account_changed = rent_collected_info.rent_amount != 0 || rent_epoch_changed; // always store the account, regardless if it changed or not let always_store_accounts = @@ -4361,6 +4361,17 @@ impl Bank { ); } } + } else { + debug_assert_eq!(rent_collected_info.rent_amount, 0); + if rent_epoch_changed { + datapoint_info!( + "bank-rent_collection_updated_only_rent_epoch", + ("slot", self.slot(), i64), + ("pubkey", pubkey.to_string(), String), + ("rent_epoch_pre", rent_epoch_pre, i64), + ("rent_epoch_post", rent_epoch_post, i64), + ); + } } total_rent_collected_info += rent_collected_info; accounts_to_store.push((pubkey, account)); From 037838a156b433c53d28be8fd5405f520b23a8cd Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 19 Sep 2024 06:40:25 -0400 Subject: [PATCH 374/529] Uses real stored count in check_storage() (#2944) --- accounts-db/src/accounts_db.rs | 18 +++++++----------- runtime/src/serde_snapshot/tests.rs | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b46db6bcc278f6..367500156d2b18 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9557,15 +9557,11 @@ impl AccountsDb { } } - pub fn check_storage(&self, slot: Slot, count: usize) { - assert!(self.storage.get_slot_storage_entry(slot).is_some()); + pub fn check_storage(&self, slot: Slot, alive_count: usize, total_count: usize) { let store = self.storage.get_slot_storage_entry(slot).unwrap(); - let total_count = store.count(); assert_eq!(store.status(), AccountStorageStatus::Available); - assert_eq!(total_count, count); - let (expected_store_count, actual_store_count): (usize, usize) = - (store.approx_stored_count(), store.accounts_count()); - assert_eq!(expected_store_count, actual_store_count); + assert_eq!(store.count(), alive_count); + assert_eq!(store.accounts_count(), total_count); } pub fn create_account( @@ -10758,7 +10754,7 @@ pub mod tests { let mut pubkeys: Vec = vec![]; db.create_account(&mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0); db.add_root_and_flush_write_cache(0); - db.check_storage(0, 2); + db.check_storage(0, 2, 2); let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey); @@ -10910,7 +10906,7 @@ pub mod tests { accounts.create_account(&mut pubkeys, 0, 100, 0, 0); update_accounts(&accounts, &pubkeys, 0, 99); accounts.add_root_and_flush_write_cache(0); - accounts.check_storage(0, 100); + accounts.check_storage(0, 100, 100); } #[test] @@ -11834,9 +11830,9 @@ pub mod tests { // storage for slot 1 had 2 accounts, now has 1 after pubkey 1 // was reclaimed - accounts.check_storage(1, 1); + accounts.check_storage(1, 1, 2); // storage for slot 2 had 1 accounts, now has 1 - accounts.check_storage(2, 1); + accounts.check_storage(2, 1, 1); } #[test] diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index cfc283d54eba2d..6123c43ad259b7 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -324,7 +324,7 @@ mod serde_snapshot_tests { accounts.create_account(&mut pubkeys, 0, 100, 0, 0); if pass == 0 { accounts.add_root_and_flush_write_cache(0); - accounts.check_storage(0, 100); + accounts.check_storage(0, 100, 100); accounts.clean_accounts_for_tests(); 
accounts.check_accounts(&pubkeys, 0, 100, 1); // clean should have done nothing @@ -334,7 +334,7 @@ mod serde_snapshot_tests { // do some updates to those accounts and re-check accounts.modify_accounts(&pubkeys, 0, 100, 2); accounts.add_root_and_flush_write_cache(0); - accounts.check_storage(0, 100); + accounts.check_storage(0, 100, 100); accounts.check_accounts(&pubkeys, 0, 100, 2); accounts.calculate_accounts_delta_hash(0); @@ -356,7 +356,7 @@ mod serde_snapshot_tests { accounts.calculate_accounts_delta_hash(latest_slot); accounts.add_root_and_flush_write_cache(latest_slot); - accounts.check_storage(1, 21); + accounts.check_storage(1, 21, 21); // CREATE SLOT 2 let latest_slot = 2; @@ -376,7 +376,7 @@ mod serde_snapshot_tests { accounts.calculate_accounts_delta_hash(latest_slot); accounts.add_root_and_flush_write_cache(latest_slot); - accounts.check_storage(2, 31); + accounts.check_storage(2, 31, 31); let ancestors = linear_ancestors(latest_slot); accounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false); @@ -385,11 +385,11 @@ mod serde_snapshot_tests { // The first 20 accounts of slot 0 have been updated in slot 2, as well as // accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and // slot 2 respectively), so only 78 accounts are left in slot 0's storage entries. - accounts.check_storage(0, 78); + accounts.check_storage(0, 78, 100); // 10 of the 21 accounts have been modified in slot 2, so only 11 // accounts left in slot 1. - accounts.check_storage(1, 11); - accounts.check_storage(2, 31); + accounts.check_storage(1, 11, 21); + accounts.check_storage(2, 31, 31); let daccounts = reconstruct_accounts_db_via_serialization(&accounts, latest_slot, storage_access); @@ -417,9 +417,9 @@ mod serde_snapshot_tests { // Don't check the first 35 accounts which have not been modified on slot 0 daccounts.check_accounts(&pubkeys[35..], 0, 65, 37); daccounts.check_accounts(&pubkeys1, 1, 10, 1); - daccounts.check_storage(0, 100); - daccounts.check_storage(1, 21); - daccounts.check_storage(2, 31); + daccounts.check_storage(0, 100, 100); + daccounts.check_storage(1, 21, 21); + daccounts.check_storage(2, 31, 31); assert_eq!( daccounts.update_accounts_hash_for_tests(latest_slot, &ancestors, false, false,), From 18d0428e3a43b48b23e8b8645235d94ddd6b6883 Mon Sep 17 00:00:00 2001 From: Jon C Date: Mon, 23 Sep 2024 23:16:37 +0200 Subject: [PATCH 375/529] cli: Use simulated compute unit limit in stake interactions (#2710) * cli: Use simulated compute unit limit in stake interactions #### Problem The CLI has the ability to simulate transactions before sending to use the correct number of compute units, but stake commands are still using the default compute unit limit. #### Summary of changes Update tests to use a compute unit limit, and then update the stake commands to use the simulated compute unit limit. 
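The pattern these commands now share: build the message with a placeholder compute-unit-limit instruction, simulate it, then overwrite the placeholder with the units the simulation actually consumed. A minimal sketch of that flow, assuming a reachable RPC node; `placeholder_ix_index` is a hypothetical parameter standing in for the located instruction index, the message is assumed to already carry a recent blockhash, and this is illustrative rather than the crate's exact helper:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    compute_budget::ComputeBudgetInstruction, message::Message, transaction::Transaction,
};

/// Overwrites the placeholder compute-unit-limit instruction in `message`
/// with the number of units consumed during simulation.
fn update_compute_unit_limit(
    rpc_client: &RpcClient,
    message: &mut Message,
    placeholder_ix_index: usize, // index of the set_compute_unit_limit instruction
) -> Result<(), Box<dyn std::error::Error>> {
    // Simulate the (unsigned) transaction to learn its real compute cost.
    let simulation = rpc_client
        .simulate_transaction(&Transaction::new_unsigned(message.clone()))?
        .value;
    let units_consumed = simulation
        .units_consumed
        .ok_or("simulation did not report units consumed")?;
    // Replace the placeholder limit with the observed one.
    message.instructions[placeholder_ix_index].data =
        ComputeBudgetInstruction::set_compute_unit_limit(units_consumed as u32).data;
    Ok(())
}
```

Simulation requires a live RPC connection, which is why the stake commands below select `ComputeUnitLimit::Simulated` only for `BlockhashQuery::All` and keep `ComputeUnitLimit::Default` on the sign-only and durable-nonce (offline) paths.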
* Simulate for compute units in stake commands * Pass compute unit limit enum to `simulate_and_update...` --- cli/src/compute_budget.rs | 36 +++++++---- cli/src/nonce.rs | 20 +++--- cli/src/program.rs | 31 ++++++--- cli/src/spend_utils.rs | 14 ++--- cli/src/stake.rs | 70 ++++++++++++++++----- cli/tests/stake.rs | 128 ++++++++++++++++++++------------------ 6 files changed, 187 insertions(+), 112 deletions(-) diff --git a/cli/src/compute_budget.rs b/cli/src/compute_budget.rs index 31237181ed771a..3b461357d253c0 100644 --- a/cli/src/compute_budget.rs +++ b/cli/src/compute_budget.rs @@ -12,12 +12,12 @@ use { }, }; -// This enum is equivalent to an Option but was added to self-document -// the ok variants and has the benefit of not forcing the caller to use -// the result if they don't care about it. +/// Enum capturing the possible results of updating a message based on the +/// compute unit limits consumed during simulation. pub(crate) enum UpdateComputeUnitLimitResult { UpdatedInstructionIndex(usize), NoInstructionFound, + SimulationNotConfigured, } fn get_compute_unit_limit_instruction_index(message: &Message) -> Option { @@ -83,8 +83,14 @@ pub(crate) fn simulate_for_compute_unit_limit( simulate_for_compute_unit_limit_unchecked(rpc_client, message) } -// Returns the index of the compute unit limit instruction +/// Simulates a message and returns the index of the compute unit limit +/// instruction +/// +/// If the message does not contain a compute unit limit instruction, or if +/// simulation was not configured, then the function will not simulate the +/// message. pub(crate) fn simulate_and_update_compute_unit_limit( + compute_unit_limit: &ComputeUnitLimit, rpc_client: &RpcClient, message: &mut Message, ) -> Result> { @@ -93,15 +99,23 @@ pub(crate) fn simulate_and_update_compute_unit_limit( return Ok(UpdateComputeUnitLimitResult::NoInstructionFound); }; - let compute_unit_limit = simulate_for_compute_unit_limit_unchecked(rpc_client, message)?; + match compute_unit_limit { + ComputeUnitLimit::Simulated => { + let compute_unit_limit = + simulate_for_compute_unit_limit_unchecked(rpc_client, message)?; - // Overwrite the compute unit limit instruction with the actual units consumed - message.instructions[compute_unit_limit_ix_index].data = - ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit).data; + // Overwrite the compute unit limit instruction with the actual units consumed + message.instructions[compute_unit_limit_ix_index].data = + ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit).data; - Ok(UpdateComputeUnitLimitResult::UpdatedInstructionIndex( - compute_unit_limit_ix_index, - )) + Ok(UpdateComputeUnitLimitResult::UpdatedInstructionIndex( + compute_unit_limit_ix_index, + )) + } + ComputeUnitLimit::Static(_) | ComputeUnitLimit::Default => { + Ok(UpdateComputeUnitLimitResult::SimulationNotConfigured) + } + } } pub(crate) struct ComputeUnitConfig { diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 708ce2b677afdc..67b1be0b6e124d 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -415,6 +415,7 @@ pub fn process_authorize_nonce_account( let latest_blockhash = rpc_client.get_latest_blockhash()?; let nonce_authority = config.signers[nonce_authority]; + let compute_unit_limit = ComputeUnitLimit::Simulated; let ixs = vec![authorize_nonce_account( nonce_account, &nonce_authority.pubkey(), @@ -423,10 +424,10 @@ pub fn process_authorize_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - 
compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); - simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; @@ -575,6 +576,7 @@ pub fn process_new_nonce( } let nonce_authority = config.signers[nonce_authority]; + let compute_unit_limit = ComputeUnitLimit::Simulated; let ixs = vec![advance_nonce_account( nonce_account, &nonce_authority.pubkey(), @@ -582,11 +584,11 @@ pub fn process_new_nonce( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); let latest_blockhash = rpc_client.get_latest_blockhash()?; let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); - simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( @@ -643,6 +645,7 @@ pub fn process_withdraw_from_nonce_account( let latest_blockhash = rpc_client.get_latest_blockhash()?; let nonce_authority = config.signers[nonce_authority]; + let compute_unit_limit = ComputeUnitLimit::Simulated; let ixs = vec![withdraw_nonce_account( nonce_account, &nonce_authority.pubkey(), @@ -652,10 +655,10 @@ pub fn process_withdraw_from_nonce_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); - simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( @@ -677,14 +680,15 @@ pub(crate) fn process_upgrade_nonce_account( compute_unit_price: Option, ) -> ProcessResult { let latest_blockhash = rpc_client.get_latest_blockhash()?; + let compute_unit_limit = ComputeUnitLimit::Simulated; let ixs = vec![upgrade_nonce_account(nonce_account)] .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); let mut message = Message::new(&ixs, Some(&config.signers[0].pubkey())); - simulate_and_update_compute_unit_limit(rpc_client, &mut message)?; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( diff --git a/cli/src/program.rs b/cli/src/program.rs index d4192589aa86a5..59d0960d265da2 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -2420,6 +2420,7 @@ fn do_process_program_deploy( use_rpc: bool, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; + let compute_unit_limit = ComputeUnitLimit::Simulated; let (initial_instructions, balance_needed, buffer_program_data) = if let Some(buffer_program_data) = buffer_program_data { @@ -2442,7 +2443,7 @@ fn do_process_program_deploy( 
Some(Message::new_with_blockhash( &initial_instructions.with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }), Some(&fee_payer_signer.pubkey()), &blockhash, @@ -2462,7 +2463,7 @@ fn do_process_program_deploy( let instructions = vec![instruction].with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) }; @@ -2489,7 +2490,7 @@ fn do_process_program_deploy( )? .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); Some(Message::new_with_blockhash( @@ -2523,6 +2524,7 @@ fn do_process_program_deploy( Some(program_signers), max_sign_attempts, use_rpc, + &compute_unit_limit, )?; let program_id = CliProgramId { @@ -2550,6 +2552,7 @@ fn do_process_write_buffer( use_rpc: bool, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; + let compute_unit_limit = ComputeUnitLimit::Simulated; let (initial_instructions, balance_needed, buffer_program_data) = if let Some(buffer_program_data) = buffer_program_data { @@ -2572,7 +2575,7 @@ fn do_process_write_buffer( Some(Message::new_with_blockhash( &initial_instructions.with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }), Some(&fee_payer_signer.pubkey()), &blockhash, @@ -2592,7 +2595,7 @@ fn do_process_write_buffer( let instructions = vec![instruction].with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) }; @@ -2630,6 +2633,7 @@ fn do_process_write_buffer( None, max_sign_attempts, use_rpc, + &compute_unit_limit, )?; let buffer = CliProgramBuffer { @@ -2658,6 +2662,7 @@ fn do_process_program_upgrade( use_rpc: bool, ) -> ProcessResult { let blockhash = rpc_client.get_latest_blockhash()?; + let compute_unit_limit = ComputeUnitLimit::Simulated; let (initial_message, write_messages, balance_needed) = if let Some(buffer_signer) = buffer_signer @@ -2714,7 +2719,7 @@ fn do_process_program_upgrade( )] .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); Message::new_with_blockhash(&instructions, Some(&fee_payer_signer.pubkey()), &blockhash) }; @@ -2743,7 +2748,7 @@ fn do_process_program_upgrade( )] .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Simulated, + compute_unit_limit, }); let final_message = Message::new_with_blockhash( &final_instructions, @@ -2776,6 +2781,7 @@ fn do_process_program_upgrade( Some(&[upgrade_authority]), max_sign_attempts, use_rpc, + &compute_unit_limit, )?; let program_id = CliProgramId { @@ -2912,11 +2918,12 @@ fn send_deploy_messages( final_signers: Option<&[&dyn Signer]>, max_sign_attempts: usize, use_rpc: bool, + compute_unit_limit: &ComputeUnitLimit, ) -> Result, Box> { if let Some(mut message) = initial_message { if let Some(initial_signer) = initial_signer { trace!("Preparing the required accounts"); - simulate_and_update_compute_unit_limit(&rpc_client, &mut message)?; + simulate_and_update_compute_unit_limit(compute_unit_limit, &rpc_client, &mut message)?; 
let mut initial_transaction = Transaction::new_unsigned(message.clone()); let blockhash = rpc_client.get_latest_blockhash()?; @@ -2947,7 +2954,11 @@ fn send_deploy_messages( { let mut message = write_messages[0].clone(); if let UpdateComputeUnitLimitResult::UpdatedInstructionIndex(ix_index) = - simulate_and_update_compute_unit_limit(&rpc_client, &mut message)? + simulate_and_update_compute_unit_limit( + compute_unit_limit, + &rpc_client, + &mut message, + )? { for msg in &mut write_messages { // Write messages are all assumed to be identical except @@ -3024,7 +3035,7 @@ fn send_deploy_messages( if let Some(final_signers) = final_signers { trace!("Deploying program"); - simulate_and_update_compute_unit_limit(&rpc_client, &mut message)?; + simulate_and_update_compute_unit_limit(compute_unit_limit, &rpc_client, &mut message)?; let mut final_tx = Transaction::new_unsigned(message); let blockhash = rpc_client.get_latest_blockhash()?; let mut signers = final_signers.to_vec(); diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index f09887d831beaf..c36f40553133e5 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -197,18 +197,18 @@ where let mut dummy_message = build_message(lamports); dummy_message.recent_blockhash = *blockhash; - let compute_unit_info = if compute_unit_limit == ComputeUnitLimit::Simulated { - // Simulate for correct compute units + let compute_unit_info = if let UpdateComputeUnitLimitResult::UpdatedInstructionIndex(ix_index) = - simulate_and_update_compute_unit_limit(rpc_client, &mut dummy_message)? + simulate_and_update_compute_unit_limit( + &compute_unit_limit, + rpc_client, + &mut dummy_message, + )? { Some((ix_index, dummy_message.instructions[ix_index].data.clone())) } else { None - } - } else { - None - }; + }; ( get_fee_for_messages(rpc_client, &[&dummy_message])?, compute_unit_info, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 6073ea4f873c01..a5434cd312d598 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -5,7 +5,9 @@ use { log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, - compute_budget::{ComputeUnitConfig, WithComputeUnitConfig}, + compute_budget::{ + simulate_and_update_compute_unit_limit, ComputeUnitConfig, WithComputeUnitConfig, + }, feature::get_feature_activation_epoch, memo::WithMemo, nonce::check_nonce_account, @@ -1393,7 +1395,10 @@ pub fn process_create_stake_account( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; - let compute_unit_limit = ComputeUnitLimit::Default; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let build_message = |lamports| { let authorized = Authorized { staker: staker.unwrap_or(from.pubkey()), @@ -1601,11 +1606,15 @@ pub fn process_stake_authorize( )); } } + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; ixs = ixs .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; @@ -1613,7 +1622,7 @@ pub fn process_stake_authorize( let nonce_authority = config.signers[nonce_authority]; let fee_payer = 
config.signers[fee_payer]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -1623,6 +1632,7 @@ pub fn process_stake_authorize( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -1684,6 +1694,10 @@ pub fn process_deactivate_stake_account( *stake_account_pubkey }; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = vec![if deactivate_delinquent { let stake_account = rpc_client.get_account(&stake_account_address)?; if stake_account.owner != stake::program::id() { @@ -1753,13 +1767,13 @@ pub fn process_deactivate_stake_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -1769,6 +1783,7 @@ pub fn process_deactivate_stake_account( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -1834,7 +1849,10 @@ pub fn process_withdraw_stake( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; - let compute_unit_limit = ComputeUnitLimit::Default; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let build_message = |lamports| { let ixs = vec![stake_instruction::withdraw( &stake_account_address, @@ -2016,6 +2034,10 @@ pub fn process_split_stake( rent_exempt_reserve, )); } + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; if let Some(seed) = split_stake_account_seed { ixs.append( &mut stake_instruction::split_with_seed( @@ -2029,7 +2051,7 @@ pub fn process_split_stake( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }), ) } else { @@ -2043,14 +2065,14 @@ pub fn process_split_stake( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }), ) }; let nonce_authority = config.signers[nonce_authority]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -2060,6 +2082,7 @@ pub fn process_split_stake( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { 
@@ -2146,6 +2169,10 @@ pub fn process_merge_stake( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = stake_instruction::merge( stake_account_pubkey, source_stake_account_pubkey, @@ -2154,12 +2181,12 @@ pub fn process_merge_stake( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let nonce_authority = config.signers[nonce_authority]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -2169,6 +2196,7 @@ pub fn process_merge_stake( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -2225,6 +2253,10 @@ pub fn process_stake_set_lockup( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; let custodian = config.signers[custodian]; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = vec![if new_custodian_signer.is_some() { stake_instruction::set_lockup_checked(stake_account_pubkey, lockup, &custodian.pubkey()) } else { @@ -2233,7 +2265,7 @@ pub fn process_stake_set_lockup( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -2257,7 +2289,7 @@ pub fn process_stake_set_lockup( } } - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -2267,6 +2299,7 @@ pub fn process_stake_set_lockup( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -2707,6 +2740,10 @@ pub fn process_delegate_stake( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = vec![stake_instruction::delegate_stake( stake_account_pubkey, &stake_authority.pubkey(), @@ -2715,13 +2752,13 @@ pub fn process_delegate_stake( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -2731,6 +2768,7 @@ pub fn process_delegate_stake( } else { Message::new(&ixs, 
Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index bcc5f6bb7b39ef..f679de5aacfec4 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -31,6 +31,7 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::{TestValidator, TestValidatorGenesis}, + test_case::test_case, }; #[test] @@ -196,8 +197,9 @@ fn test_stake_delegation_force() { process_command(&config).unwrap(); } -#[test] -fn test_seed_stake_delegation_and_deactivation() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_seed_stake_delegation_and_deactivation(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -252,7 +254,7 @@ fn test_seed_stake_delegation_and_deactivation() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); @@ -269,7 +271,7 @@ fn test_seed_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); @@ -286,13 +288,14 @@ fn test_seed_stake_delegation_and_deactivation() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); } -#[test] -fn test_stake_delegation_and_deactivation() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_stake_delegation_and_deactivation(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -342,7 +345,7 @@ fn test_stake_delegation_and_deactivation() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); @@ -360,7 +363,7 @@ fn test_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); @@ -377,13 +380,14 @@ fn test_stake_delegation_and_deactivation() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); } -#[test] -fn test_offline_stake_delegation_and_deactivation() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_offline_stake_delegation_and_deactivation(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -457,7 +461,7 @@ fn test_offline_stake_delegation_and_deactivation() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_validator).unwrap(); @@ -475,7 +479,7 @@ fn test_offline_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); @@ -497,7 +501,7 @@ fn test_offline_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); @@ -515,7 +519,7 @@ fn test_offline_stake_delegation_and_deactivation() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + 
compute_unit_price, }; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); @@ -536,13 +540,14 @@ fn test_offline_stake_delegation_and_deactivation() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); } -#[test] -fn test_nonced_stake_delegation_and_deactivation() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_nonced_stake_delegation_and_deactivation(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -590,7 +595,7 @@ fn test_nonced_stake_delegation_and_deactivation() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -603,7 +608,7 @@ fn test_nonced_stake_delegation_and_deactivation() { nonce_authority: Some(config.signers[0].pubkey()), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -634,7 +639,7 @@ fn test_nonced_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -664,13 +669,14 @@ fn test_nonced_stake_delegation_and_deactivation() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); } -#[test] -fn test_stake_authorize() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_stake_authorize(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -732,7 +738,7 @@ fn test_stake_authorize() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -757,7 +763,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -799,7 +805,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -831,7 +837,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -863,7 +869,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; config_offline.output_format = OutputFormat::JsonCompact; let sign_reply = process_command(&config_offline).unwrap(); @@ -888,7 +894,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -911,7 +917,7 @@ fn test_stake_authorize() { nonce_authority: Some(offline_authority_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -946,7 +952,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: 
None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; let sign_reply = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sign_reply); @@ -975,7 +981,7 @@ fn test_stake_authorize() { fee_payer: 0, custodian: None, no_wait: false, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -1178,8 +1184,9 @@ fn test_stake_authorize_with_fee_payer() { ); } -#[test] -fn test_stake_split() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_stake_split(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -1252,7 +1259,7 @@ fn test_stake_split() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(10 * stake_balance, &rpc_client, &stake_account_pubkey,); @@ -1269,7 +1276,7 @@ fn test_stake_split() { nonce_authority: Some(offline_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey()); @@ -1301,7 +1308,7 @@ fn test_stake_split() { seed: None, lamports: 2 * stake_balance, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, rent_exempt_reserve: Some(minimum_balance), }; config_offline.output_format = OutputFormat::JsonCompact; @@ -1326,7 +1333,7 @@ fn test_stake_split() { seed: None, lamports: 2 * stake_balance, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, rent_exempt_reserve: None, }; process_command(&config).unwrap(); @@ -1338,8 +1345,9 @@ fn test_stake_split() { ); } -#[test] -fn test_stake_set_lockup() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_stake_set_lockup(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -1418,11 +1426,10 @@ fn test_stake_set_lockup() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(10 * stake_balance, &rpc_client, &stake_account_pubkey,); - check_balance!(10 * stake_balance, &rpc_client, &stake_account_pubkey,); // Online set lockup let lockup = LockupArgs { @@ -1443,7 +1450,7 @@ fn test_stake_set_lockup() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -1480,7 +1487,7 @@ fn test_stake_set_lockup() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -1502,7 +1509,7 @@ fn test_stake_set_lockup() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -1536,7 +1543,7 @@ fn test_stake_set_lockup() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -1553,7 +1560,7 @@ fn test_stake_set_lockup() { nonce_authority: Some(offline_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), - compute_unit_price: None, + compute_unit_price, }; 
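// A condensed sketch (not verbatim from the crate) of the pattern the stake and
// vote hunks in this series repeat: the compute-unit limit is simulated only when
// the CLI resolves the blockhash itself (`BlockhashQuery::All`); flows given a
// fixed blockhash up front (`None` / `FeeCalculator`, e.g. offline signing) keep
// the default limit, presumably so pre-signed messages are not rewritten after
// the fact. `select_compute_unit_limit` is a hypothetical helper name; the diffs
// above inline this match at each call site.
fn select_compute_unit_limit(blockhash_query: &BlockhashQuery) -> ComputeUnitLimit {
    match blockhash_query {
        BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default,
        BlockhashQuery::All(_) => ComputeUnitLimit::Simulated,
    }
}
// The instructions are then wrapped with `.with_compute_unit_config(&ComputeUnitConfig {
// compute_unit_price, compute_unit_limit })`, and
// `simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?`
// patches the message in place before it is signed.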
process_command(&config).unwrap(); check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey); @@ -1586,7 +1593,7 @@ fn test_stake_set_lockup() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); @@ -1609,7 +1616,7 @@ fn test_stake_set_lockup() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap(); @@ -1626,8 +1633,9 @@ fn test_stake_set_lockup() { assert_eq!(current_lockup.custodian, offline_pubkey); } -#[test] -fn test_offline_nonced_create_stake_account_and_withdraw() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_offline_nonced_create_stake_account_and_withdraw(compute_unit_price: Option) { solana_logger::setup(); let mint_keypair = Keypair::new(); @@ -1683,7 +1691,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { nonce_authority: Some(offline_pubkey), memo: None, amount: SpendAmount::Some(minimum_nonce_balance), - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -1717,7 +1725,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); @@ -1745,7 +1753,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(50_000_000_000, &rpc_client, &stake_pubkey); @@ -1778,7 +1786,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); @@ -1801,7 +1809,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { memo: None, seed: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(50_000_000_000, &rpc_client, &recipient_pubkey); @@ -1835,7 +1843,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; let sig_response = process_command(&config_offline).unwrap(); let sign_only = parse_sign_only_reply_string(&sig_response); @@ -1861,7 +1869,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { memo: None, fee_payer: 0, from: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let seed_address = From 1bcb252464dfccdc62be0015abc9889b930c8dc9 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Mon, 23 Sep 2024 22:20:02 -0400 Subject: [PATCH 376/529] banking_stage: evict unstaked votes on epoch boundary (#2960) --- .../banking_stage/latest_unprocessed_votes.rs | 43 +++++++++++++++---- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index b586a973cc3cd0..c126d1a40ed6b4 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ 
b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -351,6 +351,28 @@ impl LatestUnprocessedVotes { .is_active(&feature_set::deprecate_legacy_vote_ixs::id()), Ordering::Relaxed, ); + + // Evict any now unstaked pubkeys + let mut latest_votes_per_pubkey = self.latest_votes_per_pubkey.write().unwrap(); + let mut unstaked_votes = 0; + latest_votes_per_pubkey.retain(|pubkey, vote| { + let is_present = !vote.read().unwrap().is_vote_taken(); + let should_evict = match staked_nodes.get(pubkey) { + None => true, + Some(stake) => *stake == 0, + }; + if is_present && should_evict { + unstaked_votes += 1; + } + !should_evict + }); + self.num_unprocessed_votes + .fetch_sub(unstaked_votes, Ordering::Relaxed); + datapoint_info!( + "latest_unprocessed_votes-epoch-boundary", + ("epoch", bank.epoch(), i64), + ("evicted_unstaked_votes", unstaked_votes, i64) + ); } /// Returns how many packets were forwardable @@ -943,8 +965,10 @@ mod tests { let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None); let vote_b = from_slots(vec![(2, 1)], VoteSource::Tpu, &keypair_b, None); - latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); - latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); + latest_unprocessed_votes + .update_latest_vote(vote_a.clone(), false /* should replenish */); + latest_unprocessed_votes + .update_latest_vote(vote_b.clone(), false /* should replenish */); // Recache on epoch boundary and don't forward 0 stake accounts latest_unprocessed_votes.cache_epoch_boundary_info(&bank); @@ -976,6 +1000,10 @@ mod tests { // Don't forward votes from gossip latest_unprocessed_votes.cache_epoch_boundary_info(&bank); + latest_unprocessed_votes + .update_latest_vote(vote_a.clone(), false /* should replenish */); + latest_unprocessed_votes + .update_latest_vote(vote_b.clone(), false /* should replenish */); let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( Arc::new(bank), &mut forward_packet_batches_by_accounts, @@ -1007,6 +1035,8 @@ mod tests { // Forward from TPU latest_unprocessed_votes.cache_epoch_boundary_info(&bank); + latest_unprocessed_votes.update_latest_vote(vote_a, false /* should replenish */); + latest_unprocessed_votes.update_latest_vote(vote_b, false /* should replenish */); let forwarded = latest_unprocessed_votes.get_and_insert_forwardable_packets( bank.clone(), &mut forward_packet_batches_by_accounts, @@ -1150,7 +1180,7 @@ mod tests { Some(vote_b.slot()) ); - // Previously unstaked votes are not (yet) removed + // Previously unstaked votes are removed let config = genesis_utils::create_genesis_config_with_leader( 100, &keypair_c.node_keypair.pubkey(), @@ -1165,12 +1195,9 @@ mod tests { ); assert_eq!(bank.epoch(), 2); latest_unprocessed_votes.cache_epoch_boundary_info(&bank); + assert_eq!(latest_unprocessed_votes.len(), 0); latest_unprocessed_votes.insert_batch(votes.clone(), true); - assert_eq!(latest_unprocessed_votes.len(), 2); - assert_eq!( - latest_unprocessed_votes.get_latest_vote_slot(keypair_b.node_keypair.pubkey()), - Some(vote_b.slot()) - ); + assert_eq!(latest_unprocessed_votes.len(), 1); assert_eq!( latest_unprocessed_votes.get_latest_vote_slot(keypair_c.node_keypair.pubkey()), Some(vote_c.slot()) From 43cc2dd43494c12b50e8cd0fd6be1200379199d1 Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 24 Sep 2024 11:07:46 +0200 Subject: [PATCH 377/529] cli: Use simulated compute units in vote interactions (#2696) * cli: Add simulated compute units to vote interactions #### Problem The CLI 
can simulate to get the compute budget used by a transaction, but vote interactions are still using the default compute unit limit. #### Summary of changes Add tests for setting a compute unit price with `test_case`, and then change the compute unit limit to `Simulated`. * Use simulated compute units * Fix rebase issues --- cli/src/vote.rs | 48 ++++++++++++++++++++++++++++++++++++----------- cli/tests/vote.rs | 47 ++++++++++++++++++++++++---------------------- 2 files changed, 62 insertions(+), 33 deletions(-) diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 2e451a3d7f2150..66b925d9b88418 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -5,7 +5,9 @@ use { log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, - compute_budget::{ComputeUnitConfig, WithComputeUnitConfig}, + compute_budget::{ + simulate_and_update_compute_unit_limit, ComputeUnitConfig, WithComputeUnitConfig, + }, memo::WithMemo, nonce::check_nonce_account, spend_utils::{resolve_spend_tx_and_check_account_balances, SpendAmount}, @@ -821,7 +823,10 @@ pub fn process_create_vote_account( let nonce_authority = config.signers[nonce_authority]; let space = VoteStateVersions::vote_state_size_of(true) as u64; - let compute_unit_limit = ComputeUnitLimit::Default; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let build_message = |lamports| { let vote_init = VoteInit { node_pubkey: identity_pubkey, @@ -1001,11 +1006,16 @@ pub fn process_vote_authorize( vote_authorize, // vote or withdraw ) }; + + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = vec![vote_ix] .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; @@ -1013,7 +1023,7 @@ pub fn process_vote_authorize( let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -1023,6 +1033,7 @@ pub fn process_vote_authorize( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -1079,6 +1090,10 @@ pub fn process_vote_update_validator( (&new_identity_pubkey, "new_identity_account".to_string()), )?; let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = vec![vote_instruction::update_validator_identity( vote_account_pubkey, &authorized_withdrawer.pubkey(), @@ -1087,12 +1102,12 @@ pub fn process_vote_update_validator( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let nonce_authority = 
config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -1102,6 +1117,7 @@ pub fn process_vote_update_validator( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -1152,6 +1168,10 @@ pub fn process_vote_update_commission( ) -> ProcessResult { let authorized_withdrawer = config.signers[withdraw_authority]; let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let ixs = vec![vote_instruction::update_commission( vote_account_pubkey, &authorized_withdrawer.pubkey(), @@ -1160,12 +1180,12 @@ pub fn process_vote_update_commission( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; - let message = if let Some(nonce_account) = &nonce_account { + let mut message = if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( ixs, Some(&fee_payer.pubkey()), @@ -1175,6 +1195,7 @@ pub fn process_vote_update_commission( } else { Message::new(&ixs, Some(&fee_payer.pubkey())) }; + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); if sign_only { tx.try_partial_sign(&config.signers, recent_blockhash)?; @@ -1318,7 +1339,10 @@ pub fn process_withdraw_from_vote_account( let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; - let compute_unit_limit = ComputeUnitLimit::Default; + let compute_unit_limit = match blockhash_query { + BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + }; let build_message = |lamports| { let ixs = vec![withdraw( vote_account_pubkey, @@ -1441,6 +1465,7 @@ pub fn process_close_vote_account( let current_balance = rpc_client.get_balance(vote_account_pubkey)?; + let compute_unit_limit = ComputeUnitLimit::Simulated; let ixs = vec![withdraw( vote_account_pubkey, &withdraw_authority.pubkey(), @@ -1450,10 +1475,11 @@ pub fn process_close_vote_account( .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, - compute_unit_limit: ComputeUnitLimit::Default, + compute_unit_limit, }); - let message = Message::new(&ixs, Some(&fee_payer.pubkey())); + let mut message = Message::new(&ixs, Some(&fee_payer.pubkey())); + simulate_and_update_compute_unit_limit(&compute_unit_limit, rpc_client, &mut message)?; let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( diff --git a/cli/tests/vote.rs b/cli/tests/vote.rs index fb83d232a6f557..2a3f1104526186 100644 --- a/cli/tests/vote.rs +++ b/cli/tests/vote.rs @@ -17,10 +17,12 @@ use { solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, solana_vote_program::vote_state::{VoteAuthorize, 
VoteState, VoteStateVersions}, + test_case::test_case, }; -#[test] -fn test_vote_authorize_and_withdraw() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_vote_authorize_and_withdraw(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); @@ -56,7 +58,7 @@ fn test_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let vote_account = rpc_client @@ -88,7 +90,7 @@ fn test_vote_authorize_and_withdraw() { fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let expected_balance = expected_balance + 10_000; @@ -110,7 +112,7 @@ fn test_vote_authorize_and_withdraw() { fee_payer: 0, authorized: 0, new_authorized: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let vote_account = rpc_client @@ -136,7 +138,7 @@ fn test_vote_authorize_and_withdraw() { fee_payer: 0, authorized: 1, new_authorized: Some(1), - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap_err(); // unsigned by new authority should fail config.signers = vec![ @@ -157,7 +159,7 @@ fn test_vote_authorize_and_withdraw() { fee_payer: 0, authorized: 1, new_authorized: Some(2), - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let vote_account = rpc_client @@ -182,7 +184,7 @@ fn test_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); let expected_balance = expected_balance - 1_000; @@ -203,7 +205,7 @@ fn test_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); @@ -216,15 +218,16 @@ fn test_vote_authorize_and_withdraw() { destination_account_pubkey: destination_account, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config).unwrap(); check_balance!(0, &rpc_client, &vote_account_pubkey); check_balance!(expected_balance, &rpc_client, &destination_account); } -#[test] -fn test_offline_vote_authorize_and_withdraw() { +#[test_case(None; "base")] +#[test_case(Some(1_000_000); "with_compute_unit_price")] +fn test_offline_vote_authorize_and_withdraw(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); @@ -283,7 +286,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); let vote_account = rpc_client @@ -315,7 +318,7 @@ fn test_offline_vote_authorize_and_withdraw() { fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); let expected_balance = expected_balance + 10_000; @@ -337,7 +340,7 @@ fn test_offline_vote_authorize_and_withdraw() { fee_payer: 0, authorized: 0, new_authorized: None, - compute_unit_price: None, + compute_unit_price, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = 
process_command(&config_offline).unwrap(); @@ -360,7 +363,7 @@ fn test_offline_vote_authorize_and_withdraw() { fee_payer: 0, authorized: 0, new_authorized: None, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); let vote_account = rpc_client @@ -387,7 +390,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; config_offline.output_format = OutputFormat::JsonCompact; let sig_response = process_command(&config_offline).unwrap(); @@ -408,7 +411,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); let expected_balance = expected_balance - 1_000; @@ -435,7 +438,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_offline).unwrap(); config_offline.output_format = OutputFormat::JsonCompact; @@ -456,7 +459,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); @@ -476,7 +479,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_offline).unwrap(); config_offline.output_format = OutputFormat::JsonCompact; @@ -498,7 +501,7 @@ fn test_offline_vote_authorize_and_withdraw() { nonce_authority: 0, memo: None, fee_payer: 0, - compute_unit_price: None, + compute_unit_price, }; process_command(&config_payer).unwrap(); check_balance!(0, &rpc_client, &vote_account_pubkey); From 90d3761b9b335e3546d15ef7f691d8bb9ae6bce9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 19:36:45 +0800 Subject: [PATCH 378/529] build(deps): bump thiserror from 1.0.63 to 1.0.64 (#2962) * build(deps): bump thiserror from 1.0.63 to 1.0.64 Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.63 to 1.0.64. - [Release notes](https://github.com/dtolnay/thiserror/releases) - [Commits](https://github.com/dtolnay/thiserror/compare/1.0.63...1.0.64) --- updated-dependencies: - dependency-name: thiserror dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 29a42caa9b6828..a29c3af93cb5dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9203,18 +9203,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 20ca674d61856c..7fdd1c26c0a40a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -500,7 +500,7 @@ tar = "0.4.41" tarpc = "0.29.0" tempfile = "3.12.0" test-case = "3.3.1" -thiserror = "1.0.63" +thiserror = "1.0.64" tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 656c3afa2b15f6..fbd6e981fe7fa6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7671,18 +7671,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", From 651530a36e8a6557b83228e73439baedc4e917c4 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Tue, 24 Sep 2024 10:20:59 -0400 Subject: [PATCH 379/529] Refactor remove_uncleaned_slots to reduce memory consumption (#2954) * Refactor remove_uncleaned_slots to reduce memory consumption * Avoid copying pubkeys --- accounts-db/src/accounts_db.rs | 164 ++++++++++----------------------- 1 file changed, 49 insertions(+), 115 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 367500156d2b18..b6a99f29aad7e8 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3069,32 +3069,24 @@ impl AccountsDb { .collect() } - /// Remove `slots` from `uncleaned_pubkeys` and collect all pubkeys - /// - /// For each slot in the list of uncleaned slots, remove it from the `uncleaned_pubkeys` Map - /// and collect all the pubkeys to return. 
- fn remove_uncleaned_slots_and_collect_pubkeys( - &self, - uncleaned_slots: Vec<Slot>, - ) -> Vec<Vec<Pubkey>> { - uncleaned_slots - .into_iter() - .filter_map(|uncleaned_slot| { - self.uncleaned_pubkeys - .remove(&uncleaned_slot) - .map(|(_removed_slot, removed_pubkeys)| removed_pubkeys) - }) - .collect() - } - - /// Remove uncleaned slots, up to a maximum slot, and return the collected pubkeys - /// - fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot( + /// For each slot in the list of uncleaned slots, up to a maximum + /// slot, remove it from the `uncleaned_pubkeys` and move all the + /// pubkeys to `candidates` for cleaning. + fn remove_uncleaned_slots_up_to_slot_and_move_pubkeys( &self, max_slot_inclusive: Slot, - ) -> Vec<Vec<Pubkey>> { + candidates: &[RwLock<HashMap<Pubkey, CleaningInfo>>], + ) { let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot_inclusive); - self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots) + for uncleaned_slot in uncleaned_slots.into_iter() { + if let Some((_removed_slot, removed_pubkeys)) = + self.uncleaned_pubkeys.remove(&uncleaned_slot) + { + for removed_pubkey in removed_pubkeys { + self.insert_pubkey(candidates, removed_pubkey); + } + } + } } fn count_pubkeys(candidates: &[RwLock<HashMap<Pubkey, CleaningInfo>>]) -> u64 { @@ -3104,10 +3096,19 @@ impl AccountsDb { .sum::<usize>() as u64 } - /// Construct a vec of pubkeys for cleaning from: - /// uncleaned_pubkeys - the delta set of updated pubkeys in rooted slots from the last clean - /// dirty_stores - set of stores which had accounts removed or recently rooted - /// returns the minimum slot we encountered + fn insert_pubkey(&self, candidates: &[RwLock<HashMap<Pubkey, CleaningInfo>>], pubkey: Pubkey) { + let index = self.accounts_index.bin_calculator.bin_from_pubkey(&pubkey); + let mut candidates_bin = candidates[index].write().unwrap(); + candidates_bin.insert(pubkey, CleaningInfo::default()); + } + + /// Construct a list of candidates for cleaning from: + /// - dirty_stores -- set of stores which had accounts + /// removed or recently rooted; + /// - uncleaned_pubkeys -- the delta set of updated pubkeys in + /// rooted slots from the last clean. + /// + /// The function also returns the minimum slot we encountered.
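// A short usage sketch of the helpers above, with the generic parameters written
// out in full: `candidates` is one `HashMap<Pubkey, CleaningInfo>` per index bin,
// each behind its own `RwLock`. The setup mirrors the tests later in this patch
// and is assumed to run inside an `AccountsDb` method (hence `self`), with
// `max_slot_inclusive` already in scope.
let num_bins = self.accounts_index.bins();
let candidates: Box<[RwLock<HashMap<Pubkey, CleaningInfo>>]> =
    std::iter::repeat_with(|| RwLock::new(HashMap::<Pubkey, CleaningInfo>::new()))
        .take(num_bins)
        .collect();
// Drain `uncleaned_pubkeys` up to the root, routing each key into its bin without
// cloning it, then read the total back:
self.remove_uncleaned_slots_up_to_slot_and_move_pubkeys(max_slot_inclusive, &candidates);
let total_candidates = Self::count_pubkeys(&candidates);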
fn construct_candidate_clean_keys( &self, max_clean_root_inclusive: Option, @@ -3138,12 +3139,6 @@ impl AccountsDb { std::iter::repeat_with(|| RwLock::new(HashMap::::new())) .take(num_bins) .collect(); - - let insert_pubkey = |pubkey: &Pubkey| { - let index = self.accounts_index.bin_calculator.bin_from_pubkey(pubkey); - let mut candidates_bin = candidates[index].write().unwrap(); - candidates_bin.insert(*pubkey, CleaningInfo::default()); - }; let dirty_ancient_stores = AtomicUsize::default(); let mut dirty_store_routine = || { let chunk_size = 1.max(dirty_stores_len.saturating_div(rayon::current_num_threads())); @@ -3156,7 +3151,9 @@ impl AccountsDb { dirty_ancient_stores.fetch_add(1, Ordering::Relaxed); } oldest_dirty_slot = oldest_dirty_slot.min(*slot); - store.accounts.scan_pubkeys(insert_pubkey); + store + .accounts + .scan_pubkeys(|pubkey| self.insert_pubkey(&candidates, *pubkey)); }); oldest_dirty_slot }) @@ -3186,25 +3183,14 @@ impl AccountsDb { timings.dirty_ancient_stores = dirty_ancient_stores.load(Ordering::Relaxed); let mut collect_delta_keys = Measure::start("key_create"); - let delta_keys = - self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot_inclusive); + self.remove_uncleaned_slots_up_to_slot_and_move_pubkeys(max_slot_inclusive, &candidates); collect_delta_keys.stop(); timings.collect_delta_keys_us += collect_delta_keys.as_us(); - let mut delta_insert = Measure::start("delta_insert"); - self.thread_pool_clean.install(|| { - delta_keys.par_iter().for_each(|keys| { - for key in keys { - insert_pubkey(key); - } - }); - }); - delta_insert.stop(); - timings.delta_insert_us += delta_insert.as_us(); - timings.delta_key_count = Self::count_pubkeys(&candidates); - // Check if we should purge any of the zero_lamport_accounts_to_purge_later, based on the + // Check if we should purge any of the + // zero_lamport_accounts_to_purge_later, based on the // latest_full_snapshot_slot. 
let latest_full_snapshot_slot = self.latest_full_snapshot_slot(); assert!( @@ -3217,7 +3203,7 @@ impl AccountsDb { let is_candidate_for_clean = max_slot_inclusive >= *slot && latest_full_snapshot_slot >= *slot; if is_candidate_for_clean { - insert_pubkey(pubkey); + self.insert_pubkey(&candidates, *pubkey); } !is_candidate_for_clean }); @@ -14810,64 +14796,6 @@ pub mod tests { assert_eq!(uncleaned_slots3, [slot1, slot2, slot3]); } - #[test] - fn test_remove_uncleaned_slots_and_collect_pubkeys() { - solana_logger::setup(); - let db = AccountsDb::new_single_for_tests(); - - let slot1 = 11; - let slot2 = 222; - let slot3 = 3333; - - let pubkey1 = Pubkey::new_unique(); - let pubkey2 = Pubkey::new_unique(); - let pubkey3 = Pubkey::new_unique(); - - let account1 = AccountSharedData::new(0, 0, &pubkey1); - let account2 = AccountSharedData::new(0, 0, &pubkey2); - let account3 = AccountSharedData::new(0, 0, &pubkey3); - - db.store_for_tests(slot1, &[(&pubkey1, &account1)]); - db.store_for_tests(slot2, &[(&pubkey2, &account2)]); - db.store_for_tests(slot3, &[(&pubkey3, &account3)]); - - db.add_root(slot1); - // slot 2 is _not_ a root on purpose - db.add_root(slot3); - - db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]); - db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]); - db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]); - - let uncleaned_pubkeys1 = db - .remove_uncleaned_slots_and_collect_pubkeys(vec![slot1]) - .into_iter() - .flatten() - .collect::>(); - let uncleaned_pubkeys2 = db - .remove_uncleaned_slots_and_collect_pubkeys(vec![slot2]) - .into_iter() - .flatten() - .collect::>(); - let uncleaned_pubkeys3 = db - .remove_uncleaned_slots_and_collect_pubkeys(vec![slot3]) - .into_iter() - .flatten() - .collect::>(); - - assert!(uncleaned_pubkeys1.contains(&pubkey1)); - assert!(!uncleaned_pubkeys1.contains(&pubkey2)); - assert!(!uncleaned_pubkeys1.contains(&pubkey3)); - - assert!(!uncleaned_pubkeys2.contains(&pubkey1)); - assert!(uncleaned_pubkeys2.contains(&pubkey2)); - assert!(!uncleaned_pubkeys2.contains(&pubkey3)); - - assert!(!uncleaned_pubkeys3.contains(&pubkey1)); - assert!(!uncleaned_pubkeys3.contains(&pubkey2)); - assert!(uncleaned_pubkeys3.contains(&pubkey3)); - } - #[test] fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() { solana_logger::setup(); @@ -14897,15 +14825,21 @@ pub mod tests { db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]); db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]); - let uncleaned_pubkeys = db - .remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(slot3) - .into_iter() - .flatten() - .collect::>(); + let num_bins = db.accounts_index.bins(); + let candidates: Box<_> = + std::iter::repeat_with(|| RwLock::new(HashMap::::new())) + .take(num_bins) + .collect(); + db.remove_uncleaned_slots_up_to_slot_and_move_pubkeys(slot3, &candidates); - assert!(uncleaned_pubkeys.contains(&pubkey1)); - assert!(uncleaned_pubkeys.contains(&pubkey2)); - assert!(uncleaned_pubkeys.contains(&pubkey3)); + let candidates_contain = |pubkey: &Pubkey| { + candidates + .iter() + .any(|bin| bin.read().unwrap().contains(pubkey)) + }; + assert!(candidates_contain(&pubkey1)); + assert!(candidates_contain(&pubkey2)); + assert!(candidates_contain(&pubkey3)); } #[test] From e615270d4f8aed7182386a39b03eade48a5c6055 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Tue, 24 Sep 2024 13:06:13 -0400 Subject: [PATCH 380/529] Add candidates by sorted keys, minimizing number of lock acquires (#2965) Add candidates by sorted keys, decreasing number of lock acquires This is a minor optimization of 
how new cleaning candidates are created in the candidates list. The candidate bin lock is acquired only once for each bin because the pubkeys are sorted by bins, and previously unlocked bin is reused until pubkeys from a new bin start being added to the candidates list. --- accounts-db/src/accounts_db.rs | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b6a99f29aad7e8..162ce25cede85e 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3079,11 +3079,34 @@ impl AccountsDb { ) { let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot_inclusive); for uncleaned_slot in uncleaned_slots.into_iter() { - if let Some((_removed_slot, removed_pubkeys)) = + if let Some((_removed_slot, mut removed_pubkeys)) = self.uncleaned_pubkeys.remove(&uncleaned_slot) { - for removed_pubkey in removed_pubkeys { - self.insert_pubkey(candidates, removed_pubkey); + // Sort all keys by bin index so that we can insert + // them in `candidates` more efficiently. + removed_pubkeys.sort_by(|a, b| { + self.accounts_index + .bin_calculator + .bin_from_pubkey(a) + .cmp(&self.accounts_index.bin_calculator.bin_from_pubkey(b)) + }); + if let Some(first_removed_pubkey) = removed_pubkeys.first() { + let mut prev_bin = self + .accounts_index + .bin_calculator + .bin_from_pubkey(first_removed_pubkey); + let mut candidates_bin = candidates[prev_bin].write().unwrap(); + for removed_pubkey in removed_pubkeys { + let curr_bin = self + .accounts_index + .bin_calculator + .bin_from_pubkey(&removed_pubkey); + if curr_bin != prev_bin { + candidates_bin = candidates[curr_bin].write().unwrap(); + prev_bin = curr_bin; + } + candidates_bin.insert(removed_pubkey, CleaningInfo::default()); + } } } } From 62d0f2518b048d9de43d14fc3c9eaf9200f25991 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Tue, 24 Sep 2024 13:22:12 -0500 Subject: [PATCH 381/529] fix comments (#2972) Co-authored-by: HaoranYi --- accounts-db/src/pubkey_bins.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts-db/src/pubkey_bins.rs b/accounts-db/src/pubkey_bins.rs index ec1d2ad62ea880..ea1a412530efc4 100644 --- a/accounts-db/src/pubkey_bins.rs +++ b/accounts-db/src/pubkey_bins.rs @@ -2,7 +2,7 @@ use solana_sdk::pubkey::Pubkey; #[derive(Debug)] pub struct PubkeyBinCalculator24 { - // how many bits from the first 2 bytes to shift away to ignore when calculating bin + // how many bits from the first 3 bytes to shift away to ignore when calculating bin shift_bits: u32, } From 6b9f569d198ede06e7e28e0d83b275fa66e05b99 Mon Sep 17 00:00:00 2001 From: 4r33x Date: Tue, 24 Sep 2024 21:49:36 +0300 Subject: [PATCH 382/529] bump blake3 1.5.1 -> 1.5.4 (#2959) * bump blake3 1.5.1 -> 1.5.4 * bump blake3 1.5.1 -> 1.5.4 --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- sdk/program/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a29c3af93cb5dd..cd4377a2bdb21b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -943,9 +943,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.1" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" dependencies = [ "arrayref", "arrayvec", diff --git a/Cargo.toml 
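// A standalone sketch of the lock-amortization trick from the
// `remove_uncleaned_slots_up_to_slot_and_move_pubkeys` revision above: keys are
// sorted by bin index first, so each bin's write lock is taken once per run of
// same-bin keys instead of once per key. `bin_of` stands in for
// `accounts_index.bin_calculator.bin_from_pubkey`; `std::sync::RwLock`,
// `std::collections::HashMap`, `Pubkey`, and `CleaningInfo` are assumed in scope
// as in the diff.
fn move_keys_into_bins(
    mut keys: Vec<Pubkey>,
    candidates: &[RwLock<HashMap<Pubkey, CleaningInfo>>],
    bin_of: impl Fn(&Pubkey) -> usize,
) {
    // Grouping keys by bin makes each lock acquisition cover a whole run of keys.
    keys.sort_by(|a, b| bin_of(a).cmp(&bin_of(b)));
    let Some(first) = keys.first() else { return };
    let mut prev_bin = bin_of(first);
    let mut candidates_bin = candidates[prev_bin].write().unwrap();
    for key in keys {
        let curr_bin = bin_of(&key);
        if curr_bin != prev_bin {
            // Reassigning the guard drops the previous bin's lock before taking the next.
            candidates_bin = candidates[curr_bin].write().unwrap();
            prev_bin = curr_bin;
        }
        candidates_bin.insert(key, CleaningInfo::default());
    }
}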
b/Cargo.toml index 7fdd1c26c0a40a..3f22cf63d50ca6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -205,7 +205,7 @@ backoff = "0.4.0" base64 = "0.22.1" bincode = "1.3.3" bitflags = { version = "2.6.0" } -blake3 = "1.5.1" +blake3 = "1.5.4" borsh = { version = "1.5.1", features = ["derive", "unstable__schema"] } bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index fbd6e981fe7fa6..05e8a5b9403ad9 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -693,9 +693,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.1" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" dependencies = [ "arrayref", "arrayvec", diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 51a8367dfa83f0..dd8b6aaa9ae227 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -13,7 +13,7 @@ rust-version = "1.79.0" # solana platform-tools rust ve [dependencies] bincode = { workspace = true } -blake3 = { workspace = true, features = ["digest", "traits-preview"] } +blake3 = { workspace = true, features = ["traits-preview"] } borsh = { workspace = true, optional = true } borsh0-10 = { package = "borsh", version = "0.10.3", optional = true } bs58 = { workspace = true, features = ["alloc"] } From 192ba91455dc9ab50c6c5b00388c67488528266d Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Wed, 25 Sep 2024 00:20:59 -0700 Subject: [PATCH 383/529] svm: better test coverage for fee-only (#2912) --- svm/tests/integration_test.rs | 41 ++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 53d9d04f445183..2e63865d1ebead 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -524,9 +524,14 @@ fn program_medley() -> Vec { vec![test_entry] } -fn simple_transfer() -> Vec { +fn simple_transfer(enable_fee_only_transactions: bool) -> Vec { let mut test_entry = SvmTestEntry::default(); let transfer_amount = LAMPORTS_PER_SOL; + if enable_fee_only_transactions { + test_entry + .enabled_features + .push(feature_set::enable_transaction_loading_failure_fees::id()); + } // 0: a transfer that succeeds { @@ -578,7 +583,7 @@ fn simple_transfer() -> Vec { test_entry.decrease_expected_lamports(&source, LAMPORTS_PER_SIGNATURE); } - // 2: a non-executable transfer that fails before loading + // 2: a non-processable transfer that fails before loading { test_entry.transaction_batch.push(TransactionBatchItem { transaction: system_transaction::transfer( @@ -592,8 +597,7 @@ fn simple_transfer() -> Vec { }); } - // 3: a non-executable transfer that fails loading the fee-payer - // NOTE when we support the processed/executed distinction, this is NOT processed + // 3: a non-processable transfer that fails loading the fee-payer { test_entry.push_transaction_with_status( system_transaction::transfer( @@ -606,9 +610,7 @@ fn simple_transfer() -> Vec { ); } - // 4: a non-executable transfer that fails loading the program - // NOTE when we support the processed/executed distinction, this IS processed - // thus this test case will fail with the feature enabled + // 4: a processable non-executable transfer that fails loading the program { let source_keypair = Keypair::new(); let source 
= source_keypair.pubkey(); @@ -625,6 +627,13 @@ fn simple_transfer() -> Vec { system_instruction::transfer(&source, &Pubkey::new_unique(), transfer_amount); instruction.program_id = Pubkey::new_unique(); + let expected_status = if enable_fee_only_transactions { + test_entry.decrease_expected_lamports(&source, LAMPORTS_PER_SIGNATURE); + ExecutionStatus::ProcessedFailed + } else { + ExecutionStatus::Discarded + }; + test_entry.push_transaction_with_status( Transaction::new_signed_with_payer( &[instruction], @@ -632,17 +641,14 @@ fn simple_transfer() -> Vec { &[&source_keypair], Hash::default(), ), - ExecutionStatus::Discarded, + expected_status, ); } vec![test_entry] } -fn simple_nonce_fee_only( - enable_fee_only_transactions: bool, - fee_paying_nonce: bool, -) -> Vec { +fn simple_nonce(enable_fee_only_transactions: bool, fee_paying_nonce: bool) -> Vec { let mut test_entry = SvmTestEntry::default(); if enable_fee_only_transactions { test_entry @@ -842,11 +848,12 @@ fn simple_nonce_fee_only( } #[test_case(program_medley())] -#[test_case(simple_transfer())] -#[test_case(simple_nonce_fee_only(false, false))] -#[test_case(simple_nonce_fee_only(true, false))] -#[test_case(simple_nonce_fee_only(false, true))] -#[test_case(simple_nonce_fee_only(true, true))] +#[test_case(simple_transfer(false))] +#[test_case(simple_transfer(true))] +#[test_case(simple_nonce(false, false))] +#[test_case(simple_nonce(true, false))] +#[test_case(simple_nonce(false, true))] +#[test_case(simple_nonce(true, true))] fn svm_integration(test_entries: Vec) { for test_entry in test_entries { execute_test_entry(test_entry); From b4ae724e37bc6a387e692cbe7bd65222b886a798 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Wed, 25 Sep 2024 17:18:36 +0900 Subject: [PATCH 384/529] [secp256k1] Remove old `libsecp256k1_fail_on_bad_count` features (#2795) --- Cargo.lock | 6 +++--- sdk/src/secp256k1_instruction.rs | 12 +++--------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd4377a2bdb21b..b60b5f18b34b32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5079,7 +5079,7 @@ dependencies = [ "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", - "webpki-roots 0.26.5", + "webpki-roots 0.26.6", "winapi 0.3.9", ] @@ -9981,9 +9981,9 @@ checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index 825e9bdbd03d5d..af7681672ba147 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -791,9 +791,7 @@ use { crate::{instruction::Instruction, precompiles::PrecompileError}, digest::Digest, serde_derive::{Deserialize, Serialize}, - solana_feature_set::{ - libsecp256k1_fail_on_bad_count, libsecp256k1_fail_on_bad_count2, FeatureSet, - }, + solana_feature_set::FeatureSet, }; pub const HASHED_PUBKEY_SERIALIZED_SIZE: usize = 20; @@ -926,17 +924,13 @@ pub fn construct_eth_pubkey( pub fn verify( data: &[u8], instruction_datas: &[&[u8]], - feature_set: &FeatureSet, + _feature_set: &FeatureSet, ) -> Result<(), PrecompileError> { if data.is_empty() { return Err(PrecompileError::InvalidInstructionDataSize); } let count = data[0] as 
usize; - if (feature_set.is_active(&libsecp256k1_fail_on_bad_count::id()) - || feature_set.is_active(&libsecp256k1_fail_on_bad_count2::id())) - && count == 0 - && data.len() > 1 - { + if count == 0 && data.len() > 1 { // count is zero but the instruction data indicates that is probably not // correct, fail the instruction to catch probable invalid secp256k1 // instruction construction. From 66b99267eaee2aae91cc07b4f46260b516ce68ff Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 25 Sep 2024 13:19:46 +0400 Subject: [PATCH 385/529] extract serialize-utils crate (#2926) * extract serialize-utils crate * use newer borsh in tests * Trigger Build --- Cargo.lock | 14 +++++++++ Cargo.toml | 3 ++ programs/sbf/Cargo.lock | 10 +++++++ sdk/program/Cargo.toml | 3 +- sdk/program/src/lib.rs | 3 +- sdk/serialize-utils/Cargo.toml | 30 +++++++++++++++++++ .../src}/cursor.rs | 29 ++++++++---------- .../mod.rs => serialize-utils/src/lib.rs} | 2 +- 8 files changed, 74 insertions(+), 20 deletions(-) create mode 100644 sdk/serialize-utils/Cargo.toml rename sdk/{program/src/serialize_utils => serialize-utils/src}/cursor.rs (81%) rename sdk/{program/src/serialize_utils/mod.rs => serialize-utils/src/lib.rs} (96%) diff --git a/Cargo.lock b/Cargo.lock index b60b5f18b34b32..d88d25b668dad6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7229,6 +7229,7 @@ dependencies = [ "solana-sdk-macro", "solana-secp256k1-recover", "solana-serde-varint", + "solana-serialize-utils", "solana-sha256-hasher", "solana-short-vec", "static_assertions", @@ -7839,6 +7840,19 @@ dependencies = [ "solana-short-vec", ] +[[package]] +name = "solana-serialize-utils" +version = "2.1.0" +dependencies = [ + "bincode", + "borsh 1.5.1", + "rand 0.8.5", + "serde", + "solana-instruction", + "solana-pubkey", + "solana-sanitize", +] + [[package]] name = "solana-sha256-hasher" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 3f22cf63d50ca6..bcad7d79f20d56 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,6 +120,7 @@ members = [ "sdk/program-option", "sdk/pubkey", "sdk/serde-varint", + "sdk/serialize-utils", "sdk/sha256-hasher", "sdk/signature", "send-transaction-service", @@ -207,6 +208,7 @@ bincode = "1.3.3" bitflags = { version = "2.6.0" } blake3 = "1.5.4" borsh = { version = "1.5.1", features = ["derive", "unstable__schema"] } +borsh0-10 = { package = "borsh", version = "0.10.3" } bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" @@ -433,6 +435,7 @@ solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" } solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-features = false } solana-sanitize = { path = "sanitize", version = "=2.1.0" } solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } +solana-serialize-utils = { path = "sdk/serialize-utils", version = "=2.1.0" } solana-sha256-hasher = { path = "sdk/sha256-hasher", version = "=2.1.0" } solana-signature = { path = "sdk/signature", version = "=2.1.0", default-features = false } solana-timings = { path = "timings", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 05e8a5b9403ad9..5c20c5febd509c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5640,6 +5640,7 @@ dependencies = [ "solana-sdk-macro", "solana-secp256k1-recover", "solana-serde-varint", + "solana-serialize-utils", "solana-sha256-hasher", "solana-short-vec", "thiserror", @@ -6621,6 +6622,15 @@ dependencies = [ "serde", ] +[[package]] +name = "solana-serialize-utils" 
+version = "2.1.0" +dependencies = [ + "solana-instruction", + "solana-pubkey", + "solana-sanitize", +] + [[package]] name = "solana-sha256-hasher" version = "2.1.0" diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index dd8b6aaa9ae227..345ad1348c491b 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -15,7 +15,7 @@ rust-version = "1.79.0" # solana platform-tools rust ve bincode = { workspace = true } blake3 = { workspace = true, features = ["traits-preview"] } borsh = { workspace = true, optional = true } -borsh0-10 = { package = "borsh", version = "0.10.3", optional = true } +borsh0-10 = { workspace = true, optional = true } bs58 = { workspace = true, features = ["alloc"] } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } @@ -55,6 +55,7 @@ solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } solana-serde-varint = { workspace = true } +solana-serialize-utils = { workspace = true } solana-sha256-hasher = { workspace = true, features = ["sha2"] } solana-short-vec = { workspace = true } thiserror = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 8488c00daf937f..f57b034f2ee208 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -513,7 +513,6 @@ pub mod program_stubs; pub mod program_utils; pub mod rent; pub mod secp256k1_program; -pub mod serialize_utils; pub mod slot_hashes; pub mod slot_history; pub mod stable_layout; @@ -534,6 +533,8 @@ pub use solana_sanitize as sanitize; pub use solana_secp256k1_recover as secp256k1_recover; #[deprecated(since = "2.1.0", note = "Use `solana-serde-varint` crate instead")] pub use solana_serde_varint as serde_varint; +#[deprecated(since = "2.1.0", note = "Use `solana-serialize-utils` crate instead")] +pub use solana_serialize_utils as serialize_utils; #[deprecated(since = "2.1.0", note = "Use `solana-short-vec` crate instead")] pub use solana_short_vec as short_vec; #[cfg(target_arch = "wasm32")] diff --git a/sdk/serialize-utils/Cargo.toml b/sdk/serialize-utils/Cargo.toml new file mode 100644 index 00000000000000..ea37c89b218ab0 --- /dev/null +++ b/sdk/serialize-utils/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "solana-serialize-utils" +description = "Solana helpers for reading and writing bytes." 
+documentation = "https://docs.rs/solana-serialize-utils" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-instruction = { workspace = true, default-features = false, features = [ + "std", +] } +solana-pubkey = { workspace = true, default-features = false } +solana-sanitize = { workspace = true } + +[dev-dependencies] +bincode = { workspace = true } +borsh = { workspace = true } +rand = { workspace = true } +serde = { workspace = true } +solana-pubkey = { workspace = true, default-features = false, features = [ + "borsh", + "serde", +] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/serialize_utils/cursor.rs b/sdk/serialize-utils/src/cursor.rs similarity index 81% rename from sdk/program/src/serialize_utils/cursor.rs rename to sdk/serialize-utils/src/cursor.rs index 3d4dedd092ed3a..5e4639281ad388 100644 --- a/sdk/program/src/serialize_utils/cursor.rs +++ b/sdk/serialize-utils/src/cursor.rs @@ -1,15 +1,13 @@ use { - crate::{ - instruction::InstructionError, - pubkey::{Pubkey, PUBKEY_BYTES}, - }, + solana_instruction::error::InstructionError, + solana_pubkey::{Pubkey, PUBKEY_BYTES}, std::{ io::{BufRead as _, Cursor, Read}, ptr, }, }; -pub(crate) fn read_u8>(cursor: &mut Cursor) -> Result { +pub fn read_u8>(cursor: &mut Cursor) -> Result { let mut buf = [0; 1]; cursor .read_exact(&mut buf) @@ -18,7 +16,7 @@ pub(crate) fn read_u8>(cursor: &mut Cursor) -> Result>(cursor: &mut Cursor) -> Result { +pub fn read_u32>(cursor: &mut Cursor) -> Result { let mut buf = [0; 4]; cursor .read_exact(&mut buf) @@ -27,7 +25,7 @@ pub(crate) fn read_u32>(cursor: &mut Cursor) -> Result>(cursor: &mut Cursor) -> Result { +pub fn read_u64>(cursor: &mut Cursor) -> Result { let mut buf = [0; 8]; cursor .read_exact(&mut buf) @@ -36,7 +34,7 @@ pub(crate) fn read_u64>(cursor: &mut Cursor) -> Result>( +pub fn read_option_u64>( cursor: &mut Cursor, ) -> Result, InstructionError> { let variant = read_u8(cursor)?; @@ -47,7 +45,7 @@ pub(crate) fn read_option_u64>( } } -pub(crate) fn read_i64>(cursor: &mut Cursor) -> Result { +pub fn read_i64>(cursor: &mut Cursor) -> Result { let mut buf = [0; 8]; cursor .read_exact(&mut buf) @@ -56,7 +54,7 @@ pub(crate) fn read_i64>(cursor: &mut Cursor) -> Result, pubkey: *mut Pubkey, ) -> Result<(), InstructionError> { @@ -77,9 +75,7 @@ pub(crate) fn read_pubkey_into( Ok(()) } -pub(crate) fn read_pubkey>( - cursor: &mut Cursor, -) -> Result { +pub fn read_pubkey>(cursor: &mut Cursor) -> Result { let mut buf = [0; 32]; cursor .read_exact(&mut buf) @@ -88,7 +84,7 @@ pub(crate) fn read_pubkey>( Ok(Pubkey::from(buf)) } -pub(crate) fn read_bool>(cursor: &mut Cursor) -> Result { +pub fn read_bool>(cursor: &mut Cursor) -> Result { let byte = read_u8(cursor)?; match byte { 0 => Ok(false), @@ -97,7 +93,6 @@ pub(crate) fn read_bool>(cursor: &mut Cursor) -> Result( + fn test_read( reader: fn(&mut Cursor>) -> Result, test_value: T, ) { @@ -166,7 +161,7 @@ mod test { let mut cursor = Cursor::new(bincode_bytes); let bincode_read = reader(&mut cursor).unwrap(); - let borsh_bytes = borsh0_10::to_vec(&test_value).unwrap(); + let borsh_bytes = borsh::to_vec(&test_value).unwrap(); let mut cursor = Cursor::new(borsh_bytes); let borsh_read = reader(&mut cursor).unwrap(); diff --git a/sdk/program/src/serialize_utils/mod.rs b/sdk/serialize-utils/src/lib.rs similarity index 96% rename from 
sdk/program/src/serialize_utils/mod.rs rename to sdk/serialize-utils/src/lib.rs index 6b1396f3481d5c..93c70713c29c70 100644 --- a/sdk/program/src/serialize_utils/mod.rs +++ b/sdk/serialize-utils/src/lib.rs @@ -1,7 +1,7 @@ //! Helpers for reading and writing bytes. #![allow(clippy::arithmetic_side_effects)] -use {crate::pubkey::Pubkey, solana_sanitize::SanitizeError}; +use {solana_pubkey::Pubkey, solana_sanitize::SanitizeError}; pub mod cursor; From 6f5be6ceb78b9a4d89443b8ee05f2866e71fa9e2 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 25 Sep 2024 13:32:58 +0400 Subject: [PATCH 386/529] Extract account-info crate (#2429) * extract account-info crate * make bincode optional in account-info crate * update lock file * update doc examples * remove solana-program from doc examples * remove solana-program from dev deps * fmt * move MAX_PERMITTED_DATA_INCREASE to account-info crate --- Cargo.lock | 12 +++++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 12 +++++ sdk/account-info/Cargo.toml | 23 ++++++++++ .../src/debug_account_data.rs | 0 .../src/lib.rs} | 46 ++++++++++--------- sdk/program/Cargo.toml | 1 + sdk/program/src/entrypoint.rs | 5 +- sdk/program/src/lib.rs | 8 ++-- 9 files changed, 79 insertions(+), 30 deletions(-) create mode 100644 sdk/account-info/Cargo.toml rename sdk/{program => account-info}/src/debug_account_data.rs (100%) rename sdk/{program/src/account_info.rs => account-info/src/lib.rs} (94%) diff --git a/Cargo.lock b/Cargo.lock index d88d25b668dad6..c0c981aed1f905 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5634,6 +5634,17 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-account-info" +version = "2.1.0" +dependencies = [ + "bincode", + "serde", + "solana-program-error", + "solana-program-memory", + "solana-pubkey", +] + [[package]] name = "solana-accounts-bench" version = "2.1.0" @@ -7211,6 +7222,7 @@ dependencies = [ "serial_test", "sha2 0.10.8", "sha3", + "solana-account-info", "solana-atomic-u64", "solana-clock", "solana-decode-error", diff --git a/Cargo.toml b/Cargo.toml index bcad7d79f20d56..52dae95fe98cd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,6 +101,7 @@ members = [ "runtime-transaction", "sanitize", "sdk", + "sdk/account-info", "sdk/atomic-u64", "sdk/cargo-build-sbf", "sdk/cargo-test-sbf", @@ -359,6 +360,7 @@ smpl_jwt = "0.7.1" socket2 = "0.5.7" soketto = "0.7" solana-account-decoder = { path = "account-decoder", version = "=2.1.0" } +solana-account-info = { path = "sdk/account-info", version = "=2.1.0" } solana-accounts-db = { path = "accounts-db", version = "=2.1.0" } solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=2.1.0" } solana-atomic-u64 = { path = "sdk/atomic-u64", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5c20c5febd509c..8e468912203892 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4704,6 +4704,17 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-account-info" +version = "2.1.0" +dependencies = [ + "bincode", + "serde", + "solana-program-error", + "solana-program-memory", + "solana-pubkey", +] + [[package]] name = "solana-accounts-db" version = "2.1.0" @@ -5625,6 +5636,7 @@ dependencies = [ "serde_derive", "sha2 0.10.8", "sha3", + "solana-account-info", "solana-atomic-u64", "solana-clock", "solana-decode-error", diff --git a/sdk/account-info/Cargo.toml b/sdk/account-info/Cargo.toml new file mode 100644 index 00000000000000..55dc3d8a30ef44 --- /dev/null +++ b/sdk/account-info/Cargo.toml @@ -0,0 +1,23 @@ +[package] 
+name = "solana-account-info" +description = "Solana AccountInfo and related definitions." +documentation = "https://docs.rs/solana-account-info" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bincode = { workspace = true, optional = true } +serde = { workspace = true, optional = true } +solana-program-error = { workspace = true } +solana-program-memory = { workspace = true } +solana-pubkey = { workspace = true, default-features = false } + +[features] +bincode = ["dep:bincode", "dep:serde"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/debug_account_data.rs b/sdk/account-info/src/debug_account_data.rs similarity index 100% rename from sdk/program/src/debug_account_data.rs rename to sdk/account-info/src/debug_account_data.rs diff --git a/sdk/program/src/account_info.rs b/sdk/account-info/src/lib.rs similarity index 94% rename from sdk/program/src/account_info.rs rename to sdk/account-info/src/lib.rs index fb7614903673b8..aed2b36669ad86 100644 --- a/sdk/program/src/account_info.rs +++ b/sdk/account-info/src/lib.rs @@ -1,12 +1,9 @@ //! Account information. use { - crate::{ - debug_account_data::*, entrypoint::MAX_PERMITTED_DATA_INCREASE, - program_error::ProgramError, pubkey::Pubkey, - }, - solana_clock::Epoch, + solana_program_error::ProgramError, solana_program_memory::sol_memset, + solana_pubkey::Pubkey, std::{ cell::{Ref, RefCell, RefMut}, fmt, @@ -14,6 +11,10 @@ use { slice::from_raw_parts_mut, }, }; +pub mod debug_account_data; + +/// Maximum number of bytes a program may add to an account during a single realloc +pub const MAX_PERMITTED_DATA_INCREASE: usize = 1_024 * 10; /// Account information #[derive(Clone)] @@ -28,7 +29,7 @@ pub struct AccountInfo<'a> { /// Program that owns this account pub owner: &'a Pubkey, /// The epoch at which this account will next owe rent - pub rent_epoch: Epoch, + pub rent_epoch: u64, /// Was the transaction signed by this account's public key? pub is_signer: bool, /// Is the account writable? @@ -49,7 +50,7 @@ impl<'a> fmt::Debug for AccountInfo<'a> { .field("rent_epoch", &self.rent_epoch) .field("lamports", &self.lamports()) .field("data.len", &self.data_len()); - debug_account_data(&self.data.borrow(), &mut f); + debug_account_data::debug_account_data(&self.data.borrow(), &mut f); f.finish_non_exhaustive() } @@ -203,7 +204,7 @@ impl<'a> AccountInfo<'a> { data: &'a mut [u8], owner: &'a Pubkey, executable: bool, - rent_epoch: Epoch, + rent_epoch: u64, ) -> Self { Self { key, @@ -217,10 +218,12 @@ impl<'a> AccountInfo<'a> { } } + #[cfg(feature = "bincode")] pub fn deserialize_data(&self) -> Result { bincode::deserialize(&self.data.borrow()) } + #[cfg(feature = "bincode")] pub fn serialize_data(&self, state: &T) -> Result<(), bincode::Error> { if bincode::serialized_size(state)? > self.data_len() as u64 { return Err(Box::new(bincode::ErrorKind::SizeLimit)); @@ -242,7 +245,7 @@ impl<'a, T: IntoAccountInfo<'a>> From for AccountInfo<'a> { /// Provides information required to construct an `AccountInfo`, used in /// conversion implementations. 
pub trait Account { - fn get(&mut self) -> (&mut u64, &mut [u8], &Pubkey, bool, Epoch); + fn get(&mut self) -> (&mut u64, &mut [u8], &Pubkey, bool, u64); } /// Convert (&'a Pubkey, &'a mut T) where T: Account into an `AccountInfo` @@ -293,12 +296,10 @@ impl<'a, T: Account> IntoAccountInfo<'a> for &'a mut (Pubkey, T) { /// # Examples /// /// ``` -/// use solana_program::{ -/// account_info::{AccountInfo, next_account_info}, -/// entrypoint::ProgramResult, -/// pubkey::Pubkey, -/// }; -/// # use solana_program::program_error::ProgramError; +/// use solana_program_error::ProgramResult; +/// use solana_account_info::{AccountInfo, next_account_info}; +/// use solana_pubkey::Pubkey; +/// # use solana_program_error::ProgramError; /// /// pub fn process_instruction( /// program_id: &Pubkey, @@ -344,12 +345,10 @@ pub fn next_account_info<'a, 'b, I: Iterator>>( /// # Examples /// /// ``` -/// use solana_program::{ -/// account_info::{AccountInfo, next_account_info, next_account_infos}, -/// entrypoint::ProgramResult, -/// pubkey::Pubkey, -/// }; -/// # use solana_program::program_error::ProgramError; +/// use solana_program_error::ProgramResult; +/// use solana_account_info::{AccountInfo, next_account_info, next_account_infos}; +/// use solana_pubkey::Pubkey; +/// # use solana_program_error::ProgramError; /// /// pub fn process_instruction( /// program_id: &Pubkey, @@ -398,7 +397,10 @@ impl<'a> AsRef> for AccountInfo<'a> { #[cfg(test)] mod tests { - use super::*; + use { + super::*, + crate::debug_account_data::{Hex, MAX_DEBUG_ACCOUNT_DATA}, + }; #[test] fn test_next_account_infos() { diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 345ad1348c491b..6b0214ce1edcb3 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -31,6 +31,7 @@ serde_bytes = { workspace = true } serde_derive = { workspace = true } sha2 = { workspace = true } sha3 = { workspace = true } +solana-account-info = { workspace = true, features = ["bincode"] } solana-atomic-u64 = { workspace = true } solana-clock = { workspace = true, features = ["serde"] } solana-decode-error = { workspace = true } diff --git a/sdk/program/src/entrypoint.rs b/sdk/program/src/entrypoint.rs index c0353a8dc94b05..f360d5ef2b2ece 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program/src/entrypoint.rs @@ -5,7 +5,6 @@ //! 
[`bpf_loader`]: crate::bpf_loader
 
 extern crate alloc;
-pub use solana_program_error::ProgramResult;
 use {
     crate::{account_info::AccountInfo, pubkey::Pubkey},
     alloc::vec::Vec,
@@ -18,6 +17,7 @@ use {
         slice::{from_raw_parts, from_raw_parts_mut},
     },
 };
+pub use {solana_account_info::MAX_PERMITTED_DATA_INCREASE, solana_program_error::ProgramResult};
 
 /// User implemented function to process an instruction
 ///
@@ -310,9 +310,6 @@ unsafe impl std::alloc::GlobalAlloc for BumpAllocator {
     }
 }
 
-/// Maximum number of bytes a program may add to an account during a single realloc
-pub const MAX_PERMITTED_DATA_INCREASE: usize = 1_024 * 10;
-
 /// `assert_eq(std::mem::align_of::<u128>(), 8)` is true for BPF but not for some host machines
 pub const BPF_ALIGN_OF_U128: usize = 8;
 
diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs
index f57b034f2ee208..d5da36d7c6dcdf 100644
--- a/sdk/program/src/lib.rs
+++ b/sdk/program/src/lib.rs
@@ -469,7 +469,6 @@
 // Allows macro expansion of `use ::solana_program::*` to work within this crate
 extern crate self as solana_program;
 
-pub mod account_info;
 pub mod address_lookup_table;
 pub mod big_mod_exp;
 pub mod blake3;
@@ -483,7 +482,6 @@ pub mod bpf_loader;
 pub mod bpf_loader_deprecated;
 pub mod bpf_loader_upgradeable;
 pub mod compute_units;
-pub mod debug_account_data;
 pub mod ed25519_program;
 pub mod entrypoint;
 pub mod entrypoint_deprecated;
@@ -540,8 +538,10 @@ pub use solana_short_vec as short_vec;
 #[cfg(target_arch = "wasm32")]
 pub use wasm_bindgen::prelude::wasm_bindgen;
 pub use {
-    solana_clock as clock, solana_msg::msg, solana_program_option as program_option,
-    solana_pubkey as pubkey,
+    solana_account_info::{self as account_info, debug_account_data},
+    solana_clock as clock,
+    solana_msg::msg,
+    solana_program_option as program_option, solana_pubkey as pubkey,
 };
 
 /// The [config native program][np].

From ba03bd1f8f989923555e8ca045b05bb0a6479d5a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 25 Sep 2024 09:46:10 +0000
Subject: [PATCH 387/529] build(deps): bump libc from 0.2.158 to 0.2.159 (#2977)

* build(deps): bump libc from 0.2.158 to 0.2.159

Bumps [libc](https://github.com/rust-lang/libc) from 0.2.158 to 0.2.159.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.159/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.158...0.2.159)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0c981aed1f905..d4e7c68957a087 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3314,9 +3314,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" diff --git a/Cargo.toml b/Cargo.toml index 52dae95fe98cd9..15c81aa44f86b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,7 +288,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" lazy_static = "1.5.0" -libc = "0.2.158" +libc = "0.2.159" libloading = "0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 8e468912203892..b26fb56d7aa91a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2636,9 +2636,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" From 56672cd01f76eb39d5766566ed103d3c20864cf2 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 25 Sep 2024 06:30:40 -0500 Subject: [PATCH 388/529] generic get nonce and ix signers (#2826) --- Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + runtime/Cargo.toml | 1 + runtime/src/bank/check_transactions.rs | 12 +- runtime/src/lib.rs | 1 + runtime/src/nonce_extraction.rs | 345 +++++++++++++++++++++++++ sdk/program/src/program_utils.rs | 8 +- 7 files changed, 362 insertions(+), 7 deletions(-) create mode 100644 runtime/src/nonce_extraction.rs diff --git a/Cargo.lock b/Cargo.lock index d4e7c68957a087..018e12ddf437f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7680,6 +7680,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-perf", + "solana-program", "solana-program-runtime", "solana-rayon-threadlimit", "solana-runtime", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b26fb56d7aa91a..f9296e57150435 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6012,6 +6012,7 @@ dependencies = [ "solana-measure", "solana-metrics", "solana-perf", + "solana-program", "solana-program-runtime", "solana-rayon-threadlimit", "solana-runtime-transaction", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 583a79d0359081..849d583b01aa3c 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -67,6 +67,7 @@ solana-loader-v4-program = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-perf = { workspace = true } +solana-program = { workspace = true } solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-runtime-transaction = { workspace = true } diff --git 
a/runtime/src/bank/check_transactions.rs b/runtime/src/bank/check_transactions.rs index 0f0d70f15b07ab..7164d76fcb7440 100644 --- a/runtime/src/bank/check_transactions.rs +++ b/runtime/src/bank/check_transactions.rs @@ -1,5 +1,6 @@ use { super::{Bank, BankStatusCache}, + crate::nonce_extraction::{get_durable_nonce, get_ix_signers}, solana_accounts_db::blockhash_queue::BlockhashQueue, solana_perf::perf_libs, solana_sdk::{ @@ -9,7 +10,6 @@ use { MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU, }, - message::SanitizedMessage, nonce::{ state::{ Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, @@ -25,6 +25,7 @@ use { nonce_info::NonceInfo, transaction_error_metrics::TransactionErrorMetrics, }, + solana_svm_transaction::svm_message::SVMMessage, }; impl Bank { @@ -135,7 +136,7 @@ impl Bank { pub(super) fn check_load_and_advance_message_nonce_account( &self, - message: &SanitizedMessage, + message: &impl SVMMessage, next_durable_nonce: &DurableNonce, next_lamports_per_signature: u64, ) -> Option<(NonceInfo, u64)> { @@ -165,15 +166,14 @@ impl Bank { pub(super) fn load_message_nonce_account( &self, - message: &SanitizedMessage, + message: &impl SVMMessage, ) -> Option<(Pubkey, AccountSharedData, NonceData)> { - let nonce_address = message.get_durable_nonce()?; + let nonce_address = get_durable_nonce(message)?; let nonce_account = self.get_account_with_fixed_root(nonce_address)?; let nonce_data = nonce_account::verify_nonce_account(&nonce_account, message.recent_blockhash())?; - let nonce_is_authorized = message - .get_ix_signers(NONCED_TX_MARKER_IX_INDEX as usize) + let nonce_is_authorized = get_ix_signers(message, NONCED_TX_MARKER_IX_INDEX as usize) .any(|signer| signer == &nonce_data.authority); if !nonce_is_authorized { return None; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d4c78936f1b517..bd11e97668eec0 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -16,6 +16,7 @@ pub mod genesis_utils; pub mod installed_scheduler_pool; pub mod loader_utils; pub mod non_circulating_supply; +mod nonce_extraction; pub mod prioritization_fee; pub mod prioritization_fee_cache; pub mod rent_collector; diff --git a/runtime/src/nonce_extraction.rs b/runtime/src/nonce_extraction.rs new file mode 100644 index 00000000000000..ec3587c91501f0 --- /dev/null +++ b/runtime/src/nonce_extraction.rs @@ -0,0 +1,345 @@ +//! Functionality derived from the `SVMMessage` base functions. +//! 
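// (Editor's note, not part of the patch): the two helpers in this new module mirror
// the nonce checks done in check_transactions.rs above; a hedged usage sketch, with
// `nonce_is_authorized` as a hypothetical caller combining them:
//
//     fn nonce_is_authorized(message: &impl SVMMessage, authority: &Pubkey) -> bool {
//         // A durable-nonce transaction must advance a nonce account in its first
//         // instruction, and that instruction must be signed by the nonce authority.
//         get_durable_nonce(message).is_some()
//             && get_ix_signers(message, 0).any(|signer| signer == authority)
//     }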
+ +use { + solana_program::program_utils::limited_deserialize, + solana_sdk::{ + nonce::NONCED_TX_MARKER_IX_INDEX, pubkey::Pubkey, system_instruction::SystemInstruction, + system_program, + }, + solana_svm_transaction::svm_message::SVMMessage, +}; + +/// If the message uses a durable nonce, return the pubkey of the nonce account +pub fn get_durable_nonce(message: &impl SVMMessage) -> Option<&Pubkey> { + let account_keys = message.account_keys(); + message + .instructions_iter() + .nth(usize::from(NONCED_TX_MARKER_IX_INDEX)) + .filter( + |ix| match account_keys.get(usize::from(ix.program_id_index)) { + Some(program_id) => system_program::check_id(program_id), + _ => false, + }, + ) + .filter(|ix| { + matches!( + limited_deserialize(ix.data, 4 /* serialized size of AdvanceNonceAccount */), + Ok(SystemInstruction::AdvanceNonceAccount) + ) + }) + .and_then(|ix| { + ix.accounts.first().and_then(|idx| { + let index = usize::from(*idx); + if !message.is_writable(index) { + None + } else { + account_keys.get(index) + } + }) + }) +} + +/// For the instruction at `index`, return an iterator over input accounts +/// that are signers. +pub fn get_ix_signers(message: &impl SVMMessage, index: usize) -> impl Iterator { + message + .instructions_iter() + .nth(index) + .into_iter() + .flat_map(|ix| { + ix.accounts + .iter() + .copied() + .map(usize::from) + .filter(|index| message.is_signer(*index)) + .filter_map(|signer_index| message.account_keys().get(signer_index)) + }) +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_sdk::{ + hash::Hash, + instruction::CompiledInstruction, + message::{ + legacy, + v0::{self, LoadedAddresses, MessageAddressTableLookup}, + MessageHeader, SanitizedMessage, SanitizedVersionedMessage, SimpleAddressLoader, + VersionedMessage, + }, + }, + std::collections::HashSet, + }; + + #[test] + fn test_get_durable_nonce() { + fn create_message_for_test( + num_signers: u8, + num_writable: u8, + account_keys: Vec, + instructions: Vec, + loaded_addresses: Option, + ) -> SanitizedMessage { + let header = MessageHeader { + num_required_signatures: num_signers, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: u8::try_from(account_keys.len()).unwrap() + - num_writable, + }; + let (versioned_message, loader) = match loaded_addresses { + None => ( + VersionedMessage::Legacy(legacy::Message { + header, + account_keys, + recent_blockhash: Hash::default(), + instructions, + }), + SimpleAddressLoader::Disabled, + ), + Some(loaded_addresses) => ( + VersionedMessage::V0(v0::Message { + header, + account_keys, + recent_blockhash: Hash::default(), + instructions, + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..loaded_addresses.writable.len()) + .map(|x| x as u8) + .collect(), + readonly_indexes: (0..loaded_addresses.readonly.len()) + .map(|x| (loaded_addresses.writable.len() + x) as u8) + .collect(), + }], + }), + SimpleAddressLoader::Enabled(loaded_addresses), + ), + }; + SanitizedMessage::try_new( + SanitizedVersionedMessage::try_new(versioned_message).unwrap(), + loader, + &HashSet::new(), + ) + .unwrap() + } + + // No instructions - no nonce + { + let message = create_message_for_test(1, 1, vec![Pubkey::new_unique()], vec![], None); + assert!(message.get_durable_nonce().is_none()); + assert!(get_durable_nonce(&message).is_none()); + } + + // system program id instruction - invalid + { + let message = create_message_for_test( + 1, + 1, + vec![Pubkey::new_unique(), system_program::id()], + 
vec![CompiledInstruction::new_from_raw_parts(1, vec![], vec![])], + None, + ); + assert!(message.get_durable_nonce().is_none()); + assert!(get_durable_nonce(&message).is_none()); + } + + // system program id instruction - not nonce + { + let message = create_message_for_test( + 1, + 1, + vec![Pubkey::new_unique(), system_program::id()], + vec![CompiledInstruction::new( + 1, + &SystemInstruction::Transfer { lamports: 1 }, + vec![0, 0], + )], + None, + ); + assert!(message.get_durable_nonce().is_none()); + assert!(get_durable_nonce(&message).is_none()); + } + + // system program id - nonce instruction (no accounts) + { + let message = create_message_for_test( + 1, + 1, + vec![Pubkey::new_unique(), system_program::id()], + vec![CompiledInstruction::new( + 1, + &SystemInstruction::AdvanceNonceAccount, + vec![], + )], + None, + ); + assert!(message.get_durable_nonce().is_none()); + assert!(get_durable_nonce(&message).is_none()); + } + + // system program id - nonce instruction (non-fee-payer, non-writable) + { + let payer = Pubkey::new_unique(); + let nonce = Pubkey::new_unique(); + let message = create_message_for_test( + 1, + 1, + vec![payer, nonce, system_program::id()], + vec![CompiledInstruction::new( + 1, + &SystemInstruction::AdvanceNonceAccount, + vec![1], + )], + None, + ); + assert!(message.get_durable_nonce().is_none()); + assert!(get_durable_nonce(&message).is_none()); + } + + // system program id - nonce instruction fee-payer + { + let payer_nonce = Pubkey::new_unique(); + let message = create_message_for_test( + 1, + 1, + vec![payer_nonce, system_program::id()], + vec![CompiledInstruction::new( + 1, + &SystemInstruction::AdvanceNonceAccount, + vec![0], + )], + None, + ); + assert_eq!(message.get_durable_nonce(), Some(&payer_nonce)); + assert_eq!(get_durable_nonce(&message), Some(&payer_nonce)); + } + + // system program id - nonce instruction w/ trailing bytes fee-payer + { + let payer_nonce = Pubkey::new_unique(); + let mut instruction_bytes = + bincode::serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(); + instruction_bytes.push(0); // add a trailing byte + let message = create_message_for_test( + 1, + 1, + vec![payer_nonce, system_program::id()], + vec![CompiledInstruction::new_from_raw_parts( + 1, + instruction_bytes, + vec![0], + )], + None, + ); + assert_eq!(message.get_durable_nonce(), Some(&payer_nonce)); + assert_eq!(get_durable_nonce(&message), Some(&payer_nonce)); + } + + // system program id - nonce instruction (non-fee-payer) + { + let payer = Pubkey::new_unique(); + let nonce = Pubkey::new_unique(); + let message = create_message_for_test( + 1, + 2, + vec![payer, nonce, system_program::id()], + vec![CompiledInstruction::new( + 2, + &SystemInstruction::AdvanceNonceAccount, + vec![1], + )], + None, + ); + assert_eq!(message.get_durable_nonce(), Some(&nonce)); + assert_eq!(get_durable_nonce(&message), Some(&nonce)); + } + + // system program id - nonce instruction (non-fee-payer, multiple accounts) + { + let payer = Pubkey::new_unique(); + let other = Pubkey::new_unique(); + let nonce = Pubkey::new_unique(); + let message = create_message_for_test( + 1, + 3, + vec![payer, other, nonce, system_program::id()], + vec![CompiledInstruction::new( + 3, + &SystemInstruction::AdvanceNonceAccount, + vec![2, 1, 0], + )], + None, + ); + assert_eq!(message.get_durable_nonce(), Some(&nonce)); + assert_eq!(get_durable_nonce(&message), Some(&nonce)); + } + + // system program id - nonce instruction (non-fee-payer, loaded account) + { + let payer = Pubkey::new_unique(); + let 
nonce = Pubkey::new_unique(); + let message = create_message_for_test( + 1, + 1, + vec![payer, system_program::id()], + vec![CompiledInstruction::new( + 1, + &SystemInstruction::AdvanceNonceAccount, + vec![2, 0, 1], + )], + Some(LoadedAddresses { + writable: vec![nonce], + readonly: vec![], + }), + ); + assert_eq!(message.get_durable_nonce(), Some(&nonce)); + assert_eq!(get_durable_nonce(&message), Some(&nonce)); + } + } + + #[test] + fn test_get_ix_signers() { + let signer0 = Pubkey::new_unique(); + let signer1 = Pubkey::new_unique(); + let non_signer = Pubkey::new_unique(); + let loader_key = Pubkey::new_unique(); + let instructions = vec![ + CompiledInstruction::new(3, &(), vec![2, 0]), + CompiledInstruction::new(3, &(), vec![0, 1]), + CompiledInstruction::new(3, &(), vec![0, 0]), + ]; + + let message = SanitizedMessage::try_from_legacy_message( + legacy::Message::new_with_compiled_instructions( + 2, + 1, + 2, + vec![signer0, signer1, non_signer, loader_key], + Hash::default(), + instructions, + ), + &HashSet::default(), + ) + .unwrap(); + + assert_eq!( + get_ix_signers(&message, 0).collect::>(), + HashSet::from_iter([&signer0]) + ); + assert_eq!( + get_ix_signers(&message, 1).collect::>(), + HashSet::from_iter([&signer0, &signer1]) + ); + assert_eq!( + get_ix_signers(&message, 2).collect::>(), + HashSet::from_iter([&signer0]) + ); + assert_eq!( + get_ix_signers(&message, 3).collect::>(), + HashSet::default() + ); + } +} diff --git a/sdk/program/src/program_utils.rs b/sdk/program/src/program_utils.rs index 5308a340adbac4..f624a0f69b13fd 100644 --- a/sdk/program/src/program_utils.rs +++ b/sdk/program/src/program_utils.rs @@ -25,7 +25,7 @@ pub mod tests { #[test] fn test_limited_deserialize_advance_nonce_account() { let item = SystemInstruction::AdvanceNonceAccount; - let serialized = bincode::serialize(&item).unwrap(); + let mut serialized = bincode::serialize(&item).unwrap(); assert_eq!( serialized.len(), @@ -38,5 +38,11 @@ pub mod tests { Ok(&item) ); assert!(limited_deserialize::(&serialized, 3).is_err()); + + serialized.push(0); + assert_eq!( + limited_deserialize::(&serialized, 4).as_ref(), + Ok(&item) + ); } } From c2964109643362b51372004c84c97064e96d7524 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Wed, 25 Sep 2024 22:23:11 +0900 Subject: [PATCH 389/529] [accounts-db] remove unnecessary ed25519-dalek dependency (#2976) --- Cargo.lock | 1 - accounts-db/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 018e12ddf437f0..6cba387a9e551d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5706,7 +5706,6 @@ dependencies = [ "criterion", "crossbeam-channel", "dashmap", - "ed25519-dalek", "index_list", "indexmap 2.5.0", "itertools 0.12.1", diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 4d84e51daa0fbb..82a983ede37a20 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -65,7 +65,6 @@ name = "solana_accounts_db" [dev-dependencies] assert_matches = { workspace = true } criterion = { workspace = true } -ed25519-dalek = { workspace = true } libsecp256k1 = { workspace = true } memoffset = { workspace = true } rand_chacha = { workspace = true } From d22e8e36d84ec4fad88b97cb633b704d1a90e197 Mon Sep 17 00:00:00 2001 From: Brooks Date: Wed, 25 Sep 2024 15:04:31 -0400 Subject: [PATCH 390/529] Upgrades serde_with to 3.9.0 (#2984) --- Cargo.lock | 9 +++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 9 +++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
6cba387a9e551d..74ebc9d81659e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5305,19 +5305,20 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ "serde", + "serde_derive", "serde_with_macros", ] [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 15c81aa44f86b7..e4c8041093c5cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -347,7 +347,7 @@ serde = "1.0.210" # must match the serde_derive version, see https://github.com/ serde_bytes = "0.11.15" serde_derive = "1.0.210" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.128" -serde_with = { version = "2.3.3", default-features = false } +serde_with = { version = "3.9.0", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" sha2 = "0.10.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index f9296e57150435..29c585ff6ac4ec 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4456,19 +4456,20 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ "serde", + "serde_derive", "serde_with_macros", ] [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling", "proc-macro2", From 105c365cd6fdffaa90075d3af1887ab393e933d1 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 25 Sep 2024 16:11:43 -0500 Subject: [PATCH 391/529] runtime-transaction: get_signature_details (#2847) --- Cargo.lock | 1 + programs/sbf/Cargo.lock | 1 + runtime-transaction/Cargo.toml | 5 + .../benches/get_signature_details.rs | 132 +++++++++++++ runtime-transaction/src/lib.rs | 1 + runtime-transaction/src/signature_details.rs | 173 ++++++++++++++++++ sdk/program/src/message/sanitized.rs | 12 ++ 7 files changed, 325 insertions(+) create mode 100644 runtime-transaction/benches/get_signature_details.rs create mode 100644 runtime-transaction/src/signature_details.rs diff --git a/Cargo.lock b/Cargo.lock index 74ebc9d81659e3..7e3f2f0bba6e54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7723,6 +7723,7 @@ dependencies = [ "solana-builtins-default-costs", "solana-compute-budget", "solana-program", + "solana-pubkey", "solana-sdk", "solana-svm-transaction", "thiserror", diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 29c585ff6ac4ec..5b307630646440 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6050,6 +6050,7 @@ dependencies = [ "log", "solana-builtins-default-costs", "solana-compute-budget", 
+ "solana-pubkey", "solana-sdk", "solana-svm-transaction", "thiserror", diff --git a/runtime-transaction/Cargo.toml b/runtime-transaction/Cargo.toml index 772a9e28af93df..9ccb325593da89 100644 --- a/runtime-transaction/Cargo.toml +++ b/runtime-transaction/Cargo.toml @@ -14,6 +14,7 @@ agave-transaction-view = { workspace = true } log = { workspace = true } solana-builtins-default-costs = { workspace = true } solana-compute-budget = { workspace = true } +solana-pubkey = { workspace = true } solana-sdk = { workspace = true } solana-svm-transaction = { workspace = true } thiserror = { workspace = true } @@ -35,5 +36,9 @@ targets = ["x86_64-unknown-linux-gnu"] name = "process_compute_budget_instructions" harness = false +[[bench]] +name = "get_signature_details" +harness = false + [lints] workspace = true diff --git a/runtime-transaction/benches/get_signature_details.rs b/runtime-transaction/benches/get_signature_details.rs new file mode 100644 index 00000000000000..3a576574909a08 --- /dev/null +++ b/runtime-transaction/benches/get_signature_details.rs @@ -0,0 +1,132 @@ +use { + criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}, + solana_runtime_transaction::signature_details::get_precompile_signature_details, + solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey}, + solana_svm_transaction::instruction::SVMInstruction, +}; + +fn bench_get_signature_details_empty(c: &mut Criterion) { + let instructions = std::iter::empty(); + + c.benchmark_group("bench_get_signature_details_empty") + .throughput(Throughput::Elements(1)) + .bench_function("0 instructions", |bencher| { + bencher.iter(|| { + let instructions = black_box(instructions.clone()); + let _ = get_precompile_signature_details(instructions); + }); + }); +} + +fn bench_get_signature_details_no_sigs_unique(c: &mut Criterion) { + let program_ids = vec![Pubkey::new_unique(); 32]; + for num_instructions in [4, 32] { + let instructions = (0..num_instructions) + .map(|i| { + let program_id = &program_ids[i]; + ( + program_id, + CompiledInstruction { + program_id_index: i as u8, + accounts: vec![], + data: vec![], + }, + ) + }) + .collect::>(); + + c.benchmark_group("bench_get_signature_details_no_sigs_unique") + .throughput(Throughput::Elements(1)) + .bench_function(format!("{num_instructions} instructions"), |bencher| { + bencher.iter(|| { + let instructions = + black_box(instructions.iter().map(|(program_id, instruction)| { + (*program_id, SVMInstruction::from(instruction)) + })); + let _ = get_precompile_signature_details(instructions); + }); + }); + } +} + +fn bench_get_signature_details_packed_sigs(c: &mut Criterion) { + let program_ids = [ + solana_sdk::secp256k1_program::id(), + solana_sdk::ed25519_program::id(), + ]; + for num_instructions in [4, 64] { + let instructions = (0..num_instructions) + .map(|i| { + let index = i % 2; + let program_id = &program_ids[index]; + ( + program_id, + CompiledInstruction { + program_id_index: index as u8, + accounts: vec![], + data: vec![4], // some dummy number of signatures + }, + ) + }) + .collect::>(); + + c.benchmark_group("bench_get_signature_details_packed_sigs") + .throughput(Throughput::Elements(1)) + .bench_function(format!("{num_instructions} instructions"), |bencher| { + bencher.iter(|| { + let instructions = + black_box(instructions.iter().map(|(program_id, instruction)| { + (*program_id, SVMInstruction::from(instruction)) + })); + let _ = get_precompile_signature_details(instructions); + }); + }); + } +} + +fn bench_get_signature_details_mixed_sigs(c: 
&mut Criterion) { + let program_ids = [ + solana_sdk::secp256k1_program::id(), + solana_sdk::ed25519_program::id(), + ] + .into_iter() + .chain((0..6).map(|_| Pubkey::new_unique())) + .collect::>(); + for num_instructions in [4, 64] { + let instructions = (0..num_instructions) + .map(|i| { + let index = i % 8; + let program_id = &program_ids[index]; + ( + program_id, + CompiledInstruction { + program_id_index: index as u8, + accounts: vec![], + data: vec![4], // some dummy number of signatures + }, + ) + }) + .collect::>(); + + c.benchmark_group("bench_get_signature_details_mixed_sigs") + .throughput(Throughput::Elements(1)) + .bench_function(format!("{num_instructions} instructions"), |bencher| { + bencher.iter(|| { + let instructions = + black_box(instructions.iter().map(|(program_id, instruction)| { + (*program_id, SVMInstruction::from(instruction)) + })); + let _ = get_precompile_signature_details(instructions); + }); + }); + } +} + +criterion_group!( + benches, + bench_get_signature_details_empty, + bench_get_signature_details_no_sigs_unique, + bench_get_signature_details_packed_sigs, + bench_get_signature_details_mixed_sigs +); +criterion_main!(benches); diff --git a/runtime-transaction/src/lib.rs b/runtime-transaction/src/lib.rs index 9b79a97d40e874..40c31d4b4d653a 100644 --- a/runtime-transaction/src/lib.rs +++ b/runtime-transaction/src/lib.rs @@ -5,4 +5,5 @@ mod compute_budget_instruction_details; mod compute_budget_program_id_filter; pub mod instructions_processor; pub mod runtime_transaction; +pub mod signature_details; pub mod transaction_meta; diff --git a/runtime-transaction/src/signature_details.rs b/runtime-transaction/src/signature_details.rs new file mode 100644 index 00000000000000..8972873116a236 --- /dev/null +++ b/runtime-transaction/src/signature_details.rs @@ -0,0 +1,173 @@ +// static account keys has max +use { + agave_transaction_view::static_account_keys_frame::MAX_STATIC_ACCOUNTS_PER_PACKET as FILTER_SIZE, + solana_pubkey::Pubkey, solana_svm_transaction::instruction::SVMInstruction, +}; + +pub struct PrecompileSignatureDetails { + pub num_secp256k1_instruction_signatures: u64, + pub num_ed25519_instruction_signatures: u64, +} + +/// Get transaction signature details. +pub fn get_precompile_signature_details<'a>( + instructions: impl Iterator)>, +) -> PrecompileSignatureDetails { + let mut filter = SignatureDetailsFilter::new(); + + // Wrapping arithmetic is safe below because the maximum number of signatures + // per instruction is 255, and the maximum number of instructions per transaction + // is low enough that the sum of all signatures will not overflow a u64. 
+ let mut num_secp256k1_instruction_signatures: u64 = 0; + let mut num_ed25519_instruction_signatures: u64 = 0; + for (program_id, instruction) in instructions { + let program_id_index = instruction.program_id_index; + match filter.is_signature(program_id_index, program_id) { + ProgramIdStatus::NotSignature => {} + ProgramIdStatus::Secp256k1 => { + num_secp256k1_instruction_signatures = num_secp256k1_instruction_signatures + .wrapping_add(get_num_signatures_in_instruction(&instruction)); + } + ProgramIdStatus::Ed25519 => { + num_ed25519_instruction_signatures = num_ed25519_instruction_signatures + .wrapping_add(get_num_signatures_in_instruction(&instruction)); + } + } + } + + PrecompileSignatureDetails { + num_secp256k1_instruction_signatures, + num_ed25519_instruction_signatures, + } +} + +#[inline] +fn get_num_signatures_in_instruction(instruction: &SVMInstruction) -> u64 { + u64::from(instruction.data.first().copied().unwrap_or(0)) +} + +#[derive(Copy, Clone)] +enum ProgramIdStatus { + NotSignature, + Secp256k1, + Ed25519, +} + +struct SignatureDetailsFilter { + // array of slots for all possible static and sanitized program_id_index, + // each slot indicates if a program_id_index has not been checked, or is + // already checked with result that can be reused. + flags: [Option; FILTER_SIZE as usize], +} + +impl SignatureDetailsFilter { + #[inline] + fn new() -> Self { + Self { + flags: [None; FILTER_SIZE as usize], + } + } + + #[inline] + fn is_signature(&mut self, index: u8, program_id: &Pubkey) -> ProgramIdStatus { + let flag = &mut self.flags[usize::from(index)]; + match flag { + Some(status) => *status, + None => { + *flag = Some(Self::check_program_id(program_id)); + *flag.as_ref().unwrap() + } + } + } + + #[inline] + fn check_program_id(program_id: &Pubkey) -> ProgramIdStatus { + if program_id == &solana_sdk::secp256k1_program::ID { + ProgramIdStatus::Secp256k1 + } else if program_id == &solana_sdk::ed25519_program::ID { + ProgramIdStatus::Ed25519 + } else { + ProgramIdStatus::NotSignature + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // simple convenience function so avoid having inconsistent program_id and program_id_index + fn make_instruction<'a>( + program_ids: &'a [Pubkey], + program_id_index: u8, + data: &'a [u8], + ) -> (&'a Pubkey, SVMInstruction<'a>) { + ( + &program_ids[program_id_index as usize], + SVMInstruction { + program_id_index, + accounts: &[], + data, + }, + ) + } + + #[test] + fn test_get_signature_details_no_instructions() { + let instructions = std::iter::empty(); + let signature_details = get_precompile_signature_details(instructions); + + assert_eq!(signature_details.num_secp256k1_instruction_signatures, 0); + assert_eq!(signature_details.num_ed25519_instruction_signatures, 0); + } + + #[test] + fn test_get_signature_details_no_sigs_unique() { + let program_ids = [Pubkey::new_unique(), Pubkey::new_unique()]; + let instructions = [ + make_instruction(&program_ids, 0, &[]), + make_instruction(&program_ids, 1, &[]), + ]; + + let signature_details = get_precompile_signature_details(instructions.into_iter()); + assert_eq!(signature_details.num_secp256k1_instruction_signatures, 0); + assert_eq!(signature_details.num_ed25519_instruction_signatures, 0); + } + + #[test] + fn test_get_signature_details_signatures_mixed() { + let program_ids = [ + Pubkey::new_unique(), + solana_sdk::secp256k1_program::ID, + solana_sdk::ed25519_program::ID, + ]; + let instructions = [ + make_instruction(&program_ids, 1, &[5]), + make_instruction(&program_ids, 2, &[3]), + 
make_instruction(&program_ids, 0, &[]), + make_instruction(&program_ids, 2, &[2]), + make_instruction(&program_ids, 1, &[1]), + make_instruction(&program_ids, 0, &[]), + ]; + + let signature_details = get_precompile_signature_details(instructions.into_iter()); + assert_eq!(signature_details.num_secp256k1_instruction_signatures, 6); + assert_eq!(signature_details.num_ed25519_instruction_signatures, 5); + } + + #[test] + fn test_get_signature_details_missing_num_signatures() { + let program_ids = [ + solana_sdk::secp256k1_program::ID, + solana_sdk::ed25519_program::ID, + ]; + let instructions = [ + make_instruction(&program_ids, 0, &[]), + make_instruction(&program_ids, 1, &[]), + ]; + + let signature_details = get_precompile_signature_details(instructions.into_iter()); + assert_eq!(signature_details.num_secp256k1_instruction_signatures, 0); + assert_eq!(signature_details.num_ed25519_instruction_signatures, 0); + } +} diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 33b8c3fc3495d7..1f9c6a08187fcf 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -433,6 +433,18 @@ pub struct TransactionSignatureDetails { } impl TransactionSignatureDetails { + pub fn new( + num_transaction_signatures: u64, + num_secp256k1_instruction_signatures: u64, + num_ed25519_instruction_signatures: u64, + ) -> Self { + Self { + num_transaction_signatures, + num_secp256k1_instruction_signatures, + num_ed25519_instruction_signatures, + } + } + /// return total number of signature, treating pre-processor operations as signature pub(crate) fn total_signatures(&self) -> u64 { self.num_transaction_signatures From 443246dee0ec0cacea08d8bc63eed7d4e57089f7 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 26 Sep 2024 08:44:23 +0800 Subject: [PATCH 392/529] fix: set allocation size to 0 for transactions known to fail (#2966) --- cost-model/src/cost_model.rs | 223 +++++++++++++++++++++++++++++------ 1 file changed, 187 insertions(+), 36 deletions(-) diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index 4c1cc0df6edbfa..c1a36a7ac15e2b 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -21,7 +21,11 @@ use { instruction::CompiledInstruction, program_utils::limited_deserialize, pubkey::Pubkey, - system_instruction::SystemInstruction, + saturating_add_assign, + system_instruction::{ + SystemInstruction, MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION, + MAX_PERMITTED_DATA_LENGTH, + }, system_program, transaction::SanitizedTransaction, }, @@ -30,6 +34,13 @@ use { pub struct CostModel; +#[derive(Debug, PartialEq)] +enum SystemProgramAccountAllocation { + None, + Some(u64), + Failed, +} + impl CostModel { pub fn calculate_cost( transaction: &SanitizedTransaction, @@ -236,55 +247,71 @@ impl CostModel { fn calculate_account_data_size_on_deserialized_system_instruction( instruction: SystemInstruction, - ) -> u64 { + ) -> SystemProgramAccountAllocation { match instruction { - SystemInstruction::CreateAccount { - lamports: _lamports, - space, - owner: _owner, - } => space, - SystemInstruction::CreateAccountWithSeed { - base: _base, - seed: _seed, - lamports: _lamports, - space, - owner: _owner, - } => space, - SystemInstruction::Allocate { space } => space, - SystemInstruction::AllocateWithSeed { - base: _base, - seed: _seed, - space, - owner: _owner, - } => space, - _ => 0, + SystemInstruction::CreateAccount { space, .. } + | SystemInstruction::CreateAccountWithSeed { space, .. 
} + | SystemInstruction::Allocate { space } + | SystemInstruction::AllocateWithSeed { space, .. } => { + if space > MAX_PERMITTED_DATA_LENGTH { + SystemProgramAccountAllocation::Failed + } else { + SystemProgramAccountAllocation::Some(space) + } + } + _ => SystemProgramAccountAllocation::None, } } fn calculate_account_data_size_on_instruction( program_id: &Pubkey, instruction: &CompiledInstruction, - ) -> u64 { + ) -> SystemProgramAccountAllocation { if program_id == &system_program::id() { if let Ok(instruction) = limited_deserialize(&instruction.data) { - return Self::calculate_account_data_size_on_deserialized_system_instruction( - instruction, - ); + Self::calculate_account_data_size_on_deserialized_system_instruction(instruction) + } else { + SystemProgramAccountAllocation::Failed } + } else { + SystemProgramAccountAllocation::None } - 0 } /// eventually, potentially determine account data size of all writable accounts /// at the moment, calculate account data size of account creation fn calculate_allocated_accounts_data_size(transaction: &SanitizedTransaction) -> u64 { - transaction - .message() - .program_instructions_iter() - .map(|(program_id, instruction)| { - Self::calculate_account_data_size_on_instruction(program_id, instruction) - }) - .sum() + let mut tx_attempted_allocation_size: u64 = 0; + for (program_id, instruction) in transaction.message().program_instructions_iter() { + match Self::calculate_account_data_size_on_instruction(program_id, instruction) { + SystemProgramAccountAllocation::Failed => { + // If any system program instructions can be statically + // determined to fail, no allocations will actually be + // persisted by the transaction. So return 0 here so that no + // account allocation budget is used for this failed + // transaction. + return 0; + } + SystemProgramAccountAllocation::None => continue, + SystemProgramAccountAllocation::Some(ix_attempted_allocation_size) => { + saturating_add_assign!( + tx_attempted_allocation_size, + ix_attempted_allocation_size + ); + } + } + } + + // The runtime prevents transactions from allocating too much account + // data so clamp the attempted allocation size to the max amount. + // + // Note that if there are any custom bpf instructions in the transaction + // it's tricky to know whether a newly allocated account will be freed + // or not during an intermediate instruction in the transaction so we + // shouldn't assume that a large sum of allocations will necessarily + // lead to transaction failure. 
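// (Editor's note, not part of the patch): a worked example of the clamp below, taken
// from `test_calculate_allocated_accounts_data_size_max_limit` further down: three
// allocations of [MAX_PERMITTED_DATA_LENGTH, MAX_PERMITTED_DATA_LENGTH, 100] bytes
// sum past MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION, so the cost
// model charges only that per-transaction cap rather than the raw sum.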
+ (MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION as u64) + .min(tx_attempted_allocation_size) } } @@ -310,6 +337,130 @@ mod tests { (Keypair::new(), Hash::new_unique()) } + #[test] + fn test_calculate_allocated_accounts_data_size_no_allocation() { + let transaction = Transaction::new_unsigned(Message::new( + &[system_instruction::transfer( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 1, + )], + Some(&Pubkey::new_unique()), + )); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + + assert_eq!( + CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + 0 + ); + } + + #[test] + fn test_calculate_allocated_accounts_data_size_multiple_allocations() { + let space1 = 100; + let space2 = 200; + let transaction = Transaction::new_unsigned(Message::new( + &[ + system_instruction::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 1, + space1, + &Pubkey::new_unique(), + ), + system_instruction::allocate(&Pubkey::new_unique(), space2), + ], + Some(&Pubkey::new_unique()), + )); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + + assert_eq!( + CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + space1 + space2 + ); + } + + #[test] + fn test_calculate_allocated_accounts_data_size_max_limit() { + let spaces = [MAX_PERMITTED_DATA_LENGTH, MAX_PERMITTED_DATA_LENGTH, 100]; + assert!( + spaces.iter().copied().sum::() + > MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION as u64 + ); + let transaction = Transaction::new_unsigned(Message::new( + &[ + system_instruction::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 1, + spaces[0], + &Pubkey::new_unique(), + ), + system_instruction::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 1, + spaces[1], + &Pubkey::new_unique(), + ), + system_instruction::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 1, + spaces[2], + &Pubkey::new_unique(), + ), + ], + Some(&Pubkey::new_unique()), + )); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + + assert_eq!( + CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION as u64, + ); + } + + #[test] + fn test_calculate_allocated_accounts_data_size_overflow() { + let transaction = Transaction::new_unsigned(Message::new( + &[ + system_instruction::create_account( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 1, + 100, + &Pubkey::new_unique(), + ), + system_instruction::allocate(&Pubkey::new_unique(), u64::MAX), + ], + Some(&Pubkey::new_unique()), + )); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + + assert_eq!( + 0, // SystemProgramAccountAllocation::Failed, + CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + ); + } + + #[test] + fn test_calculate_allocated_accounts_data_size_invalid_ix() { + let transaction = Transaction::new_unsigned(Message::new( + &[ + system_instruction::allocate(&Pubkey::new_unique(), 100), + Instruction::new_with_bincode(system_program::id(), &(), vec![]), + ], + Some(&Pubkey::new_unique()), + )); + let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(transaction); + + assert_eq!( + 0, // SystemProgramAccountAllocation::Failed, + CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + ); + } + #[test] fn test_cost_model_data_len_cost() { let lamports = 0; @@ -339,14 +490,14 @@ mod tests { }, ] { assert_eq!( - space, + 
SystemProgramAccountAllocation::Some(space), CostModel::calculate_account_data_size_on_deserialized_system_instruction( instruction ) ); } assert_eq!( - 0, + SystemProgramAccountAllocation::None, CostModel::calculate_account_data_size_on_deserialized_system_instruction( SystemInstruction::TransferWithSeed { lamports, From 6543f82a2957fd9f6ec0f24c2c6a6531b1cd7814 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 26 Sep 2024 07:29:34 -0500 Subject: [PATCH 393/529] impl Deref for RuntimeTransaction (#2675) --- runtime-transaction/src/runtime_transaction.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 966a24156d084f..aded0c9c819688 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -14,6 +14,7 @@ use { compute_budget_instruction_details::*, transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, }, + core::ops::Deref, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_sdk::{ feature_set::FeatureSet, @@ -62,7 +63,15 @@ impl StaticMeta for RuntimeTransaction { } } -impl DynamicMeta for RuntimeTransaction {} +impl DynamicMeta for RuntimeTransaction {} + +impl Deref for RuntimeTransaction { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.transaction + } +} impl RuntimeTransaction { pub fn try_from( From ca306f59ad091e7b2c8d1ade0d695b825bf74363 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 26 Sep 2024 07:29:51 -0500 Subject: [PATCH 394/529] generic collect_balances (#2985) --- runtime/src/bank.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index a61f86adef5063..7a9a900bd8e019 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3538,12 +3538,12 @@ impl Bank { pub fn collect_balances( &self, - batch: &TransactionBatch, + batch: &TransactionBatch, ) -> TransactionBalances { let mut balances: TransactionBalances = vec![]; for transaction in batch.sanitized_transactions() { let mut transaction_balances: Vec = vec![]; - for account_key in transaction.message().account_keys().iter() { + for account_key in transaction.account_keys().iter() { transaction_balances.push(self.get_balance(account_key)); } balances.push(transaction_balances); From 584df68b359d31248e87dd83f4d066d63c5f8092 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 01:14:54 +0800 Subject: [PATCH 395/529] build(deps): bump async-trait from 0.1.82 to 0.1.83 (#2978) * build(deps): bump async-trait from 0.1.82 to 0.1.83 Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.82 to 0.1.83. - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.82...0.1.83) --- updated-dependencies: - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch ... 
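(Editor's aside on the `impl Deref for RuntimeTransaction` change earlier in this patch
series, not itself part of any patch: the Deref impl lets callers invoke the wrapped
transaction's methods directly on the wrapper. A hedged sketch, assuming the
`SVMMessage` trait used elsewhere in this series; `first_account_key` is a hypothetical
helper:

    fn first_account_key<T: SVMMessage>(tx: &RuntimeTransaction<T>) -> Option<&Pubkey> {
        // Resolves through Deref to the inner transaction's account_keys().
        tx.account_keys().get(0)
    }
)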
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e3f2f0bba6e54..4172562c9cffef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -728,9 +728,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index e4c8041093c5cb..d88cf6f580c93e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,7 @@ assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" async-lock = "3.4.0" -async-trait = "0.1.82" +async-trait = "0.1.83" atty = "0.2.11" backoff = "0.4.0" base64 = "0.22.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5b307630646440..dc6044c7d0be91 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", From a15fef1b26d9eb2767f7eadb8956f40a0f6887ce Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 26 Sep 2024 19:40:17 +0000 Subject: [PATCH 396/529] rolls out chained Merkle shreds to 100% of testnet slots (#2858) --- turbine/src/broadcast_stage/standard_broadcast_run.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 58cfc79fb3745a..09f3380bf9d8df 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -503,13 +503,12 @@ impl BroadcastRun for StandardBroadcastRun { } } -fn should_chain_merkle_shreds(slot: Slot, cluster_type: ClusterType) -> bool { +fn should_chain_merkle_shreds(_slot: Slot, cluster_type: ClusterType) -> bool { match cluster_type { ClusterType::Development => true, ClusterType::Devnet => false, ClusterType::MainnetBeta => false, - // Roll out chained Merkle shreds to ~53% of testnet slots. 
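// (Editor's note, not part of the patch): the gate removed below kept 10 of every 19
// slots, i.e. 10/19 ≈ 52.6%, which is where the "~53%" figure comes from; the new
// code returns `true` unconditionally for Testnet.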
- ClusterType::Testnet => slot % 19 < 10, + ClusterType::Testnet => true, } } From a5b3c2b7e0aa761d8d8107c19b8fb8c2a9bdaa78 Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 26 Sep 2024 16:23:34 -0400 Subject: [PATCH 397/529] Simplifies LtHash tests (#2993) --- lattice-hash/src/lt_hash.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lattice-hash/src/lt_hash.rs b/lattice-hash/src/lt_hash.rs index aad54c728147eb..ef1ec4b6f41b4d 100644 --- a/lattice-hash/src/lt_hash.rs +++ b/lattice-hash/src/lt_hash.rs @@ -112,12 +112,14 @@ mod tests { } } + impl Copy for LtHash {} + // Ensure that if you mix-in then mix-out a hash, you get the original value #[test] fn test_inverse() { let a = LtHash::new_random(); let b = LtHash::new_random(); - assert_eq!(a.clone(), a.clone() + b.clone() - b.clone()); + assert_eq!(a, a + b - b); } // Ensure that mixing is commutative @@ -125,7 +127,7 @@ mod tests { fn test_commutative() { let a = LtHash::new_random(); let b = LtHash::new_random(); - assert_eq!(a.clone() + b.clone(), b.clone() + a.clone()); + assert_eq!(a + b, b + a); } // Ensure that mixing is associative @@ -134,10 +136,7 @@ mod tests { let a = LtHash::new_random(); let b = LtHash::new_random(); let c = LtHash::new_random(); - assert_eq!( - (a.clone() + b.clone()) + c.clone(), - a.clone() + (b.clone() + c.clone()), - ); + assert_eq!((a + b) + c, a + (b + c)); } // Ensure the correct lattice hash and checksum values are produced From 236895b2111456ec6d4e846887fffde92f8612e4 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 27 Sep 2024 04:52:03 -0500 Subject: [PATCH 398/529] Address Resolved TransactionView (#2792) --- sdk/program/src/message/sanitized.rs | 2 +- .../src/address_table_lookup_frame.rs | 9 + transaction-view/src/instructions_frame.rs | 10 +- transaction-view/src/lib.rs | 1 + transaction-view/src/message_header_frame.rs | 3 +- .../src/resolved_transaction_view.rs | 578 ++++++++++++++++++ transaction-view/src/result.rs | 1 + transaction-view/src/sanitize.rs | 4 +- transaction-view/src/signature_frame.rs | 1 + .../src/static_account_keys_frame.rs | 2 +- transaction-view/src/transaction_frame.rs | 13 +- transaction-view/src/transaction_view.rs | 66 +- 12 files changed, 670 insertions(+), 20 deletions(-) create mode 100644 transaction-view/src/resolved_transaction_view.rs diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 1f9c6a08187fcf..622764f479537d 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -446,7 +446,7 @@ impl TransactionSignatureDetails { } /// return total number of signature, treating pre-processor operations as signature - pub(crate) fn total_signatures(&self) -> u64 { + pub fn total_signatures(&self) -> u64 { self.num_transaction_signatures .saturating_add(self.num_secp256k1_instruction_signatures) .saturating_add(self.num_ed25519_instruction_signatures) diff --git a/transaction-view/src/address_table_lookup_frame.rs b/transaction-view/src/address_table_lookup_frame.rs index 32730c6f10fd03..307e328a7cacba 100644 --- a/transaction-view/src/address_table_lookup_frame.rs +++ b/transaction-view/src/address_table_lookup_frame.rs @@ -6,6 +6,7 @@ use { }, result::{Result, TransactionViewError}, }, + core::fmt::{Debug, Formatter}, solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature}, solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup, }; @@ -46,6 +47,7 @@ const MAX_ATLS_PER_PACKET: u8 = 
((PACKET_DATA_SIZE - MIN_SIZED_PACKET_WITH_ATLS) / MIN_SIZED_ATL) as u8; /// Contains metadata about the address table lookups in a transaction packet. +#[derive(Debug)] pub(crate) struct AddressTableLookupFrame { /// The number of address table lookups in the transaction. pub(crate) num_address_table_lookups: u8, @@ -127,6 +129,7 @@ impl AddressTableLookupFrame { } } +#[derive(Clone)] pub struct AddressTableLookupIterator<'a> { pub(crate) bytes: &'a [u8], pub(crate) offset: usize, @@ -200,6 +203,12 @@ impl ExactSizeIterator for AddressTableLookupIterator<'_> { } } +impl Debug for AddressTableLookupIterator<'_> { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + #[cfg(test)] mod tests { use { diff --git a/transaction-view/src/instructions_frame.rs b/transaction-view/src/instructions_frame.rs index a908cba82aff5a..9e25c9743542be 100644 --- a/transaction-view/src/instructions_frame.rs +++ b/transaction-view/src/instructions_frame.rs @@ -6,11 +6,12 @@ use { }, result::Result, }, + core::fmt::{Debug, Formatter}, solana_svm_transaction::instruction::SVMInstruction, }; /// Contains metadata about the instructions in a transaction packet. -#[derive(Default)] +#[derive(Debug, Default)] pub(crate) struct InstructionsFrame { /// The number of instructions in the transaction. pub(crate) num_instructions: u16, @@ -71,6 +72,7 @@ impl InstructionsFrame { } } +#[derive(Clone)] pub struct InstructionsIterator<'a> { pub(crate) bytes: &'a [u8], pub(crate) offset: usize, @@ -138,6 +140,12 @@ impl ExactSizeIterator for InstructionsIterator<'_> { } } +impl Debug for InstructionsIterator<'_> { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + #[cfg(test)] mod tests { use { diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs index 047514c6e70ffc..62c69e40841f45 100644 --- a/transaction-view/src/lib.rs +++ b/transaction-view/src/lib.rs @@ -7,6 +7,7 @@ mod bytes; mod address_table_lookup_frame; mod instructions_frame; mod message_header_frame; +pub mod resolved_transaction_view; pub mod result; mod sanitize; mod signature_frame; diff --git a/transaction-view/src/message_header_frame.rs b/transaction-view/src/message_header_frame.rs index 435e58c48404d6..13eb8b3e7b629b 100644 --- a/transaction-view/src/message_header_frame.rs +++ b/transaction-view/src/message_header_frame.rs @@ -7,7 +7,7 @@ use { }; /// A byte that represents the version of the transaction. -#[derive(Copy, Clone, Default)] +#[derive(Copy, Clone, Debug, Default)] #[repr(u8)] pub enum TransactionVersion { #[default] @@ -16,6 +16,7 @@ pub enum TransactionVersion { } /// Metadata for accessing message header fields in a transaction view. +#[derive(Debug)] pub(crate) struct MessageHeaderFrame { /// The offset to the first byte of the message in the transaction packet. 
pub(crate) offset: u16,
diff --git a/transaction-view/src/resolved_transaction_view.rs b/transaction-view/src/resolved_transaction_view.rs
new file mode 100644
index 00000000000000..81a6c2f1886314
--- /dev/null
+++ b/transaction-view/src/resolved_transaction_view.rs
@@ -0,0 +1,578 @@
+use {
+    crate::{
+        result::{Result, TransactionViewError},
+        transaction_data::TransactionData,
+        transaction_view::TransactionView,
+    },
+    core::{
+        fmt::{Debug, Formatter},
+        ops::Deref,
+    },
+    solana_sdk::{
+        bpf_loader_upgradeable, ed25519_program,
+        hash::Hash,
+        message::{v0::LoadedAddresses, AccountKeys, TransactionSignatureDetails},
+        pubkey::Pubkey,
+        secp256k1_program,
+    },
+    solana_svm_transaction::{
+        instruction::SVMInstruction, message_address_table_lookup::SVMMessageAddressTableLookup,
+        svm_message::SVMMessage,
+    },
+    std::collections::HashSet,
+};
+
+/// A parsed and sanitized transaction view that has had all address lookups
+/// resolved.
+pub struct ResolvedTransactionView {
+    /// The parsed and sanitized transaction view.
+    view: TransactionView,
+    /// The resolved address lookups.
+    resolved_addresses: Option,
+    /// A cache for whether an address is writable.
+    writable_cache: Vec, // TODO: should this be a vec, bitset, or array[256].
+}
+
+impl Deref for ResolvedTransactionView {
+    type Target = TransactionView;
+
+    fn deref(&self) -> &Self::Target {
+        &self.view
+    }
+}
+
+impl ResolvedTransactionView {
+    /// Given a parsed and sanitized transaction view, and a set of resolved
+    /// addresses, create a resolved transaction view.
+    pub fn try_new(
+        view: TransactionView,
+        resolved_addresses: Option,
+        reserved_account_keys: &HashSet,
+    ) -> Result {
+        let resolved_addresses_ref = resolved_addresses.as_ref();
+
+        // verify that the number of readable and writable match up.
+        // This is a basic sanity check to make sure we're not passing a totally
+        // invalid set of resolved addresses.
+        if let Some(loaded_addresses) = resolved_addresses_ref {
+            if loaded_addresses.writable.len() != usize::from(view.total_writable_lookup_accounts())
+                || loaded_addresses.readonly.len()
+                    != usize::from(view.total_readonly_lookup_accounts())
+            {
+                return Err(TransactionViewError::AddressLookupMismatch);
+            }
+        } else if view.total_writable_lookup_accounts() != 0
+            || view.total_readonly_lookup_accounts() != 0
+        {
+            return Err(TransactionViewError::AddressLookupMismatch);
+        }
+
+        let writable_cache =
+            Self::cache_is_writable(&view, resolved_addresses_ref, reserved_account_keys);
+        Ok(Self {
+            view,
+            resolved_addresses,
+            writable_cache,
+        })
+    }
+
+    /// Helper function to check if an address is writable,
+    /// and cache the result.
+    /// This is done so we avoid recomputing the expensive checks each time we call
+    /// `is_writable` - since there is more to it than just checking index.
+    fn cache_is_writable(
+        view: &TransactionView,
+        resolved_addresses: Option<&LoadedAddresses>,
+        reserved_account_keys: &HashSet,
+    ) -> Vec {
+        // Build account keys so that we can iterate over and check if
+        // an address is writable.
+ let account_keys = AccountKeys::new(view.static_account_keys(), resolved_addresses); + + let mut is_writable_cache = Vec::with_capacity(account_keys.len()); + let num_static_account_keys = usize::from(view.num_static_account_keys()); + let num_writable_lookup_accounts = usize::from(view.total_writable_lookup_accounts()); + let num_signed_accounts = usize::from(view.num_required_signatures()); + let num_writable_unsigned_static_accounts = + usize::from(view.num_writable_unsigned_static_accounts()); + let num_writable_signed_static_accounts = + usize::from(view.num_writable_signed_static_accounts()); + + for (index, key) in account_keys.iter().enumerate() { + let is_requested_write = { + // If the account is a resolved address, check if it is writable. + if index >= num_static_account_keys { + let loaded_address_index = index.wrapping_sub(num_static_account_keys); + loaded_address_index < num_writable_lookup_accounts + } else if index >= num_signed_accounts { + let unsigned_account_index = index.wrapping_sub(num_signed_accounts); + unsigned_account_index < num_writable_unsigned_static_accounts + } else { + index < num_writable_signed_static_accounts + } + }; + + // If the key is reserved it cannot be writable. + is_writable_cache.push(is_requested_write && !reserved_account_keys.contains(key)); + } + + // If a program account is locked, it cannot be writable unless the + // upgradable loader is present. + // However, checking for the upgradable loader is somewhat expensive, so + // we only do it if we find a writable program id. + let mut is_upgradable_loader_present = None; + for ix in view.instructions_iter() { + let program_id_index = usize::from(ix.program_id_index); + if is_writable_cache[program_id_index] + && !*is_upgradable_loader_present.get_or_insert_with(|| { + for key in account_keys.iter() { + if key == &bpf_loader_upgradeable::ID { + return true; + } + } + false + }) + { + is_writable_cache[program_id_index] = false; + } + } + + is_writable_cache + } + + fn num_readonly_accounts(&self) -> usize { + usize::from(self.view.total_readonly_lookup_accounts()) + .wrapping_add(usize::from(self.view.num_readonly_signed_static_accounts())) + .wrapping_add(usize::from( + self.view.num_readonly_unsigned_static_accounts(), + )) + } + + fn signature_details(&self) -> TransactionSignatureDetails { + // counting the number of pre-processor operations separately + let mut num_secp256k1_instruction_signatures: u64 = 0; + let mut num_ed25519_instruction_signatures: u64 = 0; + for (program_id, instruction) in self.program_instructions_iter() { + if secp256k1_program::check_id(program_id) { + if let Some(num_verifies) = instruction.data.first() { + num_secp256k1_instruction_signatures = + num_secp256k1_instruction_signatures.wrapping_add(u64::from(*num_verifies)); + } + } else if ed25519_program::check_id(program_id) { + if let Some(num_verifies) = instruction.data.first() { + num_ed25519_instruction_signatures = + num_ed25519_instruction_signatures.wrapping_add(u64::from(*num_verifies)); + } + } + } + + TransactionSignatureDetails::new( + u64::from(self.view.num_required_signatures()), + num_secp256k1_instruction_signatures, + num_ed25519_instruction_signatures, + ) + } +} + +impl SVMMessage for ResolvedTransactionView { + fn num_total_signatures(&self) -> u64 { + self.signature_details().total_signatures() + } + + fn num_write_locks(&self) -> u64 { + self.account_keys() + .len() + .wrapping_sub(self.num_readonly_accounts()) as u64 + } + + fn recent_blockhash(&self) -> &Hash { + 
self.view.recent_blockhash() + } + + fn num_instructions(&self) -> usize { + usize::from(self.view.num_instructions()) + } + + fn instructions_iter(&self) -> impl Iterator { + self.view.instructions_iter() + } + + fn program_instructions_iter( + &self, + ) -> impl Iterator< + Item = ( + &solana_sdk::pubkey::Pubkey, + solana_svm_transaction::instruction::SVMInstruction, + ), + > { + self.view.program_instructions_iter() + } + + fn account_keys(&self) -> AccountKeys { + AccountKeys::new( + self.view.static_account_keys(), + self.resolved_addresses.as_ref(), + ) + } + + fn fee_payer(&self) -> &Pubkey { + &self.view.static_account_keys()[0] + } + + fn is_writable(&self, index: usize) -> bool { + self.writable_cache.get(index).copied().unwrap_or(false) + } + + fn is_signer(&self, index: usize) -> bool { + index < usize::from(self.view.num_required_signatures()) + } + + fn is_invoked(&self, key_index: usize) -> bool { + let Ok(index) = u8::try_from(key_index) else { + return false; + }; + self.view + .instructions_iter() + .any(|ix| ix.program_id_index == index) + } + + fn num_lookup_tables(&self) -> usize { + usize::from(self.view.num_address_table_lookups()) + } + + fn message_address_table_lookups(&self) -> impl Iterator { + self.view.address_table_lookup_iter() + } +} + +impl Debug for ResolvedTransactionView { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ResolvedTransactionView") + .field("view", &self.view) + .finish() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::transaction_view::SanitizedTransactionView, + solana_sdk::{ + instruction::CompiledInstruction, + message::{ + v0::{self, MessageAddressTableLookup}, + MessageHeader, VersionedMessage, + }, + signature::Signature, + system_program, sysvar, + transaction::VersionedTransaction, + }, + }; + + #[test] + fn test_expected_loaded_addresses() { + // Expected addresses passed in, but `None` was passed. + let static_keys = vec![Pubkey::new_unique(), Pubkey::new_unique()]; + let transaction = VersionedTransaction { + signatures: vec![Signature::default()], + message: VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![], + account_keys: static_keys, + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![1], + }], + recent_blockhash: Hash::default(), + }), + }; + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let result = ResolvedTransactionView::try_new(view, None, &HashSet::default()); + assert!(matches!( + result, + Err(TransactionViewError::AddressLookupMismatch) + )); + } + + #[test] + fn test_unexpected_loaded_addresses() { + // Expected no addresses passed in, but `Some` was passed. 
+ let static_keys = vec![Pubkey::new_unique(), Pubkey::new_unique()]; + let loaded_addresses = LoadedAddresses { + writable: vec![Pubkey::new_unique()], + readonly: vec![], + }; + let transaction = VersionedTransaction { + signatures: vec![Signature::default()], + message: VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![], + account_keys: static_keys, + address_table_lookups: vec![], + recent_blockhash: Hash::default(), + }), + }; + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let result = + ResolvedTransactionView::try_new(view, Some(loaded_addresses), &HashSet::default()); + assert!(matches!( + result, + Err(TransactionViewError::AddressLookupMismatch) + )); + } + + #[test] + fn test_mismatched_loaded_address_lengths() { + // Loaded addresses only has 1 writable address, no readonly. + // The message ATL has 1 writable and 1 readonly. + let static_keys = vec![Pubkey::new_unique(), Pubkey::new_unique()]; + let loaded_addresses = LoadedAddresses { + writable: vec![Pubkey::new_unique()], + readonly: vec![], + }; + let transaction = VersionedTransaction { + signatures: vec![Signature::default()], + message: VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![], + account_keys: static_keys, + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![1], + }], + recent_blockhash: Hash::default(), + }), + }; + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let result = + ResolvedTransactionView::try_new(view, Some(loaded_addresses), &HashSet::default()); + assert!(matches!( + result, + Err(TransactionViewError::AddressLookupMismatch) + )); + } + + #[test] + fn test_is_writable() { + let reserved_account_keys = HashSet::from_iter([sysvar::clock::id(), system_program::id()]); + // Create a versioned transaction. 
+ let create_transaction_with_keys = + |static_keys: Vec, loaded_addresses: &LoadedAddresses| VersionedTransaction { + signatures: vec![Signature::default()], + message: VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 1, + }, + account_keys: static_keys[..2].to_vec(), + recent_blockhash: Hash::default(), + instructions: vec![], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..loaded_addresses.writable.len()) + .map(|x| (static_keys.len() + x) as u8) + .collect(), + readonly_indexes: (0..loaded_addresses.readonly.len()) + .map(|x| { + (static_keys.len() + loaded_addresses.writable.len() + x) as u8 + }) + .collect(), + }], + }), + }; + + let key0 = Pubkey::new_unique(); + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + { + let static_keys = vec![sysvar::clock::id(), key0]; + let loaded_addresses = LoadedAddresses { + writable: vec![key1], + readonly: vec![key2], + }; + let transaction = create_transaction_with_keys(static_keys, &loaded_addresses); + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let resolved_view = ResolvedTransactionView::try_new( + view, + Some(loaded_addresses), + &reserved_account_keys, + ) + .unwrap(); + + // demote reserved static key to readonly + let expected = vec![false, false, true, false]; + for (index, expected) in expected.into_iter().enumerate() { + assert_eq!(resolved_view.is_writable(index), expected); + } + } + + { + let static_keys = vec![system_program::id(), key0]; + let loaded_addresses = LoadedAddresses { + writable: vec![key1], + readonly: vec![key2], + }; + let transaction = create_transaction_with_keys(static_keys, &loaded_addresses); + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let resolved_view = ResolvedTransactionView::try_new( + view, + Some(loaded_addresses), + &reserved_account_keys, + ) + .unwrap(); + + // demote reserved static key to readonly + let expected = vec![false, false, true, false]; + for (index, expected) in expected.into_iter().enumerate() { + assert_eq!(resolved_view.is_writable(index), expected); + } + } + + { + let static_keys = vec![key0, key1]; + let loaded_addresses = LoadedAddresses { + writable: vec![system_program::id()], + readonly: vec![key2], + }; + let transaction = create_transaction_with_keys(static_keys, &loaded_addresses); + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let resolved_view = ResolvedTransactionView::try_new( + view, + Some(loaded_addresses), + &reserved_account_keys, + ) + .unwrap(); + + // demote loaded key to readonly + let expected = vec![true, false, false, false]; + for (index, expected) in expected.into_iter().enumerate() { + assert_eq!(resolved_view.is_writable(index), expected); + } + } + } + + #[test] + fn test_demote_writable_program() { + let reserved_account_keys = HashSet::default(); + let key0 = Pubkey::new_unique(); + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let key3 = Pubkey::new_unique(); + let key4 = Pubkey::new_unique(); + let loaded_addresses = LoadedAddresses { + writable: vec![key3, key4], + readonly: vec![], + }; + let create_transaction_with_static_keys = + 
|static_keys: Vec, loaded_addresses: &LoadedAddresses| VersionedTransaction { + signatures: vec![Signature::default()], + message: VersionedMessage::V0(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + account_keys: static_keys, + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..loaded_addresses.writable.len()) + .map(|x| x as u8) + .collect(), + readonly_indexes: (0..loaded_addresses.readonly.len()) + .map(|x| (loaded_addresses.writable.len() + x) as u8) + .collect(), + }], + recent_blockhash: Hash::default(), + }), + }; + + // Demote writable program - static + { + let static_keys = vec![key0, key1, key2]; + let transaction = create_transaction_with_static_keys(static_keys, &loaded_addresses); + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let resolved_view = ResolvedTransactionView::try_new( + view, + Some(loaded_addresses.clone()), + &reserved_account_keys, + ) + .unwrap(); + + let expected = vec![true, false, true, true, true]; + for (index, expected) in expected.into_iter().enumerate() { + assert_eq!(resolved_view.is_writable(index), expected); + } + } + + // Do not demote writable program - static address: upgradable loader + { + let static_keys = vec![key0, key1, bpf_loader_upgradeable::ID]; + let transaction = create_transaction_with_static_keys(static_keys, &loaded_addresses); + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + let resolved_view = ResolvedTransactionView::try_new( + view, + Some(loaded_addresses.clone()), + &reserved_account_keys, + ) + .unwrap(); + + let expected = vec![true, true, true, true, true]; + for (index, expected) in expected.into_iter().enumerate() { + assert_eq!(resolved_view.is_writable(index), expected); + } + } + + // Do not demote writable program - loaded address: upgradable loader + { + let static_keys = vec![key0, key1, key2]; + let loaded_addresses = LoadedAddresses { + writable: vec![key3], + readonly: vec![bpf_loader_upgradeable::ID], + }; + let transaction = create_transaction_with_static_keys(static_keys, &loaded_addresses); + let bytes = bincode::serialize(&transaction).unwrap(); + let view = SanitizedTransactionView::try_new_sanitized(bytes.as_ref()).unwrap(); + + let resolved_view = ResolvedTransactionView::try_new( + view, + Some(loaded_addresses.clone()), + &reserved_account_keys, + ) + .unwrap(); + + let expected = vec![true, true, true, true, false]; + for (index, expected) in expected.into_iter().enumerate() { + assert_eq!(resolved_view.is_writable(index), expected); + } + } + } +} diff --git a/transaction-view/src/result.rs b/transaction-view/src/result.rs index b94c6b26e63a58..028a7f1134b2aa 100644 --- a/transaction-view/src/result.rs +++ b/transaction-view/src/result.rs @@ -3,6 +3,7 @@ pub enum TransactionViewError { ParseError, SanitizeError, + AddressLookupMismatch, } pub type Result = core::result::Result; diff --git a/transaction-view/src/sanitize.rs b/transaction-view/src/sanitize.rs index b1aff7bb70cdd7..3e71bd30d32fde 100644 --- a/transaction-view/src/sanitize.rs +++ b/transaction-view/src/sanitize.rs @@ -30,7 +30,7 @@ fn sanitize_account_access(view: 
&UnsanitizedTransactionView view .num_static_account_keys() .wrapping_sub(view.num_required_signatures()) @@ -39,7 +39,7 @@ fn sanitize_account_access(view: &UnsanitizedTransactionView= view.num_required_signatures() { + if view.num_readonly_signed_static_accounts() >= view.num_required_signatures() { return Err(TransactionViewError::SanitizeError); } diff --git a/transaction-view/src/signature_frame.rs b/transaction-view/src/signature_frame.rs index b8176e538e01b7..2f4e96b1e48883 100644 --- a/transaction-view/src/signature_frame.rs +++ b/transaction-view/src/signature_frame.rs @@ -17,6 +17,7 @@ const MAX_SIGNATURES_PER_PACKET: u8 = (PACKET_DATA_SIZE / (core::mem::size_of::() + core::mem::size_of::())) as u8; /// Metadata for accessing transaction-level signatures in a transaction view. +#[derive(Debug)] pub(crate) struct SignatureFrame { /// The number of signatures in the transaction. pub(crate) num_signatures: u8, diff --git a/transaction-view/src/static_account_keys_frame.rs b/transaction-view/src/static_account_keys_frame.rs index 904c9905b0d68c..daea4401936ad3 100644 --- a/transaction-view/src/static_account_keys_frame.rs +++ b/transaction-view/src/static_account_keys_frame.rs @@ -14,7 +14,7 @@ pub const MAX_STATIC_ACCOUNTS_PER_PACKET: u8 = (PACKET_DATA_SIZE / core::mem::size_of::()) as u8; /// Contains metadata about the static account keys in a transaction packet. -#[derive(Default)] +#[derive(Debug, Default)] pub(crate) struct StaticAccountKeysFrame { /// The number of static accounts in the transaction. pub(crate) num_static_accounts: u8, diff --git a/transaction-view/src/transaction_frame.rs b/transaction-view/src/transaction_frame.rs index e556c14cb26a9d..933736d5d251f3 100644 --- a/transaction-view/src/transaction_frame.rs +++ b/transaction-view/src/transaction_frame.rs @@ -11,6 +11,7 @@ use { solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature}, }; +#[derive(Debug)] pub(crate) struct TransactionFrame { /// Signature framing data. signature: SignatureFrame, @@ -85,15 +86,15 @@ impl TransactionFrame { self.message_header.num_required_signatures } - /// Return the number of readonly signed accounts in the transaction. + /// Return the number of readonly signed static accounts in the transaction. #[inline] - pub(crate) fn num_readonly_signed_accounts(&self) -> u8 { + pub(crate) fn num_readonly_signed_static_accounts(&self) -> u8 { self.message_header.num_readonly_signed_accounts } - /// Return the number of readonly unsigned accounts in the transaction. + /// Return the number of readonly unsigned static accounts in the transaction. 
#[inline]
-    pub(crate) fn num_readonly_unsigned_accounts(&self) -> u8 {
+    pub(crate) fn num_readonly_unsigned_static_accounts(&self) -> u8 {
         self.message_header.num_readonly_unsigned_accounts
     }
 
@@ -525,8 +526,8 @@ mod tests {
         assert_eq!(frame.num_signatures(), 1);
         assert!(matches!(frame.version(), TransactionVersion::Legacy));
         assert_eq!(frame.num_required_signatures(), 1);
-        assert_eq!(frame.num_readonly_signed_accounts(), 0);
-        assert_eq!(frame.num_readonly_unsigned_accounts(), 1);
+        assert_eq!(frame.num_readonly_signed_static_accounts(), 0);
+        assert_eq!(frame.num_readonly_unsigned_static_accounts(), 1);
         assert_eq!(frame.num_static_account_keys(), 3);
         assert_eq!(frame.num_instructions(), 1);
         assert_eq!(frame.num_address_table_lookups(), 0);
diff --git a/transaction-view/src/transaction_view.rs b/transaction-view/src/transaction_view.rs
index b869a4fecb94a5..1d0c3c9bdc2034 100644
--- a/transaction-view/src/transaction_view.rs
+++ b/transaction-view/src/transaction_view.rs
@@ -5,7 +5,9 @@ use {
         result::Result, sanitize::sanitize, transaction_data::TransactionData,
         transaction_frame::TransactionFrame,
     },
+    core::fmt::{Debug, Formatter},
     solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature},
+    solana_svm_transaction::instruction::SVMInstruction,
 };
 
 // alias for convenience
@@ -68,16 +70,16 @@ impl TransactionView {
         self.frame.num_required_signatures()
     }
 
-    /// Return the number of readonly signed accounts in the transaction.
+    /// Return the number of readonly signed static accounts in the transaction.
     #[inline]
-    pub fn num_readonly_signed_accounts(&self) -> u8 {
-        self.frame.num_readonly_signed_accounts()
+    pub fn num_readonly_signed_static_accounts(&self) -> u8 {
+        self.frame.num_readonly_signed_static_accounts()
     }
 
-    /// Return the number of readonly unsigned accounts in the transaction.
+    /// Return the number of readonly unsigned static accounts in the transaction.
     #[inline]
-    pub fn num_readonly_unsigned_accounts(&self) -> u8 {
-        self.frame.num_readonly_unsigned_accounts()
+    pub fn num_readonly_unsigned_static_accounts(&self) -> u8 {
+        self.frame.num_readonly_unsigned_static_accounts()
     }
 
     /// Return the number of static account keys in the transaction.
@@ -164,6 +166,54 @@ impl TransactionView {
     }
 }
 
+// Implementation that relies on sanitization checks having been run.
+impl TransactionView {
+    /// Return an iterator over the instructions paired with their program ids.
+    pub fn program_instructions_iter(&self) -> impl Iterator {
+        self.instructions_iter().map(|ix| {
+            let program_id_index = usize::from(ix.program_id_index);
+            let program_id = &self.static_account_keys()[program_id_index];
+            (program_id, ix)
+        })
+    }
+
+    /// Return the number of unsigned static account keys.
+    #[inline]
+    pub(crate) fn num_static_unsigned_static_accounts(&self) -> u8 {
+        self.num_static_account_keys()
+            .wrapping_sub(self.num_required_signatures())
+    }
+
+    /// Return the number of writable unsigned static accounts.
+    #[inline]
+    pub(crate) fn num_writable_unsigned_static_accounts(&self) -> u8 {
+        self.num_static_unsigned_static_accounts()
+            .wrapping_sub(self.num_readonly_unsigned_static_accounts())
+    }
+
+    /// Return the number of writable signed static accounts.
+    #[inline]
+    pub(crate) fn num_writable_signed_static_accounts(&self) -> u8 {
+        self.num_required_signatures()
+            .wrapping_sub(self.num_readonly_signed_static_accounts())
+    }
+}
+
+// Manual implementation of `Debug` - avoids bound on `D`.
+// Prints nicely formatted struct-ish fields even for the iterator fields.
+impl Debug for TransactionView { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.debug_struct("TransactionView") + .field("frame", &self.frame) + .field("signatures", &self.signatures()) + .field("static_account_keys", &self.static_account_keys()) + .field("recent_blockhash", &self.recent_blockhash()) + .field("instructions", &self.instructions_iter()) + .field("address_table_lookups", &self.address_table_lookup_iter()) + .finish() + } +} + #[cfg(test)] mod tests { use { @@ -188,11 +238,11 @@ mod tests { tx.message.header().num_required_signatures ); assert_eq!( - view.num_readonly_signed_accounts(), + view.num_readonly_signed_static_accounts(), tx.message.header().num_readonly_signed_accounts ); assert_eq!( - view.num_readonly_unsigned_accounts(), + view.num_readonly_unsigned_static_accounts(), tx.message.header().num_readonly_unsigned_accounts ); From 5c9c5c7b2aa8a51628ab1f6bb403b7891249c7f7 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 27 Sep 2024 04:52:22 -0500 Subject: [PATCH 399/529] RuntimeTransaction: cache TransactionSignatureDetails (#2983) --- .../src/runtime_transaction.rs | 32 +++++++++++++++++-- runtime-transaction/src/transaction_meta.rs | 10 ++++-- sdk/program/src/message/sanitized.rs | 2 +- 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index aded0c9c819688..24bb512b203a0d 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -12,6 +12,7 @@ use { crate::{ compute_budget_instruction_details::*, + signature_details::get_precompile_signature_details, transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, }, core::ops::Deref, @@ -19,7 +20,7 @@ use { solana_sdk::{ feature_set::FeatureSet, hash::Hash, - message::AddressLoader, + message::{AddressLoader, TransactionSignatureDetails}, pubkey::Pubkey, simple_vote_transaction_checker::is_simple_vote_transaction, transaction::{Result, SanitizedTransaction, SanitizedVersionedTransaction}, @@ -28,7 +29,6 @@ use { std::collections::HashSet, }; -#[cfg_attr(test, derive(Eq, PartialEq))] #[derive(Debug)] pub struct RuntimeTransaction { transaction: T, @@ -56,6 +56,9 @@ impl StaticMeta for RuntimeTransaction { fn is_simple_vote_tx(&self) -> bool { self.meta.is_simple_vote_tx } + fn signature_details(&self) -> &TransactionSignatureDetails { + &self.meta.signature_details + } fn compute_budget_limits(&self, _feature_set: &FeatureSet) -> Result { self.meta .compute_budget_instruction_details @@ -83,6 +86,24 @@ impl RuntimeTransaction { .unwrap_or_else(|| is_simple_vote_transaction(&sanitized_versioned_tx)); let message_hash = message_hash.unwrap_or_else(|| sanitized_versioned_tx.get_message().message.hash()); + + let precompile_signature_details = get_precompile_signature_details( + sanitized_versioned_tx + .get_message() + .program_instructions_iter() + .map(|(program_id, ix)| (program_id, SVMInstruction::from(ix))), + ); + let signature_details = TransactionSignatureDetails::new( + u64::from( + sanitized_versioned_tx + .get_message() + .message + .header() + .num_required_signatures, + ), + precompile_signature_details.num_secp256k1_instruction_signatures, + precompile_signature_details.num_ed25519_instruction_signatures, + ); let compute_budget_instruction_details = ComputeBudgetInstructionDetails::try_from( sanitized_versioned_tx .get_message() @@ -95,6 +116,7 @@ impl RuntimeTransaction { meta: TransactionMeta { message_hash, 
is_simple_vote_tx,
+                signature_details,
+                compute_budget_instruction_details,
             },
         })
@@ -299,6 +321,12 @@ mod tests {
 
         assert_eq!(&hash, runtime_transaction_static.message_hash());
         assert!(!runtime_transaction_static.is_simple_vote_tx());
+
+        let signature_details = &runtime_transaction_static.meta.signature_details;
+        assert_eq!(1, signature_details.num_transaction_signatures());
+        assert_eq!(0, signature_details.num_secp256k1_instruction_signatures());
+        assert_eq!(0, signature_details.num_ed25519_instruction_signatures());
+
         let compute_budget_limits = runtime_transaction_static
             .compute_budget_limits(&FeatureSet::default())
             .unwrap();
diff --git a/runtime-transaction/src/transaction_meta.rs b/runtime-transaction/src/transaction_meta.rs
index a2b3a746c5ebae..6ddce57e11dc5b 100644
--- a/runtime-transaction/src/transaction_meta.rs
+++ b/runtime-transaction/src/transaction_meta.rs
@@ -14,7 +14,10 @@
 use {
     crate::compute_budget_instruction_details::ComputeBudgetInstructionDetails,
     solana_compute_budget::compute_budget_limits::ComputeBudgetLimits,
-    solana_sdk::{feature_set::FeatureSet, hash::Hash, transaction::Result},
+    solana_sdk::{
+        feature_set::FeatureSet, hash::Hash, message::TransactionSignatureDetails,
+        transaction::Result,
+    },
 };
 
 /// metadata can be extracted statically from sanitized transaction,
@@ -22,6 +25,7 @@
 pub trait StaticMeta {
     fn message_hash(&self) -> &Hash;
     fn is_simple_vote_tx(&self) -> bool;
+    fn signature_details(&self) -> &TransactionSignatureDetails;
     fn compute_budget_limits(&self, feature_set: &FeatureSet) -> Result;
 }
 
@@ -32,10 +36,10 @@
 /// on-chain ALT, examples are: transaction usage costs, nonce account.
 pub trait DynamicMeta: StaticMeta {}
 
-#[cfg_attr(test, derive(Eq, PartialEq))]
-#[derive(Debug, Default)]
+#[derive(Debug)]
 pub struct TransactionMeta {
     pub(crate) message_hash: Hash,
     pub(crate) is_simple_vote_tx: bool,
+    pub(crate) signature_details: TransactionSignatureDetails,
     pub(crate) compute_budget_instruction_details: ComputeBudgetInstructionDetails,
 }
diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs
index 622764f479537d..3767ffef5bb51f 100644
--- a/sdk/program/src/message/sanitized.rs
+++ b/sdk/program/src/message/sanitized.rs
@@ -423,9 +423,9 @@ impl SanitizedMessage {
     }
 }
 
-#[derive(Default)]
 /// Transaction signature details including the number of transaction signatures
 /// and precompile signatures.
+#[derive(Debug, Default)]
 pub struct TransactionSignatureDetails {
     num_transaction_signatures: u64,
     num_secp256k1_instruction_signatures: u64,
From d2cc71f0d446b8fdd8052106f38c784f3757c4aa Mon Sep 17 00:00:00 2001
From: Asten
Date: Fri, 27 Sep 2024 22:05:29 +0800
Subject: [PATCH 400/529] Fix: Corrected the derivation command format (#2952)

---
 docs/src/cli/wallets/paper.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/src/cli/wallets/paper.md b/docs/src/cli/wallets/paper.md
index 4e3c3c39ac8732..b90b1b9bfd3a7f 100644
--- a/docs/src/cli/wallets/paper.md
+++ b/docs/src/cli/wallets/paper.md
@@ -150,14 +150,14 @@ By default, `prompt:` will derive solana's base derivation path `m/44'/501'`.
 To derive a child key, supply the `?key=<ACCOUNT>/<CHANGE>` query string.
 
 ```bash
-solana-keygen pubkey prompt://?key=0/1
+solana-keygen pubkey 'prompt://?key=0/1'
 ```
 
 To use a derivation path other than solana's standard BIP44,
 you can supply `?full-path=m/<PURPOSE>/<COIN_TYPE>/<ACCOUNT>/<CHANGE>`.
```bash -solana-keygen pubkey prompt://?full-path=m/44/2017/0/1 +solana-keygen pubkey 'prompt://?full-path=m/44/2017/0/1' ``` Because Solana uses Ed25519 keypairs, as per From 91161ffd7759265295c61785926d56bbc1b77a32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Sep 2024 01:06:44 +0800 Subject: [PATCH 401/529] build(deps): bump lz4 from 1.27.0 to 1.28.0 (#2999) * build(deps): bump lz4 from 1.27.0 to 1.28.0 Bumps [lz4](https://github.com/10xGenomics/lz4-rs) from 1.27.0 to 1.28.0. - [Release notes](https://github.com/10xGenomics/lz4-rs/releases) - [Changelog](https://github.com/10XGenomics/lz4-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/10xGenomics/lz4-rs/commits) --- updated-dependencies: - dependency-name: lz4 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4172562c9cffef..b0b328ab035106 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3459,18 +3459,18 @@ dependencies = [ [[package]] name = "lz4" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a231296ca742e418c43660cb68e082486ff2538e8db432bc818580f3965025ed" +checksum = "4d1febb2b4a79ddd1980eede06a8f7902197960aa0383ffcfdd62fe723036725" dependencies = [ "lz4-sys", ] [[package]] name = "lz4-sys" -version = "1.11.0" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index d88cf6f580c93e..08f5d54e4c901d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -297,7 +297,7 @@ libsecp256k1 = { version = "0.6.0", default-features = false, features = [ light-poseidon = "0.2.0" log = "0.4.22" lru = "0.7.7" -lz4 = "1.27.0" +lz4 = "1.28.0" memmap2 = "0.5.10" memoffset = "0.9" merlin = "3" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index dc6044c7d0be91..b77dbe3a7badf8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2830,18 +2830,18 @@ dependencies = [ [[package]] name = "lz4" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a231296ca742e418c43660cb68e082486ff2538e8db432bc818580f3965025ed" +checksum = "4d1febb2b4a79ddd1980eede06a8f7902197960aa0383ffcfdd62fe723036725" dependencies = [ "lz4-sys", ] [[package]] name = "lz4-sys" -version = "1.11.0" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", From 73a7b25fba63d1f781e8179940ad9550a42f5112 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Sep 2024 01:08:10 +0800 Subject: [PATCH 402/529] build(deps): bump tar from 0.4.41 to 0.4.42 (#2979) * build(deps): bump tar from 0.4.41 to 0.4.42 Bumps 
[tar](https://github.com/alexcrichton/tar-rs) from 0.4.41 to 0.4.42. - [Commits](https://github.com/alexcrichton/tar-rs/compare/0.4.41...0.4.42) --- updated-dependencies: - dependency-name: tar dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0b328ab035106..f252db71e025b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9098,9 +9098,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909" +checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" dependencies = [ "filetime", "libc", diff --git a/Cargo.toml b/Cargo.toml index 08f5d54e4c901d..c4bd10559a8c17 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -501,7 +501,7 @@ syn = "2.0" sys-info = "0.9.1" sysctl = "0.4.6" systemstat = "0.2.3" -tar = "0.4.41" +tar = "0.4.42" tarpc = "0.29.0" tempfile = "3.12.0" test-case = "3.3.1" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b77dbe3a7badf8..c78af59223b460 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7604,9 +7604,9 @@ dependencies = [ [[package]] name = "tar" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909" +checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" dependencies = [ "filetime", "libc", From a31a55e84063353521542353baa2db15cee64269 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 27 Sep 2024 19:43:25 +0200 Subject: [PATCH 403/529] Cleanup - `external_internal_function_hash_collision` (#3000) Cleanup of external_internal_function_hash_collision. 
--- programs/bpf_loader/src/syscalls/mod.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 65cc88d753a149..b131ec712da101 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -24,9 +24,9 @@ use { disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, disable_sbpf_v1_execution, enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, enable_big_mod_exp_syscall, enable_get_epoch_stake_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall, - error_on_syscall_bpf_function_hash_collisions, get_sysvar_syscall_enabled, - last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, - reenable_sbpf_v1_execution, remaining_compute_units_syscall_enabled, FeatureSet, + get_sysvar_syscall_enabled, last_restart_slot_sysvar, + partitioned_epoch_rewards_superfeature, reenable_sbpf_v1_execution, + remaining_compute_units_syscall_enabled, FeatureSet, }, solana_log_collector::{ic_logger_msg, ic_msg}, solana_poseidon as poseidon, @@ -297,8 +297,7 @@ pub fn create_program_runtime_environment_v1<'a>( reject_broken_elfs: reject_deployment_of_broken_elfs, noop_instruction_rate: 256, sanitize_user_provided_values: true, - external_internal_function_hash_collision: feature_set - .is_active(&error_on_syscall_bpf_function_hash_collisions::id()), + external_internal_function_hash_collision: true, reject_callx_r10: true, enable_sbpf_v1: !feature_set.is_active(&disable_sbpf_v1_execution::id()) || feature_set.is_active(&reenable_sbpf_v1_execution::id()), From 46f5fced6d3aab53c408c5db5734c995c21c5a95 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 27 Sep 2024 14:09:37 -0400 Subject: [PATCH 404/529] Upgrades rust to 1.81.0 (#3008) --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index a56a283d2abc1b..1de01fa45c4934 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.80.1" +channel = "1.81.0" From ce158213fd050b5ccd89bf0857cd06a4584069f6 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 27 Sep 2024 18:38:54 +0000 Subject: [PATCH 405/529] removes early return if prune_messages are empty (#3006) Even if there are no outgoing prune messages we still need to generate outgoing push messages for packets just received, so the code should not early return here: https://github.com/anza-xyz/agave/blob/d2cc71f0d/gossip/src/cluster_info.rs#L2400-L2402 --- gossip/src/cluster_info.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index f2e93765560ece..f0916970c9cae6 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2397,9 +2397,6 @@ impl ClusterInfo { .collect() }) }; - if prune_messages.is_empty() { - return; - } let mut packet_batch = PacketBatch::new_unpinned_with_recycler_data_and_dests( recycler, "handle_batch_push_messages", @@ -2429,7 +2426,9 @@ impl ClusterInfo { self.stats .packets_sent_push_messages_count .add_relaxed((packet_batch.len() - num_prune_packets) as u64); - let _ = response_sender.send(packet_batch); + if !packet_batch.is_empty() { + let _ = response_sender.send(packet_batch); + } } fn require_stake_for_gossip(&self, stakes: &HashMap) -> bool { From cc141d159b285d0968757455b350c7681ec393ba Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 27 Sep 2024 15:17:44 -0400 Subject: [PATCH 
406/529] Removes AccountStorageEntry::approx_store_count (#2953) --- accounts-db/src/accounts_db.rs | 43 ++++------------------------------ 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 162ce25cede85e..3fc2d01af44f82 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1082,14 +1082,6 @@ pub struct AccountStorageEntry { /// the append_vec, once maxed out, then emptied, can be reclaimed count_and_status: SeqLock<(usize, AccountStorageStatus)>, - /// This is the total number of accounts stored ever since initialized to keep - /// track of lifetime count of all store operations. And this differs from - /// count_and_status in that this field won't be decremented. - /// - /// This is used as a rough estimate for slot shrinking. As such a relaxed - /// use case, this value ARE NOT strictly synchronized with count_and_status! - approx_store_count: AtomicUsize, - alive_bytes: AtomicUsize, } @@ -1110,7 +1102,6 @@ impl AccountStorageEntry { slot, accounts, count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), - approx_store_count: AtomicUsize::new(0), alive_bytes: AtomicUsize::new(0), } } @@ -1127,7 +1118,6 @@ impl AccountStorageEntry { id: self.id, slot: self.slot, count_and_status: SeqLock::new(*count_and_status), - approx_store_count: AtomicUsize::new(self.approx_stored_count()), alive_bytes: AtomicUsize::new(self.alive_bytes()), accounts, }) @@ -1137,14 +1127,13 @@ impl AccountStorageEntry { slot: Slot, id: AccountsFileId, accounts: AccountsFile, - num_accounts: usize, + _num_accounts: usize, ) -> Self { Self { id, slot, accounts, count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), - approx_store_count: AtomicUsize::new(num_accounts), alive_bytes: AtomicUsize::new(0), } } @@ -1178,10 +1167,6 @@ impl AccountStorageEntry { self.count_and_status.read().0 } - pub fn approx_stored_count(&self) -> usize { - self.approx_store_count.load(Ordering::Relaxed) - } - pub fn alive_bytes(&self) -> usize { self.alive_bytes.load(Ordering::Acquire) } @@ -1217,8 +1202,6 @@ impl AccountStorageEntry { fn add_accounts(&self, num_accounts: usize, num_bytes: usize) { let mut count_and_status = self.count_and_status.lock_write(); *count_and_status = (count_and_status.0 + num_accounts, count_and_status.1); - self.approx_store_count - .fetch_add(num_accounts, Ordering::Relaxed); self.alive_bytes.fetch_add(num_bytes, Ordering::Release); } @@ -4385,7 +4368,7 @@ impl AccountsDb { let shrink_collect = self.shrink_collect::>(store, &unique_accounts, &self.shrink_stats); - // This shouldn't happen if alive_bytes/approx_stored_count are accurate. + // This shouldn't happen if alive_bytes is accurate. // However, it is possible that the remaining alive bytes could be 0. In that case, the whole slot should be marked dead by clean. 
if Self::should_not_shrink( shrink_collect.alive_total_bytes as u64, @@ -8040,16 +8023,14 @@ impl AccountsDb { fn is_shrinking_productive(store: &AccountStorageEntry) -> bool { let alive_count = store.count(); - let stored_count = store.approx_stored_count(); let alive_bytes = store.alive_bytes() as u64; let total_bytes = store.capacity(); if Self::should_not_shrink(alive_bytes, total_bytes) { trace!( - "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {}/{} ({}b / {}b) save: {}", + "shrink_slot_forced ({}): not able to shrink at all: num alive: {}, bytes alive: {}, bytes total: {}, bytes saved: {}", store.slot(), alive_count, - stored_count, alive_bytes, total_bytes, total_bytes.saturating_sub(alive_bytes), @@ -9378,12 +9359,6 @@ impl AccountsDb { store .alive_bytes .store(entry.stored_size, Ordering::Release); - assert!( - store.approx_stored_count() >= entry.count, - "{}, {}", - store.approx_stored_count(), - entry.count - ); } else { trace!("id: {} clearing count", id); store.count_and_status.lock_write().0 = 0; @@ -9424,11 +9399,10 @@ impl AccountsDb { for slot in &slots { let entry = self.storage.get_slot_storage_entry(*slot).unwrap(); info!( - " slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}", + " slot: {} id: {} count_and_status: {:?} len: {} capacity: {}", slot, entry.id(), entry.count_and_status.read(), - entry.approx_store_count.load(Ordering::Relaxed), entry.accounts.len(), entry.accounts.capacity(), ); @@ -9653,10 +9627,7 @@ impl AccountsDb { pub fn all_account_count_in_accounts_file(&self, slot: Slot) -> usize { let store = self.storage.get_slot_storage_entry(slot); if let Some(store) = store { - let count = store.accounts_count(); - let stored_count = store.approx_stored_count(); - assert_eq!(stored_count, count); - count + store.accounts_count() } else { 0 } @@ -9921,10 +9892,6 @@ pub mod tests { // construct append vec with account to generate an index from append_vec.accounts.append_accounts(&storable_accounts, 0); - // append vecs set this at load - append_vec - .approx_store_count - .store(data.len(), Ordering::Relaxed); let genesis_config = GenesisConfig::default(); assert!(!db.accounts_index.contains(&pubkey)); From 690fad08d47bb6aff66dce3ce1c5ec8d92950b63 Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 27 Sep 2024 15:19:38 -0400 Subject: [PATCH 407/529] Supports deserializing accounts lt hash in snapshots (#2994) --- Cargo.lock | 2 ++ accounts-db/src/accounts_hash.rs | 4 ++++ programs/sbf/Cargo.lock | 2 ++ runtime/Cargo.toml | 2 ++ runtime/src/serde_snapshot.rs | 6 ++++++ runtime/src/serde_snapshot/types.rs | 22 ++++++++++++++++++++++ 6 files changed, 38 insertions(+) create mode 100644 runtime/src/serde_snapshot/types.rs diff --git a/Cargo.lock b/Cargo.lock index f252db71e025b5..3055831aaf7cf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7662,6 +7662,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_with", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", @@ -7675,6 +7676,7 @@ dependencies = [ "solana-frozen-abi", "solana-frozen-abi-macro", "solana-inline-spl", + "solana-lattice-hash", "solana-loader-v4-program", "solana-logger", "solana-measure", diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 4dddbded4d219e..3e70f3d1fa0edb 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1265,6 +1265,10 @@ pub struct AccountLtHash(pub LtHash); pub const ZERO_LAMPORT_ACCOUNT_LT_HASH: 
AccountLtHash = AccountLtHash(LtHash([0; LtHash::NUM_ELEMENTS])); +/// Lattice hash of all accounts +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct AccountsLtHash(pub LtHash); + /// Hash of accounts #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum AccountsHashKind { diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c78af59223b460..626f2624711289 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5998,6 +5998,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_with", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", @@ -6009,6 +6010,7 @@ dependencies = [ "solana-feature-set", "solana-fee", "solana-inline-spl", + "solana-lattice-hash", "solana-loader-v4-program", "solana-measure", "solana-metrics", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 849d583b01aa3c..014995d592b612 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -46,6 +46,7 @@ regex = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } serde_json = { workspace = true } +serde_with = { workspace = true } solana-accounts-db = { workspace = true } solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } @@ -63,6 +64,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ "frozen-abi", ] } solana-inline-spl = { workspace = true } +solana-lattice-hash = { workspace = true } solana-loader-v4-program = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 7dd6e7bf7f21e8..1494413bcfb02e 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -57,10 +57,12 @@ use { thread::Builder, }, storage::SerializableStorage, + types::SerdeAccountsLtHash, }; mod storage; mod tests; +mod types; mod utils; pub(crate) use { @@ -400,6 +402,9 @@ struct ExtraFieldsToDeserialize { epoch_accounts_hash: Option, #[serde(deserialize_with = "default_on_eof")] versioned_epoch_stakes: HashMap, + #[serde(deserialize_with = "default_on_eof")] + #[allow(dead_code)] + accounts_lt_hash: Option, } /// Extra fields that are serialized at the end of snapshots. 
@@ -441,6 +446,7 @@ where incremental_snapshot_persistence, epoch_accounts_hash, versioned_epoch_stakes, + accounts_lt_hash: _, } = extra_fields; bank_fields.fee_rate_governor = bank_fields diff --git a/runtime/src/serde_snapshot/types.rs b/runtime/src/serde_snapshot/types.rs new file mode 100644 index 00000000000000..6dd9ef099cabaf --- /dev/null +++ b/runtime/src/serde_snapshot/types.rs @@ -0,0 +1,22 @@ +use {solana_accounts_db::accounts_hash::AccountsLtHash, solana_lattice_hash::lt_hash::LtHash}; + +/// Snapshot serde-safe AccountsLtHash +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[serde_with::serde_as] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct SerdeAccountsLtHash( + // serde only has array support up to 32 elements; anything larger needs to be handled manually + // see https://github.com/serde-rs/serde/issues/1937 for more information + #[serde_as(as = "[_; LtHash::NUM_ELEMENTS]")] pub [u16; LtHash::NUM_ELEMENTS], +); + +impl From for AccountsLtHash { + fn from(accounts_lt_hash: SerdeAccountsLtHash) -> Self { + Self(LtHash(accounts_lt_hash.0)) + } +} +impl From for SerdeAccountsLtHash { + fn from(accounts_lt_hash: AccountsLtHash) -> Self { + Self(accounts_lt_hash.0 .0) + } +} From 9f822c2f2c8d87bb2575df9df8e8949485a8b3b5 Mon Sep 17 00:00:00 2001 From: Brennan Date: Fri, 27 Sep 2024 12:21:14 -0700 Subject: [PATCH 408/529] increase writeable accounts cost hashmap size (#3009) --- cost-model/src/cost_tracker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 55d905047990a5..8caaa2ef3168cc 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -12,7 +12,7 @@ use { std::{cmp::Ordering, collections::HashMap}, }; -const WRITABLE_ACCOUNTS_PER_BLOCK: usize = 512; +const WRITABLE_ACCOUNTS_PER_BLOCK: usize = 4096; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum CostTrackerError { From bce28c028219a6f7f361834f65383ad0181811f3 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 27 Sep 2024 20:28:42 +0000 Subject: [PATCH 409/529] excludes node's pubkey from bloom filter of pruned origins (#2990) Bloom filter of pruned origins can return false positive for a node's own pubkey but a node should always be able to push its own values to other nodes in the cluster. --- gossip/src/push_active_set.rs | 44 ++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/gossip/src/push_active_set.rs b/gossip/src/push_active_set.rs index 1e7e3cbb22844c..83f84b8a0624ed 100644 --- a/gossip/src/push_active_set.rs +++ b/gossip/src/push_active_set.rs @@ -29,14 +29,15 @@ impl PushActiveSet { pub(crate) fn get_nodes<'a>( &'a self, - pubkey: &Pubkey, // This node. + pubkey: &'a Pubkey, // This node. origin: &'a Pubkey, // CRDS value owner. // If true forces gossip push even if the node has pruned the origin. should_force_push: impl FnMut(&Pubkey) -> bool + 'a, stakes: &HashMap, ) -> impl Iterator + 'a { let stake = stakes.get(pubkey).min(stakes.get(origin)); - self.get_entry(stake).get_nodes(origin, should_force_push) + self.get_entry(stake) + .get_nodes(pubkey, origin, should_force_push) } // Prunes origins for the given gossip node. @@ -110,14 +111,20 @@ impl PushActiveSetEntry { fn get_nodes<'a>( &'a self, - origin: &'a Pubkey, + pubkey: &'a Pubkey, // This node. + origin: &'a Pubkey, // CRDS value owner. // If true forces gossip push even if the node has pruned the origin. 
mut should_force_push: impl FnMut(&Pubkey) -> bool + 'a, ) -> impl Iterator + 'a { + let pubkey_eq_origin = pubkey == origin; self.0 .iter() .filter(move |(node, bloom_filter)| { - !bloom_filter.contains(origin) || should_force_push(node) + // Bloom filter can return false positive for origin == pubkey + // but a node should always be able to push its own values. + !bloom_filter.contains(origin) + || (pubkey_eq_origin && &pubkey != node) + || should_force_push(node) }) .map(|(node, _bloom_filter)| node) } @@ -175,7 +182,10 @@ fn get_stake_bucket(stake: Option<&u64>) -> usize { #[cfg(test)] mod tests { - use {super::*, rand::SeedableRng, rand_chacha::ChaChaRng, std::iter::repeat_with}; + use { + super::*, itertools::iproduct, rand::SeedableRng, rand_chacha::ChaChaRng, + std::iter::repeat_with, + }; #[test] fn test_get_stake_bucket() { @@ -274,13 +284,13 @@ mod tests { assert_eq!(entry.0.len(), 5); let keys = [&nodes[16], &nodes[11], &nodes[17], &nodes[14], &nodes[5]]; assert!(entry.0.keys().eq(keys)); - for origin in &nodes { + for (pubkey, origin) in iproduct!(&nodes, &nodes) { if !keys.contains(&origin) { - assert!(entry.get_nodes(origin, |_| false).eq(keys)); + assert!(entry.get_nodes(pubkey, origin, |_| false).eq(keys)); } else { - assert!(entry.get_nodes(origin, |_| true).eq(keys)); + assert!(entry.get_nodes(pubkey, origin, |_| true).eq(keys)); assert!(entry - .get_nodes(origin, |_| false) + .get_nodes(pubkey, origin, |_| false) .eq(keys.into_iter().filter(|&key| key != origin))); } } @@ -288,10 +298,10 @@ mod tests { for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } - for origin in keys { - assert!(entry.get_nodes(origin, |_| true).eq(keys)); + for (pubkey, origin) in iproduct!(&nodes, keys) { + assert!(entry.get_nodes(pubkey, origin, |_| true).eq(keys)); assert!(entry - .get_nodes(origin, |_| false) + .get_nodes(pubkey, origin, |_| false) .eq(keys.into_iter().filter(|&node| node != origin))); } // Assert that prune excludes node from get. @@ -299,10 +309,12 @@ mod tests { entry.prune(&nodes[11], origin); entry.prune(&nodes[14], origin); entry.prune(&nodes[19], origin); - assert!(entry.get_nodes(origin, |_| true).eq(keys)); - assert!(entry.get_nodes(origin, |_| false).eq(keys - .into_iter() - .filter(|&&node| node != nodes[11] && node != nodes[14]))); + for pubkey in &nodes { + assert!(entry.get_nodes(pubkey, origin, |_| true).eq(keys)); + assert!(entry.get_nodes(pubkey, origin, |_| false).eq(keys + .into_iter() + .filter(|&&node| pubkey == origin || (node != nodes[11] && node != nodes[14])))); + } // Assert that rotate adds new nodes. 
entry.rotate(&mut rng, 5, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[11], &nodes[17], &nodes[14], &nodes[5], &nodes[7]]; From 9c2098450ca7e5271e3690277992fbc910be27d0 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Fri, 27 Sep 2024 22:03:45 -0700 Subject: [PATCH 410/529] runtime: simplify account saver filter (#2992) --- runtime/src/account_saver.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index 941e4934175af3..4d7be1150b89d1 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -110,18 +110,19 @@ fn collect_accounts_for_successful_tx<'a, T: SVMMessage>( transaction_ref: Option<&'a SanitizedTransaction>, transaction_accounts: &'a [TransactionAccount], ) { - for (_, (address, account)) in (0..transaction.account_keys().len()) - .zip(transaction_accounts) - .filter(|(i, _)| { - transaction.is_writable(*i) && { - // Accounts that are invoked and also not passed as an instruction - // account to a program don't need to be stored because it's assumed - // to be impossible for a committable transaction to modify an - // invoked account if said account isn't passed to some program. - !transaction.is_invoked(*i) || transaction.is_instruction_account(*i) - } - }) - { + for (i, (address, account)) in (0..transaction.account_keys().len()).zip(transaction_accounts) { + if !transaction.is_writable(i) { + continue; + } + + // Accounts that are invoked and also not passed as an instruction + // account to a program don't need to be stored because it's assumed + // to be impossible for a committable transaction to modify an + // invoked account if said account isn't passed to some program. 
+ if transaction.is_invoked(i) && !transaction.is_instruction_account(i) { + continue; + } + collected_accounts.push((address, account)); if let Some(collected_account_transactions) = collected_account_transactions { collected_account_transactions From e490ae9f7ff9ae813e76386cdb9ca20504ebb8c0 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 30 Sep 2024 08:01:00 -0500 Subject: [PATCH 411/529] RuntimeTransactions: implement SVM traits (#2987) --- .../src/runtime_transaction.rs | 73 ++++++++++++++++++- 1 file changed, 71 insertions(+), 2 deletions(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 24bb512b203a0d..c8a15b6fbc7b7a 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -20,12 +20,16 @@ use { solana_sdk::{ feature_set::FeatureSet, hash::Hash, - message::{AddressLoader, TransactionSignatureDetails}, + message::{AccountKeys, AddressLoader, TransactionSignatureDetails}, pubkey::Pubkey, + signature::Signature, simple_vote_transaction_checker::is_simple_vote_transaction, transaction::{Result, SanitizedTransaction, SanitizedVersionedTransaction}, }, - solana_svm_transaction::instruction::SVMInstruction, + solana_svm_transaction::{ + instruction::SVMInstruction, message_address_table_lookup::SVMMessageAddressTableLookup, + svm_message::SVMMessage, svm_transaction::SVMTransaction, + }, std::collections::HashSet, }; @@ -153,6 +157,71 @@ impl RuntimeTransaction { } } +impl SVMMessage for RuntimeTransaction { + // override to access from the cached meta instead of re-calculating + fn num_total_signatures(&self) -> u64 { + self.meta.signature_details.total_signatures() + } + + fn num_write_locks(&self) -> u64 { + self.transaction.num_write_locks() + } + + fn recent_blockhash(&self) -> &Hash { + self.transaction.recent_blockhash() + } + + fn num_instructions(&self) -> usize { + self.transaction.num_instructions() + } + + fn instructions_iter(&self) -> impl Iterator { + self.transaction.instructions_iter() + } + + fn program_instructions_iter(&self) -> impl Iterator { + self.transaction.program_instructions_iter() + } + + fn account_keys(&self) -> AccountKeys { + self.transaction.account_keys() + } + + fn fee_payer(&self) -> &Pubkey { + self.transaction.fee_payer() + } + + fn is_writable(&self, index: usize) -> bool { + self.transaction.is_writable(index) + } + + fn is_signer(&self, index: usize) -> bool { + self.transaction.is_signer(index) + } + + fn is_invoked(&self, key_index: usize) -> bool { + self.transaction.is_invoked(key_index) + } + + fn num_lookup_tables(&self) -> usize { + self.transaction.num_lookup_tables() + } + + fn message_address_table_lookups(&self) -> impl Iterator { + self.transaction.message_address_table_lookups() + } +} + +impl SVMTransaction for RuntimeTransaction { + fn signature(&self) -> &Signature { + self.transaction.signature() + } + + fn signatures(&self) -> &[Signature] { + self.transaction.signatures() + } +} + #[cfg(test)] mod tests { use { From ab0977fb944c4ed84c88f4ec84febadd3513ea38 Mon Sep 17 00:00:00 2001 From: Ryo Onodera Date: Mon, 30 Sep 2024 22:21:22 +0900 Subject: [PATCH 412/529] Fix frozen_abi for not-Bytes serde_with::serde_as (#3018) --- Cargo.lock | 1 + frozen-abi/Cargo.toml | 2 ++ frozen-abi/src/abi_digester.rs | 30 ++++++++++++++++++++++++++++++ frozen-abi/src/abi_example.rs | 8 ++++++++ 4 files changed, 41 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 3055831aaf7cf4..b257ddd582124e 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -6617,6 +6617,7 @@ dependencies = [ "serde", "serde_bytes", "serde_derive", + "serde_with", "sha2 0.10.8", "solana-frozen-abi-macro", "solana-logger", diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 78f0c6a67e5f83..b85de97dee3b0f 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -15,6 +15,7 @@ bv = { workspace = true, features = ["serde"] } log = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } +serde_with = { workspace = true } sha2 = { workspace = true } solana-frozen-abi-macro = { workspace = true } thiserror = { workspace = true } @@ -28,6 +29,7 @@ memmap2 = { workspace = true } bitflags = { workspace = true, features = ["serde"] } serde_bytes = { workspace = true } solana-logger = { workspace = true } +serde_with = { workspace = true, features = ["macros"] } [features] default = [] diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index 9d2ee5f296d470..c13fdd45022276 100644 --- a/frozen-abi/src/abi_digester.rs +++ b/frozen-abi/src/abi_digester.rs @@ -684,6 +684,36 @@ mod tests { } } + mod serde_with_abi { + use serde_with::{serde_as, Bytes}; + + // This is a minimized testcase based on solana_sdk::packet::Packet + #[serde_as] + #[derive(serde_derive::Serialize, AbiExample)] + #[frozen_abi(digest = "DcR9EB87D4uQBjUrsendvcFgS5KSF7okjnxGx8ZaDE8Z")] + struct U8ArrayWithBytes { + #[serde_as(as = "Bytes")] + foo: [u8; 42], + } + + #[serde_as] + #[derive(serde_derive::Serialize, AbiExample)] + #[frozen_abi(digest = "CVqaXh4pWCiUyAuZ6dZPCmbCEtJyNH3e6uwUpJzymT6b")] + struct U8ArrayWithGenericAs { + #[serde_as(as = "[_; 42]")] + foo: [u8; 42], + } + + // This is a minimized testcase based on solana_lattice_hash::lt_hash::LtHash + #[serde_as] + #[derive(serde_derive::Serialize, AbiExample)] + #[frozen_abi(digest = "A1J57qgtrhpqk6vD4tjV1CHLPagacBKsXJBBUB5mdp5W")] + struct NotU8ArrayWithGenericAs { + #[serde_as(as = "[_; 42]")] + bar: [u16; 42], + } + } + mod skip_should_be_same { #[frozen_abi(digest = "4LbuvQLX78XPbm4hqqZcHFHpseDJcw4qZL9EUZXSi2Ss")] #[derive(serde_derive::Serialize, AbiExample)] diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index d9187d16c04333..b20a38e0eeb68c 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -245,6 +245,14 @@ impl EvenAsOpaque for BitVec { const TYPE_NAME_MATCHER: &'static str = "bv::bit_vec::inner::"; } +use serde_with::ser::SerializeAsWrap; +impl<'a, T: ?Sized, U: ?Sized> TransparentAsHelper for SerializeAsWrap<'a, T, U> {} +// This (EvenAsOpaque) marker trait is needed for serde_with's serde_as(...) because this struct is +// basically a wrapper struct. 
+impl<'a, T: ?Sized, U: ?Sized> EvenAsOpaque for SerializeAsWrap<'a, T, U> { + const TYPE_NAME_MATCHER: &'static str = "serde_with::ser::SerializeAsWrap<"; +} + pub(crate) fn normalize_type_name(type_name: &str) -> String { type_name.chars().filter(|c| *c != '&').collect() } From c7283625a9efa64377597e7e098a0f9965ff259a Mon Sep 17 00:00:00 2001 From: Lucas Ste <38472950+LucasSte@users.noreply.github.com> Date: Mon, 30 Sep 2024 22:36:03 +0800 Subject: [PATCH 413/529] Use checked math for `_new_from_parent` (#2859) * Use checked math * Use checked operations --- runtime/src/bank.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7a9a900bd8e019..77661a4bc43cf2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1217,8 +1217,15 @@ impl Bank { epoch_schedule, collected_rent: AtomicU64::new(0), rent_collector: Self::get_rent_collector_from(&parent.rent_collector, epoch), - max_tick_height: (slot + 1) * parent.ticks_per_slot, - block_height: parent.block_height + 1, + max_tick_height: slot + .checked_add(1) + .expect("max tick height addition overflowed") + .checked_mul(parent.ticks_per_slot) + .expect("max tick height multiplication overflowed"), + block_height: parent + .block_height + .checked_add(1) + .expect("block height addition overflowed"), fee_rate_governor, capitalization: AtomicU64::new(parent.capitalization()), vote_only_bank, From 0b44eb62dbe10ad562faa4b3dc5c69a6d58a39bd Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 30 Sep 2024 11:42:15 -0500 Subject: [PATCH 414/529] runtime-transaction no gate traits (#2986) --- runtime-transaction/src/runtime_transaction.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index c8a15b6fbc7b7a..14c13138025b31 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -41,19 +41,7 @@ pub struct RuntimeTransaction { meta: TransactionMeta, } -// These traits gate access to static and dynamic metadata -// so that only transactions with supporting message types -// can access them. -trait StaticMetaAccess {} -trait DynamicMetaAccess: StaticMetaAccess {} - -// Implement the gate traits for the message types that should -// have access to the static and dynamic metadata. -impl StaticMetaAccess for SanitizedVersionedTransaction {} -impl StaticMetaAccess for SanitizedTransaction {} -impl DynamicMetaAccess for SanitizedTransaction {} - -impl StaticMeta for RuntimeTransaction { +impl StaticMeta for RuntimeTransaction { fn message_hash(&self) -> &Hash { &self.meta.message_hash } @@ -70,7 +58,7 @@ impl StaticMeta for RuntimeTransaction { } } -impl DynamicMeta for RuntimeTransaction {} +impl DynamicMeta for RuntimeTransaction {} impl Deref for RuntimeTransaction { type Target = T; From 489f483e1d7b30ef114e0123994818b2accfa389 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Mon, 30 Sep 2024 17:37:39 +0000 Subject: [PATCH 415/529] reworks max number of outgoing push messages (#3016) max_bytes for outgoing push messages is pretty outdated and does not allow gossip to function properly with current testnet cluster size. In particular it does not allow to clear out queue of pending push messages unless the new_push_messages function is called very frequently which involves repeatedly locking/unlocking CRDS table. 
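Before the diff below, a minimal model of the replacement strategy may help: cap outgoing work by push count instead of by accumulated serialized size, so one call always drains a bounded, predictable slice of the queue regardless of how large individual values are. This is a hedged toy, not the gossip code; only the loop shape and the labeled break mirror the patch.

// Budget by number of pushes rather than serialized bytes. The labeled
// break stops the whole scan once the budget is spent.
const MAX_NUM_PUSHES: usize = 1 << 12;

fn plan_pushes<T: Clone>(values: &[T], fanout: usize) -> Vec<T> {
    let mut pushes = Vec::new();
    'outer: for value in values {
        for _ in 0..fanout {
            pushes.push(value.clone());
            if pushes.len() >= MAX_NUM_PUSHES {
                break 'outer;
            }
        }
    }
    pushes
}

Counting pushes also drops the per-value bincode::serialized_size call from the hot loop, which is visible in the diff below.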
Additionally leaving gossip entries in the queue for the next round will add delay to propagating push messages which can compound as messages go through several hops. --- gossip/src/crds_gossip_push.rs | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index f525baa051dea6..49f04f6cd30d3a 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -21,10 +21,8 @@ use { push_active_set::PushActiveSet, received_cache::ReceivedCache, }, - bincode::serialized_size, itertools::Itertools, solana_sdk::{ - packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::{Keypair, Signer}, timing::timestamp, @@ -53,8 +51,6 @@ const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2; const CRDS_GOSSIP_PUSH_ACTIVE_SET_SIZE: usize = CRDS_GOSSIP_PUSH_FANOUT + 3; pub struct CrdsGossipPush { - /// Max bytes per message - max_bytes: usize, /// Active set of validators for push active_set: RwLock, /// Cursor into the crds table for values to push. @@ -74,8 +70,6 @@ pub struct CrdsGossipPush { impl Default for CrdsGossipPush { fn default() -> Self { Self { - // Allow upto 64 Crds Values per PUSH - max_bytes: PACKET_DATA_SIZE * 64, active_set: RwLock::default(), crds_cursor: Mutex::default(), received_cache: Mutex::new(ReceivedCache::new(2 * CRDS_UNIQUE_PUBKEY_CAPACITY)), @@ -180,10 +174,10 @@ impl CrdsGossipPush { usize, // number of values usize, // number of push messages ) { + const MAX_NUM_PUSHES: usize = 1 << 12; let active_set = self.active_set.read().unwrap(); let mut num_pushes = 0; let mut num_values = 0; - let mut total_bytes: usize = 0; let mut push_messages: HashMap> = HashMap::new(); let wallclock_window = self.wallclock_window(now); let mut crds_cursor = self.crds_cursor.lock().unwrap(); @@ -193,12 +187,7 @@ impl CrdsGossipPush { .get_entries(crds_cursor.deref_mut()) .map(|entry| &entry.value) .filter(|value| wallclock_window.contains(&value.wallclock())); - for value in entries { - let serialized_size = serialized_size(&value).unwrap(); - total_bytes = total_bytes.saturating_add(serialized_size as usize); - if total_bytes > self.max_bytes { - break; - } + 'outer: for value in entries { num_values += 1; let origin = value.pubkey(); let nodes = active_set.get_nodes( @@ -210,6 +199,9 @@ impl CrdsGossipPush { for node in nodes.take(self.push_fanout) { push_messages.entry(*node).or_default().push(value.clone()); num_pushes += 1; + if num_pushes >= MAX_NUM_PUSHES { + break 'outer; + } } } drop(crds); From 6f26b6532084c4a3e50252f35f746a1537b1a025 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 1 Oct 2024 20:06:56 +0400 Subject: [PATCH 416/529] add #![no_std] to solana-clock (#3023) --- sdk/clock/src/lib.rs | 1 + sdk/macro/src/lib.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/sdk/clock/src/lib.rs b/sdk/clock/src/lib.rs index 870265f9c18eb9..d8006a9ec408f3 100644 --- a/sdk/clock/src/lib.rs +++ b/sdk/clock/src/lib.rs @@ -19,6 +19,7 @@ //! validator set][oracle]. //! //! 
[oracle]: https://docs.solanalabs.com/implemented-proposals/validator-timestamp-oracle +#![no_std] #[cfg(feature = "serde")] use serde_derive::{Deserialize, Serialize}; diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index df56e1e71ffc38..e3380712ef9d1a 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -319,7 +319,7 @@ pub fn derive_clone_zeroed(input: proc_macro::TokenStream) -> proc_macro::TokenS syn::Fields::Named(ref fields) => fields.named.iter().map(|f| { let name = &f.ident; quote! { - std::ptr::addr_of_mut!((*ptr).#name).write(self.#name); + core::ptr::addr_of_mut!((*ptr).#name).write(self.#name); } }), _ => unimplemented!(), @@ -332,9 +332,9 @@ pub fn derive_clone_zeroed(input: proc_macro::TokenStream) -> proc_macro::TokenS // This is not the case here, and intentionally so because we want to // guarantee zeroed padding. fn clone(&self) -> Self { - let mut value = std::mem::MaybeUninit::::uninit(); + let mut value = core::mem::MaybeUninit::::uninit(); unsafe { - std::ptr::write_bytes(&mut value, 0, 1); + core::ptr::write_bytes(&mut value, 0, 1); let ptr = value.as_mut_ptr(); #(#clone_statements)* value.assume_init() From 4ac0d578dac3a73ae6702a0b4d2f5ea6d03b6e41 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 1 Oct 2024 22:47:23 +0400 Subject: [PATCH 417/529] remove unused deps from solana-measure (#3024) --- Cargo.lock | 4 ---- measure/Cargo.toml | 4 ---- programs/sbf/Cargo.lock | 4 ---- 3 files changed, 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b257ddd582124e..65d7ce94aaee19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6990,10 +6990,6 @@ dependencies = [ [[package]] name = "solana-measure" version = "2.1.0" -dependencies = [ - "log", - "solana-sdk", -] [[package]] name = "solana-memory-management" diff --git a/measure/Cargo.toml b/measure/Cargo.toml index a35dbe4579a6a7..2cad7a2270278e 100644 --- a/measure/Cargo.toml +++ b/measure/Cargo.toml @@ -10,9 +10,5 @@ homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -[dependencies] -log = { workspace = true } -solana-sdk = { workspace = true } - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 626f2624711289..2b069250d219a5 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5497,10 +5497,6 @@ dependencies = [ [[package]] name = "solana-measure" version = "2.1.0" -dependencies = [ - "log", - "solana-sdk", -] [[package]] name = "solana-merkle-tree" From d940e77ec4ab780339e56157d2a11c6d80c0b3a7 Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 1 Oct 2024 17:01:12 -0400 Subject: [PATCH 418/529] Adds meas_dur!() macro (#3043) --- measure/src/macros.rs | 60 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 5 deletions(-) diff --git a/measure/src/macros.rs b/measure/src/macros.rs index f9ec0702db4d21..594929bf0ec64a 100644 --- a/measure/src/macros.rs +++ b/measure/src/macros.rs @@ -6,13 +6,15 @@ /// /// Use `measure_us!()` when you want to measure an expression in microseconds. /// +/// Use `meas_dur!()` when you want to measure an expression and get the Duration. 
+/// /// [`Measure`]: crate::measure::Measure /// /// # Examples /// /// ``` /// // Measure functions -/// # use solana_measure::{measure_time, measure_us}; +/// # use solana_measure::{measure_time, measure_us, meas_dur}; /// # fn foo() {} /// # fn bar(x: i32) {} /// # fn add(x: i32, y: i32) -> i32 {x + y} @@ -20,12 +22,13 @@ /// let (result, measure) = measure_time!(bar(42), "bar takes one parameter"); /// let (result, measure) = measure_time!(add(1, 2), "add takes two parameters and returns a value"); /// let (result, measure_us) = measure_us!(add(1, 2)); +/// let (result, duration) = meas_dur!(add(1, 2)); /// # assert_eq!(result, 1 + 2); /// ``` /// /// ``` /// // Measure methods -/// # use solana_measure::{measure_time, measure_us}; +/// # use solana_measure::{measure_time, measure_us, meas_dur}; /// # struct Foo { /// # f: i32, /// # } @@ -37,6 +40,7 @@ /// let foo = Foo { f: 42 }; /// let (result, measure) = measure_time!(foo.frobnicate(2), "measure methods"); /// let (result, measure_us) = measure_us!(foo.frobnicate(2)); +/// let (result, duration) = meas_dur!(foo.frobnicate(2)); /// # assert_eq!(result, 42 * 2); /// ``` /// @@ -82,10 +86,31 @@ macro_rules! measure_time { #[macro_export] macro_rules! measure_us { - ($val:expr) => {{ + ($expr:expr) => {{ + let (result, duration) = $crate::meas_dur!($expr); + (result, duration.as_micros() as u64) + }}; +} + +/// Measures how long it takes to execute an expression, and returns a Duration +/// +/// # Examples +/// +/// ``` +/// # use solana_measure::meas_dur; +/// # fn meow(x: i32, y: i32) -> i32 {x + y} +/// let (result, duration) = meas_dur!(meow(1, 2) + 3); +/// # assert_eq!(result, 1 + 2 + 3); +/// ``` +// +// The macro name, `meas_dur`, is "measure" + "duration". +// When said aloud, the pronunciation is close to "measure". +#[macro_export] +macro_rules! 
meas_dur { + ($expr:expr) => {{ let start = std::time::Instant::now(); - let result = $val; - (result, start.elapsed().as_micros() as u64) + let result = $expr; + (result, start.elapsed()) }}; } @@ -185,4 +210,29 @@ mod tests { assert_eq!(result, 3); } } + + #[test] + fn test_meas_dur_macro() { + // Ensure that the macro can be called with functions + { + let (result, _duration) = meas_dur!(my_multiply(3, 4)); + assert_eq!(result, 3 * 4); + + let (result, _duration) = meas_dur!(square(5)); + assert_eq!(result, 5 * 5) + } + + // Ensure that the macro can be called with methods + { + let some_struct = SomeStruct { x: 42 }; + let (result, _duration) = meas_dur!(some_struct.add_to(4)); + assert_eq!(result, 42 + 4); + } + + // Ensure that the macro can be called with blocks + { + let (result, _duration) = meas_dur!({ 1 + 2 }); + assert_eq!(result, 3); + } + } } From 18059246e5e845855a2d4f8f0441a4fabd101ca6 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Tue, 1 Oct 2024 16:25:50 -0500 Subject: [PATCH 419/529] clean up shrink candidate stat (#3037) rework shrink select stat Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 79 +++++++++++++--------------------- 1 file changed, 31 insertions(+), 48 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 3fc2d01af44f82..1d507ccb88a889 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -4576,17 +4576,13 @@ impl AccountsDb { alive_ratio: f64, store: Arc, } - let mut measure = Measure::start("select_top_sparse_storage_entries-ms"); let mut store_usage: Vec = Vec::with_capacity(shrink_slots.len()); let mut total_alive_bytes: u64 = 0; - let mut candidates_count: usize = 0; let mut total_bytes: u64 = 0; - let mut total_candidate_stores: usize = 0; for slot in shrink_slots { let Some(store) = self.storage.get_slot_storage_entry(*slot) else { continue; }; - candidates_count += 1; let alive_bytes = store.alive_bytes(); total_alive_bytes += alive_bytes as u64; total_bytes += store.capacity(); @@ -4596,7 +4592,6 @@ impl AccountsDb { alive_ratio, store: store.clone(), }); - total_candidate_stores += 1; } store_usage.sort_by(|a, b| { a.alive_ratio @@ -4633,20 +4628,6 @@ impl AccountsDb { shrink_slots.insert(usage.slot, Arc::clone(store)); } } - measure.stop(); - inc_new_counter_debug!( - "shrink_select_top_sparse_storage_entries-ms", - measure.as_ms() as usize - ); - inc_new_counter_debug!( - "shrink_select_top_sparse_storage_entries-seeds", - candidates_count - ); - inc_new_counter_debug!( - "shrink_total_preliminary_candidate_stores", - total_candidate_stores - ); - (shrink_slots, shrink_slots_next_batch) } @@ -5050,8 +5031,8 @@ impl AccountsDb { let shrink_candidates_slots = std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap()); - - let (shrink_slots, shrink_slots_next_batch) = { + let candidates_count = shrink_candidates_slots.len(); + let ((shrink_slots, shrink_slots_next_batch), select_time_us) = measure_us!({ if let AccountShrinkThreshold::TotalSpace { shrink_ratio } = self.shrink_ratio { let (shrink_slots, shrink_slots_next_batch) = self.select_candidates_by_total_usage(&shrink_candidates_slots, shrink_ratio); @@ -5070,7 +5051,7 @@ impl AccountsDb { None, ) } - }; + }); if shrink_slots.is_empty() && shrink_slots_next_batch @@ -5084,41 +5065,43 @@ impl AccountsDb { let _guard = (!shrink_slots.is_empty()) .then_some(|| self.active_stats.activate(ActiveStatItem::Shrink)); - let mut measure_shrink_all_candidates = 
Measure::start("shrink_all_candidate_slots-ms"); - let num_candidates = shrink_slots.len(); - let shrink_candidates_count = shrink_slots.len(); - self.thread_pool_clean.install(|| { - shrink_slots - .into_par_iter() - .for_each(|(slot, slot_shrink_candidate)| { - if self.ancient_append_vec_offset.is_some() && slot < oldest_non_ancient_slot { - self.shrink_stats - .num_ancient_slots_shrunk - .fetch_add(1, Ordering::Relaxed); - } - let mut measure = Measure::start("shrink_candidate_slots-ms"); - self.shrink_storage(&slot_shrink_candidate); - measure.stop(); - inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize); - }); + let num_selected = shrink_slots.len(); + let (_, shrink_all_us) = measure_us!({ + self.thread_pool_clean.install(|| { + shrink_slots + .into_par_iter() + .for_each(|(slot, slot_shrink_candidate)| { + if self.ancient_append_vec_offset.is_some() + && slot < oldest_non_ancient_slot + { + self.shrink_stats + .num_ancient_slots_shrunk + .fetch_add(1, Ordering::Relaxed); + } + self.shrink_storage(&slot_shrink_candidate); + }); + }) }); - measure_shrink_all_candidates.stop(); - inc_new_counter_info!( - "shrink_all_candidate_slots-ms", - measure_shrink_all_candidates.as_ms() as usize - ); - inc_new_counter_info!("shrink_all_candidate_slots-count", shrink_candidates_count); + let mut pended_counts: usize = 0; if let Some(shrink_slots_next_batch) = shrink_slots_next_batch { let mut shrink_slots = self.shrink_candidate_slots.lock().unwrap(); - pended_counts += shrink_slots_next_batch.len(); + pended_counts = shrink_slots_next_batch.len(); for slot in shrink_slots_next_batch { shrink_slots.insert(slot); } } - inc_new_counter_info!("shrink_pended_stores-count", pended_counts); - num_candidates + datapoint_info!( + "shrink_candidate_slots", + ("select_time_us", select_time_us, i64), + ("shrink_all_us", shrink_all_us, i64), + ("candidates_count", candidates_count, i64), + ("selected_count", num_selected, i64), + ("deferred_to_next_round_count", pended_counts, i64) + ); + + num_selected } /// This is only called at startup from bank when we are being extra careful such as when we downloaded a snapshot. From 4a1b75f8453bd6fa865d6a8c6e9c371fa294f9ed Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 2 Oct 2024 01:20:43 +0000 Subject: [PATCH 420/529] uses datagrams for repair over QUIC protocol (#2871) Incoming packets can be either: RepairProtocol RepairResponse or Shred + repair Nonce AncestorHashesResponse So, we need 3 QUIC endpoints on 3 separate sockets to correctly distinguish between these packets and send them down the right channel. 1) serve_repair_quic: The server side receives incoming RepairProtocols from the cluster and channels them to serve_repair using a Sender channel. The outgoing repair (or ancestor hashes) responses from serve_repair are sent back to the client side through a AsyncReceiver<(SocketAddr, Bytes)> channel and sent back to the remote node. 2) repair_quic: Outgoing repair requests from the repair_service are received by the client through a AsyncReceiver<(SocketAddr, Bytes)> channel and sent to serve_repair_quic socket of the remote node. Incoming repair responses (RepairResponse or Shred + repair Nonce) are channeled to shred-fetch-stage using a Sender<(Pubkey, SocketAddr, Bytes)> channel. 3) ancestor_hashes_requests_quic: Outgoing RepairProtocol::AncestorHashes from the ancestor_hashes_service are received by the client through a AsyncReceiver<(SocketAddr, Bytes)> channel and sent to serve_repair_quic socket of the remote node. 
Incoming AncestorHashesResponse are channeled back to ancestor_hashes_service using a Sender<(Pubkey, SocketAddr, Bytes)> channel. --- core/src/repair/ancestor_hashes_service.rs | 100 +-- core/src/repair/quic_endpoint.rs | 855 ++++++++++----------- core/src/repair/repair_service.rs | 23 +- core/src/repair/serve_repair.rs | 101 ++- core/src/repair/serve_repair_service.rs | 19 +- core/src/shred_fetch_stage.rs | 64 +- core/src/tvu.rs | 34 +- core/src/validator.rs | 98 ++- core/src/window_service.rs | 17 +- gossip/src/cluster_info.rs | 20 + net-utils/src/lib.rs | 2 +- 11 files changed, 615 insertions(+), 718 deletions(-) diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index a20794189a19ff..b36878976ef960 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -7,23 +7,22 @@ use { }, outstanding_requests::OutstandingRequests, packet_threshold::DynamicPacketToProcessThreshold, - quic_endpoint::LocalRequest, repair_service::{AncestorDuplicateSlotsSender, RepairInfo, RepairStatsGroup}, - request_response::RequestResponse, serve_repair::{ self, AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair, }, }, replay_stage::DUPLICATE_THRESHOLD, - shred_fetch_stage::receive_repair_quic_packets, + shred_fetch_stage::receive_quic_datagrams, }, bincode::serialize, + bytes::Bytes, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, dashmap::{mapref::entry::Entry::Occupied, DashMap}, solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol, ping_pong::Pong}, solana_ledger::blockstore::Blockstore, solana_perf::{ - packet::{deserialize_from_with_limit, Packet, PacketBatch}, + packet::{deserialize_from_with_limit, Packet, PacketBatch, PacketFlags}, recycler::Recycler, }, solana_runtime::bank::Bank, @@ -153,7 +152,8 @@ impl AncestorHashesService { exit: Arc, blockstore: Arc, ancestor_hashes_request_socket: Arc, - quic_endpoint_sender: AsyncSender, + ancestor_hashes_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_response_quic_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, repair_info: RepairInfo, ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver, ) -> Self { @@ -171,17 +171,17 @@ impl AncestorHashesService { Duration::from_millis(1), // coalesce false, // use_pinned_memory None, // in_vote_only_mode - false, // is_staked_service + false, // is_staked_service ); - let (quic_endpoint_response_sender, quic_endpoint_response_receiver) = unbounded(); let t_receiver_quic = { let exit = exit.clone(); Builder::new() .name(String::from("solAncHashQuic")) .spawn(|| { - receive_repair_quic_packets( - quic_endpoint_response_receiver, + receive_quic_datagrams( + ancestor_hashes_response_quic_receiver, + PacketFlags::REPAIR, response_sender, Recycler::default(), exit, @@ -210,8 +210,7 @@ impl AncestorHashesService { let t_ancestor_requests = Self::run_manage_ancestor_requests( ancestor_hashes_request_statuses, ancestor_hashes_request_socket, - quic_endpoint_sender, - quic_endpoint_response_sender, + ancestor_hashes_request_quic_sender, repair_info, outstanding_requests, exit, @@ -586,8 +585,7 @@ impl AncestorHashesService { fn run_manage_ancestor_requests( ancestor_hashes_request_statuses: Arc>, ancestor_hashes_request_socket: Arc, - quic_endpoint_sender: AsyncSender, - quic_endpoint_response_sender: Sender<(SocketAddr, Vec)>, + ancestor_hashes_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, repair_info: RepairInfo, 
outstanding_requests: Arc>, exit: Arc, @@ -627,8 +625,7 @@ impl AncestorHashesService { Self::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -650,8 +647,7 @@ impl AncestorHashesService { fn manage_ancestor_requests( ancestor_hashes_request_statuses: &DashMap, ancestor_hashes_request_socket: &UdpSocket, - quic_endpoint_sender: &AsyncSender, - quic_endpoint_response_sender: &Sender<(SocketAddr, Vec)>, + ancestor_hashes_request_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, repair_info: &RepairInfo, outstanding_requests: &RwLock, ancestor_hashes_replay_update_receiver: &AncestorHashesReplayUpdateReceiver, @@ -750,8 +746,7 @@ impl AncestorHashesService { if Self::initiate_ancestor_hashes_requests_for_duplicate_slot( ancestor_hashes_request_statuses, ancestor_hashes_request_socket, - quic_endpoint_sender, - quic_endpoint_response_sender, + ancestor_hashes_request_quic_sender, &repair_info.cluster_slots, serve_repair, &repair_info.repair_validators, @@ -829,8 +824,7 @@ impl AncestorHashesService { fn initiate_ancestor_hashes_requests_for_duplicate_slot( ancestor_hashes_request_statuses: &DashMap, ancestor_hashes_request_socket: &UdpSocket, - quic_endpoint_sender: &AsyncSender, - quic_endpoint_response_sender: &Sender<(SocketAddr, Vec)>, + ancestor_hashes_request_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, cluster_slots: &ClusterSlots, serve_repair: &ServeRepair, repair_validators: &Option>, @@ -873,16 +867,10 @@ impl AncestorHashesService { let _ = ancestor_hashes_request_socket.send_to(&request_bytes, socket_addr); } Protocol::QUIC => { - let num_expected_responses = - usize::try_from(ancestor_hashes_repair_type.num_expected_responses()) - .unwrap(); - let request = LocalRequest { - remote_address: *socket_addr, - bytes: request_bytes, - num_expected_responses, - response_sender: quic_endpoint_response_sender.clone(), - }; - if quic_endpoint_sender.blocking_send(request).is_err() { + if ancestor_hashes_request_quic_sender + .blocking_send((*socket_addr, Bytes::from(request_bytes))) + .is_err() + { // The receiver end of the channel is disconnected. break; } @@ -1316,10 +1304,12 @@ mod test { let t_packet_adapter = Builder::new() .spawn(|| adapt_repair_requests_packets(requests_receiver, remote_request_sender)) .unwrap(); + let (repair_response_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); let t_listen = responder_serve_repair.listen( blockstore, remote_request_receiver, response_sender, + repair_response_quic_sender, exit.clone(), ); @@ -1511,14 +1501,12 @@ mod test { repair_validators, .. 
} = repair_info; - let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); - let (quic_endpoint_sender, _quic_endpoint_sender) = + let (ancestor_hashes_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); AncestorHashesService::initiate_ancestor_hashes_requests_for_duplicate_slot( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &cluster_slots, &requester_serve_repair, &repair_validators, @@ -1568,8 +1556,7 @@ mod test { AncestorHashesService::initiate_ancestor_hashes_requests_for_duplicate_slot( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &cluster_slots, &requester_serve_repair, &repair_validators, @@ -1631,8 +1618,7 @@ mod test { AncestorHashesService::initiate_ancestor_hashes_requests_for_duplicate_slot( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &cluster_slots, &requester_serve_repair, &repair_validators, @@ -1718,15 +1704,13 @@ mod test { } = repair_info; cluster_info.insert_info(responder_node.info); bank_forks.read().unwrap().root_bank().epoch_schedule(); - let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); - let (quic_endpoint_sender, _quic_endpoint_sender) = + let (ancestor_hashes_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); // 1) No signals from ReplayStage, no requests should be made AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1769,8 +1753,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1810,8 +1793,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1843,8 +1825,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1882,8 +1863,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -1926,8 +1906,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, 
&outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -2084,15 +2063,13 @@ mod test { &leader_schedule_cache, ); - let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); - let (quic_endpoint_sender, _quic_endpoint_sender) = + let (ancestor_hashes_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); // Simulate making a request AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -2188,8 +2165,7 @@ mod test { &repair_info.ancestor_duplicate_slots_sender, &retryable_slots_sender, ); - let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = unbounded(); - let (quic_endpoint_sender, _quic_endpoint_sender) = + let (ancestor_hashes_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); // Simulate ancestor request thread getting the retry signal @@ -2199,8 +2175,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, @@ -2239,8 +2214,7 @@ mod test { AncestorHashesService::manage_ancestor_requests( &ancestor_hashes_request_statuses, &ancestor_hashes_request_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &ancestor_hashes_request_quic_sender, &repair_info, &outstanding_requests, &ancestor_hashes_replay_update_receiver, diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index 5f4fea3f637e73..87183dd84ae628 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -1,28 +1,27 @@ use { - bincode::Options, + bytes::Bytes, crossbeam_channel::Sender, - futures::future::TryJoin, - itertools::Itertools, + futures::future::{TryJoin, TryJoin3}, log::error, quinn::{ crypto::rustls::{QuicClientConfig, QuicServerConfig}, - ClientConfig, ClosedStream, ConnectError, Connecting, Connection, ConnectionError, - Endpoint, EndpointConfig, IdleTimeout, ReadError, ReadToEndError, RecvStream, SendStream, - ServerConfig, TokioRuntime, TransportConfig, VarInt, WriteError, + ClientConfig, ConnectError, Connecting, Connection, ConnectionError, Endpoint, + EndpointConfig, IdleTimeout, SendDatagramError, ServerConfig, TokioRuntime, + TransportConfig, VarInt, }, rustls::{ pki_types::{CertificateDer, PrivateKeyDer}, CertificateError, KeyLogFile, }, - serde_bytes::ByteBuf, + solana_gossip::contact_info::Protocol, solana_quic_client::nonblocking::quic_client::SkipServerVerification, solana_runtime::bank_forks::BankForks, - solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Keypair}, + solana_sdk::{pubkey::Pubkey, signature::Keypair}, solana_streamer::{quic::SkipClientVerification, tls_certificates::new_dummy_x509_certificate}, std::{ cmp::Reverse, collections::{hash_map::Entry, HashMap}, - io::{Cursor, Error as IoError}, + io::Error as IoError, net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -34,26 +33,53 @@ use { tokio::{ sync::{ mpsc::{error::TrySendError, Receiver as AsyncReceiver, Sender as AsyncSender}, - oneshot::Sender as OneShotSender, Mutex, RwLock as AsyncRwLock, }, task::JoinHandle, }, }; -const 
ALPN_REPAIR_PROTOCOL_ID: &[u8] = b"solana-repair"; -const CONNECT_SERVER_NAME: &str = "solana-repair"; +// Incoming packets can be either: +// RepairProtocol +// RepairResponse or Shred + repair Nonce +// AncestorHashesResponse +// So, we need 3 QUIC endpoints on 3 separate sockets to correctly distinguish +// between these packets and send them down the right channel. +// 1) serve_repair_quic: +// The server side receives incoming RepairProtocols from the cluster and +// channels them to serve_repair using a Sender channel. +// The outgoing repair (or ancestor hashes) responses from serve_repair are +// sent back to the client side through a AsyncReceiver<(SocketAddr, Bytes)> +// channel and sent back to the remote node. +// 2) repair_quic: +// Outgoing repair requests from the repair_service are received by the +// client through a AsyncReceiver<(SocketAddr, Bytes)> channel and sent to +// serve_repair_quic socket of the remote node. +// Incoming repair responses (RepairResponse or Shred + repair Nonce) are +// channeled to shred-fetch-stage using a Sender<(Pubkey, SocketAddr, Bytes)> +// channel. +// 3) ancestor_hashes_requests_quic: +// Outgoing RepairProtocol::AncestorHashes requests from the +// ancestor_hashes_service are received by the client through a +// AsyncReceiver<(SocketAddr, Bytes)> channel and sent to serve_repair_quic +// socket of the remote node. +// Incoming AncestorHashesResponse are channeled back to +// ancestor_hashes_service using a Sender<(Pubkey, SocketAddr, Bytes)> +// channel. const CLIENT_CHANNEL_BUFFER: usize = 1 << 14; const ROUTER_CHANNEL_BUFFER: usize = 64; const CONNECTION_CACHE_CAPACITY: usize = 3072; +const ALPN_REPAIR_PROTOCOL_ID: &[u8] = b"solana-repair"; +const CONNECT_SERVER_NAME: &str = "solana-repair"; // Transport config. -// Repair randomly samples peers, uses bi-directional streams and generally has -// low to moderate load and so is configured separately from other protocols. +const DATAGRAM_RECEIVE_BUFFER_SIZE: usize = 256 * 1024 * 1024; +const DATAGRAM_SEND_BUFFER_SIZE: usize = 128 * 1024 * 1024; +const INITIAL_MAXIMUM_TRANSMISSION_UNIT: u16 = MINIMUM_MAXIMUM_TRANSMISSION_UNIT; const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(4); -const MAX_CONCURRENT_BIDI_STREAMS: VarInt = VarInt::from_u32(512); const MAX_IDLE_TIMEOUT: Duration = Duration::from_secs(10); +const MINIMUM_MAXIMUM_TRANSMISSION_UNIT: u16 = 1280; const CONNECTION_CLOSE_ERROR_CODE_SHUTDOWN: VarInt = VarInt::from_u32(1); const CONNECTION_CLOSE_ERROR_CODE_DROPPED: VarInt = VarInt::from_u32(2); @@ -67,23 +93,53 @@ const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; const CONNECTION_CLOSE_REASON_PRUNED: &[u8] = b"PRUNED"; -pub(crate) type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; - -// Outgoing local requests. -pub struct LocalRequest { - pub(crate) remote_address: SocketAddr, - pub(crate) bytes: Vec, - pub(crate) num_expected_responses: usize, - pub(crate) response_sender: Sender<(SocketAddr, Vec)>, -} +pub(crate) type AsyncTryJoinHandle = TryJoin3< + TryJoin, JoinHandle<()>>, + TryJoin, JoinHandle<()>>, + TryJoin, JoinHandle<()>>, +>; // Incoming requests from remote nodes. -// remote_pubkey and response_sender are None only when adapting UDP packets. -pub struct RemoteRequest { +// remote_pubkey is None only when adapting UDP packets. 
+pub(crate) struct RemoteRequest { pub(crate) remote_pubkey: Option, pub(crate) remote_address: SocketAddr, - pub(crate) bytes: Vec, - pub(crate) response_sender: Option>>>, + pub(crate) bytes: Bytes, +} + +// Async sender channel for directing outgoing packets from validator threads +// to QUIC clients. +pub(crate) struct RepairQuicAsyncSenders { + // Outgoing repair responses to remote repair requests from serve_repair. + pub(crate) repair_response_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + // Outgoing local repair requests from repair_service. + pub(crate) repair_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + // Outgoing RepairProtocol::AncestorHashes requests from + // ancestor_hashes_service. + pub(crate) ancestor_hashes_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, +} + +pub(crate) struct RepairQuicSockets { + // Socket receiving remote repair or ancestor hashes requests from the + // cluster, and sending back repair and ancestor hashes responses. + pub(crate) repair_server_quic_socket: UdpSocket, + // Socket sending out local repair requests, + // and receiving repair responses from the cluster. + pub(crate) repair_client_quic_socket: UdpSocket, + // Socket sending out local RepairProtocol::AncestorHashes, + // and receiving AncestorHashesResponse from the cluster. + pub(crate) ancestor_hashes_quic_socket: UdpSocket, +} + +// Sender channel for directing incoming packets from QUIC servers to validator +// threads processing those packets. +pub(crate) struct RepairQuicSenders { + // Channel to send incoming repair requests from the cluster. + pub(crate) repair_request_quic_sender: Sender, + // Channel to send incoming repair responses from the cluster. + pub(crate) repair_response_quic_sender: Sender<(Pubkey, SocketAddr, Bytes)>, + // Channel to send incoming ancestor hashes responses from the cluster. + pub(crate) ancestor_hashes_response_quic_sender: Sender<(Pubkey, SocketAddr, Bytes)>, } #[derive(Error, Debug)] @@ -99,18 +155,10 @@ pub(crate) enum Error { InvalidIdentity(SocketAddr), #[error(transparent)] IoError(#[from] IoError), - #[error("No Response Received")] - NoResponseReceived, #[error(transparent)] - ReadToEndError(#[from] ReadToEndError), - #[error("read_to_end Timeout")] - ReadToEndTimeout, + SendDatagramError(#[from] SendDatagramError), #[error(transparent)] TlsError(#[from] rustls::Error), - #[error(transparent)] - WriteError(#[from] WriteError), - #[error(transparent)] - ClosedStream(#[from] ClosedStream), } macro_rules! add_metric { @@ -119,14 +167,85 @@ macro_rules! 
add_metric { }}; } +pub(crate) fn new_quic_endpoints( + runtime: &tokio::runtime::Handle, + keypair: &Keypair, + sockets: RepairQuicSockets, + senders: RepairQuicSenders, + bank_forks: Arc>, +) -> Result<([Endpoint; 3], RepairQuicAsyncSenders, AsyncTryJoinHandle), Error> { + let (repair_server_quic_endpoint, repair_response_quic_sender, repair_server_join_handle) = + new_quic_endpoint( + runtime, + "repair_server_quic_client", + "repair_server_quic_server", + keypair, + sockets.repair_server_quic_socket, + senders.repair_request_quic_sender, + bank_forks.clone(), + )?; + let (repair_client_quic_endpoint, repair_request_quic_sender, repair_client_join_handle) = + new_quic_endpoint( + runtime, + "repair_client_quic_client", + "repair_client_quic_server", + keypair, + sockets.repair_client_quic_socket, + senders.repair_response_quic_sender, + bank_forks.clone(), + )?; + let ( + ancestor_hashes_quic_endpoint, + ancestor_hashes_request_quic_sender, + ancestor_hashes_join_handle, + ) = new_quic_endpoint( + runtime, + "ancestor_hashes_quic_client", + "ancestor_hashes_quic_server", + keypair, + sockets.ancestor_hashes_quic_socket, + senders.ancestor_hashes_response_quic_sender, + bank_forks, + )?; + Ok(( + [ + repair_server_quic_endpoint, + repair_client_quic_endpoint, + ancestor_hashes_quic_endpoint, + ], + RepairQuicAsyncSenders { + repair_response_quic_sender, + repair_request_quic_sender, + ancestor_hashes_request_quic_sender, + }, + futures::future::try_join3( + repair_server_join_handle, + repair_client_join_handle, + ancestor_hashes_join_handle, + ), + )) +} + #[allow(clippy::type_complexity)] -pub(crate) fn new_quic_endpoint( +fn new_quic_endpoint( runtime: &tokio::runtime::Handle, + client_name: &'static str, + server_name: &'static str, keypair: &Keypair, socket: UdpSocket, - remote_request_sender: Sender, + sender: Sender, bank_forks: Arc>, -) -> Result<(Endpoint, AsyncSender, AsyncTryJoinHandle), Error> { +) -> Result< + ( + Endpoint, + AsyncSender<(SocketAddr, Bytes)>, + TryJoin, JoinHandle<()>>, + ), + Error, +> +where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ let (cert, key) = new_dummy_x509_certificate(keypair); let server_config = new_server_config(cert.clone(), key.clone_key())?; let client_config = new_client_config(cert, key)?; @@ -144,11 +263,12 @@ pub(crate) fn new_quic_endpoint( endpoint.set_default_client_config(client_config); let prune_cache_pending = Arc::::default(); let cache = Arc::>>::default(); + let router = Arc::>>>::default(); let (client_sender, client_receiver) = tokio::sync::mpsc::channel(CLIENT_CHANNEL_BUFFER); - let router = Arc::>>>::default(); let server_task = runtime.spawn(run_server( endpoint.clone(), - remote_request_sender.clone(), + server_name, + sender.clone(), bank_forks.clone(), prune_cache_pending.clone(), router.clone(), @@ -156,8 +276,9 @@ pub(crate) fn new_quic_endpoint( )); let client_task = runtime.spawn(run_client( endpoint.clone(), + client_name, client_receiver, - remote_request_sender, + sender, bank_forks, prune_cache_pending, router, @@ -183,10 +304,12 @@ fn new_server_config( .with_single_cert(vec![cert], key)?; config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; config.key_log = Arc::new(KeyLogFile::new()); - let quic_server_config = QuicServerConfig::try_from(config) - .map_err(|_err| rustls::Error::InvalidCertificate(CertificateError::BadSignature))?; - - let mut config = ServerConfig::with_crypto(Arc::new(quic_server_config)); + let Ok(config) = QuicServerConfig::try_from(config) else { + return 
Err(rustls::Error::InvalidCertificate( + CertificateError::BadSignature, + )); + }; + let mut config = ServerConfig::with_crypto(Arc::new(config)); config .transport_config(Arc::new(new_transport_config())) .migration(false); @@ -211,36 +334,40 @@ fn new_client_config( fn new_transport_config() -> TransportConfig { let max_idle_timeout = IdleTimeout::try_from(MAX_IDLE_TIMEOUT).unwrap(); let mut config = TransportConfig::default(); - // Disable datagrams and uni streams. config - .datagram_receive_buffer_size(None) + .datagram_receive_buffer_size(Some(DATAGRAM_RECEIVE_BUFFER_SIZE)) + .datagram_send_buffer_size(DATAGRAM_SEND_BUFFER_SIZE) + .initial_mtu(INITIAL_MAXIMUM_TRANSMISSION_UNIT) .keep_alive_interval(Some(KEEP_ALIVE_INTERVAL)) - .max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS) + .max_concurrent_bidi_streams(VarInt::from(0u8)) .max_concurrent_uni_streams(VarInt::from(0u8)) - .max_idle_timeout(Some(max_idle_timeout)); + .max_idle_timeout(Some(max_idle_timeout)) + .min_mtu(MINIMUM_MAXIMUM_TRANSMISSION_UNIT) + .mtu_discovery_config(None); config } -async fn run_server( +async fn run_server( endpoint: Endpoint, - remote_request_sender: Sender, + server_name: &'static str, + sender: Sender, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, -) { +) where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ let stats = Arc::::default(); - let report_metrics_task = - tokio::task::spawn(report_metrics_task("repair_quic_server", stats.clone())); + let report_metrics_task = tokio::task::spawn(report_metrics_task(server_name, stats.clone())); while let Some(incoming) = endpoint.accept().await { let remote_addr: SocketAddr = incoming.remote_address(); - let connecting = incoming.accept(); - match connecting { + match incoming.accept() { Ok(connecting) => { tokio::task::spawn(handle_connecting_task( endpoint.clone(), connecting, - remote_request_sender.clone(), + sender.clone(), bank_forks.clone(), prune_cache_pending.clone(), router.clone(), @@ -248,45 +375,48 @@ async fn run_server( stats.clone(), )); } - Err(error) => { - debug!("Error while accepting incoming connection: {error:?} from {remote_addr}"); + Err(err) => { + debug!("Error while accepting incoming connection: {err:?} from {remote_addr}"); + record_error(&Error::from(err), &stats); } } } report_metrics_task.abort(); } -async fn run_client( +async fn run_client( endpoint: Endpoint, - mut receiver: AsyncReceiver, - remote_request_sender: Sender, + client_name: &'static str, + mut receiver: AsyncReceiver<(SocketAddr, Bytes)>, + sender: Sender, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, -) { +) where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ let stats = Arc::::default(); - let report_metrics_task = - tokio::task::spawn(report_metrics_task("repair_quic_client", stats.clone())); - while let Some(request) = receiver.recv().await { - let Some(request) = try_route_request(request, &*router.read().await, &stats) else { + let report_metrics_task = tokio::task::spawn(report_metrics_task(client_name, stats.clone())); + while let Some((remote_address, bytes)) = receiver.recv().await { + let Some(bytes) = try_route_bytes(&remote_address, bytes, &*router.read().await, &stats) + else { continue; }; - let remote_address = request.remote_address; let receiver = { let mut router = router.write().await; - let Some(request) = try_route_request(request, &router, &stats) else { + let Some(bytes) = try_route_bytes(&remote_address, 
bytes, &router, &stats) else { continue; }; let (sender, receiver) = tokio::sync::mpsc::channel(ROUTER_CHANNEL_BUFFER); - sender.try_send(request).unwrap(); + sender.try_send(bytes).unwrap(); router.insert(remote_address, sender); receiver }; tokio::task::spawn(make_connection_task( endpoint.clone(), remote_address, - remote_request_sender.clone(), + sender.clone(), receiver, bank_forks.clone(), prune_cache_pending.clone(), @@ -301,42 +431,45 @@ async fn run_client( report_metrics_task.abort(); } -// Routes the local request to respective channel. Drops the request if the -// channel is full. Bounces the request back if the channel is closed or does -// not exist. -fn try_route_request( - request: LocalRequest, - router: &HashMap>, +// Routes the payload to respective channel. +// Drops the payload if the channel is full. +// Bounces the payload back if the channel is closed or does not exist. +fn try_route_bytes( + remote_address: &SocketAddr, + bytes: Bytes, + router: &HashMap>, stats: &RepairQuicStats, -) -> Option { - match router.get(&request.remote_address) { - None => Some(request), - Some(sender) => match sender.try_send(request) { +) -> Option { + match router.get(remote_address) { + None => Some(bytes), + Some(sender) => match sender.try_send(bytes) { Ok(()) => None, - Err(TrySendError::Full(request)) => { - debug!("TrySendError::Full {}", request.remote_address); + Err(TrySendError::Full(_)) => { + debug!("TrySendError::Full {remote_address}"); add_metric!(stats.router_try_send_error_full); None } - Err(TrySendError::Closed(request)) => Some(request), + Err(TrySendError::Closed(bytes)) => Some(bytes), }, } } -async fn handle_connecting_task( +async fn handle_connecting_task( endpoint: Endpoint, connecting: Connecting, - remote_request_sender: Sender, + sender: Sender, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, stats: Arc, -) { +) where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ if let Err(err) = handle_connecting( endpoint, connecting, - remote_request_sender, + sender, bank_forks, prune_cache_pending, router, @@ -350,16 +483,19 @@ async fn handle_connecting_task( } } -async fn handle_connecting( +async fn handle_connecting( endpoint: Endpoint, connecting: Connecting, - remote_request_sender: Sender, + sender: Sender, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, stats: Arc, -) -> Result<(), Error> { +) -> Result<(), Error> +where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ let connection = connecting.await?; let remote_address = connection.remote_address(); let remote_pubkey = get_remote_pubkey(&connection)?; @@ -373,7 +509,7 @@ async fn handle_connecting( remote_address, remote_pubkey, connection, - remote_request_sender, + sender, receiver, bank_forks, prune_cache_pending, @@ -386,19 +522,21 @@ async fn handle_connecting( } #[allow(clippy::too_many_arguments)] -async fn handle_connection( +async fn handle_connection( endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, - remote_request_sender: Sender, - receiver: AsyncReceiver, + sender: Sender, + receiver: AsyncReceiver, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, stats: Arc, -) { +) where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ cache_connection( remote_pubkey, connection.clone(), @@ -408,30 +546,24 @@ async fn handle_connection( cache.clone(), ) .await; - let send_requests_task = 
tokio::task::spawn(send_requests_task( - endpoint.clone(), - remote_address, - connection.clone(), - receiver, - stats.clone(), - )); - let recv_requests_task = tokio::task::spawn(recv_requests_task( + let send_datagram_task = tokio::task::spawn(send_datagram_task(connection.clone(), receiver)); + let read_datagram_task = tokio::task::spawn(read_datagram_task( endpoint, remote_address, remote_pubkey, connection.clone(), - remote_request_sender, + sender, stats.clone(), )); - match futures::future::try_join(send_requests_task, recv_requests_task).await { + match futures::future::try_join(send_datagram_task, read_datagram_task).await { Err(err) => error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"), Ok(out) => { if let (Err(ref err), _) = out { - debug!("send_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + debug!("send_datagram_task: {remote_pubkey}, {remote_address}, {err:?}"); record_error(err, &stats); } if let (_, Err(ref err)) = out { - debug!("recv_requests_task: {remote_pubkey}, {remote_address}, {err:?}"); + debug!("read_datagram_task: {remote_pubkey}, {remote_address}, {err:?}"); record_error(err, &stats); } } @@ -444,97 +576,42 @@ async fn handle_connection( } } -async fn recv_requests_task( +async fn read_datagram_task( endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, - remote_request_sender: Sender, - stats: Arc, -) -> Result<(), Error> { - loop { - let (send_stream, recv_stream) = connection.accept_bi().await?; - tokio::task::spawn(handle_streams_task( - endpoint.clone(), - remote_address, - remote_pubkey, - send_stream, - recv_stream, - remote_request_sender.clone(), - stats.clone(), - )); - } -} - -async fn handle_streams_task( - endpoint: Endpoint, - remote_address: SocketAddr, - remote_pubkey: Pubkey, - send_stream: SendStream, - recv_stream: RecvStream, - remote_request_sender: Sender, + sender: Sender, stats: Arc, -) { - if let Err(err) = handle_streams( - &endpoint, - remote_address, - remote_pubkey, - send_stream, - recv_stream, - &remote_request_sender, - ) - .await - { - debug!("handle_stream: {remote_address}, {remote_pubkey}, {err:?}"); - record_error(&err, &stats); - } -} - -async fn handle_streams( - endpoint: &Endpoint, - remote_address: SocketAddr, - remote_pubkey: Pubkey, - mut send_stream: SendStream, - mut recv_stream: RecvStream, - remote_request_sender: &Sender, -) -> Result<(), Error> { +) -> Result<(), Error> +where + T: From<(Pubkey, SocketAddr, Bytes)>, +{ // Assert that send won't block. 
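    // (The sender here is an unbounded crossbeam channel; an unbounded
    // channel reports capacity() == None and its send() never blocks,
    // which is exactly what the debug_assert below verifies.)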
- debug_assert_eq!(remote_request_sender.capacity(), None); - const READ_TIMEOUT_DURATION: Duration = Duration::from_secs(2); - let bytes = tokio::time::timeout( - READ_TIMEOUT_DURATION, - recv_stream.read_to_end(PACKET_DATA_SIZE), - ) - .await - .map_err(|_| Error::ReadToEndTimeout)??; - let (response_sender, response_receiver) = tokio::sync::oneshot::channel(); - let remote_request = RemoteRequest { - remote_pubkey: Some(remote_pubkey), - remote_address, - bytes, - response_sender: Some(response_sender), - }; - if let Err(err) = remote_request_sender.send(remote_request) { - close_quic_endpoint(endpoint); - return Err(Error::from(err)); - } - let Ok(response) = response_receiver.await else { - return Err(Error::NoResponseReceived); - }; - for chunk in response { - let size = chunk.len() as u64; - send_stream.write_all(&size.to_le_bytes()).await?; - send_stream.write_all(&chunk).await?; + debug_assert_eq!(sender.capacity(), None); + loop { + match connection.read_datagram().await { + Ok(bytes) => { + let value = T::from((remote_pubkey, remote_address, bytes)); + if let Err(err) = sender.send(value) { + close_quic_endpoint(&endpoint); + return Err(Error::from(err)); + } + } + Err(err) => { + if let Some(err) = connection.close_reason() { + return Err(Error::from(err)); + } + debug!("connection.read_datagram: {remote_pubkey}, {remote_address}, {err:?}"); + record_error(&Error::from(err), &stats); + } + }; } - send_stream.finish().map_err(Error::from) } -async fn send_requests_task( - endpoint: Endpoint, - remote_address: SocketAddr, +async fn send_datagram_task( connection: Connection, - mut receiver: AsyncReceiver, - stats: Arc, + mut receiver: AsyncReceiver, ) -> Result<(), Error> { tokio::pin! { let connection_closed = connection.closed(); @@ -542,97 +619,34 @@ async fn send_requests_task( loop { tokio::select! { biased; - request = receiver.recv() => { - match request { + bytes = receiver.recv() => { + match bytes { None => return Ok(()), - Some(request) => tokio::task::spawn(send_request_task( - endpoint.clone(), - remote_address, - connection.clone(), - request, - stats.clone(), - )), - }; + Some(bytes) => connection.send_datagram(bytes)?, + } } err = &mut connection_closed => return Err(Error::from(err)), } } } -async fn send_request_task( - endpoint: Endpoint, - remote_address: SocketAddr, - connection: Connection, - request: LocalRequest, - stats: Arc, -) { - if let Err(err) = send_request(endpoint, connection, request).await { - debug!("send_request: {remote_address}, {err:?}"); - record_error(&err, &stats); - } -} - -async fn send_request( - endpoint: Endpoint, - connection: Connection, - LocalRequest { - remote_address: _, - bytes, - num_expected_responses, - response_sender, - }: LocalRequest, -) -> Result<(), Error> { - // Assert that send won't block. - debug_assert_eq!(response_sender.capacity(), None); - const READ_TIMEOUT_DURATION: Duration = Duration::from_secs(10); - let (mut send_stream, mut recv_stream) = connection.open_bi().await?; - send_stream.write_all(&bytes).await?; - send_stream.finish()?; - // Each response is at most PACKET_DATA_SIZE bytes and requires - // an additional 8 bytes to encode its length. 
- let size = PACKET_DATA_SIZE - .saturating_add(8) - .saturating_mul(num_expected_responses); - let response = tokio::time::timeout(READ_TIMEOUT_DURATION, recv_stream.read_to_end(size)) - .await - .map_err(|_| Error::ReadToEndTimeout)??; - let remote_address = connection.remote_address(); - let mut cursor = Cursor::new(&response[..]); - std::iter::repeat_with(|| { - bincode::options() - .with_limit(response.len() as u64) - .with_fixint_encoding() - .allow_trailing_bytes() - .deserialize_from::<_, ByteBuf>(&mut cursor) - .map(ByteBuf::into_vec) - .ok() - }) - .while_some() - .try_for_each(|chunk| { - response_sender - .send((remote_address, chunk)) - .map_err(|err| { - close_quic_endpoint(&endpoint); - Error::from(err) - }) - }) -} - -async fn make_connection_task( +async fn make_connection_task( endpoint: Endpoint, remote_address: SocketAddr, - remote_request_sender: Sender, - receiver: AsyncReceiver, + sender: Sender, + receiver: AsyncReceiver, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, stats: Arc, -) { +) where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ if let Err(err) = make_connection( endpoint, remote_address, - remote_request_sender, + sender, receiver, bank_forks, prune_cache_pending, @@ -647,17 +661,20 @@ async fn make_connection_task( } } -async fn make_connection( +async fn make_connection( endpoint: Endpoint, remote_address: SocketAddr, - remote_request_sender: Sender, - receiver: AsyncReceiver, + sender: Sender, + receiver: AsyncReceiver, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, stats: Arc, -) -> Result<(), Error> { +) -> Result<(), Error> +where + T: 'static + From<(Pubkey, SocketAddr, Bytes)> + Send, +{ let connection = endpoint .connect(remote_address, CONNECT_SERVER_NAME)? .await?; @@ -666,7 +683,7 @@ async fn make_connection( connection.remote_address(), get_remote_pubkey(&connection)?, connection, - remote_request_sender, + sender, receiver, bank_forks, prune_cache_pending, @@ -696,7 +713,7 @@ async fn cache_connection( connection: Connection, bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, ) { let (old, should_prune_cache) = { @@ -741,7 +758,7 @@ async fn drop_connection( async fn prune_connection_cache( bank_forks: Arc>, prune_cache_pending: Arc, - router: Arc>>>, + router: Arc>>>, cache: Arc>>, ) { debug_assert!(prune_cache_pending.load(Ordering::Relaxed)); @@ -782,6 +799,39 @@ async fn prune_connection_cache( router.write().await.retain(|_, sender| !sender.is_closed()); } +impl RemoteRequest { + #[inline] + pub(crate) fn protocol(&self) -> Protocol { + // remote_pubkey is only available with QUIC. 
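+        // UDP requests are adapted into RemoteRequest with remote_pubkey: None
+        // (see adapt_repair_requests_packets), so a missing pubkey means UDP.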
+ if self.remote_pubkey.is_some() { + Protocol::QUIC + } else { + Protocol::UDP + } + } +} + +impl From<(Pubkey, SocketAddr, Bytes)> for RemoteRequest { + #[inline] + fn from((pubkey, remote_address, bytes): (Pubkey, SocketAddr, Bytes)) -> Self { + Self { + remote_pubkey: Some(pubkey), + remote_address, + bytes, + } + } +} + +impl RepairQuicAsyncSenders { + pub(crate) fn new_dummy() -> Self { + Self { + repair_response_quic_sender: tokio::sync::mpsc::channel(1).0, + repair_request_quic_sender: tokio::sync::mpsc::channel(1).0, + ancestor_hashes_request_quic_sender: tokio::sync::mpsc::channel(1).0, + } + } +} + impl From> for Error { fn from(_: crossbeam_channel::SendError) -> Self { Error::ChannelSendError @@ -790,43 +840,27 @@ impl From> for Error { #[derive(Default)] struct RepairQuicStats { + connect_error_cids_exhausted: AtomicU64, connect_error_invalid_remote_address: AtomicU64, connect_error_other: AtomicU64, - connect_error_too_many_connections: AtomicU64, connection_error_application_closed: AtomicU64, + connection_error_cids_exhausted: AtomicU64, connection_error_connection_closed: AtomicU64, connection_error_locally_closed: AtomicU64, connection_error_reset: AtomicU64, connection_error_timed_out: AtomicU64, connection_error_transport_error: AtomicU64, connection_error_version_mismatch: AtomicU64, - connection_error_connection_limit_exceeded: AtomicU64, invalid_identity: AtomicU64, - no_response_received: AtomicU64, - read_to_end_error_connection_lost: AtomicU64, - read_to_end_error_illegal_ordered_read: AtomicU64, - read_to_end_error_reset: AtomicU64, - read_to_end_error_too_long: AtomicU64, - read_to_end_error_unknown_stream: AtomicU64, - read_to_end_error_zero_rtt_rejected: AtomicU64, - read_to_end_timeout: AtomicU64, router_try_send_error_full: AtomicU64, - write_error_connection_lost: AtomicU64, - write_error_stopped: AtomicU64, - write_error_unknown_stream: AtomicU64, - write_error_zero_rtt_rejected: AtomicU64, - connect_error_cids_exhausted: AtomicU64, - connect_error_invalid_server_name: AtomicU64, - connection_error_cids_exhausted: AtomicU64, - closed_streams: AtomicU64, - read_to_end_error_closed_stream: AtomicU64, - write_error_closed_stream: AtomicU64, + send_datagram_error_connection_lost: AtomicU64, + send_datagram_error_too_large: AtomicU64, + send_datagram_error_unsupported_by_peer: AtomicU64, } async fn report_metrics_task(name: &'static str, stats: Arc) { - const METRICS_SUBMIT_CADENCE: Duration = Duration::from_secs(2); loop { - tokio::time::sleep(METRICS_SUBMIT_CADENCE).await; + tokio::time::sleep(Duration::from_secs(2)).await; report_metrics(name, &stats); } } @@ -834,18 +868,27 @@ async fn report_metrics_task(name: &'static str, stats: Arc) { fn record_error(err: &Error, stats: &RepairQuicStats) { match err { Error::ChannelSendError => (), + Error::ConnectError(ConnectError::CidsExhausted) => { + add_metric!(stats.connect_error_cids_exhausted) + } Error::ConnectError(ConnectError::EndpointStopping) => { add_metric!(stats.connect_error_other) } Error::ConnectError(ConnectError::InvalidRemoteAddress(_)) => { add_metric!(stats.connect_error_invalid_remote_address) } + Error::ConnectError(ConnectError::InvalidServerName(_)) => { + add_metric!(stats.connect_error_other) + } Error::ConnectError(ConnectError::NoDefaultClientConfig) => { add_metric!(stats.connect_error_other) } Error::ConnectError(ConnectError::UnsupportedVersion) => { add_metric!(stats.connect_error_other) } + Error::ConnectionError(ConnectionError::CidsExhausted) => { + 
add_metric!(stats.connection_error_cids_exhausted) + } Error::ConnectionError(ConnectionError::VersionMismatch) => { add_metric!(stats.connection_error_version_mismatch) } @@ -867,49 +910,17 @@ fn record_error(err: &Error, stats: &RepairQuicStats) { } Error::InvalidIdentity(_) => add_metric!(stats.invalid_identity), Error::IoError(_) => (), - Error::NoResponseReceived => add_metric!(stats.no_response_received), - Error::ReadToEndError(ReadToEndError::Read(ReadError::Reset(_))) => { - add_metric!(stats.read_to_end_error_reset) - } - Error::ReadToEndError(ReadToEndError::Read(ReadError::ConnectionLost(_))) => { - add_metric!(stats.read_to_end_error_connection_lost) + Error::SendDatagramError(SendDatagramError::UnsupportedByPeer) => { + add_metric!(stats.send_datagram_error_unsupported_by_peer) } - Error::ReadToEndError(ReadToEndError::Read(ReadError::IllegalOrderedRead)) => { - add_metric!(stats.read_to_end_error_illegal_ordered_read) + Error::SendDatagramError(SendDatagramError::Disabled) => (), + Error::SendDatagramError(SendDatagramError::TooLarge) => { + add_metric!(stats.send_datagram_error_too_large) } - Error::ReadToEndError(ReadToEndError::Read(ReadError::ZeroRttRejected)) => { - add_metric!(stats.read_to_end_error_zero_rtt_rejected) + Error::SendDatagramError(SendDatagramError::ConnectionLost(_)) => { + add_metric!(stats.send_datagram_error_connection_lost) } - Error::ReadToEndError(ReadToEndError::TooLong) => { - add_metric!(stats.read_to_end_error_too_long) - } - Error::ReadToEndTimeout => add_metric!(stats.read_to_end_timeout), Error::TlsError(_) => (), - Error::WriteError(WriteError::Stopped(_)) => add_metric!(stats.write_error_stopped), - Error::WriteError(WriteError::ConnectionLost(_)) => { - add_metric!(stats.write_error_connection_lost) - } - Error::WriteError(WriteError::ZeroRttRejected) => { - add_metric!(stats.write_error_zero_rtt_rejected) - } - Error::ConnectError(ConnectError::CidsExhausted) => { - add_metric!(stats.connect_error_cids_exhausted) - } - Error::ConnectError(ConnectError::InvalidServerName(_)) => { - add_metric!(stats.connect_error_invalid_server_name) - } - Error::ConnectionError(ConnectionError::CidsExhausted) => { - add_metric!(stats.connection_error_cids_exhausted) - } - Error::ClosedStream(_) => { - add_metric!(stats.closed_streams) - } - Error::ReadToEndError(ReadToEndError::Read(ReadError::ClosedStream)) => { - add_metric!(stats.read_to_end_error_closed_stream) - } - Error::WriteError(WriteError::ClosedStream) => { - add_metric!(stats.write_error_closed_stream) - } } } @@ -921,6 +932,11 @@ fn report_metrics(name: &'static str, stats: &RepairQuicStats) { } datapoint_info!( name, + ( + "connect_error_cids_exhausted", + reset_metric!(stats.connect_error_cids_exhausted), + i64 + ), ( "connect_error_invalid_remote_address", reset_metric!(stats.connect_error_invalid_remote_address), @@ -932,13 +948,13 @@ fn report_metrics(name: &'static str, stats: &RepairQuicStats) { i64 ), ( - "connect_error_too_many_connections", - reset_metric!(stats.connect_error_too_many_connections), + "connection_error_application_closed", + reset_metric!(stats.connection_error_application_closed), i64 ), ( - "connection_error_application_closed", - reset_metric!(stats.connection_error_application_closed), + "connection_error_cids_exhausted", + reset_metric!(stats.connection_error_cids_exhausted), i64 ), ( @@ -971,79 +987,29 @@ fn report_metrics(name: &'static str, stats: &RepairQuicStats) { reset_metric!(stats.connection_error_version_mismatch), i64 ), - ( - 
"connection_error_connection_limit_exceeded", - reset_metric!(stats.connection_error_connection_limit_exceeded), - i64 - ), ( "invalid_identity", reset_metric!(stats.invalid_identity), i64 ), - ( - "no_response_received", - reset_metric!(stats.no_response_received), - i64 - ), - ( - "read_to_end_error_connection_lost", - reset_metric!(stats.read_to_end_error_connection_lost), - i64 - ), - ( - "read_to_end_error_illegal_ordered_read", - reset_metric!(stats.read_to_end_error_illegal_ordered_read), - i64 - ), - ( - "read_to_end_error_reset", - reset_metric!(stats.read_to_end_error_reset), - i64 - ), - ( - "read_to_end_error_too_long", - reset_metric!(stats.read_to_end_error_too_long), - i64 - ), - ( - "read_to_end_error_unknown_stream", - reset_metric!(stats.read_to_end_error_unknown_stream), - i64 - ), - ( - "read_to_end_error_zero_rtt_rejected", - reset_metric!(stats.read_to_end_error_zero_rtt_rejected), - i64 - ), - ( - "read_to_end_timeout", - reset_metric!(stats.read_to_end_timeout), - i64 - ), ( "router_try_send_error_full", reset_metric!(stats.router_try_send_error_full), i64 ), ( - "write_error_connection_lost", - reset_metric!(stats.write_error_connection_lost), - i64 - ), - ( - "write_error_stopped", - reset_metric!(stats.write_error_stopped), + "send_datagram_error_connection_lost", + reset_metric!(stats.send_datagram_error_connection_lost), i64 ), ( - "write_error_unknown_stream", - reset_metric!(stats.write_error_unknown_stream), + "send_datagram_error_too_large", + reset_metric!(stats.send_datagram_error_too_large), i64 ), ( - "write_error_zero_rtt_rejected", - reset_metric!(stats.write_error_zero_rtt_rejected), + "send_datagram_error_unsupported_by_peer", + reset_metric!(stats.send_datagram_error_unsupported_by_peer), i64 ), ); @@ -1063,7 +1029,7 @@ mod tests { #[test] fn test_quic_endpoint() { const NUM_ENDPOINTS: usize = 3; - const RECV_TIMEOUT: Duration = Duration::from_secs(30); + const RECV_TIMEOUT: Duration = Duration::from_secs(60); let runtime = tokio::runtime::Builder::new_multi_thread() .worker_threads(8) .enable_all() @@ -1079,8 +1045,8 @@ mod tests { .map(UdpSocket::local_addr) .collect::>() .unwrap(); - let (remote_request_senders, remote_request_receivers): (Vec<_>, Vec<_>) = - repeat_with(crossbeam_channel::unbounded::) + let (senders, receivers): (Vec<_>, Vec<_>) = + repeat_with(crossbeam_channel::unbounded::<(Pubkey, SocketAddr, Bytes)>) .take(NUM_ENDPOINTS) .unzip(); let bank_forks = { @@ -1089,84 +1055,35 @@ mod tests { let bank = Bank::new_for_tests(&genesis_config); BankForks::new_rw_arc(bank) }; - let (endpoints, senders, tasks): (Vec<_>, Vec<_>, Vec<_>) = multiunzip( - keypairs - .iter() - .zip(sockets) - .zip(remote_request_senders) - .map(|((keypair, socket), remote_request_sender)| { + let (endpoints, senders, tasks): (Vec<_>, Vec<_>, Vec<_>) = + multiunzip(keypairs.iter().zip(sockets).zip(senders).map( + |((keypair, socket), sender)| { new_quic_endpoint( runtime.handle(), + "test_quic_client", + "test_quic_server", keypair, socket, - remote_request_sender, + sender, bank_forks.clone(), ) .unwrap() - }), - ); - let (response_senders, response_receivers): (Vec<_>, Vec<_>) = - repeat_with(crossbeam_channel::unbounded::<(SocketAddr, Vec)>) - .take(NUM_ENDPOINTS) - .unzip(); - // Send a unique request from each endpoint to every other endpoint. + }, + )); + // Send a unique message from each endpoint to every other endpoint. 
for (i, (keypair, &address, sender)) in izip!(&keypairs, &addresses, &senders).enumerate() { - for (j, (&remote_address, response_sender)) in - addresses.iter().zip(&response_senders).enumerate() - { - if i != j { - let mut bytes: Vec = format!("{i}=>{j}").into_bytes(); - bytes.resize(PACKET_DATA_SIZE, 0xa5); - let request = LocalRequest { - remote_address, - bytes, - num_expected_responses: j + 1, - response_sender: response_sender.clone(), - }; - sender.blocking_send(request).unwrap(); - } - } - // Verify all requests are received and respond to each. - for (j, remote_request_receiver) in remote_request_receivers.iter().enumerate() { + for (j, &address) in addresses.iter().enumerate() { if i != j { - let RemoteRequest { - remote_pubkey, - remote_address, - bytes, - response_sender, - } = remote_request_receiver.recv_timeout(RECV_TIMEOUT).unwrap(); - assert_eq!(remote_pubkey, Some(keypair.pubkey())); - assert_eq!(remote_address, address); - assert_eq!(bytes, { - let mut bytes = format!("{i}=>{j}").into_bytes(); - bytes.resize(PACKET_DATA_SIZE, 0xa5); - bytes - }); - let response: Vec> = (0..=j) - .map(|k| { - let mut bytes = format!("{j}=>{i}({k})").into_bytes(); - bytes.resize(PACKET_DATA_SIZE, 0xd5); - bytes - }) - .collect(); - response_sender.unwrap().send(response).unwrap(); + let bytes = Bytes::from(format!("{i}=>{j}")); + sender.blocking_send((address, bytes)).unwrap(); } } - // Verify responses. - for (j, (&remote_address, response_receiver)) in - addresses.iter().zip(&response_receivers).enumerate() - { + // Verify all messages are received. + for (j, receiver) in receivers.iter().enumerate() { if i != j { - for k in 0..=j { - let (address, response) = - response_receiver.recv_timeout(RECV_TIMEOUT).unwrap(); - assert_eq!(address, remote_address); - assert_eq!(response, { - let mut bytes = format!("{j}=>{i}({k})").into_bytes(); - bytes.resize(PACKET_DATA_SIZE, 0xd5); - bytes - }); - } + let bytes = Bytes::from(format!("{i}=>{j}")); + let entry = (keypair.pubkey(), address, bytes); + assert_eq!(receiver.recv_timeout(RECV_TIMEOUT).unwrap(), entry); } } } diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index f69aea596960e2..878d2ab9dfb9f4 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -13,7 +13,6 @@ use { ancestor_hashes_service::{AncestorHashesReplayUpdateReceiver, AncestorHashesService}, duplicate_repair_status::AncestorDuplicateSlotToRepair, outstanding_requests::OutstandingRequests, - quic_endpoint::LocalRequest, repair_weight::RepairWeight, serve_repair::{ self, RepairProtocol, RepairRequestHeader, ServeRepair, ShredRepairType, @@ -21,6 +20,7 @@ use { }, }, }, + bytes::Bytes, crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}, lru::LruCache, rand::seq::SliceRandom, @@ -254,8 +254,9 @@ impl RepairService { exit: Arc, repair_socket: Arc, ancestor_hashes_socket: Arc, - quic_endpoint_sender: AsyncSender, - quic_endpoint_response_sender: CrossbeamSender<(SocketAddr, Vec)>, + repair_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_response_quic_receiver: CrossbeamReceiver<(Pubkey, SocketAddr, Bytes)>, repair_info: RepairInfo, verified_vote_receiver: VerifiedVoteReceiver, outstanding_requests: Arc>, @@ -267,7 +268,6 @@ impl RepairService { let blockstore = blockstore.clone(); let exit = exit.clone(); let repair_info = repair_info.clone(); - let quic_endpoint_sender = 
quic_endpoint_sender.clone(); Builder::new() .name("solRepairSvc".to_string()) .spawn(move || { @@ -275,8 +275,7 @@ impl RepairService { &blockstore, &exit, &repair_socket, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, repair_info, verified_vote_receiver, &outstanding_requests, @@ -291,7 +290,8 @@ impl RepairService { exit, blockstore, ancestor_hashes_socket, - quic_endpoint_sender, + ancestor_hashes_request_quic_sender, + ancestor_hashes_response_quic_receiver, repair_info, ancestor_hashes_replay_update_receiver, ); @@ -307,8 +307,7 @@ impl RepairService { blockstore: &Blockstore, exit: &AtomicBool, repair_socket: &UdpSocket, - quic_endpoint_sender: &AsyncSender, - quic_endpoint_response_sender: &CrossbeamSender<(SocketAddr, Vec)>, + repair_request_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, repair_info: RepairInfo, verified_vote_receiver: VerifiedVoteReceiver, outstanding_requests: &RwLock, @@ -464,8 +463,7 @@ impl RepairService { &repair_info.repair_validators, &mut outstanding_requests, identity_keypair, - quic_endpoint_sender, - quic_endpoint_response_sender, + repair_request_quic_sender, repair_protocol, ) .ok()??; @@ -1124,8 +1122,7 @@ mod test { let remote_request = RemoteRequest { remote_pubkey: None, remote_address: packet.meta().socket_addr(), - bytes, - response_sender: None, + bytes: Bytes::from(bytes), }; // Deserialize and check the request diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index a1887cf689bb09..5ad270928c3ed9 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -3,7 +3,7 @@ use { cluster_slots_service::cluster_slots::ClusterSlots, repair::{ duplicate_repair_status::get_ancestor_hash_repair_sample_size, - quic_endpoint::{LocalRequest, RemoteRequest}, + quic_endpoint::RemoteRequest, repair_response, repair_service::{OutstandingShredRepairs, RepairStats, REPAIR_MS}, request_response::RequestResponse, @@ -11,7 +11,8 @@ use { }, }, bincode::{serialize, Options}, - crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, + bytes::Bytes, + crossbeam_channel::{Receiver, RecvTimeoutError}, lru::LruCache, rand::{ distributions::{Distribution, WeightedError, WeightedIndex}, @@ -59,7 +60,7 @@ use { thread::{Builder, JoinHandle}, time::{Duration, Instant}, }, - tokio::sync::{mpsc::Sender as AsyncSender, oneshot::Sender as OneShotSender}, + tokio::sync::mpsc::Sender as AsyncSender, }; /// the number of slots to respond with when responding to `Orphan` requests @@ -393,9 +394,9 @@ impl RepairPeers { struct RepairRequestWithMeta { request: RepairProtocol, from_addr: SocketAddr, + protocol: Protocol, stake: u64, whitelisted: bool, - response_sender: Option>>>, } impl ServeRepair { @@ -563,9 +564,9 @@ impl ServeRepair { Ok(RepairRequestWithMeta { request, from_addr, + protocol: remote_request.protocol(), stake, whitelisted, - response_sender: remote_request.response_sender, }) } @@ -636,6 +637,7 @@ impl ServeRepair { blockstore: &Blockstore, requests_receiver: &Receiver, response_sender: &PacketBatchSender, + repair_response_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, stats: &mut ServeRepairStats, data_budget: &DataBudget, ) -> std::result::Result<(), RecvTimeoutError> { @@ -710,6 +712,7 @@ impl ServeRepair { blockstore, decoded_requests, response_sender, + repair_response_quic_sender, stats, data_budget, ); @@ -806,11 +809,12 @@ impl ServeRepair { *stats = ServeRepairStats::default(); } - pub fn listen( + pub(crate) fn listen( self, blockstore: Arc, 
requests_receiver: Receiver, response_sender: PacketBatchSender, + repair_response_quic_sender: AsyncSender<(SocketAddr, Bytes)>, exit: Arc, ) -> JoinHandle<()> { const INTERVAL_MS: u64 = 1000; @@ -840,6 +844,7 @@ impl ServeRepair { &blockstore, &requests_receiver, &response_sender, + &repair_response_quic_sender, &mut stats, &data_budget, ); @@ -967,6 +972,7 @@ impl ServeRepair { blockstore: &Blockstore, requests: Vec, packet_batch_sender: &PacketBatchSender, + repair_response_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, stats: &mut ServeRepairStats, data_budget: &DataBudget, ) { @@ -976,9 +982,9 @@ impl ServeRepair { for RepairRequestWithMeta { request, from_addr, + protocol, stake, whitelisted: _, - response_sender, } in requests.into_iter() { if !data_budget.check(request.max_response_bytes()) { @@ -986,7 +992,7 @@ impl ServeRepair { continue; } // Bypass ping/pong check for requests coming from QUIC endpoint. - if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() { + if !matches!(&request, RepairProtocol::Pong(_)) && protocol == Protocol::UDP { let (check, ping_pkt) = Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair); if let Some(ping_pkt) = ping_pkt { @@ -1006,7 +1012,12 @@ impl ServeRepair { let num_response_packets = rsp.len(); let num_response_bytes = rsp.iter().map(|p| p.meta().size).sum(); if data_budget.take(num_response_bytes) - && send_response(rsp, packet_batch_sender, response_sender) + && send_response( + rsp, + protocol, + packet_batch_sender, + repair_response_quic_sender, + ) { stats.total_response_packets += num_response_packets; match stake > 0 { @@ -1057,8 +1068,7 @@ impl ServeRepair { repair_validators: &Option>, outstanding_requests: &mut OutstandingShredRepairs, identity_keypair: &Keypair, - quic_endpoint_sender: &AsyncSender, - quic_endpoint_response_sender: &Sender<(SocketAddr, Vec)>, + repair_request_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, repair_protocol: Protocol, ) -> Result)>> { // find a peer that appears to be accepting replication and has the desired slot, as indicated @@ -1092,18 +1102,10 @@ impl ServeRepair { match repair_protocol { Protocol::UDP => Ok(Some((peer.serve_repair, out))), Protocol::QUIC => { - let num_expected_responses = - usize::try_from(repair_request.num_expected_responses()).unwrap(); - let request = LocalRequest { - remote_address: peer.serve_repair_quic, - bytes: out, - num_expected_responses, - response_sender: quic_endpoint_response_sender.clone(), - }; - quic_endpoint_sender - .blocking_send(request) - .map_err(|_| Error::SendError) - .map(|()| None) + repair_request_quic_sender + .blocking_send((peer.serve_repair_quic, Bytes::from(out))) + .map_err(|_| Error::SendError)?; + Ok(None) } } } @@ -1420,19 +1422,19 @@ where // Returns true on success. 
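// (UDP responses go out as a packet batch on the streamer channel; QUIC
// responses are forwarded one datagram per packet through the async
// repair_response_quic_sender, as the body below shows.)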
fn send_response( packets: PacketBatch, + protocol: Protocol, packet_batch_sender: &PacketBatchSender, - response_sender: Option>>>, + repair_response_quic_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> bool { - match response_sender { - None => packet_batch_sender.send(packets).is_ok(), - Some(response_sender) => { - let response = packets - .iter() - .filter_map(|packet| packet.data(..)) - .map(Vec::from) - .collect(); - response_sender.send(response).is_ok() - } + match protocol { + Protocol::UDP => packet_batch_sender.send(packets).is_ok(), + Protocol::QUIC => packets + .iter() + .filter_map(|packet| { + let bytes = Bytes::from(Vec::from(packet.data(..)?)); + Some((packet.meta().socket_addr(), bytes)) + }) + .all(|packet| repair_response_quic_sender.blocking_send(packet).is_ok()), } } @@ -1507,8 +1509,7 @@ mod tests { RemoteRequest { remote_pubkey: None, remote_address: packet.meta().socket_addr(), - bytes: packet.data(..).map(Vec::from).unwrap(), - response_sender: None, + bytes: Bytes::from(Vec::from(packet.data(..).unwrap())), } } @@ -2002,10 +2003,7 @@ mod tests { ); let identity_keypair = cluster_info.keypair().clone(); let mut outstanding_requests = OutstandingShredRepairs::default(); - let (quic_endpoint_sender, _quic_endpoint_receiver) = - tokio::sync::mpsc::channel(/*buffer:*/ 128); - let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = - crossbeam_channel::unbounded(); + let (repair_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); let rv = serve_repair.repair_request( &cluster_slots, ShredRepairType::Shred(0, 0), @@ -2014,8 +2012,7 @@ mod tests { &None, &mut outstanding_requests, &identity_keypair, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, Protocol::UDP, // repair_protocol ); assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers))); @@ -2047,8 +2044,7 @@ mod tests { &None, &mut outstanding_requests, &identity_keypair, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, Protocol::UDP, // repair_protocol ) .unwrap() @@ -2087,8 +2083,7 @@ mod tests { &None, &mut outstanding_requests, &identity_keypair, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, Protocol::UDP, // repair_protocol ) .unwrap() @@ -2327,10 +2322,7 @@ mod tests { let cluster_slots = ClusterSlots::default(); let cluster_info = Arc::new(new_test_cluster_info()); let me = cluster_info.my_contact_info(); - let (quic_endpoint_sender, _quic_endpoint_receiver) = - tokio::sync::mpsc::channel(/*buffer:*/ 128); - let (quic_endpoint_response_sender, _quic_endpoint_response_receiver) = - crossbeam_channel::unbounded(); + let (repair_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); // Insert two peers on the network let contact_info2 = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); @@ -2361,8 +2353,7 @@ mod tests { &known_validators, &mut OutstandingShredRepairs::default(), &identity_keypair, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, Protocol::UDP, // repair_protocol ), Err(Error::ClusterInfo(ClusterInfoError::NoPeers)) @@ -2383,8 +2374,7 @@ mod tests { &known_validators, &mut OutstandingShredRepairs::default(), &identity_keypair, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, Protocol::UDP, // repair_protocol ), Ok(Some(_)) @@ -2409,8 +2399,7 @@ mod tests { &None, &mut OutstandingShredRepairs::default(), 
&identity_keypair, - &quic_endpoint_sender, - &quic_endpoint_response_sender, + &repair_request_quic_sender, Protocol::UDP, // repair_protocol ), Ok(Some(_)) diff --git a/core/src/repair/serve_repair_service.rs b/core/src/repair/serve_repair_service.rs index 2be1a6712045de..8801defbbb8cc2 100644 --- a/core/src/repair/serve_repair_service.rs +++ b/core/src/repair/serve_repair_service.rs @@ -1,5 +1,6 @@ use { crate::repair::{quic_endpoint::RemoteRequest, serve_repair::ServeRepair}, + bytes::Bytes, crossbeam_channel::{unbounded, Receiver, Sender}, solana_ledger::blockstore::Blockstore, solana_perf::{packet::PacketBatch, recycler::Recycler}, @@ -8,11 +9,12 @@ use { streamer::{self, StreamerReceiveStats}, }, std::{ - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{atomic::AtomicBool, Arc}, thread::{self, Builder, JoinHandle}, time::Duration, }, + tokio::sync::mpsc::Sender as AsyncSender, }; pub struct ServeRepairService { @@ -20,10 +22,11 @@ pub struct ServeRepairService { } impl ServeRepairService { - pub fn new( + pub(crate) fn new( serve_repair: ServeRepair, remote_request_sender: Sender, remote_request_receiver: Receiver, + repair_response_quic_sender: AsyncSender<(SocketAddr, Bytes)>, blockstore: Arc, serve_repair_socket: UdpSocket, socket_addr_space: SocketAddrSpace, @@ -61,8 +64,13 @@ impl ServeRepairService { socket_addr_space, Some(stats_reporter_sender), ); - let t_listen = - serve_repair.listen(blockstore, remote_request_receiver, response_sender, exit); + let t_listen = serve_repair.listen( + blockstore, + remote_request_receiver, + response_sender, + repair_response_quic_sender, + exit, + ); let thread_hdls = vec![t_receiver, t_packet_adapter, t_responder, t_listen]; Self { thread_hdls } @@ -86,8 +94,7 @@ pub(crate) fn adapt_repair_requests_packets( let request = RemoteRequest { remote_pubkey: None, remote_address: packet.meta().socket_addr(), - bytes, - response_sender: None, + bytes: Bytes::from(bytes), }; if remote_request_sender.send(request).is_err() { return; // The receiver end of the channel is disconnected. 
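A note on the Vec<u8> -> bytes::Bytes migration running through these hunks:
Bytes is a reference-counted view of a buffer, so a repair payload can be
handed to several channels and tasks without copying. A minimal standalone
sketch (illustrative only, not part of the patch; assumes just the bytes
crate):

    use bytes::Bytes;

    fn main() {
        let payload = Bytes::from(vec![0xa5; 1232]);
        // Cloning a Bytes is O(1): it bumps a refcount instead of copying,
        // which is what makes fan-out across channels cheap.
        let for_quic = payload.clone();
        let for_stats = payload.clone();
        assert_eq!(for_quic.as_ptr(), for_stats.as_ptr()); // same backing buffer
        assert_eq!(payload.len(), 1232);
    }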
diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 0776674a4748c1..6d4e6172af46b2 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -201,8 +201,8 @@ impl ShredFetchStage { pub(crate) fn new( sockets: Vec>, turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + repair_response_quic_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, repair_socket: Arc, - repair_quic_endpoint_receiver: Receiver<(SocketAddr, Vec)>, sender: Sender, shred_version: u16, bank_forks: Arc>, @@ -257,8 +257,9 @@ impl ShredFetchStage { Builder::new() .name("solTvuRecvRpr".to_string()) .spawn(|| { - receive_repair_quic_packets( - repair_quic_endpoint_receiver, + receive_quic_datagrams( + repair_response_quic_receiver, + PacketFlags::REPAIR, packet_sender, recycler, exit, @@ -290,6 +291,7 @@ impl ShredFetchStage { .spawn(|| { receive_quic_datagrams( turbine_quic_endpoint_receiver, + PacketFlags::empty(), packet_sender, recycler, exit, @@ -325,15 +327,16 @@ impl ShredFetchStage { } } -fn receive_quic_datagrams( - turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, +pub(crate) fn receive_quic_datagrams( + quic_datagrams_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + flags: PacketFlags, sender: Sender, recycler: PacketBatchRecycler, exit: Arc, ) { const RECV_TIMEOUT: Duration = Duration::from_secs(1); while !exit.load(Ordering::Relaxed) { - let entry = match turbine_quic_endpoint_receiver.recv_timeout(RECV_TIMEOUT) { + let entry = match quic_datagrams_receiver.recv_timeout(RECV_TIMEOUT) { Ok(entry) => entry, Err(RecvTimeoutError::Timeout) => continue, Err(RecvTimeoutError::Disconnected) => return, @@ -345,7 +348,7 @@ fn receive_quic_datagrams( }; let deadline = Instant::now() + PACKET_COALESCE_DURATION; let entries = std::iter::once(entry).chain( - std::iter::repeat_with(|| turbine_quic_endpoint_receiver.recv_deadline(deadline).ok()) + std::iter::repeat_with(|| quic_datagrams_receiver.recv_deadline(deadline).ok()) .while_some(), ); let size = entries @@ -356,52 +359,7 @@ fn receive_quic_datagrams( size: bytes.len(), addr: addr.ip(), port: addr.port(), - flags: PacketFlags::empty(), - }; - packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes); - }) - .count(); - if size > 0 { - packet_batch.truncate(size); - if sender.send(packet_batch).is_err() { - return; - } - } - } -} - -pub(crate) fn receive_repair_quic_packets( - repair_quic_endpoint_receiver: Receiver<(SocketAddr, Vec)>, - sender: Sender, - recycler: PacketBatchRecycler, - exit: Arc, -) { - const RECV_TIMEOUT: Duration = Duration::from_secs(1); - while !exit.load(Ordering::Relaxed) { - let entry = match repair_quic_endpoint_receiver.recv_timeout(RECV_TIMEOUT) { - Ok(entry) => entry, - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => return, - }; - let mut packet_batch = - PacketBatch::new_with_recycler(&recycler, PACKETS_PER_BATCH, "receive_quic_datagrams"); - unsafe { - packet_batch.set_len(PACKETS_PER_BATCH); - }; - let deadline = Instant::now() + PACKET_COALESCE_DURATION; - let entries = std::iter::once(entry).chain( - std::iter::repeat_with(|| repair_quic_endpoint_receiver.recv_deadline(deadline).ok()) - .while_some(), - ); - let size = entries - .filter(|(_, bytes)| bytes.len() <= PACKET_DATA_SIZE) - .zip(packet_batch.iter_mut()) - .map(|((addr, bytes), packet)| { - *packet.meta_mut() = Meta { - size: bytes.len(), - addr: addr.ip(), - port: addr.port(), - flags: PacketFlags::REPAIR, + flags, }; 
packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes); }) diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 083ff02bbb4abc..9938bcf1ab846a 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -14,10 +14,7 @@ use { consensus::{tower_storage::TowerStorage, Tower}, cost_update_service::CostUpdateService, drop_bank_service::DropBankService, - repair::{ - quic_endpoint::LocalRequest, - repair_service::{OutstandingShredRepairs, RepairInfo}, - }, + repair::repair_service::{OutstandingShredRepairs, RepairInfo}, replay_stage::{ReplayStage, ReplayStageConfig}, rewards_recorder_service::RewardsRecorderSender, shred_fetch_stage::ShredFetchStage, @@ -155,7 +152,10 @@ impl Tvu { banking_tracer: Arc, turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, - repair_quic_endpoint_sender: AsyncSender, + repair_response_quic_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + repair_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_response_quic_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, outstanding_repair_requests: Arc>, cluster_slots: Arc, wen_restart_repair_slots: Option>>>, @@ -174,13 +174,11 @@ impl Tvu { let repair_socket = Arc::new(repair_socket); let ancestor_hashes_socket = Arc::new(ancestor_hashes_socket); let fetch_sockets: Vec> = fetch_sockets.into_iter().map(Arc::new).collect(); - let (repair_quic_endpoint_response_sender, repair_quic_endpoint_response_receiver) = - unbounded(); let fetch_stage = ShredFetchStage::new( fetch_sockets, turbine_quic_endpoint_receiver, + repair_response_quic_receiver, repair_socket.clone(), - repair_quic_endpoint_response_receiver, fetch_sender, tvu_config.shred_version, bank_forks.clone(), @@ -240,8 +238,9 @@ impl Tvu { retransmit_sender, repair_socket, ancestor_hashes_socket, - repair_quic_endpoint_sender, - repair_quic_endpoint_response_sender, + repair_request_quic_sender, + ancestor_hashes_request_quic_sender, + ancestor_hashes_response_quic_receiver, exit.clone(), repair_info, leader_schedule_cache.clone(), @@ -405,7 +404,10 @@ impl Tvu { pub mod tests { use { super::*, - crate::consensus::tower_storage::FileTowerStorage, + crate::{ + consensus::tower_storage::FileTowerStorage, + repair::quic_endpoint::RepairQuicAsyncSenders, + }, serial_test::serial, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -436,8 +438,9 @@ pub mod tests { let (turbine_quic_endpoint_sender, _turbine_quic_endpoint_receiver) = tokio::sync::mpsc::channel(/*capacity:*/ 128); let (_turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded(); - let (repair_quic_endpoint_sender, _repair_quic_endpoint_receiver) = - tokio::sync::mpsc::channel(/*buffer:*/ 128); + let (_, repair_response_quic_receiver) = unbounded(); + let repair_quic_async_senders = RepairQuicAsyncSenders::new_dummy(); + let (_, ancestor_hashes_response_quic_receiver) = unbounded(); //start cluster_info1 let cluster_info1 = ClusterInfo::new( target1.info.clone(), @@ -529,7 +532,10 @@ pub mod tests { BankingTracer::new_disabled(), turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver, - repair_quic_endpoint_sender, + repair_response_quic_receiver, + repair_quic_async_senders.repair_request_quic_sender, + repair_quic_async_senders.ancestor_hashes_request_quic_sender, + ancestor_hashes_response_quic_receiver, outstanding_repair_requests, cluster_slots, wen_restart_repair_slots, diff --git 
a/core/src/validator.rs b/core/src/validator.rs index 7915c2117f4f8b..815510611f7954 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -15,7 +15,12 @@ use { ExternalRootSource, Tower, }, poh_timing_report_service::PohTimingReportService, - repair::{self, serve_repair::ServeRepair, serve_repair_service::ServeRepairService}, + repair::{ + self, + quic_endpoint::{RepairQuicAsyncSenders, RepairQuicSenders, RepairQuicSockets}, + serve_repair::ServeRepair, + serve_repair_service::ServeRepairService, + }, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, sample_performance_service::SamplePerformanceService, sigverify, @@ -499,9 +504,9 @@ pub struct Validator { turbine_quic_endpoint: Option, turbine_quic_endpoint_runtime: Option, turbine_quic_endpoint_join_handle: Option, - repair_quic_endpoint: Option, - repair_quic_endpoint_runtime: Option, - repair_quic_endpoint_join_handle: Option, + repair_quic_endpoints: Option<[Endpoint; 3]>, + repair_quic_endpoints_runtime: Option, + repair_quic_endpoints_join_handle: Option, } impl Validator { @@ -1158,19 +1163,10 @@ impl Validator { bank_forks.clone(), config.repair_whitelist.clone(), ); - let (repair_quic_endpoint_sender, repair_quic_endpoint_receiver) = unbounded(); - let serve_repair_service = ServeRepairService::new( - serve_repair, - // Incoming UDP repair requests are adapted into RemoteRequest - // and also sent through the same channel. - repair_quic_endpoint_sender.clone(), - repair_quic_endpoint_receiver, - blockstore.clone(), - node.sockets.serve_repair, - socket_addr_space, - stats_reporter_sender, - exit.clone(), - ); + let (repair_request_quic_sender, repair_request_quic_receiver) = unbounded(); + let (repair_response_quic_sender, repair_response_quic_receiver) = unbounded(); + let (ancestor_hashes_response_quic_sender, ancestor_hashes_response_quic_receiver) = + unbounded(); let waited_for_supermajority = wait_for_supermajority( config, @@ -1267,7 +1263,7 @@ impl Validator { }; // Repair quic endpoint. 
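        // (One runtime now backs three endpoints: the repair server, the
        // repair client, and the ancestor-hashes client, each bound to its
        // own socket via RepairQuicSockets below.)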
- let repair_quic_endpoint_runtime = (current_runtime_handle.is_err() + let repair_quic_endpoints_runtime = (current_runtime_handle.is_err() && genesis_config.cluster_type != ClusterType::MainnetBeta) .then(|| { tokio::runtime::Builder::new_multi_thread() @@ -1276,24 +1272,48 @@ impl Validator { .build() .unwrap() }); - let (repair_quic_endpoint, repair_quic_endpoint_sender, repair_quic_endpoint_join_handle) = + let (repair_quic_endpoints, repair_quic_async_senders, repair_quic_endpoints_join_handle) = if genesis_config.cluster_type == ClusterType::MainnetBeta { - let (sender, _receiver) = tokio::sync::mpsc::channel(1); - (None, sender, None) + (None, RepairQuicAsyncSenders::new_dummy(), None) } else { - repair::quic_endpoint::new_quic_endpoint( - repair_quic_endpoint_runtime + let repair_quic_sockets = RepairQuicSockets { + repair_server_quic_socket: node.sockets.serve_repair_quic, + repair_client_quic_socket: node.sockets.repair_quic, + ancestor_hashes_quic_socket: node.sockets.ancestor_hashes_requests_quic, + }; + let repair_quic_senders = RepairQuicSenders { + repair_request_quic_sender: repair_request_quic_sender.clone(), + repair_response_quic_sender, + ancestor_hashes_response_quic_sender, + }; + repair::quic_endpoint::new_quic_endpoints( + repair_quic_endpoints_runtime .as_ref() .map(TokioRuntime::handle) .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), &identity_keypair, - node.sockets.serve_repair_quic, - repair_quic_endpoint_sender, + repair_quic_sockets, + repair_quic_senders, bank_forks.clone(), ) - .map(|(endpoint, sender, join_handle)| (Some(endpoint), sender, Some(join_handle))) + .map(|(endpoints, senders, join_handle)| { + (Some(endpoints), senders, Some(join_handle)) + }) .unwrap() }; + let serve_repair_service = ServeRepairService::new( + serve_repair, + // Incoming UDP repair requests are adapted into RemoteRequest + // and also sent through the same channel. 
+ repair_request_quic_sender, + repair_request_quic_receiver, + repair_quic_async_senders.repair_response_quic_sender, + blockstore.clone(), + node.sockets.serve_repair, + socket_addr_space, + stats_reporter_sender, + exit.clone(), + ); let in_wen_restart = config.wen_restart_proto_path.is_some() && !waited_for_supermajority; let wen_restart_repair_slots = if in_wen_restart { @@ -1373,7 +1393,10 @@ impl Validator { banking_tracer.clone(), turbine_quic_endpoint_sender.clone(), turbine_quic_endpoint_receiver, - repair_quic_endpoint_sender, + repair_response_quic_receiver, + repair_quic_async_senders.repair_request_quic_sender, + repair_quic_async_senders.ancestor_hashes_request_quic_sender, + ancestor_hashes_response_quic_receiver, outstanding_repair_requests.clone(), cluster_slots.clone(), wen_restart_repair_slots.clone(), @@ -1501,9 +1524,9 @@ impl Validator { turbine_quic_endpoint, turbine_quic_endpoint_runtime, turbine_quic_endpoint_join_handle, - repair_quic_endpoint, - repair_quic_endpoint_runtime, - repair_quic_endpoint_join_handle, + repair_quic_endpoints, + repair_quic_endpoints_runtime, + repair_quic_endpoints_join_handle, }) } @@ -1615,18 +1638,19 @@ impl Validator { } self.gossip_service.join().expect("gossip_service"); - if let Some(repair_quic_endpoint) = &self.repair_quic_endpoint { - repair::quic_endpoint::close_quic_endpoint(repair_quic_endpoint); - } + self.repair_quic_endpoints + .iter() + .flatten() + .for_each(repair::quic_endpoint::close_quic_endpoint); self.serve_repair_service .join() .expect("serve_repair_service"); - if let Some(repair_quic_endpoint_join_handle) = self.repair_quic_endpoint_join_handle { - self.repair_quic_endpoint_runtime - .map(|runtime| runtime.block_on(repair_quic_endpoint_join_handle)) + if let Some(repair_quic_endpoints_join_handle) = self.repair_quic_endpoints_join_handle { + self.repair_quic_endpoints_runtime + .map(|runtime| runtime.block_on(repair_quic_endpoints_join_handle)) .transpose() .unwrap(); - }; + } self.stats_reporter_service .join() .expect("stats_reporter_service"); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 3056090cf9ba94..0d2e0b75317597 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -8,7 +8,6 @@ use { completed_data_sets_service::CompletedDataSetsSender, repair::{ ancestor_hashes_service::AncestorHashesReplayUpdateReceiver, - quic_endpoint::LocalRequest, repair_response, repair_service::{ DumpedSlotsReceiver, OutstandingShredRepairs, PopularPrunedForksSender, RepairInfo, @@ -17,6 +16,7 @@ use { }, result::{Error, Result}, }, + bytes::Bytes, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, rayon::{prelude::*, ThreadPool}, solana_feature_set as feature_set, @@ -31,7 +31,10 @@ use { solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, solana_runtime::bank_forks::BankForks, - solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT}, + solana_sdk::{ + clock::{Slot, DEFAULT_MS_PER_SLOT}, + pubkey::Pubkey, + }, solana_turbine::cluster_nodes, std::{ cmp::Reverse, @@ -374,8 +377,9 @@ impl WindowService { retransmit_sender: Sender>, repair_socket: Arc, ancestor_hashes_socket: Arc, - repair_quic_endpoint_sender: AsyncSender, - repair_quic_endpoint_response_sender: Sender<(SocketAddr, Vec)>, + repair_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_request_quic_sender: AsyncSender<(SocketAddr, Bytes)>, + ancestor_hashes_response_quic_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, exit: Arc, repair_info: 
RepairInfo, leader_schedule_cache: Arc, @@ -399,8 +403,9 @@ impl WindowService { exit.clone(), repair_socket, ancestor_hashes_socket, - repair_quic_endpoint_sender, - repair_quic_endpoint_response_sender, + repair_request_quic_sender, + ancestor_hashes_request_quic_sender, + ancestor_hashes_response_quic_receiver, repair_info, verified_vote_receiver, outstanding_repair_requests.clone(), diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index f0916970c9cae6..8fa99ad85b49ff 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2874,11 +2874,19 @@ pub struct Sockets { pub tpu_forwards: Vec, pub tpu_vote: Vec, pub broadcast: Vec, + // Socket sending out local repair requests, + // and receiving repair responses from the cluster. pub repair: UdpSocket, + pub repair_quic: UdpSocket, pub retransmit_sockets: Vec, + // Socket receiving remote repair requests from the cluster, + // and sending back repair responses. pub serve_repair: UdpSocket, pub serve_repair_quic: UdpSocket, + // Socket sending out local RepairProtocol::AncestorHashes, + // and receiving AncestorHashesResponse from the cluster. pub ancestor_hashes_requests: UdpSocket, + pub ancestor_hashes_requests_quic: UdpSocket, pub tpu_quic: Vec, pub tpu_forwards_quic: Vec, } @@ -2951,6 +2959,7 @@ impl Node { bind_more_with_config(tpu_forwards_quic, num_quic_endpoints, quic_config).unwrap(); let tpu_vote = UdpSocket::bind(&localhost_bind_addr).unwrap(); let repair = UdpSocket::bind(&localhost_bind_addr).unwrap(); + let repair_quic = UdpSocket::bind(&localhost_bind_addr).unwrap(); let rpc_port = find_available_port_in_range(localhost_ip_addr, port_range).unwrap(); let rpc_addr = SocketAddr::new(localhost_ip_addr, rpc_port); let rpc_pubsub_port = find_available_port_in_range(localhost_ip_addr, port_range).unwrap(); @@ -2960,6 +2969,7 @@ impl Node { let serve_repair = UdpSocket::bind(&localhost_bind_addr).unwrap(); let serve_repair_quic = UdpSocket::bind(&localhost_bind_addr).unwrap(); let ancestor_hashes_requests = UdpSocket::bind(&unspecified_bind_addr).unwrap(); + let ancestor_hashes_requests_quic = UdpSocket::bind(&unspecified_bind_addr).unwrap(); let mut info = ContactInfo::new( *pubkey, @@ -3008,10 +3018,12 @@ impl Node { tpu_vote: vec![tpu_vote], broadcast, repair, + repair_quic, retransmit_sockets: vec![retransmit_socket], serve_repair, serve_repair_quic, ancestor_hashes_requests, + ancestor_hashes_requests_quic, tpu_quic, tpu_forwards_quic, }, @@ -3084,10 +3096,12 @@ impl Node { let (tpu_vote_port, tpu_vote) = Self::bind(bind_ip_addr, port_range); let (_, retransmit_socket) = Self::bind(bind_ip_addr, port_range); let (_, repair) = Self::bind(bind_ip_addr, port_range); + let (_, repair_quic) = Self::bind(bind_ip_addr, port_range); let (serve_repair_port, serve_repair) = Self::bind(bind_ip_addr, port_range); let (serve_repair_quic_port, serve_repair_quic) = Self::bind(bind_ip_addr, port_range); let (_, broadcast) = Self::bind(bind_ip_addr, port_range); let (_, ancestor_hashes_requests) = Self::bind(bind_ip_addr, port_range); + let (_, ancestor_hashes_requests_quic) = Self::bind(bind_ip_addr, port_range); let rpc_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); @@ -3134,10 +3148,12 @@ impl Node { tpu_vote: vec![tpu_vote], broadcast: vec![broadcast], repair, + repair_quic, retransmit_sockets: vec![retransmit_socket], serve_repair, serve_repair_quic, ancestor_hashes_requests, + 
ancestor_hashes_requests_quic, tpu_quic, tpu_forwards_quic, }, @@ -3199,6 +3215,7 @@ impl Node { multi_bind_in_range(bind_ip_addr, port_range, 8).expect("retransmit multi_bind"); let (_, repair) = Self::bind(bind_ip_addr, port_range); + let (_, repair_quic) = Self::bind(bind_ip_addr, port_range); let (serve_repair_port, serve_repair) = Self::bind(bind_ip_addr, port_range); let (serve_repair_quic_port, serve_repair_quic) = Self::bind(bind_ip_addr, port_range); @@ -3206,6 +3223,7 @@ impl Node { multi_bind_in_range(bind_ip_addr, port_range, 4).expect("broadcast multi_bind"); let (_, ancestor_hashes_requests) = Self::bind(bind_ip_addr, port_range); + let (_, ancestor_hashes_requests_quic) = Self::bind(bind_ip_addr, port_range); let mut info = ContactInfo::new( *pubkey, @@ -3239,11 +3257,13 @@ impl Node { tpu_vote: tpu_vote_sockets, broadcast, repair, + repair_quic, retransmit_sockets, serve_repair, serve_repair_quic, ip_echo: Some(ip_echo), ancestor_hashes_requests, + ancestor_hashes_requests_quic, tpu_quic, tpu_forwards_quic, }, diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 50008171eed119..196dda94afa624 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -32,7 +32,7 @@ pub struct UdpSocketPair { pub type PortRange = (u16, u16); pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000); -pub const MINIMUM_VALIDATOR_PORT_RANGE_WIDTH: u16 = 14; // VALIDATOR_PORT_RANGE must be at least this wide +pub const MINIMUM_VALIDATOR_PORT_RANGE_WIDTH: u16 = 16; // VALIDATOR_PORT_RANGE must be at least this wide pub(crate) const HEADER_LENGTH: usize = 4; pub(crate) const IP_ECHO_SERVER_RESPONSE_LENGTH: usize = HEADER_LENGTH + 23; From 9b5525d1270440601d7fe7980ab6b0652bf4fd01 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Wed, 2 Oct 2024 21:01:33 +0800 Subject: [PATCH 421/529] ci: ignore the tonic audit as a temporary stopgap (#3052) --- ci/do-audit.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ci/do-audit.sh b/ci/do-audit.sh index 49c4510d7bb44f..771989d556b975 100755 --- a/ci/do-audit.sh +++ b/ci/do-audit.sh @@ -39,6 +39,15 @@ cargo_audit_ignores=( # URL: https://rustsec.org/advisories/RUSTSEC-2024-0344 # Solution: Upgrade to >=4.1.3 --ignore RUSTSEC-2024-0344 + + # Crate: tonic + # Version: 0.9.2 + # Title: Remotely exploitable Denial of Service in Tonic + # Date: 2024-10-01 + # ID: RUSTSEC-2024-0376 + # URL: https://rustsec.org/advisories/RUSTSEC-2024-0376 + # Solution: Upgrade to >=0.12.3 + --ignore RUSTSEC-2024-0376 ) scripts/cargo-for-all-lock-files.sh audit "${cargo_audit_ignores[@]}" | $dep_tree_filter # we want the `cargo audit` exit code, not `$dep_tree_filter`'s From 14fe368a2b1d71052506bd3585b575cb6f9933b9 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Wed, 2 Oct 2024 11:40:17 -0400 Subject: [PATCH 422/529] banking_stage: use iproduct! 
macro in test (#3054) --- .../src/banking_stage/unprocessed_transaction_storage.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 1ee7363e0d1924..cdcba288f90f8d 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -1017,6 +1017,7 @@ impl ThreadLocalUnprocessedPackets { mod tests { use { super::*, + itertools::iproduct, solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}, solana_perf::packet::{Packet, PacketFlags}, solana_runtime::genesis_utils, @@ -1273,10 +1274,10 @@ mod tests { assert!(deserialized_packets.contains(&big_transfer)); } - for (vote_source, staked) in [VoteSource::Gossip, VoteSource::Tpu] - .into_iter() - .flat_map(|vs| [(vs, true), (vs, false)]) - { + for (vote_source, staked) in iproduct!( + [VoteSource::Gossip, VoteSource::Tpu].into_iter(), + [true, false].into_iter() + ) { let latest_unprocessed_votes = LatestUnprocessedVotes::default(); if staked { latest_unprocessed_votes.set_staked_nodes(&[keypair.pubkey()]); From 76cbf1a91c3552ad82ab8935902fa5f0ae46fa7f Mon Sep 17 00:00:00 2001 From: ripatel-fd Date: Wed, 2 Oct 2024 17:46:33 +0200 Subject: [PATCH 423/529] Fix broken parallelism in quic-client (#2526) * Fix broken parallelism in quic-client Fixes excessive fragmentation by TPU clients leading to a large number of streams per conn in 'sending' state simultaneously. This, in turn, requires excessive in-memory buffering server-side to reassemble fragmented transactions. - Simplifies QuicClient::send_batch to enqueue send operations in sequential order - Removes the "max_parallel_streams" config option The quic-client now produces an ordered fragment stream when scheduling send operations from a single-thread. 
* quic-client: remove outdated test --------- Co-authored-by: Richard Patel Co-authored-by: Alessandro Decina --- quic-client/src/lib.rs | 83 +--------------------- quic-client/src/nonblocking/quic_client.rs | 47 ++---------- 2 files changed, 8 insertions(+), 122 deletions(-) diff --git a/quic-client/src/lib.rs b/quic-client/src/lib.rs index 86ddd154fc8b13..e9741777ccb6ac 100644 --- a/quic-client/src/lib.rs +++ b/quic-client/src/lib.rs @@ -26,11 +26,7 @@ use { pubkey::Pubkey, signature::{Keypair, Signer}, }, - solana_streamer::{ - nonblocking::quic::{compute_max_allowed_uni_streams, ConnectionPeerType}, - streamer::StakedNodes, - tls_certificates::new_dummy_x509_certificate, - }, + solana_streamer::{streamer::StakedNodes, tls_certificates::new_dummy_x509_certificate}, std::{ net::{IpAddr, SocketAddr}, sync::{Arc, RwLock}, @@ -65,13 +61,12 @@ impl ConnectionPool for QuicPool { fn create_pool_entry( &self, - config: &Self::NewConnectionConfig, + _config: &Self::NewConnectionConfig, addr: &SocketAddr, ) -> Arc { Arc::new(Quic(Arc::new(QuicClient::new( self.endpoint.clone(), *addr, - config.compute_max_parallel_streams(), )))) } } @@ -120,24 +115,6 @@ impl QuicConfig { QuicLazyInitializedEndpoint::new(cert_guard.clone(), self.client_endpoint.as_ref().cloned()) } - fn compute_max_parallel_streams(&self) -> usize { - let (client_type, total_stake) = - self.maybe_client_pubkey - .map_or((ConnectionPeerType::Unstaked, 0), |pubkey| { - self.maybe_staked_nodes.as_ref().map_or( - (ConnectionPeerType::Unstaked, 0), - |stakes| { - let rstakes = stakes.read().unwrap(); - rstakes.get_node_stake(&pubkey).map_or( - (ConnectionPeerType::Unstaked, rstakes.total_stake()), - |stake| (ConnectionPeerType::Staked(stake), rstakes.total_stake()), - ) - }, - ) - }); - compute_max_allowed_uni_streams(client_type, total_stake) - } - pub fn update_client_certificate(&mut self, keypair: &Keypair, _ipaddr: IpAddr) { let (cert, priv_key) = new_dummy_x509_certificate(keypair); @@ -250,59 +227,3 @@ pub fn new_quic_connection_cache( let connection_manager = QuicConnectionManager::new_with_connection_config(config); ConnectionCache::new(name, connection_manager, connection_pool_size) } - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::quic::{ - QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, QUIC_MIN_STAKED_CONCURRENT_STREAMS, - QUIC_TOTAL_STAKED_CONCURRENT_STREAMS, - }, - std::collections::HashMap, - }; - - #[test] - fn test_connection_cache_max_parallel_chunks() { - solana_logger::setup(); - - let mut connection_config = QuicConfig::new().unwrap(); - assert_eq!( - connection_config.compute_max_parallel_streams(), - QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS - ); - - let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let pubkey = Pubkey::new_unique(); - connection_config.set_staked_nodes(&staked_nodes, &pubkey); - assert_eq!( - connection_config.compute_max_parallel_streams(), - QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS - ); - let overrides = HashMap::::default(); - let mut stakes = HashMap::from([(Pubkey::new_unique(), 10_000)]); - *staked_nodes.write().unwrap() = - StakedNodes::new(Arc::new(stakes.clone()), overrides.clone()); - assert_eq!( - connection_config.compute_max_parallel_streams(), - QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS - ); - - stakes.insert(pubkey, 1); - *staked_nodes.write().unwrap() = - StakedNodes::new(Arc::new(stakes.clone()), overrides.clone()); - let delta = - (QUIC_TOTAL_STAKED_CONCURRENT_STREAMS - QUIC_MIN_STAKED_CONCURRENT_STREAMS) as f64; - - assert_eq!( - 
connection_config.compute_max_parallel_streams(), - (QUIC_MIN_STAKED_CONCURRENT_STREAMS as f64 + (1f64 / 10000f64) * delta) as usize - ); - stakes.insert(pubkey, 1_000); - *staked_nodes.write().unwrap() = StakedNodes::new(Arc::new(stakes.clone()), overrides); - assert_ne!( - connection_config.compute_max_parallel_streams(), - QUIC_MIN_STAKED_CONCURRENT_STREAMS - ); - } -} diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index c6a66fe56bcd10..0d6bede4149f69 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -4,8 +4,7 @@ use { async_lock::Mutex, async_trait::async_trait, - futures::future::{join_all, TryFutureExt}, - itertools::Itertools, + futures::future::TryFutureExt, log::*, quinn::{ crypto::rustls::QuicClientConfig, ClientConfig, ClosedStream, ConnectError, Connection, @@ -20,10 +19,7 @@ use { solana_net_utils::VALIDATOR_PORT_RANGE, solana_rpc_client_api::client_error::ErrorKind as ClientErrorKind, solana_sdk::{ - quic::{ - QUIC_CONNECTION_HANDSHAKE_TIMEOUT, QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT, - QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, - }, + quic::{QUIC_CONNECTION_HANDSHAKE_TIMEOUT, QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT}, signature::Keypair, transport::Result as TransportResult, }, @@ -284,21 +280,15 @@ pub struct QuicClient { connection: Arc>>, addr: SocketAddr, stats: Arc, - chunk_size: usize, } impl QuicClient { - pub fn new( - endpoint: Arc, - addr: SocketAddr, - chunk_size: usize, - ) -> Self { + pub fn new(endpoint: Arc, addr: SocketAddr) -> Self { Self { endpoint, connection: Arc::new(Mutex::new(None)), addr, stats: Arc::new(ClientStats::default()), - chunk_size, } } @@ -307,7 +297,6 @@ impl QuicClient { connection: &Connection, ) -> Result<(), QuicError> { let mut send_stream = connection.open_uni().await?; - send_stream.write_all(data).await?; Ok(()) } @@ -520,28 +509,8 @@ impl QuicClient { .await .map_err(Into::::into)?; - // Used to avoid dereferencing the Arc multiple times below - // by just getting a reference to the NewConnection once - let connection_ref: &Connection = &connection; - - let chunks = buffers[1..buffers.len()].iter().chunks(self.chunk_size); - - let futures: Vec<_> = chunks - .into_iter() - .map(|buffs| { - join_all( - buffs - .into_iter() - .map(|buf| Self::_send_buffer_using_conn(buf.as_ref(), connection_ref)), - ) - }) - .collect(); - - for f in futures { - f.await - .into_iter() - .try_for_each(|res| res) - .map_err(Into::::into)?; + for data in buffers[1..buffers.len()].iter() { + Self::_send_buffer_using_conn(data.as_ref(), &connection).await?; } Ok(()) } @@ -574,11 +543,7 @@ impl QuicClientConnection { addr: SocketAddr, connection_stats: Arc, ) -> Self { - let client = Arc::new(QuicClient::new( - endpoint, - addr, - QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, - )); + let client = Arc::new(QuicClient::new(endpoint, addr)); Self::new_with_client(client, connection_stats) } From b39bcb6379e0f500121d549205a7b3be6c76a791 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Wed, 2 Oct 2024 11:45:44 -0500 Subject: [PATCH 424/529] remove cu price rounding (#3047) --- core/src/banking_stage.rs | 8 ++--- core/src/banking_stage/packet_deserializer.rs | 32 +++---------------- core/src/banking_stage/packet_receiver.rs | 14 ++------ .../scheduler_controller.rs | 3 +- sdk/src/packet.rs | 19 ++--------- 5 files changed, 12 insertions(+), 64 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 6f22db2d41a87b..32cc3fbe44dda1 100644 --- 
a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -507,7 +507,6 @@ impl BankingStage { Self::spawn_thread_local_multi_iterator_thread( id, packet_receiver, - bank_forks.clone(), decision_maker.clone(), committer.clone(), transaction_recorder.clone(), @@ -566,7 +565,6 @@ impl BankingStage { bank_thread_hdls.push(Self::spawn_thread_local_multi_iterator_thread( id, packet_receiver, - bank_forks.clone(), decision_maker.clone(), committer.clone(), transaction_recorder.clone(), @@ -631,8 +629,7 @@ impl BankingStage { // Spawn the central scheduler thread bank_thread_hdls.push({ - let packet_deserializer = - PacketDeserializer::new(non_vote_receiver, bank_forks.clone()); + let packet_deserializer = PacketDeserializer::new(non_vote_receiver); let scheduler = PrioGraphScheduler::new(work_senders, finished_work_receiver); let scheduler_controller = SchedulerController::new( decision_maker.clone(), @@ -660,7 +657,6 @@ impl BankingStage { fn spawn_thread_local_multi_iterator_thread( id: u32, packet_receiver: BankingPacketReceiver, - bank_forks: Arc>, decision_maker: DecisionMaker, committer: Committer, transaction_recorder: TransactionRecorder, @@ -668,7 +664,7 @@ impl BankingStage { mut forwarder: Forwarder, unprocessed_transaction_storage: UnprocessedTransactionStorage, ) -> JoinHandle<()> { - let mut packet_receiver = PacketReceiver::new(id, packet_receiver, bank_forks); + let mut packet_receiver = PacketReceiver::new(id, packet_receiver); let consumer = Consumer::new( committer, transaction_recorder, diff --git a/core/src/banking_stage/packet_deserializer.rs b/core/src/banking_stage/packet_deserializer.rs index e310d5505c03c9..78fab3718252f4 100644 --- a/core/src/banking_stage/packet_deserializer.rs +++ b/core/src/banking_stage/packet_deserializer.rs @@ -11,12 +11,8 @@ use { }, crossbeam_channel::RecvTimeoutError, solana_perf::packet::PacketBatch, - solana_runtime::bank_forks::BankForks, solana_sdk::saturating_add_assign, - std::{ - sync::{Arc, RwLock}, - time::{Duration, Instant}, - }, + std::time::{Duration, Instant}, }; /// Results from deserializing packet batches. 
@@ -33,8 +29,6 @@ pub struct ReceivePacketResults { pub struct PacketDeserializer { /// Receiver for packet batches from sigverify stage packet_batch_receiver: BankingPacketReceiver, - /// Provides working bank for deserializer to check feature activation - bank_forks: Arc>, } #[derive(Default, Debug, PartialEq)] @@ -83,13 +77,9 @@ impl PacketReceiverStats { } impl PacketDeserializer { - pub fn new( - packet_batch_receiver: BankingPacketReceiver, - bank_forks: Arc>, - ) -> Self { + pub fn new(packet_batch_receiver: BankingPacketReceiver) -> Self { Self { packet_batch_receiver, - bank_forks, } } @@ -104,15 +94,9 @@ impl PacketDeserializer { ) -> Result { let (packet_count, packet_batches) = self.receive_until(recv_timeout, capacity)?; - // Note: this can be removed after feature `round_compute_unit_price` is activated in - // mainnet-beta - let _working_bank = self.bank_forks.read().unwrap().working_bank(); - let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set - Ok(Self::deserialize_and_collect_packets( packet_count, &packet_batches, - round_compute_unit_price_enabled, packet_filter, )) } @@ -122,7 +106,6 @@ impl PacketDeserializer { fn deserialize_and_collect_packets( packet_count: usize, banking_batches: &[BankingPacketBatch], - round_compute_unit_price_enabled: bool, packet_filter: impl Fn( ImmutableDeserializedPacket, ) -> Result, @@ -147,7 +130,6 @@ impl PacketDeserializer { deserialized_packets.extend(Self::deserialize_packets( packet_batch, &packet_indexes, - round_compute_unit_price_enabled, &mut packet_stats, &packet_filter, )); @@ -218,17 +200,13 @@ impl PacketDeserializer { fn deserialize_packets<'a>( packet_batch: &'a PacketBatch, packet_indexes: &'a [usize], - round_compute_unit_price_enabled: bool, packet_stats: &'a mut PacketReceiverStats, packet_filter: &'a impl Fn( ImmutableDeserializedPacket, ) -> Result, ) -> impl Iterator + 'a { packet_indexes.iter().filter_map(move |packet_index| { - let mut packet_clone = packet_batch[*packet_index].clone(); - packet_clone - .meta_mut() - .set_round_compute_unit_price(round_compute_unit_price_enabled); + let packet_clone = packet_batch[*packet_index].clone(); match ImmutableDeserializedPacket::new(packet_clone) .and_then(|packet| packet_filter(packet).map_err(Into::into)) @@ -260,7 +238,7 @@ mod tests { #[test] fn test_deserialize_and_collect_packets_empty() { - let results = PacketDeserializer::deserialize_and_collect_packets(0, &[], false, Ok); + let results = PacketDeserializer::deserialize_and_collect_packets(0, &[], Ok); assert_eq!(results.deserialized_packets.len(), 0); assert!(results.new_tracer_stats_option.is_none()); assert_eq!(results.packet_stats.passed_sigverify_count, 0); @@ -277,7 +255,6 @@ mod tests { let results = PacketDeserializer::deserialize_and_collect_packets( packet_count, &[BankingPacketBatch::new((packet_batches, None))], - false, Ok, ); assert_eq!(results.deserialized_packets.len(), 2); @@ -297,7 +274,6 @@ mod tests { let results = PacketDeserializer::deserialize_and_collect_packets( packet_count, &[BankingPacketBatch::new((packet_batches, None))], - false, Ok, ); assert_eq!(results.deserialized_packets.len(), 1); diff --git a/core/src/banking_stage/packet_receiver.rs b/core/src/banking_stage/packet_receiver.rs index 65c0d5816472c4..6b77d103c69670 100644 --- a/core/src/banking_stage/packet_receiver.rs +++ b/core/src/banking_stage/packet_receiver.rs @@ -9,12 +9,8 @@ use { crate::{banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats}, 
crossbeam_channel::RecvTimeoutError, solana_measure::{measure::Measure, measure_us}, - solana_runtime::bank_forks::BankForks, solana_sdk::{saturating_add_assign, timing::timestamp}, - std::{ - sync::{atomic::Ordering, Arc, RwLock}, - time::Duration, - }, + std::{sync::atomic::Ordering, time::Duration}, }; pub struct PacketReceiver { @@ -23,14 +19,10 @@ pub struct PacketReceiver { } impl PacketReceiver { - pub fn new( - id: u32, - banking_packet_receiver: BankingPacketReceiver, - bank_forks: Arc>, - ) -> Self { + pub fn new(id: u32, banking_packet_receiver: BankingPacketReceiver) -> Self { Self { id, - packet_deserializer: PacketDeserializer::new(banking_packet_receiver, bank_forks), + packet_deserializer: PacketDeserializer::new(banking_packet_receiver), } } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 9966a0527d0286..47a4c8f5a39736 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -733,8 +733,7 @@ mod tests { let decision_maker = DecisionMaker::new(Pubkey::new_unique(), poh_recorder.clone()); let (banking_packet_sender, banking_packet_receiver) = unbounded(); - let packet_deserializer = - PacketDeserializer::new(banking_packet_receiver, bank_forks.clone()); + let packet_deserializer = PacketDeserializer::new(banking_packet_receiver); let (consume_work_senders, consume_work_receivers) = create_channels(num_threads); let (finished_consume_work_sender, finished_consume_work_receiver) = unbounded(); diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index 1aa93308820d47..f0258710dcf423 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -29,10 +29,8 @@ bitflags! { const REPAIR = 0b0000_0100; const SIMPLE_VOTE_TX = 0b0000_1000; const TRACER_PACKET = 0b0001_0000; - /// to be set by bank.feature_set.is_active(round_compute_unit_price::id()) at the moment - /// the packet is built. - /// This field can be removed when the above feature gate is adopted by mainnet-beta. - const ROUND_COMPUTE_UNIT_PRICE = 0b0010_0000; + // Previously used - this can now be re-used for something else. + const UNUSED = 0b0010_0000; /// For tracking performance const PERF_TRACK_PACKET = 0b0100_0000; /// For marking packets from staked nodes @@ -250,14 +248,6 @@ impl Meta { self.flags.set(PacketFlags::SIMPLE_VOTE_TX, is_simple_vote); } - #[inline] - pub fn set_round_compute_unit_price(&mut self, round_compute_unit_price: bool) { - self.flags.set( - PacketFlags::ROUND_COMPUTE_UNIT_PRICE, - round_compute_unit_price, - ); - } - #[inline] pub fn forwarded(&self) -> bool { self.flags.contains(PacketFlags::FORWARDED) @@ -283,11 +273,6 @@ impl Meta { self.flags.contains(PacketFlags::PERF_TRACK_PACKET) } - #[inline] - pub fn round_compute_unit_price(&self) -> bool { - self.flags.contains(PacketFlags::ROUND_COMPUTE_UNIT_PRICE) - } - #[inline] pub fn is_from_staked_node(&self) -> bool { self.flags.contains(PacketFlags::FROM_STAKED_NODE) From 83e7d84bcc4cf438905d07279bc07e012a49afd9 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 2 Oct 2024 12:51:38 -0500 Subject: [PATCH 425/529] Add hidden CLI arg to control number of shred sigverify threads (#3046) The argument controls the size of the rayon threadpool that is used to perform signature verification on received shreds. The argument is hidden for now. 
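As a rough sketch of how the argument maps onto a pool (the standalone function below is illustrative only; in the patch the builder call lives inline in spawn_shred_sigverify):

    use std::num::NonZeroUsize;

    // Build the dedicated shred-sigverify pool; NonZeroUsize guarantees at
    // least one worker, and the thread-name prefix matches the patch below.
    fn build_shred_sigverify_pool(num_threads: NonZeroUsize) -> rayon::ThreadPool {
        rayon::ThreadPoolBuilder::new()
            .num_threads(num_threads.get())
            .thread_name(|i| format!("solSvrfyShred{i:02}"))
            .build()
            .expect("new rayon threadpool")
    }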
This change allows configuration of the value, but the default behavior (ie not setting the arg) matches the pre-existing behavior. --- core/src/tvu.rs | 3 +++ core/src/validator.rs | 7 ++++++- local-cluster/src/validator_configs.rs | 1 + turbine/src/sigverify_shreds.rs | 7 ++++--- validator/src/cli/thread_args.rs | 23 ++++++++++++++++++++++- validator/src/main.rs | 2 ++ 6 files changed, 38 insertions(+), 5 deletions(-) diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 9938bcf1ab846a..5134228c34ac4c 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -89,6 +89,7 @@ pub struct TvuConfig { pub wait_for_vote_to_start_leader: bool, pub replay_forks_threads: NonZeroUsize, pub replay_transactions_threads: NonZeroUsize, + pub shred_sigverify_threads: NonZeroUsize, } impl Default for TvuConfig { @@ -101,6 +102,7 @@ impl Default for TvuConfig { wait_for_vote_to_start_leader: false, replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + shred_sigverify_threads: NonZeroUsize::new(1).expect("1 is non-zero"), } } } @@ -196,6 +198,7 @@ impl Tvu { fetch_receiver, retransmit_sender.clone(), verified_sender, + tvu_config.shred_sigverify_threads, ); let retransmit_stage = RetransmitStage::new( diff --git a/core/src/validator.rs b/core/src/validator.rs index 815510611f7954..aa9b9e81ab6fb2 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -82,7 +82,7 @@ use { poh_recorder::PohRecorder, poh_service::{self, PohService}, }, - solana_rayon_threadlimit::get_max_thread_count, + solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ @@ -286,6 +286,7 @@ pub struct ValidatorConfig { pub ip_echo_server_threads: NonZeroUsize, pub replay_forks_threads: NonZeroUsize, pub replay_transactions_threads: NonZeroUsize, + pub tvu_shred_sigverify_threads: NonZeroUsize, pub delay_leader_block_for_pending_fork: bool, } @@ -358,6 +359,7 @@ impl Default for ValidatorConfig { ip_echo_server_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + tvu_shred_sigverify_threads: NonZeroUsize::new(1).expect("1 is non-zero"), delay_leader_block_for_pending_fork: false, } } @@ -373,6 +375,8 @@ impl ValidatorConfig { replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_transactions_threads: NonZeroUsize::new(get_max_thread_count()) .expect("thread count is non-zero"), + tvu_shred_sigverify_threads: NonZeroUsize::new(get_thread_count()) + .expect("thread count is non-zero"), ..Self::default() } } @@ -1382,6 +1386,7 @@ impl Validator { wait_for_vote_to_start_leader, replay_forks_threads: config.replay_forks_threads, replay_transactions_threads: config.replay_transactions_threads, + shred_sigverify_threads: config.tvu_shred_sigverify_threads, }, &max_slots, block_metadata_notifier, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index a2366eb41489c8..bbcd1067851805 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -72,6 +72,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { ip_echo_server_threads: config.ip_echo_server_threads, replay_forks_threads: config.replay_forks_threads, replay_transactions_threads: config.replay_transactions_threads, + 
tvu_shred_sigverify_threads: config.tvu_shred_sigverify_threads, delay_leader_block_for_pending_fork: config.delay_leader_block_for_pending_fork, } } diff --git a/turbine/src/sigverify_shreds.rs b/turbine/src/sigverify_shreds.rs index b1b4530b666e72..de544739ac54b7 100644 --- a/turbine/src/sigverify_shreds.rs +++ b/turbine/src/sigverify_shreds.rs @@ -13,7 +13,6 @@ use { sigverify_shreds::{verify_shreds_gpu, LruCache}, }, solana_perf::{self, deduper::Deduper, packet::PacketBatch, recycler_cache::RecyclerCache}, - solana_rayon_threadlimit::get_thread_count, solana_runtime::{ bank::{Bank, MAX_LEADER_SCHEDULE_STAKES}, bank_forks::BankForks, @@ -26,6 +25,7 @@ use { static_assertions::const_assert_eq, std::{ collections::HashMap, + num::NonZeroUsize, sync::{ atomic::{AtomicUsize, Ordering}, Arc, RwLock, @@ -66,6 +66,7 @@ pub fn spawn_shred_sigverify( shred_fetch_receiver: Receiver, retransmit_sender: Sender>>, verified_sender: Sender>, + num_sigverify_threads: NonZeroUsize, ) -> JoinHandle<()> { let recycler_cache = RecyclerCache::warmed(); let mut stats = ShredSigVerifyStats::new(Instant::now()); @@ -75,10 +76,10 @@ pub fn spawn_shred_sigverify( CLUSTER_NODES_CACHE_TTL, ); let thread_pool = ThreadPoolBuilder::new() - .num_threads(get_thread_count()) + .num_threads(num_sigverify_threads.get()) .thread_name(|i| format!("solSvrfyShred{i:02}")) .build() - .unwrap(); + .expect("new rayon threadpool"); let run_shred_sigverify = move || { let mut rng = rand::thread_rng(); let mut deduper = Deduper::<2, [u8]>::new(&mut rng, DEDUPER_NUM_BITS); diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs index 1841da54a1e028..33b6dd5d457161 100644 --- a/validator/src/cli/thread_args.rs +++ b/validator/src/cli/thread_args.rs @@ -3,7 +3,7 @@ use { clap::{value_t_or_exit, Arg, ArgMatches}, solana_clap_utils::{hidden_unless_forced, input_validators::is_within_range}, - solana_rayon_threadlimit::get_max_thread_count, + solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, std::{num::NonZeroUsize, ops::RangeInclusive}, }; @@ -13,6 +13,7 @@ pub struct DefaultThreadArgs { pub replay_forks_threads: String, pub replay_transactions_threads: String, pub tvu_receive_threads: String, + pub tvu_sigverify_threads: String, } impl Default for DefaultThreadArgs { @@ -23,6 +24,7 @@ impl Default for DefaultThreadArgs { replay_transactions_threads: ReplayTransactionsThreadsArg::bounded_default() .to_string(), tvu_receive_threads: TvuReceiveThreadsArg::bounded_default().to_string(), + tvu_sigverify_threads: TvuShredSigverifyThreadsArg::bounded_default().to_string(), } } } @@ -33,6 +35,7 @@ pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec> { new_thread_arg::(&defaults.replay_forks_threads), new_thread_arg::(&defaults.replay_transactions_threads), new_thread_arg::(&defaults.tvu_receive_threads), + new_thread_arg::(&defaults.tvu_sigverify_threads), ] } @@ -52,6 +55,7 @@ pub struct NumThreadConfig { pub replay_forks_threads: NonZeroUsize, pub replay_transactions_threads: NonZeroUsize, pub tvu_receive_threads: NonZeroUsize, + pub tvu_sigverify_threads: NonZeroUsize, } pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { @@ -72,6 +76,11 @@ pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { NonZeroUsize ), tvu_receive_threads: value_t_or_exit!(matches, TvuReceiveThreadsArg::NAME, NonZeroUsize), + tvu_sigverify_threads: value_t_or_exit!( + matches, + TvuShredSigverifyThreadsArg::NAME, + NonZeroUsize + ), } } @@ -163,3 +172,15 @@ impl ThreadArg for 
TvuReceiveThreadsArg { solana_gossip::cluster_info::MINIMUM_NUM_TVU_SOCKETS.get() } } + +struct TvuShredSigverifyThreadsArg; +impl ThreadArg for TvuShredSigverifyThreadsArg { + const NAME: &'static str = "tvu_shred_sigverify_threads"; + const LONG_NAME: &'static str = "tvu-shred-sigverify-threads"; + const HELP: &'static str = + "Number of threads to use for performing signature verification of received shreds"; + + fn default() -> usize { + get_thread_count() + } +} diff --git a/validator/src/main.rs b/validator/src/main.rs index c3cedd49828d06..0a932d8045490c 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1390,6 +1390,7 @@ pub fn main() { replay_forks_threads, replay_transactions_threads, tvu_receive_threads, + tvu_sigverify_threads, } = cli::thread_args::parse_num_threads_args(&matches); let mut validator_config = ValidatorConfig { @@ -1533,6 +1534,7 @@ pub fn main() { ip_echo_server_threads, replay_forks_threads, replay_transactions_threads, + tvu_shred_sigverify_threads: tvu_sigverify_threads, delay_leader_block_for_pending_fork: matches .is_present("delay_leader_block_for_pending_fork"), wen_restart_proto_path: value_t!(matches, "wen_restart", PathBuf).ok(), From b23e63670172de16de51f368d245bd05b903f0e8 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Thu, 3 Oct 2024 09:07:27 -0500 Subject: [PATCH 426/529] clean scan optimization: scan disk index only for zero lamport (#2879) * clean scan optimization * fix rebase conflicts * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks * Update accounts-db/src/accounts_db.rs Co-authored-by: Brooks * review update * revert ZeroLamport trait for IndexInfoInner --------- Co-authored-by: HaoranYi Co-authored-by: Brooks --- accounts-db/src/accounts_db.rs | 57 +++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 12 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 1d507ccb88a889..2ea02149263149 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1335,6 +1335,10 @@ impl StoreAccountsTiming { struct CleaningInfo { slot_list: SlotList, ref_count: u64, + /// Indicates if this account might have a zero lamport index entry. + /// If false, the account *shall* not have zero lamport index entries. + /// If true, the account *might* have zero lamport index entries. + might_contain_zero_lamport_entry: bool, } /// This is the return type of AccountsDb::construct_candidate_clean_keys. @@ -2832,6 +2836,7 @@ impl AccountsDb { CleaningInfo { slot_list, ref_count, + .. }, ) in bin.iter() { @@ -3088,7 +3093,18 @@ impl AccountsDb { candidates_bin = candidates[curr_bin].write().unwrap(); prev_bin = curr_bin; } - candidates_bin.insert(removed_pubkey, CleaningInfo::default()); + // Conservatively mark that the candidate might have a zero lamport entry for + // correctness, so that the scan WILL try to look in disk if it is + // not in-mem. These keys are from 1) recently processed + // slots, 2) zero lamports found in shrink. Therefore, they are very likely + // to be in-memory, and seldom do we need to look them up in disk.
+ candidates_bin.insert( + removed_pubkey, + CleaningInfo { + might_contain_zero_lamport_entry: true, + ..Default::default() + }, + ); } } } @@ -3102,12 +3118,6 @@ impl AccountsDb { .sum::() as u64 } - fn insert_pubkey(&self, candidates: &[RwLock>], pubkey: Pubkey) { - let index = self.accounts_index.bin_calculator.bin_from_pubkey(&pubkey); - let mut candidates_bin = candidates[index].write().unwrap(); - candidates_bin.insert(pubkey, CleaningInfo::default()); - } - /// Construct a list of candidates for cleaning from: /// - dirty_stores -- set of stores which had accounts /// removed or recently rooted; @@ -3145,6 +3155,16 @@ impl AccountsDb { std::iter::repeat_with(|| RwLock::new(HashMap::::new())) .take(num_bins) .collect(); + + let insert_candidate = |pubkey, is_zero_lamport| { + let index = self.accounts_index.bin_calculator.bin_from_pubkey(&pubkey); + let mut candidates_bin = candidates[index].write().unwrap(); + candidates_bin + .entry(pubkey) + .or_default() + .might_contain_zero_lamport_entry |= is_zero_lamport; + }; + let dirty_ancient_stores = AtomicUsize::default(); let mut dirty_store_routine = || { let chunk_size = 1.max(dirty_stores_len.saturating_div(rayon::current_num_threads())); @@ -3157,9 +3177,12 @@ impl AccountsDb { dirty_ancient_stores.fetch_add(1, Ordering::Relaxed); } oldest_dirty_slot = oldest_dirty_slot.min(*slot); - store - .accounts - .scan_pubkeys(|pubkey| self.insert_pubkey(&candidates, *pubkey)); + + store.accounts.scan_index(|index| { + let pubkey = index.index_info.pubkey; + let is_zero_lamport = index.index_info.lamports == 0; + insert_candidate(pubkey, is_zero_lamport); + }); }); oldest_dirty_slot }) @@ -3209,7 +3232,7 @@ impl AccountsDb { let is_candidate_for_clean = max_slot_inclusive >= *slot && latest_full_snapshot_slot >= *slot; if is_candidate_for_clean { - self.insert_pubkey(&candidates, *pubkey); + insert_candidate(*pubkey, true); } !is_candidate_for_clean }); @@ -3425,7 +3448,11 @@ impl AccountsDb { }, None, false, - ScanFilter::All, + if candidate_info.might_contain_zero_lamport_entry { + ScanFilter::All + } else { + self.scan_filter_for_shrinking + }, ); if should_purge { let reclaims_new = self.collect_reclaims( @@ -3476,6 +3503,7 @@ impl AccountsDb { CleaningInfo { slot_list, ref_count, + .. }, ) in candidates_bin.write().unwrap().iter_mut() { @@ -3555,6 +3583,7 @@ impl AccountsDb { let CleaningInfo { slot_list, ref_count: _, + .. } = cleaning_info; (!slot_list.is_empty()).then_some(( *pubkey, @@ -3858,6 +3887,7 @@ impl AccountsDb { let CleaningInfo { slot_list, ref_count: _, + .. } = cleaning_info; debug_assert!(!slot_list.is_empty(), "candidate slot_list can't be empty"); // Only keep candidates where the entire history of the account in the root set @@ -12909,6 +12939,7 @@ pub mod tests { CleaningInfo { slot_list: rooted_entries, ref_count, + ..Default::default() }, ); } @@ -12919,6 +12950,7 @@ pub mod tests { CleaningInfo { slot_list: list, ref_count, + .. 
}, ) in candidates_bin.iter() { @@ -15171,6 +15203,7 @@ pub mod tests { CleaningInfo { slot_list: vec![(slot, account_info)], ref_count: 1, + ..Default::default() }, ); let accounts_db = AccountsDb::new_single_for_tests(); From aed6f36732ad16bff61b1e77bea799d57989cc9a Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 3 Oct 2024 10:07:46 -0400 Subject: [PATCH 427/529] Adds LtHash::identity() (#3057) --- lattice-hash/src/lt_hash.rs | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/lattice-hash/src/lt_hash.rs b/lattice-hash/src/lt_hash.rs index ef1ec4b6f41b4d..d334c54882de41 100644 --- a/lattice-hash/src/lt_hash.rs +++ b/lattice-hash/src/lt_hash.rs @@ -14,15 +14,21 @@ pub struct LtHash(pub [u16; LtHash::NUM_ELEMENTS]); impl LtHash { pub const NUM_ELEMENTS: usize = 1024; + /// Returns the identity value for LtHash + #[must_use] + pub const fn identity() -> Self { + Self([0; Self::NUM_ELEMENTS]) + } + /// Creates a new LtHash from `hasher` /// /// The caller should hash in all inputs of interest prior to calling. #[must_use] pub fn with(hasher: &blake3::Hasher) -> Self { let mut reader = hasher.finalize_xof(); - let mut inner = [0; Self::NUM_ELEMENTS]; - reader.fill(bytemuck::must_cast_slice_mut(inner.as_mut_slice())); - Self(inner) + let mut new = Self::identity(); + reader.fill(bytemuck::must_cast_slice_mut(new.0.as_mut_slice())); + new } /// Mixes `other` into `self` @@ -85,12 +91,8 @@ mod tests { }; impl LtHash { - const fn new_zeroed() -> Self { - Self([0; Self::NUM_ELEMENTS]) - } - fn new_random() -> Self { - let mut new = Self::new_zeroed(); + let mut new = Self::identity(); thread_rng().fill(&mut new.0); new } @@ -114,6 +116,14 @@ mod tests { impl Copy for LtHash {} + // Ensure that if you mix-in or mix-out with the identity, you get the original value + #[test] + fn test_identity() { + let a = LtHash::new_random(); + assert_eq!(a, a + LtHash::identity()); + assert_eq!(a, a - LtHash::identity()); + } + // Ensure that if you mix-in then mix-out a hash, you get the original value #[test] fn test_inverse() { From 7741b250a6e76afc9e7385ceb64c341f4bc21622 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 3 Oct 2024 19:20:11 +0400 Subject: [PATCH 428/529] remove unused deps from solana-version (#3025) * remove unused deps from solana-version * Trigger Build --- Cargo.lock | 2 -- programs/sbf/Cargo.lock | 2 -- version/Cargo.toml | 3 --- 3 files changed, 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65d7ce94aaee19..51b063750b0bf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8459,7 +8459,6 @@ dependencies = [ name = "solana-version" version = "2.1.0" dependencies = [ - "log", "semver 1.0.23", "serde", "serde_derive", @@ -8467,7 +8466,6 @@ dependencies = [ "solana-frozen-abi", "solana-frozen-abi-macro", "solana-sanitize", - "solana-sdk", "solana-serde-varint", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2b069250d219a5..9807b7e2afa2e2 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7048,13 +7048,11 @@ dependencies = [ name = "solana-version" version = "2.1.0" dependencies = [ - "log", "semver", "serde", "serde_derive", "solana-feature-set", "solana-sanitize", - "solana-sdk", "solana-serde-varint", ] diff --git a/version/Cargo.toml b/version/Cargo.toml index 3372a5cabbbcba..655b06f174fc90 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -10,7 +10,6 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -log = { workspace = true } semver = { workspace = 
true } serde = { workspace = true } serde_derive = { workspace = true } @@ -22,7 +21,6 @@ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ "frozen-abi", ] } solana-sanitize = { workspace = true } -solana-sdk = { workspace = true } solana-serde-varint = { workspace = true } [features] @@ -30,7 +28,6 @@ dummy-for-ci-check = [] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", - "solana-sdk/frozen-abi", ] [lib] From b6b5b1a04fde8b3bae36be1b99960c7279ce300e Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:11:52 -0700 Subject: [PATCH 429/529] Support notify first shred received in geyser (#3030) * support notify first shred received in geyser --- Cargo.lock | 1 + core/src/tvu.rs | 8 ++++++- core/src/validator.rs | 5 ++++ .../src/geyser_plugin_interface.rs | 4 ++++ .../src/geyser_plugin_service.rs | 16 +++++++++---- .../src/slot_status_notifier.rs | 7 ++++++ programs/sbf/Cargo.lock | 1 + turbine/Cargo.toml | 1 + turbine/benches/retransmit_stage.rs | 1 + turbine/src/retransmit_stage.rs | 24 ++++++++++++++++++- 10 files changed, 62 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 51b063750b0bf4..6c030fe71362cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8373,6 +8373,7 @@ dependencies = [ "rustls 0.23.13", "solana-entry", "solana-feature-set", + "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", "solana-logger", diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 5134228c34ac4c..5d5e18bc241395 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -25,7 +25,10 @@ use { bytes::Bytes, crossbeam_channel::{unbounded, Receiver, Sender}, solana_client::connection_cache::ConnectionCache, - solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, + solana_geyser_plugin_manager::{ + block_metadata_notifier_interface::BlockMetadataNotifierArc, + slot_status_notifier::SlotStatusNotifier, + }, solana_gossip::{ cluster_info::ClusterInfo, duplicate_shred_handler::DuplicateShredHandler, duplicate_shred_listener::DuplicateShredListener, @@ -161,6 +164,7 @@ impl Tvu { outstanding_repair_requests: Arc>, cluster_slots: Arc, wen_restart_repair_slots: Option>>>, + slot_status_notifier: Option, ) -> Result { let in_wen_restart = wen_restart_repair_slots.is_some(); @@ -210,6 +214,7 @@ impl Tvu { retransmit_receiver, max_slots.clone(), Some(rpc_subscriptions.clone()), + slot_status_notifier, ); let (ancestor_duplicate_slots_sender, ancestor_duplicate_slots_receiver) = unbounded(); @@ -542,6 +547,7 @@ pub mod tests { outstanding_repair_requests, cluster_slots, wen_restart_repair_slots, + None, ) .expect("assume success"); if enable_wen_restart { diff --git a/core/src/validator.rs b/core/src/validator.rs index aa9b9e81ab6fb2..3ea9593a5efbb1 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -674,6 +674,10 @@ impl Validator { .as_ref() .and_then(|geyser_plugin_service| geyser_plugin_service.get_block_metadata_notifier()); + let slot_status_notifier = geyser_plugin_service + .as_ref() + .and_then(|geyser_plugin_service| geyser_plugin_service.get_slot_status_notifier()); + info!( "Geyser plugin: accounts_update_notifier: {}, transaction_notifier: {}, \ entry_notifier: {}", @@ -1405,6 +1409,7 @@ impl Validator { outstanding_repair_requests.clone(), cluster_slots.clone(), wen_restart_repair_slots.clone(), + slot_status_notifier, ) .map_err(ValidatorError::Other)?; diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs 
b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 1dec992bd6f10b..6ac7bb848b1444 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -319,6 +319,9 @@ pub enum SlotStatus { /// The highest slot that has been voted on by supermajority of the cluster, ie. is confirmed. Confirmed, + + /// First Shred Received + FirstShredReceived, } impl SlotStatus { @@ -327,6 +330,7 @@ SlotStatus::Confirmed => "confirmed", SlotStatus::Processed => "processed", SlotStatus::Rooted => "rooted", + SlotStatus::FirstShredReceived => "first_shred_received", } } } diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index ff3e050dc4b391..8e293cbddbbeb0 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -5,7 +5,7 @@ use { block_metadata_notifier_interface::BlockMetadataNotifierArc, entry_notifier::EntryNotifierImpl, geyser_plugin_manager::{GeyserPluginManager, GeyserPluginManagerRequest}, - slot_status_notifier::SlotStatusNotifierImpl, + slot_status_notifier::{SlotStatusNotifier, SlotStatusNotifierImpl}, slot_status_observer::SlotStatusObserver, transaction_notifier::TransactionNotifierImpl, }, @@ -37,6 +37,7 @@ pub struct GeyserPluginService { transaction_notifier: Option, entry_notifier: Option, block_metadata_notifier: Option, + slot_status_notifier: Option, } impl GeyserPluginService { @@ -107,9 +108,10 @@ impl GeyserPluginService { None }; - let (slot_status_observer, block_metadata_notifier): ( + let (slot_status_observer, block_metadata_notifier, slot_status_notifier): ( Option, Option, + Option, ) = if account_data_notifications_enabled || transaction_notifications_enabled || entry_notifications_enabled @@ -119,14 +121,15 @@ impl GeyserPluginService { ( Some(SlotStatusObserver::new( confirmed_bank_receiver, - slot_status_notifier, + slot_status_notifier.clone(), )), Some(Arc::new(BlockMetadataNotifierImpl::new( plugin_manager.clone(), ))), + Some(slot_status_notifier), ) } else { - (None, None) + (None, None, None) }; // Initialize plugin manager rpc handler thread if needed @@ -143,6 +146,7 @@ impl GeyserPluginService { transaction_notifier, entry_notifier, block_metadata_notifier, + slot_status_notifier, }) } @@ -172,6 +176,10 @@ impl GeyserPluginService { self.block_metadata_notifier.clone() } + pub fn get_slot_status_notifier(&self) -> Option { + self.slot_status_notifier.clone() + } + pub fn join(self) -> thread::Result<()> { if let Some(mut slot_status_observer) = self.slot_status_observer { slot_status_observer.join()?; diff --git a/geyser-plugin-manager/src/slot_status_notifier.rs b/geyser-plugin-manager/src/slot_status_notifier.rs index 1557bb2d4d8c36..18ea942810ef41 100644 --- a/geyser-plugin-manager/src/slot_status_notifier.rs +++ b/geyser-plugin-manager/src/slot_status_notifier.rs @@ -17,6 +17,9 @@ pub trait SlotStatusNotifierInterface { /// Notified when a slot is rooted. fn notify_slot_rooted(&self, slot: Slot, parent: Option); + + /// Notified when the first shred is received for a slot.
+ fn notify_first_shred_received(&self, slot: Slot); } pub type SlotStatusNotifier = Arc>; @@ -37,6 +40,10 @@ impl SlotStatusNotifierInterface for SlotStatusNotifierImpl { fn notify_slot_rooted(&self, slot: Slot, parent: Option) { self.notify_slot_status(slot, parent, SlotStatus::Rooted); } + + fn notify_first_shred_received(&self, slot: Slot) { + self.notify_slot_status(slot, None, SlotStatus::FirstShredReceived); + } } impl SlotStatusNotifierImpl { diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 9807b7e2afa2e2..a3207197f0827f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -6976,6 +6976,7 @@ dependencies = [ "rustls 0.23.13", "solana-entry", "solana-feature-set", + "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", "solana-measure", diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index 0addbaf6866f74..ff035eaf11fdea 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -25,6 +25,7 @@ rayon = { workspace = true } rustls = { workspace = true } solana-entry = { workspace = true } solana-feature-set = { workspace = true } +solana-geyser-plugin-manager = { workspace = true } solana-gossip = { workspace = true } solana-ledger = { workspace = true } solana-measure = { workspace = true } diff --git a/turbine/benches/retransmit_stage.rs b/turbine/benches/retransmit_stage.rs index c5490d5670e6c6..75c7ad06bdde34 100644 --- a/turbine/benches/retransmit_stage.rs +++ b/turbine/benches/retransmit_stage.rs @@ -126,6 +126,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { shreds_receiver, Arc::default(), // solana_rpc::max_slots::MaxSlots None, + None, ); let mut index = 0; diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index d8d13e7f935cad..b5e67cd3203a40 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -9,6 +9,7 @@ use { lru::LruCache, rand::Rng, rayon::{prelude::*, ThreadPool, ThreadPoolBuilder}, + solana_geyser_plugin_manager::slot_status_notifier::SlotStatusNotifier, solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol}, solana_ledger::{ leader_schedule_cache::LeaderScheduleCache, @@ -184,6 +185,7 @@ fn retransmit( shred_deduper: &mut ShredDeduper<2>, max_slots: &MaxSlots, rpc_subscriptions: Option<&RpcSubscriptions>, + slot_status_notifier: Option<&SlotStatusNotifier>, ) -> Result<(), RecvTimeoutError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?; @@ -299,7 +301,12 @@ fn retransmit( .reduce(HashMap::new, RetransmitSlotStats::merge) }) }; - stats.upsert_slot_stats(slot_stats, root_bank.slot(), rpc_subscriptions); + stats.upsert_slot_stats( + slot_stats, + root_bank.slot(), + rpc_subscriptions, + slot_status_notifier, + ); timer_start.stop(); stats.total_time += timer_start.as_us(); stats.maybe_submit(&root_bank, &working_bank, cluster_info, cluster_nodes_cache); @@ -381,6 +388,7 @@ pub fn retransmitter( shreds_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, + slot_status_notifier: Option, ) -> JoinHandle<()> { let cluster_nodes_cache = ClusterNodesCache::::new( CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, @@ -412,6 +420,7 @@ pub fn retransmitter( &mut shred_deduper, &max_slots, rpc_subscriptions.as_deref(), + slot_status_notifier.as_ref(), ) { Ok(()) => (), Err(RecvTimeoutError::Timeout) => (), @@ -435,6 +444,7 @@ impl RetransmitStage { retransmit_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, + slot_status_notifier: Option, ) -> Self { let 
retransmit_thread_handle = retransmitter( retransmit_sockets, @@ -445,6 +455,7 @@ impl RetransmitStage { retransmit_receiver, max_slots, rpc_subscriptions, + slot_status_notifier, ); Self { @@ -507,6 +518,7 @@ impl RetransmitStats { feed: I, root: Slot, rpc_subscriptions: Option<&RpcSubscriptions>, + slot_status_notifier: Option<&SlotStatusNotifier>, ) where I: IntoIterator, { @@ -523,6 +535,16 @@ impl RetransmitStats { datapoint_info!("retransmit-first-shred", ("slot", slot, i64)); } } + + if let Some(slot_status_notifier) = slot_status_notifier { + if slot > root { + slot_status_notifier + .read() + .unwrap() + .notify_first_shred_received(slot); + } + } + self.slot_stats.put(slot, slot_stats); } Some(entry) => { From f38fe41c9f167c58d16d8aa056f35bda8f64938b Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 3 Oct 2024 19:47:12 -0500 Subject: [PATCH 430/529] generic verify_precompiles (#3055) --- Cargo.lock | 1 + banks-server/src/banks_server.rs | 3 +- core/src/banking_stage/consumer.rs | 5 +- rpc/src/rpc.rs | 3 +- runtime/Cargo.toml | 1 + runtime/src/bank.rs | 3 +- runtime/src/lib.rs | 1 + runtime/src/verify_precompiles.rs | 165 +++++++++++++++++++++++++++++ 8 files changed, 177 insertions(+), 5 deletions(-) create mode 100644 runtime/src/verify_precompiles.rs diff --git a/Cargo.lock b/Cargo.lock index 6c030fe71362cd..cff368061b4781 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7652,6 +7652,7 @@ dependencies = [ "num_enum", "percentage", "qualifier_attr", + "rand 0.7.3", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 7051daac45cb0f..f2d2d10da85abb 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -13,6 +13,7 @@ use { bank::{Bank, TransactionSimulationResult}, bank_forks::BankForks, commitment::BlockCommitmentCache, + verify_precompiles::verify_precompiles, }, solana_sdk::{ account::Account, @@ -167,7 +168,7 @@ fn verify_transaction( let move_precompile_verification_to_svm = feature_set.is_active(&move_precompile_verification_to_svm::id()); if !move_precompile_verification_to_svm { - transaction.verify_precompiles(feature_set)?; + verify_precompiles(transaction, feature_set)?; } Ok(()) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 4bd6f50cd18b65..57f1b1958b152c 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -21,6 +21,7 @@ use { solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, transaction_batch::TransactionBatch, + verify_precompiles::verify_precompiles, }, solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ @@ -401,7 +402,7 @@ impl Consumer { .map(|(tx, result)| match result { Ok(_) => { if !move_precompile_verification_to_svm { - tx.verify_precompiles(&bank.feature_set) + verify_precompiles(tx, &bank.feature_set) } else { Ok(()) } @@ -452,7 +453,7 @@ impl Consumer { } else { // Verify pre-compiles. if !move_precompile_verification_to_svm { - tx.verify_precompiles(&bank.feature_set)?; + verify_precompiles(tx, &bank.feature_set)?; } // Any transaction executed between sanitization time and now may have closed the lookup table(s). 
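The call-site pattern is the same at each of these sites; a minimal sketch, assuming a SanitizedTransaction (which implements SVMMessage) and the current bank's feature set — verify_precompiles is the function added by this patch, while the surrounding wrapper is hypothetical:

    use solana_feature_set::FeatureSet;
    use solana_runtime::verify_precompiles::verify_precompiles;
    use solana_sdk::transaction::{Result, SanitizedTransaction};

    // Precompile verification is now a free function generic over SVMMessage,
    // rather than a method on the transaction type itself.
    fn check_precompiles(tx: &SanitizedTransaction, feature_set: &FeatureSet) -> Result<()> {
        verify_precompiles(tx, feature_set)
    }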
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 08fe5f3355f7af..9f6de648a40e30 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -59,6 +59,7 @@ use { prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, snapshot_utils, + verify_precompiles::verify_precompiles, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -2260,7 +2261,7 @@ fn verify_transaction( let move_precompile_verification_to_svm = feature_set.is_active(&feature_set::move_precompile_verification_to_svm::id()); if !move_precompile_verification_to_svm { - if let Err(e) = transaction.verify_precompiles(feature_set) { + if let Err(e) = verify_precompiles(transaction, feature_set) { return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into()); } } diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 014995d592b612..35e23a8a194a99 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -107,6 +107,7 @@ assert_matches = { workspace = true } ed25519-dalek = { workspace = true } libsecp256k1 = { workspace = true } memoffset = { workspace = true } +rand0-7 = { package = "rand", version = "0.7" } rand_chacha = { workspace = true } solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 77661a4bc43cf2..f74de8b2ffec64 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -57,6 +57,7 @@ use { stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, transaction_batch::{OwnedOrBorrowed, TransactionBatch}, + verify_precompiles::verify_precompiles, }, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, @@ -5664,7 +5665,7 @@ impl Bank { verification_mode == TransactionVerificationMode::HashAndVerifyPrecompiles || verification_mode == TransactionVerificationMode::FullVerification } { - sanitized_tx.verify_precompiles(&self.feature_set)?; + verify_precompiles(&sanitized_tx, &self.feature_set)?; } Ok(sanitized_tx) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index bd11e97668eec0..21947a64e79d02 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -36,6 +36,7 @@ pub mod stakes; pub mod static_ids; pub mod status_cache; pub mod transaction_batch; +pub mod verify_precompiles; pub mod vote_sender_types; #[macro_use] diff --git a/runtime/src/verify_precompiles.rs b/runtime/src/verify_precompiles.rs new file mode 100644 index 00000000000000..69022785af4d1f --- /dev/null +++ b/runtime/src/verify_precompiles.rs @@ -0,0 +1,165 @@ +use { + solana_feature_set::FeatureSet, + solana_sdk::{ + precompiles::get_precompiles, + transaction::{Result, TransactionError}, + }, + solana_svm_transaction::svm_message::SVMMessage, +}; + +pub fn verify_precompiles(message: &impl SVMMessage, feature_set: &FeatureSet) -> Result<()> { + let mut all_instruction_data = None; // lazily collect this on first pre-compile + + let precompiles = get_precompiles(); + for (program_id, instruction) in message.program_instructions_iter() { + for precompile in precompiles { + if precompile.check_id(program_id, |id| feature_set.is_active(id)) { + let all_instruction_data: &Vec<&[u8]> = all_instruction_data + .get_or_insert_with(|| message.instructions_iter().map(|ix| ix.data).collect()); + precompile + .verify(instruction.data, all_instruction_data, feature_set) + .map_err(|_| TransactionError::InvalidAccountIndex)?; + break; + } + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use { + super::*, + 
rand0_7::{thread_rng, Rng}, + solana_sdk::{ + ed25519_instruction::new_ed25519_instruction, + hash::Hash, + pubkey::Pubkey, + secp256k1_instruction::new_secp256k1_instruction, + signature::Keypair, + signer::Signer, + system_instruction, system_transaction, + transaction::{SanitizedTransaction, Transaction}, + }, + }; + + #[test] + fn test_verify_precompiles_simple_transaction() { + let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &Keypair::new(), + &Pubkey::new_unique(), + 1, + Hash::default(), + )); + assert!(verify_precompiles(&tx, &FeatureSet::all_enabled()).is_ok()); + } + + #[test] + fn test_verify_precompiles_secp256k1() { + let secp_privkey = libsecp256k1::SecretKey::random(&mut thread_rng()); + let message_arr = b"hello"; + let mut secp_instruction = new_secp256k1_instruction(&secp_privkey, message_arr); + let mint_keypair = Keypair::new(); + let feature_set = FeatureSet::all_enabled(); + + let tx = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[secp_instruction.clone()], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + )); + + assert!(verify_precompiles(&tx, &feature_set).is_ok()); + + let index = thread_rng().gen_range(0, secp_instruction.data.len()); + secp_instruction.data[index] = secp_instruction.data[index].wrapping_add(12); + let tx = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[secp_instruction], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + )); + + assert!(verify_precompiles(&tx, &feature_set).is_err()); + } + + #[test] + fn test_verify_precompiles_ed25519() { + let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng()); + let message_arr = b"hello"; + let mut instruction = new_ed25519_instruction(&privkey, message_arr); + let mint_keypair = Keypair::new(); + let feature_set = FeatureSet::all_enabled(); + + let tx = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[instruction.clone()], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + )); + + assert!(verify_precompiles(&tx, &feature_set).is_ok()); + + let index = loop { + let index = thread_rng().gen_range(0, instruction.data.len()); + // byte 1 is not used, so this would not cause the verify to fail + if index != 1 { + break index; + } + }; + + instruction.data[index] = instruction.data[index].wrapping_add(12); + let tx = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[instruction], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + )); + assert!(verify_precompiles(&tx, &feature_set).is_err()); + } + + #[test] + fn test_verify_precompiles_mixed() { + let message_arr = b"hello"; + let secp_privkey = libsecp256k1::SecretKey::random(&mut thread_rng()); + let mut secp_instruction = new_secp256k1_instruction(&secp_privkey, message_arr); + let ed25519_privkey = ed25519_dalek::Keypair::generate(&mut thread_rng()); + let ed25519_instruction = new_ed25519_instruction(&ed25519_privkey, message_arr); + + let mint_keypair = Keypair::new(); + let feature_set = FeatureSet::all_enabled(); + + let tx = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[ + secp_instruction.clone(), + ed25519_instruction.clone(), + system_instruction::transfer(&mint_keypair.pubkey(), &Pubkey::new_unique(), 1), + ], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + )); + 
assert!(verify_precompiles(&tx, &feature_set).is_ok()); + + let index = thread_rng().gen_range(0, secp_instruction.data.len()); + secp_instruction.data[index] = secp_instruction.data[index].wrapping_add(12); + let tx = + SanitizedTransaction::from_transaction_for_tests(Transaction::new_signed_with_payer( + &[ + secp_instruction, + ed25519_instruction, + system_instruction::transfer(&mint_keypair.pubkey(), &Pubkey::new_unique(), 1), + ], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + )); + assert!(verify_precompiles(&tx, &feature_set).is_err()); + } +} From c05caa23516fb27b045a1d96b3105a0215b9453e Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 3 Oct 2024 22:19:30 -0400 Subject: [PATCH 431/529] banking_stage: calculate stake by vote account instead of node pubkey (#3049) * banking_stage: calculate stake by vote account instead of node pubkey * pr feedback: don't hold epoch stakes write lock for eviction * pr feedback: pubkey -> vote_pubkey renames * pr feedback: rename missed pubkey -> vote_pubkey --- .../banking_stage/latest_unprocessed_votes.rs | 262 +++++++++--------- .../unprocessed_transaction_storage.rs | 14 +- 2 files changed, 143 insertions(+), 133 deletions(-) diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index c126d1a40ed6b4..bb97142bda5e81 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -6,7 +6,7 @@ use { itertools::Itertools, rand::{thread_rng, Rng}, solana_perf::packet::Packet, - solana_runtime::bank::Bank, + solana_runtime::{bank::Bank, epoch_stakes::EpochStakes}, solana_sdk::{ account::from_account, clock::{Slot, UnixTimestamp}, @@ -39,7 +39,7 @@ pub enum VoteSource { #[derive(Debug, Clone)] pub struct LatestValidatorVotePacket { vote_source: VoteSource, - pubkey: Pubkey, + vote_pubkey: Pubkey, vote: Option>, slot: Slot, hash: Hash, @@ -87,10 +87,16 @@ impl LatestValidatorVotePacket { Ok(vote_state_update_instruction) if instruction_filter(&vote_state_update_instruction) => { - let &pubkey = message + let vote_account_index = instruction + .accounts + .first() + .copied() + .ok_or(DeserializedPacketError::VoteTransactionError)?; + let vote_pubkey = message .message .static_account_keys() - .first() + .get(vote_account_index as usize) + .copied() .ok_or(DeserializedPacketError::VoteTransactionError)?; let slot = vote_state_update_instruction.last_voted_slot().unwrap_or(0); let hash = vote_state_update_instruction.hash(); @@ -100,7 +106,7 @@ impl LatestValidatorVotePacket { vote: Some(vote), slot, hash, - pubkey, + vote_pubkey, vote_source, forwarded: false, timestamp, @@ -114,8 +120,8 @@ impl LatestValidatorVotePacket { self.vote.as_ref().unwrap().clone() } - pub fn pubkey(&self) -> Pubkey { - self.pubkey + pub fn vote_pubkey(&self) -> Pubkey { + self.vote_pubkey } pub fn slot(&self) -> Slot { @@ -150,12 +156,12 @@ pub(crate) struct VoteBatchInsertionMetrics { pub(crate) num_dropped_tpu: usize, } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct LatestUnprocessedVotes { - latest_votes_per_pubkey: RwLock>>>, + latest_vote_per_vote_pubkey: RwLock>>>, num_unprocessed_votes: AtomicUsize, // These are only ever written to by the tpu vote thread - cached_staked_nodes: RwLock>>, + cached_epoch_stakes: RwLock, deprecate_legacy_vote_ixs: AtomicBool, current_epoch: AtomicU64, } @@ -166,10 +172,30 @@ impl LatestUnprocessedVotes { .feature_set .is_active(&feature_set::deprecate_legacy_vote_ixs::id()); Self 
{ - cached_staked_nodes: RwLock::new(bank.current_epoch_staked_nodes().clone()), + latest_vote_per_vote_pubkey: RwLock::new(HashMap::default()), + num_unprocessed_votes: AtomicUsize::new(0), + cached_epoch_stakes: RwLock::new(bank.current_epoch_stakes().clone()), current_epoch: AtomicU64::new(bank.epoch()), deprecate_legacy_vote_ixs: AtomicBool::new(deprecate_legacy_vote_ixs), - ..Self::default() + } + } + + #[cfg(test)] + pub fn new_for_tests(vote_pubkeys_to_stake: &[Pubkey]) -> Self { + use solana_vote::vote_account::VoteAccount; + + let vote_accounts = vote_pubkeys_to_stake + .iter() + .map(|pubkey| (*pubkey, (1u64, VoteAccount::new_random()))) + .collect(); + let epoch_stakes = EpochStakes::new_for_tests(vote_accounts, 0); + + Self { + latest_vote_per_vote_pubkey: RwLock::new(HashMap::default()), + num_unprocessed_votes: AtomicUsize::new(0), + cached_epoch_stakes: RwLock::new(epoch_stakes), + current_epoch: AtomicU64::new(0), + deprecate_legacy_vote_ixs: AtomicBool::new(true), } } @@ -185,9 +211,9 @@ impl LatestUnprocessedVotes { &'a self, votes: impl Iterator + 'a, ) -> impl Iterator + 'a { - let staked_nodes = self.cached_staked_nodes.read().unwrap(); + let epoch_stakes = self.cached_epoch_stakes.read().unwrap(); votes.filter(move |vote| { - let stake = staked_nodes.get(&vote.pubkey()).copied().unwrap_or(0); + let stake = epoch_stakes.vote_account_stake(&vote.vote_pubkey()); stake > 0 }) } @@ -216,7 +242,7 @@ impl LatestUnprocessedVotes { } fn get_entry(&self, pubkey: Pubkey) -> Option>> { - self.latest_votes_per_pubkey + self.latest_vote_per_vote_pubkey .read() .unwrap() .get(&pubkey) @@ -231,7 +257,7 @@ impl LatestUnprocessedVotes { vote: LatestValidatorVotePacket, should_replenish_taken_votes: bool, ) -> Option { - let pubkey = vote.pubkey(); + let vote_pubkey = vote.vote_pubkey(); let slot = vote.slot(); let timestamp = vote.timestamp(); @@ -276,11 +302,16 @@ impl LatestUnprocessedVotes { Some(vote) }; - if let Some(latest_vote) = self.get_entry(pubkey) { + if let Some(latest_vote) = self.get_entry(vote_pubkey) { with_latest_vote(&latest_vote, vote) } else { // Grab write-lock to insert new vote. 
- match self.latest_votes_per_pubkey.write().unwrap().entry(pubkey) { + match self + .latest_vote_per_vote_pubkey + .write() + .unwrap() + .entry(vote_pubkey) + { std::collections::hash_map::Entry::Occupied(entry) => { with_latest_vote(entry.get(), vote) } @@ -295,7 +326,7 @@ impl LatestUnprocessedVotes { #[cfg(test)] pub fn get_latest_vote_slot(&self, pubkey: Pubkey) -> Option { - self.latest_votes_per_pubkey + self.latest_vote_per_vote_pubkey .read() .unwrap() .get(&pubkey) @@ -304,28 +335,21 @@ impl LatestUnprocessedVotes { #[cfg(test)] fn get_latest_timestamp(&self, pubkey: Pubkey) -> Option { - self.latest_votes_per_pubkey + self.latest_vote_per_vote_pubkey .read() .unwrap() .get(&pubkey) .and_then(|l| l.read().unwrap().timestamp()) } - #[cfg(test)] - pub(crate) fn set_staked_nodes(&self, staked_nodes: &[Pubkey]) { - let staked_nodes: HashMap = - staked_nodes.iter().map(|pk| (*pk, 1u64)).collect(); - *self.cached_staked_nodes.write().unwrap() = Arc::new(staked_nodes); - } - fn weighted_random_order_by_stake(&self) -> impl Iterator { // Efraimidis and Spirakis algo for weighted random sample without replacement - let staked_nodes = self.cached_staked_nodes.read().unwrap(); - let latest_votes_per_pubkey = self.latest_votes_per_pubkey.read().unwrap(); - let mut pubkey_with_weight: Vec<(f64, Pubkey)> = latest_votes_per_pubkey + let epoch_stakes = self.cached_epoch_stakes.read().unwrap(); + let latest_vote_per_vote_pubkey = self.latest_vote_per_vote_pubkey.read().unwrap(); + let mut pubkey_with_weight: Vec<(f64, Pubkey)> = latest_vote_per_vote_pubkey .keys() .filter_map(|&pubkey| { - let stake = staked_nodes.get(&pubkey).copied().unwrap_or(0); + let stake = epoch_stakes.vote_account_stake(&pubkey); if stake == 0 { None // Ignore votes from unstaked validators } else { @@ -343,24 +367,24 @@ impl LatestUnprocessedVotes { if bank.epoch() <= self.current_epoch.load(Ordering::Relaxed) { return; } - let mut staked_nodes = self.cached_staked_nodes.write().unwrap(); - *staked_nodes = bank.current_epoch_staked_nodes().clone(); - self.current_epoch.store(bank.epoch(), Ordering::Relaxed); - self.deprecate_legacy_vote_ixs.store( - bank.feature_set - .is_active(&feature_set::deprecate_legacy_vote_ixs::id()), - Ordering::Relaxed, - ); + { + let mut epoch_stakes = self.cached_epoch_stakes.write().unwrap(); + *epoch_stakes = bank.current_epoch_stakes().clone(); + self.current_epoch.store(bank.epoch(), Ordering::Relaxed); + self.deprecate_legacy_vote_ixs.store( + bank.feature_set + .is_active(&feature_set::deprecate_legacy_vote_ixs::id()), + Ordering::Relaxed, + ); + } // Evict any now unstaked pubkeys - let mut latest_votes_per_pubkey = self.latest_votes_per_pubkey.write().unwrap(); + let epoch_stakes = self.cached_epoch_stakes.read().unwrap(); + let mut latest_vote_per_vote_pubkey = self.latest_vote_per_vote_pubkey.write().unwrap(); let mut unstaked_votes = 0; - latest_votes_per_pubkey.retain(|pubkey, vote| { + latest_vote_per_vote_pubkey.retain(|vote_pubkey, vote| { let is_present = !vote.read().unwrap().is_vote_taken(); - let should_evict = match staked_nodes.get(pubkey) { - None => true, - Some(stake) => *stake == 0, - }; + let should_evict = epoch_stakes.vote_account_stake(vote_pubkey) == 0; if is_present && should_evict { unstaked_votes += 1; } @@ -474,7 +498,7 @@ impl LatestUnprocessedVotes { /// Sometimes we forward and hold the packets, sometimes we forward and clear. 
/// This also clears all gossip votes since by definition they have been forwarded pub fn clear_forwarded_packets(&self) { - self.latest_votes_per_pubkey + self.latest_vote_per_vote_pubkey .read() .unwrap() .values() @@ -611,12 +635,12 @@ mod tests { assert_eq!(VoteSource::Gossip, deserialized_packets[1].vote_source); assert_eq!( - keypairs.node_keypair.pubkey(), - deserialized_packets[0].pubkey + keypairs.vote_keypair.pubkey(), + deserialized_packets[0].vote_pubkey ); assert_eq!( - keypairs.node_keypair.pubkey(), - deserialized_packets[1].pubkey + keypairs.vote_keypair.pubkey(), + deserialized_packets[1].vote_pubkey ); assert!(deserialized_packets[0].vote.is_some()); @@ -625,12 +649,11 @@ mod tests { #[test] fn test_update_latest_vote() { - let latest_unprocessed_votes = LatestUnprocessedVotes::default(); let keypair_a = ValidatorVoteKeypairs::new_rand(); let keypair_b = ValidatorVoteKeypairs::new_rand(); - latest_unprocessed_votes.set_staked_nodes(&[ - keypair_a.node_keypair.pubkey(), - keypair_b.node_keypair.pubkey(), + let latest_unprocessed_votes = LatestUnprocessedVotes::new_for_tests(&[ + keypair_a.vote_keypair.pubkey(), + keypair_b.vote_keypair.pubkey(), ]); let vote_a = from_slots(vec![(0, 2), (1, 1)], VoteSource::Gossip, &keypair_a, None); @@ -651,11 +674,11 @@ mod tests { assert_eq!( Some(1), - latest_unprocessed_votes.get_latest_vote_slot(keypair_a.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_vote_slot(keypair_a.vote_keypair.pubkey()) ); assert_eq!( Some(9), - latest_unprocessed_votes.get_latest_vote_slot(keypair_b.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_vote_slot(keypair_b.vote_keypair.pubkey()) ); let vote_a = from_slots( @@ -710,13 +733,13 @@ mod tests { assert_eq!( 10, latest_unprocessed_votes - .get_latest_vote_slot(keypair_a.node_keypair.pubkey()) + .get_latest_vote_slot(keypair_a.vote_keypair.pubkey()) .unwrap() ); assert_eq!( 9, latest_unprocessed_votes - .get_latest_vote_slot(keypair_b.node_keypair.pubkey()) + .get_latest_vote_slot(keypair_b.vote_keypair.pubkey()) .unwrap() ); @@ -739,11 +762,11 @@ mod tests { assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( Some(1), - latest_unprocessed_votes.get_latest_timestamp(keypair_a.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_timestamp(keypair_a.vote_keypair.pubkey()) ); assert_eq!( Some(2), - latest_unprocessed_votes.get_latest_timestamp(keypair_b.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_timestamp(keypair_b.vote_keypair.pubkey()) ); // Same votes with bigger timestamps should override @@ -765,11 +788,11 @@ mod tests { assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( Some(5), - latest_unprocessed_votes.get_latest_timestamp(keypair_a.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_timestamp(keypair_a.vote_keypair.pubkey()) ); assert_eq!( Some(6), - latest_unprocessed_votes.get_latest_timestamp(keypair_b.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_timestamp(keypair_b.vote_keypair.pubkey()) ); // Same votes with smaller timestamps should not override @@ -793,16 +816,16 @@ mod tests { assert_eq!(2, latest_unprocessed_votes.len()); assert_eq!( Some(5), - latest_unprocessed_votes.get_latest_timestamp(keypair_a.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_timestamp(keypair_a.vote_keypair.pubkey()) ); assert_eq!( Some(6), - latest_unprocessed_votes.get_latest_timestamp(keypair_b.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_timestamp(keypair_b.vote_keypair.pubkey()) ); // Drain all 
latest votes for packet in latest_unprocessed_votes - .latest_votes_per_pubkey + .latest_vote_per_vote_pubkey .read() .unwrap() .values() @@ -832,8 +855,6 @@ mod tests { fn test_update_latest_vote_race() { // There was a race condition in updating the same pubkey in the hashmap // when the entry does not initially exist. - let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::default()); - const NUM_VOTES: usize = 100; let keypairs = Arc::new( (0..NUM_VOTES) @@ -842,9 +863,10 @@ mod tests { ); let staked_nodes = keypairs .iter() - .map(|kp| kp.node_keypair.pubkey()) + .map(|kp| kp.vote_keypair.pubkey()) .collect_vec(); - latest_unprocessed_votes.set_staked_nodes(&staked_nodes); + let latest_unprocessed_votes = + Arc::new(LatestUnprocessedVotes::new_for_tests(&staked_nodes)); // Insert votes in parallel let insert_vote = |latest_unprocessed_votes: &LatestUnprocessedVotes, @@ -876,8 +898,6 @@ mod tests { #[test] fn test_simulate_threads() { - let latest_unprocessed_votes = Arc::new(LatestUnprocessedVotes::default()); - let latest_unprocessed_votes_tpu = latest_unprocessed_votes.clone(); let keypairs = Arc::new( (0..10) .map(|_| ValidatorVoteKeypairs::new_rand()) @@ -886,9 +906,11 @@ mod tests { let keypairs_tpu = keypairs.clone(); let staked_nodes = keypairs .iter() - .map(|kp| kp.node_keypair.pubkey()) + .map(|kp| kp.vote_keypair.pubkey()) .collect_vec(); - latest_unprocessed_votes.set_staked_nodes(&staked_nodes); + let latest_unprocessed_votes = + Arc::new(LatestUnprocessedVotes::new_for_tests(&staked_nodes)); + let latest_unprocessed_votes_tpu = latest_unprocessed_votes.clone(); let vote_limit = 1000; let gossip = Builder::new() @@ -921,19 +943,21 @@ mod tests { .update_latest_vote(vote, false /* should replenish */); if i % 214 == 0 { // Simulate draining and processing packets - let latest_votes_per_pubkey = latest_unprocessed_votes_tpu - .latest_votes_per_pubkey + let latest_vote_per_vote_pubkey = latest_unprocessed_votes_tpu + .latest_vote_per_vote_pubkey .read() .unwrap(); - latest_votes_per_pubkey.iter().for_each(|(_pubkey, lock)| { - let mut latest_vote = lock.write().unwrap(); - if !latest_vote.is_vote_taken() { - latest_vote.take_vote(); - latest_unprocessed_votes_tpu - .num_unprocessed_votes - .fetch_sub(1, Ordering::Relaxed); - } - }); + latest_vote_per_vote_pubkey + .iter() + .for_each(|(_pubkey, lock)| { + let mut latest_vote = lock.write().unwrap(); + if !latest_vote.is_vote_taken() { + latest_vote.take_vote(); + latest_unprocessed_votes_tpu + .num_unprocessed_votes + .fetch_sub(1, Ordering::Relaxed); + } + }); } } }) @@ -944,7 +968,7 @@ mod tests { #[test] fn test_forwardable_packets() { - let latest_unprocessed_votes = LatestUnprocessedVotes::default(); + let latest_unprocessed_votes = LatestUnprocessedVotes::new_for_tests(&[]); let bank_0 = Bank::new_for_tests(&GenesisConfig::default()); let mut bank = Bank::new_from_parent( Arc::new(bank_0), @@ -983,12 +1007,9 @@ mod tests { .count() ); - let config = genesis_utils::create_genesis_config_with_leader( - 100, - &keypair_a.node_keypair.pubkey(), - 200, - ) - .genesis_config; + let config = + genesis_utils::create_genesis_config_with_vote_accounts(100, &[keypair_a], vec![200]) + .genesis_config; let bank_0 = Bank::new_for_tests(&config); let bank = Bank::new_from_parent( Arc::new(bank_0), @@ -1018,12 +1039,9 @@ mod tests { .count() ); - let config = genesis_utils::create_genesis_config_with_leader( - 100, - &keypair_b.node_keypair.pubkey(), - 200, - ) - .genesis_config; + let config = + 
genesis_utils::create_genesis_config_with_vote_accounts(100, &[keypair_b], vec![200]) + .genesis_config; let bank_0 = Bank::new_for_tests(&config); let bank = Arc::new(Bank::new_from_parent( Arc::new(bank_0), @@ -1069,16 +1087,15 @@ mod tests { #[test] fn test_clear_forwarded_packets() { - let latest_unprocessed_votes = LatestUnprocessedVotes::default(); let keypair_a = ValidatorVoteKeypairs::new_rand(); let keypair_b = ValidatorVoteKeypairs::new_rand(); let keypair_c = ValidatorVoteKeypairs::new_rand(); let keypair_d = ValidatorVoteKeypairs::new_rand(); - latest_unprocessed_votes.set_staked_nodes(&[ - keypair_a.node_keypair.pubkey(), - keypair_b.node_keypair.pubkey(), - keypair_c.node_keypair.pubkey(), - keypair_d.node_keypair.pubkey(), + let latest_unprocessed_votes = LatestUnprocessedVotes::new_for_tests(&[ + keypair_a.vote_keypair.pubkey(), + keypair_b.vote_keypair.pubkey(), + keypair_c.vote_keypair.pubkey(), + keypair_d.vote_keypair.pubkey(), ]); let vote_a = from_slots(vec![(1, 1)], VoteSource::Gossip, &keypair_a, None); @@ -1098,19 +1115,19 @@ mod tests { assert_eq!( Some(1), - latest_unprocessed_votes.get_latest_vote_slot(keypair_a.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_vote_slot(keypair_a.vote_keypair.pubkey()) ); assert_eq!( Some(2), - latest_unprocessed_votes.get_latest_vote_slot(keypair_b.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_vote_slot(keypair_b.vote_keypair.pubkey()) ); assert_eq!( Some(3), - latest_unprocessed_votes.get_latest_vote_slot(keypair_c.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_vote_slot(keypair_c.vote_keypair.pubkey()) ); assert_eq!( Some(4), - latest_unprocessed_votes.get_latest_vote_slot(keypair_d.node_keypair.pubkey()) + latest_unprocessed_votes.get_latest_vote_slot(keypair_d.vote_keypair.pubkey()) ); } @@ -1141,12 +1158,9 @@ mod tests { assert!(latest_unprocessed_votes.is_empty()); // Bank in same epoch should not update stakes - let config = genesis_utils::create_genesis_config_with_leader( - 100, - &keypair_a.node_keypair.pubkey(), - 200, - ) - .genesis_config; + let config = + genesis_utils::create_genesis_config_with_vote_accounts(100, &[&keypair_a], vec![200]) + .genesis_config; let bank_0 = Bank::new_for_tests(&config); let bank = Bank::new_from_parent( Arc::new(bank_0), @@ -1159,12 +1173,9 @@ mod tests { assert!(latest_unprocessed_votes.is_empty()); // Bank in next epoch should update stakes - let config = genesis_utils::create_genesis_config_with_leader( - 100, - &keypair_b.node_keypair.pubkey(), - 200, - ) - .genesis_config; + let config = + genesis_utils::create_genesis_config_with_vote_accounts(100, &[&keypair_b], vec![200]) + .genesis_config; let bank_0 = Bank::new_for_tests(&config); let bank = Bank::new_from_parent( Arc::new(bank_0), @@ -1176,17 +1187,14 @@ mod tests { latest_unprocessed_votes.insert_batch(votes.clone(), true); assert_eq!(latest_unprocessed_votes.len(), 1); assert_eq!( - latest_unprocessed_votes.get_latest_vote_slot(keypair_b.node_keypair.pubkey()), + latest_unprocessed_votes.get_latest_vote_slot(keypair_b.vote_keypair.pubkey()), Some(vote_b.slot()) ); // Previously unstaked votes are removed - let config = genesis_utils::create_genesis_config_with_leader( - 100, - &keypair_c.node_keypair.pubkey(), - 200, - ) - .genesis_config; + let config = + genesis_utils::create_genesis_config_with_vote_accounts(100, &[&keypair_c], vec![200]) + .genesis_config; let bank_0 = Bank::new_for_tests(&config); let bank = Bank::new_from_parent( Arc::new(bank_0), @@ -1199,7 +1207,7 @@ mod 
tests { latest_unprocessed_votes.insert_batch(votes.clone(), true); assert_eq!(latest_unprocessed_votes.len(), 1); assert_eq!( - latest_unprocessed_votes.get_latest_vote_slot(keypair_c.node_keypair.pubkey()), + latest_unprocessed_votes.get_latest_vote_slot(keypair_c.vote_keypair.pubkey()), Some(vote_c.slot()) ); } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index cdcba288f90f8d..f612f5eaf08b11 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -1278,10 +1278,12 @@ mod tests { [VoteSource::Gossip, VoteSource::Tpu].into_iter(), [true, false].into_iter() ) { - let latest_unprocessed_votes = LatestUnprocessedVotes::default(); - if staked { - latest_unprocessed_votes.set_staked_nodes(&[keypair.pubkey()]); - } + let staked_keys = if staked { + vec![vote_keypair.pubkey()] + } else { + vec![] + }; + let latest_unprocessed_votes = LatestUnprocessedVotes::new_for_tests(&staked_keys); let mut transaction_storage = UnprocessedTransactionStorage::new_vote_storage( Arc::new(latest_unprocessed_votes), vote_source, @@ -1317,8 +1319,8 @@ mod tests { )?; vote.meta_mut().flags.set(PacketFlags::SIMPLE_VOTE_TX, true); - let latest_unprocessed_votes = LatestUnprocessedVotes::default(); - latest_unprocessed_votes.set_staked_nodes(&[node_keypair.pubkey()]); + let latest_unprocessed_votes = + LatestUnprocessedVotes::new_for_tests(&[vote_keypair.pubkey()]); let mut transaction_storage = UnprocessedTransactionStorage::new_vote_storage( Arc::new(latest_unprocessed_votes), VoteSource::Tpu, From dc35ce8a7be0ee4e9a53addccba0104adb7594cd Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Thu, 3 Oct 2024 20:06:59 -0700 Subject: [PATCH 432/529] Support max concurrent connections (#3031) * implementing max concurrent connection limits * add metrics refused_connections_too_many_open_connections * reference counting on concurrent connections --- streamer/src/nonblocking/quic.rs | 117 ++++++++++++++++++++++++++++++- streamer/src/quic.rs | 14 ++++ 2 files changed, 128 insertions(+), 3 deletions(-) diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 760902a6aab06b..0bc99e04652da9 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -39,6 +39,7 @@ use { }, solana_transaction_metrics_tracker::signature_if_should_track_packet, std::{ + fmt, iter::repeat_with, net::{IpAddr, SocketAddr, UdpSocket}, pin::Pin, @@ -195,8 +196,7 @@ pub fn spawn_server_multi( coalesce: Duration, ) -> Result { info!("Start {name} quic server on {sockets:?}"); - let concurrent_connections = - (max_staked_connections + max_unstaked_connections) / sockets.len(); + let concurrent_connections = max_staked_connections + max_unstaked_connections; let max_concurrent_connections = concurrent_connections + concurrent_connections / 4; let (config, _) = configure_server(keypair)?; @@ -227,6 +227,7 @@ pub fn spawn_server_multi( stats.clone(), wait_for_chunk_timeout, coalesce, + max_concurrent_connections, )); Ok(SpawnNonBlockingServerResult { endpoints, @@ -236,6 +237,52 @@ pub fn spawn_server_multi( }) } +/// A struct to ease tracking connections across all stages, so that we do not have +/// to litter the code with open connection tracking. This is added into the +/// connection table as part of the ConnectionEntry. The open connection count is +/// automatically reduced when the tracker is dropped.
+ +struct ClientConnectionTracker { + stats: Arc<StreamerStats>, +} + +/// This is required by ConnectionEntry to support the Debug format. +impl fmt::Debug for ClientConnectionTracker { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StreamerClientConnection") + .field( + "open_connections:", + &self.stats.open_connections.load(Ordering::Relaxed), + ) + .finish() + } +} + +impl Drop for ClientConnectionTracker { + /// When this is dropped, reduce the open connection count. + fn drop(&mut self) { + self.stats.open_connections.fetch_sub(1, Ordering::Relaxed); + } +} + +impl ClientConnectionTracker { + /// Check the max_concurrent_connections limit; if within the limit, create a + /// ClientConnectionTracker and increment the open connection count. Otherwise, return Err. + fn new(stats: Arc<StreamerStats>, max_concurrent_connections: usize) -> Result<Self, ()> { + let open_connections = stats.open_connections.fetch_add(1, Ordering::Relaxed); + if open_connections >= max_concurrent_connections { + stats.open_connections.fetch_sub(1, Ordering::Relaxed); + debug!( + "There are too many concurrent connections opened already: open: {}, max: {}", + open_connections, max_concurrent_connections + ); + return Err(()); + } + + Ok(Self { stats }) + } +} + #[allow(clippy::too_many_arguments)] async fn run_server( name: &'static str, @@ -251,6 +298,7 @@ async fn run_server( stats: Arc<StreamerStats>, wait_for_chunk_timeout: Duration, coalesce: Duration, + max_concurrent_connections: usize, ) { let rate_limiter = ConnectionRateLimiter::new(max_connections_per_ipaddr_per_min); let overall_connection_rate_limiter = @@ -290,6 +338,7 @@ async fn run_server( }) }) .collect::>(); + while !exit.load(Ordering::Relaxed) { let timeout_connection = select! { ready = accepts.next() => { @@ -320,6 +369,7 @@ async fn run_server( stats .total_incoming_connection_attempts .fetch_add(1, Ordering::Relaxed); + let remote_address = incoming.remote_address(); // first check overall connection rate limit: @@ -354,6 +404,16 @@ async fn run_server( continue; } + let Ok(client_connection_tracker) = + ClientConnectionTracker::new(stats.clone(), max_concurrent_connections) + else { + stats + .refused_connections_too_many_open_connections + .fetch_add(1, Ordering::Relaxed); + incoming.refuse(); + continue; + }; + stats .outstanding_incoming_connection_attempts .fetch_add(1, Ordering::Relaxed); @@ -362,6 +422,7 @@ async fn run_server( Ok(connecting) => { tokio::spawn(setup_connection( connecting, + client_connection_tracker, unstaked_connection_table.clone(), staked_connection_table.clone(), sender.clone(), @@ -496,6 +557,7 @@ impl NewConnectionHandlerParams { } fn handle_and_cache_new_connection( + client_connection_tracker: ClientConnectionTracker, connection: Connection, mut connection_table_l: MutexGuard, connection_table: Arc>, @@ -525,6 +587,7 @@ fn handle_and_cache_new_connection( .try_add_connection( ConnectionTableKey::new(remote_addr.ip(), params.remote_pubkey), remote_addr.port(), + client_connection_tracker, Some(connection.clone()), params.peer_type, timing::timestamp(), @@ -571,6 +634,7 @@ fn handle_and_cache_new_connection( } async fn prune_unstaked_connections_and_add_new_connection( + client_connection_tracker: ClientConnectionTracker, connection: Connection, connection_table: Arc>, max_connections: usize, @@ -584,6 +648,7 @@ async fn prune_unstaked_connections_and_add_new_connection( let mut connection_table = connection_table.lock().await; prune_unstaked_connection_table(&mut connection_table, max_connections, stats);
handle_and_cache_new_connection( + client_connection_tracker, connection, connection_table, connection_table_clone, @@ -646,6 +711,7 @@ fn compute_recieve_window( #[allow(clippy::too_many_arguments)] async fn setup_connection( connecting: Connecting, + client_connection_tracker: ClientConnectionTracker, unstaked_connection_table: Arc>, staked_connection_table: Arc>, packet_sender: AsyncSender, @@ -712,6 +778,7 @@ async fn setup_connection( if connection_table_l.total_size < max_staked_connections { if let Ok(()) = handle_and_cache_new_connection( + client_connection_tracker, new_connection, connection_table_l, staked_connection_table.clone(), @@ -728,6 +795,7 @@ async fn setup_connection( // put this connection in the unstaked connection table. If needed, prune a // connection from the unstaked connection table. if let Ok(()) = prune_unstaked_connections_and_add_new_connection( + client_connection_tracker, new_connection, unstaked_connection_table.clone(), max_unstaked_connections, @@ -752,6 +820,7 @@ async fn setup_connection( } ConnectionPeerType::Unstaked => { if let Ok(()) = prune_unstaked_connections_and_add_new_connection( + client_connection_tracker, new_connection, unstaked_connection_table.clone(), max_unstaked_connections, @@ -1226,6 +1295,8 @@ struct ConnectionEntry { peer_type: ConnectionPeerType, last_update: Arc, port: u16, + // We do not explicitly use it, but its drop is triggered when ConnectionEntry is dropped. + _client_connection_tracker: ClientConnectionTracker, connection: Option, stream_counter: Arc, } @@ -1236,6 +1307,7 @@ impl ConnectionEntry { peer_type: ConnectionPeerType, last_update: Arc, port: u16, + client_connection_tracker: ClientConnectionTracker, connection: Option, stream_counter: Arc, ) -> Self { @@ -1244,6 +1316,7 @@ impl ConnectionEntry { peer_type, last_update, port, + _client_connection_tracker: client_connection_tracker, connection, stream_counter, } @@ -1334,7 +1407,7 @@ impl ConnectionTable { }) .map(|index| { let connection = self.table[index].first(); - let stake = connection.map(|connection| connection.stake()); + let stake = connection.map(|connection: &ConnectionEntry| connection.stake()); (index, stake) }) .take(sample_size) @@ -1351,6 +1424,7 @@ impl ConnectionTable { &mut self, key: ConnectionTableKey, port: u16, + client_connection_tracker: ClientConnectionTracker, connection: Option, peer_type: ConnectionPeerType, last_update: u64, @@ -1378,6 +1452,7 @@ impl ConnectionTable { peer_type, last_update.clone(), port, + client_connection_tracker, connection, stream_counter.clone(), )); @@ -2002,11 +2077,13 @@ pub mod test { let sockets: Vec<_> = (0..num_entries) .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0)) .collect(); + let stats = Arc::new(StreamerStats::default()); for (i, socket) in sockets.iter().enumerate() { table .try_add_connection( ConnectionTableKey::IP(socket.ip()), socket.port(), + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, i as u64, @@ -2019,6 +2096,7 @@ pub mod test { .try_add_connection( ConnectionTableKey::IP(sockets[0].ip()), sockets[0].port(), + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, 5, @@ -2040,6 +2118,7 @@ pub mod test { table.remove_connection(ConnectionTableKey::IP(socket.ip()), socket.port(), 0); } assert_eq!(table.total_size, 0); + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 0); } #[test] @@ -2051,6 +2130,7 @@ pub mod test { // from a different peer pubkey. 
let num_entries = 15; let max_connections_per_peer = 10; + let stats = Arc::new(StreamerStats::default()); let pubkeys: Vec<_> = (0..num_entries).map(|_| Pubkey::new_unique()).collect(); for (i, pubkey) in pubkeys.iter().enumerate() { @@ -2058,6 +2138,7 @@ pub mod test { .try_add_connection( ConnectionTableKey::Pubkey(*pubkey), 0, + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, i as u64, @@ -2075,6 +2156,7 @@ pub mod test { table.remove_connection(ConnectionTableKey::Pubkey(*pubkey), 0, 0); } assert_eq!(table.total_size, 0); + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 0); } #[test] @@ -2084,11 +2166,14 @@ pub mod test { let max_connections_per_peer = 10; let pubkey = Pubkey::new_unique(); + let stats: Arc = Arc::new(StreamerStats::default()); + (0..max_connections_per_peer).for_each(|i| { table .try_add_connection( ConnectionTableKey::Pubkey(pubkey), 0, + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, i as u64, @@ -2103,6 +2188,7 @@ pub mod test { .try_add_connection( ConnectionTableKey::Pubkey(pubkey), 0, + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, 10, @@ -2117,6 +2203,7 @@ pub mod test { .try_add_connection( ConnectionTableKey::Pubkey(pubkey2), 0, + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, 10, @@ -2134,6 +2221,7 @@ pub mod test { table.remove_connection(ConnectionTableKey::Pubkey(pubkey2), 0, 0); assert_eq!(table.total_size, 0); + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 0); } #[test] @@ -2146,11 +2234,14 @@ pub mod test { let sockets: Vec<_> = (0..num_entries) .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0)) .collect(); + let stats: Arc = Arc::new(StreamerStats::default()); + for (i, socket) in sockets.iter().enumerate() { table .try_add_connection( ConnectionTableKey::IP(socket.ip()), socket.port(), + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Staked((i + 1) as u64), i as u64, @@ -2171,6 +2262,8 @@ pub mod test { num_entries as u64 + 1, // threshold_stake ); assert_eq!(pruned, 1); + // We had 5 connections and pruned 1, we should have 4 left + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 4); } #[test] @@ -2183,11 +2276,14 @@ pub mod test { let mut sockets: Vec<_> = (0..num_ips) .map(|i| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(i, 0, 0, 0)), 0)) .collect(); + let stats: Arc = Arc::new(StreamerStats::default()); + for (i, socket) in sockets.iter().enumerate() { table .try_add_connection( ConnectionTableKey::IP(socket.ip()), socket.port(), + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, (i * 2) as u64, @@ -2199,6 +2295,7 @@ pub mod test { .try_add_connection( ConnectionTableKey::IP(socket.ip()), socket.port(), + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, (i * 2 + 1) as u64, @@ -2213,6 +2310,7 @@ pub mod test { .try_add_connection( ConnectionTableKey::IP(single_connection_addr.ip()), single_connection_addr.port(), + ClientConnectionTracker::new(stats.clone(), 1000).unwrap(), None, ConnectionPeerType::Unstaked, (num_ips * 2) as u64, @@ -2230,6 +2328,7 @@ pub mod test { table.remove_connection(ConnectionTableKey::IP(socket.ip()), socket.port(), 0); } assert_eq!(table.total_size, 0); + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 0); } #[test] @@ -2342,4 +2441,16 
@@ pub mod test { ); assert!(stats.throttled_unstaked_streams.load(Ordering::Relaxed) > 0); } + + #[test] + fn test_client_connection_tracker() { + let stats = Arc::new(StreamerStats::default()); + let tracker_1 = ClientConnectionTracker::new(stats.clone(), 1); + assert!(tracker_1.is_ok()); + assert!(ClientConnectionTracker::new(stats.clone(), 1).is_err()); + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 1); + // dropping the connection, concurrent connections should become 0 + drop(tracker_1); + assert_eq!(stats.open_connections.load(Ordering::Relaxed), 0); + } } diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index e9ca06a10bb133..b5f78c753da92c 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -245,6 +245,9 @@ pub struct StreamerStats { pub(crate) throttled_staked_streams: AtomicUsize, pub(crate) throttled_unstaked_streams: AtomicUsize, pub(crate) connection_rate_limiter_length: AtomicUsize, + // All connections in various states such as Incoming, Connecting, Connection + pub(crate) open_connections: AtomicUsize, + pub(crate) refused_connections_too_many_open_connections: AtomicUsize, pub(crate) outstanding_incoming_connection_attempts: AtomicUsize, pub(crate) total_incoming_connection_attempts: AtomicUsize, pub(crate) quic_endpoints_count: AtomicUsize, @@ -593,6 +596,17 @@ impl StreamerStats { self.quic_endpoints_count.load(Ordering::Relaxed), i64 ), + ( + "open_connections", + self.open_connections.load(Ordering::Relaxed), + i64 + ), + ( + "refused_connections_too_many_open_connections", + self.refused_connections_too_many_open_connections + .swap(0, Ordering::Relaxed), + i64 + ), ); } } From 0d236f12b7e5fe101c98c8697a200ff686fc5f56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 15:48:47 +0800 Subject: [PATCH 433/529] build(deps): bump regex from 1.10.6 to 1.11.0 (#3034) * build(deps): bump regex from 1.10.6 to 1.11.0 Bumps [regex](https://github.com/rust-lang/regex) from 1.10.6 to 1.11.0. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.10.6...1.11.0) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cff368061b4781..588783880b8314 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4776,13 +4776,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.4.4", + "regex-automata 0.4.8", "regex-syntax", ] @@ -4794,9 +4794,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.4.4" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4805,9 +4805,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" diff --git a/Cargo.toml b/Cargo.toml index c4bd10559a8c17..8a16e8cdcbed2a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -334,7 +334,7 @@ rand = "0.8.5" rand_chacha = "0.3.1" rayon = "1.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.10.6" +regex = "1.11.0" reqwest = { version = "0.11.27", default-features = false } reqwest-middleware = "0.2.5" rolling-file = "0.2.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a3207197f0827f..cb63c9155637c6 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3991,9 +3991,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4003,9 +4003,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.4" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4014,9 +4014,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" From b08af5811ad40c26a4a8989983bc18057f93f4c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 17:17:15 +0800 Subject: 
[PATCH 434/529] build(deps): bump tempfile from 3.12.0 to 3.13.0 (#3033) * build(deps): bump tempfile from 3.12.0 to 3.13.0 Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.12.0 to 3.13.0. - [Changelog](https://github.com/Stebalien/tempfile/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stebalien/tempfile/compare/v3.12.0...v3.13.0) --- updated-dependencies: - dependency-name: tempfile dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 16 ++++++++-------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 588783880b8314..e64fb1c823a034 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2234,9 +2234,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fd-lock" @@ -3428,9 +3428,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" @@ -4987,9 +4987,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -9152,9 +9152,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if 1.0.0", "fastrand", diff --git a/Cargo.toml b/Cargo.toml index 8a16e8cdcbed2a..aafb64f36ad31b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -503,7 +503,7 @@ sysctl = "0.4.6" systemstat = "0.2.3" tar = "0.4.42" tarpc = "0.29.0" -tempfile = "3.12.0" +tempfile = "3.13.0" test-case = "3.3.1" thiserror = "1.0.64" tiny-bip39 = "0.8.2" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cb63c9155637c6..a4c36faf61f602 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1679,9 +1679,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fd-lock" @@ -2799,9 +2799,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" @@ -4187,9 +4187,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -7656,9 +7656,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if 1.0.0", "fastrand", From 91e01251915669001db52a449f740a6921941a7d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:17:17 +0800 Subject: [PATCH 435/529] build(deps): bump indexmap from 2.5.0 to 2.6.0 (#3068) * build(deps): bump indexmap from 2.5.0 to 2.6.0 Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.5.0 to 2.6.0. - [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md) - [Commits](https://github.com/indexmap-rs/indexmap/compare/2.5.0...2.6.0) --- updated-dependencies: - dependency-name: indexmap dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 34 ++++++++++++++++++++-------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 30 ++++++++++++++++++------------ 3 files changed, 39 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e64fb1c823a034..b8c6722e5389fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2651,7 +2651,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util 0.7.12", @@ -2701,6 +2701,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "headers" version = "0.3.7" @@ -3022,12 +3028,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.15.0", "rayon", ] @@ -5344,7 +5350,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5708,7 +5714,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "index_list", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.12.1", "lazy_static", "libsecp256k1", @@ -6215,7 +6221,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.5.0", + "indexmap 2.6.0", 
"indicatif", "log", "quinn", @@ -6314,7 +6320,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.5.0", + "indexmap 2.6.0", "indicatif", "log", "rand 0.8.5", @@ -6706,7 +6712,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.12.1", "log", "lru", @@ -8015,7 +8021,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.12.1", "libc", "log", @@ -8206,7 +8212,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.5.0", + "indexmap 2.6.0", "indicatif", "pickledb", "serde", @@ -8257,7 +8263,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.5.0", + "indexmap 2.6.0", "indicatif", "log", "rayon", @@ -9512,7 +9518,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.16", ] @@ -9523,7 +9529,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", diff --git a/Cargo.toml b/Cargo.toml index aafb64f36ad31b..59999154ae60dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -272,7 +272,7 @@ hyper = "0.14.30" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.13" -indexmap = "2.5.0" +indexmap = "2.6.0" indicatif = "0.17.8" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a4c36faf61f602..16a4e2f882b497 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2010,7 +2010,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util 0.7.1", @@ -2054,6 +2054,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "headers" version = "0.3.9" @@ -2355,12 +2361,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.15.0", "rayon", ] @@ -4483,7 +4489,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -4730,7 +4736,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "index_list", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.12.1", "lazy_static", "log", @@ -4979,7 +4985,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.5.0", + "indexmap 2.6.0", "indicatif", "log", "quinn", @@ -5046,7 +5052,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.5.0", + "indexmap 2.6.0", "log", "rand 0.8.5", "rayon", @@ -5306,7 +5312,7 @@ 
dependencies = [ "clap", "crossbeam-channel", "flate2", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.12.1", "log", "lru", @@ -6747,7 +6753,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.12.1", "libc", "log", @@ -6887,7 +6893,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.5.0", + "indexmap 2.6.0", "indicatif", "log", "rayon", @@ -7961,7 +7967,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow", ] From 17346cdfb292dff1ee538281b08859d3acf7a139 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 19:55:34 +0800 Subject: [PATCH 436/529] build(deps): bump syn from 2.0.77 to 2.0.79 (#3032) Bumps [syn](https://github.com/dtolnay/syn) from 2.0.77 to 2.0.79. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/2.0.77...2.0.79) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 70 +++++++++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b8c6722e5389fa..b4fb5c4899da53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -734,7 +734,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -887,7 +887,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1037,7 +1037,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "syn_derive", ] @@ -1169,7 +1169,7 @@ checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1802,7 +1802,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1826,7 +1826,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1837,7 +1837,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1899,7 +1899,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2023,7 +2023,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2129,7 +2129,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2426,7 +2426,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3777,7 +3777,7 @@ checksum 
= "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3850,7 +3850,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4481,7 +4481,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5273,7 +5273,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5329,7 +5329,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5379,7 +5379,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6636,7 +6636,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -7101,7 +7101,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "toml 0.8.12", ] @@ -7811,7 +7811,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8739,7 +8739,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8751,7 +8751,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.77", + "syn 2.0.79", "thiserror", ] @@ -8810,7 +8810,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8998,9 +8998,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -9016,7 +9016,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9203,7 +9203,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9215,7 +9215,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "test-case-core", ] @@ -9251,7 +9251,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9388,7 +9388,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9632,7 +9632,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9942,7 +9942,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-shared", ] @@ -9976,7 +9976,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10344,7 +10344,7 @@ checksum = 
"b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10364,7 +10364,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] From 46a89da0e69592227d3cd9c2bcb6da931a15973c Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Fri, 4 Oct 2024 18:03:11 +0400 Subject: [PATCH 437/529] Extract program-pack crate (#2930) * extract program-pack crate * fix description grammar * requested doc comment change * fix trailing whitespace --- Cargo.lock | 8 ++++++++ Cargo.toml | 2 ++ programs/sbf/Cargo.lock | 8 ++++++++ sdk/program-pack/Cargo.toml | 16 ++++++++++++++++ .../program_pack.rs => program-pack/src/lib.rs} | 12 +++++++++--- sdk/program/Cargo.toml | 1 + sdk/program/src/lib.rs | 5 +++-- 7 files changed, 47 insertions(+), 5 deletions(-) create mode 100644 sdk/program-pack/Cargo.toml rename sdk/{program/src/program_pack.rs => program-pack/src/lib.rs} (78%) diff --git a/Cargo.lock b/Cargo.lock index b4fb5c4899da53..c399b81353fbb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7239,6 +7239,7 @@ dependencies = [ "solana-program-error", "solana-program-memory", "solana-program-option", + "solana-program-pack", "solana-pubkey", "solana-sanitize", "solana-sdk-macro", @@ -7279,6 +7280,13 @@ dependencies = [ name = "solana-program-option" version = "2.1.0" +[[package]] +name = "solana-program-pack" +version = "2.1.0" +dependencies = [ + "solana-program-error", +] + [[package]] name = "solana-program-runtime" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 59999154ae60dd..9d6d1fd82984d1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,6 +119,7 @@ members = [ "sdk/program-error", "sdk/program-memory", "sdk/program-option", + "sdk/program-pack", "sdk/pubkey", "sdk/serde-varint", "sdk/serialize-utils", @@ -428,6 +429,7 @@ solana-program = { path = "sdk/program", version = "=2.1.0", default-features = solana-program-error = { path = "sdk/program-error", version = "=2.1.0" } solana-program-memory = { path = "sdk/program-memory", version = "=2.1.0" } solana-program-option = { path = "sdk/program-option", version = "=2.1.0" } +solana-program-pack = { path = "sdk/program-pack", version = "=2.1.0" } solana-program-runtime = { path = "program-runtime", version = "=2.1.0" } solana-program-test = { path = "program-test", version = "=2.1.0" } solana-pubkey = { path = "sdk/pubkey", version = "=2.1.0", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 16a4e2f882b497..3990c2bd6468e8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5650,6 +5650,7 @@ dependencies = [ "solana-program-error", "solana-program-memory", "solana-program-option", + "solana-program-pack", "solana-pubkey", "solana-sanitize", "solana-sdk-macro", @@ -5688,6 +5689,13 @@ dependencies = [ name = "solana-program-option" version = "2.1.0" +[[package]] +name = "solana-program-pack" +version = "2.1.0" +dependencies = [ + "solana-program-error", +] + [[package]] name = "solana-program-runtime" version = "2.1.0" diff --git a/sdk/program-pack/Cargo.toml b/sdk/program-pack/Cargo.toml new file mode 100644 index 00000000000000..a08c3456494cb0 --- /dev/null +++ b/sdk/program-pack/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "solana-program-pack" +description = "Solana Pack serialization trait." 
+documentation = "https://docs.rs/solana-program-pack" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-program-error = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/program/src/program_pack.rs b/sdk/program-pack/src/lib.rs similarity index 78% rename from sdk/program/src/program_pack.rs rename to sdk/program-pack/src/lib.rs index 286987f085c508..92223f9324921e 100644 --- a/sdk/program/src/program_pack.rs +++ b/sdk/program-pack/src/lib.rs @@ -1,6 +1,12 @@ -//! The [`Pack`] serialization trait. - -use crate::program_error::ProgramError; +//! The [`Pack`] serialization trait +//! This is a specific serialization API that is used by many older programs in +//! the [Solana Program Library][spl] to manage account state. It is not generally +//! recommended for new code since it does not define a language-independent +//! serialization format. +//! +//! [spl]: https://github.com/solana-labs/solana-program-library + +use solana_program_error::ProgramError; /// Check if a program account state is initialized pub trait IsInitialized { diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 6b0214ce1edcb3..2919a7bb5689ad 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -51,6 +51,7 @@ solana-msg = { workspace = true } solana-program-error = { workspace = true, features = ["serde"] } solana-program-memory = { workspace = true } solana-program-option = { workspace = true } +solana-program-pack = { workspace = true } solana-pubkey = { workspace = true, features = ["bytemuck", "curve25519", "serde", "std"] } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index d5da36d7c6dcdf..b853cd07d93be3 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -248,7 +248,7 @@ //! language-independent serialization format. It is not generally recommended //! for new code. //! -//! [`Pack`]: program_pack::Pack +//! [`Pack`]: https://docs.rs/solana-program-pack/latest/trait.Pack.html //! //! Developers should carefully consider the CPU cost of serialization, balanced //! 
against the need for correctness and ease of use: off-the-shelf @@ -506,7 +506,6 @@ pub mod native_token; pub mod nonce; pub mod program; pub mod program_error; -pub mod program_pack; pub mod program_stubs; pub mod program_utils; pub mod rent; @@ -525,6 +524,8 @@ pub mod wasm; #[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")] pub use solana_program_memory as program_memory; +#[deprecated(since = "2.1.0", note = "Use `solana-program-pack` crate instead")] +pub use solana_program_pack as program_pack; #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")] pub use solana_sanitize as sanitize; #[deprecated(since = "2.1.0", note = "Use `solana-secp256k1-recover` crate instead")] From ae5919e52e6a25aeddf7c409724bd2cf6f6e749f Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 4 Oct 2024 10:40:52 -0500 Subject: [PATCH 438/529] appropriate precompile failure error (#3066) --- programs/ed25519-tests/tests/process_transaction.rs | 2 +- runtime/src/verify_precompiles.rs | 10 ++++++++-- sdk/src/transaction/sanitized.rs | 13 ++++++++++--- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/programs/ed25519-tests/tests/process_transaction.rs b/programs/ed25519-tests/tests/process_transaction.rs index 7ffb64ef0d6499..d91e9882defb6d 100644 --- a/programs/ed25519-tests/tests/process_transaction.rs +++ b/programs/ed25519-tests/tests/process_transaction.rs @@ -73,7 +73,7 @@ async fn test_failure_without_move_precompiles_feature() { assert_matches!( client.process_transaction(transaction).await, Err(BanksClientError::TransactionError( - TransactionError::InvalidAccountIndex + TransactionError::InstructionError(0, InstructionError::Custom(3)) )) ); } diff --git a/runtime/src/verify_precompiles.rs b/runtime/src/verify_precompiles.rs index 69022785af4d1f..d199a7b84317a8 100644 --- a/runtime/src/verify_precompiles.rs +++ b/runtime/src/verify_precompiles.rs @@ -1,6 +1,7 @@ use { solana_feature_set::FeatureSet, solana_sdk::{ + instruction::InstructionError, precompiles::get_precompiles, transaction::{Result, TransactionError}, }, @@ -11,14 +12,19 @@ pub fn verify_precompiles(message: &impl SVMMessage, feature_set: &FeatureSet) - let mut all_instruction_data = None; // lazily collect this on first pre-compile let precompiles = get_precompiles(); - for (program_id, instruction) in message.program_instructions_iter() { + for (index, (program_id, instruction)) in message.program_instructions_iter().enumerate() { for precompile in precompiles { if precompile.check_id(program_id, |id| feature_set.is_active(id)) { let all_instruction_data: &Vec<&[u8]> = all_instruction_data .get_or_insert_with(|| message.instructions_iter().map(|ix| ix.data).collect()); precompile .verify(instruction.data, all_instruction_data, feature_set) - .map_err(|_| TransactionError::InvalidAccountIndex)?; + .map_err(|err| { + TransactionError::InstructionError( + index as u8, + InstructionError::Custom(err as u32), + ) + })?; break; } } diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 3027578918e1dd..10f7c6f048a531 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -18,7 +18,7 @@ use { transaction::{Result, Transaction, TransactionError, VersionedTransaction}, }, solana_feature_set as feature_set, - solana_program::message::SanitizedVersionedMessage, + solana_program::{instruction::InstructionError, message::SanitizedVersionedMessage}, solana_sanitize::Sanitize, std::collections::HashSet, }; @@ -262,14 
+262,21 @@ impl SanitizedTransaction { /// Verify the precompiled programs in this transaction pub fn verify_precompiles(&self, feature_set: &feature_set::FeatureSet) -> Result<()> { - for (program_id, instruction) in self.message.program_instructions_iter() { + for (index, (program_id, instruction)) in + self.message.program_instructions_iter().enumerate() + { verify_if_precompile( program_id, instruction, self.message().instructions(), feature_set, ) - .map_err(|_| TransactionError::InvalidAccountIndex)?; + .map_err(|err| { + TransactionError::InstructionError( + index as u8, + InstructionError::Custom(err as u32), + ) + })?; } Ok(()) } From 78f8312eceb9fa68abcca350b2ef33d4b5c8dc09 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Sat, 5 Oct 2024 01:10:53 +0800 Subject: [PATCH 439/529] ci: run Github Actions only in anza-xyz/agave (#3070) --- .github/workflows/add-team-to-ghsa.yml | 1 + .github/workflows/benchmark.yml | 1 + .github/workflows/cargo.yml | 1 + .github/workflows/client-targets.yml | 2 ++ .github/workflows/crate-check.yml | 1 + .github/workflows/docs.yml | 1 + .github/workflows/downstream-project-spl-nightly.yml | 8 +------- .github/workflows/downstream-project-spl.yml | 1 + .github/workflows/release.yml | 3 +++ 9 files changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/add-team-to-ghsa.yml b/.github/workflows/add-team-to-ghsa.yml index ea70d5870bf582..5e5f2f70881050 100644 --- a/.github/workflows/add-team-to-ghsa.yml +++ b/.github/workflows/add-team-to-ghsa.yml @@ -7,6 +7,7 @@ on: jobs: add-team-to-ghsa: + if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-24.04 steps: - name: Checkout diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index bdfc21e2b0ec4c..d78f22288bd632 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -6,6 +6,7 @@ on: jobs: benchmark: + if: github.repository == 'anza-xyz/agave' name: benchmark runs-on: benchmark strategy: diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml index 01db66c1a1bd4a..b78d9d66de4016 100644 --- a/.github/workflows/cargo.yml +++ b/.github/workflows/cargo.yml @@ -27,6 +27,7 @@ env: jobs: clippy-nightly: + if: github.repository == 'anza-xyz/agave' strategy: matrix: os: diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml index 1a33d2ae59493c..fb0b4ebcb8447c 100644 --- a/.github/workflows/client-targets.yml +++ b/.github/workflows/client-targets.yml @@ -20,6 +20,7 @@ env: jobs: android: + if: github.repository == 'anza-xyz/agave' strategy: matrix: os: @@ -44,6 +45,7 @@ jobs: run: ./cargo stable ndk --target ${{ matrix.target }} build -p solana-client ios: + if: github.repository == 'anza-xyz/agave' strategy: matrix: os: diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml index 6f130853ac1425..4526a0f526a891 100644 --- a/.github/workflows/crate-check.yml +++ b/.github/workflows/crate-check.yml @@ -14,6 +14,7 @@ on: jobs: check: + if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index c348d69acbe4ea..2118a713cf5eb1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -14,6 +14,7 @@ on: jobs: check: + if: github.repository == 'anza-xyz/agave' outputs: continue: ${{ steps.check.outputs.need_to_build }} runs-on: ubuntu-20.04 diff --git a/.github/workflows/downstream-project-spl-nightly.yml 
b/.github/workflows/downstream-project-spl-nightly.yml index e408140b52b927..ca0bcabd75a20a 100644 --- a/.github/workflows/downstream-project-spl-nightly.yml +++ b/.github/workflows/downstream-project-spl-nightly.yml @@ -6,13 +6,7 @@ on: jobs: main: - # As this is a cron job, it is better to avoid running it for all the forks. - # They are unlike to benefit from these executions, and they could easily - # eat up all the minutes GitHub allocation to free accounts. - if: > - github.event_name != 'schedule' - || github.repository == 'solana-labs/solana' - + if: github.repository == 'anza-xyz/agave' strategy: fail-fast: false matrix: diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index a3f45a29b30b0d..8097999ee8e8e9 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -37,6 +37,7 @@ env: jobs: check: + if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 73b8b0d25399d6..9d2f180d06b497 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -7,6 +7,7 @@ on: jobs: trigger-buildkite-pipeline: + if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-latest steps: - name: Trigger a Buildkite Build @@ -20,6 +21,7 @@ jobs: message: ":github: Triggered from a GitHub Action" draft-release: + if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-latest steps: - name: Create Release @@ -38,6 +40,7 @@ jobs: }) version-bump: + if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-latest steps: - name: Checkout code From 9496f28205cd4583767ba786c6a7c9370382fb1b Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Fri, 4 Oct 2024 13:14:08 -0500 Subject: [PATCH 440/529] ReplayStage: No More Clone SanitizedTransaction (#3058) --- core/tests/unified_scheduler.rs | 2 +- ledger/src/blockstore_processor.rs | 323 ++++++++++++++---------- runtime/src/bank.rs | 19 +- runtime/src/installed_scheduler_pool.rs | 27 +- unified-scheduler-pool/src/lib.rs | 76 +++--- 5 files changed, 257 insertions(+), 190 deletions(-) diff --git a/core/tests/unified_scheduler.rs b/core/tests/unified_scheduler.rs index a6e40296510609..a458e776fbc42c 100644 --- a/core/tests/unified_scheduler.rs +++ b/core/tests/unified_scheduler.rs @@ -108,7 +108,7 @@ fn test_scheduler_waited_by_drop_bank_service() { // been started let lock_to_stall = LOCK_TO_STALL.lock().unwrap(); pruned_bank - .schedule_transaction_executions([(&tx, &0)].into_iter()) + .schedule_transaction_executions([(tx, 0)].into_iter()) .unwrap(); drop(pruned_bank); assert_eq!(pool_raw.pooled_scheduler_count(), 0); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 986e1a2549b13b..3eedf03e4d0083 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -62,7 +62,7 @@ use { solana_vote::vote_account::VoteAccountsHashMap, std::{ collections::{HashMap, HashSet}, - ops::Index, + ops::{Index, Range}, path::PathBuf, result, sync::{ @@ -70,6 +70,7 @@ use { Arc, Mutex, RwLock, }, time::{Duration, Instant}, + vec::Drain, }, thiserror::Error, ExecuteTimingType::{NumExecuteBatches, TotalBatchesLen}, @@ -82,6 +83,14 @@ pub struct TransactionBatchWithIndexes<'a, 'b, Tx: SVMMessage> { pub transaction_indexes: Vec, } +// `TransactionBatchWithIndexes` but without the `Drop` that prevents +// us from nicely unwinding these with manual unlocking. 
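The struct introduced just below is plain data on purpose: unlike `TransactionBatchWithIndexes`, nothing runs on `Drop`, so the caller can release exactly the locks that were taken and then move the transactions out by value. A minimal self-contained model of that shape, with a toy lock table and `u64` placeholders standing in for the real account locks and `SanitizedTransaction`:

    use std::collections::HashSet;

    /// Toy account-lock table; a `u64` key stands in for an account address.
    #[derive(Default)]
    struct Locks(HashSet<u64>);

    impl Locks {
        fn try_lock(&mut self, key: u64) -> Result<(), ()> {
            self.0.insert(key).then_some(()).ok_or(())
        }
        fn unlock(&mut self, key: u64) {
            self.0.remove(&key);
        }
    }

    /// Drop-less analogue of `LockedTransactionsWithIndexes`: plain fields,
    /// so unlocking is explicit and the transactions can be moved out.
    struct LockedTxs {
        lock_results: Vec<Result<(), ()>>,
        txs: Vec<u64>,
    }

    fn main() {
        let mut locks = Locks::default();
        let txs = vec![1, 2, 3];
        let lock_results: Vec<_> = txs.iter().map(|&k| locks.try_lock(k)).collect();
        let locked = LockedTxs { lock_results, txs };

        // Manual unwinding: release only the locks that were actually taken...
        for (tx, res) in locked.txs.iter().zip(locked.lock_results.iter()) {
            if res.is_ok() {
                locks.unlock(*tx);
            }
        }
        // ...then hand the transactions to a consumer by value; no Drop fires.
        let _owned: Vec<u64> = locked.txs;
    }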
+pub struct LockedTransactionsWithIndexes { + lock_results: Vec>, + transactions: Vec, + starting_index: usize, +} + struct ReplayEntry { entry: EntryType, starting_index: usize, @@ -360,7 +369,7 @@ fn execute_batches_internal( fn process_batches( bank: &BankWithScheduler, replay_tx_thread_pool: &ThreadPool, - batches: &[TransactionBatchWithIndexes], + locked_entries: impl ExactSizeIterator>, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, batch_execution_timing: &mut BatchExecutionTiming, @@ -370,7 +379,7 @@ fn process_batches( if bank.has_installed_scheduler() { debug!( "process_batches()/schedule_batches_for_execution({} batches)", - batches.len() + locked_entries.len() ); // Scheduling usually succeeds (immediately returns `Ok(())`) here without being blocked on // the actual transaction executions. @@ -383,27 +392,27 @@ fn process_batches( // propagated eventually via the blocking fn called // BankWithScheduler::wait_for_completed_scheduler(). // - // To recite, the returned error is completely unrelated to the argument's `batches` at the - // hand. While being awkward, the _async_ unified scheduler is abusing this existing error - // propagation code path to the replay stage for compatibility and ease of integration, - // exploiting the fact that the replay stage doesn't care _which transaction the returned - // error is originating from_. + // To recite, the returned error is completely unrelated to the argument's `locked_entries` + // at the hand. While being awkward, the _async_ unified scheduler is abusing this existing + // error propagation code path to the replay stage for compatibility and ease of + // integration, exploiting the fact that the replay stage doesn't care _which transaction + // the returned error is originating from_. // // In the future, more proper error propagation mechanism will be introduced once after we // fully transition to the unified scheduler for the block verification. That one would be // a push based one from the unified scheduler to the replay stage to eliminate the current // overhead: 1 read lock per batch in // `BankWithScheduler::schedule_transaction_executions()`. - schedule_batches_for_execution(bank, batches) + schedule_batches_for_execution(bank, locked_entries) } else { debug!( "process_batches()/rebatch_and_execute_batches({} batches)", - batches.len() + locked_entries.len() ); rebatch_and_execute_batches( bank, replay_tx_thread_pool, - batches, + locked_entries, transaction_status_sender, replay_vote_sender, batch_execution_timing, @@ -415,38 +424,45 @@ fn process_batches( fn schedule_batches_for_execution( bank: &BankWithScheduler, - batches: &[TransactionBatchWithIndexes], + locked_entries: impl Iterator>, ) -> Result<()> { - for TransactionBatchWithIndexes { - batch, - transaction_indexes, - } in batches + // Track the first error encountered in the loop below, if any. + // This error will be propagated to the replay stage, or Ok(()). + let mut first_err = Ok(()); + + for LockedTransactionsWithIndexes { + lock_results, + transactions, + starting_index, + } in locked_entries { - bank.schedule_transaction_executions( - batch - .sanitized_transactions() - .iter() - .zip(transaction_indexes.iter()), - )?; + // unlock before sending to scheduler. + bank.unlock_accounts(transactions.iter().zip(lock_results.iter())); + // give ownership to scheduler. capture the first error, but continue the loop + // to unlock. 
+ // scheduling is skipped if we have already detected an error in this loop + let indexes = starting_index..starting_index + transactions.len(); + first_err = first_err.and_then(|()| { + bank.schedule_transaction_executions(transactions.into_iter().zip_eq(indexes)) + }); } - Ok(()) + first_err } fn rebatch_transactions<'a>( lock_results: &'a [Result<()>], bank: &'a Arc, sanitized_txs: &'a [SanitizedTransaction], - start: usize, - end: usize, + range: Range, transaction_indexes: &'a [usize], ) -> TransactionBatchWithIndexes<'a, 'a, SanitizedTransaction> { - let txs = &sanitized_txs[start..=end]; - let results = &lock_results[start..=end]; + let txs = &sanitized_txs[range.clone()]; + let results = &lock_results[range.clone()]; let mut tx_batch = TransactionBatch::new(results.to_vec(), bank, OwnedOrBorrowed::Borrowed(txs)); - tx_batch.set_needs_unlock(false); + tx_batch.set_needs_unlock(true); // unlock on drop for easier clean up - let transaction_indexes = transaction_indexes[start..=end].to_vec(); + let transaction_indexes = transaction_indexes[range].to_vec(); TransactionBatchWithIndexes { batch: tx_batch, transaction_indexes, @@ -456,29 +472,37 @@ fn rebatch_transactions<'a>( fn rebatch_and_execute_batches( bank: &Arc, replay_tx_thread_pool: &ThreadPool, - batches: &[TransactionBatchWithIndexes], + locked_entries: impl ExactSizeIterator>, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timing: &mut BatchExecutionTiming, log_messages_bytes_limit: Option, prioritization_fee_cache: &PrioritizationFeeCache, ) -> Result<()> { - if batches.is_empty() { + if locked_entries.len() == 0 { return Ok(()); } - let ((lock_results, sanitized_txs), transaction_indexes): ((Vec<_>, Vec<_>), Vec<_>) = batches - .iter() - .flat_map(|batch| { - batch - .batch - .lock_results() - .iter() - .cloned() - .zip(batch.batch.sanitized_transactions().to_vec()) - .zip(batch.transaction_indexes.to_vec()) - }) - .unzip(); + // Flatten the locked entries. Store the original entry lengths to avoid rebatching logic + // for small entries. + let mut original_entry_lengths = Vec::with_capacity(locked_entries.len()); + let ((lock_results, sanitized_txs), transaction_indexes): ((Vec<_>, Vec<_>), Vec<_>) = + locked_entries + .flat_map( + |LockedTransactionsWithIndexes { + lock_results, + transactions, + starting_index, + }| { + let num_transactions = transactions.len(); + original_entry_lengths.push(num_transactions); + lock_results + .into_iter() + .zip_eq(transactions) + .zip_eq(starting_index..starting_index + num_transactions) + }, + ) + .unzip(); let mut minimal_tx_cost = u64::MAX; let mut total_cost: u64 = 0; @@ -508,8 +532,7 @@ fn rebatch_and_execute_batches( &lock_results, bank, &sanitized_txs, - slice_start, - index, + slice_start..next_index, &transaction_indexes, ); slice_start = next_index; @@ -519,7 +542,24 @@ fn rebatch_and_execute_batches( }); &tx_batches[..] } else { - batches + let mut slice_start = 0; + for num_transactions in original_entry_lengths { + let next_index = slice_start + num_transactions; + // this is more of a "re-construction" of the original batches than + // a rebatching. But the logic is the same, with the transfer of + // unlocking responsibility to the batch. + let tx_batch = rebatch_transactions( + &lock_results, + bank, + &sanitized_txs, + slice_start..next_index, + &transaction_indexes, + ); + slice_start = next_index; + tx_batches.push(tx_batch); + } + + &tx_batches[..] 
}; let execute_batches_internal_metrics = execute_batches_internal( @@ -561,7 +601,7 @@ pub fn process_entries_for_tests( let mut entry_starting_index: usize = bank.transaction_count().try_into().unwrap(); let mut batch_timing = BatchExecutionTiming::default(); - let mut replay_entries: Vec<_> = entry::verify_transactions( + let replay_entries: Vec<_> = entry::verify_transactions( entries, &replay_tx_thread_pool, Arc::new(verify_transaction), @@ -583,7 +623,7 @@ pub fn process_entries_for_tests( let result = process_entries( bank, &replay_tx_thread_pool, - &mut replay_entries, + replay_entries, transaction_status_sender, replay_vote_sender, &mut batch_timing, @@ -598,7 +638,7 @@ pub fn process_entries_for_tests( fn process_entries( bank: &BankWithScheduler, replay_tx_thread_pool: &ThreadPool, - entries: &mut [ReplayEntry], + entries: Vec, transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, batch_timing: &mut BatchExecutionTiming, @@ -624,78 +664,44 @@ fn process_entries( process_batches( bank, replay_tx_thread_pool, - &batches, + batches.drain(..), transaction_status_sender, replay_vote_sender, batch_timing, log_messages_bytes_limit, prioritization_fee_cache, )?; - batches.clear(); - for hash in &tick_hashes { - bank.register_tick(hash); + for hash in tick_hashes.drain(..) { + bank.register_tick(&hash); } - tick_hashes.clear(); } } EntryType::Transactions(transactions) => { - let starting_index = *starting_index; - let transaction_indexes = - (starting_index..starting_index.saturating_add(transactions.len())).collect(); - loop { - // try to lock the accounts - let batch = bank.prepare_sanitized_batch(transactions); - let first_lock_err = first_err(batch.lock_results()); - - // if locking worked - if first_lock_err.is_ok() { - batches.push(TransactionBatchWithIndexes { - batch, - transaction_indexes, - }); - // done with this entry - break; - } - // else we failed to lock, 2 possible reasons - if batches.is_empty() { - // An entry has account lock conflicts with *itself*, which should not happen - // if generated by a properly functioning leader - datapoint_error!( - "validator_process_entry_error", - ( - "error", - format!( - "Lock accounts error, entry conflicts with itself, txs: \ - {transactions:?}" - ), - String - ) - ); - // bail - first_lock_err?; - } else { - // else we have an entry that conflicts with a prior entry - // execute the current queue and try to process this entry again + queue_batches_with_lock_retry( + bank, + starting_index, + transactions, + &mut batches, + |batches| { process_batches( bank, replay_tx_thread_pool, - &batches, + batches, transaction_status_sender, replay_vote_sender, batch_timing, log_messages_bytes_limit, prioritization_fee_cache, - )?; - batches.clear(); - } - } + ) + }, + )?; } } } process_batches( bank, replay_tx_thread_pool, - &batches, + batches.into_iter(), transaction_status_sender, replay_vote_sender, batch_timing, @@ -703,11 +709,82 @@ fn process_entries( prioritization_fee_cache, )?; for hash in tick_hashes { - bank.register_tick(hash); + bank.register_tick(&hash); } Ok(()) } +/// If an entry can be locked without failure, the transactions are pushed +/// as a batch to `batches`. If the lock fails, the transactions are unlocked +/// and the batches are processed. +/// The locking process is retried, and if it fails again the block is marked +/// as dead. +/// If the lock retry succeeds, then the batch is pushed into `batches`. 
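In outline, the retry logic in the function that follows is: try to lock the entry's accounts; on conflict, roll back the partial locks, flush the batches queued so far (which releases their locks), and try once more; a second failure means the entry conflicts with itself and the block is marked dead upstream. A minimal sketch of that control flow, with a `HashSet<u64>` as a stand-in lock table (all names here are illustrative):

    use std::collections::HashSet;

    fn try_lock_all(held: &mut HashSet<u64>, keys: &[u64]) -> Vec<bool> {
        keys.iter().map(|k| held.insert(*k)).collect()
    }

    fn unlock_taken(held: &mut HashSet<u64>, keys: &[u64], results: &[bool]) {
        for (key, ok) in keys.iter().zip(results) {
            if *ok {
                held.remove(key);
            }
        }
    }

    /// Ok(lock results) if the entry could be queued; Err(()) if it still
    /// conflicts after the prior batches were flushed (self-conflicting).
    fn queue_with_retry(
        held: &mut HashSet<u64>,
        keys: &[u64],
        mut flush_prior_batches: impl FnMut(&mut HashSet<u64>),
    ) -> Result<Vec<bool>, ()> {
        let results = try_lock_all(held, keys);
        if results.iter().all(|ok| *ok) {
            return Ok(results); // locked on the first try
        }
        // Conflict with an already-queued batch (or with itself): roll back,
        // flush the queue to release its locks, then retry exactly once.
        unlock_taken(held, keys, &results);
        flush_prior_batches(held);
        let results = try_lock_all(held, keys);
        if results.iter().all(|ok| *ok) {
            Ok(results)
        } else {
            unlock_taken(held, keys, &results); // still conflicting: give up
            Err(())
        }
    }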
+fn queue_batches_with_lock_retry( + bank: &Bank, + starting_index: usize, + transactions: Vec, + batches: &mut Vec>, + mut process_batches: impl FnMut( + Drain>, + ) -> Result<()>, +) -> Result<()> { + // try to lock the accounts + let lock_results = bank.try_lock_accounts(&transactions); + let first_lock_err = first_err(&lock_results); + if first_lock_err.is_ok() { + batches.push(LockedTransactionsWithIndexes { + lock_results, + transactions, + starting_index, + }); + return Ok(()); + } + + // We need to unlock the transactions that succeeded to lock before the + // retry. + bank.unlock_accounts(transactions.iter().zip(lock_results.iter())); + + // We failed to lock, there are 2 possible reasons: + // 1. A batch already in `batches` holds the lock. + // 2. The batch is "self-conflicting" (i.e. the batch has account lock conflicts with itself) + + // Use the callback to process batches, and clear them. + // Clearing the batches will `Drop` the batches which will unlock the accounts. + process_batches(batches.drain(..))?; + + // Retry the lock + let lock_results = bank.try_lock_accounts(&transactions); + match first_err(&lock_results) { + Ok(()) => { + batches.push(LockedTransactionsWithIndexes { + lock_results, + transactions, + starting_index, + }); + Ok(()) + } + Err(err) => { + // We still may have succeeded to lock some accounts, unlock them. + bank.unlock_accounts(transactions.iter().zip(lock_results.iter())); + + // An entry has account lock conflicts with *itself*, which should not happen + // if generated by a properly functioning leader + datapoint_error!( + "validator_process_entry_error", + ( + "error", + format!( + "Lock accounts error, entry conflicts with itself, txs: {transactions:?}" + ), + String + ) + ); + Err(err) + } + } +} + #[derive(Error, Debug)] pub enum BlockstoreProcessorError { #[error("failed to load entries, error: {0}")] @@ -1608,7 +1685,7 @@ fn confirm_slot_entries( .expect("Transaction verification generates entries"); let mut replay_timer = Measure::start("replay_elapsed"); - let mut replay_entries: Vec<_> = entries + let replay_entries: Vec<_> = entries .into_iter() .zip(entry_tx_starting_indexes) .map(|(entry, tx_starting_index)| ReplayEntry { @@ -1619,7 +1696,7 @@ fn confirm_slot_entries( let process_result = process_entries( bank, replay_tx_thread_pool, - &mut replay_entries, + replay_entries, transaction_status_sender, replay_vote_sender, batch_execute_timing, @@ -4826,32 +4903,18 @@ pub mod tests { } = create_genesis_config_with_leader(500, &dummy_leader_pubkey, 100); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); - let batch = bank.prepare_sanitized_batch(&txs); - assert!(batch.needs_unlock()); + let lock_results = bank.try_lock_accounts(&txs); + assert!(lock_results.iter().all(Result::is_ok)); + let transaction_indexes = vec![42, 43, 44]; - let batch2 = rebatch_transactions( - batch.lock_results(), - &bank, - batch.sanitized_transactions(), - 0, - 0, - &transaction_indexes, - ); - assert!(batch.needs_unlock()); - assert!(!batch2.batch.needs_unlock()); - assert_eq!(batch2.transaction_indexes, vec![42]); + let batch = rebatch_transactions(&lock_results, &bank, &txs, 0..1, &transaction_indexes); + assert!(batch.batch.needs_unlock()); + assert_eq!(batch.transaction_indexes, vec![42]); - let batch3 = rebatch_transactions( - batch.lock_results(), - &bank, - batch.sanitized_transactions(), - 1, - 2, - &transaction_indexes, - ); - assert!(!batch3.batch.needs_unlock()); - 
assert_eq!(batch3.transaction_indexes, vec![43, 44]); + let batch2 = rebatch_transactions(&lock_results, &bank, &txs, 1..3, &transaction_indexes); + assert!(batch2.batch.needs_unlock()); + assert_eq!(batch2.transaction_indexes, vec![43, 44]); } fn do_test_schedule_batches_for_execution(should_succeed: bool) { @@ -4879,14 +4942,14 @@ pub mod tests { mocked_scheduler .expect_schedule_execution() .times(txs.len()) - .returning(|(_, _)| Ok(())); + .returning(|_, _| Ok(())); } else { // mocked_scheduler isn't async; so short-circuiting behavior is quite visible in that // .times(1) is called instead of .times(txs.len()), not like the succeeding case mocked_scheduler .expect_schedule_execution() .times(1) - .returning(|(_, _)| Err(SchedulerAborted)); + .returning(|_, _| Err(SchedulerAborted)); mocked_scheduler .expect_recover_error_after_abort() .times(1) @@ -4911,10 +4974,10 @@ pub mod tests { }); let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); - let batch = bank.prepare_sanitized_batch(&txs); - let batch_with_indexes = TransactionBatchWithIndexes { - batch, - transaction_indexes: (0..txs.len()).collect(), + let locked_entry = LockedTransactionsWithIndexes { + lock_results: bank.try_lock_accounts(&txs), + transactions: txs, + starting_index: 0, }; let replay_tx_thread_pool = create_thread_pool(1); @@ -4923,7 +4986,7 @@ pub mod tests { let result = process_batches( &bank, &replay_tx_thread_pool, - &[batch_with_indexes], + [locked_entry].into_iter(), None, None, &mut batch_execution_timing, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f74de8b2ffec64..e232b2bc36bf89 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3352,17 +3352,24 @@ impl Bank { )) } + /// Attempt to take locks on the accounts in a transaction batch + pub fn try_lock_accounts(&self, txs: &[SanitizedTransaction]) -> Vec> { + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + self.rc + .accounts + .lock_accounts(txs.iter(), tx_account_lock_limit) + } + /// Prepare a locked transaction batch from a list of sanitized transactions. 
pub fn prepare_sanitized_batch<'a, 'b>( &'a self, txs: &'b [SanitizedTransaction], ) -> TransactionBatch<'a, 'b, SanitizedTransaction> { - let tx_account_lock_limit = self.get_transaction_account_lock_limit(); - let lock_results = self - .rc - .accounts - .lock_accounts(txs.iter(), tx_account_lock_limit); - TransactionBatch::new(lock_results, self, OwnedOrBorrowed::Borrowed(txs)) + TransactionBatch::new( + self.try_lock_accounts(txs), + self, + OwnedOrBorrowed::Borrowed(txs), + ) } /// Prepare a locked transaction batch from a list of sanitized transactions, and their cost diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index ee04859e3ea15e..db332228f03175 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -134,11 +134,7 @@ impl Debug for TimeoutListener { #[cfg_attr(feature = "dev-context-only-utils", automock)] // suppress false clippy complaints arising from mockall-derive: // warning: `#[must_use]` has no effect when applied to a struct field -// warning: the following explicit lifetimes could be elided: 'a -#[cfg_attr( - feature = "dev-context-only-utils", - allow(unused_attributes, clippy::needless_lifetimes) -)] +#[cfg_attr(feature = "dev-context-only-utils", allow(unused_attributes))] pub trait InstalledScheduler: Send + Sync + Debug + 'static { fn id(&self) -> SchedulerId; fn context(&self) -> &SchedulingContext; @@ -165,10 +161,8 @@ pub trait InstalledScheduler: Send + Sync + Debug + 'static { /// optimize the fast code-path of normal transaction scheduling to be multi-threaded at the /// cost of far slower error code-path while giving implementors increased flexibility by /// having &mut. - fn schedule_execution<'a>( - &'a self, - transaction_with_index: &'a (&'a SanitizedTransaction, usize), - ) -> ScheduleResult; + fn schedule_execution(&self, transaction: SanitizedTransaction, index: usize) + -> ScheduleResult; /// Return the error which caused the scheduler to abort. /// @@ -444,10 +438,9 @@ impl BankWithScheduler { /// /// Calling this will panic if the installed scheduler is Unavailable (the bank is /// wait_for_termination()-ed or the unified scheduler is disabled in the first place). - // 'a is needed; anonymous_lifetime_in_impl_trait isn't stabilized yet... 
- pub fn schedule_transaction_executions<'a>( + pub fn schedule_transaction_executions( &self, - transactions_with_indexes: impl ExactSizeIterator, + transactions_with_indexes: impl ExactSizeIterator, ) -> Result<()> { trace!( "schedule_transaction_executions(): {} txs", @@ -455,8 +448,8 @@ impl BankWithScheduler { ); let schedule_result: ScheduleResult = self.inner.with_active_scheduler(|scheduler| { - for (sanitized_transaction, &index) in transactions_with_indexes { - scheduler.schedule_execution(&(sanitized_transaction, index))?; + for (sanitized_transaction, index) in transactions_with_indexes { + scheduler.schedule_execution(sanitized_transaction, index)?; } Ok(()) }); @@ -856,12 +849,12 @@ mod tests { mocked .expect_schedule_execution() .times(1) - .returning(|(_, _)| Ok(())); + .returning(|_, _| Ok(())); } else { mocked .expect_schedule_execution() .times(1) - .returning(|(_, _)| Err(SchedulerAborted)); + .returning(|_, _| Err(SchedulerAborted)); mocked .expect_recover_error_after_abort() .times(1) @@ -871,7 +864,7 @@ mod tests { ); let bank = BankWithScheduler::new(bank, Some(mocked_scheduler)); - let result = bank.schedule_transaction_executions([(&tx0, &0)].into_iter()); + let result = bank.schedule_transaction_executions([(tx0, 0)].into_iter()); if should_succeed { assert_matches!(result, Ok(())); } else { diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 8c2745e138fd33..105e0d93326d3e 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -1411,9 +1411,10 @@ impl InstalledScheduler for PooledScheduler { fn schedule_execution( &self, - &(transaction, index): &(&SanitizedTransaction, usize), + transaction: SanitizedTransaction, + index: usize, ) -> ScheduleResult { - let task = SchedulingStateMachine::create_task(transaction.clone(), index, &mut |pubkey| { + let task = SchedulingStateMachine::create_task(transaction, index, &mut |pubkey| { self.inner.usage_queue_loader.load(pubkey) }); self.inner.thread_manager.send_task(task) @@ -1776,25 +1777,25 @@ mod tests { pool.register_timeout_listener(bank.create_timeout_listener()); let tx_before_stale = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - bank.schedule_transaction_executions([(tx_before_stale, &0)].into_iter()) + bank.schedule_transaction_executions([(tx_before_stale, 0)].into_iter()) .unwrap(); sleepless_testing::at(TestCheckPoint::BeforeTimeoutListenerTriggered); sleepless_testing::at(TestCheckPoint::AfterTimeoutListenerTriggered); let tx_after_stale = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - bank.schedule_transaction_executions([(tx_after_stale, &1)].into_iter()) + bank.schedule_transaction_executions([(tx_after_stale, 1)].into_iter()) .unwrap(); // Observe second occurrence of TimeoutListenerTriggered(1), which indicates a new timeout @@ -1896,26 +1897,26 @@ mod tests { pool.register_timeout_listener(bank.create_timeout_listener()); let tx_before_stale = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, 
&solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - bank.schedule_transaction_executions([(tx_before_stale, &0)].into_iter()) + bank.schedule_transaction_executions([(tx_before_stale, 0)].into_iter()) .unwrap(); sleepless_testing::at(TestCheckPoint::BeforeTimeoutListenerTriggered); sleepless_testing::at(TestCheckPoint::AfterSchedulerThreadAborted); sleepless_testing::at(TestCheckPoint::AfterTimeoutListenerTriggered); let tx_after_stale = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - let result = bank.schedule_transaction_executions([(tx_after_stale, &1)].into_iter()); + let result = bank.schedule_transaction_executions([(tx_after_stale, 1)].into_iter()); assert_matches!(result, Err(TransactionError::AccountNotFound)); let (result, _timings) = bank.wait_for_completed_scheduler().unwrap(); @@ -1960,7 +1961,7 @@ mod tests { .. } = create_genesis_config(10_000); - let tx = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, @@ -1979,7 +1980,7 @@ mod tests { ); let context = SchedulingContext::new(bank.clone()); let scheduler = pool.do_take_scheduler(context); - scheduler.schedule_execution(&(tx, 0)).unwrap(); + scheduler.schedule_execution(tx, 0).unwrap(); match abort_case { AbortCase::Unhandled => { @@ -2082,13 +2083,13 @@ mod tests { for i in 0..MAX_TASK_COUNT { let tx = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - scheduler.schedule_execution(&(tx, i)).unwrap(); + scheduler.schedule_execution(tx, i).unwrap(); } // Make sure ThreadManager::drop() is properly short-circuiting for non-aborting scheduler. @@ -2233,7 +2234,7 @@ mod tests { mint_keypair, .. 
} = create_genesis_config(10_000); - let tx0 = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + let tx0 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, @@ -2248,7 +2249,7 @@ mod tests { assert_eq!(bank.transaction_count(), 0); let scheduler = pool.take_scheduler(context); - scheduler.schedule_execution(&(tx0, 0)).unwrap(); + scheduler.schedule_execution(tx0, 0).unwrap(); let bank = BankWithScheduler::new(bank, Some(scheduler)); assert_matches!(bank.wait_for_completed_scheduler(), Some((Ok(()), _))); assert_eq!(bank.transaction_count(), 1); @@ -2294,19 +2295,19 @@ mod tests { let unfunded_keypair = Keypair::new(); let bad_tx = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &unfunded_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); assert_eq!(bank.transaction_count(), 0); - scheduler.schedule_execution(&(bad_tx, 0)).unwrap(); + scheduler.schedule_execution(bad_tx, 0).unwrap(); sleepless_testing::at(TestCheckPoint::AfterTaskHandled); assert_eq!(bank.transaction_count(), 0); let good_tx_after_bad_tx = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 3, @@ -2314,7 +2315,7 @@ mod tests { )); // make sure this tx is really a good one to execute. assert_matches!( - bank.simulate_transaction_unchecked(good_tx_after_bad_tx, false) + bank.simulate_transaction_unchecked(&good_tx_after_bad_tx, false) .result, Ok(_) ); @@ -2322,7 +2323,7 @@ mod tests { let bank = BankWithScheduler::new(bank, Some(scheduler)); if extra_tx_after_failure { assert_matches!( - bank.schedule_transaction_executions([(good_tx_after_bad_tx, &1)].into_iter()), + bank.schedule_transaction_executions([(good_tx_after_bad_tx, 1)].into_iter()), Err(TransactionError::AccountNotFound) ); } @@ -2425,13 +2426,13 @@ mod tests { for index in 0..TX_COUNT { // Use 2 non-conflicting txes to exercise the channel disconnected case as well. let tx = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &Keypair::new(), &solana_sdk::pubkey::new_rand(), 1, genesis_config.hash(), )); - scheduler.schedule_execution(&(tx, index)).unwrap(); + scheduler.schedule_execution(tx, index).unwrap(); } // finally unblock the scheduler thread; otherwise the above schedule_execution could // return SchedulerAborted... @@ -2499,13 +2500,13 @@ mod tests { for i in 0..10 { let tx = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - scheduler.schedule_execution(&(tx, i)).unwrap(); + scheduler.schedule_execution(tx, i).unwrap(); } // finally unblock the scheduler thread; otherwise the above schedule_execution could // return SchedulerAborted... 
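The test hunks on either side of this point are all the same mechanical migration: `schedule_execution` now takes the transaction by value instead of `&(&SanitizedTransaction, usize)`, so a clone happens at the call site only when the caller still needs the transaction afterwards. A minimal model of the signature change, with a plain `Tx` type standing in for `SanitizedTransaction`:

    #[derive(Clone, Debug)]
    struct Tx(&'static str);

    #[derive(Default)]
    struct Scheduler {
        queue: Vec<(Tx, usize)>,
    }

    impl Scheduler {
        // Before: fn schedule_execution(&self, &(tx, index): &(&Tx, usize)),
        // which forced an internal tx.clone() for every scheduled task.
        // After: ownership moves in, so no hidden clone is needed.
        fn schedule_execution(&mut self, tx: Tx, index: usize) {
            self.queue.push((tx, index));
        }
    }

    fn main() {
        let mut scheduler = Scheduler::default();
        let tx = Tx("transfer");
        scheduler.schedule_execution(tx.clone(), 0); // caller still needs tx
        scheduler.schedule_execution(tx, 1); // last use: hand over ownership
        assert_eq!(scheduler.queue.len(), 2);
    }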
@@ -2563,13 +2564,13 @@ mod tests { } = create_genesis_config(10_000); // tx0 and tx1 is definitely conflicting to write-lock the mint address - let tx0 = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + let tx0 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, genesis_config.hash(), )); - let tx1 = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + let tx1 = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, @@ -2594,10 +2595,10 @@ mod tests { // Stall handling tx0 and tx1 let lock_to_stall = LOCK_TO_STALL.lock().unwrap(); scheduler - .schedule_execution(&(tx0, STALLED_TRANSACTION_INDEX)) + .schedule_execution(tx0, STALLED_TRANSACTION_INDEX) .unwrap(); scheduler - .schedule_execution(&(tx1, BLOCKED_TRANSACTION_INDEX)) + .schedule_execution(tx1, BLOCKED_TRANSACTION_INDEX) .unwrap(); // Wait a bit for the scheduler thread to decide to block tx1 @@ -2656,7 +2657,7 @@ mod tests { // Create a dummy tx and two contexts let dummy_tx = - &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 2, @@ -2672,7 +2673,9 @@ mod tests { .take(10000) { let scheduler = pool.take_scheduler(context.clone()); - scheduler.schedule_execution(&(dummy_tx, index)).unwrap(); + scheduler + .schedule_execution(dummy_tx.clone(), index) + .unwrap(); scheduler.wait_for_termination(false).1.return_to_pool(); } } @@ -2714,9 +2717,10 @@ mod tests { fn schedule_execution( &self, - &(transaction, index): &(&SanitizedTransaction, usize), + transaction: SanitizedTransaction, + index: usize, ) -> ScheduleResult { - let transaction_and_index = (transaction.clone(), index); + let transaction_and_index = (transaction, index); let context = self.context().clone(); let pool = self.3.clone(); @@ -2854,7 +2858,7 @@ mod tests { assert_eq!(bank.transaction_count(), 0); // schedule but not immediately execute transaction - bank.schedule_transaction_executions([(&very_old_valid_tx, &0)].into_iter()) + bank.schedule_transaction_executions([(very_old_valid_tx, 0)].into_iter()) .unwrap(); // this calls register_recent_blockhash internally bank.fill_bank_with_ticks_for_tests(); @@ -2917,7 +2921,7 @@ mod tests { ); // mangle the transfer tx to try to lock fee_payer (= mint_keypair) address twice! tx.message.account_keys.push(tx.message.account_keys[0]); - let tx = &SanitizedTransaction::from_transaction_for_tests(tx); + let tx = SanitizedTransaction::from_transaction_for_tests(tx); // this internally should call SanitizedTransaction::get_account_locks(). 
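The mangled transfer above duplicates its fee-payer key precisely so that account-lock validation rejects it with `AccountLoadedTwice`. The duplicate check itself amounts to a scan over the transaction's account keys; a tiny model of it, with `u64` standing in for `Pubkey`:

    use std::collections::HashSet;

    fn validate_account_locks(account_keys: &[u64]) -> Result<(), &'static str> {
        let mut seen = HashSet::new();
        for key in account_keys {
            if !seen.insert(key) {
                return Err("AccountLoadedTwice");
            }
        }
        Ok(())
    }

    fn main() {
        assert!(validate_account_locks(&[1, 2, 3]).is_ok());
        // Fee payer pushed twice, as in the mangled transaction above:
        assert_eq!(validate_account_locks(&[1, 2, 1]), Err("AccountLoadedTwice"));
    }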
let result = &mut Ok(()); @@ -2930,7 +2934,7 @@ mod tests { prioritization_fee_cache, }; - DefaultTaskHandler::handle(result, timings, bank, tx, 0, handler_context); + DefaultTaskHandler::handle(result, timings, bank, &tx, 0, handler_context); assert_matches!(result, Err(TransactionError::AccountLoadedTwice)); } } From f65aebb10cb84cbcc383d7080f0ceabd9e4e5b7c Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 4 Oct 2024 15:00:33 -0400 Subject: [PATCH 441/529] Adds experimental support for accounts lt hash (#3060) --- Cargo.lock | 1 + accounts-db/src/accounts_db.rs | 102 ++++- accounts-db/src/accounts_hash.rs | 3 +- ledger-tool/src/args.rs | 4 + programs/sbf/Cargo.lock | 1 + runtime/Cargo.toml | 1 + runtime/src/bank.rs | 77 +++- runtime/src/bank/accounts_lt_hash.rs | 633 +++++++++++++++++++++++++++ validator/src/cli.rs | 6 + validator/src/main.rs | 2 + 10 files changed, 821 insertions(+), 9 deletions(-) create mode 100644 runtime/src/bank/accounts_lt_hash.rs diff --git a/Cargo.lock b/Cargo.lock index c399b81353fbb1..1db6f01f95c5a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7632,6 +7632,7 @@ name = "solana-runtime" version = "2.1.0" dependencies = [ "agave-transaction-view", + "ahash 0.8.10", "aquamarine", "arrayref", "assert_matches", diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 2ea02149263149..4ecae3e8959049 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -36,8 +36,8 @@ use { }, accounts_hash::{ AccountHash, AccountLtHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, - AccountsHasher, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, - IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash, + AccountsHasher, AccountsLtHash, CalcAccountsHashConfig, CalculateHashIntermediate, + HashStats, IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, ZeroLamportAccounts, ZERO_LAMPORT_ACCOUNT_HASH, ZERO_LAMPORT_ACCOUNT_LT_HASH, }, @@ -501,6 +501,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { test_skip_rewrites_but_include_in_bank_hash: false, storage_access: StorageAccess::Mmap, scan_filter_for_shrinking: ScanFilter::OnlyAbnormalWithVerify, + enable_experimental_accumulator_hash: false, }; pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), @@ -517,6 +518,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig test_skip_rewrites_but_include_in_bank_hash: false, storage_access: StorageAccess::Mmap, scan_filter_for_shrinking: ScanFilter::OnlyAbnormalWithVerify, + enable_experimental_accumulator_hash: false, }; pub type BinnedHashData = Vec>; @@ -612,6 +614,7 @@ pub struct AccountsDbConfig { pub test_partitioned_epoch_rewards: TestPartitionedEpochRewards, pub storage_access: StorageAccess, pub scan_filter_for_shrinking: ScanFilter, + pub enable_experimental_accumulator_hash: bool, } #[cfg(not(test))] @@ -1508,6 +1511,10 @@ pub struct AccountsDb { /// The latest full snapshot slot dictates how to handle zero lamport accounts latest_full_snapshot_slot: SeqLock>, + + /// Flag to indicate if the experimental accounts lattice hash is enabled. + /// (For R&D only; a feature-gate also exists to turn this on and make it a part of consensus.) 
+ pub is_experimental_accumulator_hash_enabled: AtomicBool, } #[derive(Debug, Default)] @@ -2477,6 +2484,8 @@ impl AccountsDb { // rayon needs a lot of stack const ACCOUNTS_STACK_SIZE: usize = 8 * 1024 * 1024; + let default_accounts_db_config = AccountsDbConfig::default(); + AccountsDb { create_ancient_storage: CreateAncientStorage::default(), verify_accounts_hash_in_bg: VerifyAccountsHashInBackground::default(), @@ -2541,6 +2550,9 @@ impl AccountsDb { epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), test_skip_rewrites_but_include_in_bank_hash: false, latest_full_snapshot_slot: SeqLock::new(None), + is_experimental_accumulator_hash_enabled: default_accounts_db_config + .enable_experimental_accumulator_hash + .into(), } } @@ -2587,6 +2599,8 @@ impl AccountsDb { accounts_update_notifier: Option, exit: Arc, ) -> Self { + let default_accounts_db_config = AccountsDbConfig::default(); + let accounts_index = AccountsIndex::new( accounts_db_config.as_mut().and_then(|x| x.index.take()), exit, @@ -2648,6 +2662,12 @@ impl AccountsDb { .map(|config| config.scan_filter_for_shrinking) .unwrap_or_default(); + let enable_experimental_accumulator_hash = accounts_db_config + .as_ref() + .map(|config| config.enable_experimental_accumulator_hash) + .unwrap_or(default_accounts_db_config.enable_experimental_accumulator_hash) + .into(); + let paths_is_empty = paths.is_empty(); let mut new = Self { paths, @@ -2671,6 +2691,7 @@ impl AccountsDb { test_skip_rewrites_but_include_in_bank_hash, storage_access, scan_filter_for_shrinking, + is_experimental_accumulator_hash_enabled: enable_experimental_accumulator_hash, ..Self::default_with_accounts_index( accounts_index, base_working_path, @@ -2737,6 +2758,18 @@ impl AccountsDb { .expect("Cluster type must be set at initialization") } + /// Returns if the experimental accounts lattice hash is enabled + pub fn is_experimental_accumulator_hash_enabled(&self) -> bool { + self.is_experimental_accumulator_hash_enabled + .load(Ordering::Acquire) + } + + /// Sets if the experimental accounts lattice hash is enabled + pub fn set_is_experimental_accumulator_hash_enabled(&self, is_enabled: bool) { + self.is_experimental_accumulator_hash_enabled + .store(is_enabled, Ordering::Release); + } + /// While scanning cleaning candidates obtain slots that can be /// reclaimed for each pubkey. In addition, if the pubkey is /// removed from the index, insert in pubkeys_removed_from_accounts_index. @@ -7147,6 +7180,71 @@ impl AccountsDb { (accounts_hash, total_lamports) } + /// Calculates the accounts lt hash + /// + /// Only intended to be called at startup (or by tests). + /// Only intended to be used while testing the experimental accumulator hash. + pub fn calculate_accounts_lt_hash_at_startup( + &self, + ancestors: &Ancestors, + startup_slot: Slot, + ) -> AccountsLtHash { + debug_assert!(self.is_experimental_accumulator_hash_enabled()); + + // This impl iterates over all the index bins in parallel, and computes the lt hash + // sequentially per bin. Then afterwards reduces to a single lt hash. + // This implementation is quite fast. Runtime is about 150 seconds on mnb as of 10/2/2024. + // The sequential implementation took about 6,275 seconds! + // A different parallel implementation that iterated over the bins *sequentially* and then + // hashed the accounts *within* a bin in parallel took about 600 seconds. That impl uses + // less memory, as only a single index bin is loaded into mem at a time. 
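The fold-then-reduce shape described in the comment above (and implemented just below) works because the lattice hash is commutative and associative: per-bin partial hashes can be combined in any order. The same pattern, modeled with rayon and wrapping addition over `u64` in place of `LtHash::mix_in`:

    use rayon::prelude::*;

    fn main() {
        // Each inner Vec models one accounts-index bin.
        let bins: Vec<Vec<u64>> = vec![vec![1, 2], vec![3], vec![4, 5, 6]];

        let total: u64 = bins
            .par_iter()
            // Mix every entry of a bin into a per-worker partial accumulator.
            .fold(
                || 0u64,
                |acc, bin| bin.iter().fold(acc, |a, x| a.wrapping_add(*x)),
            )
            // Combine the partials; order is irrelevant because the operation
            // commutes, which is what makes the parallel split safe.
            .reduce(|| 0u64, |a, b| a.wrapping_add(b));

        assert_eq!(total, 21);
    }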
+ let lt_hash = self + .accounts_index + .account_maps + .par_iter() + .fold( + LtHash::identity, + |mut accumulator_lt_hash, accounts_index_bin| { + for pubkey in accounts_index_bin.keys() { + let account_lt_hash = self + .accounts_index + .get_with_and_then( + &pubkey, + Some(ancestors), + Some(startup_slot), + false, + |(slot, account_info)| { + (!account_info.is_zero_lamport()).then(|| { + self.get_account_accessor( + slot, + &pubkey, + &account_info.storage_location(), + ) + .get_loaded_account(|loaded_account| { + Self::lt_hash_account(&loaded_account, &pubkey) + }) + // SAFETY: The index said this pubkey exists, so + // there must be an account to load. + .unwrap() + }) + }, + ) + .flatten(); + if let Some(account_lt_hash) = account_lt_hash { + accumulator_lt_hash.mix_in(&account_lt_hash.0); + } + } + accumulator_lt_hash + }, + ) + .reduce(LtHash::identity, |mut accum, elem| { + accum.mix_in(&elem); + accum + }); + + AccountsLtHash(lt_hash) + } + /// This is only valid to call from tests. /// run the accounts hash calculation and store the results pub fn update_accounts_hash_for_tests( diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 3e70f3d1fa0edb..d5c3beedacc6c5 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -1262,8 +1262,7 @@ pub const ZERO_LAMPORT_ACCOUNT_HASH: AccountHash = pub struct AccountLtHash(pub LtHash); /// The AccountLtHash for a zero-lamport account -pub const ZERO_LAMPORT_ACCOUNT_LT_HASH: AccountLtHash = - AccountLtHash(LtHash([0; LtHash::NUM_ELEMENTS])); +pub const ZERO_LAMPORT_ACCOUNT_LT_HASH: AccountLtHash = AccountLtHash(LtHash::identity()); /// Lattice hash of all accounts #[derive(Debug, Clone, Eq, PartialEq)] diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index d01c542465256f..e9149f7b9cab4b 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -127,6 +127,10 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { .possible_values(&["mmap", "file"]) .help("Access account storage using this method") .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_experimental_accumulator_hash") + .long("accounts-db-experimental-accumulator-hash") + .help("Enables the experimental accumulator hash") + .hidden(hidden_unless_forced()), ] .into_boxed_slice() } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 3990c2bd6468e8..69c5ae6de9e2f0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5972,6 +5972,7 @@ dependencies = [ name = "solana-runtime" version = "2.1.0" dependencies = [ + "ahash 0.8.10", "aquamarine", "arrayref", "base64 0.22.1", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 35e23a8a194a99..8062d3de3a4384 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -10,6 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +ahash = { workspace = true } aquamarine = { workspace = true } arrayref = { workspace = true } base64 = { workspace = true } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e232b2bc36bf89..5bf5dd2f75603a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -59,6 +59,8 @@ use { transaction_batch::{OwnedOrBorrowed, TransactionBatch}, verify_precompiles::verify_precompiles, }, + accounts_lt_hash::InitialStateOfAccount, + ahash::AHashMap, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, log::*, @@ -76,7 +78,8 @@ use { CalcAccountsHashDataSource, PubkeyHashAccount, VerifyAccountsHashAndLamportsConfig, }, 
accounts_hash::{ - AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, + AccountHash, AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats, + IncrementalAccountsHash, }, accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult}, accounts_partition::{self, Partition, PartitionIndex}, @@ -97,7 +100,8 @@ use { self as feature_set, remove_rounding_in_fee_calculation, reward_full_priority_fee, FeatureSet, }, - solana_measure::{measure::Measure, measure_time, measure_us}, + solana_lattice_hash::lt_hash::LtHash, + solana_measure::{meas_dur, measure::Measure, measure_time, measure_us}, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, }, @@ -161,7 +165,7 @@ use { transaction_execution_result::{ TransactionExecutionDetails, TransactionLoadedAccountsStats, }, - transaction_processing_callback::TransactionProcessingCallback, + transaction_processing_callback::{AccountState, TransactionProcessingCallback}, transaction_processing_result::{ ProcessedTransaction, TransactionProcessingResult, TransactionProcessingResultExtensions, @@ -216,6 +220,7 @@ struct VerifyAccountsHashConfig { store_hash_raw_data_for_debug: bool, } +mod accounts_lt_hash; mod address_lookup_table; pub mod bank_hash_details; mod builtin_programs; @@ -572,6 +577,8 @@ impl PartialEq for Bank { compute_budget: _, transaction_account_lock_limit: _, fee_structure: _, + accounts_lt_hash: _, + cache_for_accounts_lt_hash: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. @@ -902,6 +909,17 @@ pub struct Bank { /// This _field_ was needed to be DCOU-ed to avoid 2 locks per bank freezing... #[cfg(feature = "dev-context-only-utils")] hash_overrides: Arc>, + + /// The lattice hash of all accounts + /// + /// The value is only meaningful after freezing. + accounts_lt_hash: Mutex, + + /// A cache of *the initial state* of accounts modified in this slot + /// + /// The accounts lt hash needs both the initial and final state of each + /// account that was modified in this slot. Cache the initial state here. 
+ cache_for_accounts_lt_hash: RwLock>, } struct VoteWithStakeDelegations { @@ -1022,6 +1040,8 @@ impl Bank { fee_structure: FeeStructure::default(), #[cfg(feature = "dev-context-only-utils")] hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), + accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash([0xBAD1; LtHash::NUM_ELEMENTS]))), + cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()), }; bank.transaction_processor = @@ -1030,6 +1050,17 @@ impl Bank { let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; bank.accounts_data_size_initial = accounts_data_size_initial; + let accounts_lt_hash = { + let mut accounts_lt_hash = AccountsLtHash(LtHash::identity()); + let accounts = bank.get_all_accounts(false).unwrap(); + for account in accounts { + let account_lt_hash = AccountsDb::lt_hash_account(&account.1, &account.0); + accounts_lt_hash.0.mix_in(&account_lt_hash.0); + } + accounts_lt_hash + }; + *bank.accounts_lt_hash.get_mut().unwrap() = accounts_lt_hash; + bank } @@ -1283,6 +1314,8 @@ impl Bank { fee_structure: parent.fee_structure.clone(), #[cfg(feature = "dev-context-only-utils")] hash_overrides: parent.hash_overrides.clone(), + accounts_lt_hash: Mutex::new(parent.accounts_lt_hash.lock().unwrap().clone()), + cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()), }; let (_, ancestors_time_us) = measure_us!({ @@ -1661,6 +1694,8 @@ impl Bank { fee_structure: FeeStructure::default(), #[cfg(feature = "dev-context-only-utils")] hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), + accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash([0xBAD2; LtHash::NUM_ELEMENTS]))), + cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()), }; bank.transaction_processor = @@ -1681,6 +1716,17 @@ impl Bank { .fill_missing_sysvar_cache_entries(&bank); bank.rebuild_skipped_rewrites(); + let calculate_accounts_lt_hash_duration = bank.is_accounts_lt_hash_enabled().then(|| { + let (_, duration) = meas_dur!({ + *bank.accounts_lt_hash.get_mut().unwrap() = bank + .rc + .accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup(&bank.ancestors, bank.slot()); + }); + duration + }); + // Sanity assertions between bank snapshot and genesis config // Consider removing from serializable bank state // (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing @@ -1725,6 +1771,11 @@ impl Bank { stakes_accounts_load_duration.as_micros(), i64 ), + ( + "calculate_accounts_lt_hash_us", + calculate_accounts_lt_hash_duration.as_ref().map(Duration::as_micros), + Option + ), ); bank } @@ -5373,6 +5424,10 @@ impl Bank { hash = hashv(&[hash.as_ref(), epoch_accounts_hash.as_ref().as_ref()]); }; + let accounts_lt_hash_checksum = self + .is_accounts_lt_hash_enabled() + .then(|| self.update_accounts_lt_hash()); + let buf = self .hard_forks .read() @@ -5413,8 +5468,9 @@ impl Bank { .accounts_db .get_bank_hash_stats(slot) .expect("No bank hash stats were found for this bank, that should not be possible"); + info!( - "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}", + "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}{}", accounts_delta_hash.0, self.signature_count(), self.last_blockhash(), @@ -5423,7 +5479,12 @@ impl Bank { format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash.as_ref()) } else { "".to_string() - } + }, + if let Some(accounts_lt_hash_checksum) = 
accounts_lt_hash_checksum { + format!(", accounts_lt_hash checksum: {accounts_lt_hash_checksum}") + } else { + String::new() + }, ); hash } @@ -6710,6 +6771,12 @@ impl TransactionProcessingCallback for Bank { ); self.store_account_and_update_capitalization(program_id, &account); } + + fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) { + if self.is_accounts_lt_hash_enabled() { + self.inspect_account_for_accounts_lt_hash(address, &account_state, is_writable); + } + } } #[cfg(feature = "dev-context-only-utils")] diff --git a/runtime/src/bank/accounts_lt_hash.rs b/runtime/src/bank/accounts_lt_hash.rs new file mode 100644 index 00000000000000..372c65f08751a9 --- /dev/null +++ b/runtime/src/bank/accounts_lt_hash.rs @@ -0,0 +1,633 @@ +use { + super::Bank, + rayon::prelude::*, + solana_accounts_db::accounts_db::AccountsDb, + solana_lattice_hash::lt_hash::{Checksum as LtChecksum, LtHash}, + solana_measure::{meas_dur, measure::Measure}, + solana_sdk::{ + account::{accounts_equal, AccountSharedData}, + pubkey::Pubkey, + }, + solana_svm::transaction_processing_callback::AccountState, + std::{ops::AddAssign, time::Duration}, +}; + +impl Bank { + /// Returns if the accounts lt hash is enabled + pub fn is_accounts_lt_hash_enabled(&self) -> bool { + self.rc + .accounts + .accounts_db + .is_experimental_accumulator_hash_enabled() + } + + /// Updates the accounts lt hash + /// + /// When freezing a bank, we compute and update the accounts lt hash. + /// For each account modified in this bank, we: + /// - mix out its previous state, and + /// - mix in its current state + /// + /// Since this function is non-idempotent, it should only be called once per bank. + pub fn update_accounts_lt_hash(&self) -> LtChecksum { + debug_assert!(self.is_accounts_lt_hash_enabled()); + let delta_lt_hash = self.calculate_delta_lt_hash(); + let mut accounts_lt_hash = self.accounts_lt_hash.lock().unwrap(); + accounts_lt_hash.0.mix_in(&delta_lt_hash); + accounts_lt_hash.0.checksum() + } + + /// Calculates the lt hash *of only this slot* + /// + /// This can be thought of as akin to the accounts delta hash. + /// + /// For each account modified in this bank, we: + /// - mix out its previous state, and + /// - mix in its current state + /// + /// This function is idempotent, and may be called more than once. + fn calculate_delta_lt_hash(&self) -> LtHash { + debug_assert!(self.is_accounts_lt_hash_enabled()); + let measure_total = Measure::start(""); + let slot = self.slot(); + + // If we don't find the account in the cache, we need to go load it. + // We want the version of the account *before* it was written in this slot. + // Bank::ancestors *includes* this slot, so we need to remove it before loading. + let strictly_ancestors = { + let mut ancestors = self.ancestors.clone(); + ancestors.remove(&self.slot()); + ancestors + }; + + // Get all the accounts stored in this slot. + // Since this bank is in the middle of being frozen, it hasn't been rooted. + // That means the accounts should all be in the write cache, and loading will be fast. 
+ let (accounts_curr, time_loading_accounts_curr) = meas_dur!({ + self.rc + .accounts + .accounts_db + .get_pubkey_hash_account_for_slot(slot) + }); + let num_accounts_total = accounts_curr.len(); + + #[derive(Debug, Default)] + struct Stats { + num_cache_misses: usize, + num_accounts_unmodified: usize, + time_loading_accounts_prev: Duration, + time_comparing_accounts: Duration, + time_computing_hashes: Duration, + time_mixing_hashes: Duration, + } + impl AddAssign for Stats { + fn add_assign(&mut self, other: Self) { + self.num_cache_misses += other.num_cache_misses; + self.num_accounts_unmodified += other.num_accounts_unmodified; + self.time_loading_accounts_prev += other.time_loading_accounts_prev; + self.time_comparing_accounts += other.time_comparing_accounts; + self.time_computing_hashes += other.time_computing_hashes; + self.time_mixing_hashes += other.time_mixing_hashes; + } + } + + let do_calculate_delta_lt_hash = || { + // Work on chunks of 128 pubkeys, which is 4 KiB. + // And 4 KiB is likely the smallest a real page size will be. + // And a single page is likely the smallest size a disk read will actually read. + // This can be tuned larger, but likely not smaller. + const CHUNK_SIZE: usize = 128; + let cache_for_accounts_lt_hash = self.cache_for_accounts_lt_hash.read().unwrap(); + accounts_curr + .par_iter() + .fold_chunks( + CHUNK_SIZE, + || (LtHash::identity(), Stats::default()), + |mut accum, elem| { + let pubkey = &elem.pubkey; + let curr_account = &elem.account; + + // load the initial state of the account + let (initial_state_of_account, measure_load) = meas_dur!({ + match cache_for_accounts_lt_hash.get(pubkey) { + Some(initial_state_of_account) => initial_state_of_account.clone(), + None => { + accum.1.num_cache_misses += 1; + // If the initial state of the account is not in the accounts + // lt hash cache, it is likely this account was stored + // *outside* of transaction processing (e.g. as part of rent + // collection). Do not populate the read cache, as this + // account likely will not be accessed again soon. 
+ let account_slot = self + .rc + .accounts + .load_with_fixed_root_do_not_populate_read_cache( + &strictly_ancestors, + pubkey, + ); + match account_slot { + Some((account, _slot)) => { + InitialStateOfAccount::Alive(account) + } + None => InitialStateOfAccount::Dead, + } + } + } + }); + accum.1.time_loading_accounts_prev += measure_load; + + // mix out the previous version of the account + match initial_state_of_account { + InitialStateOfAccount::Dead => { + // nothing to do here + } + InitialStateOfAccount::Alive(prev_account) => { + let (are_accounts_equal, measure_is_equal) = + meas_dur!(accounts_equal(curr_account, &prev_account)); + accum.1.time_comparing_accounts += measure_is_equal; + if are_accounts_equal { + // this account didn't actually change, so skip it for lt hashing + accum.1.num_accounts_unmodified += 1; + return accum; + } + let (prev_lt_hash, measure_hashing) = + meas_dur!(AccountsDb::lt_hash_account(&prev_account, pubkey)); + let (_, measure_mixing) = + meas_dur!(accum.0.mix_out(&prev_lt_hash.0)); + accum.1.time_computing_hashes += measure_hashing; + accum.1.time_mixing_hashes += measure_mixing; + } + } + + // mix in the new version of the account + let (curr_lt_hash, measure_hashing) = + meas_dur!(AccountsDb::lt_hash_account(curr_account, pubkey)); + let (_, measure_mixing) = meas_dur!(accum.0.mix_in(&curr_lt_hash.0)); + accum.1.time_computing_hashes += measure_hashing; + accum.1.time_mixing_hashes += measure_mixing; + + accum + }, + ) + .reduce( + || (LtHash::identity(), Stats::default()), + |mut accum, elem| { + accum.0.mix_in(&elem.0); + accum.1 += elem.1; + accum + }, + ) + }; + let (delta_lt_hash, stats) = self + .rc + .accounts + .accounts_db + .thread_pool + .install(do_calculate_delta_lt_hash); + + let total_time = measure_total.end_as_duration(); + let num_accounts_modified = + num_accounts_total.saturating_sub(stats.num_accounts_unmodified); + datapoint_info!( + "bank-accounts_lt_hash", + ("slot", slot, i64), + ("num_accounts_total", num_accounts_total, i64), + ("num_accounts_modified", num_accounts_modified, i64), + ( + "num_accounts_unmodified", + stats.num_accounts_unmodified, + i64 + ), + ("num_cache_misses", stats.num_cache_misses, i64), + ("total_us", total_time.as_micros(), i64), + ( + "loading_accounts_curr_us", + time_loading_accounts_curr.as_micros(), + i64 + ), + ( + "par_loading_accounts_prev_us", + stats.time_loading_accounts_prev.as_micros(), + i64 + ), + ( + "par_comparing_accounts_us", + stats.time_comparing_accounts.as_micros(), + i64 + ), + ( + "par_computing_hashes_us", + stats.time_computing_hashes.as_micros(), + i64 + ), + ( + "par_mixing_hashes_us", + stats.time_mixing_hashes.as_micros(), + i64 + ), + ); + + delta_lt_hash + } + + /// Caches initial state of writeable accounts + /// + /// If a transaction account is writeable, cache its initial account state. + /// The initial state is needed when computing the accounts lt hash for the slot, and caching + /// the initial state saves us from having to look it up on disk later. + pub fn inspect_account_for_accounts_lt_hash( + &self, + address: &Pubkey, + account_state: &AccountState, + is_writable: bool, + ) { + debug_assert!(self.is_accounts_lt_hash_enabled()); + if !is_writable { + // if the account is not writable, then it cannot be modified; nothing to do here + return; + } + + // Only insert the account the *first* time we see it. + // We want to capture the value of the account *before* any modifications during this slot. 
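+ //
+ // A note on the locking pattern that follows (illustrative, describing
+ // the code below): checking under the read lock first keeps the common
+ // case, where the address is already cached, off the write lock. A
+ // simpler but slower equivalent would be to always take the write lock:
+ //
+ //     self.cache_for_accounts_lt_hash
+ //         .write()
+ //         .unwrap()
+ //         .entry(*address)
+ //         .or_insert_with(|| /* initial state of the account */);
+ //
+ // Two threads may both miss the read-lock check, but the
+ // `entry().or_insert_with()` call still keeps only the first insertion
+ // for an address.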
+ let is_in_cache = self
+ .cache_for_accounts_lt_hash
+ .read()
+ .unwrap()
+ .contains_key(address);
+ if !is_in_cache {
+ self.cache_for_accounts_lt_hash
+ .write()
+ .unwrap()
+ .entry(*address)
+ .or_insert_with(|| match account_state {
+ AccountState::Dead => InitialStateOfAccount::Dead,
+ AccountState::Alive(account) => {
+ InitialStateOfAccount::Alive((*account).clone())
+ }
+ });
+ }
+ }
+}
+
+/// The initial state of an account prior to being modified in this slot/transaction
+#[derive(Debug, Clone)]
+pub enum InitialStateOfAccount {
+ /// The account was initially dead
+ Dead,
+ /// The account was initially alive
+ Alive(AccountSharedData),
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ super::*,
+ crate::bank::tests::new_bank_from_parent_with_bank_forks,
+ solana_accounts_db::accounts::Accounts,
+ solana_sdk::{
+ account::{ReadableAccount as _, WritableAccount as _},
+ fee_calculator::FeeRateGovernor,
+ genesis_config::create_genesis_config,
+ native_token::LAMPORTS_PER_SOL,
+ pubkey::{self, Pubkey},
+ signature::Signer as _,
+ signer::keypair::Keypair,
+ },
+ std::{cmp, str::FromStr as _, sync::Arc},
+ };
+
+ #[test]
+ fn test_update_accounts_lt_hash() {
+ // Write to addresses 1, 2, and 5 in the first bank, so that in the second bank we have
+ // updates to these three accounts. Make address 2 go to zero (dead). Make addresses 1 and 3
+ // stay alive. Make address 5 unchanged. Ensure the updates are expected.
+ //
+ // 1: alive -> alive
+ // 2: alive -> dead
+ // 3: dead -> alive
+ // 4: dead -> dead
+ // 5: alive -> alive *unchanged*
+
+ let keypair1 = Keypair::new();
+ let keypair2 = Keypair::new();
+ let keypair3 = Keypair::new();
+ let keypair4 = Keypair::new();
+ let keypair5 = Keypair::new();
+
+ let (mut genesis_config, mint_keypair) =
+ create_genesis_config(123_456_789 * LAMPORTS_PER_SOL);
+ genesis_config.fee_rate_governor = FeeRateGovernor::new(0, 0);
+ let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
+ bank.rc
+ .accounts
+ .accounts_db
+ .set_is_experimental_accumulator_hash_enabled(true);
+
+ // ensure the accounts lt hash is enabled, otherwise this test doesn't actually do anything...
+ assert!(bank.is_accounts_lt_hash_enabled()); + + let amount = cmp::max( + bank.get_minimum_balance_for_rent_exemption(0), + LAMPORTS_PER_SOL, + ); + + // send lamports to accounts 1, 2, and 5 so they are alive, + // and so we'll have a delta in the next bank + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &mint_keypair, &keypair1.pubkey()) + .unwrap(); + bank.transfer(amount, &mint_keypair, &keypair2.pubkey()) + .unwrap(); + bank.transfer(amount, &mint_keypair, &keypair5.pubkey()) + .unwrap(); + + // manually freeze the bank to trigger update_accounts_lt_hash() to run + bank.freeze(); + let prev_accounts_lt_hash = bank.accounts_lt_hash.lock().unwrap().clone(); + + // save the initial values of the accounts to use for asserts later + let prev_mint = bank.get_account_with_fixed_root(&mint_keypair.pubkey()); + let prev_account1 = bank.get_account_with_fixed_root(&keypair1.pubkey()); + let prev_account2 = bank.get_account_with_fixed_root(&keypair2.pubkey()); + let prev_account3 = bank.get_account_with_fixed_root(&keypair3.pubkey()); + let prev_account4 = bank.get_account_with_fixed_root(&keypair4.pubkey()); + let prev_account5 = bank.get_account_with_fixed_root(&keypair5.pubkey()); + + assert!(prev_mint.is_some()); + assert!(prev_account1.is_some()); + assert!(prev_account2.is_some()); + assert!(prev_account3.is_none()); + assert!(prev_account4.is_none()); + assert!(prev_account5.is_some()); + + // These sysvars are also updated, but outside of transaction processing. This means they + // will not be in the accounts lt hash cache, but *will* be in the list of modified + // accounts. They must be included in the accounts lt hash. + let sysvars = [ + Pubkey::from_str("SysvarS1otHashes111111111111111111111111111").unwrap(), + Pubkey::from_str("SysvarC1ock11111111111111111111111111111111").unwrap(), + Pubkey::from_str("SysvarRecentB1ockHashes11111111111111111111").unwrap(), + Pubkey::from_str("SysvarS1otHistory11111111111111111111111111").unwrap(), + ]; + let prev_sysvar_accounts: Vec<_> = sysvars + .iter() + .map(|address| bank.get_account_with_fixed_root(address)) + .collect(); + + let bank = { + let slot = bank.slot() + 1; + new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), slot) + }; + + // send from account 2 to account 1; account 1 stays alive, account 2 ends up dead + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &keypair2, &keypair1.pubkey()) + .unwrap(); + + // send lamports to account 4, then turn around and send them to account 3 + // account 3 will be alive, and account 4 will end dead + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &mint_keypair, &keypair4.pubkey()) + .unwrap(); + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &keypair4, &keypair3.pubkey()) + .unwrap(); + + // store account 5 into this new bank, unchanged + bank.rc.accounts.store_cached( + ( + bank.slot(), + [(&keypair5.pubkey(), &prev_account5.clone().unwrap())].as_slice(), + ), + None, + ); + + // freeze the bank to trigger update_accounts_lt_hash() to run + bank.freeze(); + + let actual_delta_lt_hash = bank.calculate_delta_lt_hash(); + let post_accounts_lt_hash = bank.accounts_lt_hash.lock().unwrap().clone(); + let post_mint = bank.get_account_with_fixed_root(&mint_keypair.pubkey()); + let post_account1 = bank.get_account_with_fixed_root(&keypair1.pubkey()); + let post_account2 = bank.get_account_with_fixed_root(&keypair2.pubkey()); + let post_account3 = 
bank.get_account_with_fixed_root(&keypair3.pubkey()); + let post_account4 = bank.get_account_with_fixed_root(&keypair4.pubkey()); + let post_account5 = bank.get_account_with_fixed_root(&keypair5.pubkey()); + + assert!(post_mint.is_some()); + assert!(post_account1.is_some()); + assert!(post_account2.is_none()); + assert!(post_account3.is_some()); + assert!(post_account4.is_none()); + assert!(post_account5.is_some()); + + let post_sysvar_accounts: Vec<_> = sysvars + .iter() + .map(|address| bank.get_account_with_fixed_root(address)) + .collect(); + + let mut expected_delta_lt_hash = LtHash::identity(); + let mut expected_accounts_lt_hash = prev_accounts_lt_hash.clone(); + let mut updater = + |address: &Pubkey, prev: Option, post: Option| { + // if there was an alive account, mix out + if let Some(prev) = prev { + let prev_lt_hash = AccountsDb::lt_hash_account(&prev, address); + expected_delta_lt_hash.mix_out(&prev_lt_hash.0); + expected_accounts_lt_hash.0.mix_out(&prev_lt_hash.0); + } + + // mix in the new one + let post = post.unwrap_or_default(); + let post_lt_hash = AccountsDb::lt_hash_account(&post, address); + expected_delta_lt_hash.mix_in(&post_lt_hash.0); + expected_accounts_lt_hash.0.mix_in(&post_lt_hash.0); + }; + updater(&mint_keypair.pubkey(), prev_mint, post_mint); + updater(&keypair1.pubkey(), prev_account1, post_account1); + updater(&keypair2.pubkey(), prev_account2, post_account2); + updater(&keypair3.pubkey(), prev_account3, post_account3); + updater(&keypair4.pubkey(), prev_account4, post_account4); + updater(&keypair5.pubkey(), prev_account5, post_account5); + for (i, sysvar) in sysvars.iter().enumerate() { + updater( + sysvar, + prev_sysvar_accounts[i].clone(), + post_sysvar_accounts[i].clone(), + ); + } + + // now make sure the delta lt hashes match + let expected = expected_delta_lt_hash.checksum(); + let actual = actual_delta_lt_hash.checksum(); + assert_eq!( + expected, actual, + "delta_lt_hash, expected: {expected}, actual: {actual}", + ); + + // ...and the accounts lt hashes match too + let expected = expected_accounts_lt_hash.0.checksum(); + let actual = post_accounts_lt_hash.0.checksum(); + assert_eq!( + expected, actual, + "accounts_lt_hash, expected: {expected}, actual: {actual}", + ); + } + + #[test] + fn test_inspect_account_for_accounts_lt_hash() { + let accounts_db = AccountsDb::default_for_tests(); + accounts_db.set_is_experimental_accumulator_hash_enabled(true); + let accounts = Accounts::new(Arc::new(accounts_db)); + let bank = Bank::default_with_accounts(accounts); + + // ensure the accounts lt hash is enabled, otherwise this test doesn't actually do anything... 
+ assert!(bank.is_accounts_lt_hash_enabled()); + + // the cache should start off empty + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 0); + + // ensure non-writable accounts are *not* added to the cache + bank.inspect_account_for_accounts_lt_hash( + &Pubkey::new_unique(), + &AccountState::Dead, + false, + ); + bank.inspect_account_for_accounts_lt_hash( + &Pubkey::new_unique(), + &AccountState::Alive(&AccountSharedData::default()), + false, + ); + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 0); + + // ensure *new* accounts are added to the cache + let address = Pubkey::new_unique(); + bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Dead, true); + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 1); + assert!(bank + .cache_for_accounts_lt_hash + .read() + .unwrap() + .contains_key(&address)); + + // ensure *existing* accounts are added to the cache + let address = Pubkey::new_unique(); + let initial_lamports = 123; + let mut account = AccountSharedData::new(initial_lamports, 0, &Pubkey::default()); + bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Alive(&account), true); + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 2); + if let InitialStateOfAccount::Alive(cached_account) = bank + .cache_for_accounts_lt_hash + .read() + .unwrap() + .get(&address) + .unwrap() + { + assert_eq!(*cached_account, account); + } else { + panic!("wrong initial state for account"); + }; + + // ensure if an account is modified multiple times that we only cache the *first* one + let updated_lamports = account.lamports() + 1; + account.set_lamports(updated_lamports); + bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Alive(&account), true); + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 2); + if let InitialStateOfAccount::Alive(cached_account) = bank + .cache_for_accounts_lt_hash + .read() + .unwrap() + .get(&address) + .unwrap() + { + assert_eq!(cached_account.lamports(), initial_lamports); + } else { + panic!("wrong initial state for account"); + }; + + // and ensure multiple updates are handled correctly when the account is initially dead + { + let address = Pubkey::new_unique(); + bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Dead, true); + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 3); + match bank + .cache_for_accounts_lt_hash + .read() + .unwrap() + .get(&address) + .unwrap() + { + InitialStateOfAccount::Dead => { /* this is expected, nothing to do here*/ } + _ => panic!("wrong initial state for account"), + }; + + bank.inspect_account_for_accounts_lt_hash( + &address, + &AccountState::Alive(&AccountSharedData::default()), + true, + ); + assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 3); + match bank + .cache_for_accounts_lt_hash + .read() + .unwrap() + .get(&address) + .unwrap() + { + InitialStateOfAccount::Dead => { /* this is expected, nothing to do here*/ } + _ => panic!("wrong initial state for account"), + }; + } + } + + #[test] + fn test_calculate_accounts_lt_hash_at_startup() { + let (genesis_config, mint_keypair) = create_genesis_config(123_456_789 * LAMPORTS_PER_SOL); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + bank.rc + .accounts + .accounts_db + .set_is_experimental_accumulator_hash_enabled(true); + + // ensure the accounts lt hash is enabled, otherwise this test doesn't actually do anything... 
+ assert!(bank.is_accounts_lt_hash_enabled()); + + let amount = cmp::max( + bank.get_minimum_balance_for_rent_exemption(0), + LAMPORTS_PER_SOL, + ); + + // create some banks with some modified accounts so that there are stored accounts + // (note: the number of banks and transfers are arbitrary) + for _ in 0..7 { + let slot = bank.slot() + 1; + bank = + new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), slot); + for _ in 0..13 { + bank.register_unique_recent_blockhash_for_test(); + // note: use a random pubkey here to ensure accounts + // are spread across all the index bins + bank.transfer(amount, &mint_keypair, &pubkey::new_rand()) + .unwrap(); + } + bank.freeze(); + } + let expected_accounts_lt_hash = bank.accounts_lt_hash.lock().unwrap().clone(); + + // root the bank and flush the accounts write cache to disk + // (this more accurately simulates startup, where accounts are in storages on disk) + bank.squash(); + bank.force_flush_accounts_cache(); + + // call the fn that calculates the accounts lt hash at startup, then ensure it matches + let calculated_accounts_lt_hash = bank + .rc + .accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup(&bank.ancestors, bank.slot()); + + let expected = expected_accounts_lt_hash.0.checksum(); + let actual = calculated_accounts_lt_hash.0.checksum(); + assert_eq!(expected, actual, "expected: {expected}, actual: {actual}"); + } +} diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 5feba2f801a5e1..8895f85cae95e2 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1405,6 +1405,12 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { ) .hidden(hidden_unless_forced()), ) + .arg( + Arg::with_name("accounts_db_experimental_accumulator_hash") + .long("accounts-db-experimental-accumulator-hash") + .help("Enables the experimental accumulator hash") + .hidden(hidden_unless_forced()), + ) .arg( Arg::with_name("accounts_index_scan_results_limit_mb") .long("accounts-index-scan-results-limit-mb") diff --git a/validator/src/main.rs b/validator/src/main.rs index 0a932d8045490c..c01f6a1c2c2507 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1301,6 +1301,8 @@ pub fn main() { .is_present("accounts_db_test_skip_rewrites"), storage_access, scan_filter_for_shrinking, + enable_experimental_accumulator_hash: matches + .is_present("accounts_db_experimental_accumulator_hash"), ..AccountsDbConfig::default() }; From a07da926926417c893ce3f68e04a7f2d5047d36c Mon Sep 17 00:00:00 2001 From: dmakarov Date: Fri, 4 Oct 2024 15:01:51 -0400 Subject: [PATCH 442/529] Add a custom debug formatter implementation for RollingBitField (#3078) --- accounts-db/src/rolling_bit_field.rs | 78 +++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff --git a/accounts-db/src/rolling_bit_field.rs b/accounts-db/src/rolling_bit_field.rs index 477257fc8b1529..ddee90687470fb 100644 --- a/accounts-db/src/rolling_bit_field.rs +++ b/accounts-db/src/rolling_bit_field.rs @@ -9,7 +9,7 @@ use { }; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct RollingBitField { max_width: u64, min: u64, @@ -38,6 +38,60 @@ impl PartialEq for RollingBitField { } } +impl std::fmt::Debug for RollingBitField { + /// Custom debug formatter that outputs the possibly long and + /// sparse vector of bits in a compact representation, where each + /// sequence of equal values is compacted into value;length pair + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut bits = String::from("["); + let mut prev = self.bits[0]; + bits.push_str(&format!("{}", prev)); + let mut index = 1; + while index < self.bits.len() { + if self.bits[index] != prev { + prev = self.bits[index]; + break; + } + index += 1; + } + if index > 1 { + bits.push_str(&format!(";{}", index)); + } + if index < self.bits.len() { + bits.push_str(&format!(", {}", prev)); + } + let mut count = 0; + while index < self.bits.len() { + if self.bits[index] != prev { + if count > 1 { + bits.push_str(&format!(";{}", count)); + } + count = 0; + prev = self.bits[index]; + bits.push_str(&format!(", {}", prev)); + } + count += 1; + index += 1; + } + if count > 1 { + bits.push_str(&format!(";{}", count)); + } + bits.push(']'); + // The order of the `count` and `bits` fields is changed on + // purpose so that possibly very long output of `bits` doesn't + // make it more difficult to read the value of the `count` + // field. + f.debug_struct("RollingBitField") + .field("max_width", &self.max_width) + .field("min", &self.min) + .field("max_exclusive", &self.max_exclusive) + .field("count", &self.count) + .field("bits", &bits) + .field("excess", &self.excess) + .finish() + } +} + /// functionally similar to a hashset /// Relies on there being a sliding window of key values. The key values continue to increase. /// Old key values are removed from the lesser values and do not accumulate. @@ -1002,4 +1056,26 @@ pub mod tests { assert_eq!(count, count2); } } + + #[test] + fn test_debug_formatter() { + let mut bitfield = RollingBitField::new(1); + assert_eq!("RollingBitField { max_width: 1, min: 0, max_exclusive: 0, count: 0, bits: \"[false]\", excess: {} }", format!("{bitfield:?}")); + bitfield.insert(0); + assert_eq!("RollingBitField { max_width: 1, min: 0, max_exclusive: 1, count: 1, bits: \"[true]\", excess: {} }", format!("{bitfield:?}")); + let mut bitfield = RollingBitField::new(2); + assert_eq!("RollingBitField { max_width: 2, min: 0, max_exclusive: 0, count: 0, bits: \"[false;2]\", excess: {} }", format!("{bitfield:?}")); + bitfield.insert(0); + assert_eq!("RollingBitField { max_width: 2, min: 0, max_exclusive: 1, count: 1, bits: \"[true, false]\", excess: {} }", format!("{bitfield:?}")); + bitfield.insert(1); + assert_eq!("RollingBitField { max_width: 2, min: 0, max_exclusive: 2, count: 2, bits: \"[true;2]\", excess: {} }", format!("{bitfield:?}")); + let mut bitfield = RollingBitField::new(4096); + assert_eq!("RollingBitField { max_width: 4096, min: 0, max_exclusive: 0, count: 0, bits: \"[false;4096]\", excess: {} }", format!("{bitfield:?}")); + bitfield.insert(4095); + assert_eq!("RollingBitField { max_width: 4096, min: 4095, max_exclusive: 4096, count: 1, bits: \"[false;4095, true]\", excess: {} }", format!("{bitfield:?}")); + bitfield.clear(); + bitfield.insert(2); + bitfield.insert(3); + assert_eq!("RollingBitField { max_width: 4096, min: 2, max_exclusive: 4, count: 2, bits: \"[false;2, true;2, false;4092]\", excess: {} }", format!("{bitfield:?}")); + } } From e5a67dfa9699e6d3164e98e85836cc93ffd22f92 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Fri, 4 Oct 2024 15:36:17 -0400 Subject: [PATCH 443/529] SVM: add a sample stand-alone application based on SVM (#2217) Add an extended example to the SVM crate - json-rpc-server is a server application that implements several Solana Json RPC commands, in particular simulateTransaction command to run a transaction in minimal Solana run-time environment required to use SVM. 
- json-rpc-client is a sample client application that submits
  simulateTransaction requests to the json-rpc server.
- json-rpc-program is the source code of an on-chain program executed
  in the context of the simulateTransaction command submitted by the
  client program.
---
 Cargo.lock | 34 +
 Cargo.toml | 5 +-
 accounts-db/src/accounts_db.rs | 2 +-
 svm/Cargo.toml | 41 +
 svm/examples/json-rpc/README.md | 31 +
 svm/examples/json-rpc/client/src/client.rs | 79 ++
 svm/examples/json-rpc/client/src/main.rs | 48 +
 svm/examples/json-rpc/client/src/utils.rs | 106 ++
 svm/examples/json-rpc/config.yml | 7 +
 svm/examples/json-rpc/program/Cargo.toml | 19 +
 svm/examples/json-rpc/program/src/lib.rs | 34 +
 svm/examples/json-rpc/server/src/main.rs | 76 ++
 .../json-rpc/server/src/rpc_process.rs | 928 ++++++++++++++++++
 .../json-rpc/server/src/rpc_service.rs | 111 +++
 .../json-rpc/server/src/svm_bridge.rs | 272 +++++
 svm/examples/json-rpc/test.json | 1 +
 16 files changed, 1792 insertions(+), 2 deletions(-)
 create mode 100644 svm/examples/json-rpc/README.md
 create mode 100644 svm/examples/json-rpc/client/src/client.rs
 create mode 100644 svm/examples/json-rpc/client/src/main.rs
 create mode 100644 svm/examples/json-rpc/client/src/utils.rs
 create mode 100644 svm/examples/json-rpc/config.yml
 create mode 100644 svm/examples/json-rpc/program/Cargo.toml
 create mode 100644 svm/examples/json-rpc/program/src/lib.rs
 create mode 100644 svm/examples/json-rpc/server/src/main.rs
 create mode 100644 svm/examples/json-rpc/server/src/rpc_process.rs
 create mode 100644 svm/examples/json-rpc/server/src/rpc_service.rs
 create mode 100644 svm/examples/json-rpc/server/src/svm_bridge.rs
 create mode 100644 svm/examples/json-rpc/test.json

diff --git a/Cargo.lock b/Cargo.lock
index 1db6f01f95c5a7..982eb8dff236b4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2808,6 +2808,15 @@ dependencies = [
 "hmac 0.8.1",
 ]

+[[package]]
+name = "home"
+version = "0.5.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
+dependencies = [
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "http"
 version = "0.2.12"
@@ -8059,8 +8068,19 @@ name = "solana-svm"
 version = "2.1.0"
 dependencies = [
 "assert_matches",
+ "base64 0.22.1",
 "bincode",
+ "borsh 1.5.1",
+ "bs58",
+ "clap 2.33.3",
+ "crossbeam-channel",
+ "env_logger",
+ "home",
 "itertools 0.12.1",
+ "jsonrpc-core",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "jsonrpc-http-server",
 "lazy_static",
 "libsecp256k1",
 "log",
@@ -8070,8 +8090,12 @@ dependencies = [
 "rand 0.8.5",
 "serde",
 "serde_derive",
+ "serde_json",
 "shuttle",
+ "solana-account-decoder",
+ "solana-accounts-db",
 "solana-bpf-loader-program",
+ "solana-client",
 "solana-compute-budget",
 "solana-compute-budget-program",
 "solana-feature-set",
@@ -8082,7 +8106,11 @@
 "solana-log-collector",
 "solana-logger",
 "solana-measure",
+ "solana-perf",
+ "solana-program",
 "solana-program-runtime",
+ "solana-rpc-client",
+ "solana-rpc-client-api",
 "solana-runtime-transaction",
 "solana-sdk",
 "solana-svm",
@@ -8091,10 +8119,16 @@
 "solana-svm-transaction",
 "solana-system-program",
 "solana-timings",
+ "solana-transaction-status",
 "solana-type-overrides",
+ "solana-version",
 "solana-vote",
+ "spl-token-2022",
 "test-case",
 "thiserror",
+ "tokio",
+ "tokio-util 0.7.12",
+ "yaml-rust",
 ]

 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 9d6d1fd82984d1..c1cfbfc2f8af82 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -164,7 +164,10 @@ members = [
 "zk-token-sdk",
] -exclude = ["programs/sbf", "svm/tests/example-programs"] +exclude = [ + "programs/sbf", + "svm/tests/example-programs", +] resolver = "2" diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4ecae3e8959049..a038a7449ecf83 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -3206,7 +3206,7 @@ impl AccountsDb { .map(|dirty_store_chunk| { let mut oldest_dirty_slot = max_slot_inclusive.saturating_add(1); dirty_store_chunk.iter().for_each(|(slot, store)| { - if slot < &oldest_non_ancient_slot { + if *slot < oldest_non_ancient_slot { dirty_ancient_stores.fetch_add(1, Ordering::Relaxed); } oldest_dirty_slot = oldest_dirty_slot.min(*slot); diff --git a/svm/Cargo.toml b/svm/Cargo.toml index be94989d892121..a6edc55fc1fca2 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -46,20 +46,46 @@ name = "solana_svm" [dev-dependencies] assert_matches = { workspace = true } +base64 = { workspace = true } bincode = { workspace = true } +borsh = { version = "1.5.1", features = ["derive"] } +bs58 = { workspace = true } +clap = { workspace = true } +crossbeam-channel = { workspace = true } +env_logger = { workspace = true } +home = "0.5" +jsonrpc-core = { workspace = true } +jsonrpc-core-client = { workspace = true } +jsonrpc-derive = { workspace = true } +jsonrpc-http-server = { workspace = true } lazy_static = { workspace = true } libsecp256k1 = { workspace = true } prost = { workspace = true } rand = { workspace = true } +serde_derive = { workspace = true } +serde_json = { workspace = true } shuttle = { workspace = true } +solana-account-decoder = { workspace = true } +solana-accounts-db = { workspace = true } solana-bpf-loader-program = { workspace = true } +solana-client = { workspace = true } solana-compute-budget-program = { workspace = true } solana-logger = { workspace = true } +solana-perf = { workspace = true } +solana-program = { workspace = true } +solana-rpc-client = { workspace = true } +solana-rpc-client-api = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-svm = { path = ".", features = ["dev-context-only-utils"] } solana-svm-conformance = { workspace = true } +solana-transaction-status = { workspace = true } +solana-version = { workspace = true } +spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } test-case = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true, features = ["codec", "compat"] } +yaml-rust = "0.4" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -80,5 +106,20 @@ shuttle-test = [ "solana-loader-v4-program/shuttle-test", ] +[[example]] +name = "json-rpc-server" +path = "examples/json-rpc/server/src/main.rs" +crate-type = ["bin"] + +[[example]] +name = "json-rpc-client" +path = "examples/json-rpc/client/src/main.rs" +crate-type = ["bin"] + +[[example]] +name = "json-rpc-example-program" +path = "examples/json-rpc/program/src/lib.rs" +crate-type = ["cdylib", "lib"] + [lints] workspace = true diff --git a/svm/examples/json-rpc/README.md b/svm/examples/json-rpc/README.md new file mode 100644 index 00000000000000..6714758efdce48 --- /dev/null +++ b/svm/examples/json-rpc/README.md @@ -0,0 +1,31 @@ +This is an example application using SVM to implement a tiny subset of +Solana RPC protocol for the purpose of simulating transaction +execution without having to use the entire Solana Runtime. 
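+
+The server exposes just enough of the RPC surface that a stock
+`RpcClient` from the `solana-client` crate can talk to it. As a rough
+sketch of the round trip (illustrative only; it assumes the server is
+already running on the default `http://127.0.0.1:8899`, and the
+transfer transaction is arbitrary, chosen just to have something to
+simulate):
+
+```
+use solana_client::rpc_client::RpcClient;
+use solana_sdk::{
+    commitment_config::CommitmentConfig, message::Message, pubkey::Pubkey,
+    signature::{Keypair, Signer}, system_instruction, transaction::Transaction,
+};
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Connect to the simulation server instead of a real validator.
+    let rpc = RpcClient::new_with_commitment(
+        "http://127.0.0.1:8899".to_string(),
+        CommitmentConfig::confirmed(),
+    );
+    // Build any signed transaction; a one-lamport transfer is the simplest.
+    let payer = Keypair::new();
+    let ix = system_instruction::transfer(&payer.pubkey(), &Pubkey::new_unique(), 1);
+    let message = Message::new(&[ix], Some(&payer.pubkey()));
+    let tx = Transaction::new(&[&payer], message, rpc.get_latest_blockhash()?);
+    // Ask the server to simulate the transaction and print the result.
+    println!("{:?}", rpc.simulate_transaction(&tx)?);
+    Ok(())
+}
+```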
+
+The example consists of two host applications:
+- json-rpc-server -- the RPC server that accepts incoming RPC requests
+  and performs transaction simulation, sending back the results,
+- json-rpc-client -- the RPC client program that sends transactions to
+  the json-rpc-server for simulation,
+
+and
+
+- json-rpc-program is the source code of the on-chain program that is
+  executed in a transaction sent by json-rpc-client.
+
+To run the example, compile the json-rpc-program with the `cargo
+build-sbf` command. Using solana-test-validator, create a ledger, or
+use an existing one, and deploy the compiled program to store it in
+the ledger. Using agave-ledger-tool, dump the ledger accounts to a file,
+e.g. `accounts.json`. Now start the json-rpc-server, e.g.
+```
+cargo run --manifest-path json-rpc-server/Cargo.toml -- -l test-ledger -a accounts.json
+```
+
+Finally, run the client program.
+```
+cargo run --manifest-path json-rpc-client/Cargo.toml -- -C config.yml -k json-rpc-program/target/deploy/helloworld-keypair.json -u localhost
+```
+
+The client will communicate with the server and print the responses it
+receives from the server.
diff --git a/svm/examples/json-rpc/client/src/client.rs b/svm/examples/json-rpc/client/src/client.rs
new file mode 100644
index 00000000000000..4f9b7fe9e12ce7
--- /dev/null
+++ b/svm/examples/json-rpc/client/src/client.rs
@@ -0,0 +1,79 @@
+use {
+ crate::utils,
+ solana_client::rpc_client::RpcClient,
+ solana_sdk::{
+ commitment_config::CommitmentConfig,
+ instruction::{AccountMeta, Instruction},
+ message::Message,
+ signature::Signer,
+ signer::keypair::{read_keypair_file, Keypair},
+ transaction::Transaction,
+ },
+};
+
+/// Establishes an RPC connection with the simulation server.
+/// Information about the server is gleaned from the config file `config.yml`.
+pub fn establish_connection(url: &Option<&str>, config: &Option<&str>) -> utils::Result<RpcClient> {
+ let rpc_url = match url {
+ Some(x) => {
+ if *x == "localhost" {
+ "http://localhost:8899".to_string()
+ } else {
+ String::from(*x)
+ }
+ }
+ None => utils::get_rpc_url(config)?,
+ };
+ Ok(RpcClient::new_with_commitment(
+ rpc_url,
+ CommitmentConfig::confirmed(),
+ ))
+}
+
+/// Loads keypair information from the file located at KEYPAIR_PATH
+/// and then verifies that the loaded keypair information corresponds
+/// to an executable account via CONNECTION. Failure to read the
+/// keypair, or a keypair that does not correspond to an executable
+/// account, will result in an error being returned.
+pub fn get_program(keypair_path: &str, connection: &RpcClient) -> utils::Result<Keypair> {
+ let program_keypair = read_keypair_file(keypair_path).map_err(|e| {
+ utils::Error::InvalidConfig(format!(
+ "failed to read program keypair file ({}): ({})",
+ keypair_path, e
+ ))
+ })?;
+
+ let program_info = connection.get_account(&program_keypair.pubkey())?;
+ if !program_info.executable {
+ return Err(utils::Error::InvalidConfig(format!(
+ "program with keypair ({}) is not executable",
+ keypair_path
+ )));
+ }
+
+ Ok(program_keypair)
+}
+
+pub fn say_hello(player: &Keypair, program: &Keypair, connection: &RpcClient) -> utils::Result<()> {
+ let greeting_pubkey = utils::get_greeting_public_key(&player.pubkey(), &program.pubkey())?;
+ println!("greeting pubkey {greeting_pubkey:?}");
+
+ // Submit an instruction to the chain which tells the program to
+ // run. We pass the account that we want the results to be stored
+ // in as one of the account arguments, which the program will
+ // handle.
+
+ let data = [1u8];
+ let instruction = Instruction::new_with_bytes(
+ program.pubkey(),
+ &data,
+ vec![AccountMeta::new(greeting_pubkey, false)],
+ );
+ let message = Message::new(&[instruction], Some(&player.pubkey()));
+ let transaction = Transaction::new(&[player], message, connection.get_latest_blockhash()?);
+
+ let response = connection.simulate_transaction(&transaction)?;
+ println!("{:?}", response);
+
+ Ok(())
+}
diff --git a/svm/examples/json-rpc/client/src/main.rs b/svm/examples/json-rpc/client/src/main.rs
new file mode 100644
index 00000000000000..c27a903ab249bb
--- /dev/null
+++ b/svm/examples/json-rpc/client/src/main.rs
@@ -0,0 +1,48 @@
+use clap::{crate_description, crate_name, crate_version, App, Arg};
+
+mod client;
+mod utils;
+
+fn main() {
+ let version = crate_version!().to_string();
+ let args = std::env::args().collect::<Vec<_>>();
+ let matches = App::new(crate_name!())
+ .about(crate_description!())
+ .version(version.as_str())
+ .arg(
+ Arg::with_name("config")
+ .long("config")
+ .short("C")
+ .takes_value(true)
+ .value_name("CONFIG")
+ .help("Config filepath"),
+ )
+ .arg(
+ Arg::with_name("keypair")
+ .long("keypair")
+ .short("k")
+ .takes_value(true)
+ .value_name("KEYPAIR")
+ .help("Filepath or URL to a keypair"),
+ )
+ .arg(
+ Arg::with_name("url")
+ .long("url")
+ .short("u")
+ .takes_value(true)
+ .value_name("URL_OR_MONIKER")
+ .help("URL for JSON RPC Server"),
+ )
+ .get_matches_from(args);
+ let config = matches.value_of("config");
+ let keypair = matches.value_of("keypair").unwrap();
+ let url = matches.value_of("url");
+ let connection = client::establish_connection(&url, &config).unwrap();
+ println!(
+ "Connected to Simulation server running version ({}).",
+ connection.get_version().unwrap()
+ );
+ let player = utils::get_player(&config).unwrap();
+ let program = client::get_program(keypair, &connection).unwrap();
+ client::say_hello(&player, &program, &connection).unwrap();
+}
diff --git a/svm/examples/json-rpc/client/src/utils.rs b/svm/examples/json-rpc/client/src/utils.rs
new file mode 100644
index 00000000000000..2062c2cacc86e2
--- /dev/null
+++ b/svm/examples/json-rpc/client/src/utils.rs
@@ -0,0 +1,106 @@
+use {
+ borsh::{BorshDeserialize, BorshSerialize},
+ solana_sdk::{
+ pubkey::Pubkey,
+ signer::keypair::{read_keypair_file, Keypair},
+ },
+ thiserror::Error,
+ yaml_rust::YamlLoader,
+};
+
+#[derive(Error, Debug)]
+pub enum Error {
+ #[error("failed to read solana config file: ({0})")]
+ ConfigRead(std::io::Error),
+ #[error("failed to parse solana config file: ({0})")]
+ ConfigParse(#[from] yaml_rust::ScanError),
+ #[error("invalid config: ({0})")]
+ InvalidConfig(String),
+
+ #[error("solana client error: ({0})")]
+ Client(#[from] solana_client::client_error::ClientError),
+
+ #[error("error in public key derivation: ({0})")]
+ KeyDerivation(#[from] solana_sdk::pubkey::PubkeyError),
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// The schema for greeting storage in greeting accounts. This is what
+/// is serialized into the account and updated when hellos are sent.
+#[derive(BorshSerialize, BorshDeserialize)]
+struct GreetingSchema {
+ counter: u32,
+}
+
+/// Parses and returns the Solana yaml config on the system.
+pub fn get_config(config: &Option<&str>) -> Result<yaml_rust::Yaml> {
+ let path = match config {
+ Some(path) => std::path::PathBuf::from(path),
+ None => match home::home_dir() {
+ Some(mut path) => {
+ path.push(".config/solana/cli/config.yml");
+ path
+ }
+ None => {
+ return Err(Error::ConfigRead(std::io::Error::new(
+ std::io::ErrorKind::NotFound,
+ "failed to locate homedir and thus can not locate solana config",
+ )));
+ }
+ },
+ };
+ let config = std::fs::read_to_string(path).map_err(Error::ConfigRead)?;
+ let mut config = YamlLoader::load_from_str(&config)?;
+ match config.len() {
+ 1 => Ok(config.remove(0)),
+ l => Err(Error::InvalidConfig(format!(
+ "expected one yaml document got ({})",
+ l
+ ))),
+ }
+}
+
+/// Gets the RPC url for the cluster that this machine is configured
+/// to communicate with.
+pub fn get_rpc_url(config: &Option<&str>) -> Result<String> {
+ let config = get_config(config)?;
+ match config["json_rpc_url"].as_str() {
+ Some(s) => Ok(s.to_string()),
+ None => Err(Error::InvalidConfig(
+ "missing `json_rpc_url` field".to_string(),
+ )),
+ }
+}
+
+/// Gets the "player" or local solana wallet that has been configured
+/// on the machine.
+pub fn get_player(config: &Option<&str>) -> Result<Keypair> {
+ let config = get_config(config)?;
+ if let Some(path) = config["keypair_path"].as_str() {
+ read_keypair_file(path).map_err(|e| {
+ Error::InvalidConfig(format!("failed to read keypair file ({}): ({})", path, e))
+ })
+ } else {
+ Err(Error::InvalidConfig(
+ "missing `keypair_path` field".to_string(),
+ ))
+ }
+}
+
+/// Gets the seed used to generate greeting accounts. If you'd like to
+/// force this program to generate a new greeting account and thus
+/// restart the counter you can change this value.
+pub fn get_greeting_seed() -> &'static str {
+ "hello"
+}
+
+/// Derives and returns the greeting account public key for a given
+/// PLAYER, PROGRAM combination.
+pub fn get_greeting_public_key(player: &Pubkey, program: &Pubkey) -> Result<Pubkey> {
+ Ok(Pubkey::create_with_seed(
+ player,
+ get_greeting_seed(),
+ program,
+ )?)
+}
diff --git a/svm/examples/json-rpc/config.yml b/svm/examples/json-rpc/config.yml
new file mode 100644
index 00000000000000..a7e4a4b1226dba
--- /dev/null
+++ b/svm/examples/json-rpc/config.yml
@@ -0,0 +1,7 @@
+---
+json_rpc_url: http://127.0.0.1:8899
+websocket_url: ''
+keypair_path: svm/examples/test.json
+address_labels:
+ '11111111111111111111111111111111': System Program
+commitment: confirmed
diff --git a/svm/examples/json-rpc/program/Cargo.toml b/svm/examples/json-rpc/program/Cargo.toml
new file mode 100644
index 00000000000000..c5c9fdd4ea5d59
--- /dev/null
+++ b/svm/examples/json-rpc/program/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "json-rpc-example-program"
+version = "2.1.0"
+edition = "2021"
+
+[features]
+# This was needed for ci
+dummy-for-ci-check = []
+frozen-abi = []
+
+[dependencies]
+borsh = "0.9"
+solana-program = { path = "../../../../sdk/program", version = "=2.1.0" }
+
+[lib]
+name = "program"
+crate-type = ["cdylib", "lib"]
+
+[workspace]
diff --git a/svm/examples/json-rpc/program/src/lib.rs b/svm/examples/json-rpc/program/src/lib.rs
new file mode 100644
index 00000000000000..22d425f27fbbc4
--- /dev/null
+++ b/svm/examples/json-rpc/program/src/lib.rs
@@ -0,0 +1,34 @@
+use {
+ borsh::{BorshDeserialize, BorshSerialize},
+ solana_program::{
+ account_info::{next_account_info, AccountInfo},
+ entrypoint, msg,
+ pubkey::Pubkey,
+ },
+};
+
+/// The type of state managed by this program.
The type defined here +/// must match the `GreetingAccount` type defined by the client. +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct GreetingAccount { + /// The number of greetings that have been sent to this account. + pub counter: u32, +} + +entrypoint!(process_instruction); + +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> entrypoint::ProgramResult { + // Get the account that stores greeting count information. + let accounts_iter = &mut accounts.iter(); + let account = next_account_info(accounts_iter)?; + + msg!("account.owner"); + account.owner.log(); + msg!("program_id"); + program_id.log(); + Ok(()) +} diff --git a/svm/examples/json-rpc/server/src/main.rs b/svm/examples/json-rpc/server/src/main.rs new file mode 100644 index 00000000000000..93f18ebdde54b9 --- /dev/null +++ b/svm/examples/json-rpc/server/src/main.rs @@ -0,0 +1,76 @@ +#![allow(clippy::arithmetic_side_effects)] + +use { + clap::{value_t_or_exit, App, Arg}, + std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time::Duration, + }, +}; + +pub mod rpc_process; +pub mod rpc_service; +pub mod svm_bridge; + +fn main() { + env_logger::init(); + let matches = App::new("solana-json-rpc") + .version("0.1.0") + .author("Agave Team ") + .about("JSON-RPC Simulation server") + .arg( + Arg::with_name("accounts_path") + .short("a") + .long("accounts") + .value_name("FILE") + .takes_value(true) + .required(true) + .default_value("accounts.json") + .help("Use FILE as location of accounts.json"), + ) + .arg( + Arg::with_name("ledger_path") + .short("l") + .long("ledger") + .value_name("DIR") + .takes_value(true) + .required(true) + .default_value("test-ledger") + .help("Use DIR as ledger location"), + ) + .get_matches(); + + let accounts_path = PathBuf::from(value_t_or_exit!(matches, "accounts_path", String)); + let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger_path", String)); + let rpc_addr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let rpc_port = 8899u16; + let rpc_addr = SocketAddr::new(rpc_addr, rpc_port); + + let config = rpc_process::JsonRpcConfig { + accounts_path, + ledger_path, + rpc_threads: 1, + rpc_niceness_adj: 0, + max_request_body_size: Some(8192), + }; + + let exit = Arc::new(AtomicBool::new(false)); + let validator_exit = rpc_process::create_exit(exit.clone()); + + let _rpc_service = + rpc_service::JsonRpcService::new(rpc_addr, config, validator_exit, exit.clone()); + + let refresh_interval = Duration::from_millis(250); + for _i in 0.. 
{ + if exit.load(Ordering::Relaxed) { + break; + } + thread::sleep(refresh_interval); + } +} diff --git a/svm/examples/json-rpc/server/src/rpc_process.rs b/svm/examples/json-rpc/server/src/rpc_process.rs new file mode 100644 index 00000000000000..ed239323b462b4 --- /dev/null +++ b/svm/examples/json-rpc/server/src/rpc_process.rs @@ -0,0 +1,928 @@ +use { + crate::svm_bridge::{ + create_executable_environment, LoadAndExecuteTransactionsOutput, MockBankCallback, + MockForkGraph, TransactionBatch, + }, + base64::{prelude::BASE64_STANDARD, Engine}, + bincode::config::Options, + jsonrpc_core::{types::error, Error, Metadata, Result}, + jsonrpc_derive::rpc, + log::*, + serde_json, + solana_account_decoder::{ + parse_account_data::{AccountAdditionalDataV2, SplTokenAdditionalData}, + parse_token::{get_token_account_mint, is_known_spl_token_id}, + UiAccount, UiAccountEncoding, UiDataSliceConfig, MAX_BASE58_BYTES, + }, + solana_accounts_db::blockhash_queue::BlockhashQueue, + solana_compute_budget::compute_budget::ComputeBudget, + solana_perf::packet::PACKET_DATA_SIZE, + solana_program_runtime::loaded_programs::ProgramCacheEntry, + solana_rpc_client_api::{ + config::*, + response::{Response as RpcResponse, *}, + }, + solana_sdk::{ + account::{from_account, Account, AccountSharedData, ReadableAccount}, + clock::{Epoch, Slot, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY}, + commitment_config::CommitmentConfig, + exit::Exit, + hash::Hash, + inner_instruction::InnerInstructions, + message::{ + v0::{LoadedAddresses, MessageAddressTableLookup}, + AddressLoaderError, + }, + nonce::state::DurableNonce, + pubkey::Pubkey, + reserved_account_keys::ReservedAccountKeys, + signature::Signature, + system_instruction, sysvar, + transaction::{ + AddressLoader, MessageHash, SanitizedTransaction, TransactionError, + VersionedTransaction, + }, + transaction_context::{TransactionAccount, TransactionReturnData}, + }, + solana_svm::{ + account_loader::{CheckedTransactionDetails, TransactionCheckResult}, + account_overrides::AccountOverrides, + transaction_error_metrics::TransactionErrorMetrics, + transaction_processing_result::{ + ProcessedTransaction, TransactionProcessingResultExtensions, + }, + transaction_processor::{ + ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, + TransactionProcessingConfig, TransactionProcessingEnvironment, + }, + }, + solana_system_program::system_processor, + solana_transaction_status::{ + map_inner_instructions, parse_ui_inner_instructions, TransactionBinaryEncoding, + UiTransactionEncoding, + }, + solana_vote::vote_account::VoteAccountsHashMap, + spl_token_2022::{ + extension::{ + interest_bearing_mint::InterestBearingConfig, BaseStateWithExtensions, + StateWithExtensions, + }, + state::Mint, + }, + std::{ + any::type_name, + cmp::min, + collections::{HashMap, HashSet}, + fs, + path::PathBuf, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + }, +}; + +pub const MAX_REQUEST_BODY_SIZE: usize = 50 * (1 << 10); // 50kB + +const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot +const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch +const MAX_BASE58_SIZE: usize = 1683; // Golden, bump if PACKET_DATA_SIZE changes +const MAX_BASE64_SIZE: usize = 1644; // Golden, bump if PACKET_DATA_SIZE changes + +fn new_response(slot: Slot, value: T) -> RpcResponse { + RpcResponse { + context: RpcResponseContext::new(slot), + value, + } +} + +#[derive(Debug, Default, 
Clone)]
+pub struct JsonRpcConfig {
+ pub accounts_path: PathBuf,
+ pub ledger_path: PathBuf,
+ pub rpc_threads: usize,
+ pub rpc_niceness_adj: i8,
+ pub max_request_body_size: Option<usize>,
+}
+
+#[derive(Clone)]
+pub struct JsonRpcRequestProcessor {
+ account_map: Vec<(Pubkey, AccountSharedData)>,
+ #[allow(dead_code)]
+ exit: Arc<RwLock<Exit>>,
+ transaction_processor: Arc<RwLock<TransactionBatchProcessor<MockForkGraph>>>,
+}
+
+struct TransactionSimulationResult {
+ pub result: solana_sdk::transaction::Result<()>,
+ pub logs: TransactionLogMessages,
+ pub post_simulation_accounts: Vec<TransactionAccount>,
+ pub units_consumed: u64,
+ pub return_data: Option<TransactionReturnData>,
+ pub inner_instructions: Option<Vec<InnerInstructions>>,
+}
+
+#[derive(Debug, Default, PartialEq)]
+pub struct ProcessedTransactionCounts {
+ pub processed_transactions_count: u64,
+ pub processed_non_vote_transactions_count: u64,
+ pub processed_with_successful_result_count: u64,
+ pub signature_count: u64,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum TransactionLogCollectorFilter {
+ All,
+ AllWithVotes,
+ None,
+ OnlyMentionedAddresses,
+}
+
+impl Default for TransactionLogCollectorFilter {
+ fn default() -> Self {
+ Self::None
+ }
+}
+
+#[derive(Debug, Default)]
+pub struct TransactionLogCollectorConfig {
+ pub mentioned_addresses: HashSet<Pubkey>,
+ pub filter: TransactionLogCollectorFilter,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct TransactionLogInfo {
+ pub signature: Signature,
+ pub result: solana_sdk::transaction::Result<()>,
+ pub is_vote: bool,
+ pub log_messages: TransactionLogMessages,
+}
+
+#[derive(Default, Debug)]
+pub struct TransactionLogCollector {
+ // All the logs collected from this Bank. Exact contents depend on the
+ // active `TransactionLogCollectorFilter`.
+ pub logs: Vec<TransactionLogInfo>,
+
+ // For each `mentioned_addresses`, maintain a list of indices into `logs` to easily
+ // locate the logs from transactions that included the mentioned addresses.
+ pub mentioned_address_map: HashMap<Pubkey, Vec<usize>>,
+}
+
+impl AddressLoader for JsonRpcRequestProcessor {
+ fn load_addresses(
+ self,
+ _lookups: &[MessageAddressTableLookup],
+ ) -> core::result::Result<LoadedAddresses, AddressLoaderError> {
+ Ok(LoadedAddresses {
+ writable: vec![],
+ readonly: vec![],
+ })
+ }
+}
+
+impl Metadata for JsonRpcRequestProcessor {}
+
+impl JsonRpcRequestProcessor {
+ pub fn new(config: JsonRpcConfig, exit: Arc<RwLock<Exit>>) -> Self {
+ let accounts_json_path = config.accounts_path.clone();
+ let accounts_data: String = fs::read_to_string(accounts_json_path).unwrap();
+ let accounts_data: serde_json::Value = serde_json::from_str(&accounts_data).unwrap();
+ let accounts_slice: Vec<(Pubkey, AccountSharedData)> = accounts_data["accounts"]
+ .as_array()
+ .unwrap()
+ .iter()
+ .map(|acc| {
+ let pubkey = Pubkey::from_str(acc["pubkey"].as_str().unwrap()).unwrap();
+ let account = acc["account"].as_object().unwrap();
+ let owner = account["owner"].as_str().unwrap();
+ let data = account["data"].as_array().unwrap()[0].as_str().unwrap();
+ let acc_data = AccountSharedData::from(Account {
+ lamports: account["lamports"].as_u64().unwrap(),
+ data: BASE64_STANDARD.decode(data).unwrap(),
+ owner: Pubkey::from_str(owner).unwrap(),
+ executable: account["executable"].as_bool().unwrap(),
+ rent_epoch: account["rentEpoch"].as_u64().unwrap(),
+ });
+ (pubkey, acc_data)
+ })
+ .collect();
+ let batch_processor = TransactionBatchProcessor::<MockForkGraph>::new(
+ EXECUTION_SLOT,
+ EXECUTION_EPOCH,
+ HashSet::new(),
+ );
+
+ Self {
+ account_map: accounts_slice,
+ exit,
+ transaction_processor: Arc::new(RwLock::new(batch_processor)),
+ }
+ }
+
+ fn get_account_info(
+ &self,
+ pubkey: &Pubkey,
+ config: Option<RpcAccountInfoConfig>,
+ ) -> Result<RpcResponse<Option<UiAccount>>> {
+ let RpcAccountInfoConfig {
+ encoding,
+ data_slice,
+ commitment: _,
+ min_context_slot: _,
+ } = config.unwrap_or_default();
+ let encoding = encoding.unwrap_or(UiAccountEncoding::Binary);
+ Ok(new_response(
+ 0,
+ match self.get_account(pubkey) {
+ Some(account) => {
+ debug!("Found account {pubkey:?}");
+ Some(encode_account(&account, pubkey, encoding, data_slice)?)
+ }
+ None => {
+ debug!("Did not find account {pubkey:?}");
+ None
+ }
+ },
+ ))
+ }
+
+ fn get_latest_blockhash(&self, _config: RpcContextConfig) -> Result<RpcResponse<RpcBlockhash>> {
+ let blockhash = Hash::default();
+ let last_valid_block_height = 0u64;
+ Ok(new_response(
+ 0,
+ RpcBlockhash {
+ blockhash: blockhash.to_string(),
+ last_valid_block_height,
+ },
+ ))
+ }
+
+ fn get_minimum_balance_for_rent_exemption(
+ &self,
+ _data_len: usize,
+ _commitment: Option<CommitmentConfig>,
+ ) -> u64 {
+ 0u64
+ }
+
+ fn simulate_transaction_unchecked(
+ &self,
+ transaction: &SanitizedTransaction,
+ enable_cpi_recording: bool,
+ ) -> TransactionSimulationResult {
+ let mut mock_bank = MockBankCallback::new(self.account_map.clone());
+ let transaction_processor = self.transaction_processor.read().unwrap();
+
+ let account_keys = transaction.message().account_keys();
+ let number_of_accounts = account_keys.len();
+ let account_overrides = AccountOverrides::default();
+
+ let fork_graph = Arc::new(RwLock::new(MockForkGraph {}));
+
+ create_executable_environment(
+ fork_graph.clone(),
+ &account_keys,
+ &mut mock_bank,
+ &transaction_processor,
+ );
+
+ // Add the system program builtin.
+ transaction_processor.add_builtin(
+ &mock_bank,
+ solana_system_program::id(),
+ "system_program",
+ ProgramCacheEntry::new_builtin(
+ 0,
+ b"system_program".len(),
+ system_processor::Entrypoint::vm,
+ ),
+ );
+ // Add the upgradeable BPF loader builtin, which owns programs deployed
+ // to the ledger (such as the example program).
+ transaction_processor.add_builtin( + &mock_bank, + solana_sdk::bpf_loader_upgradeable::id(), + "solana_bpf_loader_upgradeable_program", + ProgramCacheEntry::new_builtin( + 0, + b"solana_bpf_loader_upgradeable_program".len(), + solana_bpf_loader_program::Entrypoint::vm, + ), + ); + + let batch = self.prepare_unlocked_batch_from_single_tx(transaction); + let LoadAndExecuteTransactionsOutput { + mut processing_results, + .. + } = self.load_and_execute_transactions( + &mock_bank, + &batch, + // After simulation, transactions will need to be forwarded to the leader + // for processing. During forwarding, the transaction could expire if the + // delay is not accounted for. + MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, + TransactionProcessingConfig { + account_overrides: Some(&account_overrides), + check_program_modification_slot: false, + compute_budget: Some(ComputeBudget::default()), + log_messages_bytes_limit: None, + limit_to_load_programs: true, + recording_config: ExecutionRecordingConfig { + enable_cpi_recording, + enable_log_recording: true, + enable_return_data_recording: true, + }, + transaction_account_lock_limit: Some(64), + }, + ); + + let processing_result = processing_results + .pop() + .unwrap_or(Err(TransactionError::InvalidProgramForExecution)); + let flattened_result = processing_result.flattened_result(); + let (post_simulation_accounts, logs, return_data, inner_instructions) = + match processing_result { + Ok(processed_tx) => match processed_tx { + ProcessedTransaction::Executed(executed_tx) => { + let details = executed_tx.execution_details; + let post_simulation_accounts = executed_tx + .loaded_transaction + .accounts + .into_iter() + .take(number_of_accounts) + .collect::>(); + ( + post_simulation_accounts, + details.log_messages, + details.return_data, + details.inner_instructions, + ) + } + ProcessedTransaction::FeesOnly(_) => (vec![], None, None, None), + }, + Err(_) => (vec![], None, None, None), + }; + let logs = logs.unwrap_or_default(); + let units_consumed: u64 = 0; + + TransactionSimulationResult { + result: flattened_result, + logs, + post_simulation_accounts, + units_consumed, + return_data, + inner_instructions, + } + } + + fn prepare_unlocked_batch_from_single_tx<'a>( + &'a self, + transaction: &'a SanitizedTransaction, + ) -> TransactionBatch<'_> { + let tx_account_lock_limit = solana_sdk::transaction::MAX_TX_ACCOUNT_LOCKS; + let lock_result = transaction + .get_account_locks(tx_account_lock_limit) + .map(|_| ()); + let batch = TransactionBatch::new( + vec![lock_result], + std::borrow::Cow::Borrowed(std::slice::from_ref(transaction)), + ); + batch + } + + fn check_age( + &self, + sanitized_txs: &[impl core::borrow::Borrow], + lock_results: &[solana_sdk::transaction::Result<()>], + max_age: usize, + error_counters: &mut TransactionErrorMetrics, + ) -> Vec { + let last_blockhash = Hash::default(); + let blockhash_queue = BlockhashQueue::default(); + let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + + sanitized_txs + .iter() + .zip(lock_results) + .map(|(tx, lock_res)| match lock_res { + Ok(()) => self.check_transaction_age( + tx.borrow(), + max_age, + &next_durable_nonce, + &blockhash_queue, + error_counters, + ), + Err(e) => Err(e.clone()), + }) + .collect() + } + + fn check_transaction_age( + &self, + _tx: &SanitizedTransaction, + _max_age: usize, + _next_durable_nonce: &DurableNonce, + _hash_queue: &BlockhashQueue, + _error_counters: &mut TransactionErrorMetrics, + ) -> TransactionCheckResult { + /* for now just return 
+
+    fn check_transaction_age(
+        &self,
+        _tx: &SanitizedTransaction,
+        _max_age: usize,
+        _next_durable_nonce: &DurableNonce,
+        _hash_queue: &BlockhashQueue,
+        _error_counters: &mut TransactionErrorMetrics,
+    ) -> TransactionCheckResult {
+        /* for now just return defaults */
+        Ok(CheckedTransactionDetails {
+            nonce: None,
+            lamports_per_signature: u64::default(),
+        })
+    }
+
+    fn clock(&self) -> sysvar::clock::Clock {
+        from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default())
+            .unwrap_or_default()
+    }
+
+    fn epoch_total_stake(&self, _epoch: Epoch) -> Option<u64> {
+        Some(u64::default())
+    }
+
+    fn epoch_vote_accounts(&self, _epoch: Epoch) -> Option<&VoteAccountsHashMap> {
+        None
+    }
+
+    fn get_account(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
+        let account_map: HashMap<Pubkey, AccountSharedData> =
+            HashMap::from_iter(self.account_map.clone());
+        account_map.get(pubkey).cloned()
+    }
+
+    fn get_additional_mint_data(&self, data: &[u8]) -> Result<SplTokenAdditionalData> {
+        StateWithExtensions::<Mint>::unpack(data)
+            .map_err(|_| {
+                Error::invalid_params("Invalid param: Token mint could not be unpacked".to_string())
+            })
+            .map(|mint| {
+                let interest_bearing_config = mint
+                    .get_extension::<InterestBearingConfig>()
+                    .map(|x| (*x, self.clock().unix_timestamp))
+                    .ok();
+                SplTokenAdditionalData {
+                    decimals: mint.base.decimals,
+                    interest_bearing_config,
+                }
+            })
+    }
+
+    fn get_encoded_account(
+        &self,
+        pubkey: &Pubkey,
+        encoding: UiAccountEncoding,
+        data_slice: Option<UiDataSliceConfig>,
+        // only used for simulation results
+        overwrite_accounts: Option<&HashMap<Pubkey, AccountSharedData>>,
+    ) -> Result<Option<UiAccount>> {
+        match overwrite_accounts
+            .and_then(|accounts| accounts.get(pubkey).cloned())
+            .or_else(|| self.get_account(pubkey))
+        {
+            Some(account) => {
+                let response = if is_known_spl_token_id(account.owner())
+                    && encoding == UiAccountEncoding::JsonParsed
+                {
+                    self.get_parsed_token_account(pubkey, account, overwrite_accounts)
+                } else {
+                    encode_account(&account, pubkey, encoding, data_slice)?
+                };
+                Ok(Some(response))
+            }
+            None => Ok(None),
+        }
+    }
+
+    fn get_parsed_token_account(
+        &self,
+        pubkey: &Pubkey,
+        account: AccountSharedData,
+        // only used for simulation results
+        overwrite_accounts: Option<&HashMap<Pubkey, AccountSharedData>>,
+    ) -> UiAccount {
+        let additional_data = get_token_account_mint(account.data())
+            .and_then(|mint_pubkey| {
+                overwrite_accounts
+                    .and_then(|accounts| accounts.get(&mint_pubkey).cloned())
+                    .or_else(|| self.get_account(&mint_pubkey))
+            })
+            .and_then(|mint_account| self.get_additional_mint_data(mint_account.data()).ok())
+            .map(|data| AccountAdditionalDataV2 {
+                spl_token_additional_data: Some(data),
+            });
+
+        UiAccount::encode(
+            pubkey,
+            &account,
+            UiAccountEncoding::JsonParsed,
+            additional_data,
+            None,
+        )
+    }
+
+    fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) {
+        let last_hash = Hash::default();
+        let last_lamports_per_signature = u64::default();
+        (last_hash, last_lamports_per_signature)
+    }
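// `get_account` above clones the entire account map into a fresh HashMap on
// every call. A sketch (not part of the patch) of an equivalent lookup that
// scans the read-only Vec directly and avoids the per-call allocation:
fn _example_get_account_linear(
    account_map: &[(Pubkey, AccountSharedData)],
    pubkey: &Pubkey,
) -> Option<AccountSharedData> {
    account_map
        .iter()
        .find(|(key, _)| key == pubkey)
        .map(|(_, account)| account.clone())
}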
+
+    fn load_and_execute_transactions(
+        &self,
+        bank: &MockBankCallback,
+        batch: &TransactionBatch,
+        max_age: usize,
+        processing_config: TransactionProcessingConfig,
+    ) -> LoadAndExecuteTransactionsOutput {
+        let sanitized_txs = batch.sanitized_transactions();
+        debug!("processing transactions: {}", sanitized_txs.len());
+        let mut error_counters = TransactionErrorMetrics::default();
+
+        let check_results = self.check_age(
+            sanitized_txs,
+            batch.lock_results(),
+            max_age,
+            &mut error_counters,
+        );
+
+        let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature();
+        let processing_environment = TransactionProcessingEnvironment {
+            blockhash,
+            epoch_total_stake: self.epoch_total_stake(Epoch::default()),
+            epoch_vote_accounts: self.epoch_vote_accounts(Epoch::default()),
+            feature_set: Arc::clone(&bank.feature_set),
+            fee_structure: None,
+            lamports_per_signature,
+            rent_collector: None,
+        };
+
+        let sanitized_output = self
+            .transaction_processor
+            .read()
+            .unwrap()
+            .load_and_execute_sanitized_transactions(
+                bank,
+                sanitized_txs,
+                check_results,
+                &processing_environment,
+                &processing_config,
+            );
+
+        let err_count = &mut error_counters.total;
+
+        let mut processed_counts = ProcessedTransactionCounts::default();
+        for (processing_result, tx) in sanitized_output
+            .processing_results
+            .iter()
+            .zip(sanitized_txs)
+        {
+            if processing_result.was_processed() {
+                // Signature count must be accumulated only if the transaction
+                // is processed, otherwise a mismatched count between banking
+                // and replay could occur
+                processed_counts.signature_count +=
+                    u64::from(tx.message().header().num_required_signatures);
+                processed_counts.processed_transactions_count += 1;
+
+                if !tx.is_simple_vote_transaction() {
+                    processed_counts.processed_non_vote_transactions_count += 1;
+                }
+            }
+
+            match processing_result.flattened_result() {
+                Ok(()) => {
+                    processed_counts.processed_with_successful_result_count += 1;
+                }
+                Err(err) => {
+                    if *err_count == 0 {
+                        debug!("tx error: {:?} {:?}", err, tx);
+                    }
+                    *err_count += 1;
+                }
+            }
+        }
+
+        LoadAndExecuteTransactionsOutput {
+            processing_results: sanitized_output.processing_results,
+        }
+    }
+}
+
+/// RPC interface that an API node is expected to provide
+pub mod rpc {
+    use super::*;
+    #[rpc]
+    pub trait Rpc {
+        type Metadata;
+
+        #[rpc(meta, name = "getAccountInfo")]
+        fn get_account_info(
+            &self,
+            meta: Self::Metadata,
+            pubkey_str: String,
+            config: Option<RpcAccountInfoConfig>,
+        ) -> Result<RpcResponse<Option<UiAccount>>>;
+
+        #[rpc(meta, name = "getLatestBlockhash")]
+        fn get_latest_blockhash(
+            &self,
+            meta: Self::Metadata,
+            config: Option<RpcContextConfig>,
+        ) -> Result<RpcResponse<RpcBlockhash>>;
+
+        #[rpc(meta, name = "getMinimumBalanceForRentExemption")]
+        fn get_minimum_balance_for_rent_exemption(
+            &self,
+            meta: Self::Metadata,
+            data_len: usize,
+            commitment: Option<CommitmentConfig>,
+        ) -> Result<u64>;
+
+        #[rpc(meta, name = "getVersion")]
+        fn get_version(&self, meta: Self::Metadata) -> Result<RpcVersionInfo>;
+
+        #[rpc(meta, name = "simulateTransaction")]
+        fn simulate_transaction(
+            &self,
+            meta: Self::Metadata,
+            data: String,
+            config: Option<RpcSimulateTransactionConfig>,
+        ) -> Result<RpcResponse<RpcSimulateTransactionResult>>;
+    }
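    // A sketch of the wire requests that map onto the trait above. The param
    // shapes are assumptions consistent with the handlers, not taken from the
    // patch; a client would POST these JSON bodies to the HTTP endpoint.
    fn _example_requests() -> (serde_json::Value, serde_json::Value) {
        let get_account_info = serde_json::json!({
            "jsonrpc": "2.0", "id": 1, "method": "getAccountInfo",
            "params": ["<base58 pubkey>", {"encoding": "base64"}],
        });
        let simulate_transaction = serde_json::json!({
            "jsonrpc": "2.0", "id": 2, "method": "simulateTransaction",
            "params": ["<base64 transaction>", {"encoding": "base64"}],
        });
        (get_account_info, simulate_transaction)
    }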
+
+    pub struct RpcImpl;
+
+    impl Rpc for RpcImpl {
+        type Metadata = JsonRpcRequestProcessor;
+
+        fn simulate_transaction(
+            &self,
+            meta: Self::Metadata,
+            data: String,
+            config: Option<RpcSimulateTransactionConfig>,
+        ) -> Result<RpcResponse<RpcSimulateTransactionResult>> {
+            debug!("simulate_transaction rpc request received");
+
+            let RpcSimulateTransactionConfig {
+                sig_verify: _,
+                replace_recent_blockhash: _,
+                commitment: _,
+                encoding,
+                accounts: config_accounts,
+                min_context_slot: _,
+                inner_instructions: enable_cpi_recording,
+            } = config.unwrap_or_default();
+            let tx_encoding = encoding.unwrap_or(UiTransactionEncoding::Base58);
+            let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| {
+                Error::invalid_params(format!(
+                    "unsupported encoding: {tx_encoding}. Supported encodings: base58, base64"
+                ))
+            })?;
+            let (_, unsanitized_tx) =
+                decode_and_deserialize::<VersionedTransaction>(data, binary_encoding)?;
+            debug!("unsanitized transaction decoded {:?}", unsanitized_tx);
+
+            let transaction = sanitize_transaction(
+                unsanitized_tx,
+                meta.clone(),
+                &ReservedAccountKeys::default().active,
+            )?;
+
+            let TransactionSimulationResult {
+                result,
+                logs,
+                post_simulation_accounts,
+                units_consumed,
+                return_data,
+                inner_instructions,
+            } = meta.simulate_transaction_unchecked(&transaction, enable_cpi_recording);
+
+            let account_keys = transaction.message().account_keys();
+            let number_of_accounts = account_keys.len();
+
+            let accounts = if let Some(config_accounts) = config_accounts {
+                let accounts_encoding = config_accounts
+                    .encoding
+                    .unwrap_or(UiAccountEncoding::Base64);
+
+                if accounts_encoding == UiAccountEncoding::Binary
+                    || accounts_encoding == UiAccountEncoding::Base58
+                {
+                    return Err(Error::invalid_params("base58 encoding not supported"));
+                }
+
+                if config_accounts.addresses.len() > number_of_accounts {
+                    return Err(Error::invalid_params(format!(
+                        "Too many accounts provided; max {number_of_accounts}"
+                    )));
+                }
+
+                if result.is_err() {
+                    Some(vec![None; config_accounts.addresses.len()])
+                } else {
+                    let mut post_simulation_accounts_map = HashMap::new();
+                    for (pubkey, data) in post_simulation_accounts {
+                        post_simulation_accounts_map.insert(pubkey, data);
+                    }
+
+                    Some(
+                        config_accounts
+                            .addresses
+                            .iter()
+                            .map(|address_str| {
+                                let pubkey = verify_pubkey(address_str)?;
+                                meta.get_encoded_account(
+                                    &pubkey,
+                                    accounts_encoding,
+                                    None,
+                                    Some(&post_simulation_accounts_map),
+                                )
+                            })
+                            .collect::<Result<Vec<_>>>()?,
+                    )
+                }
+            } else {
+                None
+            };
+
+            let inner_instructions = inner_instructions.map(|info| {
+                map_inner_instructions(info)
+                    .map(|converted| parse_ui_inner_instructions(converted, &account_keys))
+                    .collect()
+            });
+
+            Ok(new_response(
+                0,
+                RpcSimulateTransactionResult {
+                    err: result.err(),
+                    logs: Some(logs),
+                    accounts,
+                    units_consumed: Some(units_consumed),
+                    return_data: return_data.map(|return_data| return_data.into()),
+                    inner_instructions,
+                    replacement_blockhash: None,
+                },
+            ))
+        }
+
+        fn get_account_info(
+            &self,
+            meta: Self::Metadata,
+            pubkey_str: String,
+            config: Option<RpcAccountInfoConfig>,
+        ) -> Result<RpcResponse<Option<UiAccount>>> {
+            debug!("get_account_info rpc request received: {:?}", pubkey_str);
+            let pubkey = verify_pubkey(&pubkey_str)?;
+            debug!("pubkey {pubkey:?} verified.");
+            meta.get_account_info(&pubkey, config)
+        }
+
+        fn get_latest_blockhash(
+            &self,
+            meta: Self::Metadata,
+            config: Option<RpcContextConfig>,
+        ) -> Result<RpcResponse<RpcBlockhash>> {
+            debug!("get_latest_blockhash rpc request received");
+            meta.get_latest_blockhash(config.unwrap_or_default())
+        }
+
+        fn get_minimum_balance_for_rent_exemption(
+            &self,
+            meta: Self::Metadata,
+            data_len: usize,
+            commitment: Option<CommitmentConfig>,
+        ) -> Result<u64> {
+            debug!(
+                "get_minimum_balance_for_rent_exemption rpc request received: {:?}",
+                data_len
+            );
+            if data_len as u64 > system_instruction::MAX_PERMITTED_DATA_LENGTH {
+                return Err(Error::invalid_request());
+            }
+            Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment))
+        }
+
+        fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> {
+            debug!("get_version rpc request received");
+            let version = solana_version::Version::default();
+            Ok(RpcVersionInfo {
+                solana_core: version.to_string(),
+                feature_set: Some(version.feature_set),
+            })
+        }
+    }
+}
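// The `data` string accepted by `simulate_transaction` above is produced by
// the inverse of `decode_and_deserialize` below: bincode-serialize the signed
// transaction, then base64-encode the wire bytes. A sketch under those
// assumptions, not part of the patch:
fn _example_encode_for_simulation(tx: &VersionedTransaction) -> String {
    let wire = bincode::serialize(tx).expect("serialize transaction");
    debug_assert!(wire.len() <= PACKET_DATA_SIZE);
    BASE64_STANDARD.encode(wire)
}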
+
+pub fn create_exit(exit: Arc<AtomicBool>) -> Arc<RwLock<Exit>> {
+    let mut exit_handler = Exit::default();
+    exit_handler.register_exit(Box::new(move || exit.store(true, Ordering::Relaxed)));
+    Arc::new(RwLock::new(exit_handler))
+}
+
+fn decode_and_deserialize<T>(
+    encoded: String,
+    encoding: TransactionBinaryEncoding,
+) -> Result<(Vec<u8>, T)>
+where
+    T: serde::de::DeserializeOwned,
+{
+    let wire_output = match encoding {
+        TransactionBinaryEncoding::Base58 => {
+            if encoded.len() > MAX_BASE58_SIZE {
+                return Err(Error::invalid_params(format!(
+                    "base58 encoded {} too large: {} bytes (max: encoded/raw {}/{})",
+                    type_name::<T>(),
+                    encoded.len(),
+                    MAX_BASE58_SIZE,
+                    PACKET_DATA_SIZE,
+                )));
+            }
+            bs58::decode(encoded)
+                .into_vec()
+                .map_err(|e| Error::invalid_params(format!("invalid base58 encoding: {e:?}")))?
+        }
+        TransactionBinaryEncoding::Base64 => {
+            if encoded.len() > MAX_BASE64_SIZE {
+                return Err(Error::invalid_params(format!(
+                    "base64 encoded {} too large: {} bytes (max: encoded/raw {}/{})",
+                    type_name::<T>(),
+                    encoded.len(),
+                    MAX_BASE64_SIZE,
+                    PACKET_DATA_SIZE,
+                )));
+            }
+            BASE64_STANDARD
+                .decode(encoded)
+                .map_err(|e| Error::invalid_params(format!("invalid base64 encoding: {e:?}")))?
+        }
+    };
+    if wire_output.len() > PACKET_DATA_SIZE {
+        return Err(Error::invalid_params(format!(
+            "decoded {} too large: {} bytes (max: {} bytes)",
+            type_name::<T>(),
+            wire_output.len(),
+            PACKET_DATA_SIZE
+        )));
+    }
+    bincode::options()
+        .with_limit(PACKET_DATA_SIZE as u64)
+        .with_fixint_encoding()
+        .allow_trailing_bytes()
+        .deserialize_from(&wire_output[..])
+        .map_err(|err| {
+            Error::invalid_params(format!(
+                "failed to deserialize {}: {}",
+                type_name::<T>(),
+                &err.to_string()
+            ))
+        })
+        .map(|output| (wire_output, output))
+}
+
+fn encode_account<T: ReadableAccount>(
+    account: &T,
+    pubkey: &Pubkey,
+    encoding: UiAccountEncoding,
+    data_slice: Option<UiDataSliceConfig>,
+) -> Result<UiAccount> {
+    if (encoding == UiAccountEncoding::Binary || encoding == UiAccountEncoding::Base58)
+        && data_slice
+            .map(|s| min(s.length, account.data().len().saturating_sub(s.offset)))
+            .unwrap_or(account.data().len())
+            > MAX_BASE58_BYTES
+    {
+        let message = format!("Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please use Base64 encoding.");
+        Err(error::Error {
+            code: error::ErrorCode::InvalidRequest,
+            message,
+            data: None,
+        })
+    } else {
+        Ok(UiAccount::encode(
+            pubkey, account, encoding, None, data_slice,
+        ))
+    }
+}
+
+fn sanitize_transaction(
+    transaction: VersionedTransaction,
+    address_loader: impl AddressLoader,
+    reserved_account_keys: &HashSet<Pubkey>,
+) -> Result<SanitizedTransaction> {
+    SanitizedTransaction::try_create(
+        transaction,
+        MessageHash::Compute,
+        None,
+        address_loader,
+        reserved_account_keys,
+    )
+    .map_err(|err| Error::invalid_params(format!("invalid transaction: {err}")))
+}
+
+fn verify_pubkey(input: &str) -> Result<Pubkey> {
+    input
+        .parse()
+        .map_err(|e| Error::invalid_params(format!("Invalid param: {e:?}")))
+}
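// The MAX_BASE64_SIZE bound enforced by `decode_and_deserialize` follows from
// PACKET_DATA_SIZE: base64 maps every 3 raw bytes to 4 characters, so a
// 1232-byte packet encodes to at most 4 * ceil(1232 / 3) = 1644 characters
// (base58 has no fixed ratio, so its cap is a looser worst-case constant).
// A compile-time sketch of that relationship, with the values assumed here:
const _EXAMPLE_PACKET_DATA_SIZE: usize = 1232;
const _EXAMPLE_MAX_BASE64_SIZE: usize = 4 * ((_EXAMPLE_PACKET_DATA_SIZE + 2) / 3);
const _: () = assert!(_EXAMPLE_MAX_BASE64_SIZE == 1644);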
diff --git a/svm/examples/json-rpc/server/src/rpc_service.rs b/svm/examples/json-rpc/server/src/rpc_service.rs
new file mode 100644
index 00000000000000..73f8d81f22a8bd
--- /dev/null
+++ b/svm/examples/json-rpc/server/src/rpc_service.rs
@@ -0,0 +1,111 @@
+use {
+    crate::rpc_process::{rpc::*, *},
+    crossbeam_channel::unbounded,
+    jsonrpc_core::MetaIoHandler,
+    jsonrpc_http_server::{
+        hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, ServerBuilder,
+    },
+    log::*,
+    solana_perf::thread::renice_this_thread,
+    solana_sdk::exit::Exit,
+    std::{
+        net::SocketAddr,
+        sync::{atomic::AtomicBool, Arc, RwLock},
+        thread::{self, Builder, JoinHandle},
+    },
+};
+
+pub struct JsonRpcService {
+    thread_hdl: JoinHandle<()>,
+    close_handle: Option<CloseHandle>,
+}
+
+impl JsonRpcService {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        rpc_addr: SocketAddr,
+        config: JsonRpcConfig,
+        validator_exit: Arc<RwLock<Exit>>,
+        _exit: Arc<AtomicBool>,
+    ) -> Result<Self, String> {
+        info!("rpc bound to {:?}", rpc_addr);
+        info!("rpc configuration: {:?}", config);
+        let rpc_threads = 1.max(config.rpc_threads);
+        let rpc_niceness_adj = config.rpc_niceness_adj;
+
+        let runtime = Arc::new(
+            tokio::runtime::Builder::new_multi_thread()
+                .worker_threads(rpc_threads)
+                .on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap())
+                .thread_name("solRpcEl")
+                .enable_all()
+                .build()
+                .expect("Runtime"),
+        );
+
+        let max_request_body_size = config
+            .max_request_body_size
+            .unwrap_or(MAX_REQUEST_BODY_SIZE);
+        let request_processor = JsonRpcRequestProcessor::new(config, validator_exit.clone());
+        let (close_handle_sender, close_handle_receiver) = unbounded();
+        let thread_hdl = Builder::new()
+            .name("solJsonRpcSvc".to_string())
+            .spawn(move || {
+                renice_this_thread(rpc_niceness_adj).unwrap();
+                let mut io = MetaIoHandler::default();
+                io.extend_with(rpc::RpcImpl.to_delegate());
+                let server = ServerBuilder::with_meta_extractor(
+                    io,
+                    move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
+                )
+                .event_loop_executor(runtime.handle().clone())
+                .threads(1)
+                .cors(DomainsValidation::AllowOnly(vec![
+                    AccessControlAllowOrigin::Any,
+                ]))
+                .cors_max_age(86400)
+                .max_request_body_size(max_request_body_size)
+                .start_http(&rpc_addr);
+
+                if let Err(e) = server {
+                    warn!(
+                        "JSON RPC service unavailable error: {:?}. \n\
+                        Also, check that port {} is not already in use by another application",
+                        e,
+                        rpc_addr.port()
+                    );
+                    close_handle_sender.send(Err(e.to_string())).unwrap();
+                    return;
+                }
+
+                let server = server.unwrap();
+                close_handle_sender.send(Ok(server.close_handle())).unwrap();
+                server.wait();
+            })
+            .unwrap();
+
+        let close_handle = close_handle_receiver.recv().unwrap()?;
+        let close_handle_ = close_handle.clone();
+        validator_exit
+            .write()
+            .unwrap()
+            .register_exit(Box::new(move || {
+                close_handle_.close();
+            }));
+        Ok(Self {
+            thread_hdl,
+            close_handle: Some(close_handle),
+        })
+    }
+
+    pub fn exit(&mut self) {
+        if let Some(c) = self.close_handle.take() {
+            c.close()
+        }
+    }
+
+    pub fn join(mut self) -> thread::Result<()> {
+        self.exit();
+        self.thread_hdl.join()
+    }
+}
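// A sketch of wiring the service above from a `main`: the socket address is
// arbitrary and the config is assumed to carry an `accounts_path` pointing at
// the example's JSON accounts file (illustrative, not part of the patch).
fn _example_run(config: JsonRpcConfig) -> Result<(), String> {
    let exit = Arc::new(AtomicBool::new(false));
    let validator_exit = create_exit(exit.clone());
    let rpc_addr = SocketAddr::from(([127, 0, 0, 1], 8899));
    let service = JsonRpcService::new(rpc_addr, config, validator_exit, exit)?;
    service.join().map_err(|_| "rpc thread panicked".to_string())
}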
diff --git a/svm/examples/json-rpc/server/src/svm_bridge.rs b/svm/examples/json-rpc/server/src/svm_bridge.rs
new file mode 100644
index 00000000000000..c136af791c08f0
--- /dev/null
+++ b/svm/examples/json-rpc/server/src/svm_bridge.rs
@@ -0,0 +1,272 @@
+use {
+    log::*,
+    solana_bpf_loader_program::syscalls::{
+        SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog,
+        SyscallLogBpfComputeUnits, SyscallLogPubkey, SyscallLogU64, SyscallMemcpy, SyscallMemset,
+        SyscallSetReturnData,
+    },
+    solana_compute_budget::compute_budget::ComputeBudget,
+    solana_program_runtime::{
+        invoke_context::InvokeContext,
+        loaded_programs::{
+            BlockRelation, ForkGraph, LoadProgramMetrics, ProgramCacheEntry,
+            ProgramRuntimeEnvironments,
+        },
+        solana_rbpf::{
+            program::{BuiltinFunction, BuiltinProgram, FunctionRegistry},
+            vm::Config,
+        },
+    },
+    solana_sdk::{
+        account::{AccountSharedData, ReadableAccount},
+        clock::{Clock, Slot, UnixTimestamp},
+        feature_set::FeatureSet,
+        message::AccountKeys,
+        native_loader,
+        pubkey::Pubkey,
+        sysvar::SysvarId,
+        transaction::SanitizedTransaction,
+    },
+    solana_svm::{
+        transaction_processing_callback::TransactionProcessingCallback,
+        transaction_processing_result::TransactionProcessingResult,
+        transaction_processor::TransactionBatchProcessor,
+    },
+    std::{
+        collections::HashMap,
+        sync::{Arc, RwLock},
+        time::{SystemTime, UNIX_EPOCH},
+    },
+};
+
+const DEPLOYMENT_SLOT: u64 = 0;
+const DEPLOYMENT_EPOCH: u64 = 0;
+
+pub struct MockForkGraph {}
+
+impl ForkGraph for MockForkGraph {
+    fn relationship(&self, a: Slot, b: Slot) -> BlockRelation {
+        match a.cmp(&b) {
+            std::cmp::Ordering::Less => BlockRelation::Ancestor,
+            std::cmp::Ordering::Equal => BlockRelation::Equal,
+            std::cmp::Ordering::Greater => BlockRelation::Descendant,
+        }
+    }
+}
+
+pub struct MockBankCallback {
+    pub feature_set: Arc<FeatureSet>,
+    pub account_shared_data: RwLock<HashMap<Pubkey, AccountSharedData>>,
+}
+
+impl TransactionProcessingCallback for MockBankCallback {
+    fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option<usize> {
+        if let Some(data) = self.account_shared_data.read().unwrap().get(account) {
+            if data.lamports() == 0 {
+                None
+            } else {
+                owners.iter().position(|entry| data.owner() == entry)
+            }
+        } else {
+            None
+        }
+    }
+
+    fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<AccountSharedData> {
+        debug!(
+            "Get account {pubkey} shared data, thread {:?}",
+            std::thread::current().name()
+        );
+        self.account_shared_data
+            .read()
+            .unwrap()
+            .get(pubkey)
+            .cloned()
+    }
+
+    fn add_builtin_account(&self, name: &str, program_id: &Pubkey) {
+        let account_data = native_loader::create_loadable_account_with_fields(name, (5000, 0));
+
+        self.account_shared_data
+            .write()
+            .unwrap()
+            .insert(*program_id, account_data);
+    }
+}
+
+impl MockBankCallback {
+    pub fn new(account_map: Vec<(Pubkey, AccountSharedData)>) -> Self {
+        Self {
+            feature_set: Arc::new(FeatureSet::default()),
+            account_shared_data: RwLock::new(HashMap::from_iter(account_map)),
+        }
+    }
+
+    #[allow(dead_code)]
+    pub fn override_feature_set(&mut self, new_set: FeatureSet) {
+        self.feature_set = Arc::new(new_set)
+    }
+}
+
+pub struct LoadAndExecuteTransactionsOutput {
+    // Vector of results indicating whether a transaction was executed or could not
+    // be executed. Note executed transactions can still have failed!
+    pub processing_results: Vec<TransactionProcessingResult>,
+}
+
+pub struct TransactionBatch<'a> {
+    lock_results: Vec<solana_sdk::transaction::Result<()>>,
+    sanitized_txs: std::borrow::Cow<'a, [SanitizedTransaction]>,
+}
+
+impl<'a> TransactionBatch<'a> {
+    pub fn new(
+        lock_results: Vec<solana_sdk::transaction::Result<()>>,
+        sanitized_txs: std::borrow::Cow<'a, [SanitizedTransaction]>,
+    ) -> Self {
+        assert_eq!(lock_results.len(), sanitized_txs.len());
+        Self {
+            lock_results,
+            sanitized_txs,
+        }
+    }
+
+    pub fn lock_results(&self) -> &Vec<solana_sdk::transaction::Result<()>> {
+        &self.lock_results
+    }
+
+    pub fn sanitized_transactions(&self) -> &[SanitizedTransaction] {
+        &self.sanitized_txs
+    }
+}
+
+pub fn create_custom_environment<'a>() -> BuiltinProgram<InvokeContext<'a>> {
+    let compute_budget = ComputeBudget::default();
+    let vm_config = Config {
+        max_call_depth: compute_budget.max_call_depth,
+        stack_frame_size: compute_budget.stack_frame_size,
+        enable_address_translation: true,
+        enable_stack_frame_gaps: true,
+        instruction_meter_checkpoint_distance: 10000,
+        enable_instruction_meter: true,
+        enable_instruction_tracing: true,
+        enable_symbol_and_section_labels: true,
+        reject_broken_elfs: true,
+        noop_instruction_rate: 256,
+        sanitize_user_provided_values: true,
+        external_internal_function_hash_collision: false,
+        reject_callx_r10: false,
+        enable_sbpf_v1: true,
+        enable_sbpf_v2: false,
+        optimize_rodata: false,
+        aligned_memory_mapping: true,
+    };
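    // For the default ComputeBudget the two limits copied above resolve to a
    // 64-frame call depth with 4096-byte stack frames (values assumed from
    // current defaults, not stated in the patch), e.g.:
    //
    //     debug_assert_eq!(compute_budget.max_call_depth, 64);
    //     debug_assert_eq!(compute_budget.stack_frame_size, 4096);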
+
+    // Register system calls that the compiled contract calls during execution.
+    let mut function_registry = FunctionRegistry::<BuiltinFunction<InvokeContext>>::default();
+    function_registry
+        .register_function_hashed(*b"abort", SyscallAbort::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_log_", SyscallLog::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_log_64_", SyscallLogU64::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_log_compute_units_", SyscallLogBpfComputeUnits::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_log_pubkey", SyscallLogPubkey::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_memset_", SyscallMemset::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::vm)
+        .expect("Registration failed");
+    function_registry
+        .register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm)
+        .expect("Registration failed");
+    BuiltinProgram::new_loader(vm_config, function_registry)
+}
+
+pub fn create_executable_environment(
+    fork_graph: Arc<RwLock<MockForkGraph>>,
+    account_keys: &AccountKeys,
+    mock_bank: &mut MockBankCallback,
+    transaction_processor: &TransactionBatchProcessor<MockForkGraph>,
+) {
+    let mut program_cache = transaction_processor.program_cache.write().unwrap();
+
+    program_cache.environments = ProgramRuntimeEnvironments {
+        program_runtime_v1: Arc::new(create_custom_environment()),
+        // We are not using program runtime v2
+        program_runtime_v2: Arc::new(BuiltinProgram::new_loader(
+            Config::default(),
+            FunctionRegistry::default(),
+        )),
+    };
+
+    program_cache.fork_graph = Some(Arc::downgrade(&fork_graph));
+    // add programs to cache
+    for key in account_keys.iter() {
+        if let Some(account) = mock_bank.get_account_shared_data(key) {
+            if account.executable() && *account.owner() == solana_sdk::bpf_loader_upgradeable::id()
+            {
+                let data = account.data();
+                // A program account's data is the 4-byte state discriminant
+                // followed by the programdata address.
+                let program_data_account_key = Pubkey::try_from(data[4..].to_vec()).unwrap();
+                let program_data_account = mock_bank
+                    .get_account_shared_data(&program_data_account_key)
+                    .unwrap();
+                let program_data = program_data_account.data();
+                // Skip the 45-byte programdata metadata (discriminant,
+                // deployment slot, upgrade authority) to reach the ELF bytes.
+                let elf_bytes = program_data[45..].to_vec();
+
+                let program_runtime_environment =
+                    program_cache.environments.program_runtime_v1.clone();
+                program_cache.assign_program(
+                    *key,
+                    Arc::new(
+                        ProgramCacheEntry::new(
+                            &solana_sdk::bpf_loader_upgradeable::id(),
+                            program_runtime_environment,
+                            0,
+                            0,
+                            &elf_bytes,
+                            elf_bytes.len(),
+                            &mut LoadProgramMetrics::default(),
+                        )
+                        .unwrap(),
+                    ),
+                );
+            }
+        }
+    }
+
+    // We must fill in the sysvar cache entries
+    let time_now = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .expect("Time went backwards")
+        .as_secs() as i64;
+    let clock = Clock {
+        slot: DEPLOYMENT_SLOT,
+        epoch_start_timestamp: time_now.saturating_sub(10) as UnixTimestamp,
+        epoch: DEPLOYMENT_EPOCH,
+        leader_schedule_epoch: DEPLOYMENT_EPOCH,
+        unix_timestamp: time_now as UnixTimestamp,
+    };
+
+    let mut account_data = AccountSharedData::default();
+    account_data.set_data_from_slice(bincode::serialize(&clock).unwrap().as_slice());
+    mock_bank
+        .account_shared_data
+        .write()
+        .unwrap()
+        .insert(Clock::id(), account_data);
+}
diff --git 
a/svm/examples/json-rpc/test.json b/svm/examples/json-rpc/test.json new file mode 100644 index 00000000000000..2fd43ae43866aa --- /dev/null +++ b/svm/examples/json-rpc/test.json @@ -0,0 +1 @@ +[39,82,169,128,159,226,211,180,118,92,132,200,38,92,230,90,221,95,252,83,174,5,205,251,125,219,15,82,119,57,3,125,134,169,60,216,172,10,24,129,71,172,121,154,5,13,100,84,126,135,69,153,3,163,184,126,153,0,99,201,89,63,43,24] \ No newline at end of file From ed51e70c2e6528f602ad4f8fde718f60d7da2d0c Mon Sep 17 00:00:00 2001 From: Brooks Date: Sun, 6 Oct 2024 17:06:49 -0400 Subject: [PATCH 444/529] Adds timing metrics for Bank::hash_internal_state() (#3080) --- runtime/src/bank.rs | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 5bf5dd2f75603a..e6ad2bc0474025 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5395,19 +5395,22 @@ impl Bank { /// Hash the `accounts` HashMap. This represents a validator's interpretation /// of the delta of the ledger since the last vote and up to now fn hash_internal_state(&self) -> Hash { + let measure_total = Measure::start(""); + let slot = self.slot(); let ignore = (!self.is_partitioned_rewards_feature_enabled() && self.force_partition_rewards_in_first_block_of_epoch()) .then_some(sysvar::epoch_rewards::id()); - let accounts_delta_hash = self - .rc - .accounts - .accounts_db - .calculate_accounts_delta_hash_internal( - slot, - ignore, - self.skipped_rewrites.lock().unwrap().clone(), - ); + let (accounts_delta_hash, accounts_delta_hash_us) = measure_us!({ + self.rc + .accounts + .accounts_db + .calculate_accounts_delta_hash_internal( + slot, + ignore, + self.skipped_rewrites.lock().unwrap().clone(), + ) + }); let mut signature_count_buf = [0u8; 8]; LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count()); @@ -5469,6 +5472,12 @@ impl Bank { .get_bank_hash_stats(slot) .expect("No bank hash stats were found for this bank, that should not be possible"); + let total_us = measure_total.end_as_us(); + datapoint_info!( + "bank-hash_internal_state", + ("total_us", total_us, i64), + ("accounts_delta_hash_us", accounts_delta_hash_us, i64), + ); info!( "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}{}", accounts_delta_hash.0, From 0f38e039724e6376d702d42930db2cb5cf12b692 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Mon, 7 Oct 2024 10:03:18 +0400 Subject: [PATCH 445/529] Extract solana-rent crate (#3022) * extract rent crate * add #![no_std] to rent crate * missing dev deps * update digest * update digest --- Cargo.lock | 14 +++++++++ Cargo.toml | 2 ++ programs/sbf/Cargo.lock | 10 +++++++ runtime/src/bank/serde_snapshot.rs | 2 +- sdk/program/Cargo.toml | 2 ++ sdk/program/src/lib.rs | 3 +- sdk/rent/Cargo.toml | 31 ++++++++++++++++++++ sdk/{program/src/rent.rs => rent/src/lib.rs} | 22 ++++++++++++-- sdk/src/genesis_config.rs | 2 +- 9 files changed, 81 insertions(+), 7 deletions(-) create mode 100644 sdk/rent/Cargo.toml rename sdk/{program/src/rent.rs => rent/src/lib.rs} (92%) diff --git a/Cargo.lock b/Cargo.lock index 982eb8dff236b4..40a271eb1a6c78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7250,6 +7250,7 @@ dependencies = [ "solana-program-option", "solana-program-pack", "solana-pubkey", + "solana-rent", "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", @@ -7476,6 +7477,19 @@ dependencies = [ "uriparse", ] +[[package]] +name = "solana-rent" 
+version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-clock", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-sdk-macro", + "static_assertions", +] + [[package]] name = "solana-rpc" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index c1cfbfc2f8af82..a61b7aacad85fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,6 +121,7 @@ members = [ "sdk/program-option", "sdk/program-pack", "sdk/pubkey", + "sdk/rent", "sdk/serde-varint", "sdk/serialize-utils", "sdk/sha256-hasher", @@ -440,6 +441,7 @@ solana-pubsub-client = { path = "pubsub-client", version = "=2.1.0" } solana-quic-client = { path = "quic-client", version = "=2.1.0" } solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" } solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-features = false } +solana-rent = { path = "sdk/rent", version = "=2.1.0", default-features = false } solana-sanitize = { path = "sanitize", version = "=2.1.0" } solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } solana-serialize-utils = { path = "sdk/serialize-utils", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 69c5ae6de9e2f0..bb2021ea6c2477 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5652,6 +5652,7 @@ dependencies = [ "solana-program-option", "solana-program-pack", "solana-pubkey", + "solana-rent", "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", @@ -5854,6 +5855,15 @@ dependencies = [ "uriparse", ] +[[package]] +name = "solana-rent" +version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-sdk-macro", +] + [[package]] name = "solana-rpc" version = "2.1.0" diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index d0a422e1ad95d0..1235afd319062b 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -535,7 +535,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "6d4H7gw1hSrspdTew8dAXZ5dZT1mwFc6VZdXnkuggJ8E") + frozen_abi(digest = "8hwm4YsQXJWGZdp762SkJnDok29LXKwFtmW9oQ2KSzrN") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 2919a7bb5689ad..f7bc8ba8aeaa96 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -53,6 +53,7 @@ solana-program-memory = { workspace = true } solana-program-option = { workspace = true } solana-program-pack = { workspace = true } solana-pubkey = { workspace = true, features = ["bytemuck", "curve25519", "serde", "std"] } +solana-rent = { workspace = true, features = ["serde"] } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } @@ -128,6 +129,7 @@ frozen-abi = [ "solana-hash/frozen-abi", "solana-instruction/frozen-abi", "solana-pubkey/frozen-abi", + "solana-rent/frozen-abi", "solana-short-vec/frozen-abi" ] diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index b853cd07d93be3..de02676c1f7c28 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -508,7 +508,6 @@ pub mod program; pub mod program_error; pub mod program_stubs; pub mod program_utils; -pub mod rent; pub mod secp256k1_program; pub mod slot_hashes; pub mod slot_history; @@ -542,7 +541,7 @@ pub use { solana_account_info::{self as account_info, debug_account_data}, solana_clock as clock, solana_msg::msg, - solana_program_option as program_option, solana_pubkey as pubkey, + 
solana_program_option as program_option, solana_pubkey as pubkey, solana_rent as rent, }; /// The [config native program][np]. diff --git a/sdk/rent/Cargo.toml b/sdk/rent/Cargo.toml new file mode 100644 index 00000000000000..d3e978bdcdc084 --- /dev/null +++ b/sdk/rent/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "solana-rent" +description = "Configuration for Solana network rent." +documentation = "https://docs.rs/solana-rent" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-sdk-macro = { workspace = true } + +[dev-dependencies] +solana-clock = { workspace = true } +static_assertions = { workspace = true } + +[features] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +serde = ["dep:serde", "dep:serde_derive"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/program/src/rent.rs b/sdk/rent/src/lib.rs similarity index 92% rename from sdk/program/src/rent.rs rename to sdk/rent/src/lib.rs index 47308066d927cc..7b50e8953b288f 100644 --- a/sdk/program/src/rent.rs +++ b/sdk/rent/src/lib.rs @@ -3,13 +3,29 @@ //! [rent]: https://docs.solanalabs.com/implemented-proposals/rent #![allow(clippy::arithmetic_side_effects)] +#![no_std] +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] +#[cfg(feature = "frozen-abi")] +extern crate std; -use {solana_clock::DEFAULT_SLOTS_PER_EPOCH, solana_sdk_macro::CloneZeroed}; +use solana_sdk_macro::CloneZeroed; + +// inlined to avoid solana_clock dep +const DEFAULT_SLOTS_PER_EPOCH: u64 = 432_000; +#[cfg(test)] +static_assertions::const_assert_eq!( + DEFAULT_SLOTS_PER_EPOCH, + solana_clock::DEFAULT_SLOTS_PER_EPOCH +); /// Configuration of network rent. #[repr(C)] -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Debug)] +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Deserialize, serde_derive::Serialize) +)] +#[derive(PartialEq, CloneZeroed, Debug)] pub struct Rent { /// Rental rate in lamports/byte-year. 
pub lamports_per_byte_year: u64, diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index ba58d5bd8811f8..8e3f89f254830e 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -87,7 +87,7 @@ impl FromStr for ClusterType { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "z6vuQfrTaknTiRs1giPFzG1Jcw8eReidFTNDTmaX6GN") + frozen_abi(digest = "GDkrvVXezJYuGHcKSK19wvPBUMfKsifKQtoBxH1RpriL") )] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct GenesisConfig { From 9fc633c41c01a4f89dcb356656b9090798fc2e38 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 7 Oct 2024 10:53:27 -0400 Subject: [PATCH 446/529] Refactors test_serialize_bank_snapshot() (#3093) --- runtime/src/bank/serde_snapshot.rs | 353 +++++++++++++++-------------- 1 file changed, 182 insertions(+), 171 deletions(-) diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 1235afd319062b..3f348e4e7a7d24 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -92,185 +92,196 @@ mod tests { } /// Test roundtrip serialize/deserialize of a bank - #[test_case(StorageAccess::Mmap, false, false)] - #[test_case(StorageAccess::Mmap, false, true)] - #[test_case(StorageAccess::Mmap, true, false)] - #[test_case(StorageAccess::Mmap, true, true)] - #[test_case(StorageAccess::File, false, false)] - #[test_case(StorageAccess::File, false, true)] - #[test_case(StorageAccess::File, true, false)] - #[test_case(StorageAccess::File, true, true)] - fn test_serialize_bank_snapshot( - storage_access: StorageAccess, - has_incremental_snapshot_persistence: bool, - has_epoch_accounts_hash: bool, - ) { - let (mut genesis_config, _) = create_genesis_config(500); - genesis_config.epoch_schedule = EpochSchedule::custom(400, 400, false); - let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let deposit_amount = bank0.get_minimum_balance_for_rent_exemption(0); - let eah_start_slot = epoch_accounts_hash_utils::calculation_start(&bank0); - let bank1 = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); - - // Create an account on a non-root fork - let key1 = Pubkey::new_unique(); - bank_test_utils::deposit(&bank1, &key1, deposit_amount).unwrap(); - - // If setting an initial EAH, then the bank being snapshotted must be in the EAH calculation - // window. Otherwise serializing below will *not* include the EAH in the bank snapshot, - // and the later-deserialized bank's EAH will not match the expected EAH. 
- let bank2_slot = if has_epoch_accounts_hash { - eah_start_slot - } else { - 0 - } + 2; - let mut bank2 = Bank::new_from_parent(bank0, &Pubkey::default(), bank2_slot); - - // Test new account - let key2 = Pubkey::new_unique(); - bank_test_utils::deposit(&bank2, &key2, deposit_amount).unwrap(); - assert_eq!(bank2.get_balance(&key2), deposit_amount); - - let key3 = Pubkey::new_unique(); - bank_test_utils::deposit(&bank2, &key3, 0).unwrap(); - - let accounts_db = &bank2.rc.accounts.accounts_db; - - bank2.squash(); - bank2.force_flush_accounts_cache(); - let expected_accounts_hash = AccountsHash(Hash::new_unique()); - accounts_db.set_accounts_hash(bank2_slot, (expected_accounts_hash, 30)); - - let expected_incremental_snapshot_persistence = - has_incremental_snapshot_persistence.then(|| BankIncrementalSnapshotPersistence { - full_slot: bank2_slot - 1, - full_hash: SerdeAccountsHash(Hash::new_unique()), - full_capitalization: 31, - incremental_hash: SerdeIncrementalAccountsHash(Hash::new_unique()), - incremental_capitalization: 32, - }); - - let expected_epoch_accounts_hash = has_epoch_accounts_hash.then(|| { - let epoch_accounts_hash = EpochAccountsHash::new(Hash::new_unique()); - accounts_db - .epoch_accounts_hash_manager - .set_valid(epoch_accounts_hash, eah_start_slot); - epoch_accounts_hash - }); - - // Only if a bank was recently recreated from a snapshot will it have an epoch stakes entry - // of type "delegations" which cannot be serialized into the versioned epoch stakes map. Simulate - // this condition by replacing the epoch 0 stakes map of stake accounts with an epoch stakes map - // of delegations. - { - assert_eq!(bank2.epoch_stakes.len(), 2); - assert!(bank2 - .epoch_stakes - .values() - .all(|epoch_stakes| matches!(epoch_stakes.stakes(), &StakesEnum::Accounts(_)))); - - let StakesEnum::Accounts(stake_accounts) = - bank2.epoch_stakes.remove(&0).unwrap().stakes().clone() - else { - panic!("expected the epoch 0 stakes entry to have stake accounts"); - }; - - bank2.epoch_stakes.insert( - 0, - EpochStakes::new(Arc::new(StakesEnum::Delegations(stake_accounts.into())), 0), + #[test] + fn test_serialize_bank_snapshot() { + let storage_access_iter = [StorageAccess::Mmap, StorageAccess::File].into_iter(); + let has_incremental_snapshot_persistence_iter = [false, true].into_iter(); + let has_epoch_accounts_hash_iter = [false, true].into_iter(); + + for (storage_access, has_incremental_snapshot_persistence, has_epoch_accounts_hash) in itertools::iproduct!( + storage_access_iter, + has_incremental_snapshot_persistence_iter, + has_epoch_accounts_hash_iter + ) { + do_serialize_bank_snapshot( + storage_access, + has_incremental_snapshot_persistence, + has_epoch_accounts_hash, ); } - let mut buf = Vec::new(); - let cursor = Cursor::new(&mut buf); - let mut writer = BufWriter::new(cursor); - { - let mut bank_fields = bank2.get_fields_to_serialize(); - // Ensure that epoch_stakes and versioned_epoch_stakes are each - // serialized with at least one entry to verify that epoch stakes - // entries are combined correctly during deserialization - assert!(!bank_fields.epoch_stakes.is_empty()); - assert!(!bank_fields.versioned_epoch_stakes.is_empty()); - - let versioned_epoch_stakes = mem::take(&mut bank_fields.versioned_epoch_stakes); - serde_snapshot::serialize_bank_snapshot_into( - &mut writer, - bank_fields, - accounts_db.get_bank_hash_stats(bank2_slot).unwrap(), - accounts_db.get_accounts_delta_hash(bank2_slot).unwrap(), - expected_accounts_hash, - 
&get_storages_to_serialize(&bank2.get_snapshot_storages(None)), - ExtraFieldsToSerialize { - lamports_per_signature: bank2.fee_rate_governor.lamports_per_signature, - incremental_snapshot_persistence: expected_incremental_snapshot_persistence - .as_ref(), - epoch_accounts_hash: expected_epoch_accounts_hash, - versioned_epoch_stakes, - }, - accounts_db.write_version.load(Ordering::Acquire), + fn do_serialize_bank_snapshot( + storage_access: StorageAccess, + has_incremental_snapshot_persistence: bool, + has_epoch_accounts_hash: bool, + ) { + let (mut genesis_config, _) = create_genesis_config(500); + genesis_config.epoch_schedule = EpochSchedule::custom(400, 400, false); + let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); + let deposit_amount = bank0.get_minimum_balance_for_rent_exemption(0); + let eah_start_slot = epoch_accounts_hash_utils::calculation_start(&bank0); + let bank1 = Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 1); + + // Create an account on a non-root fork + let key1 = Pubkey::new_unique(); + bank_test_utils::deposit(&bank1, &key1, deposit_amount).unwrap(); + + // If setting an initial EAH, then the bank being snapshotted must be in the EAH calculation + // window. Otherwise serializing below will *not* include the EAH in the bank snapshot, + // and the later-deserialized bank's EAH will not match the expected EAH. + let bank2_slot = if has_epoch_accounts_hash { + eah_start_slot + } else { + 0 + } + 2; + let mut bank2 = Bank::new_from_parent(bank0, &Pubkey::default(), bank2_slot); + + // Test new account + let key2 = Pubkey::new_unique(); + bank_test_utils::deposit(&bank2, &key2, deposit_amount).unwrap(); + assert_eq!(bank2.get_balance(&key2), deposit_amount); + + let key3 = Pubkey::new_unique(); + bank_test_utils::deposit(&bank2, &key3, 0).unwrap(); + + let accounts_db = &bank2.rc.accounts.accounts_db; + + bank2.squash(); + bank2.force_flush_accounts_cache(); + let expected_accounts_hash = AccountsHash(Hash::new_unique()); + accounts_db.set_accounts_hash(bank2_slot, (expected_accounts_hash, 30)); + + let expected_incremental_snapshot_persistence = has_incremental_snapshot_persistence + .then(|| BankIncrementalSnapshotPersistence { + full_slot: bank2_slot - 1, + full_hash: SerdeAccountsHash(Hash::new_unique()), + full_capitalization: 31, + incremental_hash: SerdeIncrementalAccountsHash(Hash::new_unique()), + incremental_capitalization: 32, + }); + + let expected_epoch_accounts_hash = has_epoch_accounts_hash.then(|| { + let epoch_accounts_hash = EpochAccountsHash::new(Hash::new_unique()); + accounts_db + .epoch_accounts_hash_manager + .set_valid(epoch_accounts_hash, eah_start_slot); + epoch_accounts_hash + }); + + // Only if a bank was recently recreated from a snapshot will it have an epoch stakes entry + // of type "delegations" which cannot be serialized into the versioned epoch stakes map. Simulate + // this condition by replacing the epoch 0 stakes map of stake accounts with an epoch stakes map + // of delegations. 
+ { + assert_eq!(bank2.epoch_stakes.len(), 2); + assert!(bank2 + .epoch_stakes + .values() + .all(|epoch_stakes| matches!(epoch_stakes.stakes(), &StakesEnum::Accounts(_)))); + + let StakesEnum::Accounts(stake_accounts) = + bank2.epoch_stakes.remove(&0).unwrap().stakes().clone() + else { + panic!("expected the epoch 0 stakes entry to have stake accounts"); + }; + + bank2.epoch_stakes.insert( + 0, + EpochStakes::new(Arc::new(StakesEnum::Delegations(stake_accounts.into())), 0), + ); + } + + let mut buf = Vec::new(); + let cursor = Cursor::new(&mut buf); + let mut writer = BufWriter::new(cursor); + { + let mut bank_fields = bank2.get_fields_to_serialize(); + // Ensure that epoch_stakes and versioned_epoch_stakes are each + // serialized with at least one entry to verify that epoch stakes + // entries are combined correctly during deserialization + assert!(!bank_fields.epoch_stakes.is_empty()); + assert!(!bank_fields.versioned_epoch_stakes.is_empty()); + + let versioned_epoch_stakes = mem::take(&mut bank_fields.versioned_epoch_stakes); + serde_snapshot::serialize_bank_snapshot_into( + &mut writer, + bank_fields, + accounts_db.get_bank_hash_stats(bank2_slot).unwrap(), + accounts_db.get_accounts_delta_hash(bank2_slot).unwrap(), + expected_accounts_hash, + &get_storages_to_serialize(&bank2.get_snapshot_storages(None)), + ExtraFieldsToSerialize { + lamports_per_signature: bank2.fee_rate_governor.lamports_per_signature, + incremental_snapshot_persistence: expected_incremental_snapshot_persistence + .as_ref(), + epoch_accounts_hash: expected_epoch_accounts_hash, + versioned_epoch_stakes, + }, + accounts_db.write_version.load(Ordering::Acquire), + ) + .unwrap(); + } + drop(writer); + + // Now deserialize the serialized bank and ensure it matches the original bank + + // Create a new set of directories for this bank's accounts + let (_accounts_dir, dbank_paths) = get_temp_accounts_paths(4).unwrap(); + // Create a directory to simulate AppendVecs unpackaged from a snapshot tar + let copied_accounts = TempDir::new().unwrap(); + let storage_and_next_append_vec_id = + copy_append_vecs(accounts_db, copied_accounts.path(), storage_access).unwrap(); + + let cursor = Cursor::new(buf.as_slice()); + let mut reader = BufReader::new(cursor); + let mut snapshot_streams = SnapshotStreams { + full_snapshot_stream: &mut reader, + incremental_snapshot_stream: None, + }; + let dbank = serde_snapshot::bank_from_streams( + &mut snapshot_streams, + &dbank_paths, + storage_and_next_append_vec_id, + &genesis_config, + &RuntimeConfig::default(), + None, + None, + AccountSecondaryIndexes::default(), + None, + AccountShrinkThreshold::default(), + false, + Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + None, + Arc::default(), ) .unwrap(); - } - drop(writer); - - // Now deserialize the serialized bank and ensure it matches the original bank - - // Create a new set of directories for this bank's accounts - let (_accounts_dir, dbank_paths) = get_temp_accounts_paths(4).unwrap(); - // Create a directory to simulate AppendVecs unpackaged from a snapshot tar - let copied_accounts = TempDir::new().unwrap(); - let storage_and_next_append_vec_id = - copy_append_vecs(accounts_db, copied_accounts.path(), storage_access).unwrap(); - - let cursor = Cursor::new(buf.as_slice()); - let mut reader = BufReader::new(cursor); - let mut snapshot_streams = SnapshotStreams { - full_snapshot_stream: &mut reader, - incremental_snapshot_stream: None, - }; - let dbank = serde_snapshot::bank_from_streams( - &mut snapshot_streams, - &dbank_paths, - 
storage_and_next_append_vec_id, - &genesis_config, - &RuntimeConfig::default(), - None, - None, - AccountSecondaryIndexes::default(), - None, - AccountShrinkThreshold::default(), - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ) - .unwrap(); - assert_eq!(dbank.get_balance(&key1), 0); - assert_eq!(dbank.get_balance(&key2), deposit_amount); - assert_eq!(dbank.get_balance(&key3), 0); - if let Some(incremental_snapshot_persistence) = - expected_incremental_snapshot_persistence.as_ref() - { - assert_eq!(dbank.get_accounts_hash(), None); + assert_eq!(dbank.get_balance(&key1), 0); + assert_eq!(dbank.get_balance(&key2), deposit_amount); + assert_eq!(dbank.get_balance(&key3), 0); + if let Some(incremental_snapshot_persistence) = + expected_incremental_snapshot_persistence.as_ref() + { + assert_eq!(dbank.get_accounts_hash(), None); + assert_eq!( + dbank.get_incremental_accounts_hash(), + Some( + incremental_snapshot_persistence + .incremental_hash + .clone() + .into() + ), + ); + } else { + assert_eq!(dbank.get_accounts_hash(), Some(expected_accounts_hash)); + assert_eq!(dbank.get_incremental_accounts_hash(), None); + } assert_eq!( - dbank.get_incremental_accounts_hash(), - Some( - incremental_snapshot_persistence - .incremental_hash - .clone() - .into() - ), + dbank.get_epoch_accounts_hash_to_serialize(), + expected_epoch_accounts_hash, ); - } else { - assert_eq!(dbank.get_accounts_hash(), Some(expected_accounts_hash)); - assert_eq!(dbank.get_incremental_accounts_hash(), None); - } - assert_eq!( - dbank.get_epoch_accounts_hash_to_serialize(), - expected_epoch_accounts_hash, - ); - assert_eq!(dbank, bank2); + assert_eq!(dbank, bank2); + } } fn add_root_and_flush_write_cache(bank: &Bank) { From 8a78fdd118b8a15e4025388f21a848fd9188687d Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 7 Oct 2024 11:06:48 -0400 Subject: [PATCH 447/529] Adds "slot" to bank-hash_internal_state datapoint (#3089) --- runtime/src/bank.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e6ad2bc0474025..f881c548c89f1d 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5475,6 +5475,7 @@ impl Bank { let total_us = measure_total.end_as_us(); datapoint_info!( "bank-hash_internal_state", + ("slot", slot, i64), ("total_us", total_us, i64), ("accounts_delta_hash_us", accounts_delta_hash_us, i64), ); From 78595b8c308b62135a10f6e356117cd88e8684e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 00:57:28 +0800 Subject: [PATCH 448/529] build(deps): bump futures from 0.3.30 to 0.3.31 (#3090) * build(deps): bump futures from 0.3.30 to 0.3.31 Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.30 to 0.3.31. - [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.30...0.3.31) --- updated-dependencies: - dependency-name: futures dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 90 ++++++++++++++++++++--------------------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 80 ++++++++++++++++++------------------ 3 files changed, 86 insertions(+), 86 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40a271eb1a6c78..a32cce3bd15811 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,7 +165,7 @@ dependencies = [ "crossbeam-channel", "csv", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "histogram", "itertools 0.12.1", "log", @@ -2371,9 +2371,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2386,9 +2386,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2396,15 +2396,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -2414,15 +2414,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -2431,15 +2431,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -2449,9 +2449,9 @@ checksum = 
"f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2607,7 +2607,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.30", + "futures 0.3.31", "log", "reqwest", "serde", @@ -2627,7 +2627,7 @@ checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" dependencies = [ "cfg-if 1.0.0", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "futures-timer", "no-std-compat", "nonzero_ext", @@ -2888,7 +2888,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.30", + "futures 0.3.31", "headers", "http", "hyper", @@ -3174,7 +3174,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -3192,7 +3192,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "futures-executor", "futures-util", "log", @@ -3207,7 +3207,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-client-transports", ] @@ -3229,7 +3229,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -3245,7 +3245,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -3260,7 +3260,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "lazy_static", "log", @@ -3276,7 +3276,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.30", + "futures 0.3.31", "globset", "jsonrpc-core", "lazy_static", @@ -4010,7 +4010,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "libc", "log", "rand 0.7.3", @@ -5373,7 +5373,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" dependencies = [ "dashmap", - "futures 0.3.30", + "futures 0.3.31", "lazy_static", "log", "parking_lot 0.12.3", @@ -5618,7 +5618,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "httparse", "log", "rand 0.8.5", @@ -5833,7 +5833,7 @@ name = "solana-banks-client" version = "2.1.0" dependencies = [ "borsh 1.5.1", - "futures 0.3.30", + "futures 0.3.31", "solana-banks-interface", "solana-banks-server", "solana-program", @@ -5861,7 +5861,7 @@ version = "2.1.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "solana-banks-interface", "solana-client", "solana-feature-set", @@ -6228,7 +6228,7 @@ dependencies = [ "bincode", "crossbeam-channel", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "futures-util", "indexmap 2.6.0", "indicatif", @@ -6361,7 +6361,7 @@ dependencies = [ "dashmap", "etcd-client", "fs_extra", - "futures 0.3.30", + "futures 0.3.31", "histogram", "itertools 0.12.1", "lazy_static", @@ -6856,7 +6856,7 @@ dependencies = [ "dashmap", "eager", "fs_extra", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy-lru", "lazy_static", @@ -7429,7 +7429,7 @@ dependencies = [ "async-lock", "async-trait", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy_static", "log", @@ -7562,7 +7562,7 @@ dependencies = [ "bincode", "bs58", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "indicatif", "jsonrpc-core", "jsonrpc-http-server", @@ -7611,7 +7611,7 @@ version = "2.1.0" dependencies = [ "anyhow", "clap 2.33.3", - "futures 0.3.30", + "futures 0.3.31", "serde_json", "solana-account-decoder", "solana-clap-utils", @@ -8002,7 +8002,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.30", + "futures 0.3.31", "goauth", "http", "hyper", @@ -8049,7 +8049,7 @@ dependencies = [ "bytes", "crossbeam-channel", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "futures-util", "governor", "histogram", @@ -8425,7 +8425,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy-lru", "log", @@ -8462,7 +8462,7 @@ dependencies = [ name = "solana-type-overrides" version = "2.1.0" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "lazy_static", "rand 0.8.5", "shuttle", @@ -9177,7 +9177,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.30", + "futures 0.3.31", "humantime", "opentelemetry", "pin-project", diff --git a/Cargo.toml b/Cargo.toml index a61b7aacad85fc..d52b8345316e67 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -259,7 +259,7 @@ fd-lock = "3.0.13" flate2 = "1.0.31" fnv = "1.0.7" fs_extra = "1.3.0" -futures = "0.3.30" +futures = "0.3.31" futures-util = "0.3.29" gag = "1.0.0" generic-array = { version = "0.14.7", default-features = false } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index bb2021ea6c2477..c1f2fc835e5c05 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1793,9 +1793,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = 
"65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1808,9 +1808,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1818,15 +1818,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1836,15 +1836,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1853,15 +1853,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1871,9 +1871,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1966,7 +1966,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.30", + "futures 0.3.31", "log", "reqwest", "serde", @@ -1986,7 +1986,7 @@ checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" dependencies = [ "cfg-if 1.0.0", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "futures-timer", "no-std-compat", "nonzero_ext", @@ -2212,7 +2212,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.30", + "futures 0.3.31", "headers", "http", "hyper", @@ -2487,7 +2487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2505,7 +2505,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "futures-executor", "futures-util", "log", @@ -2520,7 +2520,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-client-transports", ] @@ -2542,7 +2542,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2558,7 +2558,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2573,7 +2573,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "jsonrpc-core", "lazy_static", "log", @@ -2589,7 +2589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.30", + "futures 0.3.31", "globset", "jsonrpc-core", "lazy_static", @@ -3339,7 +3339,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "libc", "log", "rand 0.7.3", @@ -4681,7 +4681,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "httparse", "log", "rand 0.8.5", @@ -4795,7 +4795,7 @@ name = "solana-banks-client" version = "2.1.0" dependencies = [ "borsh 1.5.1", - "futures 0.3.30", + "futures 0.3.31", "solana-banks-interface", "solana-program", "solana-sdk", @@ -4821,7 +4821,7 @@ version = "2.1.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "solana-banks-interface", "solana-client", "solana-feature-set", @@ -4983,7 +4983,7 @@ dependencies = [ "async-trait", "bincode", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "futures-util", "indexmap 2.6.0", "indicatif", @@ -5078,7 +5078,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "etcd-client", - "futures 0.3.30", + "futures 0.3.31", "histogram", "itertools 0.12.1", "lazy_static", @@ -5414,7 +5414,7 @@ dependencies = [ "dashmap", "eager", "fs_extra", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", 
"lazy-lru", "lazy_static", @@ -5811,7 +5811,7 @@ version = "2.1.0" dependencies = [ "async-lock", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy_static", "log", @@ -6723,7 +6723,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.30", + "futures 0.3.31", "goauth", "http", "hyper", @@ -6768,7 +6768,7 @@ dependencies = [ "bytes", "crossbeam-channel", "dashmap", - "futures 0.3.30", + "futures 0.3.31", "futures-util", "governor", "histogram", @@ -6989,7 +6989,7 @@ dependencies = [ "bincode", "bytes", "crossbeam-channel", - "futures 0.3.30", + "futures 0.3.31", "itertools 0.12.1", "lazy-lru", "log", @@ -7643,7 +7643,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.30", + "futures 0.3.31", "humantime", "opentelemetry", "pin-project", From 2b3f1ff9ff1b686e548fb23d2c070099d814862b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 00:58:11 +0800 Subject: [PATCH 449/529] build(deps): bump serde_with from 3.9.0 to 3.11.0 (#3092) * build(deps): bump serde_with from 3.9.0 to 3.11.0 Bumps [serde_with](https://github.com/jonasbb/serde_with) from 3.9.0 to 3.11.0. - [Release notes](https://github.com/jonasbb/serde_with/releases) - [Commits](https://github.com/jonasbb/serde_with/compare/v3.9.0...v3.11.0) --- updated-dependencies: - dependency-name: serde_with dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a32cce3bd15811..2587a2ac9494a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5320,9 +5320,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "serde", "serde_derive", @@ -5331,9 +5331,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index d52b8345316e67..348426124459b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -352,7 +352,7 @@ serde = "1.0.210" # must match the serde_derive version, see https://github.com/ serde_bytes = "0.11.15" serde_derive = "1.0.210" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.128" -serde_with = { version = "3.9.0", default-features = false } +serde_with = { version = "3.11.0", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" sha2 = "0.10.8" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c1f2fc835e5c05..031715755939b8 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4462,9 +4462,9 @@ dependencies = [ [[package]] name = 
"serde_with" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "serde", "serde_derive", @@ -4473,9 +4473,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", From 10178d124b60f295b878b63b65c971530894d6f7 Mon Sep 17 00:00:00 2001 From: dmakarov Date: Mon, 7 Oct 2024 15:06:59 -0400 Subject: [PATCH 450/529] Move accounts-db stats to a separate module (#3084) * Move accounts-db stats to a separate module * Visibility * Digest --- accounts-db/src/accounts_db.rs | 754 +----------------------- accounts-db/src/accounts_db/stats.rs | 777 +++++++++++++++++++++++++ accounts-db/src/ancient_append_vecs.rs | 4 +- runtime/src/bank/serde_snapshot.rs | 4 +- runtime/src/serde_snapshot.rs | 4 +- runtime/src/snapshot_minimizer.rs | 4 +- runtime/src/snapshot_package.rs | 2 +- runtime/src/snapshot_utils.rs | 2 +- 8 files changed, 794 insertions(+), 757 deletions(-) create mode 100644 accounts-db/src/accounts_db/stats.rs diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index a038a7449ecf83..770212311becb1 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -20,6 +20,7 @@ mod geyser_plugin_utils; mod scan_account_storage; +pub mod stats; #[cfg(feature = "dev-context-only-utils")] use qualifier_attr::qualifiers; @@ -30,6 +31,10 @@ use { meta::StoredAccountMeta, AccountStorage, AccountStorageStatus, ShrinkInProgress, }, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, + accounts_db::stats::{ + AccountsStats, BankHashStats, CleanAccountsStats, FlushStats, PurgeStats, + ShrinkAncientStats, ShrinkStats, ShrinkStatsSub, StoreAccountsTiming, + }, accounts_file::{ AccountsFile, AccountsFileError, AccountsFileProvider, MatchAccountOwnerError, StorageAccess, ALIGN_BOUNDARY_OFFSET, @@ -56,10 +61,7 @@ use { ancient_append_vecs::{ get_ancient_append_vec_capacity, is_ancient, AccountsToStore, StorageSelector, }, - append_vec::{ - aligned_stored_size, APPEND_VEC_MMAPPED_FILES_DIRTY, APPEND_VEC_MMAPPED_FILES_OPEN, - APPEND_VEC_OPEN_AS_FILE_IO, STORE_META_OVERHEAD, - }, + append_vec::{aligned_stored_size, STORE_META_OVERHEAD}, cache_hash_data::{CacheHashData, DeletionPolicy as CacheHashDeletionPolicy}, contains::Contains, epoch_accounts_hash::EpochAccountsHashManager, @@ -76,7 +78,6 @@ use { rand::{thread_rng, Rng}, rayon::{prelude::*, ThreadPool}, seqlock::SeqLock, - serde::{Deserialize, Serialize}, smallvec::SmallVec, solana_lattice_hash::lt_hash::LtHash, solana_measure::{measure::Measure, measure_us}, @@ -91,7 +92,6 @@ use { pubkey::Pubkey, rent_collector::RentCollector, saturating_add_assign, - timing::AtomicInterval, transaction::SanitizedTransaction, }, std::{ @@ -1282,58 +1282,6 @@ pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec, Vec(&mut self, account: &T) { - if account.is_zero_lamport() { - self.num_removed_accounts += 1; - } else { - self.num_updated_accounts += 1; - } - self.total_data_len = self - .total_data_len - .wrapping_add(account.data().len() as u64); - if account.executable() { - 
self.num_executable_accounts += 1; - } - self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports()); - } - - pub fn accumulate(&mut self, other: &BankHashStats) { - self.num_updated_accounts += other.num_updated_accounts; - self.num_removed_accounts += other.num_removed_accounts; - self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len); - self.num_lamports_stored = self - .num_lamports_stored - .wrapping_add(other.num_lamports_stored); - self.num_executable_accounts += other.num_executable_accounts; - } -} - -#[derive(Default, Debug)] -pub struct StoreAccountsTiming { - store_accounts_elapsed: u64, - update_index_elapsed: u64, - handle_reclaims_elapsed: u64, -} - -impl StoreAccountsTiming { - fn accumulate(&mut self, other: &Self) { - self.store_accounts_elapsed += other.store_accounts_elapsed; - self.update_index_elapsed += other.update_index_elapsed; - self.handle_reclaims_elapsed += other.handle_reclaims_elapsed; - } -} - #[derive(Default, Debug)] struct CleaningInfo { slot_list: SlotList, @@ -1517,126 +1465,6 @@ pub struct AccountsDb { pub is_experimental_accumulator_hash_enabled: AtomicBool, } -#[derive(Debug, Default)] -pub struct AccountsStats { - delta_hash_scan_time_total_us: AtomicU64, - delta_hash_accumulate_time_total_us: AtomicU64, - delta_hash_num: AtomicU64, - skipped_rewrites_num: AtomicUsize, - - last_store_report: AtomicInterval, - store_hash_accounts: AtomicU64, - calc_stored_meta: AtomicU64, - store_accounts: AtomicU64, - store_update_index: AtomicU64, - store_handle_reclaims: AtomicU64, - store_append_accounts: AtomicU64, - pub stakes_cache_check_and_store_us: AtomicU64, - store_num_accounts: AtomicU64, - store_total_data: AtomicU64, - create_store_count: AtomicU64, - store_get_slot_store: AtomicU64, - store_find_existing: AtomicU64, - dropped_stores: AtomicU64, - store_uncleaned_update: AtomicU64, - handle_dead_keys_us: AtomicU64, - purge_exact_us: AtomicU64, - purge_exact_count: AtomicU64, -} - -#[derive(Debug, Default)] -pub struct PurgeStats { - last_report: AtomicInterval, - safety_checks_elapsed: AtomicU64, - remove_cache_elapsed: AtomicU64, - remove_storage_entries_elapsed: AtomicU64, - drop_storage_entries_elapsed: AtomicU64, - num_cached_slots_removed: AtomicUsize, - num_stored_slots_removed: AtomicUsize, - total_removed_storage_entries: AtomicUsize, - total_removed_cached_bytes: AtomicU64, - total_removed_stored_bytes: AtomicU64, - scan_storages_elapsed: AtomicU64, - purge_accounts_index_elapsed: AtomicU64, - handle_reclaims_elapsed: AtomicU64, -} - -impl PurgeStats { - fn report(&self, metric_name: &'static str, report_interval_ms: Option) { - let should_report = report_interval_ms - .map(|report_interval_ms| self.last_report.should_update(report_interval_ms)) - .unwrap_or(true); - - if should_report { - datapoint_info!( - metric_name, - ( - "safety_checks_elapsed", - self.safety_checks_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "remove_cache_elapsed", - self.remove_cache_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "remove_storage_entries_elapsed", - self.remove_storage_entries_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "drop_storage_entries_elapsed", - self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "num_cached_slots_removed", - self.num_cached_slots_removed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "num_stored_slots_removed", - self.num_stored_slots_removed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - 
"total_removed_storage_entries", - self.total_removed_storage_entries - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "total_removed_cached_bytes", - self.total_removed_cached_bytes.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "total_removed_stored_bytes", - self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "scan_storages_elapsed", - self.scan_storages_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "purge_accounts_index_elapsed", - self.purge_accounts_index_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "handle_reclaims_elapsed", - self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ); - } - } -} - /// results from 'split_storages_ancient' #[derive(Debug, Default, PartialEq)] struct SplitAncientStorages { @@ -1825,576 +1653,6 @@ impl SplitAncientStorages { } } -#[derive(Debug, Default)] -struct FlushStats { - num_flushed: Saturating, - num_purged: Saturating, - total_size: Saturating, - store_accounts_timing: StoreAccountsTiming, - store_accounts_total_us: Saturating, -} - -impl FlushStats { - fn accumulate(&mut self, other: &Self) { - self.num_flushed += other.num_flushed; - self.num_purged += other.num_purged; - self.total_size += other.total_size; - self.store_accounts_timing - .accumulate(&other.store_accounts_timing); - self.store_accounts_total_us += other.store_accounts_total_us; - } -} - -#[derive(Debug, Default)] -struct LatestAccountsIndexRootsStats { - roots_len: AtomicUsize, - uncleaned_roots_len: AtomicUsize, - roots_range: AtomicU64, - rooted_cleaned_count: AtomicUsize, - unrooted_cleaned_count: AtomicUsize, - clean_unref_from_storage_us: AtomicU64, - clean_dead_slot_us: AtomicU64, -} - -impl LatestAccountsIndexRootsStats { - fn update(&self, accounts_index_roots_stats: &AccountsIndexRootsStats) { - if let Some(value) = accounts_index_roots_stats.roots_len { - self.roots_len.store(value, Ordering::Relaxed); - } - if let Some(value) = accounts_index_roots_stats.uncleaned_roots_len { - self.uncleaned_roots_len.store(value, Ordering::Relaxed); - } - if let Some(value) = accounts_index_roots_stats.roots_range { - self.roots_range.store(value, Ordering::Relaxed); - } - self.rooted_cleaned_count.fetch_add( - accounts_index_roots_stats.rooted_cleaned_count, - Ordering::Relaxed, - ); - self.unrooted_cleaned_count.fetch_add( - accounts_index_roots_stats.unrooted_cleaned_count, - Ordering::Relaxed, - ); - self.clean_unref_from_storage_us.fetch_add( - accounts_index_roots_stats.clean_unref_from_storage_us, - Ordering::Relaxed, - ); - self.clean_dead_slot_us.fetch_add( - accounts_index_roots_stats.clean_dead_slot_us, - Ordering::Relaxed, - ); - } - - fn report(&self) { - datapoint_info!( - "accounts_index_roots_len", - ( - "roots_len", - self.roots_len.load(Ordering::Relaxed) as i64, - i64 - ), - ( - "uncleaned_roots_len", - self.uncleaned_roots_len.load(Ordering::Relaxed) as i64, - i64 - ), - ( - "roots_range_width", - self.roots_range.load(Ordering::Relaxed) as i64, - i64 - ), - ( - "unrooted_cleaned_count", - self.unrooted_cleaned_count.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "rooted_cleaned_count", - self.rooted_cleaned_count.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "clean_unref_from_storage_us", - self.clean_unref_from_storage_us.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "clean_dead_slot_us", - self.clean_dead_slot_us.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "append_vecs_open", - APPEND_VEC_MMAPPED_FILES_OPEN.load(Ordering::Relaxed) as i64, - 
i64 - ), - ( - "append_vecs_dirty", - APPEND_VEC_MMAPPED_FILES_DIRTY.load(Ordering::Relaxed), - i64 - ), - ( - "append_vecs_open_as_file_io", - APPEND_VEC_OPEN_AS_FILE_IO.load(Ordering::Relaxed), - i64 - ) - ); - - // Don't need to reset since this tracks the latest updates, not a cumulative total - } -} - -#[derive(Debug, Default)] -struct CleanAccountsStats { - purge_stats: PurgeStats, - latest_accounts_index_roots_stats: LatestAccountsIndexRootsStats, - - // stats held here and reported by clean_accounts - clean_old_root_us: AtomicU64, - clean_old_root_reclaim_us: AtomicU64, - reset_uncleaned_roots_us: AtomicU64, - remove_dead_accounts_remove_us: AtomicU64, - remove_dead_accounts_shrink_us: AtomicU64, - clean_stored_dead_slots_us: AtomicU64, - uncleaned_roots_slot_list_1: AtomicU64, - get_account_sizes_us: AtomicU64, - slots_cleaned: AtomicU64, -} - -impl CleanAccountsStats { - fn report(&self) { - self.purge_stats.report("clean_purge_slots_stats", None); - self.latest_accounts_index_roots_stats.report(); - } -} - -#[derive(Debug, Default)] -pub(crate) struct ShrinkAncientStats { - pub(crate) shrink_stats: ShrinkStats, - pub(crate) ancient_append_vecs_shrunk: AtomicU64, - pub(crate) total_us: AtomicU64, - pub(crate) random_shrink: AtomicU64, - pub(crate) slots_considered: AtomicU64, - pub(crate) ancient_scanned: AtomicU64, - pub(crate) bytes_ancient_created: AtomicU64, - pub(crate) bytes_from_must_shrink: AtomicU64, - pub(crate) bytes_from_smallest_storages: AtomicU64, - pub(crate) bytes_from_newest_storages: AtomicU64, - pub(crate) many_ref_slots_skipped: AtomicU64, - pub(crate) slots_cannot_move_count: AtomicU64, - pub(crate) many_refs_old_alive: AtomicU64, - pub(crate) slots_eligible_to_shrink: AtomicU64, - pub(crate) total_dead_bytes: AtomicU64, - pub(crate) total_alive_bytes: AtomicU64, -} - -#[derive(Debug, Default)] -pub(crate) struct ShrinkStatsSub { - pub(crate) store_accounts_timing: StoreAccountsTiming, - pub(crate) rewrite_elapsed_us: Saturating, - pub(crate) create_and_insert_store_elapsed_us: Saturating, - pub(crate) unpackable_slots_count: Saturating, - pub(crate) newest_alive_packed_count: Saturating, -} - -impl ShrinkStatsSub { - pub(crate) fn accumulate(&mut self, other: &Self) { - self.store_accounts_timing - .accumulate(&other.store_accounts_timing); - self.rewrite_elapsed_us += other.rewrite_elapsed_us; - self.create_and_insert_store_elapsed_us += other.create_and_insert_store_elapsed_us; - self.unpackable_slots_count += other.unpackable_slots_count; - self.newest_alive_packed_count += other.newest_alive_packed_count; - } -} -#[derive(Debug, Default)] -pub struct ShrinkStats { - last_report: AtomicInterval, - pub(crate) num_slots_shrunk: AtomicUsize, - storage_read_elapsed: AtomicU64, - num_duplicated_accounts: AtomicU64, - index_read_elapsed: AtomicU64, - create_and_insert_store_elapsed: AtomicU64, - store_accounts_elapsed: AtomicU64, - update_index_elapsed: AtomicU64, - handle_reclaims_elapsed: AtomicU64, - remove_old_stores_shrink_us: AtomicU64, - rewrite_elapsed: AtomicU64, - unpackable_slots_count: AtomicU64, - newest_alive_packed_count: AtomicU64, - drop_storage_entries_elapsed: AtomicU64, - accounts_removed: AtomicUsize, - bytes_removed: AtomicU64, - bytes_written: AtomicU64, - skipped_shrink: AtomicU64, - dead_accounts: AtomicU64, - alive_accounts: AtomicU64, - index_scan_returned_none: AtomicU64, - index_scan_returned_some: AtomicU64, - accounts_loaded: AtomicU64, - purged_zero_lamports: AtomicU64, - accounts_not_found_in_index: AtomicU64, - 
num_ancient_slots_shrunk: AtomicU64, -} - -impl ShrinkStats { - fn report(&self) { - if self.last_report.should_update(1000) { - datapoint_info!( - "shrink_stats", - ( - "num_slots_shrunk", - self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "index_scan_returned_none", - self.index_scan_returned_none.swap(0, Ordering::Relaxed), - i64 - ), - ( - "index_scan_returned_some", - self.index_scan_returned_some.swap(0, Ordering::Relaxed), - i64 - ), - ( - "storage_read_elapsed", - self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "num_duplicated_accounts", - self.num_duplicated_accounts.swap(0, Ordering::Relaxed), - i64 - ), - ( - "index_read_elapsed", - self.index_read_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "create_and_insert_store_elapsed", - self.create_and_insert_store_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "store_accounts_elapsed", - self.store_accounts_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "update_index_elapsed", - self.update_index_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "handle_reclaims_elapsed", - self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "remove_old_stores_shrink_us", - self.remove_old_stores_shrink_us.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "rewrite_elapsed", - self.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "drop_storage_entries_elapsed", - self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "accounts_removed", - self.accounts_removed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_removed", - self.bytes_removed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_written", - self.bytes_written.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "skipped_shrink", - self.skipped_shrink.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "alive_accounts", - self.alive_accounts.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "dead_accounts", - self.dead_accounts.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "accounts_loaded", - self.accounts_loaded.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "purged_zero_lamports_count", - self.purged_zero_lamports.swap(0, Ordering::Relaxed), - i64 - ), - ( - "num_ancient_slots_shrunk", - self.num_ancient_slots_shrunk.swap(0, Ordering::Relaxed), - i64 - ), - ( - "accounts_not_found_in_index", - self.accounts_not_found_in_index.swap(0, Ordering::Relaxed), - i64 - ), - ); - } - } -} - -impl ShrinkAncientStats { - pub(crate) fn report(&self) { - datapoint_info!( - "shrink_ancient_stats", - ( - "num_slots_shrunk", - self.shrink_stats - .num_slots_shrunk - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "index_scan_returned_none", - self.shrink_stats - .index_scan_returned_none - .swap(0, Ordering::Relaxed), - i64 - ), - ( - "index_scan_returned_some", - self.shrink_stats - .index_scan_returned_some - .swap(0, Ordering::Relaxed), - i64 - ), - ( - "storage_read_elapsed", - self.shrink_stats - .storage_read_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "num_duplicated_accounts", - self.shrink_stats - .num_duplicated_accounts - .swap(0, Ordering::Relaxed), - i64 - ), - ( - "index_read_elapsed", - self.shrink_stats - .index_read_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "create_and_insert_store_elapsed", - self.shrink_stats - .create_and_insert_store_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "store_accounts_elapsed", - 
self.shrink_stats - .store_accounts_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "update_index_elapsed", - self.shrink_stats - .update_index_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "handle_reclaims_elapsed", - self.shrink_stats - .handle_reclaims_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "remove_old_stores_shrink_us", - self.shrink_stats - .remove_old_stores_shrink_us - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "rewrite_elapsed", - self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "unpackable_slots_count", - self.shrink_stats - .unpackable_slots_count - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "newest_alive_packed_count", - self.shrink_stats - .newest_alive_packed_count - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "drop_storage_entries_elapsed", - self.shrink_stats - .drop_storage_entries_elapsed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "accounts_removed", - self.shrink_stats - .accounts_removed - .swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_removed", - self.shrink_stats.bytes_removed.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_written", - self.shrink_stats.bytes_written.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "alive_accounts", - self.shrink_stats.alive_accounts.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "dead_accounts", - self.shrink_stats.dead_accounts.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "accounts_loaded", - self.shrink_stats.accounts_loaded.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "ancient_append_vecs_shrunk", - self.ancient_append_vecs_shrunk.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "random", - self.random_shrink.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "slots_eligible_to_shrink", - self.slots_eligible_to_shrink.swap(0, Ordering::Relaxed), - i64 - ), - ( - "total_dead_bytes", - self.total_dead_bytes.swap(0, Ordering::Relaxed), - i64 - ), - ( - "total_alive_bytes", - self.total_alive_bytes.swap(0, Ordering::Relaxed), - i64 - ), - ( - "slots_considered", - self.slots_considered.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "ancient_scanned", - self.ancient_scanned.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "total_us", - self.total_us.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_ancient_created", - self.bytes_ancient_created.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_from_must_shrink", - self.bytes_from_must_shrink.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_from_smallest_storages", - self.bytes_from_smallest_storages.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "bytes_from_newest_storages", - self.bytes_from_newest_storages.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "many_ref_slots_skipped", - self.many_ref_slots_skipped.swap(0, Ordering::Relaxed), - i64 - ), - ( - "slots_cannot_move_count", - self.slots_cannot_move_count.swap(0, Ordering::Relaxed), - i64 - ), - ( - "many_refs_old_alive", - self.many_refs_old_alive.swap(0, Ordering::Relaxed), - i64 - ), - ( - "purged_zero_lamports_count", - self.shrink_stats - .purged_zero_lamports - .swap(0, Ordering::Relaxed), - i64 - ), - ( - "accounts_not_found_in_index", - self.shrink_stats - .accounts_not_found_in_index - .swap(0, Ordering::Relaxed), - i64 - ), - ); - } -} - pub fn quarter_thread_count() -> usize { std::cmp::max(2, num_cpus::get() / 4) } diff --git a/accounts-db/src/accounts_db/stats.rs 
b/accounts-db/src/accounts_db/stats.rs new file mode 100644 index 00000000000000..74baaf96a76826 --- /dev/null +++ b/accounts-db/src/accounts_db/stats.rs @@ -0,0 +1,777 @@ +use { + crate::{ + accounts_index::{AccountsIndexRootsStats, ZeroLamport}, + append_vec::{ + APPEND_VEC_MMAPPED_FILES_DIRTY, APPEND_VEC_MMAPPED_FILES_OPEN, + APPEND_VEC_OPEN_AS_FILE_IO, + }, + }, + serde::{Deserialize, Serialize}, + solana_sdk::{account::ReadableAccount, timing::AtomicInterval}, + std::{ + num::Saturating, + sync::atomic::{AtomicU64, AtomicUsize, Ordering}, + }, +}; + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct BankHashStats { + pub num_updated_accounts: u64, + pub num_removed_accounts: u64, + pub num_lamports_stored: u64, + pub total_data_len: u64, + pub num_executable_accounts: u64, +} + +impl BankHashStats { + pub fn update(&mut self, account: &T) { + if account.is_zero_lamport() { + self.num_removed_accounts += 1; + } else { + self.num_updated_accounts += 1; + } + self.total_data_len = self + .total_data_len + .wrapping_add(account.data().len() as u64); + if account.executable() { + self.num_executable_accounts += 1; + } + self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports()); + } + + pub fn accumulate(&mut self, other: &BankHashStats) { + self.num_updated_accounts += other.num_updated_accounts; + self.num_removed_accounts += other.num_removed_accounts; + self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len); + self.num_lamports_stored = self + .num_lamports_stored + .wrapping_add(other.num_lamports_stored); + self.num_executable_accounts += other.num_executable_accounts; + } +} + +#[derive(Debug, Default)] +pub struct AccountsStats { + pub delta_hash_scan_time_total_us: AtomicU64, + pub delta_hash_accumulate_time_total_us: AtomicU64, + pub delta_hash_num: AtomicU64, + pub skipped_rewrites_num: AtomicUsize, + + pub last_store_report: AtomicInterval, + pub store_hash_accounts: AtomicU64, + pub calc_stored_meta: AtomicU64, + pub store_accounts: AtomicU64, + pub store_update_index: AtomicU64, + pub store_handle_reclaims: AtomicU64, + pub store_append_accounts: AtomicU64, + pub stakes_cache_check_and_store_us: AtomicU64, + pub store_num_accounts: AtomicU64, + pub store_total_data: AtomicU64, + pub create_store_count: AtomicU64, + pub store_get_slot_store: AtomicU64, + pub store_find_existing: AtomicU64, + pub dropped_stores: AtomicU64, + pub store_uncleaned_update: AtomicU64, + pub handle_dead_keys_us: AtomicU64, + pub purge_exact_us: AtomicU64, + pub purge_exact_count: AtomicU64, +} + +#[derive(Debug, Default)] +pub struct PurgeStats { + pub last_report: AtomicInterval, + pub safety_checks_elapsed: AtomicU64, + pub remove_cache_elapsed: AtomicU64, + pub remove_storage_entries_elapsed: AtomicU64, + pub drop_storage_entries_elapsed: AtomicU64, + pub num_cached_slots_removed: AtomicUsize, + pub num_stored_slots_removed: AtomicUsize, + pub total_removed_storage_entries: AtomicUsize, + pub total_removed_cached_bytes: AtomicU64, + pub total_removed_stored_bytes: AtomicU64, + pub scan_storages_elapsed: AtomicU64, + pub purge_accounts_index_elapsed: AtomicU64, + pub handle_reclaims_elapsed: AtomicU64, +} + +impl PurgeStats { + pub fn report(&self, metric_name: &'static str, report_interval_ms: Option) { + let should_report = report_interval_ms + .map(|report_interval_ms| self.last_report.should_update(report_interval_ms)) + .unwrap_or(true); + + if 
should_report { + datapoint_info!( + metric_name, + ( + "safety_checks_elapsed", + self.safety_checks_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "remove_cache_elapsed", + self.remove_cache_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "remove_storage_entries_elapsed", + self.remove_storage_entries_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "drop_storage_entries_elapsed", + self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "num_cached_slots_removed", + self.num_cached_slots_removed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "num_stored_slots_removed", + self.num_stored_slots_removed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "total_removed_storage_entries", + self.total_removed_storage_entries + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "total_removed_cached_bytes", + self.total_removed_cached_bytes.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "total_removed_stored_bytes", + self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "scan_storages_elapsed", + self.scan_storages_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "purge_accounts_index_elapsed", + self.purge_accounts_index_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "handle_reclaims_elapsed", + self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ); + } + } +} + +#[derive(Default, Debug)] +pub struct StoreAccountsTiming { + pub store_accounts_elapsed: u64, + pub update_index_elapsed: u64, + pub handle_reclaims_elapsed: u64, +} + +impl StoreAccountsTiming { + pub fn accumulate(&mut self, other: &Self) { + self.store_accounts_elapsed += other.store_accounts_elapsed; + self.update_index_elapsed += other.update_index_elapsed; + self.handle_reclaims_elapsed += other.handle_reclaims_elapsed; + } +} + +#[derive(Debug, Default)] +pub struct FlushStats { + pub num_flushed: Saturating, + pub num_purged: Saturating, + pub total_size: Saturating, + pub store_accounts_timing: StoreAccountsTiming, + pub store_accounts_total_us: Saturating, +} + +impl FlushStats { + pub fn accumulate(&mut self, other: &Self) { + self.num_flushed += other.num_flushed; + self.num_purged += other.num_purged; + self.total_size += other.total_size; + self.store_accounts_timing + .accumulate(&other.store_accounts_timing); + self.store_accounts_total_us += other.store_accounts_total_us; + } +} + +#[derive(Debug, Default)] +pub struct LatestAccountsIndexRootsStats { + pub roots_len: AtomicUsize, + pub uncleaned_roots_len: AtomicUsize, + pub roots_range: AtomicU64, + pub rooted_cleaned_count: AtomicUsize, + pub unrooted_cleaned_count: AtomicUsize, + pub clean_unref_from_storage_us: AtomicU64, + pub clean_dead_slot_us: AtomicU64, +} + +impl LatestAccountsIndexRootsStats { + pub fn update(&self, accounts_index_roots_stats: &AccountsIndexRootsStats) { + if let Some(value) = accounts_index_roots_stats.roots_len { + self.roots_len.store(value, Ordering::Relaxed); + } + if let Some(value) = accounts_index_roots_stats.uncleaned_roots_len { + self.uncleaned_roots_len.store(value, Ordering::Relaxed); + } + if let Some(value) = accounts_index_roots_stats.roots_range { + self.roots_range.store(value, Ordering::Relaxed); + } + self.rooted_cleaned_count.fetch_add( + accounts_index_roots_stats.rooted_cleaned_count, + Ordering::Relaxed, + ); + self.unrooted_cleaned_count.fetch_add( + accounts_index_roots_stats.unrooted_cleaned_count, + Ordering::Relaxed, + ); + 
self.clean_unref_from_storage_us.fetch_add( + accounts_index_roots_stats.clean_unref_from_storage_us, + Ordering::Relaxed, + ); + self.clean_dead_slot_us.fetch_add( + accounts_index_roots_stats.clean_dead_slot_us, + Ordering::Relaxed, + ); + } + + pub fn report(&self) { + datapoint_info!( + "accounts_index_roots_len", + ( + "roots_len", + self.roots_len.load(Ordering::Relaxed) as i64, + i64 + ), + ( + "uncleaned_roots_len", + self.uncleaned_roots_len.load(Ordering::Relaxed) as i64, + i64 + ), + ( + "roots_range_width", + self.roots_range.load(Ordering::Relaxed) as i64, + i64 + ), + ( + "unrooted_cleaned_count", + self.unrooted_cleaned_count.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "rooted_cleaned_count", + self.rooted_cleaned_count.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "clean_unref_from_storage_us", + self.clean_unref_from_storage_us.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "clean_dead_slot_us", + self.clean_dead_slot_us.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "append_vecs_open", + APPEND_VEC_MMAPPED_FILES_OPEN.load(Ordering::Relaxed) as i64, + i64 + ), + ( + "append_vecs_dirty", + APPEND_VEC_MMAPPED_FILES_DIRTY.load(Ordering::Relaxed), + i64 + ), + ( + "append_vecs_open_as_file_io", + APPEND_VEC_OPEN_AS_FILE_IO.load(Ordering::Relaxed), + i64 + ) + ); + + // Don't need to reset since this tracks the latest updates, not a cumulative total + } +} + +#[derive(Debug, Default)] +pub struct CleanAccountsStats { + pub purge_stats: PurgeStats, + pub latest_accounts_index_roots_stats: LatestAccountsIndexRootsStats, + + // stats held here and reported by clean_accounts + pub clean_old_root_us: AtomicU64, + pub clean_old_root_reclaim_us: AtomicU64, + pub reset_uncleaned_roots_us: AtomicU64, + pub remove_dead_accounts_remove_us: AtomicU64, + pub remove_dead_accounts_shrink_us: AtomicU64, + pub clean_stored_dead_slots_us: AtomicU64, + pub uncleaned_roots_slot_list_1: AtomicU64, + pub get_account_sizes_us: AtomicU64, + pub slots_cleaned: AtomicU64, +} + +impl CleanAccountsStats { + pub fn report(&self) { + self.purge_stats.report("clean_purge_slots_stats", None); + self.latest_accounts_index_roots_stats.report(); + } +} + +#[derive(Debug, Default)] +pub struct ShrinkAncientStats { + pub shrink_stats: ShrinkStats, + pub ancient_append_vecs_shrunk: AtomicU64, + pub total_us: AtomicU64, + pub random_shrink: AtomicU64, + pub slots_considered: AtomicU64, + pub ancient_scanned: AtomicU64, + pub bytes_ancient_created: AtomicU64, + pub bytes_from_must_shrink: AtomicU64, + pub bytes_from_smallest_storages: AtomicU64, + pub bytes_from_newest_storages: AtomicU64, + pub many_ref_slots_skipped: AtomicU64, + pub slots_cannot_move_count: AtomicU64, + pub many_refs_old_alive: AtomicU64, + pub slots_eligible_to_shrink: AtomicU64, + pub total_dead_bytes: AtomicU64, + pub total_alive_bytes: AtomicU64, +} + +#[derive(Debug, Default)] +pub struct ShrinkStatsSub { + pub store_accounts_timing: StoreAccountsTiming, + pub rewrite_elapsed_us: Saturating, + pub create_and_insert_store_elapsed_us: Saturating, + pub unpackable_slots_count: Saturating, + pub newest_alive_packed_count: Saturating, +} + +impl ShrinkStatsSub { + pub fn accumulate(&mut self, other: &Self) { + self.store_accounts_timing + .accumulate(&other.store_accounts_timing); + self.rewrite_elapsed_us += other.rewrite_elapsed_us; + self.create_and_insert_store_elapsed_us += other.create_and_insert_store_elapsed_us; + self.unpackable_slots_count += other.unpackable_slots_count; + self.newest_alive_packed_count += 
other.newest_alive_packed_count; + } +} +#[derive(Debug, Default)] +pub struct ShrinkStats { + pub last_report: AtomicInterval, + pub num_slots_shrunk: AtomicUsize, + pub storage_read_elapsed: AtomicU64, + pub num_duplicated_accounts: AtomicU64, + pub index_read_elapsed: AtomicU64, + pub create_and_insert_store_elapsed: AtomicU64, + pub store_accounts_elapsed: AtomicU64, + pub update_index_elapsed: AtomicU64, + pub handle_reclaims_elapsed: AtomicU64, + pub remove_old_stores_shrink_us: AtomicU64, + pub rewrite_elapsed: AtomicU64, + pub unpackable_slots_count: AtomicU64, + pub newest_alive_packed_count: AtomicU64, + pub drop_storage_entries_elapsed: AtomicU64, + pub accounts_removed: AtomicUsize, + pub bytes_removed: AtomicU64, + pub bytes_written: AtomicU64, + pub skipped_shrink: AtomicU64, + pub dead_accounts: AtomicU64, + pub alive_accounts: AtomicU64, + pub index_scan_returned_none: AtomicU64, + pub index_scan_returned_some: AtomicU64, + pub accounts_loaded: AtomicU64, + pub initial_candidates_count: AtomicU64, + pub purged_zero_lamports: AtomicU64, + pub accounts_not_found_in_index: AtomicU64, + pub num_ancient_slots_shrunk: AtomicU64, + pub ancient_slots_added_to_shrink: AtomicU64, + pub ancient_bytes_added_to_shrink: AtomicU64, +} + +impl ShrinkStats { + pub fn report(&self) { + if self.last_report.should_update(1000) { + datapoint_info!( + "shrink_stats", + ( + "ancient_slots_added_to_shrink", + self.ancient_slots_added_to_shrink + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "ancient_bytes_added_to_shrink", + self.ancient_bytes_added_to_shrink + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_slots_shrunk", + self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "index_scan_returned_none", + self.index_scan_returned_none.swap(0, Ordering::Relaxed), + i64 + ), + ( + "index_scan_returned_some", + self.index_scan_returned_some.swap(0, Ordering::Relaxed), + i64 + ), + ( + "storage_read_elapsed", + self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "num_duplicated_accounts", + self.num_duplicated_accounts.swap(0, Ordering::Relaxed), + i64 + ), + ( + "index_read_elapsed", + self.index_read_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "create_and_insert_store_elapsed", + self.create_and_insert_store_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "store_accounts_elapsed", + self.store_accounts_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "update_index_elapsed", + self.update_index_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "handle_reclaims_elapsed", + self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "remove_old_stores_shrink_us", + self.remove_old_stores_shrink_us.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "rewrite_elapsed", + self.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "drop_storage_entries_elapsed", + self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "accounts_removed", + self.accounts_removed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_removed", + self.bytes_removed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_written", + self.bytes_written.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "skipped_shrink", + self.skipped_shrink.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "alive_accounts", + self.alive_accounts.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "dead_accounts", + self.dead_accounts.swap(0, Ordering::Relaxed) as i64, 
+ i64 + ), + ( + "accounts_loaded", + self.accounts_loaded.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "purged_zero_lamports_count", + self.purged_zero_lamports.swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_ancient_slots_shrunk", + self.num_ancient_slots_shrunk.swap(0, Ordering::Relaxed), + i64 + ), + ( + "accounts_not_found_in_index", + self.accounts_not_found_in_index.swap(0, Ordering::Relaxed), + i64 + ), + ( + "initial_candidates_count", + self.initial_candidates_count.swap(0, Ordering::Relaxed), + i64 + ), + ); + } + } +} + +impl ShrinkAncientStats { + pub fn report(&self) { + datapoint_info!( + "shrink_ancient_stats", + ( + "num_slots_shrunk", + self.shrink_stats + .num_slots_shrunk + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "index_scan_returned_none", + self.shrink_stats + .index_scan_returned_none + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "index_scan_returned_some", + self.shrink_stats + .index_scan_returned_some + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "storage_read_elapsed", + self.shrink_stats + .storage_read_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "num_duplicated_accounts", + self.shrink_stats + .num_duplicated_accounts + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "index_read_elapsed", + self.shrink_stats + .index_read_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "create_and_insert_store_elapsed", + self.shrink_stats + .create_and_insert_store_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "store_accounts_elapsed", + self.shrink_stats + .store_accounts_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "update_index_elapsed", + self.shrink_stats + .update_index_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "handle_reclaims_elapsed", + self.shrink_stats + .handle_reclaims_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "remove_old_stores_shrink_us", + self.shrink_stats + .remove_old_stores_shrink_us + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "rewrite_elapsed", + self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "unpackable_slots_count", + self.shrink_stats + .unpackable_slots_count + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "newest_alive_packed_count", + self.shrink_stats + .newest_alive_packed_count + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "drop_storage_entries_elapsed", + self.shrink_stats + .drop_storage_entries_elapsed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "accounts_removed", + self.shrink_stats + .accounts_removed + .swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_removed", + self.shrink_stats.bytes_removed.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_written", + self.shrink_stats.bytes_written.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "alive_accounts", + self.shrink_stats.alive_accounts.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "dead_accounts", + self.shrink_stats.dead_accounts.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "accounts_loaded", + self.shrink_stats.accounts_loaded.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "ancient_append_vecs_shrunk", + self.ancient_append_vecs_shrunk.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "random", + self.random_shrink.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "slots_eligible_to_shrink", + self.slots_eligible_to_shrink.swap(0, Ordering::Relaxed), + i64 + ), + ( + "total_dead_bytes", + self.total_dead_bytes.swap(0, 
Ordering::Relaxed), + i64 + ), + ( + "total_alive_bytes", + self.total_alive_bytes.swap(0, Ordering::Relaxed), + i64 + ), + ( + "slots_considered", + self.slots_considered.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "ancient_scanned", + self.ancient_scanned.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "total_us", + self.total_us.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_ancient_created", + self.bytes_ancient_created.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_from_must_shrink", + self.bytes_from_must_shrink.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_from_smallest_storages", + self.bytes_from_smallest_storages.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "bytes_from_newest_storages", + self.bytes_from_newest_storages.swap(0, Ordering::Relaxed) as i64, + i64 + ), + ( + "many_ref_slots_skipped", + self.many_ref_slots_skipped.swap(0, Ordering::Relaxed), + i64 + ), + ( + "slots_cannot_move_count", + self.slots_cannot_move_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "many_refs_old_alive", + self.many_refs_old_alive.swap(0, Ordering::Relaxed), + i64 + ), + ( + "purged_zero_lamports_count", + self.shrink_stats + .purged_zero_lamports + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "accounts_not_found_in_index", + self.shrink_stats + .accounts_not_found_in_index + .swap(0, Ordering::Relaxed), + i64 + ), + ); + } +} diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 68d4f0b365e9fd..9c788c8e668917 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -8,9 +8,9 @@ use { crate::{ account_storage::ShrinkInProgress, accounts_db::{ + stats::{ShrinkAncientStats, ShrinkStatsSub}, AccountFromStorage, AccountStorageEntry, AccountsDb, AliveAccounts, - GetUniqueAccountsResult, ShrinkAncientStats, ShrinkCollect, - ShrinkCollectAliveSeparatedByRefs, ShrinkStatsSub, + GetUniqueAccountsResult, ShrinkCollect, ShrinkCollectAliveSeparatedByRefs, }, accounts_file::AccountsFile, active_stats::ActiveStatItem, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 3f348e4e7a7d24..eeacfb3f556015 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -518,7 +518,7 @@ mod tests { use { super::*, solana_accounts_db::{ - account_storage::meta::StoredMetaWriteVersion, accounts_db::BankHashStats, + account_storage::meta::StoredMetaWriteVersion, accounts_db::stats::BankHashStats, }, solana_frozen_abi::abi_example::AbiExample, solana_sdk::clock::Slot, @@ -546,7 +546,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "8hwm4YsQXJWGZdp762SkJnDok29LXKwFtmW9oQ2KSzrN") + frozen_abi(digest = "7xkyjhBmj1xk3ykcbufPCnBKKkcpQ3AjKFUmH1r8MRnu") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 1494413bcfb02e..e12b9c5ec124a7 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -19,8 +19,8 @@ use { account_storage::meta::StoredMetaWriteVersion, accounts::Accounts, accounts_db::{ - AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, - AccountsFileId, AtomicAccountsFileId, BankHashStats, IndexGenerationInfo, + stats::BankHashStats, AccountShrinkThreshold, AccountStorageEntry, AccountsDb, + AccountsDbConfig, AccountsFileId, AtomicAccountsFileId, IndexGenerationInfo, }, accounts_file::{AccountsFile, StorageAccess}, 
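// Example (not part of the patch): the relocated stats types keep their
// behavior; only the import path moves to `accounts_db::stats`. A minimal
// sketch of a consumer, assuming the `solana-accounts-db` and `solana-sdk`
// crates and the existing `ZeroLamport` impl for `AccountSharedData`.
use solana_accounts_db::accounts_db::stats::BankHashStats;
use solana_sdk::{account::AccountSharedData, pubkey::Pubkey};

fn bank_hash_stats_sketch() {
    // One non-zero-lamport account counts as "updated", not "removed".
    let account = AccountSharedData::new(42, 128, &Pubkey::new_unique());
    let mut per_slot = BankHashStats::default();
    per_slot.update(&account);

    // Per-slot stats fold into a running total via accumulate().
    let mut total = BankHashStats::default();
    total.accumulate(&per_slot);
    assert_eq!(total.num_updated_accounts, 1);
    assert_eq!(total.total_data_len, 128);
}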
accounts_hash::{AccountsDeltaHash, AccountsHash}, diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 7aeba40adea04d..bd8ae36c963237 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -9,7 +9,9 @@ use { prelude::ParallelSlice, }, solana_accounts_db::{ - accounts_db::{AccountStorageEntry, AccountsDb, GetUniqueAccountsResult, PurgeStats}, + accounts_db::{ + stats::PurgeStats, AccountStorageEntry, AccountsDb, GetUniqueAccountsResult, + }, accounts_partition, storable_accounts::StorableAccountsBySlot, }, diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 1d929227109772..ee8a3fb861e40f 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -8,7 +8,7 @@ use { solana_accounts_db::{ account_storage::meta::StoredMetaWriteVersion, accounts::Accounts, - accounts_db::{AccountStorageEntry, BankHashStats}, + accounts_db::{stats::BankHashStats, AccountStorageEntry}, accounts_hash::{AccountsDeltaHash, AccountsHash, AccountsHashKind}, epoch_accounts_hash::EpochAccountsHash, }, diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 0dfbd8a13e91a3..bf706aaa8ba707 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -24,7 +24,7 @@ use { regex::Regex, solana_accounts_db::{ account_storage::{meta::StoredMetaWriteVersion, AccountStorageMap}, - accounts_db::{AccountStorageEntry, AtomicAccountsFileId, BankHashStats}, + accounts_db::{stats::BankHashStats, AccountStorageEntry, AtomicAccountsFileId}, accounts_file::{AccountsFile, AccountsFileError, InternalsForArchive, StorageAccess}, accounts_hash::{AccountsDeltaHash, AccountsHash}, epoch_accounts_hash::EpochAccountsHash, From d3312ea5d488578a170da3cfbfb65640cc8b745c Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Mon, 7 Oct 2024 16:23:00 -0500 Subject: [PATCH 451/529] tidy: remove unused get_thread_pool fn (#3101) remove unused get_thread_pool fn Co-authored-by: HaoranYi --- runtime/src/bank.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f881c548c89f1d..c617a652541b80 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5855,10 +5855,6 @@ impl Bank { SnapshotHash::new(&accounts_hash, epoch_accounts_hash.as_ref()) } - pub fn get_thread_pool(&self) -> &ThreadPool { - &self.rc.accounts.accounts_db.thread_pool_clean - } - pub fn load_account_into_read_cache(&self, key: &Pubkey) { self.rc .accounts From 1e389f48636cf7e710f38f154b9d683c15d1cb0c Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Mon, 7 Oct 2024 18:42:55 -0500 Subject: [PATCH 452/529] RuntimeTransaction::is_simple_vote_transaction (#3099) --- runtime-transaction/src/runtime_transaction.rs | 16 ++++++++-------- runtime-transaction/src/transaction_meta.rs | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 14c13138025b31..1dbd44bed70bf6 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -45,8 +45,8 @@ impl StaticMeta for RuntimeTransaction { fn message_hash(&self) -> &Hash { &self.meta.message_hash } - fn is_simple_vote_tx(&self) -> bool { - self.meta.is_simple_vote_tx + fn is_simple_vote_transaction(&self) -> bool { + self.meta.is_simple_vote_transaction } fn signature_details(&self) -> &TransactionSignatureDetails {
&self.meta.signature_details @@ -107,7 +107,7 @@ impl RuntimeTransaction { transaction: sanitized_versioned_tx, meta: TransactionMeta { message_hash, - is_simple_vote_tx, + is_simple_vote_transaction: is_simple_vote_tx, signature_details, compute_budget_instruction_details, }, @@ -122,7 +122,7 @@ impl RuntimeTransaction { reserved_account_keys: &HashSet, ) -> Result { let hash = *statically_loaded_runtime_tx.message_hash(); - let is_simple_vote_tx = statically_loaded_runtime_tx.is_simple_vote_tx(); + let is_simple_vote_tx = statically_loaded_runtime_tx.is_simple_vote_transaction(); let sanitized_transaction = SanitizedTransaction::try_new( statically_loaded_runtime_tx.transaction, hash, @@ -305,7 +305,7 @@ mod tests { RuntimeTransaction::::try_from(svt, None, is_simple_vote) .unwrap() .meta - .is_simple_vote_tx + .is_simple_vote_transaction } assert!(!get_is_simple_vote( @@ -342,7 +342,7 @@ mod tests { .unwrap(); assert_eq!(hash, *statically_loaded_transaction.message_hash()); - assert!(!statically_loaded_transaction.is_simple_vote_tx()); + assert!(!statically_loaded_transaction.is_simple_vote_transaction()); let dynamically_loaded_transaction = RuntimeTransaction::::try_from( statically_loaded_transaction, @@ -353,7 +353,7 @@ mod tests { dynamically_loaded_transaction.expect("created from statically loaded tx"); assert_eq!(hash, *dynamically_loaded_transaction.message_hash()); - assert!(!dynamically_loaded_transaction.is_simple_vote_tx()); + assert!(!dynamically_loaded_transaction.is_simple_vote_transaction()); } #[test] @@ -377,7 +377,7 @@ mod tests { .unwrap(); assert_eq!(&hash, runtime_transaction_static.message_hash()); - assert!(!runtime_transaction_static.is_simple_vote_tx()); + assert!(!runtime_transaction_static.is_simple_vote_transaction()); let signature_details = &runtime_transaction_static.meta.signature_details; assert_eq!(1, signature_details.num_transaction_signatures()); diff --git a/runtime-transaction/src/transaction_meta.rs b/runtime-transaction/src/transaction_meta.rs index 6ddce57e11dc5b..6e10d233636674 100644 --- a/runtime-transaction/src/transaction_meta.rs +++ b/runtime-transaction/src/transaction_meta.rs @@ -24,7 +24,7 @@ use { /// for example: message hash, simple-vote-tx flag, limits set by instructions pub trait StaticMeta { fn message_hash(&self) -> &Hash; - fn is_simple_vote_tx(&self) -> bool; + fn is_simple_vote_transaction(&self) -> bool; fn signature_details(&self) -> &TransactionSignatureDetails; fn compute_budget_limits(&self, feature_set: &FeatureSet) -> Result; } @@ -39,7 +39,7 @@ pub trait DynamicMeta: StaticMeta {} #[derive(Debug)] pub struct TransactionMeta { pub(crate) message_hash: Hash, - pub(crate) is_simple_vote_tx: bool, + pub(crate) is_simple_vote_transaction: bool, pub(crate) signature_details: TransactionSignatureDetails, pub(crate) compute_budget_instruction_details: ComputeBudgetInstructionDetails, } From c7b643d5f0fc9b7a121fea49ebca587ed045799a Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 7 Oct 2024 21:44:16 -0400 Subject: [PATCH 453/529] Bank equality includes accounts lt hash (#3103) --- runtime/src/bank.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index c617a652541b80..8b01326a44dd08 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -553,6 +553,7 @@ impl PartialEq for Bank { is_delta, #[cfg(feature = "dev-context-only-utils")] hash_overrides, + accounts_lt_hash, // TODO: Confirm if all these fields are intentionally ignored! 
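// Example (not part of the patch): call sites migrate mechanically, since only
// the accessor name changes. A hedged sketch written against the renamed trait
// method; the helper below is hypothetical and assumes the crate's public
// `transaction_meta` module, as the in-tree imports suggest.
use solana_runtime_transaction::transaction_meta::StaticMeta;

// Works for any transaction type implementing StaticMeta.
fn is_gossip_vote<T: StaticMeta>(tx: &T) -> bool {
    // Previously spelled `is_simple_vote_tx()`.
    tx.is_simple_vote_transaction()
}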
rewards: _, cluster_type: _, @@ -577,7 +578,6 @@ impl PartialEq for Bank { compute_budget: _, transaction_account_lock_limit: _, fee_structure: _, - accounts_lt_hash: _, cache_for_accounts_lt_hash: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field @@ -616,6 +616,8 @@ impl PartialEq for Bank { // different Mutexes. && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) || *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap()) + && !(self.is_accounts_lt_hash_enabled() && other.is_accounts_lt_hash_enabled() + && *accounts_lt_hash.lock().unwrap() != *other.accounts_lt_hash.lock().unwrap()) } } From a722d09453da3f31419d907b3221c3020bf963c7 Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Mon, 7 Oct 2024 20:00:33 -0700 Subject: [PATCH 454/529] wen_restart: Add wen_restart_coordinator argument. (#2975) * wen_restart: Add wen_restart_coordinator argument. * rename LEADER_INDEX to COORDINATOR_INDEX --- core/src/validator.rs | 3 +++ local-cluster/src/validator_configs.rs | 1 + multinode-demo/bootstrap-validator.sh | 3 +++ multinode-demo/validator.sh | 3 +++ net/net.sh | 2 +- net/remote/remote-node.sh | 3 ++- validator/src/cli.rs | 16 ++++++++++++++++ validator/src/main.rs | 1 + wen-restart/src/wen_restart.rs | 13 ++++++++++++- 9 files changed, 42 insertions(+), 3 deletions(-) diff --git a/core/src/validator.rs b/core/src/validator.rs index 3ea9593a5efbb1..c05a3fa8474357 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -282,6 +282,7 @@ pub struct ValidatorConfig { pub generator_config: Option, pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, pub wen_restart_proto_path: Option, + pub wen_restart_coordinator: Option, pub unified_scheduler_handler_threads: Option, pub ip_echo_server_threads: NonZeroUsize, pub replay_forks_threads: NonZeroUsize, @@ -355,6 +356,7 @@ impl Default for ValidatorConfig { generator_config: None, use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, + wen_restart_coordinator: None, unified_scheduler_handler_threads: None, ip_echo_server_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), @@ -1417,6 +1419,7 @@ impl Validator { info!("Waiting for wen_restart phase one to finish"); wait_for_wen_restart(WenRestartConfig { wen_restart_path: config.wen_restart_proto_path.clone().unwrap(), + wen_restart_coordinator: config.wen_restart_coordinator.unwrap(), last_vote, blockstore: blockstore.clone(), cluster_info: cluster_info.clone(), diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index bbcd1067851805..786d2e39e57aa4 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -68,6 +68,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { generator_config: config.generator_config.clone(), use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, wen_restart_proto_path: config.wen_restart_proto_path.clone(), + wen_restart_coordinator: config.wen_restart_coordinator, unified_scheduler_handler_threads: config.unified_scheduler_handler_threads, ip_echo_server_threads: config.ip_echo_server_threads, replay_forks_threads: config.replay_forks_threads, diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 
471756254cb5db..d21ee1aaa8b73f 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -115,6 +115,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --wen-restart ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --wen-restart-coordinator ]]; then + args+=("$1" "$2") + shift 2 else echo "Unknown argument: $1" $program --help diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index d4e081c8893858..c97812c6cbb910 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -185,6 +185,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --wen-restart ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --wen-restart-coordinator ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = -h ]]; then usage "$@" else diff --git a/net/net.sh b/net/net.sh index 94fa429ace5086..3ef7430ebd54d6 100755 --- a/net/net.sh +++ b/net/net.sh @@ -146,7 +146,7 @@ Operate a configured testnet -i [ip address] - IP Address of the node to start or stop startnode specific option: - --wen-restart [proto_file] - Use given proto file (create if non-exist) and apply wen_restat + --wen-restart [coordinator_pubkey] - Use given coordinator pubkey and apply wen_restart startclients-specific options: $CLIENT_OPTIONS diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index fe3f6a1d38dbca..edd21ba73145b4 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -436,7 +436,8 @@ EOF fi if [[ -n "$maybeWenRestart" ]]; then - args+=(--wen-restart "$maybeWenRestart") + args+=(--wen-restart wen_restart.proto3) + args+=(--wen-restart-coordinator "$maybeWenRestart") fi cat >> ~/solana/on-reboot <(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .takes_value(true) .required(false) .conflicts_with("wait_for_supermajority") + .requires("wen_restart_coordinator") .help( "Only used during coordinated cluster restarts.\ \n\n\ + Need to also specify the coordinator's pubkey in --wen-restart-coordinator.\ \n\n\ When specified, the validator will enter Wen Restart mode which \ pauses normal activity. Validators in this mode will gossip their last \ vote to reach consensus on a safe restart slot and repair all blocks \ @@ -1610,6 +1613,19 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { further debugging and watch the discord channel for instructions.", ), ) + .arg( + Arg::with_name("wen_restart_coordinator") + .long("wen-restart-coordinator") + .hidden(hidden_unless_forced()) + .value_name("PUBKEY") + .takes_value(true) + .required(false) + .requires("wen_restart") + .help( + "Specifies the pubkey of the coordinator used in wen restart. 
\ + May get stuck if the coordinator used is different from others.", + ), + ) .args(&thread_args(&default_args.thread_args)) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") diff --git a/validator/src/main.rs b/validator/src/main.rs index c01f6a1c2c2507..74ad2d0926eae2 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1540,6 +1540,7 @@ pub fn main() { delay_leader_block_for_pending_fork: matches .is_present("delay_leader_block_for_pending_fork"), wen_restart_proto_path: value_t!(matches, "wen_restart", PathBuf).ok(), + wen_restart_coordinator: value_t!(matches, "wen_restart_coordinator", Pubkey).ok(), ..ValidatorConfig::default() }; diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 924debb2adf226..c2e8cf5191b1c2 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -44,7 +44,7 @@ use { purge_all_bank_snapshots, }, }, - solana_sdk::{shred_version::compute_shred_version, timing::timestamp}, + solana_sdk::{pubkey::Pubkey, shred_version::compute_shred_version, timing::timestamp}, solana_timings::ExecuteTimings, solana_vote_program::vote_state::VoteTransaction, std::{ @@ -874,6 +874,7 @@ pub(crate) fn aggregate_restart_heaviest_fork( #[derive(Clone)] pub struct WenRestartConfig { pub wen_restart_path: PathBuf, + pub wen_restart_coordinator: Pubkey, pub last_vote: VoteTransaction, pub blockstore: Arc, pub cluster_info: Arc, @@ -1333,6 +1334,7 @@ mod tests { const TICKS_PER_SLOT: u64 = 2; const TOTAL_VALIDATOR_COUNT: u16 = 20; const MY_INDEX: usize = TOTAL_VALIDATOR_COUNT as usize - 1; + const COORDINATOR_INDEX: usize = 0; const WAIT_FOR_THREAD_TIMEOUT: u64 = 10_000; const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80; const NON_CONFORMING_VALIDATOR_PERCENT: u64 = 5; @@ -1404,6 +1406,7 @@ mod tests { pub bank_forks: Arc>, pub last_voted_fork_slots: Vec, pub wen_restart_proto_path: PathBuf, + pub wen_restart_coordinator: Pubkey, pub last_blockhash: Hash, pub genesis_config_hash: Hash, } @@ -1439,6 +1442,9 @@ mod tests { .node_keypair .insecure_clone(), ); + let wen_restart_coordinator = validator_voting_keypairs[COORDINATOR_INDEX] + .node_keypair + .pubkey(); let cluster_info = Arc::new(ClusterInfo::new( { let mut contact_info = @@ -1500,6 +1506,7 @@ mod tests { bank_forks, last_voted_fork_slots, wen_restart_proto_path, + wen_restart_coordinator, last_blockhash, genesis_config_hash: genesis_config.hash(), } @@ -1556,6 +1563,7 @@ mod tests { let last_vote_slot: Slot = test_state.last_voted_fork_slots[0]; let wen_restart_config = WenRestartConfig { wen_restart_path: test_state.wen_restart_proto_path.clone(), + wen_restart_coordinator: test_state.wen_restart_coordinator, last_vote: VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), blockstore: test_state.blockstore.clone(), cluster_info: test_state.cluster_info.clone(), @@ -1623,6 +1631,7 @@ mod tests { let exit = Arc::new(AtomicBool::new(false)); let wen_restart_config = WenRestartConfig { wen_restart_path: test_state.wen_restart_proto_path.clone(), + wen_restart_coordinator: test_state.wen_restart_coordinator, last_vote: VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), blockstore: test_state.blockstore.clone(), cluster_info: test_state.cluster_info.clone(), @@ -1984,6 +1993,7 @@ mod tests { assert_eq!( wait_for_wen_restart(WenRestartConfig { wen_restart_path: test_state.wen_restart_proto_path, + wen_restart_coordinator: test_state.wen_restart_coordinator, last_vote: 
VoteTransaction::from(Vote::new( vec![new_root_slot], last_vote_bankhash @@ -3375,6 +3385,7 @@ mod tests { let last_vote_bankhash = Hash::new_unique(); let config = WenRestartConfig { wen_restart_path: test_state.wen_restart_proto_path.clone(), + wen_restart_coordinator: test_state.wen_restart_coordinator, last_vote: VoteTransaction::from(Vote::new(vec![last_vote_slot], last_vote_bankhash)), blockstore: test_state.blockstore.clone(), cluster_info: test_state.cluster_info.clone(), From 01520c1a7e85ec7e878fddd980a2320f6de49ca5 Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Tue, 8 Oct 2024 08:47:02 +0400 Subject: [PATCH 455/529] Extract solana-account crate (#2294) * extract account crate * typo in crate name * update account usage in sdk * update solana-program examples * re-export solana_account with deprecation notice * fix frozen-abi support * fmt * update digest * update lock file * fix doctests * update lock file * remove unnecessary build script * update lock files * sort table * make serde and bincode optional in the new crate * update digest --- Cargo.lock | 17 +++++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 13 ++++ sdk/Cargo.toml | 4 +- sdk/account/Cargo.toml | 38 +++++++++++ sdk/{src/account.rs => account/src/lib.rs} | 74 +++++++++++++++++----- sdk/src/account_utils.rs | 11 +--- sdk/src/client.rs | 32 +++++----- sdk/src/feature.rs | 2 +- sdk/src/genesis_config.rs | 4 +- sdk/src/lib.rs | 3 +- sdk/src/native_loader.rs | 2 +- sdk/src/nonce_account.rs | 2 +- sdk/src/rent_collector.rs | 24 ++++--- sdk/src/transaction_context.rs | 25 ++++---- 15 files changed, 181 insertions(+), 72 deletions(-) create mode 100644 sdk/account/Cargo.toml rename sdk/{src/account.rs => account/src/lib.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index 2587a2ac9494a0..a8558aa59a0aa1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5625,6 +5625,22 @@ dependencies = [ "sha-1 0.9.8", ] +[[package]] +name = "solana-account" +version = "2.1.0" +dependencies = [ + "bincode", + "qualifier_attr", + "serde", + "serde_bytes", + "serde_derive", + "solana-account", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-logger", + "solana-program", +] + [[package]] name = "solana-account-decoder" version = "2.1.0" @@ -7814,6 +7830,7 @@ dependencies = [ "sha2 0.10.8", "sha3", "siphasher", + "solana-account", "solana-bn254", "solana-decode-error", "solana-derivation-path", diff --git a/Cargo.toml b/Cargo.toml index 348426124459b8..921996e4a27749 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,6 +101,7 @@ members = [ "runtime-transaction", "sanitize", "sdk", + "sdk/account", "sdk/account-info", "sdk/atomic-u64", "sdk/cargo-build-sbf", @@ -364,6 +365,7 @@ smallvec = "1.13.2" smpl_jwt = "0.7.1" socket2 = "0.5.7" soketto = "0.7" +solana-account = { path = "sdk/account", version = "=2.1.0" } solana-account-decoder = { path = "account-decoder", version = "=2.1.0" } solana-account-info = { path = "sdk/account-info", version = "=2.1.0" } solana-accounts-db = { path = "accounts-db", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 031715755939b8..70d1b420f6297b 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4688,6 +4688,18 @@ dependencies = [ "sha-1", ] +[[package]] +name = "solana-account" +version = "2.1.0" +dependencies = [ + "bincode", + "qualifier_attr", + "serde", + "serde_bytes", + "serde_derive", + "solana-program", +] + [[package]] name = "solana-account-decoder" version = "2.1.0" @@ -6596,6 +6608,7 @@ dependencies = [ "sha2 0.10.8", "sha3",
"siphasher", + "solana-account", "solana-bn254", "solana-decode-error", "solana-derivation-path", diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 8793f1e58bd1ab..e0e7f132ffec20 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -35,11 +35,12 @@ full = [ "digest", ] borsh = ["dep:borsh", "solana-program/borsh", "solana-secp256k1-recover/borsh"] -dev-context-only-utils = ["qualifier_attr"] +dev-context-only-utils = ["qualifier_attr", "solana-account/dev-context-only-utils"] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-feature-set/frozen-abi", + "solana-account/frozen-abi", "solana-program/frozen-abi", "solana-short-vec/frozen-abi", "solana-signature/frozen-abi" @@ -83,6 +84,7 @@ serde_with = { workspace = true, features = ["macros"] } sha2 = { workspace = true } sha3 = { workspace = true, optional = true } siphasher = { workspace = true } +solana-account = { workspace = true, features = ["bincode"] } solana-bn254 = { workspace = true } solana-decode-error = { workspace = true } solana-derivation-path = { workspace = true } diff --git a/sdk/account/Cargo.toml b/sdk/account/Cargo.toml new file mode 100644 index 00000000000000..33d210778f08e5 --- /dev/null +++ b/sdk/account/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "solana-account" +description = "Solana Account type" +documentation = "https://docs.rs/solana-account" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bincode = { workspace = true, optional = true } +qualifier_attr = { workspace = true, optional = true } +serde = { workspace = true, optional = true } +serde_bytes = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-logger = { workspace = true, optional = true } +solana-program = { workspace = true } + +[dev-dependencies] +solana-account = { path = ".", features = ["dev-context-only-utils"] } + +[features] +bincode = ["dep:bincode", "serde"] +dev-context-only-utils = ["bincode", "dep:qualifier_attr"] +frozen-abi = [ + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", + "dep:solana-logger", + "solana-program/frozen-abi", +] +serde = ["dep:serde", "dep:serde_bytes", "dep:serde_derive"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/src/account.rs b/sdk/account/src/lib.rs similarity index 95% rename from sdk/src/account.rs rename to sdk/account/src/lib.rs index cb7dfbc9638c3e..c2bcbbfd02c916 100644 --- a/sdk/src/account.rs +++ b/sdk/account/src/lib.rs @@ -1,20 +1,24 @@ +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] //! The Solana [`Account`] type. 
#[cfg(feature = "dev-context-only-utils")] use qualifier_attr::qualifiers; +#[cfg(feature = "serde")] +use serde::ser::{Serialize, Serializer}; +#[cfg(feature = "frozen-abi")] +use solana_frozen_abi_macro::{frozen_abi, AbiExample}; +#[cfg(feature = "bincode")] +use solana_program::sysvar::Sysvar; use { - crate::{ + solana_program::{ + account_info::AccountInfo, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::{Epoch, INITIAL_RENT_EPOCH}, + debug_account_data::*, lamports::LamportsError, loader_v4, pubkey::Pubkey, }, - serde::{ - ser::{Serialize, Serializer}, - Deserialize, - }, - solana_program::{account_info::AccountInfo, debug_account_data::*, sysvar::Sysvar}, std::{ cell::{Ref, RefCell}, fmt, @@ -32,13 +36,17 @@ use { derive(AbiExample), frozen_abi(digest = "2SUJNHbXMPWrsSXmDTFc4VHx2XQ85fT5Leabefh5Nwe7") )] -#[derive(Deserialize, PartialEq, Eq, Clone, Default)] -#[serde(rename_all = "camelCase")] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Deserialize), + serde(rename_all = "camelCase") +)] +#[derive(PartialEq, Eq, Clone, Default)] pub struct Account { /// lamports in the account pub lamports: u64, /// data held in this account - #[serde(with = "serde_bytes")] + #[cfg_attr(feature = "serde", serde(with = "serde_bytes"))] pub data: Vec, /// the program that owns this account. If executable, the program that loads this account. pub owner: Pubkey, @@ -49,10 +57,14 @@ pub struct Account { } // mod because we need 'Account' below to have the name 'Account' to match expected serialization +#[cfg(feature = "serde")] mod account_serialize { + #[cfg(feature = "frozen-abi")] + use solana_frozen_abi_macro::{frozen_abi, AbiExample}; use { - crate::{account::ReadableAccount, clock::Epoch, pubkey::Pubkey}, + crate::ReadableAccount, serde::{ser::Serializer, Serialize}, + solana_program::{clock::Epoch, pubkey::Pubkey}, }; #[repr(C)] #[cfg_attr( @@ -60,7 +72,7 @@ mod account_serialize { derive(AbiExample), frozen_abi(digest = "2SUJNHbXMPWrsSXmDTFc4VHx2XQ85fT5Leabefh5Nwe7") )] - #[derive(Serialize)] + #[derive(serde_derive::Serialize)] #[serde(rename_all = "camelCase")] struct Account<'a> { lamports: u64, @@ -91,21 +103,23 @@ mod account_serialize { } } +#[cfg(feature = "serde")] impl Serialize for Account { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - crate::account::account_serialize::serialize_account(self, serializer) + crate::account_serialize::serialize_account(self, serializer) } } +#[cfg(feature = "serde")] impl Serialize for AccountSharedData { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - crate::account::account_serialize::serialize_account(self, serializer) + crate::account_serialize::serialize_account(self, serializer) } } @@ -113,8 +127,12 @@ impl Serialize for AccountSharedData { /// This will be the in-memory representation of the 'Account' struct data. /// The existing 'Account' structure cannot easily change due to downstream projects. 
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(PartialEq, Eq, Clone, Default, Deserialize)] -#[serde(from = "Account")] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Deserialize), + serde(from = "Account") +)] +#[derive(PartialEq, Eq, Clone, Default)] pub struct AccountSharedData { /// lamports in the account lamports: u64, @@ -435,6 +453,7 @@ fn shared_new_ref( Rc::new(RefCell::new(shared_new::(lamports, space, owner))) } +#[cfg(feature = "bincode")] fn shared_new_data( lamports: u64, state: &T, @@ -449,6 +468,8 @@ fn shared_new_data( Epoch::default(), )) } + +#[cfg(feature = "bincode")] fn shared_new_ref_data( lamports: u64, state: &T, @@ -459,6 +480,7 @@ fn shared_new_ref_data( )?)) } +#[cfg(feature = "bincode")] fn shared_new_data_with_space( lamports: u64, state: &T, @@ -471,6 +493,8 @@ fn shared_new_data_with_space( Ok(account) } + +#[cfg(feature = "bincode")] fn shared_new_ref_data_with_space( lamports: u64, state: &T, @@ -482,12 +506,14 @@ fn shared_new_ref_data_with_space( )?)) } +#[cfg(feature = "bincode")] fn shared_deserialize_data( account: &U, ) -> Result { bincode::deserialize(account.data()) } +#[cfg(feature = "bincode")] fn shared_serialize_data( account: &mut U, state: &T, @@ -505,6 +531,7 @@ impl Account { pub fn new_ref(lamports: u64, space: usize, owner: &Pubkey) -> Rc> { shared_new_ref(lamports, space, owner) } + #[cfg(feature = "bincode")] pub fn new_data( lamports: u64, state: &T, @@ -512,6 +539,7 @@ impl Account { ) -> Result { shared_new_data(lamports, state, owner) } + #[cfg(feature = "bincode")] pub fn new_ref_data( lamports: u64, state: &T, @@ -519,6 +547,7 @@ impl Account { ) -> Result, bincode::Error> { shared_new_ref_data(lamports, state, owner) } + #[cfg(feature = "bincode")] pub fn new_data_with_space( lamports: u64, state: &T, @@ -527,6 +556,7 @@ impl Account { ) -> Result { shared_new_data_with_space(lamports, state, space, owner) } + #[cfg(feature = "bincode")] pub fn new_ref_data_with_space( lamports: u64, state: &T, @@ -538,9 +568,11 @@ impl Account { pub fn new_rent_epoch(lamports: u64, space: usize, owner: &Pubkey, rent_epoch: Epoch) -> Self { shared_new_rent_epoch(lamports, space, owner, rent_epoch) } + #[cfg(feature = "bincode")] pub fn deserialize_data(&self) -> Result { shared_deserialize_data(self) } + #[cfg(feature = "bincode")] pub fn serialize_data(&mut self, state: &T) -> Result<(), bincode::Error> { shared_serialize_data(self, state) } @@ -631,6 +663,7 @@ impl AccountSharedData { pub fn new_ref(lamports: u64, space: usize, owner: &Pubkey) -> Rc> { shared_new_ref(lamports, space, owner) } + #[cfg(feature = "bincode")] pub fn new_data( lamports: u64, state: &T, @@ -638,6 +671,7 @@ impl AccountSharedData { ) -> Result { shared_new_data(lamports, state, owner) } + #[cfg(feature = "bincode")] pub fn new_ref_data( lamports: u64, state: &T, @@ -645,6 +679,7 @@ impl AccountSharedData { ) -> Result, bincode::Error> { shared_new_ref_data(lamports, state, owner) } + #[cfg(feature = "bincode")] pub fn new_data_with_space( lamports: u64, state: &T, @@ -653,6 +688,7 @@ impl AccountSharedData { ) -> Result { shared_new_data_with_space(lamports, state, space, owner) } + #[cfg(feature = "bincode")] pub fn new_ref_data_with_space( lamports: u64, state: &T, @@ -664,9 +700,11 @@ impl AccountSharedData { pub fn new_rent_epoch(lamports: u64, space: usize, owner: &Pubkey, rent_epoch: Epoch) -> Self { shared_new_rent_epoch(lamports, space, owner, rent_epoch) } + #[cfg(feature = "bincode")] pub fn deserialize_data(&self) -> Result { 
shared_deserialize_data(self) } + #[cfg(feature = "bincode")] pub fn serialize_data(&mut self, state: &T) -> Result<(), bincode::Error> { shared_serialize_data(self, state) } @@ -675,6 +713,7 @@ impl AccountSharedData { pub type InheritableAccountFields = (u64, Epoch); pub const DUMMY_INHERITABLE_ACCOUNT_FIELDS: InheritableAccountFields = (1, INITIAL_RENT_EPOCH); +#[cfg(feature = "bincode")] pub fn create_account_with_fields( sysvar: &S, (lamports, rent_epoch): InheritableAccountFields, @@ -686,10 +725,12 @@ pub fn create_account_with_fields( account } +#[cfg(feature = "bincode")] pub fn create_account_for_test(sysvar: &S) -> Account { create_account_with_fields(sysvar, DUMMY_INHERITABLE_ACCOUNT_FIELDS) } +#[cfg(feature = "bincode")] /// Create an `Account` from a `Sysvar`. pub fn create_account_shared_data_with_fields( sysvar: &S, @@ -698,6 +739,7 @@ pub fn create_account_shared_data_with_fields( AccountSharedData::from(create_account_with_fields(sysvar, fields)) } +#[cfg(feature = "bincode")] pub fn create_account_shared_data_for_test(sysvar: &S) -> AccountSharedData { AccountSharedData::from(create_account_with_fields( sysvar, @@ -705,11 +747,13 @@ pub fn create_account_shared_data_for_test(sysvar: &S) -> AccountShar )) } +#[cfg(feature = "bincode")] /// Create a `Sysvar` from an `Account`'s data. pub fn from_account(account: &T) -> Option { bincode::deserialize(account.data()).ok() } +#[cfg(feature = "bincode")] /// Serialize a `Sysvar` into an `Account`'s data. pub fn to_account(sysvar: &S, account: &mut T) -> Option<()> { bincode::serialize_into(account.data_as_mut_slice(), sysvar).ok() diff --git a/sdk/src/account_utils.rs b/sdk/src/account_utils.rs index e6e6a7bccd1b2b..7338d64cc33498 100644 --- a/sdk/src/account_utils.rs +++ b/sdk/src/account_utils.rs @@ -1,11 +1,9 @@ //! Useful extras for `Account` state. use { - crate::{ - account::{Account, AccountSharedData}, - instruction::InstructionError, - }, + crate::instruction::InstructionError, bincode::ErrorKind, + solana_account::{Account, AccountSharedData}, std::cell::Ref, }; @@ -66,10 +64,7 @@ where #[cfg(test)] mod tests { - use { - super::*, - crate::{account::AccountSharedData, pubkey::Pubkey}, - }; + use {super::*, crate::pubkey::Pubkey, solana_account::AccountSharedData}; #[test] fn test_account_state() { diff --git a/sdk/src/client.rs b/sdk/src/client.rs index 185b9aeeb0d40b..f33f31d478f6af 100644 --- a/sdk/src/client.rs +++ b/sdk/src/client.rs @@ -9,21 +9,23 @@ #![cfg(feature = "full")] -use crate::{ - account::Account, - clock::Slot, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - hash::Hash, - instruction::Instruction, - message::Message, - pubkey::Pubkey, - signature::{Keypair, Signature}, - signer::Signer, - signers::Signers, - system_instruction, - transaction::{self, Transaction, VersionedTransaction}, - transport::Result, +use { + crate::{ + clock::Slot, + commitment_config::CommitmentConfig, + epoch_info::EpochInfo, + hash::Hash, + instruction::Instruction, + message::Message, + pubkey::Pubkey, + signature::{Keypair, Signature}, + signer::Signer, + signers::Signers, + system_instruction, + transaction::{self, Transaction, VersionedTransaction}, + transport::Result, + }, + solana_account::Account, }; pub trait Client: SyncClient + AsyncClient { diff --git a/sdk/src/feature.rs b/sdk/src/feature.rs index bca42e7bb6d327..a59e38ff0d252f 100644 --- a/sdk/src/feature.rs +++ b/sdk/src/feature.rs @@ -1,6 +1,6 @@ //! Methods for working with `Feature` accounts. 
-use crate::account::{AccountSharedData, ReadableAccount, WritableAccount}; +use solana_account::{AccountSharedData, ReadableAccount, WritableAccount}; pub use solana_program::feature::*; pub fn from_account(account: &T) -> Option { diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index 8e3f89f254830e..24208a76363dbf 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -4,7 +4,6 @@ use { crate::{ - account::{Account, AccountSharedData}, clock::{UnixTimestamp, DEFAULT_TICKS_PER_SLOT}, epoch_schedule::EpochSchedule, fee_calculator::FeeRateGovernor, @@ -22,6 +21,7 @@ use { bincode::{deserialize, serialize}, chrono::{TimeZone, Utc}, memmap2::Mmap, + solana_account::{Account, AccountSharedData}, std::{ collections::BTreeMap, fmt, @@ -87,7 +87,7 @@ impl FromStr for ClusterType { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "GDkrvVXezJYuGHcKSK19wvPBUMfKsifKQtoBxH1RpriL") + frozen_abi(digest = "2eGYc5mpKqDsS8sZfNS4mVq4qPptXYa9hSid2Hpv4DkQ") )] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct GenesisConfig { diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 07c92473cbb46a..11f65d40eedaec 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -58,7 +58,6 @@ pub use solana_program::{ }; #[cfg(feature = "borsh")] pub use solana_program::{borsh, borsh0_10, borsh1}; -pub mod account; pub mod account_utils; pub mod client; pub mod commitment_config; @@ -107,6 +106,8 @@ pub mod transaction_context; pub mod transport; pub mod wasm; +#[deprecated(since = "2.1.0", note = "Use `solana-account` crate instead")] +pub use solana_account as account; #[deprecated(since = "2.1.0", note = "Use `solana-bn254` crate instead")] pub use solana_bn254 as alt_bn128; #[deprecated(since = "2.1.0", note = "Use `solana-decode-error` crate instead")] diff --git a/sdk/src/native_loader.rs b/sdk/src/native_loader.rs index 53a7ded4b61f54..1f32d3988fcf58 100644 --- a/sdk/src/native_loader.rs +++ b/sdk/src/native_loader.rs @@ -1,6 +1,6 @@ //! The native loader native program. -use crate::account::{ +use solana_account::{ Account, AccountSharedData, InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, }; diff --git a/sdk/src/nonce_account.rs b/sdk/src/nonce_account.rs index 255011a7bb6402..f2127790345b92 100644 --- a/sdk/src/nonce_account.rs +++ b/sdk/src/nonce_account.rs @@ -2,7 +2,6 @@ use { crate::{ - account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, hash::Hash, nonce::{ @@ -10,6 +9,7 @@ use { State, }, }, + solana_account::{AccountSharedData, ReadableAccount}, std::cell::RefCell, }; diff --git a/sdk/src/rent_collector.rs b/sdk/src/rent_collector.rs index 96a7e479db76a6..4be46786a1de12 100644 --- a/sdk/src/rent_collector.rs +++ b/sdk/src/rent_collector.rs @@ -1,14 +1,16 @@ #![cfg(feature = "full")] //! 
calculate and collect rent from Accounts -use solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - clock::Epoch, - epoch_schedule::EpochSchedule, - genesis_config::GenesisConfig, - incinerator, - pubkey::Pubkey, - rent::{Rent, RentDue}, +use { + solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, + solana_sdk::{ + clock::Epoch, + epoch_schedule::EpochSchedule, + genesis_config::GenesisConfig, + incinerator, + pubkey::Pubkey, + rent::{Rent, RentDue}, + }, }; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] @@ -213,11 +215,7 @@ impl std::ops::AddAssign for CollectedInfo { #[cfg(test)] mod tests { - use { - super::*, - assert_matches::assert_matches, - solana_sdk::{account::Account, sysvar}, - }; + use {super::*, assert_matches::assert_matches, solana_account::Account, solana_sdk::sysvar}; fn default_rent_collector_clone_with_epoch(epoch: Epoch) -> RentCollector { RentCollector::default().clone_with_epoch(epoch) diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 0bb2f0ec983659..95f65c3af55fe0 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -3,31 +3,28 @@ #[cfg(all(not(target_os = "solana"), feature = "full", debug_assertions))] use crate::signature::Signature; +use { + crate::{instruction::InstructionError, pubkey::Pubkey}, + solana_account::{AccountSharedData, ReadableAccount}, + std::{ + cell::{Ref, RefCell, RefMut}, + collections::HashSet, + pin::Pin, + rc::Rc, + }, +}; #[cfg(not(target_os = "solana"))] use { crate::{ - account::WritableAccount, rent::Rent, system_instruction::{ MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION, MAX_PERMITTED_DATA_LENGTH, }, }, + solana_account::WritableAccount, solana_program::entrypoint::MAX_PERMITTED_DATA_INCREASE, std::mem::MaybeUninit, }; -use { - crate::{ - account::{AccountSharedData, ReadableAccount}, - instruction::InstructionError, - pubkey::Pubkey, - }, - std::{ - cell::{Ref, RefCell, RefMut}, - collections::HashSet, - pin::Pin, - rc::Rc, - }, -}; /// Index of an account inside of the TransactionContext or an InstructionContext. 
pub type IndexOfAccount = u16; From 50d13d1388755486068c777190404e4bc07e8fb3 Mon Sep 17 00:00:00 2001 From: sakridge Date: Tue, 8 Oct 2024 03:12:06 -0700 Subject: [PATCH 456/529] refactor cli-output large functions and add starting epoch for rewards (#3085) * refactor cli-output large functions * solana CLI: Add starting reward epoch option --- cli-output/src/cli_output.rs | 153 ++++++++++++++++++----------------- cli/src/cli.rs | 6 ++ cli/src/stake.rs | 32 +++++++- cli/src/vote.rs | 18 ++++- 4 files changed, 131 insertions(+), 78 deletions(-) diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 1474c758fd8c8e..9f0f6542f1e1a4 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -1311,6 +1311,84 @@ impl VerboseDisplay for CliStakeState { } } +fn show_inactive_stake( + me: &CliStakeState, + f: &mut fmt::Formatter, + delegated_stake: u64, +) -> fmt::Result { + if let Some(deactivation_epoch) = me.deactivation_epoch { + if me.current_epoch > deactivation_epoch { + let deactivating_stake = me.deactivating_stake.or(me.active_stake); + if let Some(deactivating_stake) = deactivating_stake { + writeln!( + f, + "Inactive Stake: {}", + build_balance_message( + delegated_stake - deactivating_stake, + me.use_lamports_unit, + true + ), + )?; + writeln!( + f, + "Deactivating Stake: {}", + build_balance_message(deactivating_stake, me.use_lamports_unit, true), + )?; + } + } + writeln!( + f, + "Stake deactivates starting from epoch: {deactivation_epoch}" + )?; + } + if let Some(delegated_vote_account_address) = &me.delegated_vote_account_address { + writeln!( + f, + "Delegated Vote Account Address: {delegated_vote_account_address}" + )?; + } + Ok(()) +} + +fn show_active_stake( + me: &CliStakeState, + f: &mut fmt::Formatter, + delegated_stake: u64, +) -> fmt::Result { + if me + .deactivation_epoch + .map(|d| me.current_epoch <= d) + .unwrap_or(true) + { + let active_stake = me.active_stake.unwrap_or(0); + writeln!( + f, + "Active Stake: {}", + build_balance_message(active_stake, me.use_lamports_unit, true), + )?; + let activating_stake = me.activating_stake.or_else(|| { + if me.active_stake.is_none() { + Some(delegated_stake) + } else { + None + } + }); + if let Some(activating_stake) = activating_stake { + writeln!( + f, + "Activating Stake: {}", + build_balance_message(activating_stake, me.use_lamports_unit, true), + )?; + writeln!( + f, + "Stake activates starting from epoch: {}", + me.activation_epoch.unwrap() + )?; + } + } + Ok(()) +} + impl fmt::Display for CliStakeState { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn show_authorized(f: &mut fmt::Formatter, authorized: &CliAuthorized) -> fmt::Result { @@ -1374,79 +1452,8 @@ impl fmt::Display for CliStakeState { "Delegated Stake: {}", build_balance_message(delegated_stake, self.use_lamports_unit, true) )?; - if self - .deactivation_epoch - .map(|d| self.current_epoch <= d) - .unwrap_or(true) - { - let active_stake = self.active_stake.unwrap_or(0); - writeln!( - f, - "Active Stake: {}", - build_balance_message(active_stake, self.use_lamports_unit, true), - )?; - let activating_stake = self.activating_stake.or_else(|| { - if self.active_stake.is_none() { - Some(delegated_stake) - } else { - None - } - }); - if let Some(activating_stake) = activating_stake { - writeln!( - f, - "Activating Stake: {}", - build_balance_message( - activating_stake, - self.use_lamports_unit, - true - ), - )?; - writeln!( - f, - "Stake activates starting from epoch: {}", - self.activation_epoch.unwrap() - )?; - } 
- } - - if let Some(deactivation_epoch) = self.deactivation_epoch { - if self.current_epoch > deactivation_epoch { - let deactivating_stake = self.deactivating_stake.or(self.active_stake); - if let Some(deactivating_stake) = deactivating_stake { - writeln!( - f, - "Inactive Stake: {}", - build_balance_message( - delegated_stake - deactivating_stake, - self.use_lamports_unit, - true - ), - )?; - writeln!( - f, - "Deactivating Stake: {}", - build_balance_message( - deactivating_stake, - self.use_lamports_unit, - true - ), - )?; - } - } - writeln!( - f, - "Stake deactivates starting from epoch: {deactivation_epoch}" - )?; - } - if let Some(delegated_vote_account_address) = - &self.delegated_vote_account_address - { - writeln!( - f, - "Delegated Vote Account Address: {delegated_vote_account_address}" - )?; - } + show_active_stake(self, f, delegated_stake)?; + show_inactive_stake(self, f, delegated_stake)?; } else { writeln!(f, "Stake account is undelegated")?; } diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 643782e418b161..e846539f9e4216 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -269,6 +269,7 @@ pub enum CliCommand { use_lamports_unit: bool, with_rewards: Option, use_csv: bool, + starting_epoch: Option, }, StakeAuthorize { stake_account_pubkey: Pubkey, @@ -344,6 +345,7 @@ pub enum CliCommand { use_lamports_unit: bool, use_csv: bool, with_rewards: Option, + starting_epoch: Option, }, WithdrawFromVoteAccount { vote_account_pubkey: Pubkey, @@ -1325,6 +1327,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { use_lamports_unit, with_rewards, use_csv, + starting_epoch, } => process_show_stake_account( &rpc_client, config, @@ -1332,6 +1335,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *use_lamports_unit, *with_rewards, *use_csv, + *starting_epoch, ), CliCommand::ShowStakeHistory { use_lamports_unit, @@ -1494,6 +1498,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { use_lamports_unit, use_csv, with_rewards, + starting_epoch, } => process_show_vote_account( &rpc_client, config, @@ -1501,6 +1506,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *use_lamports_unit, *use_csv, *with_rewards, + *starting_epoch, ), CliCommand::WithdrawFromVoteAccount { vote_account_pubkey, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index a5434cd312d598..9fefb818aee3a9 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -741,6 +741,14 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(false) .help("Format stake rewards data in csv"), ) + .arg( + Arg::with_name("starting_epoch") + .long("starting-epoch") + .takes_value(true) + .value_name("NUM") + .requires("with_rewards") + .help("Start displaying from epoch NUM"), + ) .arg( Arg::with_name("num_rewards_epochs") .long("num-rewards-epochs") @@ -1329,12 +1337,14 @@ pub fn parse_show_stake_account( } else { None }; + let starting_epoch = value_of(matches, "starting_epoch"); Ok(CliCommandInfo::without_signers( CliCommand::ShowStakeAccount { pubkey: stake_account_pubkey, use_lamports_unit, with_rewards, use_csv, + starting_epoch, }, )) } @@ -2541,10 +2551,18 @@ pub(crate) fn fetch_epoch_rewards( rpc_client: &RpcClient, address: &Pubkey, mut num_epochs: usize, + starting_epoch: Option, ) -> Result, Box> { let mut all_epoch_rewards = vec![]; let epoch_schedule = rpc_client.get_epoch_schedule()?; - let mut rewards_epoch = rpc_client.get_epoch_info()?.epoch; + let mut rewards_epoch = if let Some(epoch) = starting_epoch { + epoch + } else { + rpc_client + .get_epoch_info()? 
+ .epoch + .saturating_sub(num_epochs as u64) + }; let mut process_reward = |reward: &Option| -> Result<(), Box> { @@ -2559,14 +2577,14 @@ pub(crate) fn fetch_epoch_rewards( Ok(()) }; - while num_epochs > 0 && rewards_epoch > 0 { - rewards_epoch = rewards_epoch.saturating_sub(1); + while num_epochs > 0 { if let Ok(rewards) = rpc_client.get_inflation_reward(&[*address], Some(rewards_epoch)) { process_reward(&rewards[0])?; } else { eprintln!("Rewards not available for epoch {rewards_epoch}"); } num_epochs = num_epochs.saturating_sub(1); + rewards_epoch = rewards_epoch.saturating_add(1); } Ok(all_epoch_rewards) @@ -2579,6 +2597,7 @@ pub fn process_show_stake_account( use_lamports_unit: bool, with_rewards: Option, use_csv: bool, + starting_epoch: Option, ) -> ProcessResult { let stake_account = rpc_client.get_account(stake_account_address)?; if stake_account.owner != stake::program::id() { @@ -2614,7 +2633,12 @@ pub fn process_show_stake_account( if state.stake_type == CliStakeType::Stake && state.activation_epoch.is_some() { let epoch_rewards = with_rewards.and_then(|num_epochs| { - match fetch_epoch_rewards(rpc_client, stake_account_address, num_epochs) { + match fetch_epoch_rewards( + rpc_client, + stake_account_address, + num_epochs, + starting_epoch, + ) { Ok(rewards) => Some(rewards), Err(error) => { eprintln!("Failed to fetch epoch rewards: {error:?}"); diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 66b925d9b88418..ab9a4897342b24 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -350,6 +350,14 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(false) .help("Format rewards in a CSV table"), ) + .arg( + Arg::with_name("starting_epoch") + .long("starting-epoch") + .takes_value(true) + .value_name("NUM") + .requires("with_rewards") + .help("Start displaying from epoch NUM"), + ) .arg( Arg::with_name("num_rewards_epochs") .long("num-rewards-epochs") @@ -675,12 +683,14 @@ pub fn parse_vote_get_account_command( } else { None }; + let starting_epoch = value_of(matches, "starting_epoch"); Ok(CliCommandInfo::without_signers( CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, use_lamports_unit, use_csv, with_rewards, + starting_epoch, }, )) } @@ -1261,6 +1271,7 @@ pub fn process_show_vote_account( use_lamports_unit: bool, use_csv: bool, with_rewards: Option, + starting_epoch: Option, ) -> ProcessResult { let (vote_account, vote_state) = get_vote_account(rpc_client, vote_account_address, config.commitment)?; @@ -1288,7 +1299,12 @@ pub fn process_show_vote_account( let epoch_rewards = with_rewards.and_then(|num_epochs| { - match crate::stake::fetch_epoch_rewards(rpc_client, vote_account_address, num_epochs) { + match crate::stake::fetch_epoch_rewards( + rpc_client, + vote_account_address, + num_epochs, + starting_epoch, + ) { Ok(rewards) => Some(rewards), Err(error) => { eprintln!("Failed to fetch epoch rewards: {error:?}"); From 21fc57dec644bf32b3c220b449f0f4cec8ac5b40 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 19:42:52 +0800 Subject: [PATCH 457/529] build(deps): bump rustls from 0.23.13 to 0.23.14 (#3106) * build(deps): bump rustls from 0.23.13 to 0.23.14 Bumps [rustls](https://github.com/rustls/rustls) from 0.23.13 to 0.23.14. 
- [Release notes](https://github.com/rustls/rustls/releases) - [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustls/rustls/compare/v/0.23.13...v/0.23.14) --- updated-dependencies: - dependency-name: rustls dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 22 +++++++++++----------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8558aa59a0aa1..41b2ac1a420bd6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4525,7 +4525,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.14", "socket2 0.5.7", "thiserror", "tokio", @@ -4542,7 +4542,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.3", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-platform-verifier", "slab", "thiserror", @@ -5027,9 +5027,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "once_cell", "ring 0.17.3", @@ -5073,9 +5073,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-platform-verifier" @@ -5088,7 +5088,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -6392,7 +6392,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.13", + "rustls 0.23.14", "serde", "serde_bytes", "serde_derive", @@ -7451,7 +7451,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.13", + "rustls 0.23.14", "solana-connection-cache", "solana-logger", "solana-measure", @@ -8080,7 +8080,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.13", + "rustls 0.23.14", "smallvec", "socket2 0.5.7", "solana-logger", @@ -8451,7 +8451,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.13", + "rustls 0.23.14", "solana-entry", "solana-feature-set", "solana-geyser-plugin-manager", diff --git a/Cargo.toml b/Cargo.toml index 921996e4a27749..50fa552d0bccd2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -345,7 +345,7 @@ reqwest = { version = "0.11.27", default-features = false } reqwest-middleware = "0.2.5" rolling-file = "0.2.0" rpassword = "7.3" -rustls = { version = "0.23.13", default-features = false } +rustls = { version = "0.23.14", default-features = false } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 70d1b420f6297b..33df3b67f6914a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3790,7 +3790,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.14", "socket2 0.5.7", "thiserror", 
"tokio", @@ -3807,7 +3807,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.3", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-platform-verifier", "slab", "thiserror", @@ -4218,9 +4218,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "once_cell", "ring 0.17.3", @@ -4264,9 +4264,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-platform-verifier" @@ -4279,7 +4279,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -5105,7 +5105,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.13", + "rustls 0.23.14", "serde", "serde_bytes", "serde_derive", @@ -5829,7 +5829,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.13", + "rustls 0.23.14", "solana-connection-cache", "solana-measure", "solana-metrics", @@ -6795,7 +6795,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.13", + "rustls 0.23.14", "smallvec", "socket2 0.5.7", "solana-measure", @@ -7011,7 +7011,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.13", + "rustls 0.23.14", "solana-entry", "solana-feature-set", "solana-geyser-plugin-manager", From f7c166b7a983847b4f4e2f6aca0bbb96c90d1056 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 19:43:04 +0800 Subject: [PATCH 458/529] build(deps): bump proc-macro2 from 1.0.86 to 1.0.87 (#3107) * build(deps): bump proc-macro2 from 1.0.86 to 1.0.87 Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.86 to 1.0.87. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.86...1.0.87) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41b2ac1a420bd6..c8e5ed49e73f11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4375,9 +4375,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 50fa552d0bccd2..552c2451e4bd22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -325,7 +325,7 @@ pickledb = { version = "0.5.1", default-features = false } predicates = "2.1" pretty-hex = "0.3.0" prio-graph = "0.2.1" -proc-macro2 = "1.0.86" +proc-macro2 = "1.0.87" proptest = "1.5" prost = "0.11.9" prost-build = "0.11.9" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 33df3b67f6914a..806d0c40ff7626 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -3674,9 +3674,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] From dc6a1b3532385d6b0fd2a93c2c4b6bdb8895edf0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 20:37:26 +0800 Subject: [PATCH 459/529] build(deps): bump bytemuck_derive from 1.7.1 to 1.8.0 (#3108) * build(deps): bump bytemuck_derive from 1.7.1 to 1.8.0 Bumps [bytemuck_derive](https://github.com/Lokathor/bytemuck) from 1.7.1 to 1.8.0. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/bytemuck_derive-v1.7.1...bytemuck_derive-v1.8.0) --- updated-dependencies: - dependency-name: bytemuck_derive dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8e5ed49e73f11..62b8278cd7954b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1163,9 +1163,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" +checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 552c2451e4bd22..8e5c541548c2d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -221,7 +221,7 @@ bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" bytemuck = "1.18.0" -bytemuck_derive = "1.7.1" +bytemuck_derive = "1.8.0" byteorder = "1.5.0" bytes = "1.7" bzip2 = "0.4.4" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 806d0c40ff7626..1cf40e6cddd4b5 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -859,9 +859,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" +checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", From 7640c25509b0aa299b7e3238208c1d01035e7898 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Tue, 8 Oct 2024 23:33:22 +0800 Subject: [PATCH 460/529] ci: remove unused Github Actions (#3071) * ci: remove unused Github Actions * add changelog label back --- .github/workflows/label-actions.yml | 15 ------- .../workflows/manage-stale-issues-and-prs.yml | 44 ------------------- 2 files changed, 59 deletions(-) delete mode 100644 .github/workflows/label-actions.yml delete mode 100644 .github/workflows/manage-stale-issues-and-prs.yml diff --git a/.github/workflows/label-actions.yml b/.github/workflows/label-actions.yml deleted file mode 100644 index 218876135a1974..00000000000000 --- a/.github/workflows/label-actions.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: "Issue Label Actions" - -on: - issues: - types: [labeled, unlabeled] - -permissions: - contents: read - issues: write - -jobs: - action: - runs-on: ubuntu-latest - steps: - - uses: dessant/label-actions@v2 diff --git a/.github/workflows/manage-stale-issues-and-prs.yml b/.github/workflows/manage-stale-issues-and-prs.yml deleted file mode 100644 index a4115ccf3cf774..00000000000000 --- a/.github/workflows/manage-stale-issues-and-prs.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: "Manage stale issues and PRs" -on: - # Chosen to be just before London wakes up and way past San Francisco's bedtime. - schedule: - - cron: "0 8 * * 1-5" # This is in UTC. - # Do a dry-run (debug-only: true) whenever this workflow itself is changed. - pull_request: - paths: - - .github/workflows/manage-stale-issues-and-prs.yml - types: - - opened - - synchronize - -permissions: - issues: write - pull-requests: write - -jobs: - stale: - # Forks do not need to run this, especially on cron schedule. 
- if: > github.event_name != 'schedule' || github.repository == 'solana-labs/solana' - - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v6 - with: - ascending: true # Spend API operations budget on older, more-likely-to-get-closed issues first - close-issue-message: "" # Leave no comment when closing - close-pr-message: "" # Leave no comment when closing - days-before-issue-stale: 365 - days-before-pr-stale: 14 - days-before-close: 7 - debug-only: ${{ github.event_name == 'pull_request' }} # Dry-run when true. - exempt-all-milestones: true # Milestones can sometimes last a month, so exempt issues attached to a milestone. - exempt-issue-labels: blocked,do-not-close,feature-gate,security - exempt-pr-labels: blocked,do-not-close,feature-gate,security - # No actual changes get made in debug-only mode, so we can raise the operations ceiling. - operations-per-run: ${{ github.event_name == 'pull_request' && 1000 || 900}} - stale-issue-label: stale - stale-issue-message: "" # Leave no comment when marking as stale - stale-pr-label: stale - stale-pr-message: "" # Leave no comment when marking as stale From f1b3856791c2f301c7f90e815f2264eb2d8cbd23 Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 8 Oct 2024 14:58:58 -0400 Subject: [PATCH 461/529] cargo-build-sbf: Use `metadata.solana.tools-version` in Cargo.toml (#2914) * cargo-build-sbf: Use `solana.tools-version` in Cargo.toml #### Problem It's possible to specify the platform-tools version using the `--tools-version` flag, but most people aren't aware of the different versions of the compiler, or that they can use the flag. #### Summary of changes Allow `cargo-build-sbf` to read from the metadata section of a package or workspace Cargo.toml. The concept is similar to `rust-toolchain.toml`, where cargo finds the right version of Rust. For cargo-build-sbf, it can read: ```toml [package.metadata.solana] tools-version = "v1.43" ``` Or ```toml [workspace.metadata.solana] tools-version = "v1.43" ``` To go with this change, since I often forget the leading "v" in the version string, the version parsing now allows for omitting the leading "v", so you can specify `--tools-version 1.43`. * Relax leading "v" * Add CHANGELOG entry * Special-case default run * Add tests * ci: Fix parsing platform tools version --- CHANGELOG.md | 11 ++ ci/platform-tools-info.sh | 2 +- sdk/cargo-build-sbf/src/main.rs | 104 ++++++++++++------ sdk/cargo-build-sbf/tests/crates.rs | 14 +++ .../tests/crates/package-metadata/Cargo.toml | 21 ++++ .../tests/crates/package-metadata/src/lib.rs | 12 ++ .../crates/workspace-metadata/Cargo.toml | 21 ++++ .../crates/workspace-metadata/src/lib.rs | 12 ++ sdk/cargo-test-sbf/src/main.rs | 4 +- 9 files changed, 163 insertions(+), 38 deletions(-) create mode 100644 sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml create mode 100644 sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs create mode 100644 sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml create mode 100644 sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e07ac3c42c17c..1a1851527965fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,17 @@ Release channels have their own copy of this changelog: * SDK: * removed the `respan` macro. This was marked as "internal use only" and was no longer used internally.
* add `entrypoint_no_alloc!`, a more performant program entrypoint that avoids allocations, saving 20-30 CUs per unique account + * `cargo-build-sbf`: a workspace or package-level Cargo.toml may specify `tools-version` for overriding the default platform tools version when building on-chain programs. For example: +```toml +[package.metadata.solana] +tools-version = "1.43" +``` +or +```toml +[workspace.metadata.solana] +tools-version = "1.43" +``` +The order of precedence for the chosen tools version goes: `--tools-version` argument, package version, workspace version, and finally default version. * `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447) * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480) * `solana-genesis`: the `--cluster-type` parameter now clones the feature set from the target cluster (#2587) diff --git a/ci/platform-tools-info.sh b/ci/platform-tools-info.sh index d8ee41cb5fbb00..0f0e7c8e0db43c 100755 --- a/ci/platform-tools-info.sh +++ b/ci/platform-tools-info.sh @@ -11,7 +11,7 @@ SBF_TOOLS_VERSION=unknown cargo_build_sbf_main="${here}/../sdk/cargo-build-sbf/src/main.rs" if [[ -f "${cargo_build_sbf_main}" ]]; then - version=$(sed -e 's/^.*platform_tools_version\s*=\s*String::from("\(v[0-9.]\+\)").*/\1/;t;d' "${cargo_build_sbf_main}") + version=$(sed -e 's/^.*DEFAULT_PLATFORM_TOOLS_VERSION.*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_sbf_main}") if [[ ${version} != '' ]]; then SBF_TOOLS_VERSION="${version}" else diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 3c3c56a08d14bf..03bfce2cabd5dd 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -21,13 +21,15 @@ use { tar::Archive, }; +const DEFAULT_PLATFORM_TOOLS_VERSION: &str = "v1.43"; + #[derive(Debug)] struct Config<'a> { cargo_args: Vec<&'a str>, target_directory: Option, sbf_out_dir: Option, sbf_sdk: PathBuf, - platform_tools_version: &'a str, + platform_tools_version: Option<&'a str>, dump: bool, features: Vec, force_tools_install: bool, @@ -55,7 +57,7 @@ impl Default for Config<'_> { .join("sdk") .join("sbf"), sbf_out_dir: None, - platform_tools_version: "(unknown)", + platform_tools_version: None, dump: false, features: vec![], force_tools_install: false, @@ -126,11 +128,11 @@ where } pub fn is_version_string(arg: &str) -> Result<(), String> { - let semver_re = Regex::new(r"^v[0-9]+\.[0-9]+(\.[0-9]+)?").unwrap(); + let semver_re = Regex::new(r"^v?[0-9]+\.[0-9]+(\.[0-9]+)?").unwrap(); if semver_re.is_match(arg) { return Ok(()); } - Err("a version string starts with 'v' and contains major and minor version numbers separated by a dot, e.g. v1.32".to_string()) + Err("a version string may start with 'v' and must contain major and minor version numbers separated by a dot, e.g. v1.32 or 1.32".to_string()) } fn find_installed_platform_tools() -> Vec { @@ -181,44 +183,61 @@ fn get_base_rust_version(platform_tools_version: &str) -> String { } } -fn normalize_version(version: String) -> String { +fn downloadable_version(version: &str) -> String { + if version.starts_with('v') { + version.to_string() + } else { + format!("v{version}") + } +} + +fn semver_version(version: &str) -> String { + let starts_with_v = version.starts_with('v'); let dots = version.as_bytes().iter().fold( 0, |n: u32, c| if *c == b'.' 
{ n.saturating_add(1) } else { n }, ); - if dots == 1 { - format!("{version}.0") - } else { - version + match (dots, starts_with_v) { + (0, false) => format!("{version}.0.0"), + (0, true) => format!("{}.0.0", &version[1..]), + (1, false) => format!("{version}.0"), + (1, true) => format!("{}.0", &version[1..]), + (_, false) => version.to_string(), + (_, true) => version[1..].to_string(), } } -fn validate_platform_tools_version(requested_version: &str, builtin_version: String) -> String { - let normalized_requested = normalize_version(requested_version.to_string()); - let requested_semver = semver::Version::parse(&normalized_requested[1..]).unwrap(); +fn validate_platform_tools_version(requested_version: &str, builtin_version: &str) -> String { + // Early return here in case it's the first time we're running `cargo build-sbf` + // and we need to create the cache folders + if requested_version == builtin_version { + return builtin_version.to_string(); + } + let normalized_requested = semver_version(requested_version); + let requested_semver = semver::Version::parse(&normalized_requested).unwrap(); let installed_versions = find_installed_platform_tools(); for v in installed_versions { - if requested_semver <= semver::Version::parse(&normalize_version(v)[1..]).unwrap() { - return requested_version.to_string(); + if requested_semver <= semver::Version::parse(&semver_version(&v)).unwrap() { + return downloadable_version(requested_version); } } let latest_version = get_latest_platform_tools_version().unwrap_or_else(|err| { debug!( "Can't get the latest version of platform-tools: {}. Using built-in version {}.", - err, &builtin_version, + err, builtin_version, ); - builtin_version.clone() + builtin_version.to_string() }); - let normalized_latest = normalize_version(latest_version.clone()); - let latest_semver = semver::Version::parse(&normalized_latest[1..]).unwrap(); + let normalized_latest = semver_version(&latest_version); + let latest_semver = semver::Version::parse(&normalized_latest).unwrap(); if requested_semver <= latest_semver { - requested_version.to_string() + downloadable_version(requested_version) } else { warn!( "Version {} is not valid, latest version is {}. 
Using the built-in version {}", - requested_version, latest_version, &builtin_version, + requested_version, latest_version, builtin_version, ); - builtin_version + builtin_version.to_string() } } @@ -240,6 +259,7 @@ fn install_if_missing( package: &str, url: &str, download_file_name: &str, + platform_tools_version: &str, target_path: &Path, ) -> Result<(), String> { if config.force_tools_install { @@ -286,7 +306,7 @@ fn install_if_missing( fs::create_dir_all(target_path).map_err(|err| err.to_string())?; let mut url = String::from(url); url.push('/'); - url.push_str(config.platform_tools_version); + url.push_str(platform_tools_version); url.push('/'); url.push_str(download_file_name); let download_file_path = target_path.join(download_file_name); @@ -536,6 +556,7 @@ fn build_solana_package( config: &Config, target_directory: &Path, package: &cargo_metadata::Package, + metadata: &cargo_metadata::Metadata, ) { let program_name = { let cdylib_targets = package @@ -591,6 +612,25 @@ fn build_solana_package( exit(1); }); + let platform_tools_version = config.platform_tools_version.unwrap_or_else(|| { + let workspace_tools_version = metadata.workspace_metadata.get("solana").and_then(|v| v.get("tools-version")).and_then(|v| v.as_str()); + let package_tools_version = package.metadata.get("solana").and_then(|v| v.get("tools-version")).and_then(|v| v.as_str()); + match (workspace_tools_version, package_tools_version) { + (Some(workspace_version), Some(package_version)) => { + if workspace_version != package_version { + warn!("Workspace and package specify conflicting tools versions, {workspace_version} and {package_version}, using package version {package_version}"); + } + package_version + }, + (Some(workspace_version), None) => workspace_version, + (None, Some(package_version)) => package_version, + (None, None) => DEFAULT_PLATFORM_TOOLS_VERSION, + } + }); + + let platform_tools_version = + validate_platform_tools_version(platform_tools_version, DEFAULT_PLATFORM_TOOLS_VERSION); + info!("Solana SDK: {}", config.sbf_sdk.display()); if config.no_default_features { info!("No default features"); @@ -614,12 +654,13 @@ fn build_solana_package( format!("platform-tools-linux-{arch}.tar.bz2") }; let package = "platform-tools"; - let target_path = make_platform_tools_path_for_version(package, config.platform_tools_version); + let target_path = make_platform_tools_path_for_version(package, &platform_tools_version); install_if_missing( config, package, "https://github.com/anza-xyz/platform-tools/releases/download", platform_tools_download_file_name.as_str(), + &platform_tools_version, &target_path, ) .unwrap_or_else(|err| { @@ -872,7 +913,7 @@ fn build_solana(config: Config, manifest_path: Option) { if let Some(root_package) = metadata.root_package() { if !config.workspace { - build_solana_package(&config, target_dir.as_ref(), root_package); + build_solana_package(&config, target_dir.as_ref(), root_package, &metadata); return; } } @@ -893,7 +934,7 @@ fn build_solana(config: Config, manifest_path: Option) { .collect::>(); for package in all_sbf_packages { - build_solana_package(&config, target_dir.as_ref(), package); + build_solana_package(&config, target_dir.as_ref(), package, &metadata); } } @@ -913,12 +954,11 @@ fn main() { // The following line is scanned by CI configuration script to // separate cargo caches according to the version of platform-tools. 
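+    // NOTE: with this change, the constant that CI actually scans for is
+    // DEFAULT_PLATFORM_TOOLS_VERSION at the top of this file; see the
+    // ci/platform-tools-info.sh hunk earlier in this patch.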
- let platform_tools_version = String::from("v1.43"); - let rust_base_version = get_base_rust_version(platform_tools_version.as_str()); + let rust_base_version = get_base_rust_version(DEFAULT_PLATFORM_TOOLS_VERSION); let version = format!( "{}\nplatform-tools {}\n{}", crate_version!(), - platform_tools_version, + DEFAULT_PLATFORM_TOOLS_VERSION, rust_base_version, ); let matches = clap::Command::new(crate_name!()) @@ -1051,12 +1091,6 @@ fn main() { let sbf_sdk: PathBuf = matches.value_of_t_or_exit("sbf_sdk"); let sbf_out_dir: Option = matches.value_of_t("sbf_out_dir").ok(); - let platform_tools_version = if let Some(tools_version) = matches.value_of("tools_version") { - validate_platform_tools_version(tools_version, platform_tools_version) - } else { - platform_tools_version - }; - let mut cargo_args = matches .values_of("cargo_args") .map(|vals| vals.collect::>()) @@ -1106,7 +1140,7 @@ fn main() { .join(sbf_out_dir) } }), - platform_tools_version: platform_tools_version.as_str(), + platform_tools_version: matches.value_of("tools_version"), dump: matches.is_present("dump"), features: matches.values_of_t("features").ok().unwrap_or_default(), force_tools_install: matches.is_present("force_tools_install"), diff --git a/sdk/cargo-build-sbf/tests/crates.rs b/sdk/cargo-build-sbf/tests/crates.rs index 09015eb208e437..65fb26b469da93 100644 --- a/sdk/cargo-build-sbf/tests/crates.rs +++ b/sdk/cargo-build-sbf/tests/crates.rs @@ -176,3 +176,17 @@ fn test_sbfv2() { .success(); clean_target("noop"); } + +#[test] +#[serial] +fn test_package_metadata_tools_version() { + run_cargo_build("package-metadata", &[], false); + clean_target("package-metadata"); +} + +#[test] +#[serial] +fn test_workspace_metadata_tools_version() { + run_cargo_build("workspace-metadata", &[], false); + clean_target("workspace-metadata"); +} diff --git a/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml new file mode 100644 index 00000000000000..4de95889d4bf4d --- /dev/null +++ b/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "package-metadata" +version = "2.1.0" +description = "Solana SBF test program with tools version in package metadata" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" +license = "Apache-2.0" +homepage = "https://anza.xyz" +edition = "2021" +publish = false + +[package.metadata.solana] +tools-version = "v1.43" + +[dependencies] +solana-program = { path = "../../../../program", version = "=2.1.0" } + +[lib] +crate-type = ["cdylib"] + +[workspace] diff --git a/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs b/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs new file mode 100644 index 00000000000000..a6f2c05b770881 --- /dev/null +++ b/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs @@ -0,0 +1,12 @@ +//! 
Example Rust-based SBF noop program + +use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; + +solana_program::entrypoint!(process_instruction); +fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + Ok(()) +} diff --git a/sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml new file mode 100644 index 00000000000000..a72b71aab5f9e7 --- /dev/null +++ b/sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "workspace-metadata" +version = "2.1.0" +description = "Solana SBF test program with tools version in workspace metadata" +authors = ["Anza Maintainers "] +repository = "https://github.com/anza-xyz/agave" +license = "Apache-2.0" +homepage = "https://anza.xyz" +edition = "2021" +publish = false + +[dependencies] +solana-program = { path = "../../../../program", version = "=2.1.0" } + +[lib] +crate-type = ["cdylib"] + +[workspace] + +[workspace.metadata.solana] +tools-version = "v1.43" diff --git a/sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs b/sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs new file mode 100644 index 00000000000000..a6f2c05b770881 --- /dev/null +++ b/sdk/cargo-build-sbf/tests/crates/workspace-metadata/src/lib.rs @@ -0,0 +1,12 @@ +//! Example Rust-based SBF noop program + +use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; + +solana_program::entrypoint!(process_instruction); +fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + Ok(()) +} diff --git a/sdk/cargo-test-sbf/src/main.rs b/sdk/cargo-test-sbf/src/main.rs index 829a94d918d8f8..57d1185b493caa 100644 --- a/sdk/cargo-test-sbf/src/main.rs +++ b/sdk/cargo-test-sbf/src/main.rs @@ -104,11 +104,11 @@ where } pub fn is_version_string(arg: &str) -> Result<(), String> { - let semver_re = Regex::new(r"^v[0-9]+\.[0-9]+(\.[0-9]+)?").unwrap(); + let semver_re = Regex::new(r"^v?[0-9]+\.[0-9]+(\.[0-9]+)?").unwrap(); if semver_re.is_match(arg) { return Ok(()); } - Err("a version string starts with 'v' and contains major and minor version numbers separated by a dot, e.g. v1.32".to_string()) + Err("a version string may start with 'v' and contains major and minor version numbers separated by a dot, e.g. 
v1.32 or 1.32".to_string()) } fn test_solana_package( From 10b447f7fc47c6cccfc9d832fbda5faecd9c5a50 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Tue, 8 Oct 2024 14:23:32 -0500 Subject: [PATCH 462/529] Consistent ImmutableDeserializedPacket::PartialEq (#3098) --- core/src/banking_stage/immutable_deserialized_packet.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index a83244171e775d..b03f3d5d64d4e8 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -39,7 +39,7 @@ pub enum DeserializedPacketError { FailedFilter(#[from] PacketFilterFailure), } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Eq)] pub struct ImmutableDeserializedPacket { original_packet: Packet, transaction: SanitizedVersionedTransaction, @@ -132,6 +132,13 @@ impl ImmutableDeserializedPacket { } } +// PartialEq MUST be consistent with PartialOrd and Ord +impl PartialEq for ImmutableDeserializedPacket { + fn eq(&self, other: &Self) -> bool { + self.compute_unit_price() == other.compute_unit_price() + } +} + impl PartialOrd for ImmutableDeserializedPacket { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) From 20e0df4debabf27a3da6d4c1a4972fc901cd22b1 Mon Sep 17 00:00:00 2001 From: cavemanloverboy <93507302+cavemanloverboy@users.noreply.github.com> Date: Tue, 8 Oct 2024 22:08:31 +0200 Subject: [PATCH 463/529] fix: reduce max packet receive time during leader window (#2801) --- .../banking_stage/transaction_scheduler/scheduler_controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 47a4c8f5a39736..995b1a5782702b 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -438,7 +438,7 @@ impl SchedulerController { fn receive_and_buffer_packets(&mut self, decision: &BufferedPacketsDecision) -> bool { let remaining_queue_capacity = self.container.remaining_queue_capacity(); - const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(100); + const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(10); let (recv_timeout, should_buffer) = match decision { BufferedPacketsDecision::Consume(_) => ( if self.container.is_empty() { From eb12163fac20642eee88d26920bb4ee04aa012ba Mon Sep 17 00:00:00 2001 From: steviez Date: Tue, 8 Oct 2024 16:36:26 -0500 Subject: [PATCH 464/529] Cleanup entry threadpool usage in GPU case (#3111) Fix a few minor items for entry verification in the GPU-present case: - Use allocated thread pool instead of rayon thread pool - Avoid nested parallel iterators which will fan out very small pieces of work to a large number of cores - Avoid intermediate allocation by swapping rayon iterator type --- entry/src/entry.rs | 80 ++++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/entry/src/entry.rs b/entry/src/entry.rs index 75057db1630f9b..438d4e742da9c9 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -506,7 +506,7 @@ fn start_verify_transactions_gpu( let entries = verify_transactions(entries, thread_pool, Arc::new(verify_func))?; - let entry_txs: Vec<&SanitizedTransaction> = entries + let transactions: 
Vec<&SanitizedTransaction> = entries .iter() .filter_map(|entry_type| match entry_type { EntryType::Tick(_) => None, @@ -515,7 +515,7 @@ fn start_verify_transactions_gpu( .flatten() .collect::>(); - if entry_txs.is_empty() { + if transactions.is_empty() { return Ok(EntrySigVerificationState { verification_status: EntryVerificationStatus::Success, entries: Some(entries), @@ -524,46 +524,48 @@ fn start_verify_transactions_gpu( }); } - let mut packet_batches = entry_txs - .par_iter() - .chunks(PACKETS_PER_BATCH) - .map(|slice| { - let vec_size = slice.len(); - let mut packet_batch = PacketBatch::new_with_recycler( - &verify_recyclers.packet_recycler, - vec_size, - "entry-sig-verify", - ); - // We use set_len here instead of resize(vec_size, Packet::default()), to save - // memory bandwidth and avoid writing a large amount of data that will be overwritten - // soon afterwards. As well, Packet::default() actually leaves the packet data - // uninitialized, so the initialization would simply write junk into - // the vector anyway. - unsafe { - packet_batch.set_len(vec_size); - } - let entry_tx_iter = slice - .into_par_iter() - .map(|tx| tx.to_versioned_transaction()); - - let res = packet_batch - .par_iter_mut() - .zip(entry_tx_iter) - .all(|(packet, tx)| { - *packet.meta_mut() = Meta::default(); - Packet::populate_packet(packet, None, &tx).is_ok() - }); - if res { - Ok(packet_batch) - } else { - Err(TransactionError::SanitizeFailure) - } - }) - .collect::>>()?; + let packet_batches = thread_pool.install(|| { + transactions + .par_chunks(PACKETS_PER_BATCH) + .map(|transaction_chunk| { + let num_transactions = transaction_chunk.len(); + let mut packet_batch = PacketBatch::new_with_recycler( + &verify_recyclers.packet_recycler, + num_transactions, + "entry-sig-verify", + ); + // We use set_len here instead of resize(num_txs, Packet::default()), to save + // memory bandwidth and avoid writing a large amount of data that will be overwritten + // soon afterwards. As well, Packet::default() actually leaves the packet data + // uninitialized, so the initialization would simply write junk into + // the vector anyway. + unsafe { + packet_batch.set_len(num_transactions); + } + let transaction_iter = transaction_chunk + .iter() + .map(|tx| tx.to_versioned_transaction()); + + let res = packet_batch + .iter_mut() + .zip(transaction_iter) + .all(|(packet, tx)| { + *packet.meta_mut() = Meta::default(); + Packet::populate_packet(packet, None, &tx).is_ok() + }); + if res { + Ok(packet_batch) + } else { + Err(TransactionError::SanitizeFailure) + } + }) + .collect::>>() + }); + let mut packet_batches = packet_batches?; let tx_offset_recycler = verify_recyclers.tx_offset_recycler; let out_recycler = verify_recyclers.out_recycler; - let num_packets = entry_txs.len(); + let num_packets = transactions.len(); let gpu_verify_thread = thread::Builder::new() .name("solGpuSigVerify".into()) .spawn(move || { From 74f6634950825491c4d24efca9a995b1c538215d Mon Sep 17 00:00:00 2001 From: Hero Date: Tue, 8 Oct 2024 21:36:33 -0500 Subject: [PATCH 465/529] docs: setup-a-validator.md Add cluster specific note (#3064) * Update setup-a-validator.md with cluster specific note. 
* Update docs/src/operations/setup-a-validator.md

Co-authored-by: Joe C

---------

Co-authored-by: Joe C
---
 docs/src/operations/setup-a-validator.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/src/operations/setup-a-validator.md b/docs/src/operations/setup-a-validator.md
index 2a54b069ec20d7..35d265032f567f 100644
--- a/docs/src/operations/setup-a-validator.md
+++ b/docs/src/operations/setup-a-validator.md
@@ -435,6 +435,8 @@ Refer to `agave-validator --help` for more information on what each flag is
 doing in this script. Also refer to the section on [best practices for operating
 a validator](./best-practices/general.md).

+This startup script is specifically intended for testnet. For more startup script examples intended for other clusters, refer to the
+[clusters section](./../clusters/available.md).

 ## Verifying Your Validator Is Working

 Test that your `validator.sh` file is running properly by executing the

From d663e0563385a5ccb8d116c68bccc524ab9cafa5 Mon Sep 17 00:00:00 2001
From: Kevin Heavey
Date: Wed, 9 Oct 2024 15:46:26 +0400
Subject: [PATCH 466/529] move LamportsError to solana-instruction (#3081)

* move LamportsError to solana-instruction

* nits

---
 sdk/instruction/src/error.rs | 29 +++++++++++++++++++++++++++++
 sdk/program/src/lamports.rs  | 29 ++++++-----------------------
 2 files changed, 35 insertions(+), 23 deletions(-)

diff --git a/sdk/instruction/src/error.rs b/sdk/instruction/src/error.rs
index 5cef257b3f31cb..270659f8de054e 100644
--- a/sdk/instruction/src/error.rs
+++ b/sdk/instruction/src/error.rs
@@ -432,3 +432,32 @@ where
         }
     }
 }
+
+#[derive(Debug)]
+pub enum LamportsError {
+    /// arithmetic underflowed
+    ArithmeticUnderflow,
+    /// arithmetic overflowed
+    ArithmeticOverflow,
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for LamportsError {}
+
+impl fmt::Display for LamportsError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::ArithmeticUnderflow => f.write_str("Arithmetic underflowed"),
+            Self::ArithmeticOverflow => f.write_str("Arithmetic overflowed"),
+        }
+    }
+}
+
+impl From<LamportsError> for InstructionError {
+    fn from(error: LamportsError) -> Self {
+        match error {
+            LamportsError::ArithmeticOverflow => InstructionError::ArithmeticOverflow,
+            LamportsError::ArithmeticUnderflow => InstructionError::ArithmeticOverflow,
+        }
+    }
+}
diff --git a/sdk/program/src/lamports.rs b/sdk/program/src/lamports.rs
index 91102899e029b5..925240b9893939 100644
--- a/sdk/program/src/lamports.rs
+++ b/sdk/program/src/lamports.rs
@@ -1,23 +1,6 @@
-//! Defines the [`LamportsError`] type.
-
-use {crate::instruction::InstructionError, thiserror::Error};
-
-#[derive(Debug, Error)]
-pub enum LamportsError {
-    /// arithmetic underflowed
-    #[error("Arithmetic underflowed")]
-    ArithmeticUnderflow,
-
-    /// arithmetic overflowed
-    #[error("Arithmetic overflowed")]
-    ArithmeticOverflow,
-}
-
-impl From<LamportsError> for InstructionError {
-    fn from(error: LamportsError) -> Self {
-        match error {
-            LamportsError::ArithmeticOverflow => InstructionError::ArithmeticOverflow,
-            LamportsError::ArithmeticUnderflow => InstructionError::ArithmeticOverflow,
-        }
-    }
-}
+//! Re-exports the [`LamportsError`] type for backwards compatibility.
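+//!
+//! Existing imports of `solana_program::lamports::LamportsError` keep
+//! compiling through this re-export; new code should instead import the
+//! type from its new home, `solana_instruction::error::LamportsError`.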
+#[deprecated( + since = "2.1.0", + note = "Use solana_instruction::error::LamportsError instead" +)] +pub use solana_instruction::error::LamportsError; From 6e62af0f0de6a40e4e22628cbbcf63b1a6da560e Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Wed, 9 Oct 2024 17:05:25 +0400 Subject: [PATCH 467/529] extract solana-bincode crate (#3020) * extract limited-deserialize crate * rename solana-limited-deserialize to solana-bincode --- Cargo.lock | 11 +++++++ Cargo.toml | 2 ++ programs/sbf/Cargo.lock | 10 +++++++ sdk/bincode/Cargo.toml | 26 +++++++++++++++++ sdk/bincode/src/lib.rs | 48 +++++++++++++++++++++++++++++++ sdk/program/Cargo.toml | 1 + sdk/program/src/program_utils.rs | 49 +------------------------------- 7 files changed, 99 insertions(+), 48 deletions(-) create mode 100644 sdk/bincode/Cargo.toml create mode 100644 sdk/bincode/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 62b8278cd7954b..6b7a17b92111b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5948,6 +5948,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-bincode" +version = "2.1.0" +dependencies = [ + "bincode", + "serde", + "solana-instruction", + "solana-program", +] + [[package]] name = "solana-bloom" version = "2.1.0" @@ -7252,6 +7262,7 @@ dependencies = [ "sha3", "solana-account-info", "solana-atomic-u64", + "solana-bincode", "solana-clock", "solana-decode-error", "solana-define-syscall", diff --git a/Cargo.toml b/Cargo.toml index 8e5c541548c2d8..46f3b719578fb1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,6 +104,7 @@ members = [ "sdk/account", "sdk/account-info", "sdk/atomic-u64", + "sdk/bincode", "sdk/cargo-build-sbf", "sdk/cargo-test-sbf", "sdk/clock", @@ -375,6 +376,7 @@ solana-banks-client = { path = "banks-client", version = "=2.1.0" } solana-banks-interface = { path = "banks-interface", version = "=2.1.0" } solana-banks-server = { path = "banks-server", version = "=2.1.0" } solana-bench-tps = { path = "bench-tps", version = "=2.1.0" } +solana-bincode = { path = "sdk/bincode", version = "=2.1.0" } solana-bloom = { path = "bloom", version = "=2.1.0" } solana-bn254 = { path = "curves/bn254", version = "=2.1.0" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 1cf40e6cddd4b5..b774474ab01a4a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -4846,6 +4846,15 @@ dependencies = [ "tokio-serde", ] +[[package]] +name = "solana-bincode" +version = "2.1.0" +dependencies = [ + "bincode", + "serde", + "solana-instruction", +] + [[package]] name = "solana-bloom" version = "2.1.0" @@ -5653,6 +5662,7 @@ dependencies = [ "sha3", "solana-account-info", "solana-atomic-u64", + "solana-bincode", "solana-clock", "solana-decode-error", "solana-define-syscall", diff --git a/sdk/bincode/Cargo.toml b/sdk/bincode/Cargo.toml new file mode 100644 index 00000000000000..576ee0bf46a8b8 --- /dev/null +++ b/sdk/bincode/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "solana-bincode" +description = "Solana bincode utilities" +documentation = "https://docs.rs/solana-bincode" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bincode = { workspace = true } +serde = { workspace = true } +solana-instruction = { workspace = true, default-features = false, features = [ + "std", +] } + +[dev-dependencies] +solana-program = { path = "../program" } + 
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lints]
+workspace = true
diff --git a/sdk/bincode/src/lib.rs b/sdk/bincode/src/lib.rs
new file mode 100644
index 00000000000000..28bd19dec7bdca
--- /dev/null
+++ b/sdk/bincode/src/lib.rs
@@ -0,0 +1,48 @@
+//! Contains a single utility function for deserializing from [bincode].
+//!
+//! [bincode]: https://docs.rs/bincode
+
+use {bincode::config::Options, solana_instruction::error::InstructionError};
+
+/// Deserialize with a limit based on the maximum amount of data a program can expect to get.
+/// This function should be used in place of direct deserialization to help prevent OOM errors
+pub fn limited_deserialize<T>(instruction_data: &[u8], limit: u64) -> Result<T, InstructionError>
+where
+    T: serde::de::DeserializeOwned,
+{
+    bincode::options()
+        .with_limit(limit)
+        .with_fixint_encoding() // As per https://github.com/servo/bincode/issues/333, these two options are needed
+        .allow_trailing_bytes() // to retain the behavior of bincode::deserialize with the new `options()` method
+        .deserialize_from(instruction_data)
+        .map_err(|_| InstructionError::InvalidInstructionData)
+}
+
+#[cfg(test)]
+pub mod tests {
+    use {super::*, solana_program::system_instruction::SystemInstruction};
+
+    #[test]
+    fn test_limited_deserialize_advance_nonce_account() {
+        let item = SystemInstruction::AdvanceNonceAccount;
+        let mut serialized = bincode::serialize(&item).unwrap();
+
+        assert_eq!(
+            serialized.len(),
+            4,
+            "`SanitizedMessage::get_durable_nonce()` may need a change"
+        );
+
+        assert_eq!(
+            limited_deserialize::<SystemInstruction>(&serialized, 4).as_ref(),
+            Ok(&item)
+        );
+        assert!(limited_deserialize::<SystemInstruction>(&serialized, 3).is_err());
+
+        serialized.push(0);
+        assert_eq!(
+            limited_deserialize::<SystemInstruction>(&serialized, 4).as_ref(),
+            Ok(&item)
+        );
+    }
+}
diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml
index f7bc8ba8aeaa96..7acba606490664 100644
--- a/sdk/program/Cargo.toml
+++ b/sdk/program/Cargo.toml
@@ -33,6 +33,7 @@ sha2 = { workspace = true }
 sha3 = { workspace = true }
 solana-account-info = { workspace = true, features = ["bincode"] }
 solana-atomic-u64 = { workspace = true }
+solana-bincode = { workspace = true }
 solana-clock = { workspace = true, features = ["serde"] }
 solana-decode-error = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true, features = ["frozen-abi"] }
diff --git a/sdk/program/src/program_utils.rs b/sdk/program/src/program_utils.rs
index f624a0f69b13fd..823cd9881a377d 100644
--- a/sdk/program/src/program_utils.rs
+++ b/sdk/program/src/program_utils.rs
@@ -1,48 +1 @@
-//! Contains a single utility function for deserializing from [bincode].
-//!
-//! [bincode]: https://docs.rs/bincode
-
-use {crate::instruction::InstructionError, bincode::config::Options};
-
-/// Deserialize with a limit based the maximum amount of data a program can expect to get.
-/// This function should be used in place of direct deserialization to help prevent OOM errors -pub fn limited_deserialize(instruction_data: &[u8], limit: u64) -> Result -where - T: serde::de::DeserializeOwned, -{ - bincode::options() - .with_limit(limit) - .with_fixint_encoding() // As per https://github.com/servo/bincode/issues/333, these two options are needed - .allow_trailing_bytes() // to retain the behavior of bincode::deserialize with the new `options()` method - .deserialize_from(instruction_data) - .map_err(|_| InstructionError::InvalidInstructionData) -} - -#[cfg(test)] -pub mod tests { - use {super::*, solana_program::system_instruction::SystemInstruction}; - - #[test] - fn test_limited_deserialize_advance_nonce_account() { - let item = SystemInstruction::AdvanceNonceAccount; - let mut serialized = bincode::serialize(&item).unwrap(); - - assert_eq!( - serialized.len(), - 4, - "`SanitizedMessage::get_durable_nonce()` may need a change" - ); - - assert_eq!( - limited_deserialize::(&serialized, 4).as_ref(), - Ok(&item) - ); - assert!(limited_deserialize::(&serialized, 3).is_err()); - - serialized.push(0); - assert_eq!( - limited_deserialize::(&serialized, 4).as_ref(), - Ok(&item) - ); - } -} +pub use solana_bincode::limited_deserialize; From 07faf4a924b126e5eff457b8d896f700c28a25a2 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Wed, 9 Oct 2024 08:37:13 -0700 Subject: [PATCH 468/529] support completed slot status in geyser (#3069) * support completed slot status in geyser * missing interface file * Addressed some feedback from Brennan --- core/src/tvu.rs | 7 ++--- core/src/validator.rs | 28 ++++++++++--------- .../src/geyser_plugin_interface.rs | 4 +++ .../src/geyser_plugin_service.rs | 3 +- .../src/slot_status_notifier.rs | 21 ++++---------- .../src/slot_status_observer.rs | 6 ++-- rpc/src/lib.rs | 1 + rpc/src/rpc_completed_slots_service.rs | 6 +++- rpc/src/slot_status_notifier.rs | 23 +++++++++++++++ turbine/src/retransmit_stage.rs | 6 ++-- 10 files changed, 65 insertions(+), 40 deletions(-) create mode 100644 rpc/src/slot_status_notifier.rs diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 5d5e18bc241395..23e5d9b6562451 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -25,10 +25,7 @@ use { bytes::Bytes, crossbeam_channel::{unbounded, Receiver, Sender}, solana_client::connection_cache::ConnectionCache, - solana_geyser_plugin_manager::{ - block_metadata_notifier_interface::BlockMetadataNotifierArc, - slot_status_notifier::SlotStatusNotifier, - }, + solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc, solana_gossip::{ cluster_info::ClusterInfo, duplicate_shred_handler::DuplicateShredHandler, duplicate_shred_listener::DuplicateShredListener, @@ -41,7 +38,7 @@ use { solana_poh::poh_recorder::PohRecorder, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::BankNotificationSenderConfig, - rpc_subscriptions::RpcSubscriptions, + rpc_subscriptions::RpcSubscriptions, slot_status_notifier::SlotStatusNotifier, }, solana_runtime::{ accounts_background_service::AbsRequestSender, bank_forks::BankForks, diff --git a/core/src/validator.rs b/core/src/validator.rs index c05a3fa8474357..ec1031c656715e 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1091,19 +1091,21 @@ impl Validator { ) }; - let rpc_completed_slots_service = if !config.rpc_config.full_api { - None - } else { - let (completed_slots_sender, completed_slots_receiver) = - 
bounded(MAX_COMPLETED_SLOTS_IN_CHANNEL); - blockstore.add_completed_slots_signal(completed_slots_sender); - - Some(RpcCompletedSlotsService::spawn( - completed_slots_receiver, - rpc_subscriptions.clone(), - exit.clone(), - )) - }; + let rpc_completed_slots_service = + if config.rpc_config.full_api || geyser_plugin_service.is_some() { + let (completed_slots_sender, completed_slots_receiver) = + bounded(MAX_COMPLETED_SLOTS_IN_CHANNEL); + blockstore.add_completed_slots_signal(completed_slots_sender); + + Some(RpcCompletedSlotsService::spawn( + completed_slots_receiver, + rpc_subscriptions.clone(), + slot_status_notifier.clone(), + exit.clone(), + )) + } else { + None + }; let optimistically_confirmed_bank_tracker = Some(OptimisticallyConfirmedBankTracker::new( diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 6ac7bb848b1444..97271310a99f5f 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -322,6 +322,9 @@ pub enum SlotStatus { /// First Shred Received FirstShredReceived, + + /// All shreds for the slot have been received. + Completed, } impl SlotStatus { @@ -331,6 +334,7 @@ impl SlotStatus { SlotStatus::Processed => "processed", SlotStatus::Rooted => "rooted", SlotStatus::FirstShredReceived => "first_shread_received", + SlotStatus::Completed => "completed", } } } diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index 8e293cbddbbeb0..61fca230030c11 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -5,7 +5,7 @@ use { block_metadata_notifier_interface::BlockMetadataNotifierArc, entry_notifier::EntryNotifierImpl, geyser_plugin_manager::{GeyserPluginManager, GeyserPluginManagerRequest}, - slot_status_notifier::{SlotStatusNotifier, SlotStatusNotifierImpl}, + slot_status_notifier::SlotStatusNotifierImpl, slot_status_observer::SlotStatusObserver, transaction_notifier::TransactionNotifierImpl, }, @@ -15,6 +15,7 @@ use { solana_ledger::entry_notifier_interface::EntryNotifierArc, solana_rpc::{ optimistically_confirmed_bank_tracker::SlotNotification, + slot_status_notifier::SlotStatusNotifier, transaction_notifier_interface::TransactionNotifierArc, }, std::{ diff --git a/geyser-plugin-manager/src/slot_status_notifier.rs b/geyser-plugin-manager/src/slot_status_notifier.rs index 18ea942810ef41..573ed97d7787af 100644 --- a/geyser-plugin-manager/src/slot_status_notifier.rs +++ b/geyser-plugin-manager/src/slot_status_notifier.rs @@ -4,26 +4,11 @@ use { log::*, solana_measure::measure::Measure, solana_metrics::*, + solana_rpc::slot_status_notifier::SlotStatusNotifierInterface, solana_sdk::clock::Slot, std::sync::{Arc, RwLock}, }; -pub trait SlotStatusNotifierInterface { - /// Notified when a slot is optimistically confirmed - fn notify_slot_confirmed(&self, slot: Slot, parent: Option); - - /// Notified when a slot is marked frozen. - fn notify_slot_processed(&self, slot: Slot, parent: Option); - - /// Notified when a slot is rooted. - fn notify_slot_rooted(&self, slot: Slot, parent: Option); - - /// Notified when the first shred is received for a slot. 
-    fn notify_first_shred_received(&self, slot: Slot);
-}
-
-pub type SlotStatusNotifier = Arc<RwLock<dyn SlotStatusNotifierInterface + Sync + Send>>;
-
 pub struct SlotStatusNotifierImpl {
     plugin_manager: Arc<RwLock<GeyserPluginManager>>,
 }
@@ -44,6 +29,10 @@ impl SlotStatusNotifierInterface for SlotStatusNotifierImpl {
     fn notify_first_shred_received(&self, slot: Slot) {
         self.notify_slot_status(slot, None, SlotStatus::FirstShredReceived);
     }
+
+    fn notify_completed(&self, slot: Slot) {
+        self.notify_slot_status(slot, None, SlotStatus::Completed);
+    }
 }

 impl SlotStatusNotifierImpl {
diff --git a/geyser-plugin-manager/src/slot_status_observer.rs b/geyser-plugin-manager/src/slot_status_observer.rs
index 7eba6e54eb6c58..99cdb568a8e384 100644
--- a/geyser-plugin-manager/src/slot_status_observer.rs
+++ b/geyser-plugin-manager/src/slot_status_observer.rs
@@ -1,7 +1,9 @@
 use {
-    crate::slot_status_notifier::SlotStatusNotifier,
     crossbeam_channel::Receiver,
-    solana_rpc::optimistically_confirmed_bank_tracker::SlotNotification,
+    solana_rpc::{
+        optimistically_confirmed_bank_tracker::SlotNotification,
+        slot_status_notifier::SlotStatusNotifier,
+    },
     std::{
         sync::{
             atomic::{AtomicBool, Ordering},
diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs
index 8dc81692d59790..9763ebd791a162 100644
--- a/rpc/src/lib.rs
+++ b/rpc/src/lib.rs
@@ -13,6 +13,7 @@ pub mod rpc_pubsub_service;
 pub mod rpc_service;
 pub mod rpc_subscription_tracker;
 pub mod rpc_subscriptions;
+pub mod slot_status_notifier;
 pub mod transaction_notifier_interface;
 pub mod transaction_status_service;
diff --git a/rpc/src/rpc_completed_slots_service.rs b/rpc/src/rpc_completed_slots_service.rs
index cb9059b1a5d20f..0c5ecf3c3e6abd 100644
--- a/rpc/src/rpc_completed_slots_service.rs
+++ b/rpc/src/rpc_completed_slots_service.rs
@@ -1,5 +1,5 @@
 use {
-    crate::rpc_subscriptions::RpcSubscriptions,
+    crate::{rpc_subscriptions::RpcSubscriptions, slot_status_notifier::SlotStatusNotifier},
     crossbeam_channel::RecvTimeoutError,
     solana_ledger::blockstore::CompletedSlotsReceiver,
     solana_rpc_client_api::response::SlotUpdate,
@@ -21,6 +21,7 @@ impl RpcCompletedSlotsService {
     pub fn spawn(
         completed_slots_receiver: CompletedSlotsReceiver,
         rpc_subscriptions: Arc<RpcSubscriptions>,
+        slot_status_notifier: Option<SlotStatusNotifier>,
         exit: Arc<AtomicBool>,
     ) -> JoinHandle<()> {
         Builder::new()
@@ -45,6 +46,9 @@ impl RpcCompletedSlotsService {
                                 slot,
                                 timestamp: timestamp(),
                             });
+                            if let Some(slot_status_notifier) = &slot_status_notifier {
+                                slot_status_notifier.read().unwrap().notify_completed(slot);
+                            }
                         }
                     }
                 }
diff --git a/rpc/src/slot_status_notifier.rs b/rpc/src/slot_status_notifier.rs
new file mode 100644
index 00000000000000..97a84da42f33bf
--- /dev/null
+++ b/rpc/src/slot_status_notifier.rs
@@ -0,0 +1,23 @@
+use {
+    solana_sdk::clock::Slot,
+    std::sync::{Arc, RwLock},
+};
+
+pub trait SlotStatusNotifierInterface {
+    /// Notified when a slot is optimistically confirmed
+    fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>);
+
+    /// Notified when a slot is marked frozen.
+    fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>);
+
+    /// Notified when a slot is rooted.
+    fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>);
+
+    /// Notified when the first shred is received for a slot.
+    fn notify_first_shred_received(&self, slot: Slot);
+
+    /// Notified when the slot is completed.
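+    /// "Completed" only means the blockstore has received every shred for
+    /// the slot; it does not imply the slot has been replayed, confirmed,
+    /// or rooted.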
+ fn notify_completed(&self, slot: Slot); +} + +pub type SlotStatusNotifier = Arc>; diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index b5e67cd3203a40..e820851d03e4ba 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -9,7 +9,6 @@ use { lru::LruCache, rand::Rng, rayon::{prelude::*, ThreadPool, ThreadPoolBuilder}, - solana_geyser_plugin_manager::slot_status_notifier::SlotStatusNotifier, solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol}, solana_ledger::{ leader_schedule_cache::LeaderScheduleCache, @@ -18,7 +17,10 @@ use { solana_measure::measure::Measure, solana_perf::deduper::Deduper, solana_rayon_threadlimit::get_thread_count, - solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions}, + solana_rpc::{ + max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions, + slot_status_notifier::SlotStatusNotifier, + }, solana_rpc_client_api::response::SlotUpdate, solana_runtime::{ bank::{Bank, MAX_LEADER_SCHEDULE_STAKES}, From f9f8b60ca15fa721c6cdd816c99dfd4e9123fd77 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Wed, 9 Oct 2024 10:24:40 -0700 Subject: [PATCH 469/529] svm: improve integration test program handling (#3094) * allow setting program upgrade authority * ensure program accounts are rent-exempt * set clock to execution epoch --- svm/tests/integration_test.rs | 53 +++++++++++++++++------------------ svm/tests/mock_bank.rs | 29 +++++++++++++------ 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 2e63865d1ebead..1051f5eb651e7e 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -2,8 +2,9 @@ use { crate::mock_bank::{ - create_executable_environment, deploy_program, program_address, register_builtins, - MockBankCallback, MockForkGraph, WALLCLOCK_TIME, + create_executable_environment, deploy_program, deploy_program_with_upgrade_authority, + program_address, register_builtins, MockBankCallback, MockForkGraph, EXECUTION_EPOCH, + EXECUTION_SLOT, WALLCLOCK_TIME, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, @@ -42,8 +43,6 @@ use { mod mock_bank; const DEPLOYMENT_SLOT: u64 = 0; -const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot -const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch const LAMPORTS_PER_SIGNATURE: u64 = 5000; const LAST_BLOCKHASH: Hash = Hash::new_from_array([7; 32]); // Arbitrary constant hash for advancing nonces @@ -55,8 +54,8 @@ pub struct SvmTestEntry { // features are disabled by default; these will be enabled pub enabled_features: Vec, - // programs to deploy to the new svm before transaction execution - pub initial_programs: Vec<(String, Slot)>, + // programs to deploy to the new svm + pub initial_programs: Vec<(String, Slot, Option)>, // accounts to deploy to the new svm before transaction execution pub initial_accounts: AccountsMap, @@ -81,6 +80,12 @@ impl SvmTestEntry { self.create_expected_account(pubkey, account); } + // add an immutable program that will have been deployed before the slot we execute transactions in + pub fn add_initial_program(&mut self, program_name: &str) { + self.initial_programs + .push((program_name.to_string(), DEPLOYMENT_SLOT, None)); + } + // add a new rent-exempt account that is created by the transaction // inserts it only into the post account map pub fn 
create_expected_account(&mut self, pubkey: Pubkey, account: &AccountSharedData) { @@ -338,11 +343,9 @@ fn program_medley() -> Vec { // 0: A transaction that works without any account { - let program_name = "hello-solana".to_string(); - let program_id = program_address(&program_name); - test_entry - .initial_programs - .push((program_name, DEPLOYMENT_SLOT)); + let program_name = "hello-solana"; + let program_id = program_address(program_name); + test_entry.add_initial_program(program_name); let fee_payer_keypair = Keypair::new(); let fee_payer = fee_payer_keypair.pubkey(); @@ -369,11 +372,9 @@ fn program_medley() -> Vec { // 1: A simple funds transfer between accounts { - let program_name = "simple-transfer".to_string(); - let program_id = program_address(&program_name); - test_entry - .initial_programs - .push((program_name, DEPLOYMENT_SLOT)); + let program_name = "simple-transfer"; + let program_id = program_address(program_name); + test_entry.add_initial_program(program_name); let fee_payer_keypair = Keypair::new(); let sender_keypair = Keypair::new(); @@ -420,11 +421,9 @@ fn program_medley() -> Vec { // 2: A program that utilizes a Sysvar { - let program_name = "clock-sysvar".to_string(); - let program_id = program_address(&program_name); - test_entry - .initial_programs - .push((program_name, DEPLOYMENT_SLOT)); + let program_name = "clock-sysvar"; + let program_id = program_address(program_name); + test_entry.add_initial_program(program_name); let fee_payer_keypair = Keypair::new(); let fee_payer = fee_payer_keypair.pubkey(); @@ -656,11 +655,9 @@ fn simple_nonce(enable_fee_only_transactions: bool, fee_paying_nonce: bool) -> V .push(feature_set::enable_transaction_loading_failure_fees::id()); } - let program_name = "hello-solana".to_string(); - let real_program_id = program_address(&program_name); - test_entry - .initial_programs - .push((program_name, DEPLOYMENT_SLOT)); + let program_name = "hello-solana"; + let real_program_id = program_address(program_name); + test_entry.add_initial_program(program_name); // create and return a transaction, fee payer, and nonce info // sets up initial account states but not final ones @@ -863,8 +860,8 @@ fn svm_integration(test_entries: Vec) { fn execute_test_entry(test_entry: SvmTestEntry) { let mock_bank = MockBankCallback::default(); - for (name, slot) in &test_entry.initial_programs { - deploy_program(name.to_string(), *slot, &mock_bank); + for (name, slot, authority) in &test_entry.initial_programs { + deploy_program_with_upgrade_authority(name.to_string(), *slot, &mock_bank, *authority); } for (pubkey, account) in &test_entry.initial_accounts { diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 8adbd2a55bf13e..58a1c155f226a9 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -41,6 +41,8 @@ use { }, }; +pub const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot +pub const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch pub const WALLCLOCK_TIME: i64 = 1704067200; // Arbitrarily Jan 1, 2024 pub struct MockForkGraph {} @@ -138,6 +140,17 @@ pub fn program_address(program_name: &str) -> Pubkey { #[allow(unused)] pub fn deploy_program(name: String, deployment_slot: Slot, mock_bank: &MockBankCallback) -> Pubkey { + deploy_program_with_upgrade_authority(name, deployment_slot, mock_bank, None) +} + +#[allow(unused)] +pub fn deploy_program_with_upgrade_authority( + name: String, + deployment_slot: Slot, + mock_bank: &MockBankCallback, + 
upgrade_authority_address: Option, +) -> Pubkey { + let rent = Rent::default(); let program_account = program_address(&name); let program_data_account = bpf_loader_upgradeable::get_program_data_address(&program_account); @@ -147,10 +160,11 @@ pub fn deploy_program(name: String, deployment_slot: Slot, mock_bank: &MockBankC // The program account must have funds and hold the executable binary let mut account_data = AccountSharedData::default(); - account_data.set_data(bincode::serialize(&state).unwrap()); - account_data.set_lamports(25); + let buffer = bincode::serialize(&state).unwrap(); + account_data.set_lamports(rent.minimum_balance(buffer.len())); account_data.set_owner(bpf_loader_upgradeable::id()); account_data.set_executable(true); + account_data.set_data(buffer); mock_bank .account_shared_data .write() @@ -173,6 +187,8 @@ pub fn deploy_program(name: String, deployment_slot: Slot, mock_bank: &MockBankC let mut buffer = load_program(name); header.append(&mut complement); header.append(&mut buffer); + account_data.set_lamports(rent.minimum_balance(header.len())); + account_data.set_owner(bpf_loader_upgradeable::id()); account_data.set_data(header); mock_bank .account_shared_data @@ -189,9 +205,6 @@ pub fn create_executable_environment( mock_bank: &MockBankCallback, program_cache: &mut ProgramCache, ) { - const DEPLOYMENT_EPOCH: u64 = 0; - const DEPLOYMENT_SLOT: u64 = 0; - program_cache.environments = ProgramRuntimeEnvironments { program_runtime_v1: Arc::new(create_custom_environment()), // We are not using program runtime v2 @@ -207,10 +220,10 @@ pub fn create_executable_environment( // clock contents are important because we use them for a sysvar loading test let clock = Clock { - slot: DEPLOYMENT_SLOT, + slot: EXECUTION_SLOT, epoch_start_timestamp: WALLCLOCK_TIME.saturating_sub(10) as UnixTimestamp, - epoch: DEPLOYMENT_EPOCH, - leader_schedule_epoch: DEPLOYMENT_EPOCH, + epoch: EXECUTION_EPOCH, + leader_schedule_epoch: EXECUTION_EPOCH, unix_timestamp: WALLCLOCK_TIME as UnixTimestamp, }; From 3fc84a5245521a6828616bd9b81ba1ffbe02b11f Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 10 Oct 2024 12:12:45 +0400 Subject: [PATCH 470/529] fix a link in the solana-instruction docs (#3122) * fix a link in the solana-instruction docs * trailing whitespace --- sdk/instruction/src/account_meta.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/instruction/src/account_meta.rs b/sdk/instruction/src/account_meta.rs index 7f9f9a3dbc2974..67313c1b66f0e1 100644 --- a/sdk/instruction/src/account_meta.rs +++ b/sdk/instruction/src/account_meta.rs @@ -14,6 +14,8 @@ use solana_pubkey::Pubkey; /// default [`AccountMeta::new`] constructor creates writable accounts, this is /// a minor hazard: use [`AccountMeta::new_readonly`] to specify that an account /// is not writable. +/// +/// [`Instruction`]: crate::Instruction #[repr(C)] #[cfg_attr( feature = "serde", From 764f25ab5cb14a7b1e7340ab0db290980165d46e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 18:47:07 +0800 Subject: [PATCH 471/529] build(deps): bump js-sys from 0.3.70 to 0.3.71 (#3129) * build(deps): bump js-sys from 0.3.70 to 0.3.71 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.70 to 0.3.71. 
- [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- programs/sbf/Cargo.lock | 24 ++++++++++++------------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b7a17b92111b2..8ad2c06026e682 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3149,9 +3149,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -10007,9 +10007,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -10018,9 +10018,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", @@ -10045,9 +10045,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10055,9 +10055,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ "proc-macro2", "quote", @@ -10068,9 +10068,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "web-sys" diff --git a/Cargo.toml b/Cargo.toml index 46f3b719578fb1..bbcef11c5c94b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -285,7 +285,7 @@ itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.70" +js-sys = "0.3.71" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = 
"18.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index b774474ab01a4a..57bcdd73334554 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2462,9 +2462,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -8381,9 +8381,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -8392,9 +8392,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", @@ -8419,9 +8419,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8429,9 +8429,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ "proc-macro2", "quote", @@ -8442,9 +8442,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "web-sys" From a4e43c8120c1bab87d2f5979eac2927b0674d794 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 18:47:21 +0800 Subject: [PATCH 472/529] build(deps): bump wasm-bindgen from 0.2.93 to 0.2.94 (#3130) * build(deps): bump wasm-bindgen from 0.2.93 to 0.2.94 Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.93 to 0.2.94. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/compare/0.2.93...0.2.94) --- updated-dependencies: - dependency-name: wasm-bindgen dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> From 964f6d34ce952baafc8666021b766546f6cbc19e Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 10 Oct 2024 17:00:31 +0400 Subject: [PATCH 473/529] extract slot-hashes crate (#3010) * extract slot-hashes crate * fix serde feature * fix sysvar doc link --- Cargo.lock | 11 ++++ Cargo.toml | 4 +- programs/sbf/Cargo.lock | 10 +++ sdk/program/Cargo.toml | 1 + sdk/program/src/slot_hashes.rs | 110 +------------------------------- sdk/slot-hashes/Cargo.toml | 24 +++++++ sdk/slot-hashes/src/lib.rs | 112 +++++++++++++++++++++++++++++++++ 7 files changed, 162 insertions(+), 110 deletions(-) create mode 100644 sdk/slot-hashes/Cargo.toml create mode 100644 sdk/slot-hashes/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8ad2c06026e682..6f60e7438130f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7285,6 +7285,7 @@ dependencies = [ "solana-serialize-utils", "solana-sha256-hasher", "solana-short-vec", + "solana-slot-hashes", "static_assertions", "test-case", "thiserror", @@ -7971,6 +7972,16 @@ dependencies = [ "solana-sanitize", ] +[[package]] +name = "solana-slot-hashes" +version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-hash", + "solana-sha256-hasher", +] + [[package]] name = "solana-stake-accounts" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index bbcef11c5c94b0..cf5c1f5f2ceb9f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -128,6 +128,7 @@ members = [ "sdk/serialize-utils", "sdk/sha256-hasher", "sdk/signature", + "sdk/slot-hashes", "send-transaction-service", "short-vec", "stake-accounts", @@ -413,7 +414,7 @@ solana-genesis-utils = { path = "genesis-utils", version = "=2.1.0" } agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=2.1.0" } solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.1.0" } solana-gossip = { path = "gossip", version = "=2.1.0" } -solana-hash = { path = "sdk/hash", version = "=2.1.0" } +solana-hash = { path = "sdk/hash", version = "=2.1.0", default-features = false } solana-inline-spl = { path = "inline-spl", version = "=2.1.0" } solana-instruction = { path = "sdk/instruction", version = "=2.1.0", default-features = false } solana-lattice-hash = { path = "lattice-hash", version = "=2.1.0" } @@ -451,6 +452,7 @@ solana-serde-varint = { path = "sdk/serde-varint", version = "=2.1.0" } solana-serialize-utils = { path = "sdk/serialize-utils", version = "=2.1.0" } solana-sha256-hasher = { path = "sdk/sha256-hasher", version = "=2.1.0" } solana-signature = { path = "sdk/signature", version = "=2.1.0", default-features = false } +solana-slot-hashes = { path = "sdk/slot-hashes", version = "=2.1.0" } solana-timings = { path = "timings", version = "=2.1.0" } solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.1.0" } solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 57bcdd73334554..41a574125fd1d1 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5682,6 +5682,7 @@ dependencies = [ "solana-serialize-utils", "solana-sha256-hasher", "solana-short-vec", + "solana-slot-hashes", "thiserror", "wasm-bindgen", ] @@ -6721,6 +6722,15 @@ dependencies = [ "solana-sanitize", ] +[[package]] +name = "solana-slot-hashes" +version = "2.1.0" +dependencies = [ + "serde", + 
"serde_derive", + "solana-hash", +] + [[package]] name = "solana-stake-program" version = "2.1.0" diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 7acba606490664..21240fe1db3edc 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -62,6 +62,7 @@ solana-serde-varint = { workspace = true } solana-serialize-utils = { workspace = true } solana-sha256-hasher = { workspace = true, features = ["sha2"] } solana-short-vec = { workspace = true } +solana-slot-hashes = { workspace = true, features = ["serde"] } thiserror = { workspace = true } # This is currently needed to build on-chain programs reliably. diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index f18d14e89f9e9c..7113b2345641c9 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -1,109 +1 @@ -//! A type to hold data for the [`SlotHashes` sysvar][sv]. -//! -//! [sv]: https://docs.solanalabs.com/runtime/sysvars#slothashes -//! -//! The sysvar ID is declared in [`sysvar::slot_hashes`]. -//! -//! [`sysvar::slot_hashes`]: crate::sysvar::slot_hashes - -pub use solana_clock::Slot; -use { - crate::hash::Hash, - std::{ - iter::FromIterator, - ops::Deref, - sync::atomic::{AtomicUsize, Ordering}, - }, -}; - -pub const MAX_ENTRIES: usize = 512; // about 2.5 minutes to get your vote in - -// This is to allow tests with custom slot hash expiry to avoid having to generate -// 512 blocks for such tests. -static NUM_ENTRIES: AtomicUsize = AtomicUsize::new(MAX_ENTRIES); - -pub fn get_entries() -> usize { - NUM_ENTRIES.load(Ordering::Relaxed) -} - -pub fn set_entries_for_tests_only(entries: usize) { - NUM_ENTRIES.store(entries, Ordering::Relaxed); -} - -pub type SlotHash = (Slot, Hash); - -#[repr(C)] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default)] -pub struct SlotHashes(Vec); - -impl SlotHashes { - pub fn add(&mut self, slot: Slot, hash: Hash) { - match self.binary_search_by(|(probe, _)| slot.cmp(probe)) { - Ok(index) => (self.0)[index] = (slot, hash), - Err(index) => (self.0).insert(index, (slot, hash)), - } - (self.0).truncate(get_entries()); - } - pub fn position(&self, slot: &Slot) -> Option { - self.binary_search_by(|(probe, _)| slot.cmp(probe)).ok() - } - #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn get(&self, slot: &Slot) -> Option<&Hash> { - self.binary_search_by(|(probe, _)| slot.cmp(probe)) - .ok() - .map(|index| &self[index].1) - } - pub fn new(slot_hashes: &[SlotHash]) -> Self { - let mut slot_hashes = slot_hashes.to_vec(); - slot_hashes.sort_by(|(a, _), (b, _)| b.cmp(a)); - Self(slot_hashes) - } - pub fn slot_hashes(&self) -> &[SlotHash] { - &self.0 - } -} - -impl FromIterator<(Slot, Hash)> for SlotHashes { - fn from_iter>(iter: I) -> Self { - Self(iter.into_iter().collect()) - } -} - -impl Deref for SlotHashes { - type Target = Vec; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[cfg(test)] -mod tests { - use {super::*, crate::hash::hash}; - - #[test] - fn test() { - let mut slot_hashes = SlotHashes::new(&[(1, Hash::default()), (3, Hash::default())]); - slot_hashes.add(2, Hash::default()); - assert_eq!( - slot_hashes, - SlotHashes(vec![ - (3, Hash::default()), - (2, Hash::default()), - (1, Hash::default()), - ]) - ); - - let mut slot_hashes = SlotHashes::new(&[]); - for i in 0..MAX_ENTRIES + 1 { - slot_hashes.add( - i as u64, - hash(&[(i >> 24) as u8, (i >> 16) as u8, (i >> 8) as u8, i as u8]), - ); - } - for i in 0..MAX_ENTRIES { - assert_eq!(slot_hashes[i].0, (MAX_ENTRIES - i) as u64); - } - - 
assert_eq!(slot_hashes.len(), MAX_ENTRIES); - } -} +pub use {solana_clock::Slot, solana_slot_hashes::*}; diff --git a/sdk/slot-hashes/Cargo.toml b/sdk/slot-hashes/Cargo.toml new file mode 100644 index 00000000000000..a5dff397507619 --- /dev/null +++ b/sdk/slot-hashes/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "solana-slot-hashes" +description = "Types and utilities for the Solana SlotHashes sysvar." +documentation = "https://docs.rs/solana-slot-hashes" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-hash = { workspace = true, default-features = false } + +[dev-dependencies] +solana-sha256-hasher = { workspace = true } + +[features] +serde = ["dep:serde", "dep:serde_derive", "solana-hash/serde"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/slot-hashes/src/lib.rs b/sdk/slot-hashes/src/lib.rs new file mode 100644 index 00000000000000..66be2f4b3b0eb8 --- /dev/null +++ b/sdk/slot-hashes/src/lib.rs @@ -0,0 +1,112 @@ +//! A type to hold data for the [`SlotHashes` sysvar][sv]. +//! +//! [sv]: https://docs.solanalabs.com/runtime/sysvars#slothashes +//! +//! The sysvar ID is declared in [`solana_program::sysvar::slot_hashes`]. +//! +//! [`solana_program::sysvar::slot_hashes`]: https://docs.rs/solana-program/latest/solana_program/sysvar/slot_hashes/index.html + +use { + solana_hash::Hash, + std::{ + iter::FromIterator, + ops::Deref, + sync::atomic::{AtomicUsize, Ordering}, + }, +}; + +pub const MAX_ENTRIES: usize = 512; // about 2.5 minutes to get your vote in + +// This is to allow tests with custom slot hash expiry to avoid having to generate +// 512 blocks for such tests. 
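[Illustrative aside, not part of the patch] A minimal usage sketch of the API this file introduces, assuming the solana-slot-hashes and solana-hash crates as added above. Note that `set_entries_for_tests_only` mutates the process-wide override described in the comment above (the `NUM_ENTRIES` static that follows), so it affects every `SlotHashes` in the process.

```rust
use {
    solana_hash::Hash,
    solana_slot_hashes::{set_entries_for_tests_only, SlotHashes},
};

fn main() {
    let mut slot_hashes = SlotHashes::default();

    // `add` keeps entries sorted newest-first and overwrites duplicates in place.
    slot_hashes.add(3, Hash::default());
    slot_hashes.add(1, Hash::default());
    slot_hashes.add(3, Hash::default()); // replaces the existing slot-3 entry
    assert_eq!(slot_hashes.first().map(|(slot, _)| *slot), Some(3));

    // Entries beyond the configured capacity (MAX_ENTRIES by default) are
    // truncated, oldest first. Shrinking the capacity lets tests exercise
    // expiry without generating 512 entries.
    set_entries_for_tests_only(2);
    slot_hashes.add(4, Hash::default());
    assert_eq!(slot_hashes.len(), 2);
    assert!(slot_hashes.get(&1).is_none()); // the oldest entry has expired
}
```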
+static NUM_ENTRIES: AtomicUsize = AtomicUsize::new(MAX_ENTRIES); + +pub fn get_entries() -> usize { + NUM_ENTRIES.load(Ordering::Relaxed) +} + +pub fn set_entries_for_tests_only(entries: usize) { + NUM_ENTRIES.store(entries, Ordering::Relaxed); +} + +pub type SlotHash = (u64, Hash); + +#[repr(C)] +#[cfg_attr( + feature = "serde", + derive(serde_derive::Deserialize, serde_derive::Serialize) +)] +#[derive(PartialEq, Eq, Debug, Default)] +pub struct SlotHashes(Vec); + +impl SlotHashes { + pub fn add(&mut self, slot: u64, hash: Hash) { + match self.binary_search_by(|(probe, _)| slot.cmp(probe)) { + Ok(index) => (self.0)[index] = (slot, hash), + Err(index) => (self.0).insert(index, (slot, hash)), + } + (self.0).truncate(get_entries()); + } + pub fn position(&self, slot: &u64) -> Option { + self.binary_search_by(|(probe, _)| slot.cmp(probe)).ok() + } + #[allow(clippy::trivially_copy_pass_by_ref)] + pub fn get(&self, slot: &u64) -> Option<&Hash> { + self.binary_search_by(|(probe, _)| slot.cmp(probe)) + .ok() + .map(|index| &self[index].1) + } + pub fn new(slot_hashes: &[SlotHash]) -> Self { + let mut slot_hashes = slot_hashes.to_vec(); + slot_hashes.sort_by(|(a, _), (b, _)| b.cmp(a)); + Self(slot_hashes) + } + pub fn slot_hashes(&self) -> &[SlotHash] { + &self.0 + } +} + +impl FromIterator<(u64, Hash)> for SlotHashes { + fn from_iter>(iter: I) -> Self { + Self(iter.into_iter().collect()) + } +} + +impl Deref for SlotHashes { + type Target = Vec; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use {super::*, solana_sha256_hasher::hash}; + + #[test] + fn test() { + let mut slot_hashes = SlotHashes::new(&[(1, Hash::default()), (3, Hash::default())]); + slot_hashes.add(2, Hash::default()); + assert_eq!( + slot_hashes, + SlotHashes(vec![ + (3, Hash::default()), + (2, Hash::default()), + (1, Hash::default()), + ]) + ); + + let mut slot_hashes = SlotHashes::new(&[]); + for i in 0..MAX_ENTRIES + 1 { + slot_hashes.add( + i as u64, + hash(&[(i >> 24) as u8, (i >> 16) as u8, (i >> 8) as u8, i as u8]), + ); + } + for i in 0..MAX_ENTRIES { + assert_eq!(slot_hashes[i].0, (MAX_ENTRIES - i) as u64); + } + + assert_eq!(slot_hashes.len(), MAX_ENTRIES); + } +} From aefeb0ec285a7ea3d1ace27b3e96f21da7b047ef Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Thu, 10 Oct 2024 17:01:26 +0400 Subject: [PATCH 474/529] Extract epoch-schedule crate (#3021) * extract epoch-schedule crate * make epoch-schedule crate no-std * simplify features * update digest * update digest * add deprecation to re-exports --- Cargo.lock | 14 ++ Cargo.toml | 2 + programs/sbf/Cargo.lock | 10 ++ runtime/src/bank/serde_snapshot.rs | 2 +- sdk/epoch-schedule/Cargo.toml | 31 ++++ sdk/epoch-schedule/src/lib.rs | 279 +++++++++++++++++++++++++++++ sdk/program/Cargo.toml | 2 + sdk/program/src/epoch_schedule.rs | 271 +--------------------------- sdk/src/genesis_config.rs | 2 +- 9 files changed, 348 insertions(+), 265 deletions(-) create mode 100644 sdk/epoch-schedule/Cargo.toml create mode 100644 sdk/epoch-schedule/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 6f60e7438130f2..898684f25fcee0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6604,6 +6604,19 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-epoch-schedule" +version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-clock", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-sdk-macro", + "static_assertions", +] + [[package]] name = "solana-faucet" version = "2.1.0" @@ -7266,6 +7279,7 @@ dependencies = [ 
"solana-clock", "solana-decode-error", "solana-define-syscall", + "solana-epoch-schedule", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-hash", diff --git a/Cargo.toml b/Cargo.toml index cf5c1f5f2ceb9f..8012227a54340c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,7 @@ members = [ "sdk/clock", "sdk/decode-error", "sdk/derivation-path", + "sdk/epoch-schedule", "sdk/feature-set", "sdk/gen-headers", "sdk/hash", @@ -403,6 +404,7 @@ solana-define-syscall = { path = "define-syscall", version = "=2.1.0" } solana-derivation-path = { path = "sdk/derivation-path", version = "=2.1.0" } solana-download-utils = { path = "download-utils", version = "=2.1.0" } solana-entry = { path = "entry", version = "=2.1.0" } +solana-epoch-schedule = { path = "sdk/epoch-schedule", version = "=2.1.0" } solana-faucet = { path = "faucet", version = "=2.1.0" } solana-feature-set = { path = "sdk/feature-set", version = "=2.1.0" } solana-fee = { path = "fee", version = "=2.1.0" } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 41a574125fd1d1..525dca8b0caf2c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5249,6 +5249,15 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-epoch-schedule" +version = "2.1.0" +dependencies = [ + "serde", + "serde_derive", + "solana-sdk-macro", +] + [[package]] name = "solana-faucet" version = "2.1.0" @@ -5666,6 +5675,7 @@ dependencies = [ "solana-clock", "solana-decode-error", "solana-define-syscall", + "solana-epoch-schedule", "solana-hash", "solana-instruction", "solana-msg", diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index eeacfb3f556015..a088979e7bf429 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -546,7 +546,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "7xkyjhBmj1xk3ykcbufPCnBKKkcpQ3AjKFUmH1r8MRnu") + frozen_abi(digest = "D7zx9HfzJa1cqhNariHYufgyLUVLR64iPoMFzRYqs8rZ") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/sdk/epoch-schedule/Cargo.toml b/sdk/epoch-schedule/Cargo.toml new file mode 100644 index 00000000000000..c89e764d9911b2 --- /dev/null +++ b/sdk/epoch-schedule/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "solana-epoch-schedule" +description = "Configuration for Solana epochs and slots." +documentation = "https://docs.rs/solana-epoch-schedule" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +serde = { workspace = true, optional = true } +serde_derive = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +solana-sdk-macro = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] +solana-clock = { workspace = true } +static_assertions = { workspace = true } + +[features] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +serde = ["dep:serde", "dep:serde_derive"] + +[lints] +workspace = true diff --git a/sdk/epoch-schedule/src/lib.rs b/sdk/epoch-schedule/src/lib.rs new file mode 100644 index 00000000000000..e0d9d1c24e3b30 --- /dev/null +++ b/sdk/epoch-schedule/src/lib.rs @@ -0,0 +1,279 @@ +//! Configuration for epochs and slots. +//! +//! 
Epochs mark a period of time composed of _slots_, for which a particular +//! [leader schedule][ls] is in effect. The epoch schedule determines the length +//! of epochs, and the timing of the next leader-schedule selection. +//! +//! [ls]: https://docs.solanalabs.com/consensus/leader-rotation#leader-schedule-rotation +//! +//! The epoch schedule does not change during the life of a blockchain, +//! though the length of an epoch does — during the initial launch of +//! the chain there is a "warmup" period, where epochs are short, with subsequent +//! epochs increasing in slots until they last for [`DEFAULT_SLOTS_PER_EPOCH`]. +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] +#![no_std] +#[cfg(feature = "frozen-abi")] +extern crate std; + +#[cfg(feature = "serde")] +use serde_derive::{Deserialize, Serialize}; +use solana_sdk_macro::CloneZeroed; + +// inlined to avoid solana_clock dep +const DEFAULT_SLOTS_PER_EPOCH: u64 = 432_000; +#[cfg(test)] +static_assertions::const_assert_eq!( + DEFAULT_SLOTS_PER_EPOCH, + solana_clock::DEFAULT_SLOTS_PER_EPOCH +); +/// The default number of slots before an epoch starts to calculate the leader schedule. +pub const DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET: u64 = DEFAULT_SLOTS_PER_EPOCH; + +/// The maximum number of slots before an epoch starts to calculate the leader schedule. +/// +/// Default is an entire epoch, i.e. leader schedule for epoch X is calculated at +/// the beginning of epoch X - 1. +pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3; + +/// The minimum number of slots per epoch during the warmup period. +/// +/// Based on `MAX_LOCKOUT_HISTORY` from `vote_program`. +pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32; + +#[repr(C)] +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "camelCase") +)] +#[derive(Debug, CloneZeroed, PartialEq, Eq)] +pub struct EpochSchedule { + /// The maximum number of slots in each epoch. + pub slots_per_epoch: u64, + + /// A number of slots before beginning of an epoch to calculate + /// a leader schedule for that epoch. + pub leader_schedule_slot_offset: u64, + + /// Whether epochs start short and grow. + pub warmup: bool, + + /// The first epoch after the warmup period. + /// + /// Basically: `log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)`. + pub first_normal_epoch: u64, + + /// The first slot after the warmup period. + /// + /// Basically: `MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)`. 
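[Illustrative aside, not part of the patch] A worked example of the two doc formulas above, using the default 432_000 slots per epoch: next_power_of_two(432_000) = 524_288 = 2^19 and MINIMUM_SLOTS_PER_EPOCH = 32 = 2^5, so `Self::custom` yields first_normal_epoch = 19 - 5 = 14 and first_normal_slot = 524_288 - 32 = 524_256, which equals 32 * (2^14 - 1): warmup epoch 0 has 32 slots and each subsequent warmup epoch doubles, through epoch 13.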
+ pub first_normal_slot: u64, +} + +impl Default for EpochSchedule { + fn default() -> Self { + Self::custom( + DEFAULT_SLOTS_PER_EPOCH, + DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, + true, + ) + } +} + +impl EpochSchedule { + pub fn new(slots_per_epoch: u64) -> Self { + Self::custom(slots_per_epoch, slots_per_epoch, true) + } + pub fn without_warmup() -> Self { + Self::custom( + DEFAULT_SLOTS_PER_EPOCH, + DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, + false, + ) + } + pub fn custom(slots_per_epoch: u64, leader_schedule_slot_offset: u64, warmup: bool) -> Self { + assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH); + let (first_normal_epoch, first_normal_slot) = if warmup { + let next_power_of_two = slots_per_epoch.next_power_of_two(); + let log2_slots_per_epoch = next_power_of_two + .trailing_zeros() + .saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()); + + ( + u64::from(log2_slots_per_epoch), + next_power_of_two.saturating_sub(MINIMUM_SLOTS_PER_EPOCH), + ) + } else { + (0, 0) + }; + EpochSchedule { + slots_per_epoch, + leader_schedule_slot_offset, + warmup, + first_normal_epoch, + first_normal_slot, + } + } + + /// get the length of the given epoch (in slots) + pub fn get_slots_in_epoch(&self, epoch: u64) -> u64 { + if epoch < self.first_normal_epoch { + 2u64.saturating_pow( + (epoch as u32).saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()), + ) + } else { + self.slots_per_epoch + } + } + + /// get the epoch for which the given slot should save off + /// information about stakers + pub fn get_leader_schedule_epoch(&self, slot: u64) -> u64 { + if slot < self.first_normal_slot { + // until we get to normal slots, behave as if leader_schedule_slot_offset == slots_per_epoch + self.get_epoch_and_slot_index(slot).0.saturating_add(1) + } else { + let new_slots_since_first_normal_slot = slot.saturating_sub(self.first_normal_slot); + let new_first_normal_leader_schedule_slot = + new_slots_since_first_normal_slot.saturating_add(self.leader_schedule_slot_offset); + let new_epochs_since_first_normal_leader_schedule = + new_first_normal_leader_schedule_slot + .checked_div(self.slots_per_epoch) + .unwrap_or(0); + self.first_normal_epoch + .saturating_add(new_epochs_since_first_normal_leader_schedule) + } + } + + /// get epoch for the given slot + pub fn get_epoch(&self, slot: u64) -> u64 { + self.get_epoch_and_slot_index(slot).0 + } + + /// get epoch and offset into the epoch for the given slot + pub fn get_epoch_and_slot_index(&self, slot: u64) -> (u64, u64) { + if slot < self.first_normal_slot { + let epoch = slot + .saturating_add(MINIMUM_SLOTS_PER_EPOCH) + .saturating_add(1) + .next_power_of_two() + .trailing_zeros() + .saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()) + .saturating_sub(1); + + let epoch_len = + 2u64.saturating_pow(epoch.saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros())); + + ( + u64::from(epoch), + slot.saturating_sub(epoch_len.saturating_sub(MINIMUM_SLOTS_PER_EPOCH)), + ) + } else { + let normal_slot_index = slot.saturating_sub(self.first_normal_slot); + let normal_epoch_index = normal_slot_index + .checked_div(self.slots_per_epoch) + .unwrap_or(0); + let epoch = self.first_normal_epoch.saturating_add(normal_epoch_index); + let slot_index = normal_slot_index + .checked_rem(self.slots_per_epoch) + .unwrap_or(0); + (epoch, slot_index) + } + } + + pub fn get_first_slot_in_epoch(&self, epoch: u64) -> u64 { + if epoch <= self.first_normal_epoch { + 2u64.saturating_pow(epoch as u32) + .saturating_sub(1) + .saturating_mul(MINIMUM_SLOTS_PER_EPOCH) + } else { + epoch + 
.saturating_sub(self.first_normal_epoch) + .saturating_mul(self.slots_per_epoch) + .saturating_add(self.first_normal_slot) + } + } + + pub fn get_last_slot_in_epoch(&self, epoch: u64) -> u64 { + self.get_first_slot_in_epoch(epoch) + .saturating_add(self.get_slots_in_epoch(epoch)) + .saturating_sub(1) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_epoch_schedule() { + // one week of slots at 8 ticks/slot, 10 ticks/sec is + // (1 * 7 * 24 * 4500u64).next_power_of_two(); + + // test values between MINIMUM_SLOT_LEN and MINIMUM_SLOT_LEN * 16, should cover a good mix + for slots_per_epoch in MINIMUM_SLOTS_PER_EPOCH..=MINIMUM_SLOTS_PER_EPOCH * 16 { + let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true); + + assert_eq!(epoch_schedule.get_first_slot_in_epoch(0), 0); + assert_eq!( + epoch_schedule.get_last_slot_in_epoch(0), + MINIMUM_SLOTS_PER_EPOCH - 1 + ); + + let mut last_leader_schedule = 0; + let mut last_epoch = 0; + let mut last_slots_in_epoch = MINIMUM_SLOTS_PER_EPOCH; + for slot in 0..(2 * slots_per_epoch) { + // verify that leader_schedule_epoch is continuous over the warmup + // and into the first normal epoch + + let leader_schedule = epoch_schedule.get_leader_schedule_epoch(slot); + if leader_schedule != last_leader_schedule { + assert_eq!(leader_schedule, last_leader_schedule + 1); + last_leader_schedule = leader_schedule; + } + + let (epoch, offset) = epoch_schedule.get_epoch_and_slot_index(slot); + + // verify that epoch increases continuously + if epoch != last_epoch { + assert_eq!(epoch, last_epoch + 1); + last_epoch = epoch; + assert_eq!(epoch_schedule.get_first_slot_in_epoch(epoch), slot); + assert_eq!(epoch_schedule.get_last_slot_in_epoch(epoch - 1), slot - 1); + + // verify that slots in an epoch double continuously + // until they reach slots_per_epoch + + let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch); + if slots_in_epoch != last_slots_in_epoch && slots_in_epoch != slots_per_epoch { + assert_eq!(slots_in_epoch, last_slots_in_epoch * 2); + } + last_slots_in_epoch = slots_in_epoch; + } + // verify that the slot offset is less than slots_in_epoch + assert!(offset < last_slots_in_epoch); + } + + // assert that these changed ;) + assert!(last_leader_schedule != 0); // t + assert!(last_epoch != 0); + // assert that we got to "normal" mode + assert!(last_slots_in_epoch == slots_per_epoch); + } + } + + #[test] + fn test_clone() { + let epoch_schedule = EpochSchedule { + slots_per_epoch: 1, + leader_schedule_slot_offset: 2, + warmup: true, + first_normal_epoch: 4, + first_normal_slot: 5, + }; + #[allow(clippy::clone_on_copy)] + let cloned_epoch_schedule = epoch_schedule.clone(); + assert_eq!(cloned_epoch_schedule, epoch_schedule); + } +} diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 21240fe1db3edc..9785417637f235 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -36,6 +36,7 @@ solana-atomic-u64 = { workspace = true } solana-bincode = { workspace = true } solana-clock = { workspace = true, features = ["serde"] } solana-decode-error = { workspace = true } +solana-epoch-schedule = { workspace = true, features = ["serde"] } solana-frozen-abi = { workspace = true, optional = true, features = ["frozen-abi"] } solana-frozen-abi-macro = { workspace = true, optional = true, features = ["frozen-abi"] } solana-hash = { workspace = true, features = [ @@ -128,6 +129,7 @@ dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ "dep:solana-frozen-abi", 
"dep:solana-frozen-abi-macro", + "solana-epoch-schedule/frozen-abi", "solana-hash/frozen-abi", "solana-instruction/frozen-abi", "solana-pubkey/frozen-abi", diff --git a/sdk/program/src/epoch_schedule.rs b/sdk/program/src/epoch_schedule.rs index d36f34aacf64ab..56addac3218069 100644 --- a/sdk/program/src/epoch_schedule.rs +++ b/sdk/program/src/epoch_schedule.rs @@ -1,263 +1,8 @@ -//! Configuration for epochs and slots. -//! -//! Epochs mark a period of time composed of _slots_, for which a particular -//! [leader schedule][ls] is in effect. The epoch schedule determines the length -//! of epochs, and the timing of the next leader-schedule selection. -//! -//! [ls]: https://docs.solanalabs.com/consensus/leader-rotation#leader-schedule-rotation -//! -//! The epoch schedule does not change during the life of a blockchain, -//! though the length of an epoch does — during the initial launch of -//! the chain there is a "warmup" period, where epochs are short, with subsequent -//! epochs increasing in slots until they last for [`DEFAULT_SLOTS_PER_EPOCH`]. - -pub use solana_clock::{Epoch, Slot, DEFAULT_SLOTS_PER_EPOCH}; -use solana_sdk_macro::CloneZeroed; - -/// The default number of slots before an epoch starts to calculate the leader schedule. -pub const DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET: u64 = DEFAULT_SLOTS_PER_EPOCH; - -/// The maximum number of slots before an epoch starts to calculate the leader schedule. -/// -/// Default is an entire epoch, i.e. leader schedule for epoch X is calculated at -/// the beginning of epoch X - 1. -pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3; - -/// The minimum number of slots per epoch during the warmup period. -/// -/// Based on `MAX_LOCKOUT_HISTORY` from `vote_program`. -pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32; - -#[repr(C)] -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Debug, CloneZeroed, PartialEq, Eq, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct EpochSchedule { - /// The maximum number of slots in each epoch. - pub slots_per_epoch: u64, - - /// A number of slots before beginning of an epoch to calculate - /// a leader schedule for that epoch. - pub leader_schedule_slot_offset: u64, - - /// Whether epochs start short and grow. - pub warmup: bool, - - /// The first epoch after the warmup period. - /// - /// Basically: `log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)`. - pub first_normal_epoch: Epoch, - - /// The first slot after the warmup period. - /// - /// Basically: `MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)`. 
- pub first_normal_slot: Slot, -} - -impl Default for EpochSchedule { - fn default() -> Self { - Self::custom( - DEFAULT_SLOTS_PER_EPOCH, - DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, - true, - ) - } -} - -impl EpochSchedule { - pub fn new(slots_per_epoch: u64) -> Self { - Self::custom(slots_per_epoch, slots_per_epoch, true) - } - pub fn without_warmup() -> Self { - Self::custom( - DEFAULT_SLOTS_PER_EPOCH, - DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, - false, - ) - } - pub fn custom(slots_per_epoch: u64, leader_schedule_slot_offset: u64, warmup: bool) -> Self { - assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH); - let (first_normal_epoch, first_normal_slot) = if warmup { - let next_power_of_two = slots_per_epoch.next_power_of_two(); - let log2_slots_per_epoch = next_power_of_two - .trailing_zeros() - .saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()); - - ( - u64::from(log2_slots_per_epoch), - next_power_of_two.saturating_sub(MINIMUM_SLOTS_PER_EPOCH), - ) - } else { - (0, 0) - }; - EpochSchedule { - slots_per_epoch, - leader_schedule_slot_offset, - warmup, - first_normal_epoch, - first_normal_slot, - } - } - - /// get the length of the given epoch (in slots) - pub fn get_slots_in_epoch(&self, epoch: Epoch) -> u64 { - if epoch < self.first_normal_epoch { - 2u64.saturating_pow( - (epoch as u32).saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()), - ) - } else { - self.slots_per_epoch - } - } - - /// get the epoch for which the given slot should save off - /// information about stakers - pub fn get_leader_schedule_epoch(&self, slot: Slot) -> Epoch { - if slot < self.first_normal_slot { - // until we get to normal slots, behave as if leader_schedule_slot_offset == slots_per_epoch - self.get_epoch_and_slot_index(slot).0.saturating_add(1) - } else { - let new_slots_since_first_normal_slot = slot.saturating_sub(self.first_normal_slot); - let new_first_normal_leader_schedule_slot = - new_slots_since_first_normal_slot.saturating_add(self.leader_schedule_slot_offset); - let new_epochs_since_first_normal_leader_schedule = - new_first_normal_leader_schedule_slot - .checked_div(self.slots_per_epoch) - .unwrap_or(0); - self.first_normal_epoch - .saturating_add(new_epochs_since_first_normal_leader_schedule) - } - } - - /// get epoch for the given slot - pub fn get_epoch(&self, slot: Slot) -> Epoch { - self.get_epoch_and_slot_index(slot).0 - } - - /// get epoch and offset into the epoch for the given slot - pub fn get_epoch_and_slot_index(&self, slot: Slot) -> (Epoch, u64) { - if slot < self.first_normal_slot { - let epoch = slot - .saturating_add(MINIMUM_SLOTS_PER_EPOCH) - .saturating_add(1) - .next_power_of_two() - .trailing_zeros() - .saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()) - .saturating_sub(1); - - let epoch_len = - 2u64.saturating_pow(epoch.saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros())); - - ( - u64::from(epoch), - slot.saturating_sub(epoch_len.saturating_sub(MINIMUM_SLOTS_PER_EPOCH)), - ) - } else { - let normal_slot_index = slot.saturating_sub(self.first_normal_slot); - let normal_epoch_index = normal_slot_index - .checked_div(self.slots_per_epoch) - .unwrap_or(0); - let epoch = self.first_normal_epoch.saturating_add(normal_epoch_index); - let slot_index = normal_slot_index - .checked_rem(self.slots_per_epoch) - .unwrap_or(0); - (epoch, slot_index) - } - } - - pub fn get_first_slot_in_epoch(&self, epoch: Epoch) -> Slot { - if epoch <= self.first_normal_epoch { - 2u64.saturating_pow(epoch as u32) - .saturating_sub(1) - .saturating_mul(MINIMUM_SLOTS_PER_EPOCH) - } else { 
- epoch - .saturating_sub(self.first_normal_epoch) - .saturating_mul(self.slots_per_epoch) - .saturating_add(self.first_normal_slot) - } - } - - pub fn get_last_slot_in_epoch(&self, epoch: Epoch) -> Slot { - self.get_first_slot_in_epoch(epoch) - .saturating_add(self.get_slots_in_epoch(epoch)) - .saturating_sub(1) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_epoch_schedule() { - // one week of slots at 8 ticks/slot, 10 ticks/sec is - // (1 * 7 * 24 * 4500u64).next_power_of_two(); - - // test values between MINIMUM_SLOT_LEN and MINIMUM_SLOT_LEN * 16, should cover a good mix - for slots_per_epoch in MINIMUM_SLOTS_PER_EPOCH..=MINIMUM_SLOTS_PER_EPOCH * 16 { - let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true); - - assert_eq!(epoch_schedule.get_first_slot_in_epoch(0), 0); - assert_eq!( - epoch_schedule.get_last_slot_in_epoch(0), - MINIMUM_SLOTS_PER_EPOCH - 1 - ); - - let mut last_leader_schedule = 0; - let mut last_epoch = 0; - let mut last_slots_in_epoch = MINIMUM_SLOTS_PER_EPOCH; - for slot in 0..(2 * slots_per_epoch) { - // verify that leader_schedule_epoch is continuous over the warmup - // and into the first normal epoch - - let leader_schedule = epoch_schedule.get_leader_schedule_epoch(slot); - if leader_schedule != last_leader_schedule { - assert_eq!(leader_schedule, last_leader_schedule + 1); - last_leader_schedule = leader_schedule; - } - - let (epoch, offset) = epoch_schedule.get_epoch_and_slot_index(slot); - - // verify that epoch increases continuously - if epoch != last_epoch { - assert_eq!(epoch, last_epoch + 1); - last_epoch = epoch; - assert_eq!(epoch_schedule.get_first_slot_in_epoch(epoch), slot); - assert_eq!(epoch_schedule.get_last_slot_in_epoch(epoch - 1), slot - 1); - - // verify that slots in an epoch double continuously - // until they reach slots_per_epoch - - let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch); - if slots_in_epoch != last_slots_in_epoch && slots_in_epoch != slots_per_epoch { - assert_eq!(slots_in_epoch, last_slots_in_epoch * 2); - } - last_slots_in_epoch = slots_in_epoch; - } - // verify that the slot offset is less than slots_in_epoch - assert!(offset < last_slots_in_epoch); - } - - // assert that these changed ;) - assert!(last_leader_schedule != 0); // t - assert!(last_epoch != 0); - // assert that we got to "normal" mode - assert!(last_slots_in_epoch == slots_per_epoch); - } - } - - #[test] - fn test_clone() { - let epoch_schedule = EpochSchedule { - slots_per_epoch: 1, - leader_schedule_slot_offset: 2, - warmup: true, - first_normal_epoch: 4, - first_normal_slot: 5, - }; - #[allow(clippy::clone_on_copy)] - let cloned_epoch_schedule = epoch_schedule.clone(); - assert_eq!(cloned_epoch_schedule, epoch_schedule); - } -} +#[deprecated( + since = "2.1.0", + note = "Use solana-clock and solana-epoch-schedule crates instead." 
+)]
+pub use {
+    solana_clock::{Epoch, Slot, DEFAULT_SLOTS_PER_EPOCH},
+    solana_epoch_schedule::*,
+};
diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs
index 24208a76363dbf..6b48f8fd1e7645 100644
--- a/sdk/src/genesis_config.rs
+++ b/sdk/src/genesis_config.rs
@@ -87,7 +87,7 @@ impl FromStr for ClusterType {
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample),
-    frozen_abi(digest = "2eGYc5mpKqDsS8sZfNS4mVq4qPptXYa9hSid2Hpv4DkQ")
+    frozen_abi(digest = "7kinRF6sWtJWxz9Wt8Zu4CB4SxaiFsNW2y9wnZH1FkNM")
 )]
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
 pub struct GenesisConfig {

From 2ed0e9fbd36cf559bda0edbfa49155fabbb65d97 Mon Sep 17 00:00:00 2001
From: Brooks
Date: Thu, 10 Oct 2024 10:22:59 -0400
Subject: [PATCH 475/529] Skips rehash if accounts lt hash is enabled (#3123)

---
 runtime/src/snapshot_bank_utils.rs | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs
index 330669fcfa7e6d..cc65a12f9c63c9 100644
--- a/runtime/src/snapshot_bank_utils.rs
+++ b/runtime/src/snapshot_bank_utils.rs
@@ -913,7 +913,19 @@ fn bank_to_full_snapshot_archive_with(
         .accounts_db
         .set_latest_full_snapshot_slot(bank.slot());
     bank.squash(); // Bank may not be a root
-    bank.rehash(); // Bank accounts may have been manually modified by the caller
+
+    // Rehashing is not currently supported when the accounts lt hash is enabled.
+    // This is because rehashing will *re-mix-in* all the accounts stored in this bank into the
+    // accounts lt hash! This is incorrect, as the accounts lt hash would change, even if the bank
+    // was *not* manually modified by the caller.
+    // We can re-allow rehashing if we change the Bank to hold its parent's accounts lt hash plus a
+    // *delta* accounts lt hash, and then Bank::hash_internal_state() will only recalculate the
+    // delta accounts lt hash.
+    // Another option is to consider if manual modification should even be allowed in the first
+    // place. Disallowing it would solve these issues.
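[Illustrative aside, not part of the patch] The comment above is the heart of the change: a lattice hash is an additive accumulator, so mixing the same accounts in a second time changes the sum, whereas recomputing a Merkle-style hash over unchanged accounts is a no-op. A minimal sketch using the `LtHash` operations exercised by PATCH 479 later in this series; it assumes `LtHash::new_random()` is callable from here, though in the crate it may be a test-only helper:

```rust
use solana_lattice_hash::lt_hash::LtHash;

fn rehash_is_not_a_no_op() {
    let account = LtHash::new_random(); // stand-in for one hashed account
    let accounts_lt_hash = LtHash::identity() + account;

    // "Rehashing" re-mixes the account into the accumulator: the result now
    // contains the account's contribution twice, so the hash changes even
    // though no account was modified.
    let rehashed = accounts_lt_hash + account;
    assert_ne!(rehashed.checksum(), accounts_lt_hash.checksum());

    // Mixing the stale contribution back out restores the original value,
    // which is what the proposed parent-hash-plus-delta scheme would rely on.
    assert_eq!(rehashed - account, accounts_lt_hash);
}
```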
+ if !bank.is_accounts_lt_hash_enabled() { + bank.rehash(); // Bank accounts may have been manually modified by the caller + } bank.force_flush_accounts_cache(); bank.clean_accounts(); let calculated_accounts_hash = @@ -976,7 +988,11 @@ pub fn bank_to_incremental_snapshot_archive( .accounts_db .set_latest_full_snapshot_slot(full_snapshot_slot); bank.squash(); // Bank may not be a root - bank.rehash(); // Bank accounts may have been manually modified by the caller + + // See the comment in bank_to_full_snapshot_archive() when calling rehash() + if !bank.is_accounts_lt_hash_enabled() { + bank.rehash(); // Bank accounts may have been manually modified by the caller + } bank.force_flush_accounts_cache(); bank.clean_accounts(); let calculated_incremental_accounts_hash = From 653587bf47bec771771ec0e5213c3285a170f7a7 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Thu, 10 Oct 2024 10:36:45 -0400 Subject: [PATCH 476/529] use root bank cache instead of bank forks in gossip vote listener (#3124) --- core/src/cluster_info_vote_listener.rs | 36 ++++++++++++++++---------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index d8a8670f585e56..06d501d4865ad0 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -23,7 +23,8 @@ use { }, solana_runtime::{ bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE, - epoch_stakes::EpochStakes, vote_sender_types::ReplayVoteReceiver, + epoch_stakes::EpochStakes, root_bank_cache::RootBankCache, + vote_sender_types::ReplayVoteReceiver, }, solana_sdk::{ clock::{Slot, DEFAULT_MS_PER_SLOT}, @@ -200,14 +201,14 @@ impl ClusterInfoVoteListener { let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded(); let listen_thread = { let exit = exit.clone(); - let bank_forks = bank_forks.clone(); + let mut root_bank_cache = RootBankCache::new(bank_forks.clone()); Builder::new() .name("solCiVoteLstnr".to_string()) .spawn(move || { let _ = Self::recv_loop( exit, &cluster_info, - &bank_forks, + &mut root_bank_cache, verified_packets_sender, verified_vote_transactions_sender, ); @@ -218,11 +219,12 @@ impl ClusterInfoVoteListener { let process_thread = Builder::new() .name("solCiProcVotes".to_string()) .spawn(move || { + let mut root_bank_cache = RootBankCache::new(bank_forks.clone()); let _ = Self::process_votes_loop( exit, verified_vote_transactions_receiver, vote_tracker, - bank_forks, + &mut root_bank_cache, subscriptions, gossip_verified_vote_hash_sender, verified_vote_sender, @@ -246,7 +248,7 @@ impl ClusterInfoVoteListener { fn recv_loop( exit: Arc, cluster_info: &ClusterInfo, - bank_forks: &RwLock, + root_bank_cache: &mut RootBankCache, verified_packets_sender: BankingPacketSender, verified_vote_transactions_sender: VerifiedVoteTransactionsSender, ) -> Result<()> { @@ -255,7 +257,7 @@ impl ClusterInfoVoteListener { let votes = cluster_info.get_votes(&mut cursor); inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len()); if !votes.is_empty() { - let (vote_txs, packets) = Self::verify_votes(votes, bank_forks); + let (vote_txs, packets) = Self::verify_votes(votes, root_bank_cache); verified_vote_transactions_sender.send(vote_txs)?; verified_packets_sender.send(BankingPacketBatch::new((packets, None)))?; } @@ -267,7 +269,7 @@ impl ClusterInfoVoteListener { #[allow(clippy::type_complexity)] fn verify_votes( votes: Vec, - bank_forks: &RwLock, + root_bank_cache: &mut RootBankCache, 
) -> (Vec, Vec) { let mut packet_batches = packet::to_packet_batches(&votes, 1); @@ -277,7 +279,7 @@ impl ClusterInfoVoteListener { /*reject_non_vote=*/ false, votes.len(), ); - let root_bank = bank_forks.read().unwrap().root_bank(); + let root_bank = root_bank_cache.root_bank(); let epoch_schedule = root_bank.epoch_schedule(); votes .into_iter() @@ -309,7 +311,7 @@ impl ClusterInfoVoteListener { exit: Arc, gossip_vote_txs_receiver: VerifiedVoteTransactionsReceiver, vote_tracker: Arc, - bank_forks: Arc>, + root_bank_cache: &mut RootBankCache, subscriptions: Arc, gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender, verified_vote_sender: VerifiedVoteSender, @@ -319,7 +321,7 @@ impl ClusterInfoVoteListener { duplicate_confirmed_slot_sender: DuplicateConfirmedSlotsSender, ) -> Result<()> { let mut confirmation_verifier = - OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root()); + OptimisticConfirmationVerifier::new(root_bank_cache.root_bank().slot()); let mut latest_vote_slot_per_validator = HashMap::new(); let mut last_process_root = Instant::now(); let duplicate_confirmed_slot_sender = Some(duplicate_confirmed_slot_sender); @@ -329,7 +331,7 @@ impl ClusterInfoVoteListener { return Ok(()); } - let root_bank = bank_forks.read().unwrap().root_bank(); + let root_bank = root_bank_cache.root_bank(); if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 { let unrooted_optimistic_slots = confirmation_verifier .verify_for_unrooted_optimistic_slots(&root_bank, &blockstore); @@ -1441,8 +1443,10 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); + let mut root_bank_cache = RootBankCache::new(bank_forks); let votes = vec![]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks); + let (vote_txs, packets) = + ClusterInfoVoteListener::verify_votes(votes, &mut root_bank_cache); assert!(vote_txs.is_empty()); assert!(packets.is_empty()); } @@ -1482,9 +1486,11 @@ mod tests { ); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); + let mut root_bank_cache = RootBankCache::new(bank_forks); let vote_tx = test_vote_tx(voting_keypairs.first(), hash); let votes = vec![vote_tx]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks); + let (vote_txs, packets) = + ClusterInfoVoteListener::verify_votes(votes, &mut root_bank_cache); assert_eq!(vote_txs.len(), 1); verify_packets_len(&packets, 1); } @@ -1507,11 +1513,13 @@ mod tests { ); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); + let mut root_bank_cache = RootBankCache::new(bank_forks); let vote_tx = test_vote_tx(voting_keypairs.first(), hash); let mut bad_vote = vote_tx.clone(); bad_vote.signatures[0] = Signature::default(); let votes = vec![vote_tx.clone(), bad_vote, vote_tx]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks); + let (vote_txs, packets) = + ClusterInfoVoteListener::verify_votes(votes, &mut root_bank_cache); assert_eq!(vote_txs.len(), 2); verify_packets_len(&packets, 2); } From fda4def00535eebf2e14cceff7e43be4ac9d7785 Mon Sep 17 00:00:00 2001 From: hana <81144685+2501babe@users.noreply.github.com> Date: Thu, 10 Oct 2024 07:49:54 -0700 Subject: [PATCH 477/529] svm: only inspect loaded accounts (#3131) --- svm/src/account_loader.rs | 27 +++++++-------------------- 
svm/src/transaction_processor.rs | 24 +++++++++++++----------- svm/tests/integration_test.rs | 20 ++++++-------------- 3 files changed, 26 insertions(+), 45 deletions(-) diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index c139ea15130319..be4014fa69c4a4 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -415,7 +415,6 @@ fn load_transaction_account( loaded_programs: &ProgramCacheForTxBatch, ) -> Result<(LoadedTransactionAccount, bool)> { let mut account_found = true; - let mut was_inspected = false; let is_instruction_account = u8::try_from(account_index) .map(|i| instruction_accounts.contains(&&i)) .unwrap_or(false); @@ -454,17 +453,11 @@ fn load_transaction_account( callbacks .get_account_shared_data(account_key) .map(|mut account| { - let rent_collected = if is_writable { - // Inspect the account prior to collecting rent, since - // rent collection can modify the account. - debug_assert!(!was_inspected); - callbacks.inspect_account( - account_key, - AccountState::Alive(&account), - is_writable, - ); - was_inspected = true; + // Inspect the account prior to collecting rent, since + // rent collection can modify the account. + callbacks.inspect_account(account_key, AccountState::Alive(&account), is_writable); + let rent_collected = if is_writable { collect_rent_from_account( feature_set, rent_collector, @@ -483,8 +476,11 @@ fn load_transaction_account( } }) .unwrap_or_else(|| { + callbacks.inspect_account(account_key, AccountState::Dead, is_writable); + account_found = false; let mut default_account = AccountSharedData::default(); + // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account // with this field already set would allow us to skip rent collection for these accounts. 
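[Illustrative aside, not part of the patch] The hunk above pins down an ordering contract: the inspection callback must observe each loaded account *before* rent collection can mutate it, and a missing account is reported as `AccountState::Dead` rather than skipped. A toy model of that contract, with hypothetical names standing in for the real SVM callback trait:

```rust
/// What an inspector is allowed to observe (hypothetical, simplified).
#[derive(Debug, PartialEq)]
enum ObservedState {
    Alive { lamports: u64 },
    Dead,
}

/// Simplified stand-in for `load_transaction_account`: `inspect` plays the
/// role of `callbacks.inspect_account`, and subtracting one lamport stands
/// in for `collect_rent_from_account`.
fn load_account(
    stored_lamports: Option<u64>,
    is_writable: bool,
    inspect: &mut impl FnMut(ObservedState, bool),
) -> u64 {
    match stored_lamports {
        Some(mut lamports) => {
            // Inspect first, so the observer sees the pre-rent state.
            inspect(ObservedState::Alive { lamports }, is_writable);
            if is_writable {
                lamports = lamports.saturating_sub(1); // rent collection stand-in
            }
            lamports
        }
        None => {
            // Account not found: surface it as Dead instead of skipping it.
            inspect(ObservedState::Dead, is_writable);
            0
        }
    }
}

fn main() {
    let mut seen = Vec::new();
    let post_rent = load_account(Some(100), true, &mut |state, writable| {
        seen.push((state, writable))
    });
    assert_eq!(seen, vec![(ObservedState::Alive { lamports: 100 }, true)]);
    assert_eq!(post_rent, 99); // rent was collected only after inspection
}
```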
@@ -497,15 +493,6 @@ fn load_transaction_account(
             })
     };

-    if !was_inspected {
-        let account_state = if account_found {
-            AccountState::Alive(&loaded_account.account)
-        } else {
-            AccountState::Dead
-        };
-        callbacks.inspect_account(account_key, account_state, is_writable);
-    }
-
     Ok((loaded_account, account_found))
 }

diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs
index f0c9f681a74955..92706997471c52 100644
--- a/svm/src/transaction_processor.rs
+++ b/svm/src/transaction_processor.rs
@@ -426,22 +426,24 @@ impl TransactionBatchProcessor {
         })?;

         let fee_payer_address = message.fee_payer();
+        let mut fee_payer_account = if let Some(fee_payer_account) =
+            account_overrides.and_then(|overrides| overrides.get(fee_payer_address).cloned())
+        {
+            fee_payer_account
+        } else if let Some(fee_payer_account) = callbacks.get_account_shared_data(fee_payer_address)
+        {
+            callbacks.inspect_account(
+                fee_payer_address,
+                AccountState::Alive(&fee_payer_account),
+                true, // <-- is_writable
+            );

-        let fee_payer_account = account_overrides
-            .and_then(|overrides| overrides.get(fee_payer_address).cloned())
-            .or_else(|| callbacks.get_account_shared_data(fee_payer_address));
-
-        let Some(mut fee_payer_account) = fee_payer_account else {
+            fee_payer_account
+        } else {
             error_counters.account_not_found += 1;
             return Err(TransactionError::AccountNotFound);
         };

-        callbacks.inspect_account(
-            fee_payer_address,
-            AccountState::Alive(&fee_payer_account),
-            true, // <-- is_writable
-        );
-
         let fee_payer_loaded_rent_epoch = fee_payer_account.rent_epoch();
         let fee_payer_rent_debit = collect_rent_from_account(
             feature_set,

diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs
index 1051f5eb651e7e..6b1325a643d2f0 100644
--- a/svm/tests/integration_test.rs
+++ b/svm/tests/integration_test.rs
@@ -1192,26 +1192,18 @@ fn svm_inspect_account() {
         );
     }

-    // The transfer program account is also loaded during transaction processing, however the
-    // account state passed to `inspect_account()` is *not* the same as what is held by
-    // MockBankCallback::account_shared_data. So we check the transfer program differently.
-    //
-    // First ensure we have the correct number of inspected accounts, correctly counting the
-    // transfer program.
+    // The transfer program account is retrieved from the program cache, which does not
+    // inspect accounts, because they are necessarily read-only. Verify it has not made
+    // its way into the inspected accounts list.
     let num_expected_inspected_accounts: usize =
         expected_inspected_accounts.values().map(Vec::len).sum();
     let num_actual_inspected_accounts: usize =
         actual_inspected_accounts.values().map(Vec::len).sum();
+
     assert_eq!(
-        num_expected_inspected_accounts + 2,
+        num_expected_inspected_accounts,
         num_actual_inspected_accounts,
     );
-    // And second, ensure the inspected transfer program accounts are alive and not writable.
- let actual_transfer_program_accounts = - actual_inspected_accounts.get(&transfer_program).unwrap(); - for actual_transfer_program_account in actual_transfer_program_accounts { - assert!(actual_transfer_program_account.0.is_some()); - assert!(!actual_transfer_program_account.1); - } + assert!(!actual_inspected_accounts.contains_key(&transfer_program)); } From ea87ffa43c5fb8638cda070623e9182656aa8429 Mon Sep 17 00:00:00 2001 From: Andrew Fitzgerald Date: Thu, 10 Oct 2024 10:01:27 -0500 Subject: [PATCH 478/529] Deprecate Legacy BlockProductionMethod (#3113) --- CHANGELOG.md | 1 + core/src/validator.rs | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a1851527965fc..8b70c0927dbe14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ The order of precedence for the chosen tools version goes: `--tools-version` arg * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480) * `solana-genesis`: the `--cluster-type` parameter now clones the feature set from the target cluster (#2587) * `unified-scheduler` as default option for `--block-verification-method` (#2653) + * warn that `thread-local-multi-iterator` option for `--block-production-method` is deprecated (#3113) ## [2.0.0] * Breaking diff --git a/core/src/validator.rs b/core/src/validator.rs index ec1031c656715e..280fd1c80bb7f3 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -846,6 +846,16 @@ impl Validator { "Using: block-verification-method: {}, block-production-method: {}", config.block_verification_method, config.block_production_method ); + if matches!( + config.block_production_method, + BlockProductionMethod::ThreadLocalMultiIterator + ) { + warn!( + "--block-production-method thread-local-multi-iterator is deprecated \ + and will be removed in a future release. Please use \ + --block-production-method=central-scheduler instead." + ); + } let (replay_vote_sender, replay_vote_receiver) = unbounded(); From 6ab552db59ec78170e1e9ce9ea7904e07baa070e Mon Sep 17 00:00:00 2001 From: Brooks Date: Thu, 10 Oct 2024 17:11:56 -0400 Subject: [PATCH 479/529] Adds more lattice hash tests (#3137) --- lattice-hash/src/lt_hash.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lattice-hash/src/lt_hash.rs b/lattice-hash/src/lt_hash.rs index d334c54882de41..100c9a10a6d53a 100644 --- a/lattice-hash/src/lt_hash.rs +++ b/lattice-hash/src/lt_hash.rs @@ -130,6 +130,7 @@ mod tests { let a = LtHash::new_random(); let b = LtHash::new_random(); assert_eq!(a, a + b - b); + assert_eq!(a, a - b + b); } // Ensure that mixing is commutative @@ -149,6 +150,16 @@ mod tests { assert_eq!((a + b) + c, a + (b + c)); } + // Ensure that mixing out respects distribution + #[test] + fn test_distribute() { + let a = LtHash::new_random(); + let b = LtHash::new_random(); + let c = LtHash::new_random(); + let d = LtHash::new_random(); + assert_eq!(a - b - c - d, a - (b + c + d)); + } + // Ensure the correct lattice hash and checksum values are produced #[test] fn test_hello_world() { From 7cf3d1d72b30ac312ca8540cf234ab7346b989da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 20:32:35 +0800 Subject: [PATCH 480/529] build(deps): bump wasm-bindgen from 0.2.94 to 0.2.95 (#3139) * build(deps): bump wasm-bindgen from 0.2.94 to 0.2.95 Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.94 to 0.2.95. 
- [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/compare/0.2.94...0.2.95) --- updated-dependencies: - dependency-name: wasm-bindgen dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 20 ++++++++++---------- programs/sbf/Cargo.lock | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 898684f25fcee0..e094fe2a419ca6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10032,9 +10032,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -10043,9 +10043,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", @@ -10070,9 +10070,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10080,9 +10080,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", @@ -10093,9 +10093,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 525dca8b0caf2c..eb7a2fed3acaf9 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -8401,9 +8401,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -8412,9 +8412,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.94" +version = "0.2.95" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", @@ -8439,9 +8439,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8449,9 +8449,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", @@ -8462,9 +8462,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.94" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" From 8f0465fe418e938b6b74c8ff63c780b5c3cd8049 Mon Sep 17 00:00:00 2001 From: steviez Date: Fri, 11 Oct 2024 09:40:25 -0500 Subject: [PATCH 481/529] Avoid shell out tar for genesis archive creation (#3079) Use the tar crate instead; doing so avoids spawning a subprocess and also removes the variability of relying on whatever tar the caller has in their path --- Cargo.lock | 2 ++ ledger/Cargo.toml | 2 ++ ledger/src/blockstore.rs | 49 ++++++++++++++++++---------------------- programs/sbf/Cargo.lock | 2 ++ 4 files changed, 28 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e094fe2a419ca6..4b7d175c4e06ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6889,6 +6889,7 @@ dependencies = [ "bitflags 2.6.0", "bs58", "byteorder", + "bzip2", "chrono", "chrono-humanize", "crossbeam-channel", @@ -6947,6 +6948,7 @@ dependencies = [ "static_assertions", "strum", "strum_macros", + "tar", "tempfile", "test-case", "thiserror", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 15c56d07c94c33..57bb0d1a0a8c73 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -14,6 +14,7 @@ assert_matches = { workspace = true } bincode = { workspace = true } bitflags = { workspace = true, features = ["serde"] } byteorder = { workspace = true } +bzip2 = { workspace = true } chrono = { workspace = true, features = ["default", "serde"] } chrono-humanize = { workspace = true } crossbeam-channel = { workspace = true } @@ -73,6 +74,7 @@ spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } +tar = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 54f612483be958..80ce6b83dce1a3 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -71,7 +71,7 @@ use { }, convert::TryInto, fmt::Write, - fs, + fs::{self, File}, io::{Error as IoError, ErrorKind}, ops::Bound, path::{Path, PathBuf}, @@ -81,6 +81,7 @@ use { 
Arc, Mutex, RwLock, }, }, + tar, tempfile::{Builder, TempDir}, thiserror::Error, trees::{Tree, TreeWalk}, @@ -4835,32 +4836,12 @@ pub fn create_new_ledger( drop(blockstore); let archive_path = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); - let args = vec![ - "jcfhS", - archive_path.to_str().unwrap(), - "-C", - ledger_path.to_str().unwrap(), - DEFAULT_GENESIS_FILE, - blockstore_dir, - ]; - let output = std::process::Command::new("tar") - .env("COPYFILE_DISABLE", "1") - .args(args) - .output() - .unwrap(); - if !output.status.success() { - use std::str::from_utf8; - error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?")); - error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?")); - - return Err(BlockstoreError::Io(IoError::new( - ErrorKind::Other, - format!( - "Error trying to generate snapshot archive: {}", - output.status - ), - ))); - } + let archive_file = File::create(&archive_path)?; + let encoder = bzip2::write::BzEncoder::new(archive_file, bzip2::Compression::best()); + let mut archive = tar::Builder::new(encoder); + archive.append_path_with_name(ledger_path.join(DEFAULT_GENESIS_FILE), DEFAULT_GENESIS_FILE)?; + archive.append_dir_all(blockstore_dir, ledger_path.join(blockstore_dir))?; + archive.into_inner()?; // ensure the genesis archive can be unpacked and it is under // max_genesis_archive_unpacked_size, immediately after creating it above. @@ -5352,6 +5333,9 @@ pub mod tests { crossbeam_channel::unbounded, rand::{seq::SliceRandom, thread_rng}, solana_account_decoder::parse_token::UiTokenAmount, + solana_accounts_db::hardened_unpack::{ + open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + }, solana_entry::entry::{next_entry, next_entry_mut}, solana_runtime::bank::{Bank, RewardType}, solana_sdk::{ @@ -5422,6 +5406,17 @@ pub mod tests { assert!(Path::new(ledger_path.path()) .join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL) .exists()); + + assert_eq!( + genesis_config, + open_genesis_config(ledger_path.path(), MAX_GENESIS_ARCHIVE_UNPACKED_SIZE).unwrap() + ); + // Remove DEFAULT_GENESIS_FILE to force extraction of DEFAULT_GENESIS_ARCHIVE + std::fs::remove_file(ledger_path.path().join(DEFAULT_GENESIS_FILE)).unwrap(); + assert_eq!( + genesis_config, + open_genesis_config(ledger_path.path(), MAX_GENESIS_ARCHIVE_UNPACKED_SIZE).unwrap() + ); } #[test] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index eb7a2fed3acaf9..617dce0e924299 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5438,6 +5438,7 @@ dependencies = [ "bincode", "bitflags 2.6.0", "byteorder 1.5.0", + "bzip2", "chrono", "chrono-humanize", "crossbeam-channel", @@ -5492,6 +5493,7 @@ dependencies = [ "static_assertions", "strum", "strum_macros", + "tar", "tempfile", "thiserror", "tokio", From 8e831839feee2b16a51575026179ef1a60f239ad Mon Sep 17 00:00:00 2001 From: Brooks Date: Fri, 11 Oct 2024 11:39:13 -0400 Subject: [PATCH 482/529] Displays LtHash's Checksum as base58 (#3142) --- Cargo.lock | 1 + lattice-hash/Cargo.toml | 1 + lattice-hash/src/lt_hash.rs | 20 +++++++++++++++++--- programs/sbf/Cargo.lock | 1 + 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b7d175c4e06ef..78438b2e9ea6a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6874,6 +6874,7 @@ version = "2.1.0" dependencies = [ "base64 0.22.1", "blake3", + "bs58", "bytemuck", "criterion", "rand 0.8.5", diff --git a/lattice-hash/Cargo.toml b/lattice-hash/Cargo.toml index 44d257091f642f..3fce6f0180fa01 100644 --- a/lattice-hash/Cargo.toml +++ b/lattice-hash/Cargo.toml @@ -10,6 
+10,7 @@ edition = { workspace = true } [dependencies] base64 = { workspace = true } blake3 = { workspace = true } +bs58 = { workspace = true } bytemuck = { workspace = true, features = ["must_cast"] } [dev-dependencies] diff --git a/lattice-hash/src/lt_hash.rs b/lattice-hash/src/lt_hash.rs index 100c9a10a6d53a..9cdfe555eb24ec 100644 --- a/lattice-hash/src/lt_hash.rs +++ b/lattice-hash/src/lt_hash.rs @@ -1,6 +1,6 @@ use { base64::{display::Base64Display, prelude::BASE64_STANDARD}, - std::fmt, + std::{fmt, str}, }; /// A 16-bit, 1024 element lattice-based incremental hash based on blake3 @@ -77,8 +77,14 @@ impl Checksum { impl fmt::Display for Checksum { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let base64 = Base64Display::new(&self.0, &BASE64_STANDARD); - write!(f, "{base64}") + /// Maximum string length of a base58 encoded Checksum. + const MAX_BASE58_LEN: usize = 44; + let mut buf = [0u8; MAX_BASE58_LEN]; + // SAFETY: The only error is if the buffer is too small + let len = bs58::encode(&self.0).onto(buf.as_mut_slice()).unwrap(); + // SAFETY: The base58 alphabet is utf8 + let str = str::from_utf8(&buf[..len]).unwrap(); + write!(f, "{str}") } } @@ -377,4 +383,12 @@ mod tests { assert_eq!(actual_checksum, expected_checksum); } } + + #[test] + fn test_checksum_display() { + let lt_hash = LtHash::identity(); + let checksum = lt_hash.checksum(); + let str = checksum.to_string(); + assert_eq!(str.as_str(), "DoL6fvKuTpTQCyUh83NxQw2ewKzWYtq9gsTKp1eQiGC2"); + } } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 617dce0e924299..6900b5ac87e17d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5427,6 +5427,7 @@ version = "2.1.0" dependencies = [ "base64 0.22.1", "blake3", + "bs58", "bytemuck", ] From df22c2eb8cd4fdf51dba52d7e1b9fb53de896d3f Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Fri, 11 Oct 2024 21:48:13 +0400 Subject: [PATCH 483/529] remove thiserror from solana-derivation-path (#3086) * remove thiserror from solana-derivation-path * fix excessively qualified path --- Cargo.lock | 1 - programs/sbf/Cargo.lock | 1 - sdk/derivation-path/Cargo.toml | 1 - sdk/derivation-path/src/lib.rs | 29 +++++++++++++++++++++++------ 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78438b2e9ea6a3..d293a5e68877f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6522,7 +6522,6 @@ dependencies = [ "assert_matches", "derivation-path", "qstring", - "thiserror", "uriparse", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 6900b5ac87e17d..11f3c749413333 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5213,7 +5213,6 @@ version = "2.1.0" dependencies = [ "derivation-path", "qstring", - "thiserror", "uriparse", ] diff --git a/sdk/derivation-path/Cargo.toml b/sdk/derivation-path/Cargo.toml index 8b82b091a07861..ada1f3c42daadc 100644 --- a/sdk/derivation-path/Cargo.toml +++ b/sdk/derivation-path/Cargo.toml @@ -12,7 +12,6 @@ edition = { workspace = true } [dependencies] derivation-path = { workspace = true } qstring = { workspace = true } -thiserror = { workspace = true } uriparse = { workspace = true } [dev-dependencies] diff --git a/sdk/derivation-path/src/lib.rs b/sdk/derivation-path/src/lib.rs index 4e76ddf95d2a9b..f3deea068d0292 100644 --- a/sdk/derivation-path/src/lib.rs +++ b/sdk/derivation-path/src/lib.rs @@ -17,7 +17,6 @@ use { fmt, str::FromStr, }, - thiserror::Error, uriparse::URIReference, }; @@ -25,14 +24,25 @@ const ACCOUNT_INDEX: usize = 2; const CHANGE_INDEX: usize 
= 3; /// Derivation path error. -#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum DerivationPathError { - #[error("invalid derivation path: {0}")] InvalidDerivationPath(String), - #[error("infallible")] Infallible, } +impl std::error::Error for DerivationPathError {} + +impl fmt::Display for DerivationPathError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + DerivationPathError::InvalidDerivationPath(p) => { + write!(f, "invalid derivation path: {p}",) + } + DerivationPathError::Infallible => f.write_str("infallible"), + } + } +} + impl From for DerivationPathError { fn from(_: Infallible) -> Self { Self::Infallible @@ -211,10 +221,17 @@ impl<'a> IntoIterator for &'a DerivationPath { const QUERY_KEY_FULL_PATH: &str = "full-path"; const QUERY_KEY_KEY: &str = "key"; -#[derive(Clone, Debug, Error, PartialEq, Eq)] -#[error("invalid query key `{0}`")] +#[derive(Clone, Debug, PartialEq, Eq)] struct QueryKeyError(String); +impl std::error::Error for QueryKeyError {} + +impl fmt::Display for QueryKeyError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "invalid query key `{}`", self.0) + } +} + enum QueryKey { FullPath, Key, From b81d197a9ccf517942c2b6f1ebf9862e5cec60a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 12 Oct 2024 02:04:43 +0800 Subject: [PATCH 484/529] build(deps): bump js-sys from 0.3.71 to 0.3.72 (#3138) * build(deps): bump js-sys from 0.3.71 to 0.3.72 Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.71 to 0.3.72. - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases) - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustwasm/wasm-bindgen/commits) --- updated-dependencies: - dependency-name: js-sys dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d293a5e68877f8..770557bd56da1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3149,9 +3149,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.71" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] diff --git a/Cargo.toml b/Cargo.toml index 8012227a54340c..8818beaecb9b3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -287,7 +287,7 @@ itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.71" +js-sys = "0.3.72" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 11f3c749413333..4d436d8bde77aa 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -2462,9 +2462,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.71" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] From 8d507cb6fdf4c5e600e358b5dcf34ea0d99d742b Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Fri, 11 Oct 2024 23:09:20 +0400 Subject: [PATCH 485/529] SDK doc fixes (#3133) * fix decode-error doc links * fix redundant link in program-option * mark as source code to avoid unclosed HTML tag warning * fix url --- sdk/decode-error/src/lib.rs | 4 ++-- sdk/program-option/src/lib.rs | 2 +- sdk/src/signer/signers.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sdk/decode-error/src/lib.rs b/sdk/decode-error/src/lib.rs index d225ba81a8a039..bcdf5ad5ad016d 100644 --- a/sdk/decode-error/src/lib.rs +++ b/sdk/decode-error/src/lib.rs @@ -18,8 +18,8 @@ use num_traits::FromPrimitive; /// `FromPrimitive`, it is only used correctly when the caller is certain of the /// original error type. /// -/// [`ProgramError`]: crate::program_error::ProgramError -/// [`ProgramError::Custom`]: crate::program_error::ProgramError::Custom +/// [`ProgramError`]: https://docs.rs/solana-program-error/latest/solana_program_error/enum.ProgramError.html +/// [`ProgramError::Custom`]: https://docs.rs/solana-program-error/latest/solana_program_error/enum.ProgramError.html#variant.Custom /// [`ToPrimitive`]: num_traits::ToPrimitive pub trait DecodeError { fn decode_custom_error_to_enum(custom: u32) -> Option diff --git a/sdk/program-option/src/lib.rs b/sdk/program-option/src/lib.rs index bde0e60f6e546f..6f5efe790727f0 100644 --- a/sdk/program-option/src/lib.rs +++ b/sdk/program-option/src/lib.rs @@ -882,7 +882,7 @@ impl Clone for COption { } impl Default for COption { - /// Returns [`COption::None`][COption::None]. 
+ /// Returns [`COption::None`] /// /// # Examples /// diff --git a/sdk/src/signer/signers.rs b/sdk/src/signer/signers.rs index ad06e9ff3f3f24..5b41b5f93717f2 100644 --- a/sdk/src/signer/signers.rs +++ b/sdk/src/signer/signers.rs @@ -17,8 +17,8 @@ pub trait Signers { /// Any `T` where `T` impls `IntoIterator` yielding /// `Signer`s implements `Signers`. /// -/// This includes [&dyn Signer], [Box], -/// [&dyn Signer; N], Vec, Vec, etc. +/// This includes `[&dyn Signer]`, `[Box]`, +/// `[&dyn Signer; N]`, `Vec`, `Vec`, etc. /// /// When used as a generic function param, `&T` /// should be used instead of `T` where T: Signers, due to the `?Sized` bounds on T. From 6210454cb94120a8c4dc48d94236c5707f500076 Mon Sep 17 00:00:00 2001 From: HaoranYi <219428+HaoranYi@users.noreply.github.com> Date: Fri, 11 Oct 2024 19:32:49 -0500 Subject: [PATCH 486/529] fix accounts index startup stat and add two new stats for dups (#3112) * fix account index startup stats * add stats for dup account num at startup * add total num of uniq dup keys * pr: rename --------- Co-authored-by: HaoranYi --- accounts-db/src/accounts_db.rs | 98 +++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 30 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 770212311becb1..ba8ba98e6ec3b0 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -672,9 +672,9 @@ struct GenerateIndexTimings { pub index_time: u64, pub scan_time: u64, pub insertion_time_us: u64, - pub min_bin_size: usize, - pub max_bin_size: usize, - pub total_items: usize, + pub min_bin_size_in_mem: usize, + pub max_bin_size_in_mem: usize, + pub total_items_in_mem: usize, pub storage_size_storages_us: u64, pub index_flush_us: u64, pub rent_paying: AtomicUsize, @@ -682,6 +682,8 @@ struct GenerateIndexTimings { pub total_including_duplicates: u64, pub accounts_data_len_dedup_time_us: u64, pub total_duplicate_slot_keys: u64, + pub total_num_unique_duplicate_keys: u64, + pub num_duplicate_accounts: u64, pub populate_duplicate_keys_us: u64, pub total_slots: u64, pub slots_to_clean: u64, @@ -705,8 +707,8 @@ impl GenerateIndexTimings { ("total_us", self.index_time, i64), ("scan_stores_us", self.scan_time, i64), ("insertion_time_us", self.insertion_time_us, i64), - ("min_bin_size", self.min_bin_size as i64, i64), - ("max_bin_size", self.max_bin_size as i64, i64), + ("min_bin_size_in_mem", self.min_bin_size_in_mem as i64, i64), + ("max_bin_size_in_mem", self.max_bin_size_in_mem as i64, i64), ( "storage_size_storages_us", self.storage_size_storages_us as i64, @@ -728,7 +730,7 @@ impl GenerateIndexTimings { self.total_including_duplicates as i64, i64 ), - ("total_items", self.total_items as i64, i64), + ("total_items_in_mem", self.total_items_in_mem as i64, i64), ( "accounts_data_len_dedup_time_us", self.accounts_data_len_dedup_time_us as i64, @@ -739,6 +741,16 @@ impl GenerateIndexTimings { self.total_duplicate_slot_keys as i64, i64 ), + ( + "total_num_unique_duplicate_keys", + self.total_num_unique_duplicate_keys as i64, + i64 + ), + ( + "num_duplicate_accounts", + self.num_duplicate_accounts as i64, + i64 + ), ( "populate_duplicate_keys_us", self.populate_duplicate_keys_us as i64, @@ -8443,22 +8455,15 @@ impl AccountsDb { index_time.stop(); info!("rent_collector: {:?}", rent_collector); - let (total_items, min_bin_size, max_bin_size) = self - .accounts_index - .account_maps - .iter() - .map(|map_bin| map_bin.len_for_stats()) - .fold((0, usize::MAX, usize::MIN), |acc, len| { - ( - acc.0 + len, 
- std::cmp::min(acc.1, len), - std::cmp::max(acc.2, len), - ) - }); let mut index_flush_us = 0; let total_duplicate_slot_keys = AtomicU64::default(); let mut populate_duplicate_keys_us = 0; + let mut total_items_in_mem = 0; + let mut min_bin_size_in_mem = 0; + let mut max_bin_size_in_mem = 0; + let total_num_unique_duplicate_keys = AtomicU64::default(); + // outer vec is accounts index bin (determined by pubkey value) // inner vec is the pubkeys within that bin that are present in > 1 slot let unique_pubkeys_by_bin = Mutex::new(Vec::>::default()); @@ -8483,6 +8488,10 @@ impl AccountsDb { } let unique_pubkeys_by_bin_inner = unique_keys.into_iter().collect::>(); + total_num_unique_duplicate_keys.fetch_add( + unique_pubkeys_by_bin_inner.len() as u64, + Ordering::Relaxed, + ); // does not matter that this is not ordered by slot unique_pubkeys_by_bin .lock() @@ -8491,6 +8500,19 @@ impl AccountsDb { }); }) .1; + + (total_items_in_mem, min_bin_size_in_mem, max_bin_size_in_mem) = self + .accounts_index + .account_maps + .iter() + .map(|map_bin| map_bin.len_for_stats()) + .fold((0, usize::MAX, usize::MIN), |acc, len| { + ( + acc.0 + len, + std::cmp::min(acc.1, len), + std::cmp::max(acc.2, len), + ) + }); } let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap(); @@ -8499,12 +8521,14 @@ impl AccountsDb { scan_time, index_time: index_time.as_us(), insertion_time_us: insertion_time_us.load(Ordering::Relaxed), - min_bin_size, - max_bin_size, - total_items, + min_bin_size_in_mem, + max_bin_size_in_mem, + total_items_in_mem, rent_paying, amount_to_top_off_rent, total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed), + total_num_unique_duplicate_keys: total_num_unique_duplicate_keys + .load(Ordering::Relaxed), populate_duplicate_keys_us, total_including_duplicates: total_including_duplicates.load(Ordering::Relaxed), total_slots: slots.len() as u64, @@ -8515,6 +8539,7 @@ impl AccountsDb { #[derive(Debug, Default)] struct DuplicatePubkeysVisitedInfo { accounts_data_len_from_duplicates: u64, + num_duplicate_accounts: u64, uncleaned_roots: IntSet, } impl DuplicatePubkeysVisitedInfo { @@ -8530,6 +8555,7 @@ impl AccountsDb { fn merge(&mut self, other: Self) { self.accounts_data_len_from_duplicates += other.accounts_data_len_from_duplicates; + self.num_duplicate_accounts += other.num_duplicate_accounts; self.uncleaned_roots.extend(other.uncleaned_roots); } } @@ -8539,6 +8565,7 @@ impl AccountsDb { Measure::start("handle accounts data len duplicates"); let DuplicatePubkeysVisitedInfo { accounts_data_len_from_duplicates, + num_duplicate_accounts, uncleaned_roots, } = unique_pubkeys_by_bin .par_iter() @@ -8548,14 +8575,18 @@ impl AccountsDb { let intermediate = pubkeys_by_bin .par_chunks(4096) .fold(DuplicatePubkeysVisitedInfo::default, |accum, pubkeys| { - let (accounts_data_len_from_duplicates, uncleaned_roots) = self - .visit_duplicate_pubkeys_during_startup( - pubkeys, - &rent_collector, - &timings, - ); + let ( + accounts_data_len_from_duplicates, + accounts_duplicates_num, + uncleaned_roots, + ) = self.visit_duplicate_pubkeys_during_startup( + pubkeys, + &rent_collector, + &timings, + ); let intermediate = DuplicatePubkeysVisitedInfo { accounts_data_len_from_duplicates, + num_duplicate_accounts: accounts_duplicates_num, uncleaned_roots, }; DuplicatePubkeysVisitedInfo::reduce(accum, intermediate) @@ -8574,6 +8605,7 @@ impl AccountsDb { accounts_data_len_dedup_timer.stop(); timings.accounts_data_len_dedup_time_us = accounts_data_len_dedup_timer.as_us(); 
timings.slots_to_clean = uncleaned_roots.len() as u64; + timings.num_duplicate_accounts = num_duplicate_accounts; self.accounts_index .add_uncleaned_roots(uncleaned_roots.into_iter()); @@ -8635,14 +8667,15 @@ impl AccountsDb { /// 3. update rent stats /// /// Note this should only be used when ALL entries in the accounts index are roots. - /// returns (data len sum of all older duplicates, slots that contained duplicate pubkeys) + /// returns (data len sum of all older duplicates, number of duplicate accounts, slots that contained duplicate pubkeys) fn visit_duplicate_pubkeys_during_startup( &self, pubkeys: &[Pubkey], rent_collector: &RentCollector, timings: &GenerateIndexTimings, - ) -> (u64, IntSet) { + ) -> (u64, u64, IntSet) { let mut accounts_data_len_from_duplicates = 0; + let mut num_duplicate_accounts = 0_u64; let mut uncleaned_slots = IntSet::default(); let mut removed_rent_paying = 0; let mut removed_top_off = 0; @@ -8672,6 +8705,7 @@ impl AccountsDb { accessor.check_and_get_loaded_account(|loaded_account| { let data_len = loaded_account.data_len(); accounts_data_len_from_duplicates += data_len; + num_duplicate_accounts += 1; if let Some(lamports_to_top_off) = Self::stats_for_rent_payers( pubkey, loaded_account.lamports(), @@ -8699,7 +8733,11 @@ impl AccountsDb { timings .amount_to_top_off_rent .fetch_sub(removed_top_off, Ordering::Relaxed); - (accounts_data_len_from_duplicates as u64, uncleaned_slots) + ( + accounts_data_len_from_duplicates as u64, + num_duplicate_accounts, + uncleaned_slots, + ) } fn set_storage_count_and_alive_bytes( From e32281dac2a5c3f90d46bf52661e5cc9c2517d5e Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Sat, 12 Oct 2024 16:26:06 +0400 Subject: [PATCH 487/529] add doc_auto_cfg to relevant sdk crates (#3121) --- sdk/account-info/Cargo.toml | 2 ++ sdk/account-info/src/lib.rs | 2 +- sdk/account/Cargo.toml | 2 ++ sdk/account/src/lib.rs | 1 + sdk/hash/Cargo.toml | 2 ++ sdk/hash/src/lib.rs | 1 + sdk/instruction/Cargo.toml | 2 ++ sdk/instruction/src/lib.rs | 1 + sdk/program-error/Cargo.toml | 2 ++ sdk/program-error/src/lib.rs | 1 + sdk/pubkey/Cargo.toml | 2 ++ sdk/pubkey/src/lib.rs | 1 + sdk/signature/Cargo.toml | 2 ++ sdk/signature/src/lib.rs | 1 + 14 files changed, 21 insertions(+), 1 deletion(-) diff --git a/sdk/account-info/Cargo.toml b/sdk/account-info/Cargo.toml index 55dc3d8a30ef44..4ba7d4733775d8 100644 --- a/sdk/account-info/Cargo.toml +++ b/sdk/account-info/Cargo.toml @@ -21,3 +21,5 @@ bincode = ["dep:bincode", "dep:serde"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] diff --git a/sdk/account-info/src/lib.rs b/sdk/account-info/src/lib.rs index aed2b36669ad86..d063e600a1a3da 100644 --- a/sdk/account-info/src/lib.rs +++ b/sdk/account-info/src/lib.rs @@ -1,5 +1,5 @@ //! Account information. 
- +#![cfg_attr(docsrs, feature(doc_auto_cfg))] use { solana_program_error::ProgramError, solana_program_memory::sol_memset, diff --git a/sdk/account/Cargo.toml b/sdk/account/Cargo.toml index 33d210778f08e5..015736eebe08c4 100644 --- a/sdk/account/Cargo.toml +++ b/sdk/account/Cargo.toml @@ -36,3 +36,5 @@ serde = ["dep:serde", "dep:serde_bytes", "dep:serde_derive"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] diff --git a/sdk/account/src/lib.rs b/sdk/account/src/lib.rs index c2bcbbfd02c916..762e41a5dd5abf 100644 --- a/sdk/account/src/lib.rs +++ b/sdk/account/src/lib.rs @@ -1,4 +1,5 @@ #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] //! The Solana [`Account`] type. #[cfg(feature = "dev-context-only-utils")] diff --git a/sdk/hash/Cargo.toml b/sdk/hash/Cargo.toml index ecf502e0ba243d..7db31fdf5929da 100644 --- a/sdk/hash/Cargo.toml +++ b/sdk/hash/Cargo.toml @@ -11,6 +11,8 @@ edition = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] [dependencies] borsh = { workspace = true, optional = true } diff --git a/sdk/hash/src/lib.rs b/sdk/hash/src/lib.rs index c5cad6f58a5861..0471fff3c48ec1 100644 --- a/sdk/hash/src/lib.rs +++ b/sdk/hash/src/lib.rs @@ -1,4 +1,5 @@ #![no_std] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #[cfg(feature = "borsh")] use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; diff --git a/sdk/instruction/Cargo.toml b/sdk/instruction/Cargo.toml index 0f96abda001702..5826fca227ff20 100644 --- a/sdk/instruction/Cargo.toml +++ b/sdk/instruction/Cargo.toml @@ -49,6 +49,8 @@ std = [] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] [lints] workspace = true diff --git a/sdk/instruction/src/lib.rs b/sdk/instruction/src/lib.rs index 8627a204bb41d8..b94ebf9df7ea2d 100644 --- a/sdk/instruction/src/lib.rs +++ b/sdk/instruction/src/lib.rs @@ -10,6 +10,7 @@ //! while executing a given instruction is also included in `Instruction`, as //! [`AccountMeta`] values. The runtime uses this information to efficiently //! schedule execution of transactions. +#![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] #![no_std] diff --git a/sdk/program-error/Cargo.toml b/sdk/program-error/Cargo.toml index 64860dee0a0331..8e864d309bc5f8 100644 --- a/sdk/program-error/Cargo.toml +++ b/sdk/program-error/Cargo.toml @@ -27,3 +27,5 @@ serde = ["dep:serde", "dep:serde_derive"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] diff --git a/sdk/program-error/src/lib.rs b/sdk/program-error/src/lib.rs index 87c79595bd688a..947c47f6cfd5b6 100644 --- a/sdk/program-error/src/lib.rs +++ b/sdk/program-error/src/lib.rs @@ -1,6 +1,7 @@ //! The [`ProgramError`] type and related definitions. 
#![allow(clippy::arithmetic_side_effects)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] #[cfg(feature = "borsh")] use borsh::io::Error as BorshIoError; #[cfg(feature = "serde")] diff --git a/sdk/pubkey/Cargo.toml b/sdk/pubkey/Cargo.toml index 64260e6f085191..b34d214388a269 100644 --- a/sdk/pubkey/Cargo.toml +++ b/sdk/pubkey/Cargo.toml @@ -75,6 +75,8 @@ std = [] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] [lints] workspace = true diff --git a/sdk/pubkey/src/lib.rs b/sdk/pubkey/src/lib.rs index c29e3654c396e7..7f3cd3fd8edf87 100644 --- a/sdk/pubkey/src/lib.rs +++ b/sdk/pubkey/src/lib.rs @@ -1,5 +1,6 @@ //! Solana account addresses. #![no_std] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] diff --git a/sdk/signature/Cargo.toml b/sdk/signature/Cargo.toml index 989200b9281b61..b01518d9692d8e 100644 --- a/sdk/signature/Cargo.toml +++ b/sdk/signature/Cargo.toml @@ -43,6 +43,8 @@ verify = ["dep:ed25519-dalek"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +all-features = true +rustdoc-args = ["--cfg=docsrs"] [lints] workspace = true diff --git a/sdk/signature/src/lib.rs b/sdk/signature/src/lib.rs index 1521391824a8da..7e793d75603cf1 100644 --- a/sdk/signature/src/lib.rs +++ b/sdk/signature/src/lib.rs @@ -1,5 +1,6 @@ //! 64-byte signature type. #![no_std] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #[cfg(any(test, feature = "verify"))] use core::convert::TryInto; From 2eb3e1bcd6d51e069c2235931d58028b4b9559f3 Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Sat, 12 Oct 2024 14:33:43 +0200 Subject: [PATCH 488/529] stop ignoring forwarder tests (#3143) When we migrated to QUIC from UDP, forwarder tests were ignored; this PR fixes the problem.
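To make the new pattern concrete, here is a minimal sketch of the deadline-polling receive loop the reworked tests rely on. It assumes the `setup_quic_server_with_sockets`, `SpawnTestServerResult`, and `TestServerConfig` helpers touched in the streamer diff below; the `drain_packets` wrapper and its signature are illustrative only, not part of the patch:

    // Stand up a test QUIC server over the given socket, then poll its
    // crossbeam receiver under a deadline instead of blocking on recv_mmsg.
    async fn drain_packets(socket: std::net::UdpSocket, expected: usize) -> usize {
        let SpawnTestServerResult { join_handle, exit, receiver, .. } =
            setup_quic_server_with_sockets(vec![socket], None, TestServerConfig::default());
        let deadline = std::time::Instant::now() + std::time::Duration::from_secs(5);
        let mut total = 0;
        while std::time::Instant::now() < deadline && total < expected {
            match receiver.try_recv() {
                // Each received item is a batch of packets; count them all.
                Ok(batch) => total += batch.len(),
                // Nothing queued yet: yield briefly rather than busy-spin.
                Err(_) => tokio::time::sleep(std::time::Duration::from_millis(100)).await,
            }
        }
        // Signal the server to shut down and wait for it to finish.
        exit.store(true, std::sync::atomic::Ordering::Relaxed);
        join_handle.await.unwrap();
        total
    }

The actual `check_all_received` helper in the diff additionally verifies each packet's size and blockhash before counting it.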
--- core/src/banking_stage/forwarder.rs | 171 +++++++++++++----- streamer/src/nonblocking/testing_utilities.rs | 21 ++- streamer/src/quic.rs | 2 +- 3 files changed, 137 insertions(+), 57 deletions(-) diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index 41e8e09fb372d2..d48c1556fa206e 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -311,6 +311,7 @@ mod tests { unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, unprocessed_transaction_storage::ThreadType, }, + solana_client::rpc_client::SerializableTransaction, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{blockstore::Blockstore, genesis_utils::GenesisConfigInfo}, solana_perf::packet::PacketFlags, @@ -320,13 +321,25 @@ mod tests { hash::Hash, poh_config::PohConfig, signature::Keypair, signer::Signer, system_transaction, transaction::VersionedTransaction, }, - solana_streamer::recvmmsg::recv_mmsg, - std::sync::atomic::AtomicBool, + solana_streamer::{ + nonblocking::testing_utilities::{ + setup_quic_server_with_sockets, SpawnTestServerResult, TestServerConfig, + }, + quic::rt, + }, + std::{ + sync::atomic::AtomicBool, + time::{Duration, Instant}, + }, tempfile::TempDir, + tokio::time::sleep, }; struct TestSetup { _ledger_dir: TempDir, + blockhash: Hash, + rent_min_balance: u64, + bank_forks: Arc>, poh_recorder: Arc>, exit: Arc, @@ -363,6 +376,9 @@ mod tests { TestSetup { _ledger_dir: ledger_path, + blockhash: genesis_config.hash(), + rent_min_balance: genesis_config.rent.minimum_balance(0), + bank_forks, poh_recorder, exit, @@ -372,11 +388,52 @@ mod tests { } } + async fn check_all_received( + socket: UdpSocket, + expected_num_packets: usize, + expected_packet_size: usize, + expected_blockhash: &Hash, + ) { + let SpawnTestServerResult { + join_handle, + exit, + receiver, + server_address: _, + stats: _, + } = setup_quic_server_with_sockets(vec![socket], None, TestServerConfig::default()); + + let now = Instant::now(); + let mut total_packets = 0; + while now.elapsed().as_secs() < 5 { + if let Ok(packets) = receiver.try_recv() { + total_packets += packets.len(); + for packet in packets.iter() { + assert_eq!(packet.meta().size, expected_packet_size); + let tx: VersionedTransaction = packet.deserialize_slice(..).unwrap(); + assert_eq!( + tx.get_recent_blockhash(), + expected_blockhash, + "Unexpected blockhash, tx: {tx:?}, expected blockhash: {expected_blockhash}." 
+ ); + } + } else { + sleep(Duration::from_millis(100)).await; + } + if total_packets >= expected_num_packets { + break; + } + } + assert_eq!(total_packets, expected_num_packets); + + exit.store(true, Ordering::Relaxed); + join_handle.await.unwrap(); + } + #[test] - #[ignore] fn test_forwarder_budget() { - solana_logger::setup(); let TestSetup { + blockhash, + rent_min_balance, bank_forks, poh_recorder, exit, @@ -390,17 +447,21 @@ mod tests { let tx = system_transaction::transfer( &Keypair::new(), &solana_sdk::pubkey::new_rand(), - 1, - Hash::new_unique(), + rent_min_balance, + blockhash, ); - let packet = Packet::from_data(None, tx).unwrap(); + let mut packet = Packet::from_data(None, tx).unwrap(); + // unstaked transactions will not be forwarded + packet.meta_mut().set_from_staked_node(true); + let expected_packet_size = packet.meta().size; let deserialized_packet = DeserializedPacket::new(packet).unwrap(); let test_cases = vec![ ("budget-restricted", DataBudget::restricted(), 0), ("budget-available", DataBudget::default(), 1), ]; - for (name, data_budget, expected_num_forwarded) in test_cases { + let runtime = rt("solQuicTestRt".to_string()); + for (_name, data_budget, expected_num_forwarded) in test_cases { let mut forwarder = Forwarder::new( poh_recorder.clone(), bank_forks.clone(), @@ -425,14 +486,13 @@ mod tests { &mut TracerPacketStats::new(0), ); - let recv_socket = &local_node.sockets.tpu_forwards[0]; - recv_socket - .set_nonblocking(expected_num_forwarded == 0) - .unwrap(); - - let mut packets = vec![Packet::default(); 2]; - let num_received = recv_mmsg(recv_socket, &mut packets[..]).unwrap_or_default(); - assert_eq!(num_received, expected_num_forwarded, "{name}"); + let recv_socket = &local_node.sockets.tpu_forwards_quic[0]; + runtime.block_on(check_all_received( + (*recv_socket).try_clone().unwrap(), + expected_num_forwarded, + expected_packet_size, + &blockhash, + )); } exit.store(true, Ordering::Relaxed); @@ -440,10 +500,10 @@ mod tests { } #[test] - #[ignore] fn test_handle_forwarding() { - solana_logger::setup(); let TestSetup { + blockhash, + rent_min_balance, bank_forks, poh_recorder, exit, @@ -453,36 +513,58 @@ mod tests { .. } = setup(); - // packets are deserialized upon receiving, failed packets will not be - // forwarded; Therefore need to create real packets here. 
let keypair = Keypair::new(); let pubkey = solana_sdk::pubkey::new_rand(); - let fwd_block_hash = Hash::new_unique(); + // forwarded packets will not be forwarded again let forwarded_packet = { - let transaction = system_transaction::transfer(&keypair, &pubkey, 1, fwd_block_hash); + let transaction = + system_transaction::transfer(&keypair, &pubkey, rent_min_balance, blockhash); let mut packet = Packet::from_data(None, transaction).unwrap(); packet.meta_mut().flags |= PacketFlags::FORWARDED; DeserializedPacket::new(packet).unwrap() }; - - let normal_block_hash = Hash::new_unique(); - let normal_packet = { - let transaction = system_transaction::transfer(&keypair, &pubkey, 1, normal_block_hash); + // packets from unstaked nodes will not be forwarded + let unstaked_packet = { + let transaction = + system_transaction::transfer(&keypair, &pubkey, rent_min_balance, blockhash); + let packet = Packet::from_data(None, transaction).unwrap(); + DeserializedPacket::new(packet).unwrap() + }; + // packets with incorrect blockhash will be filtered out + let incorrect_blockhash_packet = { + let transaction = + system_transaction::transfer(&keypair, &pubkey, rent_min_balance, Hash::default()); let packet = Packet::from_data(None, transaction).unwrap(); DeserializedPacket::new(packet).unwrap() }; + // maybe also add packet without stake and packet with incorrect blockhash? + let (expected_packet_size, normal_packet) = { + let transaction = system_transaction::transfer(&keypair, &pubkey, 1, blockhash); + let mut packet = Packet::from_data(None, transaction).unwrap(); + packet.meta_mut().set_from_staked_node(true); + (packet.meta().size, DeserializedPacket::new(packet).unwrap()) + }; + let mut unprocessed_packet_batches = UnprocessedTransactionStorage::new_transaction_storage( - UnprocessedPacketBatches::from_iter(vec![forwarded_packet, normal_packet], 2), + UnprocessedPacketBatches::from_iter( + vec![ + forwarded_packet, + unstaked_packet, + incorrect_blockhash_packet, + normal_packet, + ], + 4, + ), ThreadType::Transactions, ); let connection_cache = ConnectionCache::new("connection_cache_test"); let test_cases = vec![ - ("fwd-normal", true, vec![normal_block_hash], 2), - ("fwd-no-op", true, vec![], 2), - ("fwd-no-hold", false, vec![], 0), + ("fwd-normal", true, 2, 1), + ("fwd-no-op", true, 2, 0), + ("fwd-no-hold", false, 0, 0), ]; let mut forwarder = Forwarder::new( @@ -492,7 +574,8 @@ mod tests { Arc::new(connection_cache), Arc::new(DataBudget::default()), ); - for (name, hold, expected_ids, expected_num_unprocessed) in test_cases { + let runtime = rt("solQuicTestRt".to_string()); + for (name, hold, expected_num_unprocessed, expected_num_processed) in test_cases { let stats = BankingStageStats::default(); forwarder.handle_forwarding( &mut unprocessed_packet_batches, @@ -502,24 +585,14 @@ mod tests { &mut TracerPacketStats::new(0), ); - let recv_socket = &local_node.sockets.tpu_forwards[0]; - recv_socket - .set_nonblocking(expected_ids.is_empty()) - .unwrap(); - - let mut packets = vec![Packet::default(); 2]; - let num_received = recv_mmsg(recv_socket, &mut packets[..]).unwrap_or_default(); - assert_eq!(num_received, expected_ids.len(), "{name}"); - for (i, expected_id) in expected_ids.iter().enumerate() { - assert_eq!(packets[i].meta().size, 215); - let recv_transaction: VersionedTransaction = - packets[i].deserialize_slice(..).unwrap(); - assert_eq!( - recv_transaction.message.recent_blockhash(), - expected_id, - "{name}" - ); - } + let recv_socket = &local_node.sockets.tpu_forwards_quic[0]; + + 
runtime.block_on(check_all_received( + (*recv_socket).try_clone().unwrap(), + expected_num_processed, + expected_packet_size, + &blockhash, + )); let num_unprocessed_packets: usize = unprocessed_packet_batches.len(); assert_eq!(num_unprocessed_packets, expected_num_unprocessed, "{name}"); diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index 4a63458e7c6d74..ab87334c7cc4c9 100644 --- a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -136,13 +136,7 @@ pub struct SpawnTestServerResult { pub fn setup_quic_server( option_staked_nodes: Option, - TestServerConfig { - max_connections_per_peer, - max_staked_connections, - max_unstaked_connections, - max_streams_per_ms, - max_connections_per_ipaddr_per_minute, - }: TestServerConfig, + config: TestServerConfig, ) -> SpawnTestServerResult { let sockets = { #[cfg(not(target_os = "windows"))] @@ -171,7 +165,20 @@ pub fn setup_quic_server( vec![UdpSocket::bind("127.0.0.1:0").unwrap()] } }; + setup_quic_server_with_sockets(sockets, option_staked_nodes, config) +} +pub fn setup_quic_server_with_sockets( + sockets: Vec, + option_staked_nodes: Option, + TestServerConfig { + max_connections_per_peer, + max_staked_connections, + max_unstaked_connections, + max_streams_per_ms, + max_connections_per_ipaddr_per_minute, + }: TestServerConfig, +) -> SpawnTestServerResult { let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index b5f78c753da92c..3d15a42bfdad05 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -153,7 +153,7 @@ pub(crate) fn configure_server( Ok((server_config, cert_chain_pem)) } -fn rt(name: String) -> Runtime { +pub fn rt(name: String) -> Runtime { tokio::runtime::Builder::new_multi_thread() .thread_name(name) .enable_all() From 1d9947cd681704fedfce50960247199cd91aea31 Mon Sep 17 00:00:00 2001 From: steviez Date: Sun, 13 Oct 2024 11:36:02 -0500 Subject: [PATCH 489/529] ledger-tool: Fix create-snapshot default value for output_directory (#3148) The arguments to specify full and incremental snapshot archives paths used to be a global argument; these were moved to only be instantiated on commands that needed them in #1773. But, when the arguments were moved from app-level to subcommand-level, the code that matches the arguments was not updated to look at subcommand-matches instead of app-matches. 
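The root cause generalizes: clap scopes an argument's value to the matches object of the command that defines it, so once the snapshot-path arguments moved onto the subcommand, the app-level matches stopped seeing them. A minimal sketch of the distinction, assuming the clap 2-style `value_t!`/`SubCommand` API implied by the diff below; the trimmed-down argument set is for illustration only:

    use clap::{value_t, App, Arg, SubCommand};

    fn main() {
        let app_matches = App::new("ledger-tool")
            .subcommand(
                SubCommand::with_name("create-snapshot")
                    .arg(Arg::with_name("snapshots").long("snapshots").takes_value(true)),
            )
            .get_matches();
        if let ("create-snapshot", Some(arg_matches)) = app_matches.subcommand() {
            // Broken: "snapshots" is defined on the subcommand, so the app-level
            // matches never contain it and this lookup always yields None.
            let _from_app = value_t!(app_matches, "snapshots", String).ok();
            // Fixed: read the value from the subcommand's own matches.
            let _from_sub = value_t!(arg_matches, "snapshots", String).ok();
        }
    }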
--- ledger-tool/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 682f2bf8a1aa0f..6756806ad4ecb3 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1923,11 +1923,11 @@ fn main() { let is_minimized = arg_matches.is_present("minimized"); let output_directory = value_t!(arg_matches, "output_directory", PathBuf) .unwrap_or_else(|_| { - let snapshot_archive_path = value_t!(matches, "snapshots", String) + let snapshot_archive_path = value_t!(arg_matches, "snapshots", String) .ok() .map(PathBuf::from); let incremental_snapshot_archive_path = - value_t!(matches, "incremental_snapshot_archive_path", String) + value_t!(arg_matches, "incremental_snapshot_archive_path", String) .ok() .map(PathBuf::from); match ( From 05a3200ca6ad6c6254f0b5871a2b62848a99ab93 Mon Sep 17 00:00:00 2001 From: steviez Date: Sun, 13 Oct 2024 21:17:05 -0500 Subject: [PATCH 490/529] chore: Fix whitespace in error message (#3154) --- runtime/src/bank.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8b01326a44dd08..bd19c71fbb99bc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1735,7 +1735,7 @@ impl Bank { // from the passed in genesis_config instead (as new()/new_with_paths() already do) assert_eq!( bank.genesis_creation_time, genesis_config.creation_time, - "Bank snapshot genesis creation time does not match genesis.bin creation time.\ + "Bank snapshot genesis creation time does not match genesis.bin creation time. \ The snapshot and genesis.bin might pertain to different clusters" ); assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot); From f7f340513cf249f82253e23ab8502c9ab844ce3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 19:10:39 +0800 Subject: [PATCH 491/529] build(deps): bump bytemuck from 1.18.0 to 1.19.0 (#3155) * build(deps): bump bytemuck from 1.18.0 to 1.19.0 Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.18.0 to 1.19.0. - [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md) - [Commits](https://github.com/Lokathor/bytemuck/compare/v1.18.0...v1.19.0) --- updated-dependencies: - dependency-name: bytemuck dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update all Cargo files --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- programs/sbf/Cargo.lock | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 770557bd56da1a..a0f6060e727908 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1154,9 +1154,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "bytemuck" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" dependencies = [ "bytemuck_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 8818beaecb9b3f..9fd40bdc457f0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -223,7 +223,7 @@ bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.18.0" +bytemuck = "1.19.0" bytemuck_derive = "1.8.0" byteorder = "1.5.0" bytes = "1.7" diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 4d436d8bde77aa..2bbae7b0bb4d8e 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -850,9 +850,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" dependencies = [ "bytemuck_derive", ] From 49d22987621067ab6401140695cdd69a306e61c3 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Mon, 14 Oct 2024 19:11:01 +0800 Subject: [PATCH 492/529] link openssl statically for Windows (#3118) * ci: fix windows pipeline * vendor openssl for windows * add comment for the workaround --- .github/scripts/install-all-deps.sh | 12 ++++++++++++ .github/scripts/install-openssl.sh | 18 ++++++++++++++++++ ...lippy-before-script.sh => install-proto.sh} | 2 -- .github/workflows/cargo.yml | 9 +++++++-- .github/workflows/release-artifacts.yml | 13 +++++++++---- storage-bigtable/Cargo.toml | 13 +++---------- 6 files changed, 49 insertions(+), 18 deletions(-) create mode 100755 .github/scripts/install-all-deps.sh create mode 100644 .github/scripts/install-openssl.sh rename .github/scripts/{cargo-clippy-before-script.sh => install-proto.sh} (79%) mode change 100755 => 100644 diff --git a/.github/scripts/install-all-deps.sh b/.github/scripts/install-all-deps.sh new file mode 100755 index 00000000000000..33d154453795c3 --- /dev/null +++ b/.github/scripts/install-all-deps.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +os_name="$1" + +# shellcheck source=.github/scripts/install-openssl.sh +source "$here/install-openssl.sh" "$os_name" +# shellcheck source=.github/scripts/install-proto.sh +source "$here/install-proto.sh" "$os_name" diff --git a/.github/scripts/install-openssl.sh b/.github/scripts/install-openssl.sh new file mode 100644 index 00000000000000..1bbcbd01a3773b --- /dev/null +++ b/.github/scripts/install-openssl.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -e + +os_name="$1" + +case "$os_name" in +"Windows") + choco install openssl --version 3.3.2 --install-arguments="'/DIR=C:\OpenSSL'" -y + export 
OPENSSL_LIB_DIR="C:\OpenSSL\lib\VC\x64\MT" + export OPENSSL_INCLUDE_DIR="C:\OpenSSL\include" + ;; +"macOS") ;; +"Linux") ;; +*) + echo "Unknown Operating System" + ;; +esac diff --git a/.github/scripts/cargo-clippy-before-script.sh b/.github/scripts/install-proto.sh old mode 100755 new mode 100644 similarity index 79% rename from .github/scripts/cargo-clippy-before-script.sh rename to .github/scripts/install-proto.sh index bba03060877434..478847562fdc0f --- a/.github/scripts/cargo-clippy-before-script.sh +++ b/.github/scripts/install-proto.sh @@ -6,8 +6,6 @@ os_name="$1" case "$os_name" in "Windows") - vcpkg install openssl:x64-windows-static-md - vcpkg integrate install choco install protoc export PROTOC='C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe' ;; diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml index b78d9d66de4016..37096d3be6808a 100644 --- a/.github/workflows/cargo.yml +++ b/.github/workflows/cargo.yml @@ -41,11 +41,16 @@ jobs: with: version: "v0.8.1" - - shell: bash - run: .github/scripts/cargo-clippy-before-script.sh ${{ runner.os }} + # took the workaround from https://github.com/sfackler/rust-openssl/issues/2149 + - name: Set Perl environment variables + if: runner.os == 'Windows' + run: | + echo "PERL=$((where.exe perl)[0])" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + echo "OPENSSL_SRC_PERL=$((where.exe perl)[0])" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - shell: bash run: | + source .github/scripts/install-all-deps.sh ${{ runner.os }} source ci/rust-version.sh nightly rustup component add clippy --toolchain "$rust_nightly" scripts/cargo-clippy-nightly.sh diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 8b1da1d7e28426..1966d59ab1a246 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -39,14 +39,19 @@ jobs: run: | git checkout ${{ inputs.commit }} + # took the workaround from https://github.com/sfackler/rust-openssl/issues/2149 + - name: Set Perl environment variables + run: | + echo "PERL=$((where.exe perl)[0])" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + echo "OPENSSL_SRC_PERL=$((where.exe perl)[0])" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + - name: Build id: build shell: bash run: | - vcpkg install openssl:x64-windows-static-md - vcpkg integrate install - choco install protoc - export PROTOC="C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe" + # install all deps + source .github/scripts/install-all-deps.sh ${{ runner.os }} + source /tmp/env.sh echo "tag=$CI_TAG" >> $GITHUB_OUTPUT eval "$(ci/channel-info.sh)" diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index fafacf0292da72..4234cf3eec234a 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -22,6 +22,9 @@ http = { workspace = true } hyper = { workspace = true } hyper-proxy = { workspace = true } log = { workspace = true } +# openssl is a dependency of the goauth and smpl_jwt crates, but explicitly +# declare it here as well to activate the "vendored" feature that builds OpenSSL +openssl = { workspace = true, features = ["vendored"] } prost = { workspace = true } prost-types = { workspace = true } serde = { workspace = true } @@ -36,16 +39,6 @@ tokio = { workspace = true } tonic = { workspace = true, features = ["tls", "transport"] } zstd = { workspace = true } -# openssl is a dependency of the goauth and smpl_jwt crates, but explicitly -# declare it here 
as well to activate the "vendored" feature that builds OpenSSL -# statically... -[target."cfg(not(windows))".dependencies] -openssl = { workspace = true, features = ["vendored"] } -# ...except on Windows to avoid having to deal with getting CI past a build-time -# Perl dependency -[target."cfg(windows)".dependencies] -openssl = { workspace = true, features = [] } - [lib] crate-type = ["lib"] name = "solana_storage_bigtable" From 2ea7fd7ef75f3636dd91f555146c0297f1e5551f Mon Sep 17 00:00:00 2001 From: Kevin Heavey Date: Mon, 14 Oct 2024 17:03:47 +0400 Subject: [PATCH 493/529] Add const pubkey support and make declare_id a declarative macro (#2348) * make declare_id a declarative macro * remove old comment * deprecate program_declare_id * deprecate pubkey macro * put deprecation on the re-export of the pubkey macro * replace pubkey! with from_str_const in this repo * fmt * remove unused import * Revert "remove unused rustc_version dep from wen-restart (wrong branch)" This reverts commit 60dbddd03ab3330fc3d86d36ed77ad2babf0afc6. * avoid wen-restart changes again * fmt * fix deprecation text * make declare_deprecated_id a declarative macro * put back the deprecation on the re-export of the pubkey macro * fmt * don't deprecate the pubkey macro, but make it a declarative macro * update deprecation note * re-export the new pubkey macro in solana-sdk (with deprecation) instead of the old one --- Cargo.lock | 18 ++++ Cargo.toml | 1 + keygen/src/keygen.rs | 5 +- program-runtime/src/sysvar_cache.rs | 4 +- programs/sbf/Cargo.lock | 18 ++++ sdk/Cargo.toml | 1 + sdk/macro/src/lib.rs | 7 ++ sdk/program/Cargo.toml | 1 + sdk/program/src/lib.rs | 41 +-------- sdk/program/tests/test_pubkey_export.rs | 15 ++++ sdk/pubkey/Cargo.toml | 1 + sdk/pubkey/src/lib.rs | 115 ++++++++++++++++++++++++ sdk/src/lib.rs | 33 +++---- sdk/tests/test_pubkey_export.rs | 8 ++ 14 files changed, 208 insertions(+), 60 deletions(-) create mode 100644 sdk/program/tests/test_pubkey_export.rs create mode 100644 sdk/tests/test_pubkey_export.rs diff --git a/Cargo.lock b/Cargo.lock index a0f6060e727908..0e9569f47e6a82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2284,6 +2284,21 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "five8_const" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b4f62f0f8ca357f93ae90c8c2dd1041a1f665fde2f889ea9b1787903829015" +dependencies = [ + "five8_core", +] + +[[package]] +name = "five8_core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b2a72055cd9cffc40c9f75f1e5810c80559e158796cf2202292ce4745889588" + [[package]] name = "fixedbitset" version = "0.4.0" @@ -7255,6 +7270,7 @@ dependencies = [ "console_error_panic_hook", "console_log", "curve25519-dalek 4.1.3", + "five8_const", "getrandom 0.2.10", "itertools 0.12.1", "js-sys", @@ -7422,6 +7438,7 @@ dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", + "five8_const", "getrandom 0.2.10", "js-sys", "num-traits", @@ -7868,6 +7885,7 @@ dependencies = [ "solana-logger", "solana-program", "solana-program-memory", + "solana-pubkey", "solana-sanitize", "solana-sdk", "solana-sdk-macro", diff --git a/Cargo.toml b/Cargo.toml index 9fd40bdc457f0a..925d3c01cb13e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -261,6 +261,7 @@ etcd-client = "0.11.1" fast-math = "0.1" fd-lock = "3.0.13" flate2 = "1.0.31" +five8_const = "0.1.3" fnv = "1.0.7" fs_extra = "1.3.0" futures = "0.3.31" diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 
faf3a493ff7a7b..cbccb33e9a9095 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -50,9 +50,10 @@ use { }; mod smallest_length_44_public_key { - use solana_sdk::{pubkey, pubkey::Pubkey}; + use solana_sdk::pubkey::Pubkey; - pub(super) static PUBKEY: Pubkey = pubkey!("21111111111111111111111111111111111111111111"); + pub(super) static PUBKEY: Pubkey = + Pubkey::from_str_const("21111111111111111111111111111111111111111111"); #[test] fn assert_length() { diff --git a/program-runtime/src/sysvar_cache.rs b/program-runtime/src/sysvar_cache.rs index 8b4bc614375b64..76496ce9ed69b4 100644 --- a/program-runtime/src/sysvar_cache.rs +++ b/program-runtime/src/sysvar_cache.rs @@ -50,9 +50,9 @@ pub struct SysvarCache { // declare_deprecated_sysvar_id doesn't support const. // These sysvars are going away anyway. -const FEES_ID: Pubkey = solana_sdk::pubkey!("SysvarFees111111111111111111111111111111111"); +const FEES_ID: Pubkey = Pubkey::from_str_const("SysvarFees111111111111111111111111111111111"); const RECENT_BLOCKHASHES_ID: Pubkey = - solana_sdk::pubkey!("SysvarRecentB1ockHashes11111111111111111111"); + Pubkey::from_str_const("SysvarRecentB1ockHashes11111111111111111111"); impl SysvarCache { /// Overwrite a sysvar. For testing purposes only. diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2bbae7b0bb4d8e..c77621fa74892a 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -1718,6 +1718,21 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "five8_const" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b4f62f0f8ca357f93ae90c8c2dd1041a1f665fde2f889ea9b1787903829015" +dependencies = [ + "five8_core", +] + +[[package]] +name = "five8_core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b2a72055cd9cffc40c9f75f1e5810c80559e158796cf2202292ce4745889588" + [[package]] name = "fixedbitset" version = "0.4.1" @@ -5656,6 +5671,7 @@ dependencies = [ "console_error_panic_hook", "console_log", "curve25519-dalek 4.1.3", + "five8_const", "getrandom 0.2.10", "js-sys", "lazy_static", @@ -5804,6 +5820,7 @@ dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", + "five8_const", "getrandom 0.2.10", "js-sys", "num-traits", @@ -6638,6 +6655,7 @@ dependencies = [ "solana-feature-set", "solana-program", "solana-program-memory", + "solana-pubkey", "solana-sanitize", "solana-sdk-macro", "solana-secp256k1-recover", diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index e0e7f132ffec20..10eb2b0f8a6c8a 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -97,6 +97,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ ] } solana-program = { workspace = true } solana-program-memory = { workspace = true } +solana-pubkey = { workspace = true } solana-sanitize = { workspace = true } solana-sdk-macro = { workspace = true } solana-secp256k1-recover = { workspace = true } diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index e3380712ef9d1a..13841b126b82c1 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -180,12 +180,14 @@ impl ToTokens for ProgramSdkIdDeprecated { } } +#[deprecated(since = "2.1.0", note = "Use `solana_pubkey::pubkey` instead")] #[proc_macro] pub fn pubkey(input: TokenStream) -> TokenStream { let id = parse_macro_input!(input as SdkPubkey); TokenStream::from(quote! 
{#id}) } +#[deprecated(since = "2.1.0", note = "Use `solana_pubkey::pubkey!` instead")] #[proc_macro] pub fn program_pubkey(input: TokenStream) -> TokenStream { let id = parse_macro_input!(input as ProgramSdkPubkey); @@ -204,12 +206,17 @@ pub fn declare_deprecated_id(input: TokenStream) -> TokenStream { TokenStream::from(quote! {#id}) } +#[deprecated(since = "2.1.0", note = "Use `solana_pubkey::declare_id` instead")] #[proc_macro] pub fn program_declare_id(input: TokenStream) -> TokenStream { let id = parse_macro_input!(input as ProgramSdkId); TokenStream::from(quote! {#id}) } +#[deprecated( + since = "2.1.0", + note = "Use `solana_pubkey::declare_deprecated_id` instead" +)] #[proc_macro] pub fn program_declare_deprecated_id(input: TokenStream) -> TokenStream { let id = parse_macro_input!(input as ProgramSdkIdDeprecated); diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 9785417637f235..391e5e148f11ec 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -20,6 +20,7 @@ bs58 = { workspace = true, features = ["alloc"] } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } bytemuck_derive = { workspace = true } +five8_const = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } memoffset = { workspace = true } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index de02676c1f7c28..11a2ccaab2f9d5 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -597,46 +597,7 @@ pub mod sdk_ids { #[deprecated(since = "2.1.0", note = "Use `solana-decode-error` crate instead")] pub use solana_decode_error as decode_error; -/// Same as [`declare_id`] except that it reports that this ID has been deprecated. -pub use solana_sdk_macro::program_declare_deprecated_id as declare_deprecated_id; -/// Convenience macro to declare a static public key and functions to interact with it. -/// -/// Input: a single literal base58 string representation of a program's ID. -/// -/// # Example -/// -/// ``` -/// # // wrapper is used so that the macro invocation occurs in the item position -/// # // rather than in the statement position which isn't allowed. -/// use std::str::FromStr; -/// use solana_program::{declare_id, pubkey::Pubkey}; -/// -/// # mod item_wrapper { -/// # use solana_program::declare_id; -/// declare_id!("My11111111111111111111111111111111111111111"); -/// # } -/// # use item_wrapper::id; -/// -/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); -/// assert_eq!(id(), my_id); -/// ``` -pub use solana_sdk_macro::program_declare_id as declare_id; -/// Convenience macro to define a static public key. -/// -/// Input: a single literal base58 string representation of a Pubkey. 
-/// -/// # Example -/// -/// ``` -/// use std::str::FromStr; -/// use solana_program::{pubkey, pubkey::Pubkey}; -/// -/// static ID: Pubkey = pubkey!("My11111111111111111111111111111111111111111"); -/// -/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); -/// assert_eq!(ID, my_id); -/// ``` -pub use solana_sdk_macro::program_pubkey as pubkey; +pub use solana_pubkey::{declare_deprecated_id, declare_id, pubkey}; #[macro_use] extern crate serde_derive; diff --git a/sdk/program/tests/test_pubkey_export.rs b/sdk/program/tests/test_pubkey_export.rs new file mode 100644 index 00000000000000..a6b0564e995e16 --- /dev/null +++ b/sdk/program/tests/test_pubkey_export.rs @@ -0,0 +1,15 @@ +use { + solana_program::{pubkey, pubkey::Pubkey}, + std::str::FromStr, +}; + +// solana_program::pubkey refers to both a module and a macro. +// This test demonstrates that both imports are working +#[test] +fn test_pubkey_import() { + let pk = pubkey!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"); + assert_eq!( + pk, + Pubkey::from_str("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL").unwrap() + ); +} diff --git a/sdk/pubkey/Cargo.toml b/sdk/pubkey/Cargo.toml index b34d214388a269..50d7a5ed6bdef7 100644 --- a/sdk/pubkey/Cargo.toml +++ b/sdk/pubkey/Cargo.toml @@ -16,6 +16,7 @@ borsh0-10 = { package = "borsh", version = "0.10.3", optional = true } bs58 = { workspace = true } bytemuck = { workspace = true, optional = true } bytemuck_derive = { workspace = true, optional = true } +five8_const = { workspace = true } num-traits = { workspace = true } serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } diff --git a/sdk/pubkey/src/lib.rs b/sdk/pubkey/src/lib.rs index 7f3cd3fd8edf87..e378c44caf3f06 100644 --- a/sdk/pubkey/src/lib.rs +++ b/sdk/pubkey/src/lib.rs @@ -305,6 +305,12 @@ impl Pubkey { Self(pubkey_array) } + /// Decode a string into a Pubkey, usable in a const context + pub const fn from_str_const(s: &str) -> Self { + let id_array = five8_const::decode_32_const(s); + Pubkey::new_from_array(id_array) + } + /// unique Pubkey for tests and benchmarks. pub fn new_unique() -> Self { use solana_atomic_u64::AtomicU64; @@ -1009,6 +1015,105 @@ impl Pubkey { } } +/// Convenience macro to declare a static public key and functions to interact with it. +/// +/// Input: a single literal base58 string representation of a program's ID. +/// +/// # Example +/// +/// ``` +/// # // wrapper is used so that the macro invocation occurs in the item position +/// # // rather than in the statement position which isn't allowed. +/// use std::str::FromStr; +/// use solana_pubkey::{declare_id, Pubkey}; +/// +/// # mod item_wrapper { +/// # use solana_pubkey::declare_id; +/// declare_id!("My11111111111111111111111111111111111111111"); +/// # } +/// # use item_wrapper::id; +/// +/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); +/// assert_eq!(id(), my_id); +/// ``` +#[macro_export] +macro_rules! declare_id { + ($address:expr) => { + /// The const program ID. + pub const ID: $crate::Pubkey = $crate::Pubkey::from_str_const($address); + + /// Returns `true` if given pubkey is the program ID. + // TODO make this const once `derive_const` makes it out of nightly + // and we can `derive_const(PartialEq)` on `Pubkey`. + pub fn check_id(id: &$crate::Pubkey) -> bool { + id == &ID + } + + /// Returns the program ID. 
+ pub const fn id() -> $crate::Pubkey { + ID + } + + #[cfg(test)] + #[test] + fn test_id() { + assert!(check_id(&id())); + } + }; +} + +/// Same as [`declare_id`] except that it reports that this ID has been deprecated. +#[macro_export] +macro_rules! declare_deprecated_id { + ($address:expr) => { + /// The const program ID. + pub const ID: $crate::Pubkey = $crate::Pubkey::from_str_const($address); + + /// Returns `true` if given pubkey is the program ID. + // TODO make this const once `derive_const` makes it out of nightly + // and we can `derive_const(PartialEq)` on `Pubkey`. + #[deprecated()] + pub fn check_id(id: &$crate::Pubkey) -> bool { + id == &ID + } + + /// Returns the program ID. + #[deprecated()] + pub const fn id() -> $crate::Pubkey { + ID + } + + #[cfg(test)] + #[test] + #[allow(deprecated)] + fn test_id() { + assert!(check_id(&id())); + } + }; +} + +/// Convenience macro to define a static public key. +/// +/// Input: a single literal base58 string representation of a Pubkey. +/// +/// # Example +/// +/// ``` +/// use std::str::FromStr; +/// use solana_pubkey::{pubkey, Pubkey}; +/// +/// static ID: Pubkey = pubkey!("My11111111111111111111111111111111111111111"); +/// +/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); +/// assert_eq!(ID, my_id); +/// ``` +#[macro_export] +macro_rules! pubkey { + ($input:literal) => { + $crate::Pubkey::from_str_const($input) + }; +} + #[cfg(test)] mod tests { use {super::*, strum::IntoEnumIterator}; @@ -1279,4 +1384,14 @@ mod tests { ); } } + + #[test] + fn test_pubkey_macro() { + const PK: Pubkey = Pubkey::from_str_const("9h1HyLCW5dZnBVap8C5egQ9Z6pHyjsh5MNy83iPqqRuq"); + assert_eq!(pubkey!("9h1HyLCW5dZnBVap8C5egQ9Z6pHyjsh5MNy83iPqqRuq"), PK); + assert_eq!( + Pubkey::from_str("9h1HyLCW5dZnBVap8C5egQ9Z6pHyjsh5MNy83iPqqRuq").unwrap(), + PK + ); + } } diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 11f65d40eedaec..ed07e8c7806b57 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -118,6 +118,23 @@ pub use solana_derivation_path as derivation_path; pub use solana_feature_set as feature_set; #[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")] pub use solana_program_memory as program_memory; +#[deprecated(since = "2.1.0", note = "Use `solana_pubkey::pubkey` instead")] +/// Convenience macro to define a static public key. +/// +/// Input: a single literal base58 string representation of a Pubkey +/// +/// # Example +/// +/// ``` +/// use std::str::FromStr; +/// use solana_program::{pubkey, pubkey::Pubkey}; +/// +/// static ID: Pubkey = pubkey!("My11111111111111111111111111111111111111111"); +/// +/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); +/// assert_eq!(ID, my_id); +/// ``` +pub use solana_pubkey::pubkey; #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")] pub use solana_sanitize as sanitize; /// Same as `declare_id` except report that this id has been deprecated. @@ -144,22 +161,6 @@ pub use solana_sdk_macro::declare_deprecated_id; /// assert_eq!(id(), my_id); /// ``` pub use solana_sdk_macro::declare_id; -/// Convenience macro to define a static public key. 
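Both `declare_id!` and `pubkey!` now expand to `Pubkey::from_str_const`, so the base58 decode happens during const evaluation. A minimal sketch of what this enables (illustrative only, not part of the patch; the constant name is made up):

use solana_pubkey::{pubkey, Pubkey};

// Decoded at compile time by `five8_const`; a malformed base58 literal in a
// const context fails the build instead of panicking at runtime.
const MY_PROGRAM_ID: Pubkey = pubkey!("My11111111111111111111111111111111111111111");

fn main() {
    assert_eq!(
        MY_PROGRAM_ID,
        Pubkey::from_str_const("My11111111111111111111111111111111111111111")
    );
}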
-///
-/// Input: a single literal base58 string representation of a Pubkey
-///
-/// # Example
-///
-/// ```
-/// use std::str::FromStr;
-/// use solana_program::{pubkey, pubkey::Pubkey};
-///
-/// static ID: Pubkey = pubkey!("My11111111111111111111111111111111111111111");
-///
-/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
-/// assert_eq!(ID, my_id);
-/// ```
-pub use solana_sdk_macro::pubkey;
 /// Convenience macro to define multiple static public keys.
 pub use solana_sdk_macro::pubkeys;
 #[deprecated(since = "2.1.0", note = "Use `solana-secp256k1-recover` crate instead")]
diff --git a/sdk/tests/test_pubkey_export.rs b/sdk/tests/test_pubkey_export.rs
new file mode 100644
index 00000000000000..de26efb6d7985d
--- /dev/null
+++ b/sdk/tests/test_pubkey_export.rs
@@ -0,0 +1,8 @@
+// Simple test to make sure we haven't broken the re-export of the pubkey macro in solana_sdk
+#[test]
+fn test_sdk_pubkey_export() {
+    assert_eq!(
+        solana_sdk::pubkey!("ZkTokenProof1111111111111111111111111111111"),
+        solana_pubkey::pubkey!("ZkTokenProof1111111111111111111111111111111")
+    );
+}

From 6c264d7d4a80936cc9a479e1a0a37ae89bfcf45c Mon Sep 17 00:00:00 2001
From: HaoranYi <219428+HaoranYi@users.noreply.github.com>
Date: Mon, 14 Oct 2024 09:24:24 -0500
Subject: [PATCH 494/529] accounts-db: use smaller thread pool for hash scan (#3097)

* use smaller thread pool for hash scan

* pr feedbacks

* clippy

* up to 6 threads

* rename thread name

---------

Co-authored-by: HaoranYi
---
 accounts-db/src/accounts_db.rs | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index ba8ba98e6ec3b0..7ed43e238eb546 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -1384,6 +1384,8 @@ pub struct AccountsDb {
     pub thread_pool_clean: ThreadPool,
 
+    pub thread_pool_hash: ThreadPool,
+
     bank_hash_stats: Mutex>,
     accounts_delta_hashes: Mutex>,
     accounts_hashes: Mutex>,
@@ -1679,6 +1681,16 @@ pub fn make_min_priority_thread_pool() -> ThreadPool {
         .unwrap()
 }
 
+pub fn make_hash_thread_pool() -> ThreadPool {
+    // 1/8 of the number of cpus and up to 6 threads gives good balance for the system.
+    let num_threads = (num_cpus::get() / 8).clamp(2, 6);
+    rayon::ThreadPoolBuilder::new()
+        .thread_name(|i| format!("solAcctHash{i:02}"))
+        .num_threads(num_threads)
+        .build()
+        .unwrap()
+}
+
 #[cfg(feature = "frozen-abi")]
 impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
     fn example() -> Self {
@@ -1790,6 +1802,7 @@ impl AccountsDb {
                 .build()
                 .unwrap(),
             thread_pool_clean: make_min_priority_thread_pool(),
+            thread_pool_hash: make_hash_thread_pool(),
             bank_hash_stats: Mutex::new(bank_hash_stats),
             accounts_delta_hashes: Mutex::new(HashMap::new()),
             accounts_hashes: Mutex::new(HashMap::new()),
@@ -7046,7 +7059,7 @@ impl AccountsDb {
         };
 
         let result = if use_bg_thread_pool {
-            self.thread_pool_clean.install(scan_and_hash)
+            self.thread_pool_hash.install(scan_and_hash)
         } else {
             scan_and_hash()
         };

From f03bce592f20c9dc619a5269fc18940795444f6e Mon Sep 17 00:00:00 2001
From: kirill lykov
Date: Mon, 14 Oct 2024 16:25:12 +0200
Subject: [PATCH 495/529] Add a new client implementation targeting TPU (#2905)

Although tpu-client, the component currently used for sending transactions over TPU, is well suited for sending transactions in bulk, it was not designed to handle a stream of transactions.
Additionally, the call stack for sending transactions using tpu-client has grown too deep, making the code difficult to maintain. This motivated us to create a new client implementation that is optimized for handling transaction streams and is built using Tokio from the start. --- Cargo.lock | 24 + Cargo.toml | 1 + tpu-client-next/Cargo.toml | 34 + tpu-client-next/src/connection_worker.rs | 258 +++++++ .../src/connection_workers_scheduler.rs | 213 ++++++ tpu-client-next/src/leader_updater.rs | 124 ++++ tpu-client-next/src/lib.rs | 12 + tpu-client-next/src/quic_networking.rs | 70 ++ tpu-client-next/src/quic_networking/error.rs | 49 ++ .../quic_client_certificate.rs | 17 + .../skip_server_verification.rs | 74 ++ tpu-client-next/src/send_transaction_stats.rs | 166 +++++ tpu-client-next/src/transaction_batch.rs | 33 + tpu-client-next/src/workers_cache.rs | 184 +++++ .../connection_workers_scheduler_test.rs | 671 ++++++++++++++++++ 15 files changed, 1930 insertions(+) create mode 100644 tpu-client-next/Cargo.toml create mode 100644 tpu-client-next/src/connection_worker.rs create mode 100644 tpu-client-next/src/connection_workers_scheduler.rs create mode 100644 tpu-client-next/src/leader_updater.rs create mode 100644 tpu-client-next/src/lib.rs create mode 100644 tpu-client-next/src/quic_networking.rs create mode 100644 tpu-client-next/src/quic_networking/error.rs create mode 100644 tpu-client-next/src/quic_networking/quic_client_certificate.rs create mode 100644 tpu-client-next/src/quic_networking/skip_server_verification.rs create mode 100644 tpu-client-next/src/send_transaction_stats.rs create mode 100644 tpu-client-next/src/transaction_batch.rs create mode 100644 tpu-client-next/src/workers_cache.rs create mode 100644 tpu-client-next/tests/connection_workers_scheduler_test.rs diff --git a/Cargo.lock b/Cargo.lock index 0e9569f47e6a82..8be26642cb791d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8407,6 +8407,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-tpu-client-next" +version = "2.1.0" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures 0.3.31", + "log", + "lru", + "quinn", + "rustls 0.23.14", + "solana-cli-config", + "solana-connection-cache", + "solana-logger", + "solana-measure", + "solana-rpc-client", + "solana-sdk", + "solana-streamer", + "solana-tpu-client", + "thiserror", + "tokio", + "tokio-util 0.7.12", +] + [[package]] name = "solana-transaction-dos" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index 925d3c01cb13e3..db33afda423e7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,6 +148,7 @@ members = [ "tokens", "tps-client", "tpu-client", + "tpu-client-next", "transaction-dos", "transaction-metrics-tracker", "transaction-status", diff --git a/tpu-client-next/Cargo.toml b/tpu-client-next/Cargo.toml new file mode 100644 index 00000000000000..13ceb0e584a10f --- /dev/null +++ b/tpu-client-next/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "solana-tpu-client-next" +description = "Client code to send transaction to TPU." 
+version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +async-trait = { workspace = true } +log = { workspace = true } +lru = { workspace = true } +quinn = { workspace = true } +rustls = { workspace = true } +solana-connection-cache = { workspace = true } +solana-logger = { workspace = true } +solana-measure = { workspace = true } +solana-rpc-client = { workspace = true } +solana-sdk = { workspace = true } +solana-streamer = { workspace = true } +solana-tpu-client = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } + +[dev-dependencies] +crossbeam-channel = { workspace = true } +futures = { workspace = true } +solana-cli-config = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/tpu-client-next/src/connection_worker.rs b/tpu-client-next/src/connection_worker.rs new file mode 100644 index 00000000000000..7d77bc3f6ed2a2 --- /dev/null +++ b/tpu-client-next/src/connection_worker.rs @@ -0,0 +1,258 @@ +//! This module defines [`ConnectionWorker`] which encapsulates the functionality +//! needed to handle one connection within the scope of task. + +use { + super::SendTransactionStats, + crate::{ + quic_networking::send_data_over_stream, send_transaction_stats::record_error, + transaction_batch::TransactionBatch, + }, + log::*, + quinn::{ConnectError, Connection, Endpoint}, + solana_measure::measure::Measure, + solana_sdk::{ + clock::{DEFAULT_MS_PER_SLOT, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, + timing::timestamp, + }, + std::net::SocketAddr, + tokio::{ + sync::mpsc, + time::{sleep, Duration}, + }, + tokio_util::sync::CancellationToken, +}; + +/// Interval between retry attempts for creating a new connection. This value is +/// a best-effort estimate, based on current network conditions. +const RETRY_SLEEP_INTERVAL: Duration = + Duration::from_millis(NUM_CONSECUTIVE_LEADER_SLOTS * DEFAULT_MS_PER_SLOT); + +/// Maximum age (in milliseconds) of a blockhash, beyond which transaction +/// batches are dropped. +const MAX_PROCESSING_AGE_MS: u64 = MAX_PROCESSING_AGE as u64 * DEFAULT_MS_PER_SLOT; + +/// [`ConnectionState`] represents the current state of a quic connection. +/// +/// It tracks the lifecycle of connection from initial setup to closing phase. +/// The transition function between states is defined in `ConnectionWorker` +/// implementation. +enum ConnectionState { + NotSetup, + Active(Connection), + Retry(usize), + Closing, +} + +impl Drop for ConnectionState { + /// When [`ConnectionState`] is dropped, underlying connection is closed + /// which means that there is no guarantee that the open streams will + /// finish. + fn drop(&mut self) { + if let Self::Active(connection) = self { + debug!( + "Close connection with {:?}, stats: {:?}. All pending streams will be dropped.", + connection.remote_address(), + connection.stats() + ); + connection.close(0u32.into(), b"done"); + } + } +} + +/// [`ConnectionWorker`] holds connection to the validator with address `peer`. +/// +/// If connection has been closed, [`ConnectionWorker`] tries to reconnect +/// `max_reconnect_attempts` times. If connection is in `Active` state, it sends +/// transactions received from `transactions_receiver`. Additionally, it +/// accumulates statistics about connections and streams failures. 
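To make the lifecycle described above concrete, here is a minimal sketch of how a caller is expected to drive one worker; it mirrors `ConnectionWorkersScheduler::spawn_worker` later in this patch, and the channel size and retry count are arbitrary example values:

// Channel through which the scheduler hands transaction batches to the worker.
let (txs_sender, txs_receiver) = tokio::sync::mpsc::channel(2);
let (mut worker, cancel) = ConnectionWorker::new(
    endpoint.clone(), // quinn::Endpoint shared by all workers
    peer,             // SocketAddr of the target leader
    txs_receiver,
    false, // do check transaction age
    4,     // max_reconnect_attempts
);
// `cancel` lets the caller stop the worker early; the task itself runs until
// cancelled or until `txs_sender` is dropped, then yields its stats.
let handle = tokio::spawn(async move {
    worker.run().await;
    worker.transaction_stats().clone()
});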
+pub(crate) struct ConnectionWorker {
+    endpoint: Endpoint,
+    peer: SocketAddr,
+    transactions_receiver: mpsc::Receiver,
+    connection: ConnectionState,
+    skip_check_transaction_age: bool,
+    max_reconnect_attempts: usize,
+    send_txs_stats: SendTransactionStats,
+    cancel: CancellationToken,
+}
+
+impl ConnectionWorker {
+    /// Constructs a [`ConnectionWorker`].
+    ///
+    /// [`ConnectionWorker`] maintains a connection to a `peer` and processes
+    /// transactions from `transactions_receiver`. If
+    /// `skip_check_transaction_age` is set to `true`, the worker skips checking
+    /// for transaction blockhash expiration. The `max_reconnect_attempts`
+    /// parameter controls how many times the worker will attempt to reconnect
+    /// in case of connection failure. Returns the created `ConnectionWorker`
+    /// along with a cancellation token that can be used by the caller to stop
+    /// the worker.
+    pub fn new(
+        endpoint: Endpoint,
+        peer: SocketAddr,
+        transactions_receiver: mpsc::Receiver,
+        skip_check_transaction_age: bool,
+        max_reconnect_attempts: usize,
+    ) -> (Self, CancellationToken) {
+        let cancel = CancellationToken::new();
+
+        let this = Self {
+            endpoint,
+            peer,
+            transactions_receiver,
+            connection: ConnectionState::NotSetup,
+            skip_check_transaction_age,
+            max_reconnect_attempts,
+            send_txs_stats: SendTransactionStats::default(),
+            cancel: cancel.clone(),
+        };
+
+        (this, cancel)
+    }
+
+    /// Starts the main loop of the [`ConnectionWorker`].
+    ///
+    /// This method manages the connection to the peer and handles state
+    /// transitions. It runs indefinitely until the connection is closed or an
+    /// unrecoverable error occurs.
+    pub async fn run(&mut self) {
+        let cancel = self.cancel.clone();
+
+        let main_loop = async move {
+            loop {
+                match &self.connection {
+                    ConnectionState::Closing => {
+                        break;
+                    }
+                    ConnectionState::NotSetup => {
+                        self.create_connection(0).await;
+                    }
+                    ConnectionState::Active(connection) => {
+                        let Some(transactions) = self.transactions_receiver.recv().await else {
+                            debug!("Transactions sender has been dropped.");
+                            self.connection = ConnectionState::Closing;
+                            continue;
+                        };
+                        self.send_transactions(connection.clone(), transactions)
+                            .await;
+                    }
+                    ConnectionState::Retry(num_reconnects) => {
+                        if *num_reconnects > self.max_reconnect_attempts {
+                            error!("Failed to establish connection: reached max reconnect attempts.");
+                            self.connection = ConnectionState::Closing;
+                            continue;
+                        }
+                        sleep(RETRY_SLEEP_INTERVAL).await;
+                        self.reconnect(*num_reconnects).await;
+                    }
+                }
+            }
+        };
+
+        tokio::select! {
+            () = main_loop => (),
+            () = cancel.cancelled() => (),
+        }
+    }
+
+    /// Retrieves the statistics for transactions sent by this worker.
+    pub fn transaction_stats(&self) -> &SendTransactionStats {
+        &self.send_txs_stats
+    }
+
+    /// Sends a batch of transactions using the provided `connection`.
+    ///
+    /// Each transaction in the batch is sent over its own QUIC stream, one at a
+    /// time, which prevents traffic fragmentation and yields better TPS than a
+    /// multistream send. If the batch is determined to be outdated and the flag
+    /// `skip_check_transaction_age` is unset, it will be dropped without being
+    /// sent.
+    ///
+    /// In case of error, it does not retry sending the same transactions.
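For concreteness, with the `solana_sdk` constants imported at the top of this file (assuming their current values of 4 consecutive leader slots, 400 ms per slot, and a maximum processing age of 150 slots), the two timing knobs above work out to:

// RETRY_SLEEP_INTERVAL = NUM_CONSECUTIVE_LEADER_SLOTS * DEFAULT_MS_PER_SLOT
const RETRY_SLEEP_INTERVAL_MS: u64 = 4 * 400; // 1_600 ms
// MAX_PROCESSING_AGE_MS = MAX_PROCESSING_AGE * DEFAULT_MS_PER_SLOT
const MAX_PROCESSING_AGE_MS_EXAMPLE: u64 = 150 * 400; // 60_000 ms, roughly one minute

So a batch older than about a minute is dropped rather than sent, unless `skip_check_transaction_age` is set.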
+    async fn send_transactions(&mut self, connection: Connection, transactions: TransactionBatch) {
+        let now = timestamp();
+        if !self.skip_check_transaction_age
+            && now.saturating_sub(transactions.timestamp()) > MAX_PROCESSING_AGE_MS
+        {
+            debug!("Drop outdated transaction batch.");
+            return;
+        }
+        let mut measure_send = Measure::start("send transaction batch");
+        for data in transactions.into_iter() {
+            let result = send_data_over_stream(&connection, &data).await;
+
+            if let Err(error) = result {
+                trace!("Failed to send transaction over stream with error: {error}.");
+                record_error(error, &mut self.send_txs_stats);
+                self.connection = ConnectionState::Retry(0);
+            } else {
+                self.send_txs_stats.successfully_sent =
+                    self.send_txs_stats.successfully_sent.saturating_add(1);
+            }
+        }
+        measure_send.stop();
+        debug!(
+            "Time to send transactions batch: {} us",
+            measure_send.as_us()
+        );
+    }
+
+    /// Attempts to create a new connection to the specified `peer` address.
+    ///
+    /// If the connection is successful, the state is updated to `Active`.
+    ///
+    /// If an error occurs, the state may transition to `Retry` or `Closing`,
+    /// depending on the nature of the error.
+    async fn create_connection(&mut self, max_retries_attempt: usize) {
+        let connecting = self.endpoint.connect(self.peer, "connect");
+        match connecting {
+            Ok(connecting) => {
+                let mut measure_connection = Measure::start("establish connection");
+                let res = connecting.await;
+                measure_connection.stop();
+                debug!(
+                    "Establishing connection with {} took: {} us",
+                    self.peer,
+                    measure_connection.as_us()
+                );
+                match res {
+                    Ok(connection) => {
+                        self.connection = ConnectionState::Active(connection);
+                    }
+                    Err(err) => {
+                        warn!("Connection error {}: {}", self.peer, err);
+                        record_error(err.into(), &mut self.send_txs_stats);
+                        self.connection =
+                            ConnectionState::Retry(max_retries_attempt.saturating_add(1));
+                    }
+                }
+            }
+            Err(connecting_error) => {
+                record_error(connecting_error.clone().into(), &mut self.send_txs_stats);
+                match connecting_error {
+                    ConnectError::EndpointStopping => {
+                        debug!("Endpoint stopping, exit connection worker.");
+                        self.connection = ConnectionState::Closing;
+                    }
+                    ConnectError::InvalidRemoteAddress(_) => {
+                        warn!("Invalid remote address.");
+                        self.connection = ConnectionState::Closing;
+                    }
+                    e => {
+                        error!("Unexpected error happened while trying to create connection: {e}");
+                        self.connection = ConnectionState::Closing;
+                    }
+                }
+            }
+        }
+    }
+
+    /// Attempts to reconnect to the peer after a connection failure.
+    async fn reconnect(&mut self, num_reconnects: usize) {
+        debug!("Trying to reconnect. Reopen connection, 0rtt is not implemented yet.");
+        // We could reconnect using 0-RTT, but it is not a priority for now. Check if we
+        // need to call config.enable_0rtt() on the client side and where
+        // session tickets are stored.
+        self.create_connection(num_reconnects).await;
+    }
+}
diff --git a/tpu-client-next/src/connection_workers_scheduler.rs b/tpu-client-next/src/connection_workers_scheduler.rs
new file mode 100644
index 00000000000000..82b038827b48eb
--- /dev/null
+++ b/tpu-client-next/src/connection_workers_scheduler.rs
@@ -0,0 +1,213 @@
+//! This module defines [`ConnectionWorkersScheduler`] which sends transactions
+//! to the upcoming leaders.
+
+use {
+    super::{leader_updater::LeaderUpdater, SendTransactionStatsPerAddr},
+    crate::{
+        connection_worker::ConnectionWorker,
+        quic_networking::{
+            create_client_config, create_client_endpoint, QuicClientCertificate, QuicError,
+        },
+        transaction_batch::TransactionBatch,
+        workers_cache::{WorkerInfo, WorkersCache, WorkersCacheError},
+    },
+    log::*,
+    quinn::Endpoint,
+    solana_sdk::signature::Keypair,
+    std::{net::SocketAddr, sync::Arc},
+    thiserror::Error,
+    tokio::sync::mpsc,
+    tokio_util::sync::CancellationToken,
+};
+
+/// The [`ConnectionWorkersScheduler`] sends transactions from the provided
+/// receiver channel to upcoming leaders. It obtains information about future
+/// leaders from the implementation of the [`LeaderUpdater`] trait.
+///
+/// Internally, it manages and coordinates multiple network connections,
+/// scheduling and overseeing connection workers.
+pub struct ConnectionWorkersScheduler;
+
+/// Errors that arise from running the [`ConnectionWorkersScheduler`].
+#[derive(Debug, Error, PartialEq)]
+pub enum ConnectionWorkersSchedulerError {
+    #[error(transparent)]
+    QuicError(#[from] QuicError),
+    #[error(transparent)]
+    WorkersCacheError(#[from] WorkersCacheError),
+    #[error("Leader receiver unexpectedly dropped.")]
+    LeaderReceiverDropped,
+}
+
+/// Configuration for the [`ConnectionWorkersScheduler`].
+///
+/// This struct holds the necessary settings to initialize and manage connection
+/// workers, including network binding, identity, connection limits, and
+/// behavior related to transaction handling.
+pub struct ConnectionWorkersSchedulerConfig {
+    /// The local address to bind the scheduler to.
+    pub bind: SocketAddr,
+
+    /// Optional stake identity keypair used in the endpoint certificate for
+    /// identifying the sender.
+    pub stake_identity: Option,
+
+    /// The number of connections to be maintained by the scheduler.
+    pub num_connections: usize,
+
+    /// Whether to skip checking the transaction blockhash expiration.
+    pub skip_check_transaction_age: bool,
+
+    /// The size of the channel used to transmit transaction batches to the
+    /// worker tasks.
+    pub worker_channel_size: usize,
+
+    /// The maximum number of reconnection attempts allowed in case of
+    /// connection failure.
+    pub max_reconnect_attempts: usize,
+
+    /// The number of slots to look ahead during the leader estimation
+    /// procedure. Determines how far into the future leaders are estimated,
+    /// allowing connections to be established with those leaders in advance.
+    pub lookahead_slots: u64,
+}
+
+impl ConnectionWorkersScheduler {
+    /// Starts the scheduler, which manages the distribution of transactions to
+    /// the network's upcoming leaders.
+    ///
+    /// Runs the main loop that handles worker scheduling and management for
+    /// connections. Returns the QUIC error statistics per connection address,
+    /// or an error.
+    ///
+    /// Importantly, if some transactions were not delivered due to network
+    /// problems, they will not be retried when the problem is resolved.
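A condensed usage sketch, adapted from the integration test at the end of this patch; `rpc_client`, `websocket_url` and `wire_transactions` are assumed to be provided by the caller:

let config = ConnectionWorkersSchedulerConfig {
    bind: "127.0.0.1:0".parse().unwrap(),
    stake_identity: None, // unstaked client
    num_connections: 1,
    skip_check_transaction_age: false,
    worker_channel_size: 2,
    max_reconnect_attempts: 4,
    lookahead_slots: 1,
};
let leader_updater = create_leader_updater(rpc_client, websocket_url, None).await?;
let (tx_sender, tx_receiver) = tokio::sync::mpsc::channel(1);
let cancel = CancellationToken::new();
let scheduler = tokio::spawn(ConnectionWorkersScheduler::run(
    config,
    leader_updater,
    tx_receiver,
    cancel.clone(),
));
// Feed wire-encoded transactions; dropping the sender ends the run loop.
tx_sender.send(TransactionBatch::new(wire_transactions)).await?;
drop(tx_sender);
let stats_per_addr = scheduler.await??;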
+    pub async fn run(
+        ConnectionWorkersSchedulerConfig {
+            bind,
+            stake_identity: validator_identity,
+            num_connections,
+            skip_check_transaction_age,
+            worker_channel_size,
+            max_reconnect_attempts,
+            lookahead_slots,
+        }: ConnectionWorkersSchedulerConfig,
+        mut leader_updater: Box,
+        mut transaction_receiver: mpsc::Receiver,
+        cancel: CancellationToken,
+    ) -> Result {
+        let endpoint = Self::setup_endpoint(bind, validator_identity)?;
+        debug!("Client endpoint bind address: {:?}", endpoint.local_addr());
+        let mut workers = WorkersCache::new(num_connections, cancel.clone());
+
+        loop {
+            let transaction_batch = tokio::select! {
+                recv_res = transaction_receiver.recv() => match recv_res {
+                    Some(txs) => txs,
+                    None => {
+                        debug!("End of `transaction_receiver`: shutting down.");
+                        break;
+                    }
+                },
+                () = cancel.cancelled() => {
+                    debug!("Cancelled: Shutting down");
+                    break;
+                }
+            };
+            let updated_leaders = leader_updater.next_leaders(lookahead_slots);
+            let new_leader = &updated_leaders[0];
+            let future_leaders = &updated_leaders[1..];
+            if !workers.contains(new_leader) {
+                debug!("No existing workers for {new_leader:?}, starting a new one.");
+                let worker = Self::spawn_worker(
+                    &endpoint,
+                    new_leader,
+                    worker_channel_size,
+                    skip_check_transaction_age,
+                    max_reconnect_attempts,
+                );
+                workers.push(*new_leader, worker).await;
+            }
+
+            tokio::select! {
+                send_res = workers.send_transactions_to_address(new_leader, transaction_batch) => match send_res {
+                    Ok(()) => (),
+                    Err(WorkersCacheError::ShutdownError) => {
+                        debug!("Connection to {new_leader} was closed, worker cache shutdown");
+                    }
+                    Err(err) => {
+                        warn!("Connection to {new_leader} was closed, worker error: {err}");
+                        // If we failed to send the batch, it will be dropped.
+                    }
+                },
+                () = cancel.cancelled() => {
+                    debug!("Cancelled: Shutting down");
+                    break;
+                }
+            };
+
+            // Regardless of who the current leader is, add future leaders to the
+            // cache to hide the latency of opening the connection.
+            for peer in future_leaders {
+                if !workers.contains(peer) {
+                    let worker = Self::spawn_worker(
+                        &endpoint,
+                        peer,
+                        worker_channel_size,
+                        skip_check_transaction_age,
+                        max_reconnect_attempts,
+                    );
+                    workers.push(*peer, worker).await;
+                }
+            }
+        }
+
+        workers.shutdown().await;
+
+        endpoint.close(0u32.into(), b"Closing connection");
+        leader_updater.stop().await;
+        Ok(workers.transaction_stats().clone())
+    }
+
+    /// Sets up the QUIC endpoint for the scheduler to handle connections.
+    fn setup_endpoint(
+        bind: SocketAddr,
+        validator_identity: Option,
+    ) -> Result {
+        let client_certificate = if let Some(validator_identity) = validator_identity {
+            Arc::new(QuicClientCertificate::new(&validator_identity))
+        } else {
+            Arc::new(QuicClientCertificate::new(&Keypair::new()))
+        };
+        let client_config = create_client_config(client_certificate);
+        let endpoint = create_client_endpoint(bind, client_config)?;
+        Ok(endpoint)
+    }
+
+    /// Spawns a worker to handle communication with a given peer.
+ fn spawn_worker( + endpoint: &Endpoint, + peer: &SocketAddr, + worker_channel_size: usize, + skip_check_transaction_age: bool, + max_reconnect_attempts: usize, + ) -> WorkerInfo { + let (txs_sender, txs_receiver) = mpsc::channel(worker_channel_size); + let endpoint = endpoint.clone(); + let peer = *peer; + + let (mut worker, cancel) = ConnectionWorker::new( + endpoint, + peer, + txs_receiver, + skip_check_transaction_age, + max_reconnect_attempts, + ); + let handle = tokio::spawn(async move { + worker.run().await; + worker.transaction_stats().clone() + }); + + WorkerInfo::new(txs_sender, handle, cancel) + } +} diff --git a/tpu-client-next/src/leader_updater.rs b/tpu-client-next/src/leader_updater.rs new file mode 100644 index 00000000000000..5e07b9b0bfe612 --- /dev/null +++ b/tpu-client-next/src/leader_updater.rs @@ -0,0 +1,124 @@ +//! This module provides [`LeaderUpdater`] trait along with +//! `create_leader_updater` function to create an instance of this trait. +//! +//! Currently, the main purpose of [`LeaderUpdater`] is to abstract over leader +//! updates, hiding the details of how leaders are retrieved and which +//! structures are used. It contains trait implementations +//! `LeaderUpdaterService` and `PinnedLeaderUpdater`, where +//! `LeaderUpdaterService` keeps [`LeaderTpuService`] internal to this module. +//! Yet, it also allows to implement custom leader estimation. + +use { + async_trait::async_trait, + log::*, + solana_connection_cache::connection_cache::Protocol, + solana_rpc_client::nonblocking::rpc_client::RpcClient, + solana_tpu_client::nonblocking::tpu_client::LeaderTpuService, + std::{ + fmt, + net::SocketAddr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + }, +}; + +/// [`LeaderUpdater`] trait abstracts out functionality required for the +/// [`ConnectionWorkersScheduler`](crate::ConnectionWorkersScheduler) to +/// identify next leaders to send transactions to. +#[async_trait] +pub trait LeaderUpdater: Send { + /// Returns next unique leaders for the next `lookahead_slots` starting from + /// current estimated slot. + /// + /// If the current leader estimation is incorrect and transactions are sent to + /// only one estimated leader, there is a risk of losing all the transactions, + /// depending on the forwarding policy. + fn next_leaders(&self, lookahead_slots: u64) -> Vec; + + /// Stop [`LeaderUpdater`] and releases all associated resources. + async fn stop(&mut self); +} + +/// Error type for [`LeaderUpdater`]. +pub struct LeaderUpdaterError; + +impl fmt::Display for LeaderUpdaterError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Leader updater encountered an error") + } +} + +impl fmt::Debug for LeaderUpdaterError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "LeaderUpdaterError") + } +} + +/// Creates a [`LeaderUpdater`] based on the configuration provided by the +/// caller. +/// +/// If `pinned_address` is provided, it returns a `PinnedLeaderUpdater` that +/// always returns the provided address instead of checking leader schedule. +/// Otherwise, it creates a `LeaderUpdaterService` which dynamically updates the +/// leaders by connecting to the network via the [`LeaderTpuService`]. 
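Because the trait is public, callers are not limited to the two implementations below. A hypothetical custom estimator only has to provide `next_leaders` and `stop`:

use {async_trait::async_trait, std::net::SocketAddr};

struct RoundRobinLeaders {
    // Hypothetical fixed leader set, e.g. loaded from a config file.
    leaders: Vec<SocketAddr>,
}

#[async_trait]
impl LeaderUpdater for RoundRobinLeaders {
    fn next_leaders(&self, lookahead_slots: u64) -> Vec<SocketAddr> {
        // Cycle through the fixed set instead of consulting the cluster.
        self.leaders
            .iter()
            .cycle()
            .take(lookahead_slots as usize)
            .cloned()
            .collect()
    }

    async fn stop(&mut self) {}
}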
+pub async fn create_leader_updater( + rpc_client: Arc, + websocket_url: String, + pinned_address: Option, +) -> Result, LeaderUpdaterError> { + if let Some(pinned_address) = pinned_address { + return Ok(Box::new(PinnedLeaderUpdater { + address: vec![pinned_address], + })); + } + + let exit = Arc::new(AtomicBool::new(false)); + let leader_tpu_service = + LeaderTpuService::new(rpc_client, &websocket_url, Protocol::QUIC, exit.clone()) + .await + .map_err(|error| { + error!("Failed to create a LeaderTpuService: {error}"); + LeaderUpdaterError + })?; + Ok(Box::new(LeaderUpdaterService { + leader_tpu_service, + exit, + })) +} + +/// `LeaderUpdaterService` is an implementation of the [`LeaderUpdater`] trait +/// that dynamically retrieves the current and upcoming leaders by communicating +/// with the Solana network using [`LeaderTpuService`]. +struct LeaderUpdaterService { + leader_tpu_service: LeaderTpuService, + exit: Arc, +} + +#[async_trait] +impl LeaderUpdater for LeaderUpdaterService { + fn next_leaders(&self, lookahead_slots: u64) -> Vec { + self.leader_tpu_service.leader_tpu_sockets(lookahead_slots) + } + + async fn stop(&mut self) { + self.exit.store(true, Ordering::Relaxed); + self.leader_tpu_service.join().await; + } +} + +/// `PinnedLeaderUpdater` is an implementation of [`LeaderUpdater`] that always +/// returns a fixed, "pinned" leader address. It is mainly used for testing. +struct PinnedLeaderUpdater { + address: Vec, +} + +#[async_trait] +impl LeaderUpdater for PinnedLeaderUpdater { + fn next_leaders(&self, _lookahead_slots: u64) -> Vec { + self.address.clone() + } + + async fn stop(&mut self) {} +} diff --git a/tpu-client-next/src/lib.rs b/tpu-client-next/src/lib.rs new file mode 100644 index 00000000000000..720b3876b47cb4 --- /dev/null +++ b/tpu-client-next/src/lib.rs @@ -0,0 +1,12 @@ +pub(crate) mod connection_worker; +pub mod connection_workers_scheduler; +pub mod send_transaction_stats; +pub(crate) mod workers_cache; +pub use crate::{ + connection_workers_scheduler::{ConnectionWorkersScheduler, ConnectionWorkersSchedulerError}, + send_transaction_stats::{SendTransactionStats, SendTransactionStatsPerAddr}, +}; +pub(crate) mod quic_networking; +pub(crate) use crate::quic_networking::QuicError; +pub mod leader_updater; +pub mod transaction_batch; diff --git a/tpu-client-next/src/quic_networking.rs b/tpu-client-next/src/quic_networking.rs new file mode 100644 index 00000000000000..b18fa469241da9 --- /dev/null +++ b/tpu-client-next/src/quic_networking.rs @@ -0,0 +1,70 @@ +//! Utility code to handle quic networking. 
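The helpers in this module compose as in `ConnectionWorkersScheduler::setup_endpoint` above. Roughly, with error handling elided and `peer_addr` and `wire_transaction` standing in for caller-provided values:

let cert = Arc::new(QuicClientCertificate::new(&Keypair::new()));
let client_config = create_client_config(cert);
let endpoint = create_client_endpoint("127.0.0.1:0".parse().unwrap(), client_config)?;
let connection = endpoint.connect(peer_addr, "connect")?.await?;
// One unidirectional stream per transaction:
send_data_over_stream(&connection, &wire_transaction).await?;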
+ +use { + quinn::{ + crypto::rustls::QuicClientConfig, ClientConfig, Connection, Endpoint, IdleTimeout, + TransportConfig, + }, + skip_server_verification::SkipServerVerification, + solana_sdk::quic::{QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT}, + solana_streamer::nonblocking::quic::ALPN_TPU_PROTOCOL_ID, + std::{net::SocketAddr, sync::Arc}, +}; + +pub mod error; +pub mod quic_client_certificate; +pub mod skip_server_verification; + +pub use { + error::{IoErrorWithPartialEq, QuicError}, + quic_client_certificate::QuicClientCertificate, +}; + +pub(crate) fn create_client_config(client_certificate: Arc) -> ClientConfig { + // adapted from QuicLazyInitializedEndpoint::create_endpoint + let mut crypto = rustls::ClientConfig::builder() + .dangerous() + .with_custom_certificate_verifier(SkipServerVerification::new()) + .with_client_auth_cert( + vec![client_certificate.certificate.clone()], + client_certificate.key.clone_key(), + ) + .expect("Failed to set QUIC client certificates"); + crypto.enable_early_data = true; + crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()]; + + let transport_config = { + let mut res = TransportConfig::default(); + + let timeout = IdleTimeout::try_from(QUIC_MAX_TIMEOUT).unwrap(); + res.max_idle_timeout(Some(timeout)); + res.keep_alive_interval(Some(QUIC_KEEP_ALIVE)); + + res + }; + + let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(crypto).unwrap())); + config.transport_config(Arc::new(transport_config)); + + config +} + +pub(crate) fn create_client_endpoint( + bind_addr: SocketAddr, + client_config: ClientConfig, +) -> Result { + let mut endpoint = Endpoint::client(bind_addr).map_err(IoErrorWithPartialEq::from)?; + endpoint.set_default_client_config(client_config); + Ok(endpoint) +} + +pub(crate) async fn send_data_over_stream( + connection: &Connection, + data: &[u8], +) -> Result<(), QuicError> { + let mut send_stream = connection.open_uni().await?; + send_stream.write_all(data).await.map_err(QuicError::from)?; + + // Stream will be finished when dropped. Finishing here explicitly is a noop. + Ok(()) +} diff --git a/tpu-client-next/src/quic_networking/error.rs b/tpu-client-next/src/quic_networking/error.rs new file mode 100644 index 00000000000000..8fa79265cb69a4 --- /dev/null +++ b/tpu-client-next/src/quic_networking/error.rs @@ -0,0 +1,49 @@ +use { + quinn::{ConnectError, ConnectionError, WriteError}, + std::{ + fmt::{self, Formatter}, + io, + }, + thiserror::Error, +}; + +/// Wrapper for [`io::Error`] implementing [`PartialEq`] to simplify error +/// checking for the [`QuicError`] type. The reasons why [`io::Error`] doesn't +/// implement [`PartialEq`] are discusses in +/// . +#[derive(Debug, Error)] +pub struct IoErrorWithPartialEq(pub io::Error); + +impl PartialEq for IoErrorWithPartialEq { + fn eq(&self, other: &Self) -> bool { + let formatted_self = format!("{self:?}"); + let formatted_other = format!("{other:?}"); + formatted_self == formatted_other + } +} + +impl fmt::Display for IoErrorWithPartialEq { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl From for IoErrorWithPartialEq { + fn from(err: io::Error) -> Self { + IoErrorWithPartialEq(err) + } +} + +/// Error types that can occur when dealing with QUIC connections or +/// transmissions. 
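The `Debug`-string comparison above makes two wrapped errors equal exactly when their kinds and messages format identically, which is all that is needed to keep `QuicError` testable with `assert_eq!`. A small illustration:

use std::io;

let a = IoErrorWithPartialEq(io::Error::new(io::ErrorKind::AddrInUse, "bind failed"));
let b = IoErrorWithPartialEq(io::Error::new(io::ErrorKind::AddrInUse, "bind failed"));
let c = IoErrorWithPartialEq(io::Error::new(io::ErrorKind::PermissionDenied, "bind failed"));
assert_eq!(a, b); // identical Debug output
assert_ne!(a, c); // different kind, different Debug output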
+#[derive(Error, Debug, PartialEq)] +pub enum QuicError { + #[error(transparent)] + StreamWrite(#[from] WriteError), + #[error(transparent)] + Connection(#[from] ConnectionError), + #[error(transparent)] + Connect(#[from] ConnectError), + #[error(transparent)] + Endpoint(#[from] IoErrorWithPartialEq), +} diff --git a/tpu-client-next/src/quic_networking/quic_client_certificate.rs b/tpu-client-next/src/quic_networking/quic_client_certificate.rs new file mode 100644 index 00000000000000..b9f0c8d1cf27a6 --- /dev/null +++ b/tpu-client-next/src/quic_networking/quic_client_certificate.rs @@ -0,0 +1,17 @@ +use { + rustls::pki_types::{CertificateDer, PrivateKeyDer}, + solana_sdk::signature::Keypair, + solana_streamer::tls_certificates::new_dummy_x509_certificate, +}; + +pub struct QuicClientCertificate { + pub certificate: CertificateDer<'static>, + pub key: PrivateKeyDer<'static>, +} + +impl QuicClientCertificate { + pub fn new(keypair: &Keypair) -> Self { + let (certificate, key) = new_dummy_x509_certificate(keypair); + Self { certificate, key } + } +} diff --git a/tpu-client-next/src/quic_networking/skip_server_verification.rs b/tpu-client-next/src/quic_networking/skip_server_verification.rs new file mode 100644 index 00000000000000..70b17e0fcd5bab --- /dev/null +++ b/tpu-client-next/src/quic_networking/skip_server_verification.rs @@ -0,0 +1,74 @@ +use { + rustls::{ + client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, + crypto::{ring, verify_tls12_signature, verify_tls13_signature, CryptoProvider}, + pki_types::{CertificateDer, ServerName, UnixTime}, + DigitallySignedStruct, Error, SignatureScheme, + }, + std::{ + fmt::{self, Debug, Formatter}, + sync::Arc, + }, +}; + +/// Implementation of [`ServerCertVerifier`] that ignores the server +/// certificate. Yet still checks the TLS signatures. +pub(crate) struct SkipServerVerification(Arc); + +impl SkipServerVerification { + pub fn new() -> Arc { + Arc::new(Self(Arc::new(ring::default_provider()))) + } +} + +impl ServerCertVerifier for SkipServerVerification { + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls12_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls13_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + self.0.signature_verification_algorithms.supported_schemes() + } + + fn verify_server_cert( + &self, + _end_entity: &CertificateDer<'_>, + _intermediates: &[CertificateDer<'_>], + _server_name: &ServerName, + _ocsp_response: &[u8], + _now: UnixTime, + ) -> Result { + Ok(ServerCertVerified::assertion()) + } +} + +impl Debug for SkipServerVerification { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("SkipServerVerification") + .finish_non_exhaustive() + } +} diff --git a/tpu-client-next/src/send_transaction_stats.rs b/tpu-client-next/src/send_transaction_stats.rs new file mode 100644 index 00000000000000..abe68b8bf60213 --- /dev/null +++ b/tpu-client-next/src/send_transaction_stats.rs @@ -0,0 +1,166 @@ +//! This module defines [`SendTransactionStats`] which is used to collect per IP +//! statistics about relevant network errors. 
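A small sketch of the intended bookkeeping, using the `record_error` mapping defined below:

let mut stats = SendTransactionStats::default();
record_error(QuicError::StreamWrite(WriteError::ClosedStream), &mut stats);
record_error(QuicError::Connection(ConnectionError::TimedOut), &mut stats);
assert_eq!(stats.write_error_closed_stream, 1);
assert_eq!(stats.connection_error_timed_out, 1);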
+ +use { + super::QuicError, + quinn::{ConnectError, ConnectionError, WriteError}, + std::{collections::HashMap, fmt, net::IpAddr}, +}; + +/// [`SendTransactionStats`] aggregates counters related to sending transactions. +#[derive(Debug, Default, Clone, PartialEq)] +pub struct SendTransactionStats { + pub successfully_sent: u64, + pub connect_error_cids_exhausted: u64, + pub connect_error_invalid_remote_address: u64, + pub connect_error_other: u64, + pub connection_error_application_closed: u64, + pub connection_error_cids_exhausted: u64, + pub connection_error_connection_closed: u64, + pub connection_error_locally_closed: u64, + pub connection_error_reset: u64, + pub connection_error_timed_out: u64, + pub connection_error_transport_error: u64, + pub connection_error_version_mismatch: u64, + pub write_error_closed_stream: u64, + pub write_error_connection_lost: u64, + pub write_error_stopped: u64, + pub write_error_zero_rtt_rejected: u64, +} + +#[allow(clippy::arithmetic_side_effects)] +pub fn record_error(err: QuicError, stats: &mut SendTransactionStats) { + match err { + QuicError::Connect(ConnectError::EndpointStopping) => { + stats.connect_error_other += 1; + } + QuicError::Connect(ConnectError::CidsExhausted) => { + stats.connect_error_cids_exhausted += 1; + } + QuicError::Connect(ConnectError::InvalidServerName(_)) => { + stats.connect_error_other += 1; + } + QuicError::Connect(ConnectError::InvalidRemoteAddress(_)) => { + stats.connect_error_invalid_remote_address += 1; + } + QuicError::Connect(ConnectError::NoDefaultClientConfig) => { + stats.connect_error_other += 1; + } + QuicError::Connect(ConnectError::UnsupportedVersion) => { + stats.connect_error_other += 1; + } + QuicError::Connection(ConnectionError::VersionMismatch) => { + stats.connection_error_version_mismatch += 1; + } + QuicError::Connection(ConnectionError::TransportError(_)) => { + stats.connection_error_transport_error += 1; + } + QuicError::Connection(ConnectionError::ConnectionClosed(_)) => { + stats.connection_error_connection_closed += 1; + } + QuicError::Connection(ConnectionError::ApplicationClosed(_)) => { + stats.connection_error_application_closed += 1; + } + QuicError::Connection(ConnectionError::Reset) => { + stats.connection_error_reset += 1; + } + QuicError::Connection(ConnectionError::TimedOut) => { + stats.connection_error_timed_out += 1; + } + QuicError::Connection(ConnectionError::LocallyClosed) => { + stats.connection_error_locally_closed += 1; + } + QuicError::Connection(ConnectionError::CidsExhausted) => { + stats.connection_error_cids_exhausted += 1; + } + QuicError::StreamWrite(WriteError::Stopped(_)) => { + stats.write_error_stopped += 1; + } + QuicError::StreamWrite(WriteError::ConnectionLost(_)) => { + stats.write_error_connection_lost += 1; + } + QuicError::StreamWrite(WriteError::ClosedStream) => { + stats.write_error_closed_stream += 1; + } + QuicError::StreamWrite(WriteError::ZeroRttRejected) => { + stats.write_error_zero_rtt_rejected += 1; + } + // Endpoint is created on the scheduler level and handled separately + // No counters are used for this case. + QuicError::Endpoint(_) => (), + } +} + +pub type SendTransactionStatsPerAddr = HashMap; + +macro_rules! add_fields { + ($self:ident += $other:ident for: $( $field:ident ),* $(,)? 
) => { + $( + $self.$field = $self.$field.saturating_add($other.$field); + )* + }; +} + +impl SendTransactionStats { + pub fn add(&mut self, other: &SendTransactionStats) { + add_fields!( + self += other for: + successfully_sent, + connect_error_cids_exhausted, + connect_error_invalid_remote_address, + connect_error_other, + connection_error_application_closed, + connection_error_cids_exhausted, + connection_error_connection_closed, + connection_error_locally_closed, + connection_error_reset, + connection_error_timed_out, + connection_error_transport_error, + connection_error_version_mismatch, + write_error_closed_stream, + write_error_connection_lost, + write_error_stopped, + write_error_zero_rtt_rejected, + ); + } +} + +macro_rules! display_send_transaction_stats_body { + ($self:ident, $f:ident, $($field:ident),* $(,)?) => { + write!( + $f, + concat!( + "SendTransactionStats:\n", + $( + "\x20 ", stringify!($field), ": {},\n", + )* + ), + $($self.$field),* + ) + }; +} + +impl fmt::Display for SendTransactionStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + display_send_transaction_stats_body!( + self, + f, + successfully_sent, + connect_error_cids_exhausted, + connect_error_invalid_remote_address, + connect_error_other, + connection_error_application_closed, + connection_error_cids_exhausted, + connection_error_connection_closed, + connection_error_locally_closed, + connection_error_reset, + connection_error_timed_out, + connection_error_transport_error, + connection_error_version_mismatch, + write_error_closed_stream, + write_error_connection_lost, + write_error_stopped, + write_error_zero_rtt_rejected, + ) + } +} diff --git a/tpu-client-next/src/transaction_batch.rs b/tpu-client-next/src/transaction_batch.rs new file mode 100644 index 00000000000000..a3c2d92fc8386e --- /dev/null +++ b/tpu-client-next/src/transaction_batch.rs @@ -0,0 +1,33 @@ +//! This module holds [`TransactionBatch`] structure. + +use solana_sdk::timing::timestamp; + +/// Batch of generated transactions timestamp is used to discard batches which +/// are too old to have valid blockhash. +#[derive(Clone, PartialEq)] +pub struct TransactionBatch { + wired_transactions: Vec, + timestamp: u64, +} + +type WiredTransaction = Vec; + +impl IntoIterator for TransactionBatch { + type Item = Vec; + type IntoIter = std::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.wired_transactions.into_iter() + } +} + +impl TransactionBatch { + pub fn new(wired_transactions: Vec) -> Self { + Self { + wired_transactions, + timestamp: timestamp(), + } + } + pub fn timestamp(&self) -> u64 { + self.timestamp + } +} diff --git a/tpu-client-next/src/workers_cache.rs b/tpu-client-next/src/workers_cache.rs new file mode 100644 index 00000000000000..90d2954b669d7f --- /dev/null +++ b/tpu-client-next/src/workers_cache.rs @@ -0,0 +1,184 @@ +//! This module defines `WorkersCache` along with aux struct `WorkerInfo`. These +//! structures provide mechanisms for caching workers, sending transaction +//! batches, and gathering send transaction statistics. + +use { + super::SendTransactionStats, + crate::transaction_batch::TransactionBatch, + log::*, + lru::LruCache, + std::{ + collections::HashMap, + net::{IpAddr, SocketAddr}, + }, + thiserror::Error, + tokio::{sync::mpsc, task::JoinHandle}, + tokio_util::sync::CancellationToken, +}; + +/// [`WorkerInfo`] holds information about a worker responsible for sending +/// transaction batches. 
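Before moving to the cache itself, note that the `TransactionBatch` defined above is deliberately thin: wire-encoded transactions plus a creation timestamp. A producer-side sketch (the bincode encoding of a `versioned_tx` is an assumption here, matching how transactions are serialized elsewhere in the workspace):

let wire_tx: Vec<u8> = bincode::serialize(&versioned_tx)?;
let batch = TransactionBatch::new(vec![wire_tx]);
// The timestamp recorded at construction is what the connection worker later
// compares against MAX_PROCESSING_AGE_MS.
assert!(batch.timestamp() <= solana_sdk::timing::timestamp());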
+pub(crate) struct WorkerInfo {
+    pub sender: mpsc::Sender,
+    pub handle: JoinHandle,
+    pub cancel: CancellationToken,
+}
+
+impl WorkerInfo {
+    pub fn new(
+        sender: mpsc::Sender,
+        handle: JoinHandle,
+        cancel: CancellationToken,
+    ) -> Self {
+        Self {
+            sender,
+            handle,
+            cancel,
+        }
+    }
+
+    async fn send_transactions(
+        &self,
+        txs_batch: TransactionBatch,
+    ) -> Result<(), WorkersCacheError> {
+        self.sender
+            .send(txs_batch)
+            .await
+            .map_err(|_| WorkersCacheError::ReceiverDropped)?;
+        Ok(())
+    }
+
+    /// Closes the worker by dropping the sender and awaiting the worker's
+    /// statistics.
+    async fn shutdown(self) -> Result {
+        self.cancel.cancel();
+        drop(self.sender);
+        let stats = self
+            .handle
+            .await
+            .map_err(|_| WorkersCacheError::TaskJoinFailure)?;
+        Ok(stats)
+    }
+}
+
+/// [`WorkersCache`] manages and caches workers. It uses an LRU cache to store and
+/// manage workers. It also tracks transaction statistics for each peer.
+pub(crate) struct WorkersCache {
+    workers: LruCache,
+    send_stats_per_addr: HashMap,
+
+    /// Indicates that the `WorkersCache` is being `shutdown()`, interrupting any outstanding
+    /// `send_txs()` invocations.
+    cancel: CancellationToken,
+}
+
+#[derive(Debug, Error, PartialEq)]
+pub enum WorkersCacheError {
+    /// Typically happens when the client could not establish the connection.
+    #[error("Work receiver has been dropped unexpectedly.")]
+    ReceiverDropped,
+
+    #[error("Task failed to join.")]
+    TaskJoinFailure,
+
+    #[error("The WorkersCache is being shutdown.")]
+    ShutdownError,
+}
+
+impl WorkersCache {
+    pub fn new(capacity: usize, cancel: CancellationToken) -> Self {
+        Self {
+            workers: LruCache::new(capacity),
+            send_stats_per_addr: HashMap::new(),
+            cancel,
+        }
+    }
+
+    pub fn contains(&self, peer: &SocketAddr) -> bool {
+        self.workers.contains(peer)
+    }
+
+    pub async fn push(&mut self, peer: SocketAddr, peer_worker: WorkerInfo) {
+        // Although there might be concerns about the performance implications
+        // of waiting for the worker to be closed when trying to add a new
+        // worker, the idea is that these workers are almost always created in
+        // advance so the latency is hidden.
+        if let Some((leader, popped_worker)) = self.workers.push(peer, peer_worker) {
+            self.shutdown_worker(leader, popped_worker).await;
+        }
+    }
+
+    /// Sends a batch of transactions to the worker for a given peer. If the
+    /// worker for the peer is disconnected or fails, it is removed from the
+    /// cache.
+    pub async fn send_transactions_to_address(
+        &mut self,
+        peer: &SocketAddr,
+        txs_batch: TransactionBatch,
+    ) -> Result<(), WorkersCacheError> {
+        let Self {
+            workers, cancel, ..
+        } = self;
+
+        let body = async move {
+            let current_worker = workers.get(peer).expect(
+                "Failed to fetch worker for peer {peer}.\n\
+                 Peer existence must be checked before this call using `contains` method.",
+            );
+            let send_res = current_worker.send_transactions(txs_batch).await;
+
+            if let Err(WorkersCacheError::ReceiverDropped) = send_res {
+                // Remove the worker from the cache, if the peer has disconnected.
+                if let Some(current_worker) = workers.pop(peer) {
+                    // To avoid obscuring the error from send, ignore a possible
+                    // `TaskJoinFailure`.
+                    let close_result = current_worker.shutdown().await;
+                    if let Err(error) = close_result {
+                        error!("Error while closing worker: {error}.");
+                    }
+                }
+            }
+
+            send_res
+        };
+
+        tokio::select!
{ + send_res = body => send_res, + () = cancel.cancelled() => Err(WorkersCacheError::ShutdownError), + } + } + + pub fn transaction_stats(&self) -> &HashMap { + &self.send_stats_per_addr + } + + /// Closes and removes all workers in the cache. This is typically done when + /// shutting down the system. + pub async fn shutdown(&mut self) { + // Interrupt any outstanding `send_txs()` calls. + self.cancel.cancel(); + + while let Some((leader, worker)) = self.workers.pop_lru() { + self.shutdown_worker(leader, worker).await; + } + } + + /// Shuts down a worker for a given peer by closing the worker and gathering + /// its transaction statistics. + async fn shutdown_worker(&mut self, leader: SocketAddr, worker: WorkerInfo) { + let res = worker.shutdown().await; + + let stats = match res { + Ok(stats) => stats, + Err(err) => { + debug!("Error while shutting down worker for {leader}: {err}"); + return; + } + }; + + self.send_stats_per_addr + .entry(leader.ip()) + .and_modify(|e| e.add(&stats)) + .or_insert(stats); + } +} diff --git a/tpu-client-next/tests/connection_workers_scheduler_test.rs b/tpu-client-next/tests/connection_workers_scheduler_test.rs new file mode 100644 index 00000000000000..0ffabb6640f7a3 --- /dev/null +++ b/tpu-client-next/tests/connection_workers_scheduler_test.rs @@ -0,0 +1,671 @@ +use { + crossbeam_channel::Receiver as CrossbeamReceiver, + futures::future::BoxFuture, + solana_cli_config::ConfigInput, + solana_rpc_client::nonblocking::rpc_client::RpcClient, + solana_sdk::{ + commitment_config::CommitmentConfig, + pubkey::Pubkey, + signer::{keypair::Keypair, Signer}, + }, + solana_streamer::{ + nonblocking::testing_utilities::{ + make_client_endpoint, setup_quic_server, SpawnTestServerResult, TestServerConfig, + }, + packet::PacketBatch, + streamer::StakedNodes, + }, + solana_tpu_client_next::{ + connection_workers_scheduler::ConnectionWorkersSchedulerConfig, + leader_updater::create_leader_updater, transaction_batch::TransactionBatch, + ConnectionWorkersScheduler, ConnectionWorkersSchedulerError, SendTransactionStats, + SendTransactionStatsPerAddr, + }, + std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + num::Saturating, + str::FromStr, + sync::{atomic::Ordering, Arc}, + time::Duration, + }, + tokio::{ + sync::{ + mpsc::{channel, Receiver}, + oneshot, + }, + task::JoinHandle, + time::{sleep, Instant}, + }, + tokio_util::sync::CancellationToken, +}; + +fn test_config(validator_identity: Option) -> ConnectionWorkersSchedulerConfig { + ConnectionWorkersSchedulerConfig { + bind: SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 0), + stake_identity: validator_identity, + num_connections: 1, + skip_check_transaction_age: false, + worker_channel_size: 2, + max_reconnect_attempts: 4, + lookahead_slots: 1, + } +} + +async fn setup_connection_worker_scheduler( + tpu_address: SocketAddr, + transaction_receiver: Receiver, + validator_identity: Option, +) -> ( + JoinHandle>, + CancellationToken, +) { + let json_rpc_url = "http://127.0.0.1:8899"; + let (_, websocket_url) = ConfigInput::compute_websocket_url_setting("", "", json_rpc_url, ""); + + let rpc_client = Arc::new(RpcClient::new_with_commitment( + json_rpc_url.to_string(), + CommitmentConfig::confirmed(), + )); + + // Setup sending txs + let leader_updater = create_leader_updater(rpc_client, websocket_url, Some(tpu_address)) + .await + .expect("Leader updates was successfully created"); + + let cancel = CancellationToken::new(); + let config = test_config(validator_identity); + let scheduler = 
tokio::spawn(ConnectionWorkersScheduler::run( + config, + leader_updater, + transaction_receiver, + cancel.clone(), + )); + + (scheduler, cancel) +} + +async fn join_scheduler( + scheduler_handle: JoinHandle< + Result, + >, +) -> SendTransactionStats { + let stats_per_ip = scheduler_handle + .await + .unwrap() + .expect("Scheduler should stop successfully."); + stats_per_ip + .get(&IpAddr::from_str("127.0.0.1").unwrap()) + .expect("setup_connection_worker_scheduler() connected to a leader at 127.0.0.1") + .clone() +} + +// Specify the pessimistic time to finish generation and result checks. +const TEST_MAX_TIME: Duration = Duration::from_millis(2500); + +struct SpawnTxGenerator { + tx_receiver: Receiver, + tx_sender_shutdown: BoxFuture<'static, ()>, + tx_sender_done: oneshot::Receiver<()>, +} + +/// Generates `num_tx_batches` batches of transactions, each holding a single transaction of +/// `tx_size` bytes. +/// +/// It will not close the returned `tx_receiver` until `tx_sender_shutdown` is invoked. Otherwise, +/// there is a race condition, that exists between the last transaction being scheduled for delivery +/// and the server connection being closed. +fn spawn_tx_sender( + tx_size: usize, + num_tx_batches: usize, + time_per_tx: Duration, +) -> SpawnTxGenerator { + let num_tx_batches: u32 = num_tx_batches + .try_into() + .expect("`num_tx_batches` fits into u32 for all the tests"); + let (tx_sender, tx_receiver) = channel(1); + let cancel = CancellationToken::new(); + let (done_sender, tx_sender_done) = oneshot::channel(); + + let sender = tokio::spawn({ + let start = Instant::now(); + + let tx_sender = tx_sender.clone(); + + let main_loop = async move { + for i in 0..num_tx_batches { + let txs = vec![vec![i as u8; tx_size]; 1]; + tx_sender + .send(TransactionBatch::new(txs)) + .await + .expect("Receiver should not close their side"); + + // Pretend the client runs at the specified TPS. + let sleep_time = time_per_tx + .saturating_mul(i) + .saturating_sub(start.elapsed()); + if !sleep_time.is_zero() { + sleep(sleep_time).await; + } + } + + // It is OK if the receiver has disconnected. + let _ = done_sender.send(()); + }; + + let cancel = cancel.clone(); + async move { + tokio::select! { + () = main_loop => (), + () = cancel.cancelled() => (), + } + } + }); + + let tx_sender_shutdown = Box::pin(async move { + cancel.cancel(); + // This makes sure that the sender exists up until the shutdown is invoked. + drop(tx_sender); + + sender.await.unwrap(); + }); + + SpawnTxGenerator { + tx_receiver, + tx_sender_shutdown, + tx_sender_done, + } +} + +#[tokio::test] +async fn test_basic_transactions_sending() { + let SpawnTestServerResult { + join_handle: server_handle, + exit, + receiver, + server_address, + stats: _stats, + } = setup_quic_server(None, TestServerConfig::default()); + + // Setup sending txs + let tx_size = 1; + let expected_num_txs: usize = 100; + // Pretend that we are running at ~100 TPS. + let SpawnTxGenerator { + tx_receiver, + tx_sender_shutdown, + .. + } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(10)); + + let (scheduler_handle, _scheduler_cancel) = + setup_connection_worker_scheduler(server_address, tx_receiver, None).await; + + // Check results + let mut received_data = Vec::with_capacity(expected_num_txs); + let now = Instant::now(); + let mut actual_num_packets = 0; + while actual_num_packets < expected_num_txs { + { + let elapsed = now.elapsed(); + assert!( + elapsed < TEST_MAX_TIME, + "Failed to send {} transaction in {:?}. 
Only sent {}", + expected_num_txs, + elapsed, + actual_num_packets, + ); + } + + let Ok(packets) = receiver.try_recv() else { + sleep(Duration::from_millis(10)).await; + continue; + }; + + actual_num_packets += packets.len(); + for p in packets.iter() { + let packet_id = p.data(0).expect("Data should not be lost by server."); + received_data.push(*packet_id); + assert_eq!(p.meta().size, 1); + } + } + + received_data.sort_unstable(); + for i in 1..received_data.len() { + assert_eq!(received_data[i - 1] + 1, received_data[i]); + } + + // Stop sending + tx_sender_shutdown.await; + let localhost_stats = join_scheduler(scheduler_handle).await; + assert_eq!( + localhost_stats, + SendTransactionStats { + successfully_sent: expected_num_txs as u64, + ..Default::default() + } + ); + + // Stop server + exit.store(true, Ordering::Relaxed); + server_handle.await.unwrap(); +} + +async fn count_received_packets_for( + receiver: CrossbeamReceiver, + expected_tx_size: usize, + receive_duration: Duration, +) -> usize { + let now = Instant::now(); + let mut num_packets_received = Saturating(0usize); + + while now.elapsed() < receive_duration { + if let Ok(packets) = receiver.try_recv() { + num_packets_received += packets.len(); + for p in packets.iter() { + assert_eq!(p.meta().size, expected_tx_size); + } + } else { + sleep(Duration::from_millis(100)).await; + } + } + + num_packets_received.0 +} + +// Check that client can create connection even if the first several attempts were unsuccessful. +#[tokio::test] +async fn test_connection_denied_until_allowed() { + let SpawnTestServerResult { + join_handle: server_handle, + exit, + receiver, + server_address, + stats: _stats, + } = setup_quic_server(None, TestServerConfig::default()); + + // To prevent server from accepting a new connection, we use the following observation. + // Since max_connections_per_peer == 1 (< max_unstaked_connections == 500), if we create a first + // connection and later try another one, the second connection will be immediately closed. + // + // Since client is not retrying sending failed transactions, this leads to the packets loss. + // The connection has been created and closed when we already have sent the data. + let throttling_connection = make_client_endpoint(&server_address, None).await; + + // Setup sending txs + let tx_size = 1; + let expected_num_txs: usize = 10; + let SpawnTxGenerator { + tx_receiver, + tx_sender_shutdown, + .. + } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(100)); + + let (scheduler_handle, _scheduler_cancel) = + setup_connection_worker_scheduler(server_address, tx_receiver, None).await; + + // Check results + let actual_num_packets = count_received_packets_for(receiver, tx_size, TEST_MAX_TIME).await; + assert!( + actual_num_packets < expected_num_txs, + "Expected to receive {expected_num_txs} packets in {TEST_MAX_TIME:?}\n\ + Got packets: {actual_num_packets}" + ); + + // Wait for the exchange to finish. + tx_sender_shutdown.await; + let localhost_stats = join_scheduler(scheduler_handle).await; + // in case of pruning, server closes the connection with code 1 and error + // message b"dropped". This might lead to connection error + // (ApplicationClosed::ApplicationClose) or to stream error + // (ConnectionLost::ApplicationClosed::ApplicationClose). 
+    assert_eq!(
+        localhost_stats.write_error_connection_lost
+            + localhost_stats.connection_error_application_closed,
+        1
+    );
+
+    drop(throttling_connection);
+
+    // Exit server
+    exit.store(true, Ordering::Relaxed);
+    server_handle.await.unwrap();
+}
+
+// Check that if the client connection has been pruned, the client manages to reestablish it.
+// Pruning will lead to the loss of 1 packet, because the connection is reestablished only when we
+// send the next packet.
+#[tokio::test]
+async fn test_connection_pruned_and_reopened() {
+    let SpawnTestServerResult {
+        join_handle: server_handle,
+        exit,
+        receiver,
+        server_address,
+        stats: _stats,
+    } = setup_quic_server(
+        None,
+        TestServerConfig {
+            max_connections_per_peer: 100,
+            max_unstaked_connections: 1,
+            ..Default::default()
+        },
+    );
+
+    // Setup sending txs
+    let tx_size = 1;
+    let expected_num_txs: usize = 16;
+    let SpawnTxGenerator {
+        tx_receiver,
+        tx_sender_shutdown,
+        ..
+    } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(100));
+
+    let (scheduler_handle, _scheduler_cancel) =
+        setup_connection_worker_scheduler(server_address, tx_receiver, None).await;
+
+    sleep(Duration::from_millis(400)).await;
+    let _connection_to_prune_client = make_client_endpoint(&server_address, None).await;
+
+    // Check results
+    let actual_num_packets = count_received_packets_for(receiver, tx_size, TEST_MAX_TIME).await;
+    assert!(actual_num_packets < expected_num_txs);
+
+    // Wait for the exchange to finish.
+    tx_sender_shutdown.await;
+    let localhost_stats = join_scheduler(scheduler_handle).await;
+    // In case of pruning, the server closes the connection with code 1 and the error
+    // message b"dropped". This might lead to a connection error
+    // (ApplicationClosed::ApplicationClose) or to a stream error
+    // (ConnectionLost::ApplicationClosed::ApplicationClose).
+    assert_eq!(
+        localhost_stats.connection_error_application_closed
+            + localhost_stats.write_error_connection_lost,
+        1,
+    );
+
+    // Exit server
+    exit.store(true, Ordering::Relaxed);
+    server_handle.await.unwrap();
+}
+
+/// Check that the client creates a staked connection. To do that, prohibit unstaked connections
+/// and verify that all the txs have been received.
+#[tokio::test]
+async fn test_staked_connection() {
+    let validator_identity = Keypair::new();
+    let stakes = HashMap::from([(validator_identity.pubkey(), 100_000)]);
+    let staked_nodes = StakedNodes::new(Arc::new(stakes), HashMap::<Pubkey, u64>::default());
+
+    let SpawnTestServerResult {
+        join_handle: server_handle,
+        exit,
+        receiver,
+        server_address,
+        stats: _stats,
+    } = setup_quic_server(
+        Some(staked_nodes),
+        TestServerConfig {
+            // Must use at least the number of endpoints (10) because
+            // `max_staked_connections` and `max_unstaked_connections` are
+            // cumulative for all the endpoints.
+            max_staked_connections: 10,
+            max_unstaked_connections: 0,
+            ..Default::default()
+        },
+    );
+
+    // Setup sending txs
+    let tx_size = 1;
+    let expected_num_txs: usize = 10;
+    let SpawnTxGenerator {
+        tx_receiver,
+        tx_sender_shutdown,
+        ..
+    } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(100));
+
+    let (scheduler_handle, _scheduler_cancel) =
+        setup_connection_worker_scheduler(server_address, tx_receiver, Some(validator_identity))
+            .await;
+
+    // Check results
+    let actual_num_packets = count_received_packets_for(receiver, tx_size, TEST_MAX_TIME).await;
+    assert_eq!(actual_num_packets, expected_num_txs);
+
+    // Wait for the exchange to finish.
+    tx_sender_shutdown.await;
+    let localhost_stats = join_scheduler(scheduler_handle).await;
+    assert_eq!(
+        localhost_stats,
+        SendTransactionStats {
+            successfully_sent: expected_num_txs as u64,
+            ..Default::default()
+        }
+    );
+
+    // Exit server
+    exit.store(true, Ordering::Relaxed);
+    server_handle.await.unwrap();
+}
+
+// Check that if the client sends transactions at a rate higher than what the server accepts, all
+// the transactions are nevertheless delivered and there are no errors on the client side.
+#[tokio::test]
+async fn test_connection_throttling() {
+    let SpawnTestServerResult {
+        join_handle: server_handle,
+        exit,
+        receiver,
+        server_address,
+        stats: _stats,
+    } = setup_quic_server(None, TestServerConfig::default());
+
+    // Setup sending txs
+    let tx_size = 1;
+    let expected_num_txs: usize = 50;
+    // Send at 1000 TPS, 10x more than the 10ms throttling interval used in the other tests
+    // allows.
+    let SpawnTxGenerator {
+        tx_receiver,
+        tx_sender_shutdown,
+        ..
+    } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(1));
+
+    let (scheduler_handle, _scheduler_cancel) =
+        setup_connection_worker_scheduler(server_address, tx_receiver, None).await;
+
+    // Check results
+    let actual_num_packets =
+        count_received_packets_for(receiver, tx_size, Duration::from_secs(1)).await;
+    assert_eq!(actual_num_packets, expected_num_txs);
+
+    // Stop sending
+    tx_sender_shutdown.await;
+    let localhost_stats = join_scheduler(scheduler_handle).await;
+    assert_eq!(
+        localhost_stats,
+        SendTransactionStats {
+            successfully_sent: expected_num_txs as u64,
+            ..Default::default()
+        }
+    );
+
+    // Exit server
+    exit.store(true, Ordering::Relaxed);
+    server_handle.await.unwrap();
+}
+
+// Check that when the host cannot be reached, the client exits gracefully.
+#[tokio::test]
+async fn test_no_host() {
+    // A "black hole" address for the TPU.
+    let server_ip = IpAddr::V6(Ipv6Addr::new(0x100, 0, 0, 0, 0, 0, 0, 1));
+    let server_address = SocketAddr::new(server_ip, 49151);
+
+    // Setup sending side.
+    let tx_size = 1;
+    let max_send_attempts: usize = 10;
+
+    let SpawnTxGenerator {
+        tx_receiver,
+        tx_sender_shutdown,
+        tx_sender_done,
+        ..
+    } = spawn_tx_sender(tx_size, max_send_attempts, Duration::from_millis(10));
+
+    let (scheduler_handle, _scheduler_cancel) =
+        setup_connection_worker_scheduler(server_address, tx_receiver, None).await;
+
+    // Wait for all the transactions to be sent, and some extra time for the delivery to be
+    // attempted.
+    tx_sender_done.await.unwrap();
+    sleep(Duration::from_millis(100)).await;
+
+    // Wait for the generator to finish.
+    tx_sender_shutdown.await;
+
+    // While attempting to establish a connection with a nonexistent host, we fill the worker's
+    // channel. Transactions from this channel will never be sent and will eventually be dropped
+    // without increasing the `SendTransactionStats` counters.
+    let stats = scheduler_handle
+        .await
+        .expect("Scheduler should stop successfully")
+        .expect("Scheduler execution was successful");
+    assert_eq!(stats, HashMap::new());
+}
+
+// Check that when the client is rate-limited by the server, we update the counters accordingly.
+// To implement it we:
+// * set the connection limit per minute to 1
+// * create a dummy connection to reach the limit and immediately close it
+// * set up a client that will try to create a new connection and will be rate-limited.
+// This test doesn't check what happens when the rate-limiting period ends, because that takes
+// too long for a test (1 min).
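The server-side behavior these two rate-limiting tests exercise, at most `max_connections_per_ipaddr_per_minute` new connections per IP, amounts to a keyed sliding-window rate limiter. The following is a minimal, self-contained sketch of that idea; it is illustrative only, as the real limiter lives in the `streamer` QUIC server and differs in detail:

    use std::{
        collections::HashMap,
        net::IpAddr,
        time::{Duration, Instant},
    };

    /// Toy sliding-window limiter: at most `limit` connections per IP per `window`.
    struct PerIpRateLimiter {
        window: Duration,
        limit: usize,
        attempts: HashMap<IpAddr, Vec<Instant>>,
    }

    impl PerIpRateLimiter {
        fn new(window: Duration, limit: usize) -> Self {
            Self { window, limit, attempts: HashMap::new() }
        }

        fn allow(&mut self, ip: IpAddr, now: Instant) -> bool {
            let hits = self.attempts.entry(ip).or_default();
            // Forget attempts that fell out of the sliding window.
            hits.retain(|t| now.duration_since(*t) < self.window);
            if hits.len() < self.limit {
                hits.push(now);
                true
            } else {
                false
            }
        }
    }

    fn main() {
        let ip: IpAddr = "127.0.0.1".parse().unwrap();
        let mut limiter = PerIpRateLimiter::new(Duration::from_secs(60), 1);
        let t0 = Instant::now();
        assert!(limiter.allow(ip, t0)); // the dummy connection consumes the quota
        assert!(!limiter.allow(ip, t0 + Duration::from_secs(30))); // client is throttled
        assert!(limiter.allow(ip, t0 + Duration::from_secs(61))); // accepted after the window
    }

With a limit of 1 and a one-minute window, the dummy connection in the test consumes the quota, so the scheduler's own connection attempt stays pending for the whole `TEST_MAX_TIME`.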
+#[tokio::test]
+async fn test_rate_limiting() {
+    let SpawnTestServerResult {
+        join_handle: server_handle,
+        exit,
+        receiver,
+        server_address,
+        stats: _stats,
+    } = setup_quic_server(
+        None,
+        TestServerConfig {
+            max_connections_per_peer: 100,
+            max_connections_per_ipaddr_per_minute: 1,
+            ..Default::default()
+        },
+    );
+
+    let connection_to_reach_limit = make_client_endpoint(&server_address, None).await;
+    drop(connection_to_reach_limit);
+
+    // Setup sending txs
+    let tx_size = 1;
+    let expected_num_txs: usize = 16;
+    let SpawnTxGenerator {
+        tx_receiver,
+        tx_sender_shutdown,
+        ..
+    } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(100));
+
+    let (scheduler_handle, scheduler_cancel) =
+        setup_connection_worker_scheduler(server_address, tx_receiver, None).await;
+
+    let actual_num_packets = count_received_packets_for(receiver, tx_size, TEST_MAX_TIME).await;
+    assert_eq!(actual_num_packets, 0);
+
+    // Stop the sender.
+    tx_sender_shutdown.await;
+
+    // And the scheduler.
+    scheduler_cancel.cancel();
+    let localhost_stats = join_scheduler(scheduler_handle).await;
+
+    // We do not expect to see any errors, as the connection is still in the pending state when we
+    // do the shutdown. If we increased the time we wait in `count_received_packets_for`, we would
+    // start seeing `connection_error_timed_out` incremented to 1. Potentially, we may want to
+    // accept both 0 and 1 as valid values for it.
+    assert_eq!(localhost_stats, SendTransactionStats::default());
+
+    // Stop the server.
+    exit.store(true, Ordering::Relaxed);
+    server_handle.await.unwrap();
+}
+
+// The same as test_rate_limiting, but here we wait for 1 min to check that the connection is
+// eventually established.
+#[tokio::test]
+// TODO Provide an alternative testing interface for `streamer::nonblocking::quic::spawn_server`
+// that would accept throttling at a granularity below 1 minute.
+#[ignore = "takes 70s to complete"]
+async fn test_rate_limiting_establish_connection() {
+    let SpawnTestServerResult {
+        join_handle: server_handle,
+        exit,
+        receiver,
+        server_address,
+        stats: _stats,
+    } = setup_quic_server(
+        None,
+        TestServerConfig {
+            max_connections_per_peer: 100,
+            max_connections_per_ipaddr_per_minute: 1,
+            ..Default::default()
+        },
+    );
+
+    let connection_to_reach_limit = make_client_endpoint(&server_address, None).await;
+    drop(connection_to_reach_limit);
+
+    // Setup sending txs
+    let tx_size = 1;
+    let expected_num_txs: usize = 65;
+    let SpawnTxGenerator {
+        tx_receiver,
+        tx_sender_shutdown,
+        ..
+    } = spawn_tx_sender(tx_size, expected_num_txs, Duration::from_millis(1000));
+
+    let (scheduler_handle, scheduler_cancel) =
+        setup_connection_worker_scheduler(server_address, tx_receiver, None).await;
+
+    let actual_num_packets =
+        count_received_packets_for(receiver, tx_size, Duration::from_secs(70)).await;
+    assert!(
+        actual_num_packets > 0,
+        "As we wait longer than 1 minute, at least one transaction should be delivered. \
+         After 1 minute the server is expected to accept our connection.\n\
+         Actual packets delivered: {actual_num_packets}"
+    );
+
+    // Stop the sender.
+    tx_sender_shutdown.await;
+
+    // And the scheduler.
+ scheduler_cancel.cancel(); + let mut localhost_stats = join_scheduler(scheduler_handle).await; + assert!( + localhost_stats.connection_error_timed_out > 0, + "As the quinn timeout is below 1 minute, a few connections will fail to connect during \ + the 1 minute delay.\n\ + Actual connection_error_timed_out: {}", + localhost_stats.connection_error_timed_out + ); + assert!( + localhost_stats.successfully_sent > 0, + "As we run the test for longer than 1 minute, we expect a connection to be established, \ + and a number of transactions to be delivered.\n\ + Actual successfully_sent: {}", + localhost_stats.successfully_sent + ); + + // All the rest of the error counters should be 0. + localhost_stats.connection_error_timed_out = 0; + localhost_stats.successfully_sent = 0; + assert_eq!(localhost_stats, SendTransactionStats::default()); + + // Stop the server. + exit.store(true, Ordering::Relaxed); + server_handle.await.unwrap(); +} From 2348429ff9aa33e59a6ee2b047ae49ec74d6970d Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Mon, 14 Oct 2024 17:48:32 +0200 Subject: [PATCH 496/529] Remove unused forward_worker module (#3128) * Remove unused `forward_worker` module * Remove `ForwardWork` --- core/src/banking_stage.rs | 2 - core/src/banking_stage/forward_worker.rs | 219 ------------------- core/src/banking_stage/scheduler_messages.rs | 16 +- 3 files changed, 1 insertion(+), 236 deletions(-) delete mode 100644 core/src/banking_stage/forward_worker.rs diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 32cc3fbe44dda1..33dd38f9648265 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -65,7 +65,6 @@ pub mod unprocessed_transaction_storage; mod consume_worker; mod decision_maker; mod forward_packet_batches_by_accounts; -mod forward_worker; mod immutable_deserialized_packet; mod latest_unprocessed_votes; mod leader_slot_timing_metrics; @@ -74,7 +73,6 @@ mod packet_deserializer; mod packet_filter; mod packet_receiver; mod read_write_account_set; -#[allow(dead_code)] mod scheduler_messages; mod transaction_scheduler; diff --git a/core/src/banking_stage/forward_worker.rs b/core/src/banking_stage/forward_worker.rs deleted file mode 100644 index 61cf311f0a8cf8..00000000000000 --- a/core/src/banking_stage/forward_worker.rs +++ /dev/null @@ -1,219 +0,0 @@ -use { - super::{ - forwarder::Forwarder, - scheduler_messages::{FinishedForwardWork, ForwardWork}, - ForwardOption, - }, - crate::banking_stage::LikeClusterInfo, - crossbeam_channel::{Receiver, RecvError, SendError, Sender}, - thiserror::Error, -}; - -#[derive(Debug, Error)] -pub enum ForwardWorkerError { - #[error("Failed to receive work from scheduler: {0}")] - Recv(#[from] RecvError), - #[error("Failed to send finalized forward work to scheduler: {0}")] - Send(#[from] SendError), -} - -pub(crate) struct ForwardWorker { - forward_receiver: Receiver, - forward_option: ForwardOption, - forwarder: Forwarder, - forwarded_sender: Sender, -} - -#[allow(dead_code)] -impl ForwardWorker { - pub fn new( - forward_receiver: Receiver, - forward_option: ForwardOption, - forwarder: Forwarder, - forwarded_sender: Sender, - ) -> Self { - Self { - forward_receiver, - forward_option, - forwarder, - forwarded_sender, - } - } - - pub fn run(self) -> Result<(), ForwardWorkerError> { - loop { - let work = self.forward_receiver.recv()?; - self.forward_loop(work)?; - } - } - - fn forward_loop(&self, work: ForwardWork) -> Result<(), ForwardWorkerError> { - for work in try_drain_iter(work, &self.forward_receiver) { - let (res, 
_num_packets, _forward_us, _leader_pubkey) = self.forwarder.forward_packets( - &self.forward_option, - work.packets.iter().map(|p| p.original_packet()), - ); - match res { - Ok(()) => self.forwarded_sender.send(FinishedForwardWork { - work, - successful: true, - })?, - Err(_err) => return self.failed_forward_drain(work), - }; - } - Ok(()) - } - - fn failed_forward_drain(&self, work: ForwardWork) -> Result<(), ForwardWorkerError> { - for work in try_drain_iter(work, &self.forward_receiver) { - self.forwarded_sender.send(FinishedForwardWork { - work, - successful: false, - })?; - } - Ok(()) - } -} - -/// Helper function to create an non-blocking iterator over work in the receiver, -/// starting with the given work item. -fn try_drain_iter(work: T, receiver: &Receiver) -> impl Iterator + '_ { - std::iter::once(work).chain(receiver.try_iter()) -} - -#[cfg(test)] -mod tests { - use { - super::*, - crate::banking_stage::{ - immutable_deserialized_packet::ImmutableDeserializedPacket, - tests::{create_slow_genesis_config, new_test_cluster_info, simulate_poh}, - }, - crossbeam_channel::unbounded, - solana_client::connection_cache::ConnectionCache, - solana_gossip::cluster_info::ClusterInfo, - solana_ledger::{ - blockstore::Blockstore, genesis_utils::GenesisConfigInfo, - get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, - }, - solana_perf::packet::to_packet_batches, - solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_runtime::bank::Bank, - solana_sdk::{ - genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, - signature::Keypair, system_transaction, - }, - std::{ - sync::{atomic::AtomicBool, Arc, RwLock}, - thread::JoinHandle, - }, - tempfile::TempDir, - }; - - // Helper struct to create tests that hold channels, files, etc. - // such that our tests can be more easily set up and run. - struct TestFrame { - mint_keypair: Keypair, - genesis_config: GenesisConfig, - _ledger_path: TempDir, - _entry_receiver: Receiver, - _poh_simulator: JoinHandle<()>, - - forward_sender: Sender, - forwarded_receiver: Receiver, - } - - fn setup_test_frame() -> (TestFrame, ForwardWorker>) { - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. 
- } = create_slow_genesis_config(10_000); - let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Blockstore::open(ledger_path.path()) - .expect("Expected to be able to open database ledger"); - let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( - bank.tick_height(), - bank.last_blockhash(), - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), - ); - let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - let poh_simulator = simulate_poh(record_receiver, &poh_recorder); - - let (_local_node, cluster_info) = new_test_cluster_info(None); - let cluster_info = Arc::new(cluster_info); - let forwarder = Forwarder::new( - poh_recorder, - bank_forks, - cluster_info, - Arc::new(ConnectionCache::new("test")), - Arc::default(), - ); - - let (forward_sender, forward_receiver) = unbounded(); - let (forwarded_sender, forwarded_receiver) = unbounded(); - let worker = ForwardWorker::new( - forward_receiver, - ForwardOption::ForwardTransaction, - forwarder, - forwarded_sender, - ); - - ( - TestFrame { - mint_keypair, - genesis_config, - _ledger_path: ledger_path, - _entry_receiver: entry_receiver, - _poh_simulator: poh_simulator, - forward_sender, - forwarded_receiver, - }, - worker, - ) - } - - #[test] - fn test_worker_forward_simple() { - let (test_frame, worker) = setup_test_frame(); - let TestFrame { - mint_keypair, - genesis_config, - forward_sender, - forwarded_receiver, - .. - } = &test_frame; - let worker_thread = std::thread::spawn(move || worker.run()); - - let pubkey1 = Pubkey::new_unique(); - let pubkey2 = Pubkey::new_unique(); - - let txs = vec![ - system_transaction::transfer(mint_keypair, &pubkey1, 2, genesis_config.hash()), - system_transaction::transfer(mint_keypair, &pubkey2, 2, genesis_config.hash()), - ]; - - let packets = to_packet_batches(&txs, 2); - assert_eq!(packets.len(), 1); - let packets = packets[0] - .into_iter() - .cloned() - .map(|p| ImmutableDeserializedPacket::new(p).unwrap()) - .map(Arc::new) - .collect(); - forward_sender.send(ForwardWork { packets }).unwrap(); - let forwarded = forwarded_receiver.recv().unwrap(); - assert!(forwarded.successful); - - drop(test_frame); - let _ = worker_thread.join().unwrap(); - } -} diff --git a/core/src/banking_stage/scheduler_messages.rs b/core/src/banking_stage/scheduler_messages.rs index ee5c4ebeef9738..d93d2d6dbb6c52 100644 --- a/core/src/banking_stage/scheduler_messages.rs +++ b/core/src/banking_stage/scheduler_messages.rs @@ -1,7 +1,6 @@ use { - super::immutable_deserialized_packet::ImmutableDeserializedPacket, solana_sdk::{clock::Slot, transaction::SanitizedTransaction}, - std::{fmt::Display, sync::Arc}, + std::fmt::Display, }; /// A unique identifier for a transaction batch. @@ -45,22 +44,9 @@ pub struct ConsumeWork { pub max_age_slots: Vec, } -/// Message: [Scheduler -> Worker] -/// Transactions to be forwarded to the next leader(s) -pub struct ForwardWork { - pub packets: Vec>, -} - /// Message: [Worker -> Scheduler] /// Processed transactions. pub struct FinishedConsumeWork { pub work: ConsumeWork, pub retryable_indexes: Vec, } - -/// Message: [Worker -> Scheduler] -/// Forwarded transactions. 
-pub struct FinishedForwardWork { - pub work: ForwardWork, - pub successful: bool, -} From a66b023f2a1c0250c4d23ab15793eee1c46f1832 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 14 Oct 2024 12:16:09 -0400 Subject: [PATCH 497/529] Verifies accounts lt hash at startup (#3145) --- accounts-db/src/accounts_db.rs | 96 ++++++++++++- accounts-db/src/accounts_file.rs | 5 +- runtime/src/bank.rs | 82 ++++++++++- runtime/src/bank/accounts_lt_hash.rs | 207 +++++++++++++++++++++++++-- runtime/src/bank/serde_snapshot.rs | 6 +- runtime/src/bank/tests.rs | 61 ++++++-- runtime/src/serde_snapshot.rs | 45 ++++-- runtime/src/snapshot_bank_utils.rs | 35 +++-- 8 files changed, 485 insertions(+), 52 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7ed43e238eb546..29582ab7e1ff7f 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -80,7 +80,7 @@ use { seqlock::SeqLock, smallvec::SmallVec, solana_lattice_hash::lt_hash::LtHash, - solana_measure::{measure::Measure, measure_us}, + solana_measure::{meas_dur, measure::Measure, measure_us}, solana_nohash_hasher::{IntMap, IntSet}, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ @@ -654,6 +654,9 @@ pub enum ScanStorageResult { pub struct IndexGenerationInfo { pub accounts_data_len: u64, pub rent_paying_accounts_by_partition: RentPayingAccountsByPartition, + /// The lt hash of the old/duplicate accounts identified during index generation. + /// Will be used when verifying the accounts lt hash, after rebuilding a Bank. + pub duplicates_lt_hash: Box, } #[derive(Debug, Default)] @@ -666,6 +669,21 @@ struct SlotIndexGenerationInfo { rent_paying_accounts_by_partition: Vec, } +/// The lt hash of old/duplicate accounts +/// +/// Accumulation of all the duplicate accounts found during index generation. +/// These accounts need to have their lt hashes mixed *out*. +/// This is the final value, that when applied to all the storages at startup, +/// will produce the correct accounts lt hash. +#[derive(Debug, Clone)] +pub struct DuplicatesLtHash(pub LtHash); + +impl Default for DuplicatesLtHash { + fn default() -> Self { + Self(LtHash::identity()) + } +} + #[derive(Default, Debug)] struct GenerateIndexTimings { pub total_time_us: u64, @@ -687,6 +705,7 @@ struct GenerateIndexTimings { pub populate_duplicate_keys_us: u64, pub total_slots: u64, pub slots_to_clean: u64, + pub par_duplicates_lt_hash_us: AtomicU64, } #[derive(Default, Debug, PartialEq, Eq)] @@ -763,6 +782,11 @@ impl GenerateIndexTimings { startup_stats.copy_data_us.swap(0, Ordering::Relaxed), i64 ), + ( + "par_duplicates_lt_hash_us", + self.par_duplicates_lt_hash_us.load(Ordering::Relaxed), + i64 + ), ); } } @@ -6467,7 +6491,7 @@ impl AccountsDb { /// /// Only intended to be called at startup (or by tests). /// Only intended to be used while testing the experimental accumulator hash. - pub fn calculate_accounts_lt_hash_at_startup( + pub fn calculate_accounts_lt_hash_at_startup_from_index( &self, ancestors: &Ancestors, startup_slot: Slot, @@ -6528,6 +6552,39 @@ impl AccountsDb { AccountsLtHash(lt_hash) } + /// Calculates the accounts lt hash + /// + /// Intended to be used to verify the accounts lt hash at startup. + /// + /// The `duplicates_lt_hash` is the old/duplicate accounts to mix *out* of the storages. + /// This value comes from index generation. 
+ pub fn calculate_accounts_lt_hash_at_startup_from_storages( + &self, + storages: &[Arc], + duplicates_lt_hash: &DuplicatesLtHash, + ) -> AccountsLtHash { + debug_assert!(self.is_experimental_accumulator_hash_enabled()); + + let mut lt_hash = storages + .par_iter() + .fold(LtHash::identity, |mut accum, storage| { + storage.accounts.scan_accounts(|stored_account_meta| { + let account_lt_hash = + Self::lt_hash_account(&stored_account_meta, stored_account_meta.pubkey()); + accum.mix_in(&account_lt_hash.0); + }); + accum + }) + .reduce(LtHash::identity, |mut accum, elem| { + accum.mix_in(&elem); + accum + }); + + lt_hash.mix_out(&duplicates_lt_hash.0); + + AccountsLtHash(lt_hash) + } + /// This is only valid to call from tests. /// run the accounts hash calculation and store the results pub fn update_accounts_hash_for_tests( @@ -8351,6 +8408,7 @@ impl AccountsDb { let rent_paying_accounts_by_partition = Mutex::new(RentPayingAccountsByPartition::new(schedule)); + let mut outer_duplicates_lt_hash = None; // pass == 0 always runs and generates the index // pass == 1 only runs if verify == true. @@ -8554,6 +8612,7 @@ impl AccountsDb { accounts_data_len_from_duplicates: u64, num_duplicate_accounts: u64, uncleaned_roots: IntSet, + duplicates_lt_hash: Box, } impl DuplicatePubkeysVisitedInfo { fn reduce(mut a: Self, mut b: Self) -> Self { @@ -8570,6 +8629,9 @@ impl AccountsDb { other.accounts_data_len_from_duplicates; self.num_duplicate_accounts += other.num_duplicate_accounts; self.uncleaned_roots.extend(other.uncleaned_roots); + self.duplicates_lt_hash + .0 + .mix_in(&other.duplicates_lt_hash.0); } } @@ -8580,6 +8642,7 @@ impl AccountsDb { accounts_data_len_from_duplicates, num_duplicate_accounts, uncleaned_roots, + duplicates_lt_hash, } = unique_pubkeys_by_bin .par_iter() .fold( @@ -8592,6 +8655,7 @@ impl AccountsDb { accounts_data_len_from_duplicates, accounts_duplicates_num, uncleaned_roots, + duplicates_lt_hash, ) = self.visit_duplicate_pubkeys_during_startup( pubkeys, &rent_collector, @@ -8601,6 +8665,7 @@ impl AccountsDb { accounts_data_len_from_duplicates, num_duplicate_accounts: accounts_duplicates_num, uncleaned_roots, + duplicates_lt_hash, }; DuplicatePubkeysVisitedInfo::reduce(accum, intermediate) }) @@ -8623,6 +8688,8 @@ impl AccountsDb { self.accounts_index .add_uncleaned_roots(uncleaned_roots.into_iter()); accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed); + let old_val = outer_duplicates_lt_hash.replace(duplicates_lt_hash); + assert!(old_val.is_none()); info!( "accounts data len: {}", accounts_data_len.load(Ordering::Relaxed) @@ -8649,6 +8716,7 @@ impl AccountsDb { rent_paying_accounts_by_partition: rent_paying_accounts_by_partition .into_inner() .unwrap(), + duplicates_lt_hash: outer_duplicates_lt_hash.unwrap(), } } @@ -8678,20 +8746,28 @@ impl AccountsDb { /// 1. get the _duplicate_ accounts data len from the given pubkeys /// 2. get the slots that contained duplicate pubkeys /// 3. update rent stats + /// 4. build up the duplicates lt hash /// /// Note this should only be used when ALL entries in the accounts index are roots. 
- /// returns (data len sum of all older duplicates, number of duplicate accounts, slots that contained duplicate pubkeys) + /// + /// returns tuple of: + /// - data len sum of all older duplicates + /// - number of duplicate accounts + /// - slots that contained duplicate pubkeys + /// - lt hash of duplicates fn visit_duplicate_pubkeys_during_startup( &self, pubkeys: &[Pubkey], rent_collector: &RentCollector, timings: &GenerateIndexTimings, - ) -> (u64, u64, IntSet) { + ) -> (u64, u64, IntSet, Box) { let mut accounts_data_len_from_duplicates = 0; let mut num_duplicate_accounts = 0_u64; let mut uncleaned_slots = IntSet::default(); + let mut duplicates_lt_hash = Box::new(DuplicatesLtHash::default()); let mut removed_rent_paying = 0; let mut removed_top_off = 0; + let mut lt_hash_time = Duration::default(); self.accounts_index.scan( pubkeys.iter(), |pubkey, slots_refs, _entry| { @@ -8730,6 +8806,14 @@ impl AccountsDb { removed_rent_paying += 1; removed_top_off += lamports_to_top_off; } + if self.is_experimental_accumulator_hash_enabled() { + let (_, duration) = meas_dur!({ + let account_lt_hash = + Self::lt_hash_account(&loaded_account, pubkey); + duplicates_lt_hash.0.mix_in(&account_lt_hash.0); + }); + lt_hash_time += duration; + } }); }); } @@ -8746,10 +8830,14 @@ impl AccountsDb { timings .amount_to_top_off_rent .fetch_sub(removed_top_off, Ordering::Relaxed); + timings + .par_duplicates_lt_hash_us + .fetch_add(lt_hash_time.as_micros() as u64, Ordering::Relaxed); ( accounts_data_len_from_duplicates as u64, num_duplicate_accounts, uncleaned_slots, + duplicates_lt_hash, ) } diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index 3602f47c88fbec..d087aecbe0b871 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -210,10 +210,7 @@ impl AccountsFile { } /// Iterate over all accounts and call `callback` with each account. 
- pub(crate) fn scan_accounts( - &self, - callback: impl for<'local> FnMut(StoredAccountMeta<'local>), - ) { + pub fn scan_accounts(&self, callback: impl for<'local> FnMut(StoredAccountMeta<'local>)) { match self { Self::AppendVec(av) => av.scan_accounts(callback), Self::TieredStorage(ts) => { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index bd19c71fbb99bc..60347c5dde4e00 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -75,7 +75,8 @@ use { accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot}, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, - CalcAccountsHashDataSource, PubkeyHashAccount, VerifyAccountsHashAndLamportsConfig, + CalcAccountsHashDataSource, DuplicatesLtHash, PubkeyHashAccount, + VerifyAccountsHashAndLamportsConfig, }, accounts_hash::{ AccountHash, AccountsHash, AccountsLtHash, CalcAccountsHashConfig, HashStats, @@ -1724,7 +1725,7 @@ impl Bank { .rc .accounts .accounts_db - .calculate_accounts_lt_hash_at_startup(&bank.ancestors, bank.slot()); + .calculate_accounts_lt_hash_at_startup_from_index(&bank.ancestors, bank.slot()); }); duration }); @@ -5548,6 +5549,7 @@ impl Bank { run_in_background: false, store_hash_raw_data_for_debug: on_halt_store_hash_raw_data_for_debug, }, + None, ); } @@ -5558,7 +5560,8 @@ impl Bank { fn verify_accounts_hash( &self, base: Option<(Slot, /*capitalization*/ u64)>, - config: VerifyAccountsHashConfig, + mut config: VerifyAccountsHashConfig, + duplicates_lt_hash: Option<&DuplicatesLtHash>, ) -> bool { let accounts = &self.rc.accounts; // Wait until initial hash calc is complete before starting a new hash calc. @@ -5569,19 +5572,36 @@ impl Bank { .wait_for_complete(); let slot = self.slot(); + let is_accounts_lt_hash_enabled = self.is_accounts_lt_hash_enabled(); if config.require_rooted_bank && !accounts.accounts_db.accounts_index.is_alive_root(slot) { if let Some(parent) = self.parent() { info!( "slot {slot} is not a root, so verify accounts hash on parent bank at slot {}", parent.slot(), ); - return parent.verify_accounts_hash(base, config); + if is_accounts_lt_hash_enabled { + // The duplicates_lt_hash is only valid for the current slot, so we must fall + // back to verifying the accounts lt hash with the index (which also means we + // cannot run in the background). + config.run_in_background = false; + } + return parent.verify_accounts_hash(base, config, None); } else { // this will result in mismatch errors // accounts hash calc doesn't include unrooted slots panic!("cannot verify accounts hash because slot {slot} is not a root"); } } + + if is_accounts_lt_hash_enabled { + // Calculating the accounts lt hash from storages *requires* a duplicates_lt_hash. + // If it is None here, then we must use the index instead, which also means we + // cannot run in the background. + if duplicates_lt_hash.is_none() { + config.run_in_background = false; + } + } + // The snapshot storages must be captured *before* starting the background verification. // Otherwise, it is possible that a delayed call to `get_snapshot_storages()` will *not* // get the correct storages required to calculate and verify the accounts hashes. 
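The storage-based verification introduced above rests on the lattice hash being a commutative group: mixing in the lt hash of every account version found in the storages, and then mixing out the accumulated `DuplicatesLtHash`, must equal the hash over only the newest version of each account, which is what the index-based path computes. The following toy sketch illustrates that identity with XOR as a stand-in combiner (the real `LtHash` state is much larger and mixes with wrapping arithmetic rather than XOR, and per-account hashes come from `AccountsDb::lt_hash_account`):

    /// Toy lattice hash. XOR is its own inverse, so `mix_out` simply re-mixes;
    /// the real LtHash uses a different operation for mix_in and mix_out.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct ToyLtHash([u64; 4]);

    impl ToyLtHash {
        fn identity() -> Self {
            Self([0; 4])
        }
        fn mix_in(&mut self, other: &Self) {
            for (a, b) in self.0.iter_mut().zip(other.0.iter()) {
                *a ^= *b;
            }
        }
        fn mix_out(&mut self, other: &Self) {
            self.mix_in(other);
        }
    }

    /// Stand-in for hashing one version of an account.
    fn hash_account(pubkey: u64, lamports: u64) -> ToyLtHash {
        ToyLtHash([pubkey, lamports, pubkey ^ lamports, !lamports])
    }

    fn main() {
        // Account 7 was rewritten: the storages hold an old and a new version.
        let old = hash_account(7, 100);
        let new = hash_account(7, 250);
        let other = hash_account(9, 42);

        // Index-based calculation: latest versions only.
        let mut from_index = ToyLtHash::identity();
        from_index.mix_in(&new);
        from_index.mix_in(&other);

        // Storage-based calculation: every stored version mixed in, then the
        // duplicates (accumulated during index generation) mixed back out.
        let mut from_storages = ToyLtHash::identity();
        for h in [&old, &new, &other] {
            from_storages.mix_in(h);
        }
        from_storages.mix_out(&old); // the DuplicatesLtHash

        assert_eq!(from_index, from_storages);
    }

Because mixing is commutative and associative, the storages can be scanned in any order, and in parallel, which is exactly what the `par_iter().fold(..).reduce(..)` in `calculate_accounts_lt_hash_at_startup_from_storages` exploits.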
@@ -5600,17 +5620,43 @@ impl Bank { store_detailed_debug_info: config.store_hash_raw_data_for_debug, use_bg_thread_pool: config.run_in_background, }; + if config.run_in_background { let accounts = Arc::clone(accounts); let accounts_ = Arc::clone(&accounts); let ancestors = self.ancestors.clone(); let epoch_schedule = self.epoch_schedule().clone(); let rent_collector = self.rent_collector().clone(); + let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone(); + let duplicates_lt_hash = duplicates_lt_hash.cloned(); accounts.accounts_db.verify_accounts_hash_in_bg.start(|| { Builder::new() .name("solBgHashVerify".into()) .spawn(move || { info!("Initial background accounts hash verification has started"); + if is_accounts_lt_hash_enabled { + let accounts_db = &accounts_.accounts_db; + let (calculated_accounts_lt_hash, duration) = meas_dur!(accounts_db.thread_pool_hash.install(|| { + accounts_db + .calculate_accounts_lt_hash_at_startup_from_storages( + snapshot_storages.0.as_slice(), + &duplicates_lt_hash.unwrap(), + ) + })); + if calculated_accounts_lt_hash != expected_accounts_lt_hash { + error!( + "Verifying accounts lt hash failed: hashes do not match, expected: {}, calculated: {}", + expected_accounts_lt_hash.0.checksum(), + calculated_accounts_lt_hash.0.checksum(), + ); + return false; + } + datapoint_info!( + "startup_verify_accounts", + ("verify_accounts_lt_hash_us", duration.as_micros(), i64) + ); + } + let snapshot_storages_and_slots = ( snapshot_storages.0.as_slice(), snapshot_storages.1.as_slice(), @@ -5638,6 +5684,32 @@ impl Bank { }); true // initial result is true. We haven't failed yet. If verification fails, we'll panic from bg thread. } else { + if is_accounts_lt_hash_enabled { + let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone(); + let calculated_accounts_lt_hash = + if let Some(duplicates_lt_hash) = duplicates_lt_hash { + accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup_from_storages( + snapshot_storages.0.as_slice(), + duplicates_lt_hash, + ) + } else { + accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot) + }; + if calculated_accounts_lt_hash != expected_accounts_lt_hash { + error!( + "Verifying accounts lt hash failed: hashes do not match, expected: {}, calculated: {}", + expected_accounts_lt_hash.0.checksum(), + calculated_accounts_lt_hash.0.checksum(), + ); + return false; + } + // if we get here then the accounts lt hash is correct + } + let snapshot_storages_and_slots = ( snapshot_storages.0.as_slice(), snapshot_storages.1.as_slice(), @@ -5953,6 +6025,7 @@ impl Bank { force_clean: bool, latest_full_snapshot_slot: Slot, base: Option<(Slot, /*capitalization*/ u64)>, + duplicates_lt_hash: Option<&DuplicatesLtHash>, ) -> bool { let (_, clean_time_us) = measure_us!({ let should_clean = force_clean || (!skip_shrink && self.slot() > 0); @@ -6002,6 +6075,7 @@ impl Bank { run_in_background: true, store_hash_raw_data_for_debug: false, }, + duplicates_lt_hash, ); info!("Verifying accounts... 
In background."); verified diff --git a/runtime/src/bank/accounts_lt_hash.rs b/runtime/src/bank/accounts_lt_hash.rs index 372c65f08751a9..2fe0fceb3f3072 100644 --- a/runtime/src/bank/accounts_lt_hash.rs +++ b/runtime/src/bank/accounts_lt_hash.rs @@ -282,8 +282,18 @@ pub enum InitialStateOfAccount { mod tests { use { super::*, - crate::bank::tests::new_bank_from_parent_with_bank_forks, - solana_accounts_db::accounts::Accounts, + crate::{ + bank::tests::new_bank_from_parent_with_bank_forks, runtime_config::RuntimeConfig, + snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_utils, + }, + solana_accounts_db::{ + accounts::Accounts, + accounts_db::{ + AccountShrinkThreshold, AccountsDbConfig, DuplicatesLtHash, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + }, + accounts_index::AccountSecondaryIndexes, + }, solana_sdk::{ account::{ReadableAccount as _, WritableAccount as _}, fee_calculator::FeeRateGovernor, @@ -293,7 +303,8 @@ mod tests { signature::Signer as _, signer::keypair::Keypair, }, - std::{cmp, str::FromStr as _, sync::Arc}, + std::{cmp, collections::HashMap, ops::RangeFull, str::FromStr as _, sync::Arc}, + tempfile::TempDir, }; #[test] @@ -581,7 +592,7 @@ mod tests { } #[test] - fn test_calculate_accounts_lt_hash_at_startup() { + fn test_calculate_accounts_lt_hash_at_startup_from_index() { let (genesis_config, mint_keypair) = create_genesis_config(123_456_789 * LAMPORTS_PER_SOL); let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank.rc @@ -624,10 +635,190 @@ mod tests { .rc .accounts .accounts_db - .calculate_accounts_lt_hash_at_startup(&bank.ancestors, bank.slot()); + .calculate_accounts_lt_hash_at_startup_from_index(&bank.ancestors, bank.slot()); + assert_eq!(expected_accounts_lt_hash, calculated_accounts_lt_hash); + } - let expected = expected_accounts_lt_hash.0.checksum(); - let actual = calculated_accounts_lt_hash.0.checksum(); - assert_eq!(expected, actual, "expected: {expected}, actual: {actual}"); + #[test] + fn test_calculate_accounts_lt_hash_at_startup_from_storages() { + let (genesis_config, mint_keypair) = create_genesis_config(123_456_789 * LAMPORTS_PER_SOL); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + bank.rc + .accounts + .accounts_db + .set_is_experimental_accumulator_hash_enabled(true); + + // ensure the accounts lt hash is enabled, otherwise this test doesn't actually do anything... + assert!(bank.is_accounts_lt_hash_enabled()); + + let amount = cmp::max( + bank.get_minimum_balance_for_rent_exemption(0), + LAMPORTS_PER_SOL, + ); + + // Write to this pubkey multiple times, so there are guaranteed duplicates in the storages. 
+ let duplicate_pubkey = pubkey::new_rand(); + + // create some banks with some modified accounts so that there are stored accounts + // (note: the number of banks and transfers are arbitrary) + for _ in 0..7 { + let slot = bank.slot() + 1; + bank = + new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), slot); + for _ in 0..9 { + bank.register_unique_recent_blockhash_for_test(); + // note: use a random pubkey here to ensure accounts + // are spread across all the index bins + // (and calculating the accounts lt hash from storages requires no duplicates) + bank.transfer(amount, &mint_keypair, &pubkey::new_rand()) + .unwrap(); + + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &mint_keypair, &duplicate_pubkey) + .unwrap(); + } + + // flush the write cache each slot to ensure there are account duplicates in the storages + bank.squash(); + bank.force_flush_accounts_cache(); + } + let expected_accounts_lt_hash = bank.accounts_lt_hash.lock().unwrap().clone(); + + // go through the storages to find the duplicates + let (mut storages, _slots) = bank + .rc + .accounts + .accounts_db + .get_snapshot_storages(RangeFull); + // sort the storages in slot-descending order + // this makes skipping the latest easier + storages.sort_unstable_by_key(|storage| cmp::Reverse(storage.slot())); + let storages = storages.into_boxed_slice(); + + // get all the lt hashes for each version of all accounts + let mut stored_accounts_map = HashMap::<_, Vec<_>>::new(); + for storage in &storages { + storage.accounts.scan_accounts(|stored_account_meta| { + let pubkey = stored_account_meta.pubkey(); + let account_lt_hash = AccountsDb::lt_hash_account(&stored_account_meta, pubkey); + stored_accounts_map + .entry(*pubkey) + .or_default() + .push(account_lt_hash) + }); + } + + // calculate the duplicates lt hash by skipping the first version (latest) of each account, + // and then mixing together all the rest + let duplicates_lt_hash = stored_accounts_map + .values() + .map(|lt_hashes| { + // the first element in the vec is the latest; all the rest are duplicates + <_hashes[1..] + }) + .fold(LtHash::identity(), |mut accum, duplicate_lt_hashes| { + for duplicate_lt_hash in duplicate_lt_hashes { + accum.mix_in(&duplicate_lt_hash.0); + } + accum + }); + let duplicates_lt_hash = DuplicatesLtHash(duplicates_lt_hash); + + // ensure that calculating the accounts lt hash from storages is correct + let calculated_accounts_lt_hash_from_storages = bank + .rc + .accounts + .accounts_db + .calculate_accounts_lt_hash_at_startup_from_storages(&storages, &duplicates_lt_hash); + assert_eq!( + expected_accounts_lt_hash, + calculated_accounts_lt_hash_from_storages + ); + } + + #[test] + fn test_verify_accounts_lt_hash_at_startup() { + let (genesis_config, mint_keypair) = create_genesis_config(123_456_789 * LAMPORTS_PER_SOL); + let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + bank.rc + .accounts + .accounts_db + .set_is_experimental_accumulator_hash_enabled(true); + + // ensure the accounts lt hash is enabled, otherwise this test doesn't actually do anything... + assert!(bank.is_accounts_lt_hash_enabled()); + + let amount = cmp::max( + bank.get_minimum_balance_for_rent_exemption(0), + LAMPORTS_PER_SOL, + ); + + // Write to this pubkey multiple times, so there are guaranteed duplicates in the storages. 
+ let duplicate_pubkey = pubkey::new_rand(); + + // create some banks with some modified accounts so that there are stored accounts + // (note: the number of banks and transfers are arbitrary) + for _ in 0..9 { + let slot = bank.slot() + 1; + bank = + new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), slot); + for _ in 0..3 { + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &mint_keypair, &pubkey::new_rand()) + .unwrap(); + bank.register_unique_recent_blockhash_for_test(); + bank.transfer(amount, &mint_keypair, &duplicate_pubkey) + .unwrap(); + } + + // flush the write cache to disk to ensure there are duplicates across the storages + bank.fill_bank_with_ticks_for_tests(); + bank.squash(); + bank.force_flush_accounts_cache(); + } + + // verification happens at startup, so mimic the behavior by loading from a snapshot + let snapshot_config = SnapshotConfig::default(); + let bank_snapshots_dir = TempDir::new().unwrap(); + let snapshot_archives_dir = TempDir::new().unwrap(); + let snapshot = snapshot_bank_utils::bank_to_full_snapshot_archive( + &bank_snapshots_dir, + &bank, + Some(snapshot_config.snapshot_version), + &snapshot_archives_dir, + &snapshot_archives_dir, + snapshot_config.archive_format, + ) + .unwrap(); + let (_accounts_tempdir, accounts_dir) = snapshot_utils::create_tmp_accounts_dir_for_tests(); + let accounts_db_config = AccountsDbConfig { + enable_experimental_accumulator_hash: true, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let (roundtrip_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + &[accounts_dir], + &bank_snapshots_dir, + &snapshot, + None, + &genesis_config, + &RuntimeConfig::default(), + None, + None, + AccountSecondaryIndexes::default(), + None, + AccountShrinkThreshold::default(), + false, + false, + false, + false, + Some(accounts_db_config), + None, + Arc::default(), + ) + .unwrap(); + + // Wait for the startup verification to complete. If we don't panic, then we're good! 
+ roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); + assert_eq!(roundtrip_bank, *bank); } } diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index a088979e7bf429..72c97f7acc8999 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -238,7 +238,7 @@ mod tests { full_snapshot_stream: &mut reader, incremental_snapshot_stream: None, }; - let dbank = serde_snapshot::bank_from_streams( + let (dbank, _) = serde_snapshot::bank_from_streams( &mut snapshot_streams, &dbank_paths, storage_and_next_append_vec_id, @@ -352,7 +352,7 @@ mod tests { storage_access, ) .unwrap(); - let dbank = crate::serde_snapshot::bank_from_streams( + let (dbank, _) = crate::serde_snapshot::bank_from_streams( &mut snapshot_streams, &dbank_paths, storage_and_next_append_vec_id, @@ -487,7 +487,7 @@ mod tests { storage_access, ) .unwrap(); - let dbank = crate::serde_snapshot::bank_from_streams( + let (dbank, _) = crate::serde_snapshot::bank_from_streams( &mut snapshot_streams, &dbank_paths, storage_and_next_append_vec_id, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 13fb573bc8afad..454aa32215de11 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -2220,7 +2220,11 @@ fn test_purge_empty_accounts() { if pass == 0 { add_root_and_flush_write_cache(&bank0); - assert!(bank0.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank0.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); continue; } @@ -2229,7 +2233,11 @@ fn test_purge_empty_accounts() { bank0.squash(); add_root_and_flush_write_cache(&bank0); if pass == 1 { - assert!(bank0.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank0.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); continue; } @@ -2237,7 +2245,11 @@ fn test_purge_empty_accounts() { bank1.squash(); add_root_and_flush_write_cache(&bank1); bank1.update_accounts_hash_for_tests(); - assert!(bank1.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank1.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); // keypair should have 0 tokens on both forks assert_eq!(bank0.get_account(&keypair.pubkey()), None); @@ -2245,7 +2257,11 @@ fn test_purge_empty_accounts() { bank1.clean_accounts_for_tests(); - assert!(bank1.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank1.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); } } @@ -3440,7 +3456,7 @@ fn test_bank_hash_internal_state() { add_root_and_flush_write_cache(&bank1); add_root_and_flush_write_cache(&bank2); bank2.update_accounts_hash_for_tests(); - assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test(), None,)); } #[test] @@ -3475,7 +3491,11 @@ fn test_bank_hash_internal_state_verify() { // we later modify bank 2, so this flush is destructive to the test add_root_and_flush_write_cache(&bank2); bank2.update_accounts_hash_for_tests(); - assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank2.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); } let bank3 = new_bank_from_parent_with_bank_forks( bank_forks.as_ref(), 
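All of these call-site updates pass `None` for the new `duplicates_lt_hash` argument, which exercises the fallback described earlier: without the duplicates info, the accounts lt hash cannot be derived from the storages, so it is recomputed from the index, in the foreground. A condensed sketch of that strategy selection follows; it is a hypothetical helper distilled from the `verify_accounts_hash` hunk, not the real method:

    enum LtHashStrategy {
        FromStorages, // needs the DuplicatesLtHash; may run in the background
        FromIndex,    // needs only the index; must run in the foreground
    }

    fn pick_strategy(has_duplicates_lt_hash: bool, run_in_background: &mut bool) -> LtHashStrategy {
        if has_duplicates_lt_hash {
            // Snapshot startup path: index generation supplied the duplicates.
            LtHashStrategy::FromStorages
        } else {
            // Tests, and the retry-on-parent path (whose duplicates info would
            // be stale), fall back to the index and disable background mode.
            *run_in_background = false;
            LtHashStrategy::FromIndex
        }
    }

    fn main() {
        let mut background = true;
        assert!(matches!(pick_strategy(true, &mut background), LtHashStrategy::FromStorages));
        assert!(background);
        assert!(matches!(pick_strategy(false, &mut background), LtHashStrategy::FromIndex));
        assert!(!background);
    }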
@@ -3486,7 +3506,11 @@ fn test_bank_hash_internal_state_verify() { assert_eq!(bank0_state, bank0.hash_internal_state()); if pass == 0 { // this relies on us having set the bank hash in the pass==0 if above - assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank2.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); continue; } if pass == 1 { @@ -3495,7 +3519,11 @@ fn test_bank_hash_internal_state_verify() { // Doing so throws an assert. So, we can't flush 3 until 2 is flushed. add_root_and_flush_write_cache(&bank3); bank3.update_accounts_hash_for_tests(); - assert!(bank3.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank3.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); continue; } @@ -3504,10 +3532,18 @@ fn test_bank_hash_internal_state_verify() { bank2.transfer(amount, &mint_keypair, &pubkey2).unwrap(); add_root_and_flush_write_cache(&bank2); bank2.update_accounts_hash_for_tests(); - assert!(bank2.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank2.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); add_root_and_flush_write_cache(&bank3); bank3.update_accounts_hash_for_tests(); - assert!(bank3.verify_accounts_hash(None, VerifyAccountsHashConfig::default_for_test())); + assert!(bank3.verify_accounts_hash( + None, + VerifyAccountsHashConfig::default_for_test(), + None, + )); } } @@ -3533,11 +3569,11 @@ fn test_verify_snapshot_bank() { bank.freeze(); add_root_and_flush_write_cache(&bank); bank.update_accounts_hash_for_tests(); - assert!(bank.verify_snapshot_bank(true, false, false, bank.slot(), None)); + assert!(bank.verify_snapshot_bank(true, false, false, bank.slot(), None, None,)); // tamper the bank after freeze! bank.increment_signature_count(1); - assert!(!bank.verify_snapshot_bank(true, false, false, bank.slot(), None)); + assert!(!bank.verify_snapshot_bank(true, false, false, bank.slot(), None, None,)); } // Test that two bank forks with the same accounts should not hash to the same value. 
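The serde_snapshot and snapshot_bank_utils hunks that follow are all variations of one pattern: each layer of the snapshot-rebuild pipeline grows a small side-info struct so that the `duplicates_lt_hash` produced once by index generation can travel up to the final verification call instead of being recomputed or stashed in a global. A skeletal sketch of that shape, with stand-in types (the real structs carry more fields and error handling):

    // Stand-ins for the real types; only the threading pattern matters here.
    struct DuplicatesLtHash(u64);

    struct ReconstructedAccountsDbInfo {
        duplicates_lt_hash: Box<DuplicatesLtHash>,
    }

    struct Bank;

    fn generate_index() -> ReconstructedAccountsDbInfo {
        ReconstructedAccountsDbInfo {
            duplicates_lt_hash: Box::new(DuplicatesLtHash(42)),
        }
    }

    fn bank_from_streams() -> (Bank, ReconstructedAccountsDbInfo) {
        // Deserializing the snapshot rebuilds the accounts db, which runs
        // index generation and reports the duplicates as side-info.
        (Bank, generate_index())
    }

    fn bank_from_snapshot_archives() -> Bank {
        let (bank, info) = bank_from_streams();
        // The side-info reaches verification without being recomputed.
        verify_snapshot_bank(&bank, Some(&info.duplicates_lt_hash));
        bank
    }

    fn verify_snapshot_bank(_bank: &Bank, duplicates_lt_hash: Option<&DuplicatesLtHash>) {
        assert!(duplicates_lt_hash.is_some());
    }

    fn main() {
        let _bank = bank_from_snapshot_archives();
    }

Returning `(value, side_info)` tuples keeps the data flow explicit at every layer, at the cost of touching each intermediate signature, which is why so many call sites change in this commit.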
@@ -12157,6 +12193,7 @@ fn test_bank_verify_accounts_hash_with_base() { test_hash_calculation: false, ..VerifyAccountsHashConfig::default_for_test() }, + None, )); } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index e12b9c5ec124a7..0b299ea5d42185 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -20,7 +20,8 @@ use { accounts::Accounts, accounts_db::{ stats::BankHashStats, AccountShrinkThreshold, AccountStorageEntry, AccountsDb, - AccountsDbConfig, AccountsFileId, AtomicAccountsFileId, IndexGenerationInfo, + AccountsDbConfig, AccountsFileId, AtomicAccountsFileId, DuplicatesLtHash, + IndexGenerationInfo, }, accounts_file::{AccountsFile, StorageAccess}, accounts_hash::{AccountsDeltaHash, AccountsHash}, @@ -545,6 +546,12 @@ pub(crate) fn fields_from_streams( Ok((snapshot_bank_fields, snapshot_accounts_db_fields)) } +/// This struct contains side-info while reconstructing the bank from streams +#[derive(Debug)] +pub struct BankFromStreamsInfo { + pub duplicates_lt_hash: Box, +} + #[allow(clippy::too_many_arguments)] pub(crate) fn bank_from_streams( snapshot_streams: &mut SnapshotStreams, @@ -561,12 +568,12 @@ pub(crate) fn bank_from_streams( accounts_db_config: Option, accounts_update_notifier: Option, exit: Arc, -) -> std::result::Result +) -> std::result::Result<(Bank, BankFromStreamsInfo), Error> where R: Read, { let (bank_fields, accounts_db_fields) = fields_from_streams(snapshot_streams)?; - reconstruct_bank_from_fields( + let (bank, info) = reconstruct_bank_from_fields( bank_fields, accounts_db_fields, genesis_config, @@ -582,7 +589,13 @@ where accounts_db_config, accounts_update_notifier, exit, - ) + )?; + Ok(( + bank, + BankFromStreamsInfo { + duplicates_lt_hash: info.duplicates_lt_hash, + }, + )) } #[cfg(test)] @@ -826,6 +839,12 @@ impl<'a> Serialize for SerializableAccountsDb<'a> { #[cfg(feature = "frozen-abi")] impl<'a> solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountsDb<'a> {} +/// This struct contains side-info while reconstructing the bank from fields +#[derive(Debug)] +struct ReconstructedBankInfo { + duplicates_lt_hash: Box, +} + #[allow(clippy::too_many_arguments)] fn reconstruct_bank_from_fields( bank_fields: SnapshotBankFields, @@ -843,7 +862,7 @@ fn reconstruct_bank_from_fields( accounts_db_config: Option, accounts_update_notifier: Option, exit: Arc, -) -> Result +) -> Result<(Bank, ReconstructedBankInfo), Error> where E: SerializableStorage + std::marker::Sync, { @@ -890,7 +909,12 @@ where info!("rent_collector: {:?}", bank.rent_collector()); - Ok(bank) + Ok(( + bank, + ReconstructedBankInfo { + duplicates_lt_hash: reconstructed_accounts_db_info.duplicates_lt_hash, + }, + )) } pub(crate) fn reconstruct_single_storage( @@ -1010,9 +1034,10 @@ pub(crate) fn remap_and_reconstruct_single_storage( } /// This struct contains side-info while reconstructing the accounts DB from fields. 
-#[derive(Debug, Default, Copy, Clone)] +#[derive(Debug, Default, Clone)] pub struct ReconstructedAccountsDbInfo { pub accounts_data_len: u64, + pub duplicates_lt_hash: Box, } #[allow(clippy::too_many_arguments)] @@ -1220,6 +1245,7 @@ where let IndexGenerationInfo { accounts_data_len, rent_paying_accounts_by_partition, + duplicates_lt_hash, } = accounts_db.generate_index( limit_load_slot_count_from_snapshot, verify_index, @@ -1241,7 +1267,10 @@ where Ok(( Arc::try_unwrap(accounts_db).unwrap(), - ReconstructedAccountsDbInfo { accounts_data_len }, + ReconstructedAccountsDbInfo { + accounts_data_len, + duplicates_lt_hash, + }, )) } diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index cc65a12f9c63c9..2547935c52841d 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -28,7 +28,7 @@ use { solana_accounts_db::{ accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId, - CalcAccountsHashDataSource, + CalcAccountsHashDataSource, DuplicatesLtHash, }, accounts_file::StorageAccess, accounts_index::AccountSecondaryIndexes, @@ -180,7 +180,7 @@ pub fn bank_from_snapshot_archives( }; let mut measure_rebuild = Measure::start("rebuild bank from snapshots"); - let bank = rebuild_bank_from_unarchived_snapshots( + let (bank, info) = rebuild_bank_from_unarchived_snapshots( &unarchived_full_snapshot.unpacked_snapshots_dir_and_version, unarchived_incremental_snapshot .as_ref() @@ -235,6 +235,7 @@ pub fn bank_from_snapshot_archives( accounts_db_force_initial_clean, full_snapshot_archive_info.slot(), base, + Some(&info.duplicates_lt_hash), ) && limit_load_slot_count_from_snapshot.is_none() { panic!("Snapshot bank for slot {} failed to verify", bank.slot()); @@ -386,7 +387,7 @@ pub fn bank_from_snapshot_dir( storage, next_append_vec_id, }; - let (bank, measure_rebuild_bank) = measure_time!( + let ((bank, _info), measure_rebuild_bank) = measure_time!( rebuild_bank_from_snapshot( bank_snapshot, account_paths, @@ -544,6 +545,12 @@ fn deserialize_status_cache( }) } +/// This struct contains side-info from rebuilding the bank +#[derive(Debug)] +struct RebuiltBankInfo { + duplicates_lt_hash: Box, +} + #[allow(clippy::too_many_arguments)] fn rebuild_bank_from_unarchived_snapshots( full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, @@ -563,7 +570,7 @@ fn rebuild_bank_from_unarchived_snapshots( accounts_db_config: Option, accounts_update_notifier: Option, exit: Arc, -) -> snapshot_utils::Result { +) -> snapshot_utils::Result<(Bank, RebuiltBankInfo)> { let (full_snapshot_version, full_snapshot_root_paths) = verify_unpacked_snapshots_dir_and_version( full_snapshot_unpacked_snapshots_dir_and_version, @@ -593,7 +600,7 @@ fn rebuild_bank_from_unarchived_snapshots( .map(|root_paths| root_paths.snapshot_path()), }; - let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { + let (bank, info) = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { Ok( match incremental_snapshot_version.unwrap_or(full_snapshot_version) { SnapshotVersion::V1_2_0 => bank_from_streams( @@ -641,7 +648,12 @@ fn rebuild_bank_from_unarchived_snapshots( bank.status_cache.write().unwrap().append(&slot_deltas); info!("Rebuilt bank for slot: {}", bank.slot()); - Ok(bank) + Ok(( + bank, + RebuiltBankInfo { + duplicates_lt_hash: info.duplicates_lt_hash, + }, + )) } #[allow(clippy::too_many_arguments)] @@ -660,7 +672,7 @@ fn rebuild_bank_from_snapshot( 
accounts_db_config: Option, accounts_update_notifier: Option, exit: Arc, -) -> snapshot_utils::Result { +) -> snapshot_utils::Result<(Bank, RebuiltBankInfo)> { info!( "Rebuilding bank from snapshot {}", bank_snapshot.snapshot_dir.display(), @@ -671,7 +683,7 @@ fn rebuild_bank_from_snapshot( incremental_snapshot_root_file_path: None, }; - let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { + let (bank, info) = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| { Ok(bank_from_streams( snapshot_streams, account_paths, @@ -702,7 +714,12 @@ fn rebuild_bank_from_snapshot( bank.status_cache.write().unwrap().append(&slot_deltas); info!("Rebuilt bank for slot: {}", bank.slot()); - Ok(bank) + Ok(( + bank, + RebuiltBankInfo { + duplicates_lt_hash: info.duplicates_lt_hash, + }, + )) } /// Verify that the snapshot's slot deltas are not corrupt/invalid From 715f49fc94f4405f9f0f51f03586f1800e78b996 Mon Sep 17 00:00:00 2001 From: Brooks Date: Mon, 14 Oct 2024 14:51:10 -0400 Subject: [PATCH 498/529] Adds more metrics for startup accounts verification (#3158) --- runtime/src/bank.rs | 54 +++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 60347c5dde4e00..6167e603f63e4e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5634,49 +5634,61 @@ impl Bank { .name("solBgHashVerify".into()) .spawn(move || { info!("Initial background accounts hash verification has started"); + let start = Instant::now(); + let mut accounts_lt_hash_time = None; if is_accounts_lt_hash_enabled { let accounts_db = &accounts_.accounts_db; - let (calculated_accounts_lt_hash, duration) = meas_dur!(accounts_db.thread_pool_hash.install(|| { - accounts_db - .calculate_accounts_lt_hash_at_startup_from_storages( + let (calculated_accounts_lt_hash, duration) = + meas_dur!(accounts_db.thread_pool_hash.install(|| { + accounts_db.calculate_accounts_lt_hash_at_startup_from_storages( snapshot_storages.0.as_slice(), &duplicates_lt_hash.unwrap(), ) - })); + })); if calculated_accounts_lt_hash != expected_accounts_lt_hash { error!( - "Verifying accounts lt hash failed: hashes do not match, expected: {}, calculated: {}", + "Verifying accounts lt hash failed: hashes do not match, \ + expected: {}, calculated: {}", expected_accounts_lt_hash.0.checksum(), calculated_accounts_lt_hash.0.checksum(), ); return false; } - datapoint_info!( - "startup_verify_accounts", - ("verify_accounts_lt_hash_us", duration.as_micros(), i64) - ); + accounts_lt_hash_time = Some(duration); } let snapshot_storages_and_slots = ( snapshot_storages.0.as_slice(), snapshot_storages.1.as_slice(), ); - let result = accounts_.verify_accounts_hash_and_lamports( - snapshot_storages_and_slots, - slot, - capitalization, - base, - VerifyAccountsHashAndLamportsConfig { - ancestors: &ancestors, - epoch_schedule: &epoch_schedule, - rent_collector: &rent_collector, - ..verify_config - }, - ); + let (result, accounts_hash_time) = meas_dur!(accounts_ + .verify_accounts_hash_and_lamports( + snapshot_storages_and_slots, + slot, + capitalization, + base, + VerifyAccountsHashAndLamportsConfig { + ancestors: &ancestors, + epoch_schedule: &epoch_schedule, + rent_collector: &rent_collector, + ..verify_config + }, + )); accounts_ .accounts_db .verify_accounts_hash_in_bg .background_finished(); + let total_time = start.elapsed(); + datapoint_info!( + "startup_verify_accounts", + ("total_us", total_time.as_micros(), i64), + ( + 
"verify_accounts_lt_hash_us", + accounts_lt_hash_time.as_ref().map(Duration::as_micros), + Option + ), + ("verify_accounts_hash_us", accounts_hash_time.as_micros(), i64), + ); info!("Initial background accounts hash verification has stopped"); result }) From e8f7cc55b1f6f71ecb460713a84297ce8a26880b Mon Sep 17 00:00:00 2001 From: Brooks Date: Tue, 15 Oct 2024 00:09:25 -0400 Subject: [PATCH 499/529] Marks Bank::unfreeze_for_ledger_tool() as dcou (#3160) --- runtime/src/bank.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6167e603f63e4e..6770010b5d1b84 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2911,6 +2911,7 @@ impl Bank { } // dangerous; don't use this; this is only needed for ledger-tool's special command + #[cfg(feature = "dev-context-only-utils")] pub fn unfreeze_for_ledger_tool(&self) { self.freeze_started.store(false, Relaxed); } From 5bcf161e096384cf74a1870824f852b467f434ef Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:37:48 -0700 Subject: [PATCH 500/529] Support notifying bank created slot status in geyser (#3126) * Support notifying bank created slot status in geyser * Updated comment for CreatedBank slot event --- core/src/replay_stage.rs | 25 +++++++++++++++++++ core/src/tvu.rs | 3 ++- .../src/geyser_plugin_interface.rs | 4 +++ .../src/slot_status_notifier.rs | 4 +++ rpc/src/slot_status_notifier.rs | 3 +++ 5 files changed, 38 insertions(+), 1 deletion(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index db0ee5aff30d53..a10017c3dcd1c6 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -54,6 +54,7 @@ use { solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, rpc_subscriptions::RpcSubscriptions, + slot_status_notifier::SlotStatusNotifier, }, solana_rpc_client_api::response::SlotUpdate, solana_runtime::{ @@ -251,6 +252,7 @@ pub struct ReplayStageConfig { pub authorized_voter_keypairs: Arc>>>, pub exit: Arc, pub rpc_subscriptions: Arc, + pub slot_status_notifier: Option, pub leader_schedule_cache: Arc, pub accounts_background_request_sender: AbsRequestSender, pub block_commitment_cache: Arc>, @@ -537,6 +539,7 @@ impl ReplayStage { authorized_voter_keypairs, exit, rpc_subscriptions, + slot_status_notifier, leader_schedule_cache, accounts_background_request_sender, block_commitment_cache, @@ -668,6 +671,7 @@ impl ReplayStage { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &slot_status_notifier, &mut progress, &mut replay_timing, ); @@ -1122,6 +1126,7 @@ impl ReplayStage { &poh_recorder, &leader_schedule_cache, &rpc_subscriptions, + &slot_status_notifier, &mut progress, &retransmit_slots_sender, &mut skipped_slots_info, @@ -2052,6 +2057,7 @@ impl ReplayStage { poh_recorder: &Arc>, leader_schedule_cache: &Arc, rpc_subscriptions: &Arc, + slot_status_notifier: &Option, progress_map: &mut ProgressMap, retransmit_slots_sender: &Sender, skipped_slots_info: &mut SkippedSlotsInfo, @@ -2181,6 +2187,7 @@ impl ReplayStage { root_slot, my_pubkey, rpc_subscriptions, + slot_status_notifier, NewBankOptions { vote_only_bank }, ); // make sure parent is frozen for finalized hashes via the above @@ -3960,6 +3967,7 @@ impl ReplayStage { bank_forks: &RwLock, leader_schedule_cache: &Arc, rpc_subscriptions: &Arc, + slot_status_notifier: &Option, progress: &mut ProgressMap, replay_timing: &mut ReplayLoopTiming, ) { @@ -4014,6 +4022,7 @@ impl ReplayStage { forks.root(), 
&leader, rpc_subscriptions, + slot_status_notifier, NewBankOptions::default(), ); let empty: Vec<Pubkey> = vec![]; @@ -4061,9 +4070,16 @@ impl ReplayStage { root_slot: u64, leader: &Pubkey, rpc_subscriptions: &Arc<RpcSubscriptions>, + slot_status_notifier: &Option<SlotStatusNotifier>, new_bank_options: NewBankOptions, ) -> Bank { rpc_subscriptions.notify_slot(slot, parent.slot(), root_slot); + if let Some(slot_status_notifier) = slot_status_notifier { + slot_status_notifier + .read() + .unwrap() + .notify_created_bank(slot, parent.slot()); + } Bank::new_from_parent_with_options(parent, leader, slot, new_bank_options) } @@ -4410,6 +4426,7 @@ pub(crate) mod tests { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &mut replay_timing, ); @@ -4438,6 +4455,7 @@ pub(crate) mod tests { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &mut replay_timing, ); @@ -6307,6 +6325,7 @@ pub(crate) mod tests { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &mut replay_timing, ); @@ -6336,6 +6355,7 @@ pub(crate) mod tests { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &mut replay_timing, ); @@ -6366,6 +6386,7 @@ pub(crate) mod tests { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &mut replay_timing, ); @@ -6395,6 +6416,7 @@ pub(crate) mod tests { &bank_forks, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &mut replay_timing, ); @@ -8329,6 +8351,7 @@ pub(crate) mod tests { &poh_recorder, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &retransmit_slots_sender, &mut SkippedSlotsInfo::default(), @@ -8997,6 +9020,7 @@ pub(crate) mod tests { &poh_recorder, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &retransmit_slots_sender, &mut SkippedSlotsInfo::default(), @@ -9023,6 +9047,7 @@ pub(crate) mod tests { &poh_recorder, &leader_schedule_cache, &rpc_subscriptions, + &None, &mut progress, &retransmit_slots_sender, &mut SkippedSlotsInfo::default(), diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 23e5d9b6562451..68b7cf38023505 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -211,7 +211,7 @@ impl Tvu { retransmit_receiver, max_slots.clone(), Some(rpc_subscriptions.clone()), - slot_status_notifier, + slot_status_notifier.clone(), ); let (ancestor_duplicate_slots_sender, ancestor_duplicate_slots_receiver) = unbounded(); @@ -274,6 +274,7 @@ impl Tvu { authorized_voter_keypairs, exit: exit.clone(), rpc_subscriptions: rpc_subscriptions.clone(), + slot_status_notifier, leader_schedule_cache: leader_schedule_cache.clone(), accounts_background_request_sender, block_commitment_cache, diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index 97271310a99f5f..3f4394a84bae8e 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -325,6 +325,9 @@ pub enum SlotStatus { /// All shreds for the slot have been received.
Completed, + + /// A new bank fork is created with the slot + CreatedBank, } impl SlotStatus { @@ -335,6 +338,7 @@ impl SlotStatus { SlotStatus::Rooted => "rooted", SlotStatus::FirstShredReceived => "first_shread_received", SlotStatus::Completed => "completed", + SlotStatus::CreatedBank => "created_bank", } } } diff --git a/geyser-plugin-manager/src/slot_status_notifier.rs b/geyser-plugin-manager/src/slot_status_notifier.rs index 573ed97d7787af..27dcd03dac28f3 100644 --- a/geyser-plugin-manager/src/slot_status_notifier.rs +++ b/geyser-plugin-manager/src/slot_status_notifier.rs @@ -33,6 +33,10 @@ impl SlotStatusNotifierInterface for SlotStatusNotifierImpl { fn notify_completed(&self, slot: Slot) { self.notify_slot_status(slot, None, SlotStatus::Completed); } + + fn notify_created_bank(&self, slot: Slot, parent: Slot) { + self.notify_slot_status(slot, Some(parent), SlotStatus::CreatedBank); + } } impl SlotStatusNotifierImpl { diff --git a/rpc/src/slot_status_notifier.rs b/rpc/src/slot_status_notifier.rs index 97a84da42f33bf..38e9bf60a6e091 100644 --- a/rpc/src/slot_status_notifier.rs +++ b/rpc/src/slot_status_notifier.rs @@ -18,6 +18,9 @@ pub trait SlotStatusNotifierInterface { /// Notified when the slot is completed. fn notify_completed(&self, slot: Slot); + + /// Notified when the slot has bank created. + fn notify_created_bank(&self, slot: Slot, parent: Slot); } pub type SlotStatusNotifier = Arc<RwLock<dyn SlotStatusNotifierInterface + Sync + Send>>; From eaec893942c1da53b99ed90098892d9912dc27e7 Mon Sep 17 00:00:00 2001 From: samkim-crypto Date: Tue, 15 Oct 2024 16:45:25 +0900 Subject: [PATCH 501/529] [zk-sdk] Expose `ElGamalPubkey` and `ElGamalKeypair` as wasm (#2996) * add wasm dependencies * expose `ElGamalPubkey` and `ElGamalKeypair` via `wasm_bindgen` * depend on `wasm-bindgen` only when target is `wasm32` * remove `profile.release` changes * add comments on the reason for specific exclusion of dependencies * re-organize type associated functions depending on target * re-organize type associated functions for grouped elgamal depending on target * Move wasm impl into PodElGamalPubkey --------- Co-authored-by: Jon C --- Cargo.lock | 2 + programs/sbf/Cargo.lock | 2 + zk-sdk/Cargo.toml | 4 + zk-sdk/src/encryption/auth_encryption.rs | 53 +++++--- zk-sdk/src/encryption/elgamal.rs | 155 ++++++++++++++--------- zk-sdk/src/encryption/grouped_elgamal.rs | 63 +++++---- zk-sdk/src/encryption/mod.rs | 2 +- zk-sdk/src/encryption/pod/elgamal.rs | 74 +++++++++++ zk-sdk/src/lib.rs | 1 + 9 files changed, 247 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8be26642cb791d..a138ada09979c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8740,6 +8740,7 @@ dependencies = [ "bytemuck_derive", "curve25519-dalek 4.1.3", "itertools 0.12.1", + "js-sys", "lazy_static", "merlin", "num-derive", @@ -8755,6 +8756,7 @@ dependencies = [ "subtle", "thiserror", "tiny-bip39", + "wasm-bindgen", "zeroize", ] diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c77621fa74892a..2b2a17b0978b2f 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -7220,6 +7220,7 @@ dependencies = [ "bytemuck_derive", "curve25519-dalek 4.1.3", "itertools 0.12.1", + "js-sys", "lazy_static", "merlin", "num-derive", @@ -7234,6 +7235,7 @@ dependencies = [ "solana-sdk", "subtle", "thiserror", + "wasm-bindgen", "zeroize", ] diff --git a/zk-sdk/Cargo.toml b/zk-sdk/Cargo.toml index 5a1ff83a620b9c..8dc8ffc18029fe 100644 --- a/zk-sdk/Cargo.toml +++ b/zk-sdk/Cargo.toml @@ -38,6 +38,10 @@ solana-sdk = { workspace = true } subtle = { workspace = true }
zeroize = { workspace = true, features = ["zeroize_derive"] } +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = { workspace = true } +wasm-bindgen = { workspace = true } + [lib] crate-type = ["cdylib", "rlib"] diff --git a/zk-sdk/src/encryption/auth_encryption.rs b/zk-sdk/src/encryption/auth_encryption.rs index 14c145decb6736..15ed58069b80ca 100644 --- a/zk-sdk/src/encryption/auth_encryption.rs +++ b/zk-sdk/src/encryption/auth_encryption.rs @@ -13,7 +13,17 @@ use { }, base64::{prelude::BASE64_STANDARD, Engine}, rand::{rngs::OsRng, Rng}, - sha3::{Digest, Sha3_512}, + std::{convert::TryInto, fmt}, + zeroize::Zeroize, +}; +// Currently, `wasm_bindgen` exports types and functions included in the current crate, but all +// types and functions exported for wasm targets in all of its dependencies +// (https://github.com/rustwasm/wasm-bindgen/issues/3759). We specifically exclude some of the +// dependencies that will cause unnecessary bloat to the wasm binary. +#[cfg(not(target_arch = "wasm32"))] +use { + sha3::Digest, + sha3::Sha3_512, solana_derivation_path::DerivationPath, solana_sdk::{ signature::Signature, signer::{ keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, EncodableKeypair, SeedDerivable, Signer, SignerError, }, }, std::{ - convert::TryInto, - error, fmt, + error, io::{Read, Write}, }, subtle::ConstantTimeEq, - zeroize::Zeroize, }; /// Byte length of an authenticated encryption nonce component @@ -82,6 +90,25 @@ impl AuthenticatedEncryption { #[derive(Debug, Zeroize, Eq, PartialEq)] pub struct AeKey([u8; AE_KEY_LEN]); +impl AeKey { + /// Generates a random authenticated encryption key. + /// + /// This function is randomized. It internally samples a scalar element using `OsRng`. + pub fn new_rand() -> Self { + AuthenticatedEncryption::keygen() + } + + /// Encrypts an amount under the authenticated encryption key. + pub fn encrypt(&self, amount: u64) -> AeCiphertext { + AuthenticatedEncryption::encrypt(self, amount) + } + + pub fn decrypt(&self, ciphertext: &AeCiphertext) -> Option<u64> { + AuthenticatedEncryption::decrypt(self, ciphertext) + } +} + +#[cfg(not(target_arch = "wasm32"))] impl AeKey { /// Deterministically derives an authenticated encryption key from a Solana signer and a public /// seed. @@ -130,24 +157,9 @@ impl AeKey { result.to_vec() } - - /// Generates a random authenticated encryption key. - /// - /// This function is randomized. It internally samples a scalar element using `OsRng`. - pub fn new_rand() -> Self { - AuthenticatedEncryption::keygen() - } - - /// Encrypts an amount under the authenticated encryption key. - pub fn encrypt(&self, amount: u64) -> AeCiphertext { - AuthenticatedEncryption::encrypt(self, amount) - } - - pub fn decrypt(&self, ciphertext: &AeCiphertext) -> Option<u64> { - AuthenticatedEncryption::decrypt(self, ciphertext) - } } +#[cfg(not(target_arch = "wasm32"))] impl EncodableKey for AeKey { fn read<R: Read>(reader: &mut R) -> Result<Self, Box<dyn error::Error>> { let bytes: [u8; AE_KEY_LEN] = serde_json::from_reader(reader)?; @@ -162,6 +174,7 @@ impl EncodableKey for AeKey { } } +#[cfg(not(target_arch = "wasm32"))] impl SeedDerivable for AeKey { fn from_seed(seed: &[u8]) -> Result<Self, Box<dyn error::Error>> { const MINIMUM_SEED_LEN: usize = AE_KEY_LEN; diff --git a/zk-sdk/src/encryption/elgamal.rs b/zk-sdk/src/encryption/elgamal.rs index 850db36329c715..9df73d6b199b85 100644 --- a/zk-sdk/src/encryption/elgamal.rs +++ b/zk-sdk/src/encryption/elgamal.rs @@ -14,10 +14,33 @@ //! As the messages are encrypted as scalar elements (a.k.a. in the "exponent"), one must solve the //! discrete log to recover the originally encrypted value.
+#[cfg(target_arch = "wasm32")] +use wasm_bindgen::prelude::*; +// Currently, `wasm_bindgen` exports types and functions included in the current crate, but all +// types and functions exported for wasm targets in all of its dependencies +// (https://github.com/rustwasm/wasm-bindgen/issues/3759). We specifically exclude some of the +// dependencies that will cause unnecessary bloat to the wasm binary. +#[cfg(not(target_arch = "wasm32"))] +use { + crate::encryption::discrete_log::DiscreteLog, + sha3::Digest, + solana_derivation_path::DerivationPath, + solana_sdk::{ + signature::Signature, + signer::{ + keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, EncodableKeypair, + SeedDerivable, Signer, SignerError, + }, + }, + std::{ + error, + io::{Read, Write}, + path::Path, + }, +}; use { crate::{ encryption::{ - discrete_log::DiscreteLog, pedersen::{Pedersen, PedersenCommitment, PedersenOpening, G, H}, DECRYPT_HANDLE_LEN, ELGAMAL_CIPHERTEXT_LEN, ELGAMAL_KEYPAIR_LEN, ELGAMAL_PUBKEY_LEN, ELGAMAL_SECRET_KEY_LEN, PEDERSEN_COMMITMENT_LEN, @@ -33,21 +56,8 @@ use { }, rand::rngs::OsRng, serde::{Deserialize, Serialize}, - sha3::{Digest, Sha3_512}, - solana_derivation_path::DerivationPath, - solana_sdk::{ - signature::Signature, - signer::{ - keypair::generate_seed_from_seed_phrase_and_passphrase, EncodableKey, EncodableKeypair, - SeedDerivable, Signer, SignerError, - }, - }, - std::{ - convert::TryInto, - error, fmt, - io::{Read, Write}, - path::Path, - }, + sha3::Sha3_512, + std::{convert::TryInto, fmt}, subtle::{Choice, ConstantTimeEq}, zeroize::Zeroize, }; @@ -116,6 +126,7 @@ impl ElGamal { /// /// The output of this function is of type `DiscreteLog`. To recover, the originally encrypted /// amount, use `DiscreteLog::decode`. + #[cfg(not(target_arch = "wasm32"))] fn decrypt(secret: &ElGamalSecretKey, ciphertext: &ElGamalCiphertext) -> DiscreteLog { DiscreteLog::new( *G, @@ -128,6 +139,7 @@ impl ElGamal { /// /// If the originally encrypted amount is not a positive 32-bit number, then the function /// returns `None`. + #[cfg(not(target_arch = "wasm32"))] fn decrypt_u32(secret: &ElGamalSecretKey, ciphertext: &ElGamalCiphertext) -> Option<u64> { let discrete_log_instance = Self::decrypt(secret, ciphertext); discrete_log_instance.decode_u32() @@ -137,6 +149,7 @@ impl ElGamal { /// A (twisted) ElGamal encryption keypair. /// /// The instances of the secret key are zeroized on drop. +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize, Zeroize)] pub struct ElGamalKeypair { /// The public half of this keypair. @@ -145,6 +158,31 @@ pub struct ElGamalKeypair { secret: ElGamalSecretKey, } +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] +impl ElGamalKeypair { + /// Generates the public and secret keys for ElGamal encryption. + /// + /// This function is randomized. It internally samples a scalar element using `OsRng`. + pub fn new_rand() -> Self { + ElGamal::keygen() + } + + pub fn pubkey_owned(&self) -> ElGamalPubkey { + self.public + } +} + +impl ElGamalKeypair { + pub fn pubkey(&self) -> &ElGamalPubkey { + &self.public + } + + pub fn secret(&self) -> &ElGamalSecretKey { + &self.secret + } +} + +#[cfg(not(target_arch = "wasm32"))] impl ElGamalKeypair { /// Create an ElGamal keypair from an ElGamal public key and an ElGamal secret key. /// @@ -187,21 +225,6 @@ impl ElGamalKeypair { Ok(Self::new(secret)) } - /// Generates the public and secret keys for ElGamal encryption. - /// - /// This function is randomized.
It internally samples a scalar element using `OsRng`. - pub fn new_rand() -> Self { - ElGamal::keygen() - } - - pub fn pubkey(&self) -> &ElGamalPubkey { - &self.public - } - - pub fn secret(&self) -> &ElGamalSecretKey { - &self.secret - } - /// Reads a JSON-encoded keypair from a `Reader` implementor pub fn read_json<R: Read>(reader: &mut R) -> Result<Self, Box<dyn error::Error>> { let bytes: Vec<u8> = serde_json::from_reader(reader)?; @@ -232,6 +255,7 @@ impl ElGamalKeypair { } } +#[cfg(not(target_arch = "wasm32"))] impl EncodableKey for ElGamalKeypair { fn read<R: Read>(reader: &mut R) -> Result<Self, Box<dyn error::Error>> { Self::read_json(reader) } @@ -276,6 +300,7 @@ impl From<&ElGamalKeypair> for [u8; ELGAMAL_KEYPAIR_LEN] { } } +#[cfg(not(target_arch = "wasm32"))] impl SeedDerivable for ElGamalKeypair { fn from_seed(seed: &[u8]) -> Result<Self, Box<dyn error::Error>> { let secret = ElGamalSecretKey::from_seed(seed)?; @@ -301,6 +326,7 @@ impl SeedDerivable for ElGamalKeypair { } } +#[cfg(not(target_arch = "wasm32"))] impl EncodableKeypair for ElGamalKeypair { type Pubkey = ElGamalPubkey; @@ -310,6 +336,7 @@ impl EncodableKeypair for ElGamalKeypair { } /// Public key for the ElGamal encryption scheme. +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[derive(Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq, Serialize, Zeroize)] pub struct ElGamalPubkey(RistrettoPoint); impl ElGamalPubkey { @@ -348,6 +375,7 @@ impl ElGamalPubkey { } } +#[cfg(not(target_arch = "wasm32"))] impl EncodableKey for ElGamalPubkey { fn read<R: Read>(reader: &mut R) -> Result<Self, Box<dyn error::Error>> { let bytes: Vec<u8> = serde_json::from_reader(reader)?; @@ -410,6 +438,38 @@ impl From<&ElGamalPubkey> for [u8; ELGAMAL_PUBKEY_LEN] { #[derive(Clone, Debug, Deserialize, Serialize, Zeroize)] #[zeroize(drop)] pub struct ElGamalSecretKey(Scalar); +impl ElGamalSecretKey { + /// Randomly samples an ElGamal secret key. + /// + /// This function is randomized. It internally samples a scalar element using `OsRng`. + pub fn new_rand() -> Self { + ElGamalSecretKey(Scalar::random(&mut OsRng)) + } + + /// Derive an ElGamal secret key from an entropy seed. + pub fn from_seed(seed: &[u8]) -> Result<Self, ElGamalError> { + const MINIMUM_SEED_LEN: usize = ELGAMAL_SECRET_KEY_LEN; + const MAXIMUM_SEED_LEN: usize = 65535; + + if seed.len() < MINIMUM_SEED_LEN { + return Err(ElGamalError::SeedLengthTooShort); + } + if seed.len() > MAXIMUM_SEED_LEN { + return Err(ElGamalError::SeedLengthTooLong); + } + Ok(ElGamalSecretKey(Scalar::hash_from_bytes::<Sha3_512>(seed))) + } + + pub fn get_scalar(&self) -> &Scalar { + &self.0 + } + + pub fn as_bytes(&self) -> &[u8; ELGAMAL_SECRET_KEY_LEN] { + self.0.as_bytes() + } +} + +#[cfg(not(target_arch = "wasm32"))] impl ElGamalSecretKey { /// Deterministically derives an ElGamal secret key from a Solana signer and a public seed. /// @@ -458,31 +518,6 @@ impl ElGamalSecretKey { result.to_vec() } - /// Randomly samples an ElGamal secret key. - /// - /// This function is randomized. It internally samples a scalar element using `OsRng`. - pub fn new_rand() -> Self { - ElGamalSecretKey(Scalar::random(&mut OsRng)) - } - - /// Derive an ElGamal secret key from an entropy seed. - pub fn from_seed(seed: &[u8]) -> Result<Self, ElGamalError> { - const MINIMUM_SEED_LEN: usize = ELGAMAL_SECRET_KEY_LEN; - const MAXIMUM_SEED_LEN: usize = 65535; - - if seed.len() < MINIMUM_SEED_LEN { - return Err(ElGamalError::SeedLengthTooShort); - } - if seed.len() > MAXIMUM_SEED_LEN { - return Err(ElGamalError::SeedLengthTooLong); - } - Ok(ElGamalSecretKey(Scalar::hash_from_bytes::<Sha3_512>(seed))) - } - - pub fn get_scalar(&self) -> &Scalar { - &self.0 - } - /// Decrypts a ciphertext using the ElGamal secret key.
/// /// The output of this function is of type `DiscreteLog`. To recover, the originally encrypted @@ -495,12 +530,9 @@ impl ElGamalSecretKey { pub fn decrypt_u32(&self, ciphertext: &ElGamalCiphertext) -> Option<u64> { ElGamal::decrypt_u32(self, ciphertext) } - - pub fn as_bytes(&self) -> &[u8; ELGAMAL_SECRET_KEY_LEN] { - self.0.as_bytes() - } } +#[cfg(not(target_arch = "wasm32"))] impl EncodableKey for ElGamalSecretKey { fn read<R: Read>(reader: &mut R) -> Result<Self, Box<dyn error::Error>> { let bytes: Vec<u8> = serde_json::from_reader(reader)?; @@ -517,6 +549,7 @@ impl EncodableKey for ElGamalSecretKey { } } +#[cfg(not(target_arch = "wasm32"))] impl SeedDerivable for ElGamalSecretKey { fn from_seed(seed: &[u8]) -> Result<Self, Box<dyn error::Error>> { let key = Self::from_seed(seed)?; @@ -633,6 +666,7 @@ impl ElGamalCiphertext { /// /// The output of this function is of type `DiscreteLog`. To recover, the originally encrypted /// amount, use `DiscreteLog::decode`. + #[cfg(not(target_arch = "wasm32"))] pub fn decrypt(&self, secret: &ElGamalSecretKey) -> DiscreteLog { ElGamal::decrypt(secret, self) } @@ -642,6 +676,7 @@ impl ElGamalCiphertext { /// /// If the originally encrypted amount is not a positive 32-bit number, then the function /// returns `None`. + #[cfg(not(target_arch = "wasm32"))] pub fn decrypt_u32(&self, secret: &ElGamalSecretKey) -> Option<u64> { ElGamal::decrypt_u32(secret, self) } diff --git a/zk-sdk/src/encryption/grouped_elgamal.rs b/zk-sdk/src/encryption/grouped_elgamal.rs index b786d251973c38..e1eee744f98540 100644 --- a/zk-sdk/src/encryption/grouped_elgamal.rs +++ b/zk-sdk/src/encryption/grouped_elgamal.rs @@ -12,11 +12,12 @@ //! ElGamal ciphertext. //! +#[cfg(not(target_arch = "wasm32"))] +use crate::encryption::{discrete_log::DiscreteLog, elgamal::ElGamalSecretKey}; use { crate::{ encryption::{ - discrete_log::DiscreteLog, - elgamal::{DecryptHandle, ElGamalCiphertext, ElGamalPubkey, ElGamalSecretKey}, + elgamal::{DecryptHandle, ElGamalCiphertext, ElGamalPubkey}, pedersen::{Pedersen, PedersenCommitment, PedersenOpening}, }, RISTRETTO_POINT_LEN, @@ -92,7 +93,10 @@ impl<const N: usize> GroupedElGamal<N> { handle: *handle, }) } +} +#[cfg(not(target_arch = "wasm32"))] +impl<const N: usize> GroupedElGamal<N> { /// Decrypts a grouped ElGamal ciphertext using an ElGamal secret key pertaining to a /// decryption handle at a specified index. /// @@ -142,32 +146,6 @@ impl<const N: usize> GroupedElGamalCiphertext<N> { GroupedElGamal::to_elgamal_ciphertext(self, index) } - /// Decrypts the grouped ElGamal ciphertext using an ElGamal secret key pertaining to a - /// specified index. - /// - /// The output of this function is of type `DiscreteLog`. To recover the originally encrypted - /// amount, use `DiscreteLog::decode`. - pub fn decrypt( - &self, - secret: &ElGamalSecretKey, - index: usize, - ) -> Result<DiscreteLog, GroupedElGamalError> { - GroupedElGamal::decrypt(self, secret, index) - } - - /// Decrypts the grouped ElGamal ciphertext to a number that is interpreted as a positive 32-bit - /// number (but still of type `u64`). - /// - /// If the originally encrypted amount is not a positive 32-bit number, then the function - /// returns `None`. - pub fn decrypt_u32( - &self, - secret: &ElGamalSecretKey, - index: usize, - ) -> Result<Option<u64>, GroupedElGamalError> { - GroupedElGamal::decrypt_u32(self, secret, index) - } - /// The expected length of a serialized grouped ElGamal ciphertext.
/// /// A grouped ElGamal ciphertext consists of a Pedersen commitment and an array of decryption @@ -209,6 +187,35 @@ impl<const N: usize> GroupedElGamalCiphertext<N> { } } +#[cfg(not(target_arch = "wasm32"))] +impl<const N: usize> GroupedElGamalCiphertext<N> { + /// Decrypts the grouped ElGamal ciphertext using an ElGamal secret key pertaining to a + /// specified index. + /// + /// The output of this function is of type `DiscreteLog`. To recover the originally encrypted + /// amount, use `DiscreteLog::decode`. + pub fn decrypt( + &self, + secret: &ElGamalSecretKey, + index: usize, + ) -> Result<DiscreteLog, GroupedElGamalError> { + GroupedElGamal::decrypt(self, secret, index) + } + + /// Decrypts the grouped ElGamal ciphertext to a number that is interpreted as a positive 32-bit + /// number (but still of type `u64`). + /// + /// If the originally encrypted amount is not a positive 32-bit number, then the function + /// returns `None`. + pub fn decrypt_u32( + &self, + secret: &ElGamalSecretKey, + index: usize, + ) -> Result<Option<u64>, GroupedElGamalError> { + GroupedElGamal::decrypt_u32(self, secret, index) + } +} + #[cfg(test)] mod tests { use {super::*, crate::encryption::elgamal::ElGamalKeypair}; diff --git a/zk-sdk/src/encryption/mod.rs b/zk-sdk/src/encryption/mod.rs index 8cad6217fc4c68..28a9ae6bf7fded 100644 --- a/zk-sdk/src/encryption/mod.rs +++ b/zk-sdk/src/encryption/mod.rs @@ -17,7 +17,7 @@ use crate::{RISTRETTO_POINT_LEN, SCALAR_LEN}; pub(crate) mod macros; #[cfg(not(target_os = "solana"))] pub mod auth_encryption; -#[cfg(not(target_os = "solana"))] +#[cfg(all(not(target_os = "solana"), not(target_arch = "wasm32")))] pub mod discrete_log; #[cfg(not(target_os = "solana"))] pub mod elgamal; diff --git a/zk-sdk/src/encryption/pod/elgamal.rs b/zk-sdk/src/encryption/pod/elgamal.rs index 6b30f27a127e3a..1dac45378b588c 100644 --- a/zk-sdk/src/encryption/pod/elgamal.rs +++ b/zk-sdk/src/encryption/pod/elgamal.rs @@ -17,6 +17,11 @@ use { bytemuck::Zeroable, std::fmt, }; +#[cfg(target_arch = "wasm32")] +use { + js_sys::{Array, Uint8Array}, + wasm_bindgen::prelude::*, +}; /// Maximum length of a base64 encoded ElGamal public key const ELGAMAL_PUBKEY_MAX_BASE64_LEN: usize = 44; @@ -80,8 +85,77 @@ impl TryFrom<PodElGamalCiphertext> for ElGamalCiphertext { /// The `ElGamalPubkey` type as a `Pod`. #[derive(Clone, Copy, Default, bytemuck_derive::Pod, bytemuck_derive::Zeroable, PartialEq, Eq)] #[repr(transparent)] +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] pub struct PodElGamalPubkey(pub(crate) [u8; ELGAMAL_PUBKEY_LEN]); +#[cfg(target_arch = "wasm32")] +#[allow(non_snake_case)] +#[cfg_attr(target_arch = "wasm32", wasm_bindgen)] +impl PodElGamalPubkey { + /// Create a new `PodElGamalPubkey` object + /// + /// * `value` - optional public key as a base64 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result<PodElGamalPubkey, JsValue> { + if let Some(base64_str) = value.as_string() { + base64_str + .parse::<PodElGamalPubkey>() + .map_err(|e| e.to_string().into()) + } else if let Some(uint8_array) = value.dyn_ref::<Uint8Array>() { + bytemuck::try_from_bytes(&uint8_array.to_vec()) + .map_err(|err| JsValue::from(format!("Invalid Uint8Array ElGamalPubkey: {err:?}"))) + .map(|pubkey| *pubkey) + } else if let Some(array) = value.dyn_ref::<Array>() { + let mut bytes = vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if (0.
..=255.).contains(&n) { + bytes.push(n as u8); + continue; + } + } + return Err(format!("Invalid array argument: {:?}", x).into()); + } + + bytemuck::try_from_bytes(&bytes) + .map_err(|err| JsValue::from(format!("Invalid Array pubkey: {err:?}"))) + .map(|pubkey| *pubkey) + } else if value.is_undefined() { + Ok(PodElGamalPubkey::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base64 string representation of the public key + pub fn toString(&self) -> String { + self.to_string() + } + + /// Checks if two `ElGamalPubkey`s are equal + pub fn equals(&self, other: &PodElGamalPubkey) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the public key + pub fn toBytes(&self) -> Box<[u8]> { + self.0.into() + } + + pub fn compressed(decoded: &ElGamalPubkey) -> PodElGamalPubkey { + (*decoded).into() + } + + pub fn decompressed(&self) -> Result<ElGamalPubkey, JsValue> { + (*self) + .try_into() + .map_err(|err| JsValue::from(format!("Invalid ElGamalPubkey: {err:?}"))) + } +} + impl fmt::Debug for PodElGamalPubkey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.0) diff --git a/zk-sdk/src/lib.rs b/zk-sdk/src/lib.rs index 824a4718f14c5c..8d7388475f2f36 100644 --- a/zk-sdk/src/lib.rs +++ b/zk-sdk/src/lib.rs @@ -25,6 +25,7 @@ pub mod pod; mod range_proof; mod sigma_proofs; mod transcript; +#[cfg(not(target_arch = "wasm32"))] pub mod zk_elgamal_proof_program; /// Byte length of a compressed Ristretto point or scalar in Curve255519 From e05f75455e2372f8341c21ed8d21d1f9a5146de3 Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 15 Oct 2024 11:52:53 +0200 Subject: [PATCH 502/529] package-metadata: Add macro to define program id from Cargo.toml (#1806) * package-metadata: Add package and macro * Use the macro in a program as a test * Move test to cargo-build-sbf, update version * Add changelog entry * Revert simulation change --- CHANGELOG.md | 13 +++ Cargo.lock | 8 ++ Cargo.toml | 2 + .../tests/crates/package-metadata/Cargo.toml | 2 + .../tests/crates/package-metadata/src/lib.rs | 1 + sdk/package-metadata/Cargo.toml | 17 ++++ sdk/package-metadata/src/lib.rs | 90 +++++++++++++++ 7 files changed, 133 insertions(+) create mode 100644 sdk/package-metadata/Cargo.toml create mode 100644 sdk/package-metadata/src/lib.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b70c0927dbe14..916d801e4ae443 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,19 @@ or tools-version = "1.43" ``` The order of precedence for the chosen tools version goes: `--tools-version` argument, package version, workspace version, and finally default version. + * `package-metadata`: specify a program's id in Cargo.toml for easy consumption by downstream users and tools using `solana-package-metadata` (#1806).
For example: +```toml +[package.metadata.solana] +program-id = "MyProgram1111111111111111111111111111111111" +``` +Can be consumed in the program crate: +```rust +solana_package_metadata::declare_id_with_package_metadata!("solana.program-id"); +``` +This is equivalent to writing: +```rust +solana_pubkey::declare_id!("MyProgram1111111111111111111111111111111111"); +``` * `agave-validator`: Update PoH speed check to compare against current hash rate from a Bank (#2447) * `solana-test-validator`: Add `--clone-feature-set` flag to mimic features from a target cluster (#2480) * `solana-genesis`: the `--cluster-type` parameter now clones the feature set from the target cluster (#2587) diff --git a/Cargo.lock b/Cargo.lock index a138ada09979c6..a610aaaca948f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7160,6 +7160,14 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-package-metadata" +version = "2.1.0" +dependencies = [ + "solana-package-metadata-macro", + "solana-pubkey", +] + [[package]] name = "solana-package-metadata-macro" version = "2.1.0" diff --git a/Cargo.toml b/Cargo.toml index db33afda423e7f..5ec30da8b2c460 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,6 +117,7 @@ members = [ "sdk/instruction", "sdk/macro", "sdk/msg", + "sdk/package-metadata", "sdk/package-metadata-macro", "sdk/program", "sdk/program-error", @@ -434,6 +435,7 @@ solana-msg = { path = "sdk/msg", version = "=2.1.0" } solana-net-utils = { path = "net-utils", version = "=2.1.0" } solana-nohash-hasher = "0.2.1" solana-notifier = { path = "notifier", version = "=2.1.0" } +solana-package-metadata = { path = "sdk/package-metadata", version = "=2.1.0" } solana-package-metadata-macro = { path = "sdk/package-metadata-macro", version = "=2.1.0" } solana-perf = { path = "perf", version = "=2.1.0" } solana-poh = { path = "poh", version = "=2.1.0" } diff --git a/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml b/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml index 4de95889d4bf4d..b4ff4139640939 100644 --- a/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml +++ b/sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml @@ -11,8 +11,10 @@ publish = false [package.metadata.solana] tools-version = "v1.43" +program-id = "MyProgram1111111111111111111111111111111111" [dependencies] +solana-package-metadata = { path = "../../../../package-metadata", version = "=2.1.0" } solana-program = { path = "../../../../program", version = "=2.1.0" } [lib] diff --git a/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs b/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs index a6f2c05b770881..ee364d392441a8 100644 --- a/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs +++ b/sdk/cargo-build-sbf/tests/crates/package-metadata/src/lib.rs @@ -2,6 +2,7 @@ use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; +solana_package_metadata::declare_id_with_package_metadata!("solana.program-id"); solana_program::entrypoint!(process_instruction); fn process_instruction( _program_id: &Pubkey, diff --git a/sdk/package-metadata/Cargo.toml b/sdk/package-metadata/Cargo.toml new file mode 100644 index 00000000000000..8f73e50055f7d4 --- /dev/null +++ b/sdk/package-metadata/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "solana-package-metadata" +description = "Solana Package Metadata" +documentation = "https://docs.rs/solana-package-metadata" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } 
+homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-package-metadata-macro = { workspace = true } +solana-pubkey = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/package-metadata/src/lib.rs b/sdk/package-metadata/src/lib.rs new file mode 100644 index 00000000000000..28fdf08cca35ff --- /dev/null +++ b/sdk/package-metadata/src/lib.rs @@ -0,0 +1,90 @@ +/// Macro for accessing data from the `package.metadata` section of the Cargo manifest +/// +/// # Arguments +/// * `key` - A string slice of a dot-separated path to the TOML key of interest +/// +/// # Example +/// Given the following `Cargo.toml`: +/// ```ignore +/// [package] +/// name = "MyApp" +/// version = "0.1.0" +/// +/// [package.metadata] +/// copyright = "Copyright (c) 2024 ACME Inc." +/// ``` +/// +/// You can fetch the copyright with the following: +/// ```ignore +/// use solana_package_metadata::package_metadata; +/// +/// pub fn main() { +/// let copyright = package_metadata!("copyright"); +/// assert_eq!(copyright, "Copyright (c) 2024 ACME Inc."); +/// } +/// ``` +/// +/// ## TOML Support +/// This macro only supports static data: +/// * Strings +/// * Integers +/// * Floating-point numbers +/// * Booleans +/// * Datetimes +/// * Arrays +/// +/// ## Array Example +/// Given the following Cargo manifest: +/// ```ignore +/// [package.metadata.arrays] +/// some_array = [ 1, 2, 3 ] +/// ``` +/// +/// This is legal: +/// ```ignore +/// static ARR: [i64; 3] = package_metadata!("arrays.some_array"); +/// ``` +/// +/// It does *not* currently support accessing TOML array elements directly. +/// TOML tables are not supported. +pub use solana_package_metadata_macro::package_metadata; +/// Re-export solana_pubkey::declare_id for easy usage within the macro +pub use solana_pubkey::declare_id; + +/// Convenience macro for declaring a program id from Cargo.toml package metadata. +/// +/// # Arguments +/// * `key` - A string slice of a dot-separated path to the TOML key of interest +/// +/// # Example +/// Given the following `Cargo.toml`: +/// ```ignore +/// [package] +/// name = "my-solana-program" +/// version = "0.1.0" +/// +/// [package.metadata.solana] +/// program-id = "MyProgram1111111111111111111111111111111111" +/// ``` +/// +/// A program can use the program id declared in its `Cargo.toml` as the program +/// id in code: +/// +/// ```ignore +/// declare_id_with_package_metadata!("solana.program-id"); +/// ``` +/// +/// This program id behaves exactly as if the developer had written: +/// +/// ``` +/// solana_pubkey::declare_id!("MyProgram1111111111111111111111111111111111"); +/// ``` +/// +/// Meaning that it's possible to refer to the program id using `crate::id()`, +/// without needing to specify the program id in multiple places. +#[macro_export] +macro_rules! declare_id_with_package_metadata { + ($key:literal) => { + $crate::declare_id!($crate::package_metadata!($key)); + }; +} From 96955661f72941bb772a0f514fe2a31029cf9c87 Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 15 Oct 2024 11:55:39 +0200 Subject: [PATCH 503/529] token-2022: Use mainnet-beta binary in all tools (#3165) #### Problem The binary of token-2022 bundled in program-test and fetch-spl.sh is very out of date with what's currently running on mainnet. #### Summary of changes Update the shared object and version to fetch. 
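For anyone verifying this change locally, a minimal smoke test is sketched below. It is not part of this patch: it assumes dev-dependencies on `solana-program-test`, `solana-sdk`, and `tokio`, and it relies on program-test preloading its bundled SPL programs (including token-2022) into the genesis it builds. The test name is hypothetical; the program id is the one used in fetch-spl.sh.

```rust
// Hypothetical smoke test (illustrative only): start a program-test bank and
// confirm the bundled token-2022 program account is present and executable.
use {solana_program_test::ProgramTest, solana_sdk::pubkey::Pubkey, std::str::FromStr};

#[tokio::test]
async fn token_2022_is_preloaded() {
    // ProgramTest adds the bundled SPL programs to genesis by default,
    // so no explicit add_program() call is needed here.
    let mut context = ProgramTest::default().start_with_context().await;
    let token_2022_id =
        Pubkey::from_str("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb").unwrap();
    let account = context
        .banks_client
        .get_account(token_2022_id)
        .await
        .expect("banks client request should succeed")
        .expect("token-2022 program account should exist");
    assert!(account.executable);
}
```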
--- fetch-spl.sh | 2 +- program-test/src/programs.rs | 2 +- .../src/programs/spl_token_2022-1.0.0.so | Bin 535256 -> 0 bytes .../src/programs/spl_token_2022-5.0.2.so | Bin 0 -> 541936 bytes 4 files changed, 2 insertions(+), 2 deletions(-) delete mode 100755 program-test/src/programs/spl_token_2022-1.0.0.so create mode 100755 program-test/src/programs/spl_token_2022-5.0.2.so diff --git a/fetch-spl.sh b/fetch-spl.sh index 97fb1c50aa52eb..17f3c68a86df78 100755 --- a/fetch-spl.sh +++ b/fetch-spl.sh @@ -45,7 +45,7 @@ fetch_program() { } fetch_program token 3.5.0 TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA BPFLoader2111111111111111111111111111111111 -fetch_program token-2022 1.0.0 TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb BPFLoaderUpgradeab1e11111111111111111111111 +fetch_program token-2022 5.0.2 TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb BPFLoaderUpgradeab1e11111111111111111111111 fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo BPFLoader1111111111111111111111111111111111 fetch_program memo 3.0.0 MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr BPFLoader2111111111111111111111111111111111 fetch_program associated-token-account 1.1.2 ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL BPFLoader2111111111111111111111111111111111 diff --git a/program-test/src/programs.rs b/program-test/src/programs.rs index e839b2c090097a..f4d773fbdeffb0 100644 --- a/program-test/src/programs.rs +++ b/program-test/src/programs.rs @@ -21,7 +21,7 @@ static SPL_PROGRAMS: &[(Pubkey, Pubkey, &[u8])] = &[ ( solana_inline_spl::token_2022::ID, solana_sdk::bpf_loader_upgradeable::ID, - include_bytes!("programs/spl_token_2022-1.0.0.so"), + include_bytes!("programs/spl_token_2022-5.0.2.so"), ), ( spl_memo_1_0::ID, diff --git a/program-test/src/programs/spl_token_2022-1.0.0.so b/program-test/src/programs/spl_token_2022-1.0.0.so deleted file mode 100755 index 796fafc4cc13ab3b2e1b00551f2e0c5cb4107fe4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 535256 zcmeFa3t&`Noj-mjFy&!2JUTo^DU(tl&30_4wZ*Kpf!ab*x72nD($;23k(X6MYT-FQBbu*{aMWB*AqRUkhq=@Y%YnF01=M*9W`4sy|oJ)m_wD{@>5{aqr2^gd`=X z>wiya&YW}4`JV6Ve81;)&%NuamtI|yN(B~11^*Ku_4pS@aaOZFSYAI?e+5BvFh3Yg zf5!*oxNMr9qEkhd2MG>PYgN3{!HZEX7^C^&bj9!C9Ot{;XhrHT*h_eoILYOoAiOHu zBwcBj80}q7cUlXM^g0iRT~!EDM0KF{4tcq@zAR8AFbXN>ayAXP$<{H*Epm*kZK zBZ-cU(oUvE80OlfeUzuBs9gc%HcPom2UiO~AMhxoYPp>25CkInIz;AgqcVg-;O>^^ zV+ahf<)pjV+7)V?!gbG+_CeR5heVg=n?3+9)Wq}4@LcJ|^Bm>-&!0!nllowK34H(_ zrVrMS+(ZIG0r21d&LF6kzwl#mer1l7uT0grC^72q{tWSlu_Zjc{J-RULV!Zro}1iO z#`WLwB|I0t8J!jH`3in6{HykS%2#4M!|da%f0b^5KZd&65u)Gl0^b_x5%6Agl<+NS zIYRSsDu;f&)$4ba))>!kekyLKGGFqIFJbnOwClk;PVgWv1hn@h!l}6bg!GRd=~sUH z$F%%OVg<^70p+znh1p)IS31UVm_5+1J<#dfA#3-O1gG*KqY+sDHvU3y-uo2s=W2ca z0*P<0`%C_j%+HJ^Wuf4@6`%cr$EunKf=dWixMYXm*R-4CgG5U+mCua*GgJ#IcXJeM zA~{g;Nbj$mC;64h@pG`QT0WV|s`=t)CZf=yHjblj@tYpwriKfrU)WBIUqwG>bn@vT}O`6&v> zM|_dyZzBFe6^q?Rncr^z5zz~Uzl>^JLC;?c<4VETx5GrQ$>Sg)0hGgry<8vqcdQro zU(&8i`jzWEEbu+Mi+(e^CKbGo;>@U*GaoCeN2WiU&JsK-YZ#4S=4Od2izG&Se^2dI zUMJ~V2$O>OrPaj59Hqer3nz*9wT1`5A4d-=*U8hniV{Dj_0aE8_4Zj8@&m}3)ShWU zWO6q7R?d<7uf0v;88=A7R?eO;BRB*)aQlQ$nb9fHYt>`$74pwN*th-|a!d3m!1L$e zc@nz5#;A7{Cav$VKtS@QT&+3{rcGH%yIcl4Wvr-iuD}NC3?=IUw-||slHn& z4kT))*v#eqa`UUUBjw_Hi~H4^$MBbPeq64lU%3mp+=fIs>6ktqb5X9iTgs(KX(+hW zJ2;+8>~5rA6{!}~PUCFy=b~6+s5qYAa8j z8B%T|M?pkS8%p)%>m=a0Q4q+~9A|mEwp_n*&t$ki;pZmzipf#+!^U@G2ruYYSjS+3 zLXFZfN!pK}OS+(XQ*E5~lmj1$FbX39zn)f@HdPY{G>kZAo%_CX|?pf%ouUdZp~P3 zf3Dgo_*axj2VxMS`I?P0PQxA~@q0bthxQt&J=jT%-wXIb!L8(o?4301)GooFpuEq= 
zc7l@*p2-zl#|g1}1LFIAeJ*tOw_pFx&#T$5S8+Sv>T5UW3ZCX~+cqxXj6%(g97pq( zJEfd+VxOJb?=UOl$^7&yxq#a&^2zTOI5uCw`U&`F_T1+EVbcM|r}kojAJNkwtdks+VN_u2QMEcgMLASyQ@>NWKq}h!b;dho9AWSlXUl zexa`rOqX$z`eEo7>V@|TUtLM)&v(eWn=2{*r-?pP?i2W~Q_9VJfuwEzT&v?IR~Ej* zPe1W5SQopB3mP8z-Qxe#!Pgj1f8K$4G3rhEEXlKULg*+T=eR)g2;iFze#q(YVZj^x zMSlC#1=3DiBgeJMKj*~n*}V1^8wXN7FnI0|M?m)MnLJ{#^7K7>Wd zkL<@hN%xbV);D;O{MJ5Q`ThGzU})p0f#oNJnfxyA$TEWY-QN&?w<^BF)@!jpzPyg9 zypDYa@_It$bxh^;_l$2k_z|c3_k-_BzV?H3@Gnwd=8JM zdC#<03XY%ZiNqD`?;+%n7;wM&k{YZq<%yF zZ>a0WVVksT`chQ?P^d}sI(FMc?f--i|w%%!J)WeQ}(|JWS&(U>r@ELG` z*XxMRWeT^_LYWi{4}V?v2Ff@8k$gU!b<#5N!M6v4t&?8%XpB$gQbsQ@{c=+!9qpee zA7`rGTwWAIk(8VHY0!TyF(R%n{n-3cB*#{12j%Z4{A<@rI`SuyjQECf&!xO1zmoXd z`d-|=02&VdmkksDjYGiyOyK!)56_DBv(;oTp@%kptF06IO+KqtPfZTs>xwG)KP1(H zijE&`{%vw!O^xKJ%qERt|F=@w_SKL^Xl-~>{0q{g3i(M?fkK)_BnSp?8D+z6ETVWT z#qHVOaXqY`!elI>_%QOfC_G97WSXrT(Yuleq2P)f1-DEhLMhm|2y}j#(s4P}%Q`L= zw5y)O-w+BxI7i^6gXePr!?POQsF9x}^Oa?U7t=xUR;t;)Mb-dqJiLkEgPvs+b2*B+ zT*FgLuU`%$)c)m^FN7pa8u&&{G2f0;+@6j3Gn&%rK*qWK@v7e5BEshw;HcanU&fbW z#+Tx)c&_p}NR5NjZcShQ&+MQ~3YWL3y(miB^v<{AU!+HIeU*#Wuau=8HOX4G%G1yVkfn#lFtLTR^9BYq$=W*X<4+zoH&(ZNmeKC|iZ zLE;NSI7Q$_^SuRe>?a2k4F}XlbP;MmUbJrP`&o4`TFz5ZbR5qi4x4D$M$Ow{x6bxwjT<+^~cKp+!^NoLms~&?=CML@qae6IjDWU z#1a27Xr+KOLd^dm`j6|RVr7}s|KIFC{vT~{pGp7G;>iVadaajsv&0@am5;4o9wfGc z!O0Ka4kurL@2|Qpm3J0E9$yw zPR3tuP7cL_wz>5Z*NdLCo!7wW_BxF_B)`2)%7w)oKMzv^C%jJZvh$1K0)8CLFKzy7 z`kNUg=McgrOW7~GSIGHDw@l7Ox(kpN>b0pyRIZ~ZM@@3(yomw)4aOQ6|MNZbB+|n%g?cXbL&u)&BbesRFJiyiX+y;SUIqYbp^EaKs zkHV-iy#C{qAD2&;d7I6jF)rKPN={V+!AdhHgPDRJoezBCB;;)KliG9Sx$S4H)pE0g@1C4Wk6f4V z0eFC-_4BlRoy7SC%lSv#{!N@VIoY|O%9T=nridXZjG8aw^(Bo=PlmVx_;y}lt-`nc z7|7=VI^1dUv-7~f3jyOD3<2P0FcUAN{{G|wD7cdJr}n8iNux0e;Z!N;Qc^FRD)!W+48921 znLVBQ00+YDk?qfWS4x3M;gGV^uKW`0th4W%VS+~b|>&931?U(=ck+@%)UZejW zNc7*rVXxc*Mmy>k)ymKF%P*AjDED{*?h?P8!mUIYs7NuPvW!8E}8vJ2XYQHd}m*~oDS+4P`E<%aHr_s zsr=t7zE;iuoy&0CF4I?uW50I?KL>xch@QY!#^aDqL+~$)pe?LAO_Br+612Qp|noZKNq;Am713QP*+-^X*pNX zR(b`e+m#<}JC`%w8MT8>*PVPmfiJLkIVuVL`^{JT>wQMg$@zSNU$gO%D=!xO)SgG< z0oDI>@G*Xleu18CdX%QOJ17~rPLXH(-8m%5zL~$^WDriSlYH|JlhwX!d*NiY@1{qS z2eR)!=JtF$j(#^BJH9w!$D4dRK40*)alu^TYa?aZ`eiK=6vFYle}~xC`zrS$CFgWd z#LoFfc23W;q*+_3euvsO>fAi;6!uWR!f9W6-msbBhuN(hyXz+$N4~pJ=K-&fd4Rna z0(rHdUUn~+_w^%3{PFwI?%#=>%8*ZME9IDsq*8xMz*jFVzXWY_Fy-(DVUK z&(riFO}{|X$2d*rg*0E}Rk6 z8w6g5&~qw(^Oh?8t`Ow7A+qlbhvr=puWJ^#+k`)6FJn74mLw|UL=s;WKeSZpSMfuT zH}JsGxfi5|~Xdc$>M7cL|E8mVNMReNE2msNY=@qzi($zb{Y z!)f5lK1tJfK=i9i^c8&Ex)$RAvlliFm@9a7Eav!B{bOzw`U||PPV{bT=K7E$HPuI6 z+lh}Dhi*>T?cKkn`Of|wf@f*F#QHv=ouBsQVf|tr&3%hHze0bbCP@mjxqVj>zZGUb zCvsK47&bgAY4vkq(?=xTrTu8h`y@T&ejyH{Sbibve`|yG!_G^jUp8q!?7C3WOSB*E zJ73Z(v>)!({*+U{(RdEi5jLMK@ltqs(o-;%LYimo1NTSr+rmfn%l*dzI!@>o``~&+ zACi2wdA9BQLw`PdNj3dh!FYvRRNs0PZutqpqw_e&N%(;G#TQkBx0Khl` z49{p>-pla9snk&@xY7YlkJI!aO;6PHVNFlhbc?2I8G!ARf=_i+;vkd>G3jeF#lUkP z>`5DPX?#HCiWFBXw>MF)G(fq(N|gI|Do5j5#tZG9PieG!5tYMujs9`^)o-DF^D(lp zihKl=yNmL}F~Cg9TmI`gUl`?=cbq>nDsS*x7(OkeQb@|n?1`@r|3NOfsQOc&1z+LkYq6sBDGVnrd{=@u=+X*a$-|Rm{e5C)K+<)8<^y{9>WBn@7 zd&1mKN${&Czcs|~bnu@F-~6fe(@wP$>Q9SZ(vRIf)pza3VIx{0dE`hih?j~#Y;Eq_ zkE4D%Af5Lz9{xBP_HQ`jxeLUR4q&Re@7tPd1OUaJX4j{e`IaUpm%{UUuDaDCu&{2Y46dev`zKg)IgoV2c1d@h;5cmBD(SA{l3ub|VqF)A=BbzJ zc>8Kbm-JG`55T+HI)L9_Hxr&=b_3fJe|!b}!zQuQ=#QHTH0UWG>D%AW!8oEU?ZThG zniz%hw`uuD66G-;ab;=Ghx;&h;~4S( zPRD6s_CeuWd7A3eTP58&LDEh4NV-e)Z^`R99qk|JIkd2!J~c9(NqsUqZ1b7^9*3(Io+N; z!fE)wF+^G0&h1QJCY2F7J2(n_KH~h6-#@#lUt9hD_E*NOt}Np)dY@eEP)Xo|K9kEs z^PwNY&q8Xv_~%}cGxU=Fv3+_O$**0^A-4;SX`LhR+<<)R7Cw~4{)P?5CEY1{7B-#W zH0`^o+@#%f@HprDei!;Y9KX9GVIO|*LC^2XvRJ=!g|<&NmxxBswEZaC=L#E^Gaoaf 
zrN7uZ2l$EdVOGYqet+Qkp=ZaJlbx+xFMLD4cq`f5wrD;m9qV-0j|R6p)F|*xj2CP@ z@*zPl2)8U1z3G(YxJ z{>_-GPuEA3LWU-apX# zjmmS_>o=d_VAg z;!D#0XM)L$5A5aWpJJpc0si7#`UyIqufD&d{yab(~$LNHf6X`VWkqK-BADI-u&t{5Ies@A2 z*?wtb`N`T?=q<_BzD743a%d*5|Ou`@D3In#C~&AUh= z?+;AxTv_|cfcEQsr)|HEChXU32mUzi*CoTWU*D(xu$r_kNB?iq`1fhQK7sXxKSBG| zn6O8K*{_B~`GM@$Y%2H1V84F0>h$c_+j;*__O0S|rGf3&RcyaxUn3qT{h$3B>bc3| z3H$Y>4|w)#8FhRL_C8R*_mp+t#>Ppf+F!n@syz|8hW9_gb~xOl>o|6=$Y$}sm|tKd zoEa()*+usKkeQz=mUc56u9k^$#GLyKUv`5IYz$ilXcuXiyu>c z0gUH7j!`;o9J7kfxgvj6YPR5&dMQ`Tq+ZG6PmF(Ga~vfk0MB{!6aE(SBl6=+Z-$PM zJeTs*!JD{X+fEsux(8-+I&2d9R};MXl=GOzc76ckDaxYv1@!#DKCVV~KwN8=#?{b1W5FSbRKD{ZDZ#ww6$TxL0;VSzYpHA=-GuE`; zyOT*>!+HojUYNiGbftr4`YUk$F2S#hDBth|e-=_Y9nhgr3T->)ziWHsQw)Zyk6wh9}ZqU zj8_Z8H9Oo|2du%*TR7j&l{?LMb7=vFLSYoNAP8nmUC6P&{?tf0q3-{<&6su2yly{7 z0rcVk^&?vc2R}>q((?{^ZtLg%deu9=Ty4Di0 zLI3D|2Q9C7xs5q~UZ}Z5-t(YBu#Uj%Cc+DD62RIza`czzB|n6a4rmZd!F328;S_Ns zt^?kH+L>mTXhLViADs_64TeD&wq%+HItTJ%0J z@Nav7+UpVfTo}{H1)_U%vU{0Fu6&r|uwl2PI}b^^X@{h{4oG^*c8RCt7*04xMqjKB48ea_slN z`Dj=Dg}=XI?LAEGgpPboo=5eqzkq+#1jAeSTH;?>_!OUS5_RixPN$+LQ9e^BppZzgt?w57~z{Jk!CqMDA*j`rCb3Po1In{TaVOUnAi&9J~J?#0P7)n%zG_ zdbpZk=IH-T8W(spNc_|LDD6GU{&xSxR71|8fmQZ?rMp-7=(gqXnC=aJUeG}??%PH= z(>uuNuL!@${b8!|cDIZWtR3q1$C3yGej-0Z!pk+An4-bH+g z>^(+3l(u`){%Gv|QpU^pZ2F5d`j6S?bnp@`7x@wD-;hs#doT05_IGk|=5HT6(Ls!- z5ZQTspL!(hd@f<#b;%iFpO@N1?juvY^1zf-Nl{kRH#1>M&V_vE;}a9useuDelUr|WQp zA5=3lPR7q}ldi*2K0=%BXXAS!C=sp0sXjY>Z>fD2F_OaiSvn4<=Vw#8eU`4n$+95T zmx{67=W0>ETt)Y>F+MfouQN4`Z2z6U|A6u1CBJ~jkN~~CntnoGZTyJ(>EM@4XQt+S zUYE+$T*T{Cm9FSbaP00A|7G8e%A`a={rSmo->09S@LS_NzXd#hmEgk_ zgo(}T@O(>xj)nBx=zu)IcjH&Qe(^b$1L}V&0sk_ueo`)(aWc+I^4s8N#(kCTvDsPR z@zr~Y|Kq2Lez%GKoT}g4UA5nUD)?i86Rw*e@Dl3{*vCLf2M_ZP&=38y`J`*l@pJqB zg+FhH+<Mr7{5Y2w;SE#V&~}XJ$YNV(f7RL_u#H#`XV?R z$1+`@m*@ABI7xia_aBOq27P;nDUa)_%0v6Xpz^@#F4@gaX@3m)vElZQHHrSQ?|pss z8&<;JXNlx(-}QSS#|^t(qIcEg{jVb`Z?Wfv)Y*OHZRhzSIqUtDgWuQle8FGsUARr; zoDM!C`ro@(;__~elk|lR;#ch6OS^Xtdhlm+vG2Q?>*0M8aLDeTj_&1|CXUbiJ>EY| z-pljugMH;O-2PT+JQMxx5c5AB>=gc#c5obK#qUP<21WNV37_o#py<5;d!Iz_4|4h* zmff!u-G6W?iz)ifrOi9-dxCUcU*xk*%2m5R=uPw}x<9Ba>wEb+a%9Ngb~eYA98&aM zHR*nIU(k>Fz96?(@UPYUwOl#~W~YA8@17tOSx-Di$S(w@6yGE8BRDzaQop@YV*BoB zVbqV9ui^L!!p|jQ-(YtE*S=rqO7eW|M5zybd&fNlPw~Qd7NP&WKu1n?AfL)1M!h#v zJ%hJi>pOjq!@hG0Do`G9QN->yj@lt72)&H%0aE*D{v==b3#tV^|2M}bXV)q8*>}al ztnhi}#at|E&&ENxmlb%VgM%XPHYo@E>gcEGAI7)9vtNJBN_z*Tn^%25^7Vh$x8BFp zZ?zMj+V4Na?b5kgX&=Z?Fn)m_$O2!_COA(0aX3xvqpib6>ofNK5&iCky(bV#ae}M; z3B*wdr{y?~?m3xOuW4upo;PY*0EE-#a~krde~j0B&@r}|^Krg(0sT7Fd}syHf%7=~<_2%U&$EWV5@x9`+O!<;Y$-V~o(Lx|yX^XUzm3tptX`7}ONV}!`HGPey z#SVtqrJQanIgZ=2^QD}gyPiwyZjcc6h4vCYmuWiOBj@kJb^YI;2j0GapO5rDp*zx# zF0lvT_wFM2E&Pc2eLYt&JxB*jIPHcYzf@KE$+-OwA-^wFl^+R`f_?W7<8BKx=!cEt z(!po>JY=!_dXbymOVqVP()ylkc$Mg*-7^)>8{Lb< z?fog})yt~tl^9a!6$13{eIy@OlK$DAYUX^iA0~%*o^+J?UM%0C@{sXe>nu6f?aHs_ z{BV}+qqy>In#L>}a=ls8DvxNsE#GZ1e-(}wdz}vC{W`xt4R^e}G%;Sj;;)5n>Xv+e zMDkv?^54eK&7b5yZ6TOhTcOj`rq#+=cTK? 
zzm4{R_Z`nAJKnvU^}BG^vvj_r{^V)C_vH5J`R+mPO``cd^96pYg?{;d^;GBF(cd;x zdEoz($NSbB?0o1C66FUvANuc9u65mT&$WyEqw_;0CXe??z;9pveeb8y_a|t~guwQ# z1^!jWWwC!1JBa63zJ=xmG}Ta*VZ5{GG zEpji4>pjl+xb7!7_T|m|S;^={-_6Fn0s-{yK9~5>Bm5|gIhV(Yu2b~X#`*bqFX!_4 zyKm&!*1haIx)mMo+q@0qeCqPVKk>tBcXG9$g2gimX!qyaNWXOazl`#53dZg+tyTQY z{@Zu*kVb$#LBEXVD+@KB<_##4-v(<#-}RXEXB@*m9$iTfHK6?;epK)P9HW!Ihc0~F zFMiDI81Ao!J`Cl(hr4J=BG#|T!q4da#-Y7uW_mExdp-(drt`fhnQ=Pq(fexreQMwj zx-VT1xrn2+d6~V3Q5Z8`;gI2=P_6!RxxTEIo4k!bkh}C7`F@;zht1A|qy4=k_jX;n zj>i?M_mj9;;?RyWE#nMpp z-q##W%kVkcFFVzG?U%{kt)^-@`hSzg1^y}y`L!*_*+1C1Mt5;ye}lf~h2Snu>~GBF z>;U)53%{Si8MYr5oil#A`x{E1@i~Kwg(<+VzUaSHc$7wa80q=z?JE+`aS{@8M?bUk zsdnxU@5v!V-?v=J$-wT%_TNYPPd@)A_vTc@4AFUT85c(Ksn_}@pAJc%N(0<=jeTepKmAl4-Ad}w(o%YP`pg)Qm&U7JD+3s zmn0|(@&1kE`~DU4+2M?@b|=PH|F}QK2mK$~)T;9G^@tInKcL_4g*JUe`-c*AqH7LJ zr<_Bwb>7SGwONF-%!_^!Upk=fd9xn7>`bnep`}!XZsRruQDkb z`~iL^M|>`CxO_X1;M;pWzJY!V&!ive;NKXZ@ZqtX4Zgwyrh|Xte5BF;Y`xVzkmGXI z*8PDe=sZd!(7go$_q6XPo~z^E69Uif1=sW4(f*U26SRB5OUJZ4qf7MM!9VQY@R&|n zPmJg^dCU?zO@7tJ>qhTa^CL;`vqy;DkBFRlw=y4Gc{9iU_zw7=Y9C_(#&?|}$9Ny( zMFLm%(Jr6%GEV#cF}aWNCPKyfmn(f*eSR&+#nPi3 zXHwg^U7M%0>34)}T`F29RJy~4K;V@HPS_y&WcLz>4L{fCf=Af!BTdieIBbyfgLZFn z^j8}xgu^Xw&lUZ&~rep%PE_4R7=5Afgb=f{wa*7wwZbWP*-!N2F+it*ucPRH|) zRl@%gM?9|qT)Pj*={-QG|IT^U>c5IB+J0GkQQ)9IU__V>*6H&N!iTKlwQmilt)8vl zNB3LnJ$9k?*KipjKp`40Yd>vWB;$kD%lL8RKcaD)?l-1`Mckh0Su}oB|96n8W5g5h z^9X+VIysk+-}XvwppxV0Y3%a|{g_9<@5K8&uV8@rdj$UA`#e{3|F(Vma7iwbC*a2W zJR*00pXcQrSyf# z4>2Eo|Avu``F~pzNM-y`+!Hb;QY@y|wX)^&8B0lI4H) z;n+S$6MG{@Zcub9gq6uljC}|C(%8U zB9`cU=c&f^=RP*@yvy`B8sD!;%)9LU&Q~SoT`Pr8I9G;#IpXKals+5hW@_Sb{bzm& z%OU~&cQ*Yre_R-Q5$~V6u12v#qsd87uzP51J=Mmy*6%Z;U(#os3wX_Ud*j=F_l{p9 za637ATI1SdiE-_y(LbmF~2sA@Wx$OxDmGpWgvcIuyYYTP6CE|U_w z;QML#SF|_Ob>hbpc+dCnhFvC+(7J`R7w?aJLF|g1PZ{tWl8mPShtL|BdsDPlAqkK8`1kT#_KoJ6n&)TC$_IqNHzDN-{cveLp#p$jP?U; zz7m~7JEqT>e4>++K?Msy6zu&AY^M-Q2>-28-p<|V`#Y{EhQPk_>Wb=r>^mt%@k62A z(^zEiG)|EAlk)?AA06XU^ry^L?4!ryP(VlrwG1da_h8>AX-l+k=N_#6Hm)|T_S0Ma z_Q_aNNa9WR(=mU{FS*{Fq{&7h_~*4gKQo@^6h^&N)?K6d&COii;J|Ms=Qlt4P;6(0 z^1Yk4V}HlvA0(Cz-p%c}{Vj~A-7g%;QSDOnJs;g4i{!YIs}GBh-@MD`<6klWn%8jq z(6<)KiN7CX=OI?|^Dt}YA?*7zb{?WlpD*P&{@%=d$=@RW((M=d40bMJIP<6Zq@Ol_ zGI=>~{&dGJ)%d=h>5lr9_8(UkK7^Cz^Yf@5O={G%_M=Jln&yb!3(N5j*eA%<>_BYS zX%Ptd_Ps$}Cvt<$r}P|}(|MJhI|}Xm%yrt&Qi50O0-aaQPLf|pZmU^pWDFjFut8aAmVwI7zU@`gUR3X zI%!APAJl4l{m;?e!gko!*}^3vN6eQ2-^TBzSN{B}`>vtPufB4!nczUah+z-kMfD6` zJfD(wZC?t@;9QUBq^kYDuSbIb`0r>wwU7!?u=!+i9$PEo4t#k5F6K?m6x;m`kw2Iq zej(E12?BuDrKvQH+a^ftikzgh#L>C52|3PiX1^z>-kW`&(5TO)op6Hmn}Pem->}>V znn&5a_3?Qe(YwLUYIm@H- z4>=z^k(dwO>ER7~NvA=0%0n|DO5ao`#X5`~Q#g!Rz1X&m%~I zDf~X?gID%x|McgB&AHQ?56=ByY=?$2AME)fnGa_B@bOQ7KKLQp8tOj}-_H8`fAc|R zL!bEt4f*(c?7y%1AXZ+_-W#6YeDL3>-v7-9|M$)ZUy>L%VkP2FaX$F= z12J9I=2yx2;JRw%Pca|d=9gbUOr&7zOx5O(uk^}Sn-6aGo>#*+IUl^iFCWbZ>v_Iz z=ZT-r{@|X^R+|sXdP2Bo8pC&b4miAD)}7sm?Gp;Tr+xo~EYC#uPh@K9ST0!SSn%I{ z=Y=#Zj`Y4TcAlIw+{wl1{NMBVhu!~F!Q_ep-a~Js*w))KslVX$uC+yKfC8O^Vg7_o z6GRU4I6FGOh<62Yb3)fPQ5b)J1$018|3(|EPT!}r_b}sqsTPLsO4~UO?S0PZeQ{a$EtY!u zc_B4N=DpE94nkMFf3`;8*nZyt_c){j+SH^F&X@H}(1rcOG@Z^uenh9;6VRGNcHmYH zOy|GQQs>Cgd2Sy%qxXYLN}t}_6`iZH_k-;`rS{)+@Lk3`+)~eU(Raut?%B<8HF@p1 zFQy}t()T|~Qa;j`=-!MS3@6^N+^%WiYb0k;f4c8e`l+2CGCfKM8wIZ+yl?t;5?%lI zg0m3z^uCDlNBv8Q zv-w;k^Jk-^?L7g1ANZL$%=<;&g;DbTU(@r;r;DQ7do38BB=>>qKM?y77uh-G`##}= z>k;~s_hFcP`t9fGeoJyc&+gwaeEfYNLXGEX^C`~f{!8RumiF^?TbKd)T{KCdP|0!R z?c4n)@7q;2NWSee#rIzPsNcO8u&?nvZ8KNQKdA5xPEO$U->+N6_AneDh}^vWIN)+cX|!Lb`@J@oblVvLn6PM@~wZG zTyVY;D!z*U(sN29*m=sCV$Oow@0E^#dI%p7wFs7J{X)%B&R<)U2WW2>@X#rmkKaSX 
zz{7P3e=657_+X}VyWn>Hp25KQyof8feZnt%ucDE1DmMt6$H)yM{TSt6f&9BQpXlRs zNz&+#$1yJ1CTY+$`4G*gi7tkBo7OXUl+X1a7#JSTrI}t=R>g3~5?p&<(3MZHoY7v* zqlBNZKfpO)3yY_{aRHux@e|db2irf}jOU;Co+Eu1(kd6H?;Y9q67c+&UioV6ePV#; z*4{zyIpDP+T@k=kw&LMY7X61lfpFjM2EqAE2l!qM&VGUqy#|Y1S^7;_BZ?bN zl6LK!akP$t!!;Cqd&>8|U#a=3C&2S9u(LWZ%%o`3k%H^p!SeC#=C40V@H(`<$_e$q zO>~CicXN5eGtBlzY1E+iu{j;;ebAL2frIh;A?Qn4(*$=f(pco90DCm;7B#MvMtj#% zU7xPqD8G@5^YdqYg#0&ujfKi}NPEa}U{q}FGa?m5#0~}4CM*6+jYX|q~ukq4o z_rruEVgf__QhIFOs`F;MFD)APsvP`#M}d)*i{!Q#xrFZvG}d~Vv0@n9b$U+kdVN1t zhWm7%M@;~U;r(08wCy_tcn=Hug7=_+Z(%GEN&)!4nEGxy_-{^!O&gxX54yi!;)bmp z+c@0d+C2k4-;dLGOQL>$H-lsNU#B&xwo>prhP!S%!z-jN=5ZzXO`@RpP`<|%OrP@G zfUW2`o}(Os*+2VEOzkC--*%P6klQo96U#Gt{$hFl0oC(s&zCgx04-G(N;*I9m!hYX zM7z^sP?S1x*D`|t9r~3SouR+O!|PcifS0vj82u@3-~5bkuYbBLiTCllZrS>`T)r^o zqpBBm%wWjBhUG8MX&s2=)7F(@dC~jpoOV6OIktWXKLM4Tow|s>1k~4l<%0ZVI>b%E zzDtht1_-ODgQK3kKaKXUBKFwv(qV>o%M|HQ#nR)PHaoqV2`@9nfM&~u34 zx$Xl3*TZM8mOn|fq5OMLzSPU*=w3LfSK6!T#r^6JWY>N~OEETn3D=4I=^lHopGnc6 zmqM{5;~MB`j?#~54E!IZdkP?T-yM2=Az3Ub{T#w+lD%BM{pXweIwPAZm$>8=$ zn5D(}_MUz1ZqD|~O_Oru2VO|O44+Mk&+OD_zPSQ=dcyOEwUY!++XWK)a`5>AzAhnt z3`B2|PSBqs2YmY^(c^R=@AJ;=hc~E%9z37pisF;hmqLx`Ra@ye^B;DJOf`Q`gPIER z)Ab$=q8|{`{8c)64u0G#&72Xwyo>d%x*qhG^XdS4nhw6rcI%d{M*nLFOVQuI*hPBR zy`AYqzeEYU|Ix+|_Yw>Qv!9U9BRi_wlUqL?+h2M=o8h{y%@TKRkhrvl<4mfB;r6xn z!PXDJXi-Q9H*>t2^fX7m{Bjs~)QTDgkLmMOW2Xiw>YwnlH57AY`eXN@+k1HZ^lqD! z1YN%+{?=}mw2dE<`fc#?`n@YVSAhP6c4umR!+VqB2mZd0K<7K;d_Bq|oeuQAk`8It z+Vk!FNpeiJb@Kc%f|H}4{pEc@@D$0a(ktHqYw;bj7!Gk)c-7kglDC+ri{p@XIC$(NbevKQ&{G$C8raAUYvVH|Wh9Tj#tLYB~ zU;mqS#PqmGpD}t!;1|UDJWzk|^>GL359t1$eD|CC39T=4oJrL)e4oCbe=Nqs@+&L3 z81wIKkJ9&mf;oNa`FcefawXb(oaSGvf;ui|e!<}R{_%Si@)Kj9(+3{vf4B-Be!XR% zCOnCs2nHARzDo1Y+c!gMluz>GxF|93jP>znjOT|5Psj=Rw(es66z#zzxK6QWt+ND< zy_^ zV^j|5aEr{R!=~*_*Gx=WDa@QIvEAb~^8!h)?GQZ-8@6(J(DOo~Cmg?n(`!2}=7Qm# zL!1t`$U4%sR0aX%F9rQ7Z`jF3%C~vPV`dZs>~IoIk5eYW4?_6dE1u7{PR{umMm*1tgSM?OmQ+#~tX z`Wey9bb95FQF+sgN~@Hwyh&of!F^>mPIR5en7?+vnT@X^J+b?t2oD6;CF2Iy`2^Ri zy;AaPm2Te-9U(kim&ni70nz@dkD)&t<@&C4MB2{?oH4Yn5p1RYNj4cx(v*inD$?)# zQbEM#-TCV@P2~}47fOu%5dqQNsb1jucoE@!iME8)sQ}W>lS${|% zfSl%AJxE{meJH!XFVy!eVMoSOxz;+VpALxW6b8J9v?%4kpIbuogEMyz>GH%mkyr1P z{zT&9?+YT1_VH)s7_qQn@OwzVJB!BKy}MbSg|WZydq_pWv98oY82frS)O$#8q9qK! z-eC8T782zLx`%Wrm218K_k0iOZ7epnlh3{p^Pn8dCB6qp43z7YdpupY-0SJnpCdgm zdun#X*4dz^E6Da0^?sJZs0+n!_RQCQ-N0gjvDdUjKH{Q(;3@+An zllOSOhDZS2fcN{qe^nf(-9tg^*S`b8_I|J3Cu-}sVUyf13Od0NTX%Q+Mb0Mo0p&@p!+#}RHa0PtS4%TW}j?NbNpCR7+xr-`MF#R;W zfnS2F%IAJ8`0Bl@`8rw9&F_My65n$idFLg*C+8)$N`C+I636tsgzyLLf#UeQ#LKy2 z{y~LraB`eop_Q>p(&D*M#Kjf9i z^W=X1SH0)e_R*8&-(Rh~h^OkkPG9iLFA%=?_9FRyaM>#le3Se5k9yCm;hQYK+bchk z_wIiClhw|5i=+A9c<=6!#Cc|T9$Ux7y2-B-&+F+q^c{Zjbne}Khj;I;n+~YdU#8|b z_Wjf`w4?UI`iFl$4;6sV)ikFl>i&8m^&A4*IM{Db(9NJk${oL%L%{QZQiP<&MU)ZI*E<8ncmE05fmK>ktcgcRu<=dXg4H;bY zgWkmb(>Hl~T}Y+bp4oYV_`BZ6``8)a{h3?IKeY%Q4@nfvRQe0F2nl>^@0AB_Z4#s2 z3yI!%9qxa`PH8>hwz?DhIy}f0^7z=yxCYWz3G?>AaUSw+eslzVv9EZTE7I*!ws#&KpMG1(I{X)g#HEgeSvgvuJo5} z{5Tr7V?06sy!!{FU&i;*J;D|8+up-*|8<(7e$Q3tMthj?#p^VC7;bzY9XSIE_{J_m zZH}WV>osrC^%_~P4ZZKW0)D(+BhUTyntys7(F>v}U_ZU@dd-oO&B!O_Ax6Lb5|wu) zffKLOOcQ?6+zA2uG0ha)_$=CQv36=?d|~$iNAsAMqc+WV>cr0Hw{7Qi?KvDp-&3i! ze$z}v>^{268Yz!``8xV(dJQ@dT#wkdaO!+chiB@&7kdxJ?kO#bK1TOz>ON3(zh;rq zp>wOpgr6lzLx0(xmo-iN!~6sC*g*0a@SfBTSas5SYriAWzOIW#?RN#4J&>O>v*3Gs78dUl^B9uG?MXmtP>|{P9Mz{A{m0+E1?A zUEw{ihHtX`v;Fc_)(@<|&8IDkcztN1=%=m=VSPMDsM|Rsr{g%M?>k4|15tT~4eDR? 