merge queue: embarking main (6c5c61b), #4062 and #4019 together #4065

Closed
wants to merge 10 commits into from
@@ -0,0 +1,3 @@
- The speculative shielded context now avoids updating its
state if the transaction failed. Added a test for it.
([\#4019](https://github.com/anoma/namada/pull/4019))
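A minimal sketch of the behaviour this changelog entry describes, using simplified stand-in types (`TxStatus`, `ShieldedContext`, and `pre_cache_on_result` are illustrative, not the SDK's API); the real logic lives in the `pre_cache_masp_data_on_tx_result` helper added to `crates/apps_lib/src/client/tx.rs` further down:

```rust
/// Simplified stand-in for the SDK's transaction result.
enum TxStatus {
    /// The inner transaction was applied successfully.
    Applied,
    /// The inner transaction was rejected or failed.
    Failed,
}

/// Simplified stand-in for the speculative shielded context.
#[derive(Default)]
struct ShieldedContext {
    /// Nullifiers of notes spent by transactions generated locally.
    spent_notes: Vec<u64>,
}

impl ShieldedContext {
    /// Only record the spent notes once the transaction is known to have
    /// succeeded; a failed transaction leaves the context untouched.
    fn pre_cache_on_result(&mut self, status: &TxStatus, spent: &[u64]) {
        if let TxStatus::Applied = status {
            self.spent_notes.extend_from_slice(spent);
        }
    }
}

fn main() {
    let mut ctx = ShieldedContext::default();
    ctx.pre_cache_on_result(&TxStatus::Failed, &[1, 2]);
    assert!(ctx.spent_notes.is_empty()); // failed tx does not touch the context
    ctx.pre_cache_on_result(&TxStatus::Applied, &[1, 2]);
    assert_eq!(ctx.spent_notes, vec![1, 2]);
}
```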
49 changes: 35 additions & 14 deletions .mergify.yml
@@ -2,37 +2,54 @@ merge_queue:
max_parallel_checks: 3

queue_rules:
- name: duplicated main-queue from automatic merge to main or backport branch
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = main
- label = "merge"
- label != "do-not-merge"
- "#approved-reviews-by >= 1"
- &id001
or:
- base = main
- base = maint-0.45
merge_conditions: []
merge_method: merge
autosquash: true
- name: main-queue
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = main
merge_method: merge

- name: duplicated backport-0.45-queue from automatic merge to main or backport branch
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = maint-0.45
- label = "merge"
- label != "do-not-merge"
- "#approved-reviews-by >= 1"
- *id001
merge_conditions: []
merge_method: merge
autosquash: true
- name: backport-0.45-queue
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = maint-0.45
merge_method: merge

pull_request_rules:
- name: automatic merge to main or backport branch
conditions:
- label = "merge"
- label != "do-not-merge"
- "#approved-reviews-by >= 1"
- or:
- base = main
- base = maint-0.45
actions:
queue:
autosquash: true

pull_request_rules:
- name: notify when a PR is removed from the queue
conditions:
- queue-dequeue-reason != none
- queue-dequeue-reason != pr-merged
- queue-dequeue-reason != none
- queue-dequeue-reason != pr-merged
actions:
comment:
message: >
@@ -49,3 +66,7 @@ pull_request_rules:
backport:
branches:
- "maint-0.45"
- name: refactored queue action rule
conditions: []
actions:
queue:
10 changes: 10 additions & 0 deletions crates/apps_lib/src/client/rpc.rs
@@ -4,6 +4,7 @@ use std::collections::{BTreeMap, BTreeSet};
use std::io;

use borsh::BorshDeserialize;
use color_eyre::owo_colors::OwoColorize;
use data_encoding::HEXLOWER;
use either::Either;
use masp_primitives::asset_type::AssetType;
@@ -395,6 +396,15 @@ async fn query_shielded_balance(
context: &impl Namada,
args: args::QueryBalance,
) {
display_line!(
context.io(),
"{}: {}\n",
"WARNING".bold().underline().yellow(),
"The resulting balance could be outdated, make sure to run `namadac \
shielded-sync` before querying the balance to get the most recent \
value."
);

let args::QueryBalance {
// Token owner (needs to be a viewing key)
owner,
96 changes: 93 additions & 3 deletions crates/apps_lib/src/client/tx.rs
@@ -3,7 +3,9 @@ use std::io::Write;

use borsh::BorshDeserialize;
use borsh_ext::BorshSerializeExt;
use color_eyre::owo_colors::OwoColorize;
use ledger_namada_rs::{BIP44Path, NamadaApp};
use namada_core::masp::MaspTransaction;
use namada_sdk::address::{Address, ImplicitAddress};
use namada_sdk::args::TxBecomeValidator;
use namada_sdk::collections::HashSet;
@@ -829,13 +831,33 @@ pub async fn submit_shielded_transfer(
namada: &impl Namada,
args: args::TxShieldedTransfer,
) -> Result<(), error::Error> {
display_line!(
namada.io(),
"{}: {}\n",
"WARNING".bold().underline().yellow(),
"Some information might be leaked if your shielded wallet is not up \
to date, make sure to run `namadac shielded-sync` before running \
this command.",
);

let (mut tx, signing_data) = args.clone().build(namada).await?;

let masp_section = tx
.sections
.iter()
.find_map(|section| section.masp_tx())
.ok_or_else(|| {
error::Error::Other(
"Missing MASP section in shielded transaction".to_string(),
)
})?;
if args.tx.dump_tx || args.tx.dump_wrapper_tx {
tx::dump_tx(namada.io(), &args.tx, tx)?;
pre_cache_masp_data(namada, &masp_section).await;
} else {
sign(namada, &mut tx, &args.tx, signing_data).await?;
namada.submit(tx, &args.tx).await?;
let res = namada.submit(tx, &args.tx).await?;
pre_cache_masp_data_on_tx_result(namada, &res, &masp_section).await;
}
Ok(())
}
@@ -900,13 +922,33 @@ pub async fn submit_unshielding_transfer(
namada: &impl Namada,
args: args::TxUnshieldingTransfer,
) -> Result<(), error::Error> {
display_line!(
namada.io(),
"{}: {}\n",
"WARNING".bold().underline().yellow(),
"Some information might be leaked if your shielded wallet is not up \
to date, make sure to run `namadac shielded-sync` before running \
this command.",
);

let (mut tx, signing_data) = args.clone().build(namada).await?;

let masp_section = tx
.sections
.iter()
.find_map(|section| section.masp_tx())
.ok_or_else(|| {
error::Error::Other(
"Missing MASP section in shielded transaction".to_string(),
)
})?;
if args.tx.dump_tx || args.tx.dump_wrapper_tx {
tx::dump_tx(namada.io(), &args.tx, tx)?;
pre_cache_masp_data(namada, &masp_section).await;
} else {
sign(namada, &mut tx, &args.tx, signing_data).await?;
namada.submit(tx, &args.tx).await?;
let res = namada.submit(tx, &args.tx).await?;
pre_cache_masp_data_on_tx_result(namada, &res, &masp_section).await;
}
Ok(())
}
@@ -920,16 +962,25 @@ where
{
let (tx, signing_data, _) = args.build(namada).await?;

let opt_masp_section =
tx.sections.iter().find_map(|section| section.masp_tx());
if args.tx.dump_tx || args.tx.dump_wrapper_tx {
tx::dump_tx(namada.io(), &args.tx, tx)?;
if let Some(masp_section) = opt_masp_section {
pre_cache_masp_data(namada, &masp_section).await;
}
} else {
batch_opt_reveal_pk_and_submit(
let res = batch_opt_reveal_pk_and_submit(
namada,
&args.tx,
&[&args.source.effective_address()],
(tx, signing_data),
)
.await?;

if let Some(masp_section) = opt_masp_section {
pre_cache_masp_data_on_tx_result(namada, &res, &masp_section).await;
}
}
// NOTE that the tx could fail when its submission epoch doesn't match
// construction epoch
@@ -1482,3 +1533,42 @@ pub async fn gen_ibc_shielding_transfer(
}
Ok(())
}

// Pre-cache the data for the provided MASP transaction. Log an error on
// failure.
async fn pre_cache_masp_data(namada: &impl Namada, masp_tx: &MaspTransaction) {
if let Err(e) = namada
.shielded_mut()
.await
.pre_cache_transaction(masp_tx)
.await
{
// Just display the error but do not propagate it
edisplay_line!(namada.io(), "Failed to pre-cache masp data: {}.", e);
}
}

// Check the result of a transaction and pre-cache the masp data accordingly
async fn pre_cache_masp_data_on_tx_result(
namada: &impl Namada,
tx_result: &ProcessTxResponse,
masp_tx: &MaspTransaction,
) {
match tx_result {
ProcessTxResponse::Applied(resp) => {
if let Some(InnerTxResult::Success(_)) =
// If the MASP data comes from an IBC transfer we are
// unshielding, so the batch contains only the IBC tx
// and no reveal-pk tx
resp.batch_result().first().map(|(_, res)| res)
{
pre_cache_masp_data(namada, masp_tx).await;
}
}
ProcessTxResponse::Broadcast(_) => {
pre_cache_masp_data(namada, masp_tx).await;
}
// Do not pre-cache when dry-running
ProcessTxResponse::DryRun(_) => {}
}
}
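The branching above can be summarised as a small pure function. This is a sketch with simplified stand-in types (`Outcome` and `should_pre_cache` are illustrative, not the crate's API):

```rust
/// Simplified mirror of `ProcessTxResponse`, for illustration only.
enum Outcome {
    /// The tx was applied; records whether the first inner tx succeeded.
    Applied { first_inner_succeeded: bool },
    /// The tx was only broadcast, so the final result is not yet known.
    Broadcast,
    /// Dry run: nothing was submitted.
    DryRun,
}

/// Should the wallet pre-cache the MASP data for this outcome?
fn should_pre_cache(outcome: &Outcome) -> bool {
    match outcome {
        Outcome::Applied { first_inner_succeeded } => *first_inner_succeeded,
        Outcome::Broadcast => true,
        Outcome::DryRun => false,
    }
}

fn main() {
    assert!(should_pre_cache(&Outcome::Broadcast));
    assert!(!should_pre_cache(&Outcome::DryRun));
    assert!(should_pre_cache(&Outcome::Applied { first_inner_succeeded: true }));
    assert!(!should_pre_cache(&Outcome::Applied { first_inner_succeeded: false }));
}
```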
1 change: 0 additions & 1 deletion crates/node/src/bench_utils.rs
@@ -1223,7 +1223,6 @@ impl BenchShieldedCtx {
vec![masp_transfer_data],
None,
expiration,
true,
)
.await
})
10 changes: 1 addition & 9 deletions crates/sdk/src/tx.rs
@@ -2648,7 +2648,6 @@ pub async fn build_ibc_transfer(
context,
masp_transfer_data,
masp_fee_data,
!(args.tx.dry_run || args.tx.dry_run_wrapper),
args.tx.expiration.to_datetime(),
)
.await?;
@@ -3113,7 +3112,6 @@ pub async fn build_shielded_transfer<N: Namada>(
context,
transfer_data,
masp_fee_data,
!(args.tx.dry_run || args.tx.dry_run_wrapper),
args.tx.expiration.to_datetime(),
)
.await?
@@ -3280,7 +3278,6 @@ pub async fn build_shielding_transfer<N: Namada>(
context,
transfer_data,
None,
!(args.tx.dry_run || args.tx.dry_run_wrapper),
args.tx.expiration.to_datetime(),
)
.await?
@@ -3402,7 +3399,6 @@ pub async fn build_unshielding_transfer<N: Namada>(
context,
transfer_data,
masp_fee_data,
!(args.tx.dry_run || args.tx.dry_run_wrapper),
args.tx.expiration.to_datetime(),
)
.await?
@@ -3455,7 +3451,6 @@ async fn construct_shielded_parts<N: Namada>(
context: &N,
data: Vec<MaspTransferData>,
fee_data: Option<MaspFeeData>,
update_ctx: bool,
expiration: Option<DateTimeUtc>,
) -> Result<Option<(ShieldedTransfer, HashSet<AssetData>)>> {
// Precompute asset types to increase chances of success in decoding
@@ -3469,9 +3464,7 @@
.await;

shielded
.gen_shielded_transfer(
context, data, fee_data, expiration, update_ctx,
)
.gen_shielded_transfer(context, data, fee_data, expiration)
.await
};

@@ -3852,7 +3845,6 @@ pub async fn gen_ibc_shielding_transfer<N: Namada>(
// Fees are paid from the transparent balance of the relayer
None,
args.expiration.to_datetime(),
true,
)
.await
.map_err(|err| TxSubmitError::MaspError(err.to_string()))?
6 changes: 3 additions & 3 deletions crates/shielded_token/src/masp.rs
@@ -320,10 +320,10 @@ pub type WitnessMap = HashMap<usize, IncrementalWitness<Node>>;
#[derive(BorshSerialize, BorshDeserialize, Debug)]
/// The possible sync states of the shielded context
pub enum ContextSyncStatus {
/// The context contains only data that has been confirmed by the protocol
/// The context contains data that has been confirmed by the protocol
Confirmed,
/// The context contains that that has not yet been confirmed by the
/// protocol and could end up being invalid
/// The context possibly contains data that has not yet been confirmed by
/// the protocol and could be incomplete or invalid
Speculative,
}
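A sketch of how calling code might treat the two variants documented above; the enum is mirrored here with a hypothetical helper (`balance_is_authoritative` is not part of the crate):

```rust
/// Simplified mirror of the two sync states documented above.
enum ContextSyncStatus {
    Confirmed,
    Speculative,
}

/// Whether a balance computed from the context can be trusted as-is.
/// A speculative context may be incomplete, so the caller should re-sync
/// (e.g. run `namadac shielded-sync`) before relying on the value.
fn balance_is_authoritative(status: &ContextSyncStatus) -> bool {
    matches!(status, ContextSyncStatus::Confirmed)
}

fn main() {
    assert!(balance_is_authoritative(&ContextSyncStatus::Confirmed));
    assert!(!balance_is_authoritative(&ContextSyncStatus::Speculative));
}
```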

12 changes: 3 additions & 9 deletions crates/shielded_token/src/masp/shielded_wallet.rs
@@ -359,8 +359,9 @@ impl<U: ShieldedUtils + MaybeSend + MaybeSync> ShieldedWallet<U> {
/// Updates the internal state with the data of the newly generated
/// transaction. More specifically, invalidates the spent notes, but does not
/// cache the newly produced output descriptions and therefore the merkle
/// tree
async fn pre_cache_transaction(
/// tree (this is because we don't know the exact position in the tree where
/// the new notes will be appended)
pub async fn pre_cache_transaction(
&mut self,
masp_tx: &Transaction,
) -> Result<(), eyre::Error> {
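A sketch of the caching rule the doc comment describes, using toy types (`ToyWallet` and its fields are illustrative stand-ins, not the real wallet state):

```rust
use std::collections::HashSet;

/// Toy wallet state; the real `ShieldedWallet` tracks far more.
#[derive(Default)]
struct ToyWallet {
    /// Nullifiers the wallet already considers spent.
    spent: HashSet<[u8; 32]>,
}

impl ToyWallet {
    /// Mirrors the idea behind `pre_cache_transaction`: mark the inputs of
    /// a locally built transaction as spent so they are not selected again,
    /// but do NOT append its outputs to the note commitment tree, because
    /// their final positions are only known once the tx is confirmed.
    fn pre_cache(&mut self, spent_nullifiers: &[[u8; 32]]) {
        for nf in spent_nullifiers {
            self.spent.insert(*nf);
        }
        // Output notes (and their Merkle witnesses) are picked up by the
        // next shielded-sync at their confirmed positions.
    }
}

fn main() {
    let mut wallet = ToyWallet::default();
    wallet.pre_cache(&[[1u8; 32]]);
    assert!(wallet.spent.contains(&[1u8; 32]));
}
```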
@@ -1018,7 +1019,6 @@ pub trait ShieldedApi<U: ShieldedUtils + MaybeSend + MaybeSync>:
data: Vec<MaspTransferData>,
fee_data: Option<MaspFeeData>,
expiration: Option<DateTimeUtc>,
update_ctx: bool,
) -> Result<Option<ShieldedTransfer>, TransferErr> {
// Determine epoch in which to submit potential shielded transaction
let epoch = Self::query_masp_epoch(context.client())
@@ -1212,12 +1212,6 @@
)
.map_err(|error| TransferErr::Build { error })?;

if update_ctx {
self.pre_cache_transaction(&masp_tx)
.await
.map_err(|e| TransferErr::General(e.to_string()))?;
}

Ok(Some(ShieldedTransfer {
builder: builder_clone,
masp_tx,