Skip to content

Commit

Permalink
Codebase-wide formatting (#1167)
Browse files Browse the repository at this point in the history
Update the formatting on the whole codebase to match the driver, as
discussed before. This does not change default options, but rather
standardizes formatting for things that are otherwise not formatted
consistently.

This will cause merge conflicts. Once this is approved, I'm going to
ping everybody with the exact date when I intend to merge this so we can
minimize the impact.
  • Loading branch information
sistemd authored Feb 6, 2023
1 parent f0b9067 commit 06ae435
Show file tree
Hide file tree
Showing 324 changed files with 7,553 additions and 5,860 deletions.
78 changes: 42 additions & 36 deletions crates/alerter/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,22 +1,25 @@
// This application observes the order book api and tries to determine if the solver is down. It
// does this by checking if no trades have been made recently and if so checking if it finds a
// matchable order according to an external price api (0x). If this is the case it alerts.

use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use clap::Parser;
use model::{
order::{OrderClass, OrderKind, OrderStatus, OrderUid, BUY_ETH_ADDRESS},
u256_decimal,
// This application observes the order book api and tries to determine if the
// solver is down. It does this by checking if no trades have been made recently
// and if so checking if it finds a matchable order according to an external
// price api (0x). If this is the case it alerts.

use {
anyhow::{Context, Result},
chrono::{DateTime, Utc},
clap::Parser,
model::{
order::{OrderClass, OrderKind, OrderStatus, OrderUid, BUY_ETH_ADDRESS},
u256_decimal,
},
primitive_types::{H160, U256},
prometheus::IntGauge,
reqwest::Client,
std::{
collections::HashMap,
time::{Duration, Instant},
},
url::Url,
};
use primitive_types::{H160, U256};
use prometheus::IntGauge;
use reqwest::Client;
use std::{
collections::HashMap,
time::{Duration, Instant},
};
use url::Url;

#[derive(Debug, serde::Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
Expand Down Expand Up @@ -103,7 +106,8 @@ impl OrderBookApi {
}
}

// Converts the eth placeholder address to weth. Leaves other addresses untouched.
// Converts the eth placeholder address to weth. Leaves other addresses
// untouched.
fn convert_eth_to_weth(token: H160) -> H160 {
let weth: H160 = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
.parse()
Expand Down Expand Up @@ -186,8 +190,8 @@ struct Alerter {
// Expose a prometheus metric so that we can use our Grafana alert infrastructure.
//
// Set to 0 or 1 depending on whether our alert condition is satisfied which is that there
// hasn't been a trade for some time and that there is an order that has been matchable for some
// time.
// hasn't been a trade for some time and that there is an order that has been matchable for
// some time.
no_trades_but_matchable_order: IntGauge,
api_get_order_min_interval: Duration,
}
Expand Down Expand Up @@ -246,8 +250,8 @@ impl Alerter {
let mut closed_orders: Vec<Order> = orders.into_values().map(|(order, _)| order).collect();
// Keep only orders that were open last update and are not open this update.
closed_orders.retain(|order| !self.open_orders.contains_key(&order.uid));
// We're trying to find an order that has been filled. Try market orders first because they
// are more likely to be.
// We're trying to find an order that has been filled. Try market orders first
// because they are more likely to be.
closed_orders.sort_unstable_by_key(|order| match order.class {
OrderClass::Market => 0u8,
OrderClass::Limit(_) => 1,
Expand Down Expand Up @@ -278,13 +282,15 @@ impl Alerter {
self.no_trades_but_matchable_order.set(0);
// Delete all matchable timestamps.
//
// If we didn't do this what could happen is that first we mark an order as matchable
// at t0. Then a trade happens so we skip the matchable update loop below because if
// there was a recent trade we don't want to alert anyway. Then no trade happens for
// long enough that we want to alert and the order is again matchable.
// In this case we would alert immediately even though it could be the case that the
// order wasn't matchable and just now became matchable again. We would wrongly assume
// it has been matchable since t0 but we did not check this between now and then.
// If we didn't do this what could happen is that first we mark an order as
// matchable at t0. Then a trade happens so we skip the matchable
// update loop below because if there was a recent trade we don't
// want to alert anyway. Then no trade happens for long enough that
// we want to alert and the order is again matchable. In this case
// we would alert immediately even though it could be the case that the
// order wasn't matchable and just now became matchable again. We would wrongly
// assume it has been matchable since t0 but we did not check this
// between now and then.
for (_, instant) in self.open_orders.values_mut() {
*instant = None;
}
Expand Down Expand Up @@ -325,9 +331,8 @@ impl Alerter {
impl AlertConfig {
fn alert(&self, order: &Order) {
tracing::error!(
"No orders have been settled in the last {} seconds \
even though order {} is solvable and has a price that \
allows it to be settled according to 0x.",
"No orders have been settled in the last {} seconds even though order {} is solvable \
and has a price that allows it to be settled according to 0x.",
self.time_without_trade.as_secs(),
order.uid,
);
Expand Down Expand Up @@ -372,8 +377,8 @@ struct Arguments {
)]
min_alert_interval: Duration,

/// How many errors in the update loop (fetching solvable orders or querying 0x) in a row
/// must happen before we alert about them.
/// How many errors in the update loop (fetching solvable orders or querying
/// 0x) in a row must happen before we alert about them.
#[clap(long, env, default_value = "5")]
errors_in_a_row_before_alert: u32,

Expand All @@ -383,7 +388,8 @@ struct Arguments {
#[clap(long, env, default_value = "9588")]
metrics_port: u16,

/// Minimum time between get order requests to the api. Without this the api can rate limit us.
/// Minimum time between get order requests to the api. Without this the api
/// can rate limit us.
#[clap(long, env, default_value = "0.2", value_parser = shared::arguments::duration_from_seconds)]
api_get_order_min_interval: Duration,
}
Expand Down
49 changes: 30 additions & 19 deletions crates/autopilot/src/arguments.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,15 @@
use crate::limit_orders::QuotingStrategy;
use primitive_types::H160;
use shared::{
arguments::display_option, bad_token::token_owner_finder, http_client, price_estimation,
use {
crate::limit_orders::QuotingStrategy,
primitive_types::H160,
shared::{
arguments::display_option,
bad_token::token_owner_finder,
http_client,
price_estimation,
},
std::{net::SocketAddr, num::NonZeroUsize, time::Duration},
url::Url,
};
use std::{net::SocketAddr, num::NonZeroUsize, time::Duration};
use url::Url;

#[derive(clap::Parser)]
pub struct Arguments {
Expand All @@ -23,13 +28,14 @@ pub struct Arguments {
#[clap(flatten)]
pub price_estimation: price_estimation::Arguments,

/// Address of the ethflow contract. If not specified, eth-flow orders are disabled.
/// Address of the ethflow contract. If not specified, eth-flow orders are
/// disabled.
#[clap(long, env)]
pub ethflow_contract: Option<H160>,

/// Timestamp at which we should start indexing eth-flow contract events.
/// If there are already events in the database for a date later than this, then this date is
/// ignored and can be omitted.
/// If there are already events in the database for a date later than this,
/// then this date is ignored and can be omitted.
#[clap(long, env)]
pub ethflow_indexing_start: Option<u64>,

Expand All @@ -41,24 +47,27 @@ pub struct Arguments {
#[clap(long, env, default_value = "0.0.0.0:9589")]
pub metrics_address: SocketAddr,

/// Url of the Postgres database. By default connects to locally running postgres.
/// Url of the Postgres database. By default connects to locally running
/// postgres.
#[clap(long, env, default_value = "postgresql://")]
pub db_url: Url,

/// Skip syncing past events (useful for local deployments)
#[clap(long, env)]
pub skip_event_sync: bool,

/// List of token addresses that should be allowed regardless of whether the bad token detector
/// thinks they are bad. Base tokens are automatically allowed.
/// List of token addresses that should be allowed regardless of whether the
/// bad token detector thinks they are bad. Base tokens are
/// automatically allowed.
#[clap(long, env, use_value_delimiter = true)]
pub allowed_tokens: Vec<H160>,

/// List of token addresses to be ignored throughout service
#[clap(long, env, use_value_delimiter = true)]
pub unsupported_tokens: Vec<H160>,

/// The amount of time in seconds a classification of a token into good or bad is valid for.
/// The amount of time in seconds a classification of a token into good or
/// bad is valid for.
#[clap(
long,
env,
Expand All @@ -71,7 +80,8 @@ pub struct Arguments {
#[clap(long, env, default_value = "200")]
pub pool_cache_lru_size: NonZeroUsize,

/// Which estimators to use to estimate token prices in terms of the chain's native token.
/// Which estimators to use to estimate token prices in terms of the chain's
/// native token.
#[clap(
long,
env,
Expand All @@ -94,8 +104,8 @@ pub struct Arguments {
#[clap(long, env, use_value_delimiter = true)]
pub banned_users: Vec<H160>,

/// If the auction hasn't been updated in this amount of time the pod fails the liveness check.
/// Expects a value in seconds.
/// If the auction hasn't been updated in this amount of time the pod fails
/// the liveness check. Expects a value in seconds.
#[clap(
long,
env,
Expand All @@ -104,8 +114,8 @@ pub struct Arguments {
)]
pub max_auction_age: Duration,

/// If a limit order surplus fee is older than this, it will get refreshed. Expects a value in
/// seconds.
/// If a limit order surplus fee is older than this, it will get refreshed.
/// Expects a value in seconds.
#[clap(
long,
env,
Expand Down Expand Up @@ -153,7 +163,8 @@ pub struct Arguments {
#[clap(long, env, value_parser = shared::arguments::duration_from_seconds)]
pub network_block_interval: Option<Duration>,

/// Defines which strategies to apply when updating the `surplus_fee` of limit orders.
/// Defines which strategies to apply when updating the `surplus_fee` of
/// limit orders.
#[clap(long, env, use_value_delimiter = true)]
pub quoting_strategies: Vec<QuotingStrategy>,
}
Expand Down
48 changes: 26 additions & 22 deletions crates/autopilot/src/auction_transaction.rs
Original file line number Diff line number Diff line change
@@ -1,28 +1,34 @@
//! This module is responsible for associating auction ids with transaction hashes.
//! This module is responsible for associating auction ids with transaction
//! hashes.
//!
//! see database/sql/V037__auction_transaction.sql
//!
//! When we put settlement transactions on chain there is no reliable way to know the transaction
//! hash because we can create multiple transactions with different gas prices. What we do know is
//! the account and nonce that the transaction will have which is enough to uniquely identify it.
//! When we put settlement transactions on chain there is no reliable way to
//! know the transaction hash because we can create multiple transactions with
//! different gas prices. What we do know is the account and nonce that the
//! transaction will have which is enough to uniquely identify it.
//!
//! We build an association between account-nonce and tx hash by backfilling settlement events with
//! the account and nonce of their tx hash. This happens in an always running background task.
//! We build an association between account-nonce and tx hash by backfilling
//! settlement events with the account and nonce of their tx hash. This happens
//! in an always running background task.
//!
//! Alternatively we could change the event insertion code to do this but I (vk) would like to keep
//! that code as fast as possible to not slow down event insertion which also needs to deal with
//! reorgs. It is also nicer from a code organization standpoint.
use std::time::Duration;

use anyhow::{anyhow, Context, Result};
use primitive_types::H256;
use shared::{
current_block::CurrentBlockStream, ethrpc::Web3, event_handling::MAX_REORG_BLOCK_COUNT,
//! Alternatively we could change the event insertion code to do this but I (vk)
//! would like to keep that code as fast as possible to not slow down event
//! insertion which also needs to deal with reorgs. It is also nicer from a code
//! organization standpoint.
use {
crate::database::Postgres,
anyhow::{anyhow, Context, Result},
primitive_types::H256,
shared::{
current_block::CurrentBlockStream,
ethrpc::Web3,
event_handling::MAX_REORG_BLOCK_COUNT,
},
std::time::Duration,
web3::types::TransactionId,
};
use web3::types::TransactionId;

use crate::database::Postgres;

pub struct AuctionTransactionUpdater {
pub web3: Web3,
Expand Down Expand Up @@ -92,9 +98,7 @@ impl AuctionTransactionUpdater {

#[cfg(test)]
mod tests {
use super::*;
use sqlx::Executor;
use std::sync::Arc;
use {super::*, sqlx::Executor, std::sync::Arc};

#[tokio::test]
#[ignore]
Expand Down
6 changes: 4 additions & 2 deletions crates/autopilot/src/database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,10 @@ pub mod orders;
mod quotes;
pub mod recent_settlements;

use sqlx::{PgConnection, PgPool};
use std::time::Duration;
use {
sqlx::{PgConnection, PgPool},
std::time::Duration,
};

#[derive(Debug, Clone)]
pub struct Postgres(pub PgPool);
Expand Down
26 changes: 15 additions & 11 deletions crates/autopilot/src/database/auction.rs
Original file line number Diff line number Diff line change
@@ -1,19 +1,23 @@
use super::Postgres;
use anyhow::{Context, Result};
use database::{auction::AuctionId, quotes::QuoteKind};
use futures::{StreamExt, TryStreamExt};
use model::{auction::Auction, order::Order};
use {
super::Postgres,
anyhow::{Context, Result},
database::{auction::AuctionId, quotes::QuoteKind},
futures::{StreamExt, TryStreamExt},
model::{auction::Auction, order::Order},
};

pub struct SolvableOrders {
pub orders: Vec<Order>,
pub latest_settlement_block: u64,
}
use chrono::{DateTime, Utc};
use model::quote::QuoteId;
use shared::{
db_order_conversions::full_order_into_model_order,
event_storing_helpers::{create_db_search_parameters, create_quote_row},
order_quoting::{QuoteData, QuoteSearchParameters, QuoteStoring},
use {
chrono::{DateTime, Utc},
model::quote::QuoteId,
shared::{
db_order_conversions::full_order_into_model_order,
event_storing_helpers::{create_db_search_parameters, create_quote_row},
order_quoting::{QuoteData, QuoteSearchParameters, QuoteStoring},
},
};

#[async_trait::async_trait]
Expand Down
8 changes: 5 additions & 3 deletions crates/autopilot/src/database/auction_transaction.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
use anyhow::Context;
use database::{auction_transaction::SettlementEvent, byte_array::ByteArray};
use primitive_types::H160;
use {
anyhow::Context,
database::{auction_transaction::SettlementEvent, byte_array::ByteArray},
primitive_types::H160,
};

impl super::Postgres {
pub async fn update_settlement_tx_info(
Expand Down
16 changes: 10 additions & 6 deletions crates/autopilot/src/database/ethflow_events/event_retriever.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
//! A component that listens exclusively for `OrderRefund` events of the ethflow contract.
use ethcontract::{contract::AllEventsBuilder, transport::DynTransport, H160, H256};
use hex_literal::hex;
use shared::{ethrpc::Web3, event_handling::EventRetrieving};
//! A component that listens exclusively for `OrderRefund` events of the ethflow
//! contract.
use {
ethcontract::{contract::AllEventsBuilder, transport::DynTransport, H160, H256},
hex_literal::hex,
shared::{ethrpc::Web3, event_handling::EventRetrieving},
};

const ORDER_REFUND_TOPIC: H256 = H256(hex!(
"195271068a288191e4b265c641a56b9832919f69e9e7d6c2f31ba40278aeb85a"
Expand All @@ -23,8 +26,9 @@ impl EventRetrieving for EthFlowRefundRetriever {

fn get_events(&self) -> AllEventsBuilder<DynTransport, Self::Event> {
let mut events = AllEventsBuilder::new(self.web3.clone(), self.address, None);
// Filter out events that we don't want to listen for in the contract. `Self` is designed to
// only pick up refunding events. Adding a filter also makes the query more efficient.
// Filter out events that we don't want to listen for in the contract. `Self` is
// designed to only pick up refunding events. Adding a filter also makes
// the query more efficient.
events.filter = events.filter.topic0(vec![ORDER_REFUND_TOPIC].into());
events
}
Expand Down
Loading

0 comments on commit 06ae435

Please sign in to comment.