diff --git a/.gitignore b/.gitignore
index 5267153..c381fb6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@ pyrightconfig.json
# databases
db/schema.sql
validator_database.db
+*test.db
# backups
*.bak
diff --git a/README.md b/README.md
index 6213a80..1e803ed 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ There are three core files.
1. `sturdy/protocol.py`: Contains the definition of the protocol used by subnet miners and subnet
validators. At the moment it only has one kind of synapse - `AllocateAssets` - which contains
the inputs (`assets_and_pools`) validators need to send to miners to generate return
- `allocations` for. See `generate_assets_in_pools()` in [pools.py](./sturdy/pools.py) to see how
+ `allocations` for. See `generate_challenge_data()` in [pools.py](./sturdy/pools.py) to see how
assets and pools are defined.
2. `neurons/miner.py`: Script that defines the subnet miner's behavior, i.e., how the subnet miner
responds to requests from subnet validators.
@@ -49,68 +49,87 @@ There are three core files.
### Subnet Overview
- Validators are responsible for distributing lists of pools (of which contain relevant parameters
such as base interest rate, base interest rate slope, minimum borrow amount, etc), as well as a
- maximum token balance miners can allocate to pools. Below is the function present in the codebase
- used for generating a dummy `assets_and_pools` taken from [pools.py](./sturdy/pools.py) used for
- synthetic requests:
+ maximum token balance miners can allocate to pools. Below are the functions present in the codebase,
+ taken from [pools.py](./sturdy/pools.py), that are used to generate challenge data for
+ synthetic requests. The selection of assets and pools which can be used in such requests is defined in the [pool registry](./sturdy/pool_registry/pool_registry.py), and is based entirely on real pools which exist on-chain (i.e. on the Ethereum Mainnet):
```python
- def generate_eth_public_key(rng_gen: np.random.RandomState) -> str:
- private_key_bytes = rng_gen.bytes(32) # type: ignore[]
- account = Account.from_key(private_key_bytes)
- return account.address
-
-
- def generate_assets_and_pools(rng_gen: np.random.RandomState) -> dict[str, dict[str, BasePoolModel] | int]: # generate pools
- assets_and_pools = {}
-
- pools_list = [
- BasePool(
- contract_address=generate_eth_public_key(rng_gen=rng_gen),
- pool_type=POOL_TYPES.SYNTHETIC,
- base_rate=int(randrange_float(MIN_BASE_RATE, MAX_BASE_RATE, BASE_RATE_STEP, rng_gen=rng_gen)),
- base_slope=int(randrange_float(MIN_SLOPE, MAX_SLOPE, SLOPE_STEP, rng_gen=rng_gen)),
- kink_slope=int(
- randrange_float(MIN_KINK_SLOPE, MAX_KINK_SLOPE, SLOPE_STEP, rng_gen=rng_gen),
- ), # kink rate - kicks in after pool hits optimal util rate
- optimal_util_rate=int(
- randrange_float(
- MIN_OPTIMAL_RATE,
- MAX_OPTIMAL_RATE,
- OPTIMAL_UTIL_STEP,
- rng_gen=rng_gen,
- ),
- ), # optimal util rate - after which the kink slope kicks in
- borrow_amount=int(
- format_num_prec(
- wei_mul(
- POOL_RESERVE_SIZE,
- int(
- randrange_float(
- MIN_UTIL_RATE,
- MAX_UTIL_RATE,
- UTIL_RATE_STEP,
- rng_gen=rng_gen,
- ),
- ),
- ),
- ),
- ), # initial borrowed amount from pool
- reserve_size=int(POOL_RESERVE_SIZE),
- )
- for _ in range(NUM_POOLS)
- ]
-
- pools = {str(pool.contract_address): pool for pool in pools_list}
-
- minimums = [pool.borrow_amount for pool in pools_list]
- min_total = sum(minimums)
- assets_and_pools["total_assets"] = int(min_total) + int(
- math.floor(
- randrange_float(MIN_TOTAL_ASSETS_OFFSET, MAX_TOTAL_ASSETS_OFFSET, TOTAL_ASSETS_OFFSET_STEP, rng_gen=rng_gen),
- )
- )
- assets_and_pools["pools"] = pools
-
- return assets_and_pools
+ def generate_challenge_data(
+ web3_provider: Web3,
+ rng_gen: np.random.RandomState = np.random.RandomState(), # noqa: B008
+ ) -> dict[str, dict[str, ChainBasedPoolModel] | int]: # generate pools
+ selected_entry = POOL_REGISTRY[rng_gen.choice(list(POOL_REGISTRY.keys()))]
+ bt.logging.debug(f"Selected pool registry entry: {selected_entry}")
+
+ return assets_pools_for_challenge_data(selected_entry, web3_provider)
+
+
+ def assets_pools_for_challenge_data(
+ selected_entry, web3_provider: Web3
+ ) -> dict[str, dict[str, ChainBasedPoolModel] | int]: # generate pools
+ challenge_data = {}
+
+ selected_assets_and_pools = selected_entry["assets_and_pools"]
+ selected_pools = selected_assets_and_pools["pools"]
+ global_user_address = selected_entry.get("user_address", None)
+
+ pool_list = []
+
+ for pool_dict in selected_pools.values():
+ user_address = pool_dict.get("user_address", None)
+ pool = PoolFactory.create_pool(
+ pool_type=POOL_TYPES._member_map_[pool_dict["pool_type"]],
+ user_address=global_user_address if user_address is None else user_address,
+ contract_address=pool_dict["contract_address"],
+ )
+ pool_list.append(pool)
+
+ pools = {str(pool.contract_address): pool for pool in pool_list}
+
+ # we assume that the user address is the same across pools (valid)
+ # and also that the asset contracts are the same across said pools
+ total_assets = selected_entry.get("total_assets", None)
+
+ if total_assets is None:
+ total_assets = 0
+ first_pool = pool_list[0]
+ first_pool.sync(web3_provider)
+ match first_pool.pool_type:
+ case T if T in (
+ POOL_TYPES.STURDY_SILO,
+ POOL_TYPES.AAVE_DEFAULT,
+ POOL_TYPES.AAVE_TARGET,
+ POOL_TYPES.MORPHO,
+ POOL_TYPES.YEARN_V3,
+ ):
+ total_assets = first_pool._user_asset_balance
+ case _:
+ pass
+
+ for pool in pools.values():
+ pool.sync(web3_provider)
+ total_asset = 0
+ match pool.pool_type:
+ case T if T in (
+ POOL_TYPES.STURDY_SILO,
+ POOL_TYPES.AAVE_DEFAULT,
+ POOL_TYPES.AAVE_TARGET,
+ POOL_TYPES.MORPHO,
+ POOL_TYPES.YEARN_V3,
+ ):
+ total_asset += pool._user_deposits
+ case _:
+ pass
+
+ total_assets += total_asset
+
+ challenge_data["assets_and_pools"] = {}
+ challenge_data["assets_and_pools"]["pools"] = pools
+ challenge_data["assets_and_pools"]["total_assets"] = total_assets
+ if global_user_address is not None:
+ challenge_data["user_address"] = global_user_address
+
+ return challenge_data
+
```
Validators can optionally run an API server and sell their bandwidth to outside users to send
their own pools (organic requests) to the subnet. For more information on this process - please read
@@ -125,34 +144,18 @@ There are three core files.
[algo.py](./sturdy/algo.py). The naive allocation essentially works by divvying assets across
pools, and allocating more to pools which have a higher current supply rate.
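+ For illustration, here is a minimal sketch of that idea (not the actual [algo.py](./sturdy/algo.py) implementation - the pool names and rates below are made up): the spendable balance is split in proportion to each pool's current supply rate.
+ ```python
+ # Hypothetical example: split a remaining balance proportionally to each
+ # pool's current supply rate, so higher-rate pools receive larger shares.
+ supply_rates = {"pool_a": 0.05, "pool_b": 0.10}  # assumed supply rates
+ balance = 90
+ rate_sum = sum(supply_rates.values())
+ allocations = {uid: balance * rate / rate_sum for uid, rate in supply_rates.items()}
+ # -> approximately {"pool_a": 30.0, "pool_b": 60.0}
+ ```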
-- After generating allocations, miners then send their outputs to validators to be scored. For
- synthetic requests, validators run a simulation which simulates borrow behavior over a predetermined
- amount of timesteps. For organic requests, on the other hand, validators query the relevant smart
- contracts of user-defined pools on the Ethereum Network to calculate the miners' allocation's
- yields. The scores of miners are determined based on their relative aggregate
- yields, and miners which have similar allocations to other miners will be penalized if they are
+- After generating allocations, miners then send their outputs to validators to be scored. These synthetic requests are generated and sent to miners roughly every 15 minutes.
+ Organic requests, on the other hand, are sent to validators, which then route them to miners. After the "scoring period" for a request has passed, miners are scored based on how much yield the pools have generated within that period - with the miner producing the most yield obtaining the highest score. Scoring miners involves gathering on-chain information about pools, with most if not all of this information being obtained from smart contracts on the Ethereum Network. Miners which have similar allocations to other miners will be penalized if they are
not perceived as being original. If miners fail to respond in ~45 seconds after receiving the
- request they are scored
- poorly.
+ request they are scored poorly.
The best allocating miner will receive the most emissions. For more information on how
- miners are rewarded and how the simulator works- please see
- [reward.py](sturdy/validator/reward.py) and [simulator.py](sturdy/validator/simulator.py)
- respectively. A diagram is provided below highlighting the interactions that takes place within
- the subnet when processing organic requests:
+ miners are rewarded - please see [forward.py](sturdy/validator/forward.py), [reward.py](sturdy/validator/reward.py), and [validator.py](neurons/validator.py). A diagram is provided below highlighting the interactions that take place within
+ the subnet when processing synthetic and organic requests:
-
+
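+ As a rough illustration of the ranking idea (the actual logic lives in [reward.py](sturdy/validator/reward.py); the numbers and names below are hypothetical):
+ ```python
+ # Hypothetical example: rank miners by the yield their allocations produced
+ # over the scoring period - the highest yield gets rank 1 (and the best score).
+ yields = {"1": 0.062, "4": 0.061, "2": 0.031}  # assumed per-miner yields
+ ranked_uids = sorted(yields, key=yields.get, reverse=True)
+ ranks = {uid: i + 1 for i, uid in enumerate(ranked_uids)}
+ # -> {"1": 1, "4": 2, "2": 3}
+ ```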
-
-
-- We provide a demo which plots simulations in [plot_simulator.py](demos/plot_simulator.py). We
- provide a sample output of the script below:
-
-
-
-
-
---
## Installation
diff --git a/assets/subnet_architecture.png b/assets/subnet_architecture.png
new file mode 100644
index 0000000..a624d80
Binary files /dev/null and b/assets/subnet_architecture.png differ
diff --git a/db/migrations/20240725003510_allocations.sql b/db/migrations/20240725003510_allocations.sql
deleted file mode 100644
index 5345c7b..0000000
--- a/db/migrations/20240725003510_allocations.sql
+++ /dev/null
@@ -1,21 +0,0 @@
--- migrate:up
-
-CREATE TABLE allocation_requests (
- request_uid TEXT PRIMARY KEY,
- assets_and_pools TEXT,
- created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-);
-
-CREATE TABLE allocations (
- request_uid TEXT,
- miner_uid TEXT,
- allocation TEXT,
- created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (request_uid, miner_uid),
- FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
-);
-
--- migrate:down
-
-DROP TABLE allocation_requests;
-DROP TABLE allocations;
diff --git a/db/migrations/20241030231410_alloc_table.sql b/db/migrations/20241030231410_alloc_table.sql
new file mode 100644
index 0000000..bdea5e5
--- /dev/null
+++ b/db/migrations/20241030231410_alloc_table.sql
@@ -0,0 +1,37 @@
+-- migrate:up
+
+CREATE TABLE IF NOT EXISTS allocation_requests (
+ request_uid TEXT PRIMARY KEY,
+ assets_and_pools TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE active_allocs (
+ request_uid TEXT PRIMARY KEY,
+ scoring_period_end TIMESTAMP,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
+);
+
+CREATE TABLE IF NOT EXISTS allocations (
+ request_uid TEXT,
+ miner_uid TEXT,
+ allocation TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY (request_uid, miner_uid),
+ FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
+);
+
+-- These ALTER statements add new columns to the allocation_requests and allocations tables
+ALTER TABLE allocation_requests
+ADD COLUMN request_type TEXT NOT NULL DEFAULT 1;
+ALTER TABLE allocation_requests
+ADD COLUMN metadata TEXT;
+ALTER TABLE allocations
+ADD COLUMN axon_time FLOAT NOT NULL DEFAULT 99999.0; -- large number for now
+
+-- migrate:down
+
+DROP TABLE IF EXISTS active_allocs;
+DROP TABLE IF EXISTS allocations;
+DROP TABLE IF EXISTS allocation_requests;
diff --git a/demos/plot_simulator.py b/demos/plot_simulator.py
deleted file mode 100644
index 7715637..0000000
--- a/demos/plot_simulator.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-
-from sturdy.constants import *
-from sturdy.utils.ethmath import wei_div, wei_mul
-from sturdy.utils.misc import borrow_rate
-from sturdy.validator.simulator import Simulator
-
-"""
-This is a script which can be used to play around with the simulator.
-It comes with a function to plot pool borrow rates, etc. over timestamps
-"""
-
-
-def plot_simulation_results(simulator) -> None:
- borrow_amount_history = []
- borrow_rate_history = []
- utilization_rate_history = []
- supply_rate_history = []
- median_borrow_rate_history = []
-
- for t in range(simulator.timesteps):
- borrow_amounts = [pool.borrow_amount for pool in simulator.pool_history[t].values()]
- reserve_sizes = [pool.reserve_size for pool in simulator.pool_history[t].values()]
- borrow_rates = [pool.borrow_rate for pool in simulator.pool_history[t].values()]
- utilization_rates = [wei_div(borrow_amounts[i], reserve_sizes[i]) for i in range(len(borrow_amounts))]
- supply_rates = [wei_mul(utilization_rates[i], borrow_rates[i]) for i in range(len(borrow_amounts))]
-
- borrow_amount_history.append(borrow_amounts)
- borrow_rate_history.append(borrow_rates)
- utilization_rate_history.append(utilization_rates)
- supply_rate_history.append(supply_rates)
- median_borrow_rate_history.append(np.median(borrow_rates))
-
- # Convert data to more manageable format
- borrow_amount_history_df = (
- pd.DataFrame(borrow_amount_history, columns=[
- f"Pool_{name[:6]}" for name in simulator.assets_and_pools["pools"]
- ]).apply(pd.to_numeric)
- / 1e18
- )
- borrow_rate_history_df = (
- pd.DataFrame(borrow_rate_history, columns=[
- f"Pool_{name[:6]}" for name in simulator.assets_and_pools["pools"]
- ]).apply(pd.to_numeric) / 1e18
- )
- utilization_rate_history_df = (
- pd.DataFrame(
- utilization_rate_history,
- columns=[
- f"Pool_{name[:6]}" for name in simulator.assets_and_pools["pools"]
- ],
- ).apply(pd.to_numeric)
- / 1e18
- )
- supply_rate_history_df = (
- pd.DataFrame(supply_rate_history, columns=[
- f"Pool_{name[:6]}" for name in simulator.assets_and_pools["pools"]
- ]).apply(pd.to_numeric)
- / 1e18
- )
- median_borrow_rate_history_df = (
- pd.Series(median_borrow_rate_history, name="Median Borrow Rate").apply(pd.to_numeric) / 1e18
- )
-
- plt.style.use("dark_background")
- fig, axs = plt.subplots(3, 2, figsize=(15, 15))
- axs[2, 1].remove() # Remove the subplot in the bottom right corner
- axs[2, 0].remove() # Remove the subplot in the bottom left corner
-
- def save_plot(event):
- if event.key == "s":
- plt.savefig("simulation_plot.png")
- print("Plot saved as 'simulation_plot.png'")
-
- fig.canvas.mpl_connect("key_press_event", save_plot)
-
- # Plot borrow rates with median borrow rate
- for column in borrow_rate_history_df:
- axs[0, 0].plot(
- borrow_rate_history_df.index,
- borrow_rate_history_df[column],
- label=column,
- alpha=0.5,
- )
- axs[0, 0].plot(
- median_borrow_rate_history_df.index,
- median_borrow_rate_history_df,
- label="Median Borrow Rate",
- color="white",
- linewidth=2,
- linestyle="--",
- )
- axs[0, 0].set_title("Simulated Borrow Rates Over Time")
- axs[0, 0].set_xlabel("Time Step")
- axs[0, 0].set_ylabel("Borrow Rate")
- axs[0, 0].legend(title="Pools", bbox_to_anchor=(1.05, 1), loc="upper left")
-
- # Plot borrow amounts
- borrow_amount_history_df.plot(ax=axs[0, 1])
- axs[0, 1].set_title("Simulated Borrow Amounts Over Time")
- axs[0, 1].set_xlabel("Time Step")
- axs[0, 1].set_ylabel("Borrow Amount")
- # axs[0, 1].legend(title="Pools", bbox_to_anchor=(1.05, 1), loc="upper left")
- axs[0, 1].get_legend().remove()
-
- # Plot utilization rates
- utilization_rate_history_df.plot(ax=axs[1, 0])
- axs[1, 0].set_title("Simulated Utilization Rates Over Time")
- axs[1, 0].set_xlabel("Time Step")
- axs[1, 0].set_ylabel("Utilization Rate")
- axs[1, 0].legend(title="Pools", bbox_to_anchor=(1.05, 1), loc="upper left")
- # axs[1, 0].get_legend().remove()
-
- # Plot supply rates
- supply_rate_history_df.plot(ax=axs[1, 1])
- axs[1, 1].set_title("Simulated Supply Rates Over Time")
- axs[1, 1].set_xlabel("Time Step")
- axs[1, 1].set_ylabel("Supply Rate")
- # axs[1, 1].legend(title="Pools", bbox_to_anchor=(1.05, 1), loc="upper left")
- axs[1, 1].get_legend().remove()
-
- # Create a new axis that spans the entire bottom row
- ax_interest_rates = fig.add_subplot(3, 1, 3)
-
- # Plot interest rate curves for the pools
- utilization_range = np.linspace(0, 1, 100)
- for pool_addr, pool in simulator.assets_and_pools["pools"].items():
- interest_rates = [borrow_rate(u * 1e18, pool) / 1e18 for u in utilization_range]
- ax_interest_rates.plot(utilization_range, interest_rates, label=f"Pool_{pool_addr[:6]}")
-
- ax_interest_rates.set_title("Interest Rate Curves for the Pools")
- ax_interest_rates.set_xlabel("Utilization Rate")
- ax_interest_rates.set_ylabel("Borrow Rate")
- # ax_interest_rates.legend(title="Pools", bbox_to_anchor=(1.05, 1), loc="upper left")
-
- # Ensure labels don't overlap and improve layout
- plt.tight_layout(rect=[0, 0, 1, 0.96])
- plt.show()
-
-
-# Usage
-if __name__ == "__main__":
- np.random.seed(69)
- num_sims = 10
- for _ in range(num_sims):
- sim = Simulator(
- seed=np.random.randint(0, 1000),
- )
- sim.initialize()
- sim.init_data()
- sim.run()
-
- plot_simulation_results(sim)
diff --git a/docs/validator.md b/docs/validator.md
index d8b2378..6357efa 100644
--- a/docs/validator.md
+++ b/docs/validator.md
@@ -27,6 +27,14 @@ You will need `pm2` if you would like to utilize the auto update scripts that co
1. Install [node and npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
2. Install [pm2](https://pm2.io)
+### Creating the database
+This database is used to store API keys (only for organic validators), scoring logs, and "active" miner allocations for scoring.
+
+First, [install dbmate](https://github.com/amacneil/dbmate?tab=readme-ov-file#installation). Then run the command below:
+```bash
+dbmate --url "sqlite:validator_database.db" up
+```
+
## Running a Validator
@@ -40,8 +48,20 @@ You have the option of running two kinds of validators:
- [Synthetic](#synthetic-validator)
- [Organic](#organic-validator)
+Before we get to the differences between them, and how to set each of them up, we must first ensure we have a connection to the Ethereum network.
+
+#### Connecting to Ethereum
+All validators are required to have a connection to an Ethereum RPC to handle requests. It is needed to interact with relevant smart contracts in order to perform certain operations, e.g. calculating miner allocation yields.
+
+##### Preparing Environment
+The next step involves interacting with an API. We've provided an [.env.example](../.env.example) file which should be copied as a `.env` file in the root of this repository before proceeding.
+
+##### Connecting to a Web3 Provider
+We recommend using a third party service such as [Infura](https://docs.infura.io/dashboard/create-api) or [Alchemy](https://docs.alchemy.com/docs/alchemy-quickstart-guide#1key-create-an-alchemy-api-key) (click on the hyperlinks for documentation) to connect to an RPC for performing on-chain calls: obtain an API key from the provider and add its URL to the `.env` file under the `WEB3_PROVIDER_URL` alias.
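+For example, the resulting `.env` entry might look like the following (placeholder values - use the endpoint URL and API key from your own provider):
+```bash
+# .env (example only - substitute your provider's RPC URL and API key)
+WEB3_PROVIDER_URL=https://mainnet.infura.io/v3/<YOUR_API_KEY>
+```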
+
+
## Synthetic Validator
-This is the most simple of the two. Synthetic validators generate dummy (fake) pools to send to miners to challenge them. To run a synthetic validator, run:
+This is the simpler of the two. Synthetic validators generate synthetic requests to send to miners to challenge them. To run a synthetic validator, run:
#### Starting the validator - without PM2
```bash
python3 neurons/validator.py --netuid NETUID --subtensor.network NETWORK --wallet.name NAME --wallet.hotkey HOTKEY --logging.trace --axon.port PORT --organic False
@@ -77,15 +97,6 @@ Where `ID_OR_PROCESS_NAME` is the `name` OR `id` of the process as noted per the
## Organic Validator
This is the less simple but more exciting of the two! Now you get to sell your bandwidth to whoever you want, with a very simple to use CLI!
-#### Connecting to Ethereum
-Organic validators are required to have a connection to an Ethereum RPC to handle organic requests. It is required to interact with relevant smart contracts in order to perform certain operations i.e. calculate miner allocation yields.
-
-##### Preparing Environment
-The next step involves interacting with an API. We've provided an [.env.example](../.env.example) file which should be copied as a `.env` file in the root of this repository before proceeding.
-
-#### Connecting to a Web3 Provider
-We recommend using a third party service to connect to an RPC to perform on-chain calls such as [Infura](https://docs.infura.io/dashboard/create-api) and [Alchemy](https://docs.alchemy.com/docs/alchemy-quickstart-guide#1key-create-an-alchemy-api-key) (click on hyperlinks links for documentation) by obtaining there API key and adding their URL to the `.env` file under the `WEB3_PROVIDER_URL` alias.
-
#### Spinning Up Organic Validator
The steps are similar to synthetic only validators:
@@ -137,15 +148,6 @@ Where `ID_OR_PROCESS_NAME` is the `name` OR `id` of the process as noted per the
## Selling your bandwidth
-### Creating the database
-Used to store api keys & scoring logs
-
-First, [install dbmate](https://github.com/amacneil/dbmate?tab=readme-ov-file#installation)
-
-```bash
-dbmate --url "sqlite:validator_database.db" up
-```
-
### Managing access
To manage access to the your api server and sell access to anyone you like, using the sturdy-cli is the easiest way.
@@ -169,7 +171,7 @@ To get more info about that command!
For example:
```bash
-sturdy create-key 10 60 test
+sturdy create-key --balance 10 --rate-limit-per-minute 60 --name test
```
Creates a test key with a balance of 10 (which corresponds to 10 requests), a rate limit of 60 requests per minute = 1/s, and a name 'test'.
@@ -205,22 +207,18 @@ curl -X POST \
"pools": {
"0x6311fF24fb15310eD3d2180D3d0507A21a8e5227": {
"pool_type": "STURDY_SILO",
- "pool_model_disc": "CHAIN",
"contract_address": "0x6311fF24fb15310eD3d2180D3d0507A21a8e5227"
},
"0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b": {
"pool_type": "STURDY_SILO",
- "pool_model_disc": "CHAIN",
"contract_address": "0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b"
},
"0x26fe402A57D52c8a323bb6e09f06489C8216aC88": {
"pool_type": "STURDY_SILO",
- "pool_model_disc": "CHAIN",
"contract_address": "0x26fe402A57D52c8a323bb6e09f06489C8216aC88"
},
"0x8dDE9A50a91cc0a5DaBdc5d3931c1AF60408c84D": {
"pool_type": "STURDY_SILO",
- "pool_model_disc": "CHAIN",
"contract_address": "0x8dDE9A50a91cc0a5DaBdc5d3931c1AF60408c84D"
}
}
@@ -236,8 +234,7 @@ Some annotations are provided below to further help understand the request forma
"total_assets": 548568963376234830607950, # total assets available to a miner to allocate
"pools": { # pools available to output allocations for
"0x6311fF24fb15310eD3d2180D3d0507A21a8e5227": { # address used to get relevant info about the the pool
- "pool_type": "STURDY_SILO",
- "pool_model_disc": "CHAIN", # if this is a synthetic or chain (organic) pool
+ "pool_type": "STURDY_SILO", # type of pool (i.e sturdy silo, aave pool, yearn vault, etc.)
"contract_address": "0x6311fF24fb15310eD3d2180D3d0507A21a8e5227" # address used to get relevant info about the the pool
},
```
@@ -248,8 +245,8 @@ And the corresponding response(example) format from the subnet:
"request_uuid":"1e09d3f1ce574921bd13a2461607f5fe",
"allocations":{
"1":{ # miner uid
- "apy":62133011236204113, # apy of miner's allocations in 18 decimal precision because the asset has the same precision.
- "allocations":{ # allocations to pools in wei
+ "rank":1, # rank of the miner based on past performance
+ "allocations":{ # allocations to pools
"0x6311fF24fb15310eD3d2180D3d0507A21a8e5227":114864688949643874140160,
"0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b":1109027125282399872,
"0x26fe402A57D52c8a323bb6e09f06489C8216aC88":71611128603622265323520,
@@ -257,7 +254,7 @@ And the corresponding response(example) format from the subnet:
}
},
"4":{
- "apy":61332661325287823,
+ "rank":2,
"allocations":{
"0x6311fF24fb15310eD3d2180D3d0507A21a8e5227":119201178628424617426944,
"0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b":1290874337673458688,
@@ -266,7 +263,7 @@ And the corresponding response(example) format from the subnet:
}
},
"2":{
- "apy":31168293423379011,
+ "rank":3,
"allocations":{
"0x6311fF24fb15310eD3d2180D3d0507A21a8e5227":45592862828746122461184,
"0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b":172140896186699296,
diff --git a/hardhat.config.js b/hardhat.config.js
index 090995a..00df9e9 100644
--- a/hardhat.config.js
+++ b/hardhat.config.js
@@ -17,8 +17,10 @@ module.exports = {
// blockNumber: 20825292,
// blockNumber: 20874859
// blockNumber: 20892138
+ // blockNumber: 20976304
+ // blockNumber: 21080765
// latest
- blockNumber: 20976304
+ blockNumber: 21150770
},
accounts,
}
diff --git a/min_compute.yml b/min_compute.yml
index b4051d8..b983ff6 100644
--- a/min_compute.yml
+++ b/min_compute.yml
@@ -10,7 +10,7 @@
# Even then - storage isn't utilized very often - so disk performance won't really be a bottleneck vs, CPU, RAM,
# and network bandwidth.
-version: '1.5.3' # update this version key as needed, ideally should match your release version
+version: '2.0.0' # update this version key as needed, ideally should match your release version
compute_spec:
diff --git a/neurons/validator.py b/neurons/validator.py
index d23f37b..22ecdb0 100644
--- a/neurons/validator.py
+++ b/neurons/validator.py
@@ -36,6 +36,7 @@
# import base validator class which takes care of most of the boilerplate
from sturdy.base.validator import BaseValidatorNeuron
+from sturdy.constants import DB_DIR, ORGANIC_SCORING_PERIOD
# Bittensor Validator Template:
from sturdy.pools import PoolFactory
@@ -51,7 +52,6 @@
# api key db
from sturdy.validator import forward, query_and_score_miners, sql
-from sturdy.validator.simulator import Simulator
class Validator(BaseValidatorNeuron):
@@ -73,7 +73,6 @@ def __init__(self, config=None) -> None:
bt.logging.info("load_state()")
self.load_state()
self.uid_to_response = {}
- self.simulator = Simulator()
async def forward(self) -> Any:
"""
@@ -185,7 +184,6 @@ async def allocate(body: AllocateAssetsRequest) -> AllocateAssetsResponse | None
...
"0x6311fF24fb15310eD3d2180D3d0507A21a8e5227": {
"pool_type": "STURDY_SILO",
- "pool_model_disc: "CHAIN",
"contract_address": "0x6311fF24fb15310eD3d2180D3d0507A21a8e5227"
},
...
@@ -216,33 +214,20 @@ async def allocate(body: AllocateAssetsRequest) -> AllocateAssetsResponse | None
new_pools = {}
for uid, pool in pools.items():
- match synapse.request_type:
- case REQUEST_TYPES.SYNTHETIC:
- new_pool = PoolFactory.create_pool(
- pool_type=pool.pool_type,
- contract_address=pool.contract_address,
- base_rate=pool.base_rate,
- base_slope=pool.base_slope,
- kink_slope=pool.kink_slope,
- optimal_util_rate=pool.optimal_util_rate,
- borrow_amount=pool.borrow_amount,
- reserve_size=pool.reserve_size,
- )
- new_pools[uid] = new_pool
- case _: # TODO: We assume this is an "organic request"
- new_pool = PoolFactory.create_pool(
- pool_type=pool.pool_type,
- web3_provider=core_validator.w3, # type: ignore[]
- user_address=(
- pool.user_address if pool.user_address != ADDRESS_ZERO else synapse.user_address
- ), # TODO: is there a cleaner way to do this?
- contract_address=pool.contract_address,
- )
- new_pools[uid] = new_pool
+ new_pool = PoolFactory.create_pool(
+ pool_type=pool.pool_type,
+ web3_provider=core_validator.w3, # type: ignore[]
+ user_address=(
+ pool.user_address if pool.user_address != ADDRESS_ZERO else synapse.user_address
+ ), # TODO: is there a cleaner way to do this?
+ contract_address=pool.contract_address,
+ )
+ new_pools[uid] = new_pool
synapse.assets_and_pools["pools"] = new_pools
- result = await query_and_score_miners(
+ bt.logging.info("Querying miners...")
+ axon_times, result = await query_and_score_miners(
core_validator,
assets_and_pools=synapse.assets_and_pools,
request_type=synapse.request_type,
@@ -250,10 +235,28 @@ async def allocate(body: AllocateAssetsRequest) -> AllocateAssetsResponse | None
)
request_uuid = uid = str(uuid.uuid4()).replace("-", "")
- to_ret = dict(list(result.items())[:body.num_allocs])
+ to_ret = dict(list(result.items())[: body.num_allocs])
ret = AllocateAssetsResponse(allocations=to_ret, request_uuid=request_uuid)
+ to_log = AllocateAssetsResponse(allocations=to_ret, request_uuid=request_uuid)
+
+ metadata = {}
+ pools = synapse.assets_and_pools["pools"]
+
+ for contract_addr, pool in pools.items():
+ pool.sync(core_validator.w3)
+ metadata[contract_addr] = pool._share_price
+
with sql.get_db_connection() as conn:
- sql.log_allocations(conn, ret.request_uuid, synapse.assets_and_pools, ret.allocations)
+ sql.log_allocations(
+ conn,
+ to_log.request_uuid,
+ synapse.assets_and_pools,
+ metadata,
+ to_log.allocations,
+ axon_times,
+ REQUEST_TYPES.ORGANIC,
+ ORGANIC_SCORING_PERIOD,
+ )
return ret
@@ -264,9 +267,10 @@ async def get_allocations(
miner_uid: str | None = None,
from_ts: int | None = None,
to_ts: int | None = None,
+ db_dir: str = DB_DIR,
) -> list[dict]:
- with sql.get_db_connection() as conn:
- allocations = sql.get_filtered_allocations(conn, request_uid, miner_uid, from_ts, to_ts)
+ with sql.get_db_connection(db_dir) as conn:
+ allocations = sql.get_miner_responses(conn, request_uid, miner_uid, from_ts, to_ts)
if not allocations:
raise HTTPException(status_code=404, detail="No allocations found")
return allocations
@@ -277,8 +281,9 @@ async def request_info(
request_uid: str | None = None,
from_ts: int | None = None,
to_ts: int | None = None,
+ db_dir: str = DB_DIR,
) -> list[dict]:
- with sql.get_db_connection() as conn:
+ with sql.get_db_connection(db_dir) as conn:
info = sql.get_request_info(conn, request_uid, from_ts, to_ts)
if not info:
raise HTTPException(status_code=404, detail="No request info found")
diff --git a/pyproject.toml b/pyproject.toml
index a95b13e..a5e11bc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ indent-width = 4
target-version = "py311"
[tool.ruff.lint]
-ignore = ["NPY002", "F405", "F403", "E402", "D", "ANN001", "FBT001", "FBT002", "TD002", "TD003", "PLR", "C901", "BLE001", "ANN401", "N801", "EM101", "EM102", "TRY003", "S608", "FIX002", "N805", "N815", "N806", "PT009", "COM812", "S101", "SLF001", "T201"]
+ignore = ["NPY002", "F405", "F403", "E402", "D", "ANN001", "FBT001", "FBT002", "TD002", "TD003", "PLR", "C901", "BLE001", "ANN401", "N801", "EM101", "EM102", "TRY003", "S608", "FIX002", "N805", "N815", "N806", "PT009", "COM812", "S101", "SLF001", "T201", "DTZ003"]
select = ["ALL"]
[tool.ruff.format]
diff --git a/requirements.txt b/requirements.txt
index 673fcf9..1a9cc9a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
wandb==0.17.0
loguru==0.7.0
-bittensor @ git+https://github.com/opentensor/bittensor@release/6.11.1
+bittensor==6.11.1
torch==2.0.1
typer==0.9.0
starlette==0.27.0
@@ -10,3 +10,4 @@ python-dotenv==1.0.1
pandas==2.2.2
matplotlib==3.9.0
gmpy2==2.2.1
+websocket-client @ git+https://github.com/websocket-client/websocket-client.git@asyncpong#egg=websocket-client
diff --git a/sturdy/__init__.py b/sturdy/__init__.py
index 13d695f..e793be8 100644
--- a/sturdy/__init__.py
+++ b/sturdy/__init__.py
@@ -17,7 +17,7 @@
# DEALINGS IN THE SOFTWARE.
# Define the version of the template module.
-__version__ = "1.5.3"
+__version__ = "2.0.0"
version_split = __version__.split(".")
__spec_version__ = (1000 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
diff --git a/sturdy/abi/IReserveInterestRateStrategy.json b/sturdy/abi/IReserveInterestRateStrategy.json
index bc50875..ffcf459 100644
--- a/sturdy/abi/IReserveInterestRateStrategy.json
+++ b/sturdy/abi/IReserveInterestRateStrategy.json
@@ -1,4 +1,104 @@
[
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "provider",
+ "type": "address"
+ }
+ ],
+ "stateMutability": "nonpayable",
+ "type": "constructor"
+ },
+ {
+ "anonymous": false,
+ "inputs": [
+ {
+ "indexed": true,
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ },
+ {
+ "indexed": false,
+ "internalType": "uint256",
+ "name": "optimalUsageRatio",
+ "type": "uint256"
+ },
+ {
+ "indexed": false,
+ "internalType": "uint256",
+ "name": "baseVariableBorrowRate",
+ "type": "uint256"
+ },
+ {
+ "indexed": false,
+ "internalType": "uint256",
+ "name": "variableRateSlope1",
+ "type": "uint256"
+ },
+ {
+ "indexed": false,
+ "internalType": "uint256",
+ "name": "variableRateSlope2",
+ "type": "uint256"
+ }
+ ],
+ "name": "RateDataUpdate",
+ "type": "event"
+ },
+ {
+ "inputs": [],
+ "name": "ADDRESSES_PROVIDER",
+ "outputs": [
+ {
+ "internalType": "contract IPoolAddressesProvider",
+ "name": "",
+ "type": "address"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "MAX_BORROW_RATE",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "MAX_OPTIMAL_POINT",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "MIN_OPTIMAL_POINT",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
{
"inputs": [
{
@@ -20,17 +120,7 @@
},
{
"internalType": "uint256",
- "name": "totalStableDebt",
- "type": "uint256"
- },
- {
- "internalType": "uint256",
- "name": "totalVariableDebt",
- "type": "uint256"
- },
- {
- "internalType": "uint256",
- "name": "averageStableBorrowRate",
+ "name": "totalDebt",
"type": "uint256"
},
{
@@ -44,9 +134,14 @@
"type": "address"
},
{
- "internalType": "address",
- "name": "aToken",
- "type": "address"
+ "internalType": "bool",
+ "name": "usingVirtualBalance",
+ "type": "bool"
+ },
+ {
+ "internalType": "uint256",
+ "name": "virtualUnderlyingBalance",
+ "type": "uint256"
}
],
"internalType": "struct DataTypes.CalculateInterestRatesParams",
@@ -65,7 +160,122 @@
"internalType": "uint256",
"name": "",
"type": "uint256"
- },
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getBaseVariableBorrowRate",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getInterestRateData",
+ "outputs": [
+ {
+ "components": [
+ {
+ "internalType": "uint256",
+ "name": "optimalUsageRatio",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "baseVariableBorrowRate",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "variableRateSlope1",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "variableRateSlope2",
+ "type": "uint256"
+ }
+ ],
+ "internalType": "struct IDefaultInterestRateStrategyV2.InterestRateDataRay",
+ "name": "",
+ "type": "tuple"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getInterestRateDataBps",
+ "outputs": [
+ {
+ "components": [
+ {
+ "internalType": "uint16",
+ "name": "optimalUsageRatio",
+ "type": "uint16"
+ },
+ {
+ "internalType": "uint32",
+ "name": "baseVariableBorrowRate",
+ "type": "uint32"
+ },
+ {
+ "internalType": "uint32",
+ "name": "variableRateSlope1",
+ "type": "uint32"
+ },
+ {
+ "internalType": "uint32",
+ "name": "variableRateSlope2",
+ "type": "uint32"
+ }
+ ],
+ "internalType": "struct IDefaultInterestRateStrategyV2.InterestRateData",
+ "name": "",
+ "type": "tuple"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getMaxVariableBorrowRate",
+ "outputs": [
{
"internalType": "uint256",
"name": "",
@@ -74,5 +284,120 @@
],
"stateMutability": "view",
"type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getOptimalUsageRatio",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getVariableRateSlope1",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ }
+ ],
+ "name": "getVariableRateSlope2",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ },
+ {
+ "internalType": "bytes",
+ "name": "rateData",
+ "type": "bytes"
+ }
+ ],
+ "name": "setInterestRateParams",
+ "outputs": [],
+ "stateMutability": "nonpayable",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint16",
+ "name": "optimalUsageRatio",
+ "type": "uint16"
+ },
+ {
+ "internalType": "uint32",
+ "name": "baseVariableBorrowRate",
+ "type": "uint32"
+ },
+ {
+ "internalType": "uint32",
+ "name": "variableRateSlope1",
+ "type": "uint32"
+ },
+ {
+ "internalType": "uint32",
+ "name": "variableRateSlope2",
+ "type": "uint32"
+ }
+ ],
+ "internalType": "struct IDefaultInterestRateStrategyV2.InterestRateData",
+ "name": "rateData",
+ "type": "tuple"
+ }
+ ],
+ "name": "setInterestRateParams",
+ "outputs": [],
+ "stateMutability": "nonpayable",
+ "type": "function"
}
]
\ No newline at end of file
diff --git a/sturdy/abi/RateTargetBaseInterestRateStrategy.json b/sturdy/abi/RateTargetBaseInterestRateStrategy.json
new file mode 100644
index 0000000..afa962c
--- /dev/null
+++ b/sturdy/abi/RateTargetBaseInterestRateStrategy.json
@@ -0,0 +1,309 @@
+[
+ {
+ "inputs": [
+ {
+ "internalType": "contract IPoolAddressesProvider",
+ "name": "provider",
+ "type": "address"
+ },
+ {
+ "internalType": "address",
+ "name": "rateSource",
+ "type": "address"
+ },
+ {
+ "internalType": "uint256",
+ "name": "optimalUsageRatio",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "baseVariableBorrowRateSpread",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "variableRateSlope1",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "variableRateSlope2",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "nonpayable",
+ "type": "constructor"
+ },
+ {
+ "inputs": [],
+ "name": "ADDRESSES_PROVIDER",
+ "outputs": [
+ {
+ "internalType": "contract IPoolAddressesProvider",
+ "name": "",
+ "type": "address"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "MAX_EXCESS_STABLE_TO_TOTAL_DEBT_RATIO",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "MAX_EXCESS_USAGE_RATIO",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "OPTIMAL_STABLE_TO_TOTAL_DEBT_RATIO",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "OPTIMAL_USAGE_RATIO",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "RATE_SOURCE",
+ "outputs": [
+ {
+ "internalType": "contract IRateSource",
+ "name": "",
+ "type": "address"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "components": [
+ {
+ "internalType": "uint256",
+ "name": "unbacked",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "liquidityAdded",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "liquidityTaken",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "totalStableDebt",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "totalVariableDebt",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "averageStableBorrowRate",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "reserveFactor",
+ "type": "uint256"
+ },
+ {
+ "internalType": "address",
+ "name": "reserve",
+ "type": "address"
+ },
+ {
+ "internalType": "address",
+ "name": "aToken",
+ "type": "address"
+ }
+ ],
+ "internalType": "struct DataTypes.CalculateInterestRatesParams",
+ "name": "params",
+ "type": "tuple"
+ }
+ ],
+ "name": "calculateInterestRates",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "liquidityRate",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "stableBorrowRate",
+ "type": "uint256"
+ },
+ {
+ "internalType": "uint256",
+ "name": "variableBorrowRate",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getBaseStableBorrowRate",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getBaseVariableBorrowRate",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getBaseVariableBorrowRateSpread",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getMaxVariableBorrowRate",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getStableRateExcessOffset",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "pure",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getStableRateSlope1",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "pure",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getStableRateSlope2",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "pure",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getVariableRateSlope1",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ },
+ {
+ "inputs": [],
+ "name": "getVariableRateSlope2",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ }
+]
\ No newline at end of file
diff --git a/sturdy/abi/SturdySiloStrategy.json b/sturdy/abi/SturdySiloStrategy.json
index 7ea3ed1..281c6d1 100644
--- a/sturdy/abi/SturdySiloStrategy.json
+++ b/sturdy/abi/SturdySiloStrategy.json
@@ -30,6 +30,19 @@
"stateMutability": "nonpayable",
"type": "constructor"
},
+ {
+ "name": "pricePerShare",
+ "type": "function",
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "",
+ "type": "uint256",
+ "internalType": "uint256"
+ }
+ ],
+ "stateMutability": "view"
+ },
{
"stateMutability": "nonpayable",
"type": "fallback"
diff --git a/sturdy/algo.py b/sturdy/algo.py
index 3134eae..5728209 100644
--- a/sturdy/algo.py
+++ b/sturdy/algo.py
@@ -7,11 +7,10 @@
from sturdy.base.miner import BaseMinerNeuron
from sturdy.pools import (
POOL_TYPES,
- BasePool,
PoolFactory,
get_minimum_allocation,
)
-from sturdy.protocol import REQUEST_TYPES, AllocateAssets
+from sturdy.protocol import AllocateAssets
THRESHOLD = 0.99 # used to avoid over-allocations
@@ -20,21 +19,16 @@
def naive_algorithm(self: BaseMinerNeuron, synapse: AllocateAssets) -> dict:
bt.logging.debug(f"received request type: {synapse.request_type}")
pools = cast(dict, synapse.assets_and_pools["pools"])
- match synapse.request_type:
- case REQUEST_TYPES.ORGANIC:
- for uid, pool in pools:
- pools[uid] = PoolFactory.create_pool(
- pool_type=pool.pool_type,
- web3_provider=self.w3, # type: ignore[]
- user_address=(
- pool.user_address if pool.user_address != ADDRESS_ZERO else synapse.user_address
- ), # TODO: is there a cleaner way to do this?
- contract_address=pool.contract_address,
- )
- case _: # we assume it is a synthetic request
- for uid in pools:
- pools[uid] = BasePool(**pools[uid].dict())
+ for uid, pool in pools.items():
+ pools[uid] = PoolFactory.create_pool(
+ pool_type=pool.pool_type,
+ web3_provider=self.w3, # type: ignore[]
+ user_address=(
+ pool.user_address if pool.user_address != ADDRESS_ZERO else synapse.user_address
+ ), # TODO: is there a cleaner way to do this?
+ contract_address=pool.contract_address,
+ )
total_assets_available = int(THRESHOLD * synapse.assets_and_pools["total_assets"])
pools = cast(dict, synapse.assets_and_pools["pools"])
@@ -44,15 +38,7 @@ def naive_algorithm(self: BaseMinerNeuron, synapse: AllocateAssets) -> dict:
# sync pool parameters by calling smart contracts on chain
for pool in pools.values():
- match pool.pool_type:
- case POOL_TYPES.AAVE:
- pool.sync(synapse.user_address, self.w3)
- case POOL_TYPES.STURDY_SILO:
- pool.sync(synapse.user_address, self.w3)
- case T if T in (POOL_TYPES.DAI_SAVINGS, POOL_TYPES.COMPOUND_V3):
- pool.sync(self.w3)
- case _:
- pass
+ pool.sync(self.w3)
# check the amounts that have been borrowed from the pools - and account for them
minimums = {}
@@ -64,24 +50,14 @@ def naive_algorithm(self: BaseMinerNeuron, synapse: AllocateAssets) -> dict:
# rates are determined by making on chain calls to smart contracts
for pool in pools.values():
match pool.pool_type:
- case POOL_TYPES.AAVE:
- apy = pool.supply_rate(synapse.user_address, balance // len(pools)) # type: ignore[]
- supply_rates[pool.contract_address] = apy
- supply_rate_sum += apy
- case T if T in (POOL_TYPES.STURDY_SILO, POOL_TYPES.COMPOUND_V3, POOL_TYPES.MORPHO, POOL_TYPES.YEARN_V3):
- apy = pool.supply_rate(balance // len(pools)) # type: ignore[]
- supply_rates[pool.contract_address] = apy
- supply_rate_sum += apy
case POOL_TYPES.DAI_SAVINGS:
apy = pool.supply_rate()
supply_rates[pool.contract_address] = apy
supply_rate_sum += apy
- case POOL_TYPES.SYNTHETIC:
- apy = pool.supply_rate
+ case _:
+ apy = pool.supply_rate(balance // len(pools))
supply_rates[pool.contract_address] = apy
supply_rate_sum += apy
- case _:
- pass
return {
pool_uid: minimums[pool_uid] + math.floor((supply_rates[pool_uid] / supply_rate_sum) * balance) for pool_uid in pools
diff --git a/sturdy/base/miner.py b/sturdy/base/miner.py
index b09c7ab..9f6877c 100644
--- a/sturdy/base/miner.py
+++ b/sturdy/base/miner.py
@@ -54,17 +54,13 @@ def __init__(self, config=None):
w3_provider_url = os.environ.get("WEB3_PROVIDER_URL")
if w3_provider_url is None:
- raise ValueError(
- "You must provide a valid web3 provider url in order to handle organic requests!"
- )
+ raise ValueError("You must provide a valid web3 provider url in order to handle organic requests!")
self.w3 = Web3(Web3.HTTPProvider(w3_provider_url))
# Warn if allowing incoming requests from anyone.
if not self.config.blacklist.force_validator_permit:
- bt.logging.warning(
- "You are allowing non-validators to send requests to your miner. This is a security risk."
- )
+ bt.logging.warning("You are allowing non-validators to send requests to your miner. This is a security risk.")
if self.config.blacklist.allow_non_registered:
bt.logging.warning(
"You are allowing non-registered entities to send requests to your miner. This is a security risk."
@@ -132,10 +128,7 @@ def run(self):
# This loop maintains the miner's operations until intentionally stopped.
try:
while not self.should_exit:
- while (
- self.block - self.metagraph.last_update[self.uid]
- < self.config.neuron.epoch_length
- ):
+ while self.block - self.metagraph.last_update[self.uid] < self.config.neuron.epoch_length:
# Wait before checking again.
time.sleep(1)
diff --git a/sturdy/base/neuron.py b/sturdy/base/neuron.py
index c5094b7..559a8df 100644
--- a/sturdy/base/neuron.py
+++ b/sturdy/base/neuron.py
@@ -90,9 +90,7 @@ def __init__(self, config=None):
# The wallet holds the cryptographic key pairs for the miner.
if self.config.mock:
self.wallet = bt.MockWallet(config=self.config)
- self.subtensor = MockSubtensor(
- self.config.netuid, n=self.config.mock_n, wallet=self.wallet
- )
+ self.subtensor = MockSubtensor(self.config.netuid, n=self.config.mock_n, wallet=self.wallet)
self.metagraph = MockMetagraph(self.config.netuid, subtensor=self.subtensor)
else:
self.wallet = bt.wallet(config=self.config)
@@ -115,35 +113,35 @@ def __init__(self, config=None):
self.step = 0
@abstractmethod
- async def forward(self, synapse: bt.Synapse) -> bt.Synapse:
- ...
+ async def forward(self, synapse: bt.Synapse) -> bt.Synapse: ...
@abstractmethod
- def run(self):
- ...
+ def run(self): ...
def sync(self):
"""
Wrapper for synchronizing the state of the network for the given miner or validator.
"""
# Ensure miner or validator hotkey is still registered on the network.
- self.check_registered()
- if self.should_sync_metagraph():
- try:
+ try:
+ self.check_registered()
+ except Exception:
+ bt.logging.error("Could not check registration status! Skipping...")
+
+ try:
+ if self.should_sync_metagraph():
self.resync_metagraph()
- except Exception as e:
- bt.logging.error(
- "There was an issue with trying to sync with the metagraph! See Error:"
- )
- bt.logging.error(e)
-
- if self.should_set_weights():
- try:
+ except Exception as e:
+ bt.logging.error("There was an issue with trying to sync with the metagraph! See Error:")
+ bt.logging.error(e)
+
+ try:
+ if self.should_set_weights():
self.set_weights()
- except Exception as e:
- bt.logging.error("Failed to set weights! See Error:")
- bt.logging.error(e)
+ except Exception as e:
+ bt.logging.error("Failed to set weights! See Error:")
+ bt.logging.error(e)
# Always save state.
self.save_state()
@@ -164,9 +162,7 @@ def should_sync_metagraph(self):
"""
Check if enough epoch blocks have elapsed since the last checkpoint to sync.
"""
- return (
- self.block - self.metagraph.last_update[self.uid]
- ) > self.config.neuron.epoch_length
+ return (self.block - self.metagraph.last_update[self.uid]) > self.config.neuron.epoch_length
def should_set_weights(self) -> bool:
# Don't set weights on initialization.
diff --git a/sturdy/base/validator.py b/sturdy/base/validator.py
index 7d4799d..1c06f13 100644
--- a/sturdy/base/validator.py
+++ b/sturdy/base/validator.py
@@ -17,25 +17,23 @@
# DEALINGS IN THE SOFTWARE.
-import os
-import copy
-import torch
-import asyncio
import argparse
+import asyncio
+import copy
+import os
import threading
-from web3 import Web3
-import bittensor as bt
-
-from typing import List
from traceback import print_exception
+import bittensor as bt
+import torch
+from dotenv import load_dotenv
+from web3 import Web3
+
from sturdy.base.neuron import BaseNeuron
+from sturdy.constants import QUERY_RATE
from sturdy.mock import MockDendrite
from sturdy.utils.config import add_validator_args
-from sturdy.utils.wandb import init_wandb_validator, should_reinit_wandb, reinit_wandb
-from sturdy.constants import QUERY_RATE
-
-from dotenv import load_dotenv
+from sturdy.utils.wandb import init_wandb_validator, reinit_wandb, should_reinit_wandb
class BaseValidatorNeuron(BaseNeuron):
@@ -54,6 +52,9 @@ def __init__(self, config=None) -> None:
super().__init__(config=config)
load_dotenv()
+ # set last query block to be 0
+ self.last_query_block = 0
+
# init wandb
self.wandb_run_log_count = 0
if not self.config.wandb.off:
@@ -63,12 +64,12 @@ def __init__(self, config=None) -> None:
# Save a copy of the hotkeys to local memory.
self.hotkeys = copy.deepcopy(self.metagraph.hotkeys)
- if self.config.organic:
- w3_provider_url = os.environ.get("WEB3_PROVIDER_URL")
- if w3_provider_url is None:
- raise ValueError("You must provide a valid web3 provider url as an organic validator!")
+ # set web3 provider url
+ w3_provider_url = os.environ.get("WEB3_PROVIDER_URL")
+ if w3_provider_url is None:
+ raise ValueError("You must provide a valid web3 provider url")
- self.w3 = Web3(Web3.HTTPProvider(w3_provider_url))
+ self.w3 = Web3(Web3.HTTPProvider(w3_provider_url))
# Dendrite lets us send messages to other nodes (axons) in the network.
if self.config.mock:
@@ -103,7 +104,7 @@ def __init__(self, config=None) -> None:
self.thread: threading.Thread = None
self.lock = asyncio.Lock()
- def serve_axon(self):
+ def serve_axon(self) -> None:
"""Serve axon to enable external connections."""
bt.logging.info("serving ip to chain...")
@@ -121,17 +122,15 @@ def serve_axon(self):
)
except Exception as e:
bt.logging.error(f"Failed to serve Axon with exception: {e}")
- pass
except Exception as e:
bt.logging.error(f"Failed to create Axon initialize with exception: {e}")
- pass
- async def concurrent_forward(self):
+ async def concurrent_forward(self) -> None:
coroutines = [self.forward() for _ in range(self.config.neuron.num_concurrent_forwards)]
await asyncio.gather(*coroutines)
- def run(self):
+ def run(self) -> None:
"""
Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on
keyboard interrupts and logs unforeseen errors.
@@ -159,14 +158,13 @@ def run(self):
self.sync()
bt.logging.info(f"Validator starting at block: {self.block}")
- last_query_block = self.block
# This loop maintains the validator's operations until intentionally stopped.
try:
while True:
# Run multiple forwards concurrently - runs every 2 blocks
current_block = self.subtensor.block
- if current_block - last_query_block > QUERY_RATE:
+ if current_block - self.last_query_block > QUERY_RATE:
bt.logging.info(f"step({self.step}) block({self.block})")
if self.config.organic:
@@ -175,7 +173,7 @@ def run(self):
else:
self.loop.run_until_complete(self.concurrent_forward())
- last_query_block = current_block
+ self.last_query_block = current_block
# Sync metagraph and potentially set weights.
self.sync()
@@ -192,9 +190,7 @@ def run(self):
sim_penalties = {
f"similarity_penalties/uid_{uid}": score for uid, score in self.similarity_penalties.items()
}
- apys = {
- f"apys/uid_{uid}": apy for uid, apy in self.sorted_apys.items()
- }
+ apys = {f"apys/uid_{uid}": apy for uid, apy in self.sorted_apys.items()}
axon_times = {
f"axon_times/uid_{uid}": axon_time for uid, axon_time in self.sorted_axon_times.items()
}
@@ -238,13 +234,13 @@ def run(self):
bt.logging.error("Error during validation", str(err))
bt.logging.debug(print_exception(type(err), err, err.__traceback__))
- async def run_concurrent_forward(self):
+ async def run_concurrent_forward(self) -> None:
try:
await self.concurrent_forward()
except Exception as e:
bt.logging.error(f"Error in concurrent_forward: {e}")
- def run_in_background_thread(self):
+ def run_in_background_thread(self) -> None:
"""
Starts the validator's operations in a background thread upon entering the context.
This method facilitates the use of the validator in a 'with' statement.
@@ -257,7 +253,7 @@ def run_in_background_thread(self):
self.is_running = True
bt.logging.debug("Started")
- def stop_run_thread(self):
+ def stop_run_thread(self) -> None:
"""
Stops the validator's operations that are running in the background thread.
"""
@@ -268,11 +264,11 @@ def stop_run_thread(self):
self.is_running = False
bt.logging.debug("Stopped")
- def __enter__(self):
+ def __enter__(self) -> "BaseValidatorNeuron":
self.run_in_background_thread()
return self
- def __exit__(self, exc_type, exc_value, traceback):
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
"""
Stops the validator's background operations upon exiting the context.
This method facilitates the use of the validator in a 'with' statement.
@@ -298,7 +294,7 @@ def __exit__(self, exc_type, exc_value, traceback):
bt.logging.debug("closed wandb connection")
bt.logging.success("Validator killed")
- def set_weights(self):
+ def set_weights(self) -> None:
"""
Sets the validator weights to the metagraph hotkeys based on the scores it has received from the miners. The weights
determine the trust and incentive level the validator assigns to miner nodes on the network.
@@ -354,7 +350,7 @@ def set_weights(self):
else:
bt.logging.error("set_weights failed", msg)
- def resync_metagraph(self):
+ def resync_metagraph(self) -> None:
"""Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph."""
bt.logging.info("resync_metagraph()")
@@ -386,7 +382,7 @@ def resync_metagraph(self):
# Update the hotkeys.
self.hotkeys = copy.deepcopy(self.metagraph.hotkeys)
- def update_scores(self, rewards: torch.Tensor, uids: list[int]):
+ def update_scores(self, rewards: torch.Tensor, uids: list[int]) -> None:
"""Performs exponential moving average on the scores based on the rewards received from the miners."""
# Check if rewards contains NaN values.
@@ -396,10 +392,7 @@ def update_scores(self, rewards: torch.Tensor, uids: list[int]):
rewards = torch.nan_to_num(rewards, 0)
# Check if `uids` is already a tensor and clone it to avoid the warning.
- if isinstance(uids, torch.Tensor):
- uids_tensor = uids.clone().detach()
- else:
- uids_tensor = torch.tensor(uids).to(self.device)
+ uids_tensor = uids.clone().detach() if isinstance(uids, torch.Tensor) else torch.tensor(uids).to(self.device)
# Compute forward pass rewards, assumes uids are mutually exclusive.
# shape: [ metagraph.n ]
@@ -412,21 +405,17 @@ def update_scores(self, rewards: torch.Tensor, uids: list[int]):
self.scores: torch.Tensor = alpha * scattered_rewards + (1 - alpha) * self.scores.to(self.device)
bt.logging.debug(f"Updated moving avg scores: {self.scores}")
- def save_state(self):
+ def save_state(self) -> None:
"""Saves the state of the validator to a file."""
bt.logging.info("Saving validator state.")
# Save the state of the validator to file.
torch.save(
- {
- "step": self.step,
- "scores": self.scores,
- "hotkeys": self.hotkeys,
- },
+ {"step": self.step, "scores": self.scores, "hotkeys": self.hotkeys, "last_query_block": self.last_query_block},
self.config.neuron.full_path + "/state.pt",
)
- def load_state(self):
+ def load_state(self) -> None:
"""Loads the state of the validator from a file."""
bt.logging.info("Loading validator state.")
@@ -435,3 +424,4 @@ def load_state(self):
self.step = state["step"]
self.scores = state["scores"]
self.hotkeys = state["hotkeys"]
+ self.last_query_block = state["last_query_block"]
diff --git a/sturdy/constants.py b/sturdy/constants.py
index 80c063b..efc61ed 100644
--- a/sturdy/constants.py
+++ b/sturdy/constants.py
@@ -28,11 +28,21 @@
STOCHASTICITY_STEP = 0.0001
POOL_RESERVE_SIZE = int(100e18) # 100
-QUERY_RATE = 2 # how often synthetic validator queries miners (blocks)
+QUERY_RATE = 50 # how often synthetic validator queries miners (blocks)
QUERY_TIMEOUT = 45 # timeout (seconds)
+ORGANIC_SCORING_PERIOD = 28800 # organic scoring period (seconds)
+MIN_SCORING_PERIOD = 7200 # minimum scoring period (seconds)
+MAX_SCORING_PERIOD = 43200 # maximum scoring period (seconds)
+SCORING_PERIOD_STEP = 3600 # scoring period step size (seconds)
+
+SCORING_WINDOW = 420 # scoring window (seconds)
+
TOTAL_ALLOC_THRESHOLD = 0.98
-SIMILARITY_THRESHOLD = 0.01 # similarity threshold for plagiarism checking
+ALLOCATION_SIMILARITY_THRESHOLD = 1e-4 # allocation similarity threshold for plagiarism checking
+APY_SIMILARITY_THRESHOLD = 1e-4 # apy similarity threshold for plagiarism checking
+
+DB_DIR = "validator_database.db" # default validator database file path
# The following constants are for different pool models
# Aave
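As a rough sanity check on the new timing constants above (assuming an average block time of about 12 seconds, which this patch does not itself specify), synthetic queries now fire roughly every ten minutes, while scoring periods span two to twelve hours:

```python
# Back-of-the-envelope check of the new constants; the 12 s block time is an
# assumption for illustration only.
QUERY_RATE = 50             # blocks between synthetic queries
ASSUMED_BLOCK_TIME_S = 12   # assumed average block time (seconds)
MIN_SCORING_PERIOD = 7200   # seconds
MAX_SCORING_PERIOD = 43200  # seconds

print(f"~{QUERY_RATE * ASSUMED_BLOCK_TIME_S / 60:.0f} min between synthetic queries")
print(f"scoring periods span {MIN_SCORING_PERIOD // 3600}-{MAX_SCORING_PERIOD // 3600} hours")
```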
diff --git a/sturdy/mock.py b/sturdy/mock.py
index f50107a..fdece88 100644
--- a/sturdy/mock.py
+++ b/sturdy/mock.py
@@ -7,6 +7,7 @@
import numpy as np
from sturdy.constants import QUERY_TIMEOUT
+from sturdy.pools import get_minimum_allocation
def generate_array_with_sum(rng_gen: np.random.RandomState, total_sum: int, min_amounts: [int]) -> list:
@@ -111,14 +112,11 @@ async def single_axon_response(i, axon): # noqa: ANN202, ARG001
s.dendrite.status_message = "OK"
synapse.dendrite.process_time = str(process_time)
-
if self.custom_allocs:
pools = synapse.assets_and_pools["pools"]
- min_amounts = [pool.borrow_amount for pool in pools.values()]
+ min_amounts = [get_minimum_allocation(pool) for pool in pools.values()]
- alloc_values = generate_array_with_sum(
- np.random, s.assets_and_pools["total_assets"], min_amounts
- )
+ alloc_values = generate_array_with_sum(np.random, s.assets_and_pools["total_assets"], min_amounts)
contract_addrs = [pool.contract_address for pool in s.assets_and_pools["pools"].values()]
allocations = {contract_addrs[i]: alloc_values[i] for i in range(len(s.assets_and_pools["pools"]))}
diff --git a/sturdy/pool_registry/__init__.py b/sturdy/pool_registry/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/sturdy/pool_registry/pool_registry.py b/sturdy/pool_registry/pool_registry.py
new file mode 100644
index 0000000..5f84459
--- /dev/null
+++ b/sturdy/pool_registry/pool_registry.py
@@ -0,0 +1,113 @@
+POOL_REGISTRY = {
+ "Sturdy Redact Aggregator": {
+ "user_address": "0xcFB23D05f32eA0BE0dBb5078d189Cca89688945E",
+ "assets_and_pools": {
+ "pools": {
+ "0x0669091F451142b3228171aE6aD794cF98288124": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x0669091F451142b3228171aE6aD794cF98288124",
+ },
+ "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D",
+ },
+ }
+ },
+ },
+ "Sturdy Crvusd Aggregator": {
+ "user_address": "0x73E4C11B670Ef9C025A030A20b72CB9150E54523",
+ "assets_and_pools": {
+ "pools": {
+ "0x6311fF24fb15310eD3d2180D3d0507A21a8e5227": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x6311fF24fb15310eD3d2180D3d0507A21a8e5227",
+ },
+ "0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x200723063111f9f8f1d44c0F30afAdf0C0b1a04b",
+ },
+ "0x26fe402A57D52c8a323bb6e09f06489C8216aC88": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x26fe402A57D52c8a323bb6e09f06489C8216aC88",
+ },
+ "0x8dDE9A50a91cc0a5DaBdc5d3931c1AF60408c84D": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x8dDE9A50a91cc0a5DaBdc5d3931c1AF60408c84D",
+ },
+ }
+ },
+ },
+ "Sturdy GHO Aggregator": {
+ "user_address": "0x93eBc3cA85F96aFD72edB914e833Fe18888DE179",
+ "assets_and_pools": {
+ "pools": {
+ "0x0b8C80fd9CaC5570Ff829416f0aFCE7aF6F3C6f8": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x0b8C80fd9CaC5570Ff829416f0aFCE7aF6F3C6f8",
+ },
+ "0xb3Bf04A939aAcFf5BdCFc273CE4F36CF29F063Db": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0xb3Bf04A939aAcFf5BdCFc273CE4F36CF29F063Db",
+ },
+ }
+ },
+ },
+ "Sturdy tBTC Aggregator": {
+ "user_address": "0xAeD098db0e39bed6DDc2c07727B8FfC0BA470D9C",
+ "assets_and_pools": {
+ "pools": {
+ "0x6F03c615a3E609D2CF149754CC55462b6477965c": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x6F03c615a3E609D2CF149754CC55462b6477965c",
+ },
+ "0xf94B349d52c542aBd8Fb612c2854974e1D72223B": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0xf94B349d52c542aBd8Fb612c2854974e1D72223B",
+ },
+ "0xEEF271A0071423EA56d38E4aBE748165cc432e3f": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0xEEF271A0071423EA56d38E4aBE748165cc432e3f",
+ },
+ }
+ },
+ },
+ "Morpho USDC Vaults": {
+ "user_address": "0xFA60E843a52eff94901f43ac08232b59351192cc",
+ "total_assets": 1000000000000,
+ "assets_and_pools": {
+ "pools": {
+ "0xd63070114470f685b75B74D60EEc7c1113d33a3D": {
+ "pool_type": "MORPHO",
+ "contract_address": "0xd63070114470f685b75B74D60EEc7c1113d33a3D",
+ },
+ "0xBEEF01735c132Ada46AA9aA4c54623cAA92A64CB": {
+ "pool_type": "MORPHO",
+ "contract_address": "0xBEEF01735c132Ada46AA9aA4c54623cAA92A64CB",
+ },
+ "0x8eB67A509616cd6A7c1B3c8C21D48FF57df3d458": {
+ "pool_type": "MORPHO",
+ "contract_address": "0x8eB67A509616cd6A7c1B3c8C21D48FF57df3d458",
+ },
+ "0xdd0f28e19C1780eb6396170735D45153D261490d": {
+ "pool_type": "MORPHO",
+ "contract_address": "0xdd0f28e19C1780eb6396170735D45153D261490d",
+ },
+ }
+ },
+ },
+ "Yearn DAI-2 Vaults": {
+ "user_address": "0x92545bCE636E6eE91D88D2D017182cD0bd2fC22e",
+ "assets_and_pools": {
+ "pools": {
+ "0x028eC7330ff87667b6dfb0D94b954c820195336c": {
+ "pool_type": "YEARN_V3",
+ "contract_address": "0x028eC7330ff87667b6dfb0D94b954c820195336c",
+ },
+ "0x6164045FC2b2b269ffcaB2197736A74B1725B6C6": {
+ "pool_type": "YEARN_V3",
+ "contract_address": "0x6164045FC2b2b269ffcaB2197736A74B1725B6C6",
+ },
+ }
+ },
+ },
+}
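A minimal sketch of how one of the registry entries above could be turned into pool objects, mirroring the `assets_pools_for_challenge_data()` helper added later in this patch; the `web3_provider` is assumed to be an already-connected mainnet `Web3` instance:

```python
from sturdy.pool_registry.pool_registry import POOL_REGISTRY
from sturdy.pools import POOL_TYPES, PoolFactory

entry = POOL_REGISTRY["Sturdy Crvusd Aggregator"]
pools = {
    addr: PoolFactory.create_pool(
        pool_type=POOL_TYPES._member_map_[cfg["pool_type"]],
        user_address=entry["user_address"],
        contract_address=cfg["contract_address"],
    )
    for addr, cfg in entry["assets_and_pools"]["pools"].items()
}

# each pool is then synced against the chain before it is queried, e.g.:
# for pool in pools.values():
#     pool.sync(web3_provider)
```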
diff --git a/sturdy/pools.py b/sturdy/pools.py
index 75f37c0..4e8dc64 100644
--- a/sturdy/pools.py
+++ b/sturdy/pools.py
@@ -20,7 +20,7 @@
from decimal import Decimal
from enum import IntEnum
from pathlib import Path
-from typing import Any, ClassVar, Literal
+from typing import Any, ClassVar
import bittensor as bt
import numpy as np
@@ -32,11 +32,10 @@
from web3.types import BlockData
from sturdy.constants import *
-from sturdy.utils.ethmath import wei_div, wei_mul
+from sturdy.pool_registry.pool_registry import POOL_REGISTRY
+from sturdy.utils.ethmath import wei_div
from sturdy.utils.misc import (
- format_num_prec,
getReserveFactor,
- randrange_float,
rayMul,
retry_with_backoff,
ttl_cache,
@@ -44,13 +43,13 @@
class POOL_TYPES(IntEnum):
- SYNTHETIC = 0
STURDY_SILO = 1
- AAVE = 2
+ AAVE_DEFAULT = 2
DAI_SAVINGS = 3
COMPOUND_V3 = 4
MORPHO = 5
YEARN_V3 = 6
+ AAVE_TARGET = 7
def get_minimum_allocation(pool: "ChainBasedPoolModel") -> int:
@@ -60,29 +59,29 @@ def get_minimum_allocation(pool: "ChainBasedPoolModel") -> int:
match pool.pool_type:
case POOL_TYPES.STURDY_SILO:
borrow_amount = pool._totalBorrow
- our_supply = pool._curr_deposit_amount
- assets_available = pool._totalAssets - borrow_amount
- case POOL_TYPES.AAVE:
+ our_supply = pool._user_deposits
+ assets_available = max(0, pool._total_supplied_assets - borrow_amount)
+ case T if T in (POOL_TYPES.AAVE_DEFAULT, POOL_TYPES.AAVE_TARGET):
# borrow amount for aave pools is total_stable_debt + total_variable_debt
borrow_amount = ((pool._nextTotalStableDebt * int(1e18)) // int(10**pool._decimals)) + (
(pool._totalVariableDebt * int(1e18)) // int(10**pool._decimals)
)
- our_supply = pool._collateral_amount
- assets_available = ((pool._total_supplied * int(1e18)) // int(10**pool._decimals)) - borrow_amount
+ our_supply = pool._user_deposits
+ assets_available = max(0, ((pool._total_supplied_assets * int(1e18)) // int(10**pool._decimals)) - borrow_amount)
case POOL_TYPES.COMPOUND_V3:
borrow_amount = pool._total_borrow
- our_supply = pool._deposit_amount
- assets_available = pool._total_supply - borrow_amount
+ our_supply = pool._user_deposits
+ assets_available = max(0, pool._total_supplied_assets - borrow_amount)
case POOL_TYPES.MORPHO:
borrow_amount = pool._curr_borrows
- our_supply = pool._user_assets
- assets_available = pool._total_assets - borrow_amount
+ our_supply = pool._user_deposits
+ assets_available = max(0, pool._total_supplied_assets - borrow_amount)
case POOL_TYPES.YEARN_V3:
- return pool._curr_deposit - pool._max_withdraw
+ return max(0, pool._user_deposits - pool._max_withdraw)
case POOL_TYPES.DAI_SAVINGS:
pass # TODO: is there a more appropriate way to go about this?
- case _: # we assume it is a synthetic pool
- return pool.borrow_amount
+ case _: # not a valid pool type
+ return 1
return 0 if borrow_amount <= assets_available else assets_available if our_supply >= assets_available else 0
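To illustrate the return expression above with made-up wei amounts (purely hypothetical values):

```python
# Hypothetical numbers to trace get_minimum_allocation()'s final branch.
borrow_amount = 80      # total outstanding borrows
assets_available = 60   # supplied assets minus borrows, floored at 0
our_supply = 100        # the validator's current deposits in the pool

minimum = 0 if borrow_amount <= assets_available else (
    assets_available if our_supply >= assets_available else 0
)
print(minimum)  # 60 -> assets_available is returned when borrows exceed liquidity
```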
@@ -145,94 +144,6 @@ def check_allocations(
return True
-class BasePoolModel(BaseModel):
- """This model will primarily be used for synthetic requests"""
-
- class Config:
- use_enum_values = True # This will use the enum's value instead of the enum itself
- smart_union = True
-
- pool_model_disc: Literal["SYNTHETIC"] = Field(default="SYNTHETIC", description="pool type discriminator")
- contract_address: str = Field(..., description='the "contract address" of the pool - used here as a uid')
- pool_type: POOL_TYPES | int | str = Field(default=POOL_TYPES.SYNTHETIC, const=True, description="type of pool")
- base_rate: int = Field(..., description="base interest rate")
- base_slope: int = Field(..., description="base interest rate slope")
- kink_slope: int = Field(..., description="kink slope")
- optimal_util_rate: int = Field(..., description="optimal utilisation rate")
- borrow_amount: int = Field(..., description="borrow amount in wei")
- reserve_size: int = Field(..., description="pool reserve size in wei")
-
- @validator("pool_type", pre=True)
- def validator_pool_type(cls, value) -> POOL_TYPES | int | str:
- if isinstance(value, POOL_TYPES):
- return value
- if isinstance(value, int):
- return POOL_TYPES(value)
- if isinstance(value, str):
- try:
- return POOL_TYPES[value]
- except KeyError:
- raise ValueError(f"Invalid enum name: {value}") # noqa: B904
- raise ValueError(f"Invalid value: {value}")
-
- @root_validator
- def check_params(cls, values): # noqa: ANN201
- if not Web3.is_address(values.get("contract_address")):
- raise ValueError("pool address is invalid!")
- if values.get("base_rate") < 0:
- raise ValueError("base rate is negative")
- if values.get("base_slope") < 0:
- raise ValueError("base slope is negative")
- if values.get("kink_slope") < 0:
- raise ValueError("kink slope is negative")
- if values.get("optimal_util_rate") < 0:
- raise ValueError("optimal utilization rate is negative")
- if values.get("borrow_amount") < 0:
- raise ValueError("borrow amount is negative")
- if values.get("reserve_size") < 0:
- raise ValueError("reserve size is negative")
- return values
-
-
-class BasePool(BasePoolModel):
- """This class defines the base pool type
-
- Args:
- contract_address: (str),
- base_rate: (int),
- base_slope: (int),
- kink_slope: (int),
- optimal_util_rate: (int),
- borrow_amount: (int),
- reserve_size: (int),
- """
-
- @property
- def util_rate(self) -> int:
- return wei_div(self.borrow_amount, self.reserve_size)
-
- @property
- def borrow_rate(self) -> int:
- util_rate = self.util_rate
- return (
- self.base_rate + wei_mul(wei_div(util_rate, self.optimal_util_rate), self.base_slope)
- if util_rate < self.optimal_util_rate
- else self.base_rate
- + self.base_slope
- + wei_mul(
- wei_div(
- (util_rate - self.optimal_util_rate),
- int(1e18 - self.optimal_util_rate),
- ),
- self.kink_slope,
- )
- )
-
- @property
- def supply_rate(self) -> int:
- return wei_mul(self.util_rate, self.borrow_rate)
-
-
class ChainBasedPoolModel(BaseModel):
"""This serves as the base model of pools which need to pull data from on-chain
@@ -244,7 +155,6 @@ class Config:
use_enum_values = True # This will use the enum's value instead of the enum itself
smart_union = True
- pool_model_disc: Literal["CHAIN"] = Field(default="CHAIN", description="pool type discriminator")
pool_type: POOL_TYPES | int | str = Field(..., description="type of pool")
user_address: str = Field(
default=ADDRESS_ZERO,
@@ -288,12 +198,10 @@ def supply_rate(self, **args: Any) -> int:
class PoolFactory:
@staticmethod
- def create_pool(pool_type: POOL_TYPES, **kwargs: Any) -> ChainBasedPoolModel | BasePoolModel:
+ def create_pool(pool_type: POOL_TYPES, **kwargs: Any) -> ChainBasedPoolModel:
match pool_type:
- case POOL_TYPES.SYNTHETIC:
- return BasePool(**kwargs)
- case POOL_TYPES.AAVE:
- return AaveV3DefaultInterestRatePool(**kwargs)
+ case POOL_TYPES.AAVE_DEFAULT:
+ return AaveV3DefaultInterestRateV2Pool(**kwargs)
case POOL_TYPES.STURDY_SILO:
return VariableInterestSturdySiloStrategy(**kwargs)
case POOL_TYPES.DAI_SAVINGS:
@@ -304,14 +212,16 @@ def create_pool(pool_type: POOL_TYPES, **kwargs: Any) -> ChainBasedPoolModel | B
return MorphoVault(**kwargs)
case POOL_TYPES.YEARN_V3:
return YearnV3Vault(**kwargs)
+ case POOL_TYPES.AAVE_TARGET:
+ return AaveV3RateTargetBaseInterestRatePool(**kwargs)
case _:
raise ValueError(f"Unknown pool type: {pool_type}")
-class AaveV3DefaultInterestRatePool(ChainBasedPoolModel):
+class AaveV3DefaultInterestRateV2Pool(ChainBasedPoolModel):
"""This class defines the default pool type for Aave"""
- pool_type: POOL_TYPES = Field(default=POOL_TYPES.AAVE, const=True, description="type of pool")
+ pool_type: POOL_TYPES = Field(default=POOL_TYPES.AAVE_DEFAULT, const=True, description="type of pool")
_atoken_contract: Contract = PrivateAttr()
_pool_contract: Contract = PrivateAttr()
@@ -324,10 +234,11 @@ class AaveV3DefaultInterestRatePool(ChainBasedPoolModel):
_variable_debt_token_contract = PrivateAttr()
_totalVariableDebt = PrivateAttr()
_reserveFactor = PrivateAttr()
- _collateral_amount: int = PrivateAttr()
- _collateral_amount: int = PrivateAttr()
- _total_supplied: int = PrivateAttr()
+ _user_deposits: int = PrivateAttr()
+ _total_supplied_assets: int = PrivateAttr()
_decimals: int = PrivateAttr()
+ _user_asset_balance: int = PrivateAttr()
+ _normalized_income: int = PrivateAttr()
class Config:
arbitrary_types_allowed = True
@@ -336,7 +247,7 @@ def __hash__(self) -> int:
return hash((self._atoken_contract.address, self._underlying_asset_address))
def __eq__(self, other) -> bool:
- if not isinstance(other, AaveV3DefaultInterestRatePool):
+ if not isinstance(other, AaveV3DefaultInterestRateV2Pool):
return NotImplemented
# Compare the attributes for equality
return (self._atoken_contract.address, self._underlying_asset_address) == (
@@ -388,7 +299,7 @@ def pool_init(self, web3_provider: Web3) -> None:
address=self._underlying_asset_address,
)
- self._total_supplied = retry_with_backoff(self._atoken_contract.functions.totalSupply().call)
+ self._total_supplied_assets = retry_with_backoff(self._atoken_contract.functions.totalSupply().call)
self._initted = True
@@ -396,7 +307,7 @@ def pool_init(self, web3_provider: Web3) -> None:
bt.logging.error("Failed to load contract!")
bt.logging.error(err) # type: ignore[]
- def sync(self, user_addr: str, web3_provider: Web3) -> None:
+ def sync(self, web3_provider: Web3) -> None:
"""Syncs with chain"""
if not self._initted:
self.pool_init(web3_provider)
@@ -468,8 +379,16 @@ def sync(self, user_addr: str, web3_provider: Web3) -> None:
reserveConfiguration = self._reserve_data.configuration
self._reserveFactor = getReserveFactor(reserveConfiguration)
self._decimals = retry_with_backoff(self._underlying_asset_contract.functions.decimals().call)
- self._collateral_amount = retry_with_backoff(
- self._atoken_contract.functions.balanceOf(Web3.to_checksum_address(user_addr)).call
+ self._user_deposits = retry_with_backoff(
+ self._atoken_contract.functions.balanceOf(Web3.to_checksum_address(self.user_address)).call
+ )
+
+ self._user_asset_balance = retry_with_backoff(
+ self._underlying_asset_contract.functions.balanceOf(Web3.to_checksum_address(self.user_address)).call
+ )
+
+ self._normalized_income = retry_with_backoff(
+ self._pool_contract.functions.getReserveNormalizedIncome(self._underlying_asset_address).call
)
except Exception as err:
@@ -481,7 +400,218 @@ def sync(self, user_addr: str, web3_provider: Web3) -> None:
def supply_rate(self, amount: int) -> int:
"""Returns supply rate given new deposit amount"""
try:
- already_deposited = self._collateral_amount
+ already_deposited = self._user_deposits
+ delta = amount - already_deposited
+ to_deposit = max(0, delta)
+ to_remove = abs(delta) if delta < 0 else 0
+
+ (nextLiquidityRate, _) = retry_with_backoff(
+ self._strategy_contract.functions.calculateInterestRates(
+ (
+ self._reserve_data.unbacked,
+ int(to_deposit),
+ int(to_remove),
+ self._nextTotalStableDebt + self._totalVariableDebt,
+ self._reserveFactor,
+ self._underlying_asset_address,
+ True,
+ already_deposited,
+ ),
+ ).call,
+ )
+
+ return Web3.to_wei(nextLiquidityRate / 1e27, "ether")
+
+ except Exception as e:
+ bt.logging.error("Failed to retrieve supply apy!")
+ bt.logging.error(e) # type: ignore[]
+
+ return 0
+
+
+class AaveV3RateTargetBaseInterestRatePool(ChainBasedPoolModel):
+ """This class defines the default pool type for Aave"""
+
+ pool_type: POOL_TYPES = Field(default=POOL_TYPES.AAVE_TARGET, const=True, description="type of pool")
+
+ _atoken_contract: Contract = PrivateAttr()
+ _pool_contract: Contract = PrivateAttr()
+ _underlying_asset_contract: Contract = PrivateAttr()
+ _underlying_asset_address: str = PrivateAttr()
+ _reserve_data = PrivateAttr()
+ _strategy_contract = PrivateAttr()
+ _nextTotalStableDebt = PrivateAttr()
+ _nextAvgStableBorrowRate = PrivateAttr()
+ _variable_debt_token_contract = PrivateAttr()
+ _totalVariableDebt = PrivateAttr()
+ _reserveFactor = PrivateAttr()
+ _user_deposits: int = PrivateAttr()
+ _total_supplied_assets: int = PrivateAttr()
+ _decimals: int = PrivateAttr()
+ _user_asset_balance: int = PrivateAttr()
+ _normalized_income: int = PrivateAttr()
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def __hash__(self) -> int:
+ return hash((self._atoken_contract.address, self._underlying_asset_address))
+
+ def __eq__(self, other) -> bool:
+ if not isinstance(other, AaveV3RateTargetBaseInterestRatePool):
+ return NotImplemented
+ # Compare the attributes for equality
+ return (self._atoken_contract.address, self._underlying_asset_address) == (
+ other._atoken_contract.address,
+ other._underlying_asset_address,
+ )
+
+ def pool_init(self, web3_provider: Web3) -> None:
+ try:
+ assert web3_provider.is_connected()
+ except Exception as err:
+ bt.logging.error("Failed to connect to Web3 instance!")
+ bt.logging.error(err) # type: ignore[]
+
+ try:
+ atoken_abi_file_path = Path(__file__).parent / "abi/AToken.json"
+ atoken_abi_file = atoken_abi_file_path.open()
+ atoken_abi = json.load(atoken_abi_file)
+ atoken_abi_file.close()
+ atoken_contract = web3_provider.eth.contract(abi=atoken_abi, decode_tuples=True)
+ self._atoken_contract = retry_with_backoff(
+ atoken_contract,
+ address=self.contract_address,
+ )
+
+ pool_abi_file_path = Path(__file__).parent / "abi/Pool.json"
+ pool_abi_file = pool_abi_file_path.open()
+ pool_abi = json.load(pool_abi_file)
+ pool_abi_file.close()
+
+ atoken_contract = self._atoken_contract
+ pool_address = retry_with_backoff(atoken_contract.functions.POOL().call)
+
+ pool_contract = web3_provider.eth.contract(abi=pool_abi, decode_tuples=True)
+ self._pool_contract = retry_with_backoff(pool_contract, address=pool_address)
+
+ self._underlying_asset_address = retry_with_backoff(
+ self._atoken_contract.functions.UNDERLYING_ASSET_ADDRESS().call,
+ )
+
+ erc20_abi_file_path = Path(__file__).parent / "abi/IERC20.json"
+ erc20_abi_file = erc20_abi_file_path.open()
+ erc20_abi = json.load(erc20_abi_file)
+ erc20_abi_file.close()
+
+ underlying_asset_contract = web3_provider.eth.contract(abi=erc20_abi, decode_tuples=True)
+ self._underlying_asset_contract = retry_with_backoff(
+ underlying_asset_contract,
+ address=self._underlying_asset_address,
+ )
+
+ self._total_supplied_assets = retry_with_backoff(self._atoken_contract.functions.totalSupply().call)
+
+ self._initted = True
+
+ except Exception as err:
+ bt.logging.error("Failed to load contract!")
+ bt.logging.error(err) # type: ignore[]
+
+ def sync(self, web3_provider: Web3) -> None:
+ """Syncs with chain"""
+ if not self._initted:
+ self.pool_init(web3_provider)
+ try:
+ pool_abi_file_path = Path(__file__).parent / "abi/Pool.json"
+ pool_abi_file = pool_abi_file_path.open()
+ pool_abi = json.load(pool_abi_file)
+ pool_abi_file.close()
+
+ atoken_contract_onchain = self._atoken_contract
+ pool_address = retry_with_backoff(atoken_contract_onchain.functions.POOL().call)
+
+ pool_contract = web3_provider.eth.contract(abi=pool_abi, decode_tuples=True)
+ self._pool_contract = retry_with_backoff(pool_contract, address=pool_address)
+
+ self._underlying_asset_address = retry_with_backoff(
+ self._atoken_contract.functions.UNDERLYING_ASSET_ADDRESS().call,
+ )
+
+ self._reserve_data = retry_with_backoff(
+ self._pool_contract.functions.getReserveData(self._underlying_asset_address).call,
+ )
+
+ reserve_strat_abi_file_path = Path(__file__).parent / "abi/RateTargetBaseInterestRateStrategy.json"
+ reserve_strat_abi_file = reserve_strat_abi_file_path.open()
+ reserve_strat_abi = json.load(reserve_strat_abi_file)
+ reserve_strat_abi_file.close()
+
+ strategy_contract = web3_provider.eth.contract(abi=reserve_strat_abi)
+ self._strategy_contract = retry_with_backoff(
+ strategy_contract,
+ address=self._reserve_data.interestRateStrategyAddress,
+ )
+
+ stable_debt_token_abi_file_path = Path(__file__).parent / "abi/IStableDebtToken.json"
+ stable_debt_token_abi_file = stable_debt_token_abi_file_path.open()
+ stable_debt_token_abi = json.load(stable_debt_token_abi_file)
+ stable_debt_token_abi_file.close()
+
+ stable_debt_token_contract = web3_provider.eth.contract(abi=stable_debt_token_abi)
+ stable_debt_token_contract = retry_with_backoff(
+ stable_debt_token_contract,
+ address=self._reserve_data.stableDebtTokenAddress,
+ )
+
+ (
+ _,
+ self._nextTotalStableDebt,
+ self._nextAvgStableBorrowRate,
+ _,
+ ) = retry_with_backoff(stable_debt_token_contract.functions.getSupplyData().call)
+
+ variable_debt_token_abi_file_path = Path(__file__).parent / "abi/IVariableDebtToken.json"
+ variable_debt_token_abi_file = variable_debt_token_abi_file_path.open()
+ variable_debt_token_abi = json.load(variable_debt_token_abi_file)
+ variable_debt_token_abi_file.close()
+
+ variable_debt_token_contract = web3_provider.eth.contract(abi=variable_debt_token_abi)
+ self._variable_debt_token_contract = retry_with_backoff(
+ variable_debt_token_contract,
+ address=self._reserve_data.variableDebtTokenAddress,
+ )
+
+ nextVariableBorrowIndex = self._reserve_data.variableBorrowIndex
+
+ nextScaledVariableDebt = retry_with_backoff(self._variable_debt_token_contract.functions.scaledTotalSupply().call)
+ self._totalVariableDebt = rayMul(nextScaledVariableDebt, nextVariableBorrowIndex)
+
+ reserveConfiguration = self._reserve_data.configuration
+ self._reserveFactor = getReserveFactor(reserveConfiguration)
+ self._decimals = retry_with_backoff(self._underlying_asset_contract.functions.decimals().call)
+ self._user_deposits = retry_with_backoff(
+ self._atoken_contract.functions.balanceOf(Web3.to_checksum_address(self.user_address)).call
+ )
+
+ self._user_asset_balance = retry_with_backoff(
+ self._underlying_asset_contract.functions.balanceOf(Web3.to_checksum_address(self.user_address)).call
+ )
+
+ self._normalized_income = retry_with_backoff(
+ self._pool_contract.functions.getReserveNormalizedIncome(self._underlying_asset_address).call
+ )
+
+ except Exception as err:
+ bt.logging.error("Failed to sync to chain!")
+ bt.logging.error(err) # type: ignore[]
+
+ # last 256 unique calls to this will be cached for the next 60 seconds
+ @ttl_cache(maxsize=256, ttl=60)
+ def supply_rate(self, amount: int) -> int:
+ """Returns supply rate given new deposit amount"""
+ try:
+ already_deposited = self._user_deposits
delta = amount - already_deposited
to_deposit = max(0, delta)
to_remove = abs(delta) if delta < 0 else 0
@@ -517,15 +647,22 @@ class VariableInterestSturdySiloStrategy(ChainBasedPoolModel):
_silo_strategy_contract: Contract = PrivateAttr()
_pair_contract: Contract = PrivateAttr()
_rate_model_contract: Contract = PrivateAttr()
- _curr_deposit_amount: int = PrivateAttr()
+
+ _user_deposits: int = PrivateAttr()
_util_prec: int = PrivateAttr()
_fee_prec: int = PrivateAttr()
- _totalAssets: Any = PrivateAttr()
+ _total_supplied_assets: Any = PrivateAttr()
_totalBorrow: Any = PrivateAttr()
_current_rate_info = PrivateAttr()
_rate_prec: int = PrivateAttr()
+
_block: BlockData = PrivateAttr()
+
_decimals: int = PrivateAttr()
+ _asset: Contract = PrivateAttr()
+ _user_asset_balance: int = PrivateAttr()
+ _user_total_assets: int = PrivateAttr()
+ _share_price: Contract = PrivateAttr()
def __hash__(self) -> int:
return hash((self._silo_strategy_contract.address, self._pair_contract))
@@ -539,7 +676,7 @@ def __eq__(self, other) -> bool:
other._pair_contract.address,
)
- def pool_init(self, user_addr: str, web3_provider: Web3) -> None: # noqa: ARG002
+ def pool_init(self, web3_provider: Web3) -> None:
try:
assert web3_provider.is_connected()
except Exception as err:
@@ -574,23 +711,32 @@ def pool_init(self, user_addr: str, web3_provider: Web3) -> None: # noqa: ARG00
self._rate_model_contract = retry_with_backoff(rate_model_contract, address=rate_model_contract_address)
self._decimals = retry_with_backoff(self._pair_contract.functions.decimals().call)
+ erc20_abi_file_path = Path(__file__).parent / "abi/IERC20.json"
+ erc20_abi_file = erc20_abi_file_path.open()
+ erc20_abi = json.load(erc20_abi_file)
+ erc20_abi_file.close()
+
+ asset_address = retry_with_backoff(self._pair_contract.functions.asset().call)
+ asset_contract = web3_provider.eth.contract(abi=erc20_abi, decode_tuples=True)
+ self._asset = retry_with_backoff(asset_contract, address=asset_address)
+
self._initted = True
except Exception as e:
bt.logging.error(e) # type: ignore[]
- def sync(self, user_addr: str, web3_provider: Web3) -> None:
+ def sync(self, web3_provider: Web3) -> None:
"""Syncs with chain"""
if not self._initted:
- self.pool_init(user_addr, web3_provider)
+ self.pool_init(web3_provider)
user_shares = retry_with_backoff(self._pair_contract.functions.balanceOf(self.contract_address).call)
- self._curr_deposit_amount = retry_with_backoff(self._pair_contract.functions.convertToAssets(user_shares).call)
+ self._user_deposits = retry_with_backoff(self._pair_contract.functions.convertToAssets(user_shares).call)
constants = retry_with_backoff(self._pair_contract.functions.getConstants().call)
self._util_prec = constants[2]
self._fee_prec = constants[3]
- self._totalAssets: Any = retry_with_backoff(self._pair_contract.functions.totalAssets().call)
+ self._total_supplied_assets: Any = retry_with_backoff(self._pair_contract.functions.totalAssets().call)
self._totalBorrow: Any = retry_with_backoff(self._pair_contract.functions.totalBorrow().call).amount
self._block = web3_provider.eth.get_block("latest")
@@ -599,14 +745,19 @@ def sync(self, user_addr: str, web3_provider: Web3) -> None:
self._rate_prec = retry_with_backoff(self._rate_model_contract.functions.RATE_PREC().call)
+ self._user_asset_balance = retry_with_backoff(self._asset.functions.balanceOf(self.user_address).call)
+
+ # get current price per share
+ self._share_price = retry_with_backoff(self._pair_contract.functions.pricePerShare().call)
+
# last 256 unique calls to this will be cached for the next 60 seconds
@ttl_cache(maxsize=256, ttl=60)
def supply_rate(self, amount: int) -> int:
# amount scaled down to the asset's decimals from 18 decimals (wei)
- delta = amount - self._curr_deposit_amount
+ delta = amount - self._user_deposits
"""Returns supply rate given new deposit amount"""
- util_rate = int((self._util_prec * self._totalBorrow) // (self._totalAssets + delta))
+ util_rate = int((self._util_prec * self._totalBorrow) // (self._total_supplied_assets + delta))
last_update_timestamp = self._current_rate_info.lastTimestamp
current_timestamp = self._block["timestamp"]
@@ -646,8 +797,8 @@ class CompoundV3Pool(ChainBasedPoolModel):
_reward_token_price: float = PrivateAttr()
_base_decimals: int = PrivateAttr()
_total_borrow: int = PrivateAttr()
- _deposit_amount: int = PrivateAttr()
- _total_supply: int = PrivateAttr()
+ _user_deposits: int = PrivateAttr()
+ _total_supplied_assets: int = PrivateAttr()
_CompoundTokenMap: dict = {
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2": "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE", # WETH -> ETH
@@ -711,16 +862,16 @@ def sync(self, web3_provider: Web3) -> None:
retry_with_backoff(self._reward_oracle_contract.functions.latestAnswer().call) / 10**reward_decimals
)
- self._deposit_amount = retry_with_backoff(self._ctoken_contract.functions.balanceOf(self.user_address).call)
- self._total_supply = retry_with_backoff(self._ctoken_contract.functions.totalSupply().call)
+ self._user_deposits = retry_with_backoff(self._ctoken_contract.functions.balanceOf(self.user_address).call)
+ self._total_supplied_assets = retry_with_backoff(self._ctoken_contract.functions.totalSupply().call)
def supply_rate(self, amount: int) -> int:
# amount scaled down to the asset's decimals from 18 decimals (wei)
# get pool supply rate (base token)
- already_in_pool = self._deposit_amount
+ already_in_pool = self._user_deposits
delta = amount - already_in_pool
- new_supply = self._total_supply + delta
+ new_supply = self._total_supplied_assets + delta
current_borrows = self._total_borrow
utilization = wei_div(current_borrows, new_supply)
@@ -799,23 +950,7 @@ def supply_rate(self) -> int:
class MorphoVault(ChainBasedPoolModel):
- # TODO: remove
- """Model for Morpho Vaults
- NOTE:
- This pool type is a bit different from other pools
- pools = {
- ...
- "0x0...1": { # <--- this is the index of the market in the morpho vault's "supplyQueue"
- "contract_address": "0x....", # <---- this is the address of the morpho vault
- ...
- }
- ...
- }
-
- new_pools = []
- for pool_uid, pool in pools.items():
- new_pool = MorphoVault(contract_address=pool["contract_address"], market_idx=pool_uid ...)
- """
+ """Model for Morpho Vaults"""
pool_type: POOL_TYPES = Field(POOL_TYPES.MORPHO, const=True, description="type of pool")
@@ -826,10 +961,13 @@ class MorphoVault(ChainBasedPoolModel):
_DECIMALS_OFFSET: int = PrivateAttr()
# TODO: update unit tests to check these :^)
_irm_contracts: dict = PrivateAttr(default={})
- _total_assets: int = PrivateAttr()
- _user_assets: int = PrivateAttr()
+ _total_supplied_assets: int = PrivateAttr()
+ _user_deposits: int = PrivateAttr()
_curr_borrows: int = PrivateAttr()
_asset_decimals: int = PrivateAttr()
+ _underlying_asset_contract: Contract = PrivateAttr()
+ _user_asset_balance: int = PrivateAttr()
+ _share_price: int = PrivateAttr()
_VIRTUAL_SHARES: ClassVar[int] = 1e6
_VIRTUAL_ASSETS: ClassVar[int] = 1
@@ -871,6 +1009,19 @@ def pool_init(self, web3_provider: Web3) -> None:
self._irm_abi = json.load(irm_abi_file)
irm_abi_file.close()
+ underlying_asset_address = retry_with_backoff(self._vault_contract.functions.asset().call)
+
+ erc20_abi_file_path = Path(__file__).parent / "abi/IERC20.json"
+ erc20_abi_file = erc20_abi_file_path.open()
+ erc20_abi = json.load(erc20_abi_file)
+ erc20_abi_file.close()
+
+ underlying_asset_contract = web3_provider.eth.contract(abi=erc20_abi, decode_tuples=True)
+ self._underlying_asset_contract = retry_with_backoff(
+ underlying_asset_contract,
+ address=underlying_asset_address,
+ )
+
self._initted = True
def sync(self, web3_provider: Web3) -> None:
@@ -896,11 +1047,16 @@ def sync(self, web3_provider: Web3) -> None:
total_borrows += market.totalBorrowAssets
- self._total_assets = retry_with_backoff(self._vault_contract.functions.totalAssets().call)
+ self._total_supplied_assets = retry_with_backoff(self._vault_contract.functions.totalAssets().call)
curr_user_shares = retry_with_backoff(self._vault_contract.functions.balanceOf(self.user_address).call)
- self._user_assets = retry_with_backoff(self._vault_contract.functions.convertToAssets(curr_user_shares).call)
+ self._user_deposits = retry_with_backoff(self._vault_contract.functions.convertToAssets(curr_user_shares).call)
+ self._user_asset_balance = retry_with_backoff(
+ self._underlying_asset_contract.functions.balanceOf(Web3.to_checksum_address(self.user_address)).call
+ )
self._curr_borrows = total_borrows
+ self._share_price = retry_with_backoff(self._vault_contract.functions.convertToAssets(int(1e18)).call)
+
@classmethod
def assets_to_shares_down(cls, assets: int, total_assets: int, total_shares: int) -> int:
return (assets * (total_shares + cls._VIRTUAL_SHARES)) // (total_assets + cls._VIRTUAL_ASSETS)
@@ -917,7 +1073,7 @@ def supply_rate(self, amount: int) -> int:
retry_with_backoff(self._vault_contract.functions.supplyQueue(idx).call) for idx in range(supply_queue_length)
]
- total_asset_delta = amount - self._user_assets
+ total_asset_delta = amount - self._user_deposits
# apys in each market
current_supply_apys = []
@@ -952,76 +1108,13 @@ def supply_rate(self, amount: int) -> int:
allocated_assets = self.shares_to_assets_down(
position.supplyShares, market.totalSupplyAssets, market.totalSupplyShares
)
- current_assets.append(allocated_assets)
-
- curr_agg_apy = sum(
- [(current_assets[i] * current_supply_apys[i]) // int(10**self._asset_decimals) for i in range(supply_queue_length)]
- ) / sum(current_assets)
-
- return int(
- (wei_mul(curr_agg_apy, self._total_assets) / (self._total_assets + total_asset_delta))
- * 10 ** (self._asset_decimals * 2)
- )
-
-
-def generate_eth_public_key(rng_gen: np.random.RandomState) -> str:
- private_key_bytes = rng_gen.bytes(32) # type: ignore[]
- account = Account.from_key(private_key_bytes)
- return account.address
-
-
-def generate_assets_and_pools(rng_gen: np.random.RandomState) -> dict[str, dict[str, BasePoolModel] | int]: # generate pools
- assets_and_pools = {}
-
- pools_list = [
- BasePool(
- contract_address=generate_eth_public_key(rng_gen=rng_gen),
- pool_type=POOL_TYPES.SYNTHETIC,
- base_rate=int(randrange_float(MIN_BASE_RATE, MAX_BASE_RATE, BASE_RATE_STEP, rng_gen=rng_gen)),
- base_slope=int(randrange_float(MIN_SLOPE, MAX_SLOPE, SLOPE_STEP, rng_gen=rng_gen)),
- kink_slope=int(
- randrange_float(MIN_KINK_SLOPE, MAX_KINK_SLOPE, SLOPE_STEP, rng_gen=rng_gen),
- ), # kink rate - kicks in after pool hits optimal util rate
- optimal_util_rate=int(
- randrange_float(
- MIN_OPTIMAL_RATE,
- MAX_OPTIMAL_RATE,
- OPTIMAL_UTIL_STEP,
- rng_gen=rng_gen,
- ),
- ), # optimal util rate - after which the kink slope kicks in
- borrow_amount=int(
- format_num_prec(
- wei_mul(
- POOL_RESERVE_SIZE,
- int(
- randrange_float(
- MIN_UTIL_RATE,
- MAX_UTIL_RATE,
- UTIL_RATE_STEP,
- rng_gen=rng_gen,
- ),
- ),
- ),
- ),
- ), # initial borrowed amount from pool
- reserve_size=int(POOL_RESERVE_SIZE),
- )
- for _ in range(NUM_POOLS)
- ]
-
- pools = {str(pool.contract_address): pool for pool in pools_list}
+ current_assets.append(allocated_assets * int(10**self._asset_decimals))
- minimums = [pool.borrow_amount for pool in pools_list]
- min_total = sum(minimums)
- assets_and_pools["total_assets"] = int(min_total) + int(
- math.floor(
- randrange_float(MIN_TOTAL_ASSETS_OFFSET, MAX_TOTAL_ASSETS_OFFSET, TOTAL_ASSETS_OFFSET_STEP, rng_gen=rng_gen),
+ curr_agg_apy = sum([current_assets[i] * current_supply_apys[i] for i in range(supply_queue_length)]) / sum(
+ current_assets
)
- )
- assets_and_pools["pools"] = pools
- return assets_and_pools
+ return int(curr_agg_apy * self._total_supplied_assets / (self._total_supplied_assets + total_asset_delta))
class YearnV3Vault(ChainBasedPoolModel):
@@ -1030,7 +1123,11 @@ class YearnV3Vault(ChainBasedPoolModel):
_vault_contract: Contract = PrivateAttr()
_apr_oracle: Contract = PrivateAttr()
_max_withdraw: int = PrivateAttr()
- _curr_deposit: int = PrivateAttr()
+ _user_deposits: int = PrivateAttr()
+ _asset: Contract = PrivateAttr()
+ _total_supplied_assets: int = PrivateAttr()
+ _user_asset_balance: int = PrivateAttr()
+ _share_price: int = PrivateAttr()
def pool_init(self, web3_provider: Web3) -> None:
vault_abi_file_path = Path(__file__).parent / "abi/Yearn_V3_Vault.json"
@@ -1049,26 +1146,112 @@ def pool_init(self, web3_provider: Web3) -> None:
apr_oracle = web3_provider.eth.contract(abi=apr_oracle_abi, decode_tuples=True)
self._apr_oracle = retry_with_backoff(apr_oracle, address=APR_ORACLE)
+ erc20_abi_file_path = Path(__file__).parent / "abi/IERC20.json"
+ erc20_abi_file = erc20_abi_file_path.open()
+ erc20_abi = json.load(erc20_abi_file)
+ erc20_abi_file.close()
+
+ asset_address = retry_with_backoff(self._vault_contract.functions.asset().call)
+ asset_contract = web3_provider.eth.contract(abi=erc20_abi, decode_tuples=True)
+ self._asset = retry_with_backoff(asset_contract, address=asset_address)
+
def sync(self, web3_provider: Web3) -> None:
if not self._initted:
self.pool_init(web3_provider)
self._max_withdraw = retry_with_backoff(self._vault_contract.functions.maxWithdraw(self.user_address).call)
user_shares = retry_with_backoff(self._vault_contract.functions.balanceOf(self.user_address).call)
- self._curr_deposit = retry_with_backoff(self._vault_contract.functions.convertToAssets(user_shares).call)
+ self._user_deposits = retry_with_backoff(self._vault_contract.functions.convertToAssets(user_shares).call)
+ self._total_supplied_assets: Any = retry_with_backoff(self._vault_contract.functions.totalAssets().call)
+ self._user_asset_balance = retry_with_backoff(self._asset.functions.balanceOf(self.user_address).call)
+
+ # get current price per share
+ self._share_price = retry_with_backoff(self._vault_contract.functions.pricePerShare().call)
def supply_rate(self, amount: int) -> int:
- delta = amount - self._curr_deposit
+ delta = amount - self._user_deposits
return retry_with_backoff(self._apr_oracle.functions.getExpectedApr(self.contract_address, delta).call)
-# generate intial allocations for pools
-def generate_initial_allocations_for_pools(assets_and_pools: dict) -> dict:
- total_assets: int = assets_and_pools["total_assets"]
- pools: dict[str, BasePool] = assets_and_pools["pools"]
- allocs = {}
- for pool_uid, pool in pools.items():
- alloc = pool.borrow_amount if pool.pool_type == POOL_TYPES.SYNTHETIC else total_assets // len(pools)
- allocs[pool_uid] = alloc
+def generate_eth_public_key(rng_gen: np.random.RandomState) -> str:
+ private_key_bytes = rng_gen.bytes(32) # type: ignore[]
+ account = Account.from_key(private_key_bytes)
+ return account.address
- return allocs
+
+def generate_challenge_data(
+ web3_provider: Web3,
+ rng_gen: np.random.RandomState = np.random.RandomState(), # noqa: B008
+) -> dict[str, dict[str, ChainBasedPoolModel] | int]: # generate pools
+ selected_entry = POOL_REGISTRY[rng_gen.choice(list(POOL_REGISTRY.keys()))]
+ bt.logging.debug(f"Selected pool registry entry: {selected_entry}")
+
+ return assets_pools_for_challenge_data(selected_entry, web3_provider)
+
+
+def assets_pools_for_challenge_data(
+ selected_entry, web3_provider: Web3
+) -> dict[str, dict[str, ChainBasedPoolModel] | int]: # generate pools
+ challenge_data = {}
+
+ selected_assets_and_pools = selected_entry["assets_and_pools"]
+ selected_pools = selected_assets_and_pools["pools"]
+ global_user_address = selected_entry.get("user_address", None)
+
+ pool_list = []
+
+ for pool_dict in selected_pools.values():
+ user_address = pool_dict.get("user_address", None)
+ pool = PoolFactory.create_pool(
+ pool_type=POOL_TYPES._member_map_[pool_dict["pool_type"]],
+ user_address=global_user_address if user_address is None else user_address,
+ contract_address=pool_dict["contract_address"],
+ )
+ pool_list.append(pool)
+
+ pools = {str(pool.contract_address): pool for pool in pool_list}
+
+ # we assume that the user address is the same across pools (valid)
+ # and also that the asset contracts are the same across said pools
+ total_assets = selected_entry.get("total_assets", None)
+
+ if total_assets is None:
+ total_assets = 0
+ first_pool = pool_list[0]
+ first_pool.sync(web3_provider)
+ match first_pool.pool_type:
+ case T if T in (
+ POOL_TYPES.STURDY_SILO,
+ POOL_TYPES.AAVE_DEFAULT,
+ POOL_TYPES.AAVE_TARGET,
+ POOL_TYPES.MORPHO,
+ POOL_TYPES.YEARN_V3,
+ ):
+ total_assets = first_pool._user_asset_balance
+ case _:
+ pass
+
+ for pool in pools.values():
+ pool.sync(web3_provider)
+ total_asset = 0
+ match pool.pool_type:
+ case T if T in (
+ POOL_TYPES.STURDY_SILO,
+ POOL_TYPES.AAVE_DEFAULT,
+ POOL_TYPES.AAVE_TARGET,
+ POOL_TYPES.MORPHO,
+ POOL_TYPES.YEARN_V3,
+ ):
+ total_asset += pool._user_deposits
+ case _:
+ pass
+
+ total_assets += total_asset
+
+ challenge_data["assets_and_pools"] = {}
+ challenge_data["assets_and_pools"]["pools"] = pools
+ challenge_data["assets_and_pools"]["total_assets"] = total_assets
+ if global_user_address is not None:
+ challenge_data["user_address"] = global_user_address
+
+ return challenge_data
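A minimal usage sketch of the new challenge generator, assuming a locally reachable mainnet RPC endpoint (the URL below is a placeholder):

```python
import numpy as np
from web3 import Web3

from sturdy.pools import generate_challenge_data

# placeholder RPC endpoint - substitute a real mainnet node URL
w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))

challenge = generate_challenge_data(w3, rng_gen=np.random.RandomState(42))
pools = challenge["assets_and_pools"]["pools"]
total_assets = challenge["assets_and_pools"]["total_assets"]
print(f"{len(pools)} pools, total assets to allocate: {total_assets}")
```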
diff --git a/sturdy/protocol.py b/sturdy/protocol.py
index b9d37d2..a47a947 100644
--- a/sturdy/protocol.py
+++ b/sturdy/protocol.py
@@ -17,7 +17,6 @@
# DEALINGS IN THE SOFTWARE.
from enum import IntEnum
-from typing import Annotated
import bittensor as bt
from pydantic import BaseModel, Field, root_validator, validator
@@ -25,7 +24,7 @@
from web3 import Web3
from web3.constants import ADDRESS_ZERO
-from sturdy.pools import BasePoolModel, ChainBasedPoolModel
+from sturdy.pools import ChainBasedPoolModel
class REQUEST_TYPES(IntEnum):
@@ -37,20 +36,17 @@ class REQUEST_TYPES(IntEnum):
class AllocInfo(TypedDict):
- apy: int
+ rank: int
allocations: AllocationsDict | None
-PoolModel = Annotated[ChainBasedPoolModel | BasePoolModel, Field(discriminator="pool_model_disc")]
-
-
class AllocateAssetsRequest(BaseModel):
class Config:
use_enum_values = True
smart_union = True
request_type: REQUEST_TYPES | int | str = Field(default=REQUEST_TYPES.ORGANIC, description="type of request")
- assets_and_pools: dict[str, dict[str, PoolModel] | int] = Field(
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int] = Field(
...,
description="pools for miners to produce allocation amounts for - uid -> pool_info",
)
@@ -108,7 +104,7 @@ class Config:
smart_union = True
request_type: REQUEST_TYPES | int | str = Field(default=REQUEST_TYPES.ORGANIC, description="type of request")
- assets_and_pools: dict[str, dict[str, PoolModel] | int] = Field(
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int] = Field(
...,
description="pools for miners to produce allocation amounts for - uid -> pool_info",
)
diff --git a/sturdy/utils/config.py b/sturdy/utils/config.py
index 1c41cbc..8499ba3 100644
--- a/sturdy/utils/config.py
+++ b/sturdy/utils/config.py
@@ -24,7 +24,7 @@
from loguru import logger
from sturdy import __spec_version__ as spec_version
-from sturdy.constants import QUERY_TIMEOUT
+from sturdy.constants import DB_DIR, QUERY_TIMEOUT
def check_config(cls, config: "bt.Config") -> None:
@@ -260,6 +260,15 @@ def add_validator_args(cls, parser):
default=False,
)
+ # TODO: make this available for organic validators so that it can be used in prod,
+ # not just for testing
+ parser.add_argument(
+ "--db_dir",
+ type=str,
+ help="directory of database - used for testing purposes",
+ default=DB_DIR,
+ )
+
def config(cls) -> bt.config:
"""
diff --git a/sturdy/utils/ethmath.py b/sturdy/utils/ethmath.py
index e5278cc..0572011 100644
--- a/sturdy/utils/ethmath.py
+++ b/sturdy/utils/ethmath.py
@@ -12,5 +12,6 @@ def wei_div(x: int, y: int) -> int:
def wei_mul_arrays(x: np.ndarray, y: np.ndarray) -> np.ndarray:
return (np.multiply(x, y)) // 1e18
+
def wei_div_arrays(x: np.ndarray, y: np.ndarray) -> np.ndarray:
return (np.divide(x, y)) * 1e18
diff --git a/sturdy/utils/misc.py b/sturdy/utils/misc.py
index e523e2b..e562843 100644
--- a/sturdy/utils/misc.py
+++ b/sturdy/utils/misc.py
@@ -18,6 +18,7 @@
import time
from collections.abc import Callable
+from datetime import datetime, timezone
from functools import lru_cache, update_wrapper
from math import floor
from typing import Any
@@ -36,6 +37,18 @@
# TODO: cleanup functions - lay them out better across files?
+def time_diff_seconds(start: str, end: str, format_str: str = "%Y-%m-%d %H:%M:%S.%f") -> int:
+ start_datetime = datetime.strptime(start, format_str).replace(tzinfo=timezone.utc) # noqa: UP017
+ end_datetime = datetime.strptime(end, format_str).replace(tzinfo=timezone.utc) # noqa: UP017
+ return int((end_datetime - start_datetime).total_seconds())
+
+
+def get_scoring_period_length(active_allocation: dict) -> int:
+ scoring_period_start = active_allocation["created_at"]
+ scoring_period_end = active_allocation["scoring_period_end"]
+ return time_diff_seconds(scoring_period_start, scoring_period_end)
+
+
# rand range but float
def randrange_float(
start,
@@ -116,7 +129,6 @@ def borrow_rate(util_rate, pool) -> int:
)
-
def supply_rate(util_rate, pool) -> int:
return wei_mul(util_rate, pool.borrow_rate)
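A quick sketch of the new time helpers, using the default `%Y-%m-%d %H:%M:%S.%f` timestamp format shown above (the timestamps are made up):

```python
from sturdy.utils.misc import get_scoring_period_length, time_diff_seconds

start = "2024-01-01 00:00:00.000000"
end = "2024-01-01 02:30:00.000000"
print(time_diff_seconds(start, end))  # 9000 seconds

active_allocation = {"created_at": start, "scoring_period_end": end}
print(get_scoring_period_length(active_allocation))  # 9000
```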
diff --git a/sturdy/utils/uids.py b/sturdy/utils/uids.py
index 66a65f3..c9e64fa 100644
--- a/sturdy/utils/uids.py
+++ b/sturdy/utils/uids.py
@@ -4,9 +4,7 @@
from typing import List
-def check_uid_availability(
- metagraph: "bt.metagraph.Metagraph", uid: int, vpermit_tao_limit: int
-) -> bool:
+def check_uid_availability(metagraph: "bt.metagraph.Metagraph", uid: int, vpermit_tao_limit: int) -> bool:
"""Check if uid is available. The UID should be available if it is serving and has less than vpermit_tao_limit stake
Args:
metagraph (:obj: bt.metagraph.Metagraph): Metagraph object
@@ -36,9 +34,7 @@ def get_random_uids(self, k: int, exclude: list[int] = None) -> torch.LongTensor
avail_uids = []
for uid in range(self.metagraph.n.item()):
- uid_is_available = check_uid_availability(
- self.metagraph, uid, self.config.neuron.vpermit_tao_limit
- )
+ uid_is_available = check_uid_availability(self.metagraph, uid, self.config.neuron.vpermit_tao_limit)
uid_is_not_excluded = exclude is None or uid not in exclude
if uid_is_available:
diff --git a/sturdy/utils/wandb.py b/sturdy/utils/wandb.py
index 7d639e6..8d4657c 100644
--- a/sturdy/utils/wandb.py
+++ b/sturdy/utils/wandb.py
@@ -18,10 +18,7 @@ def init_wandb_miner(self, reinit=False):
if self.config.mock:
tags.append("mock")
- wandb_config = {
- key: copy.deepcopy(self.config.get(key, None))
- for key in ("neuron", "reward", "netuid", "wandb")
- }
+ wandb_config = {key: copy.deepcopy(self.config.get(key, None)) for key in ("neuron", "reward", "netuid", "wandb")}
if wandb_config["neuron"] is not None:
wandb_config["neuron"].pop("full_path", None)
@@ -33,11 +30,7 @@ def init_wandb_miner(self, reinit=False):
entity=self.config.wandb.entity,
config=wandb_config,
mode="offline" if self.config.wandb.offline else "online",
- dir=(
- self.config.neuron.full_path
- if self.config.neuron is not None
- else "wandb_logs"
- ),
+ dir=(self.config.neuron.full_path if self.config.neuron is not None else "wandb_logs"),
tags=tags,
notes=self.config.wandb.notes,
)
@@ -63,10 +56,7 @@ def init_wandb_validator(self, reinit=False):
if self.config.neuron.disable_log_rewards:
tags.append("disable_log_rewards")
- wandb_config = {
- key: copy.deepcopy(self.config.get(key, None))
- for key in ("neuron", "reward", "netuid", "wandb")
- }
+ wandb_config = {key: copy.deepcopy(self.config.get(key, None)) for key in ("neuron", "reward", "netuid", "wandb")}
wandb_config["neuron"].pop("full_path", None)
self.wandb = wandb.init(
diff --git a/sturdy/validator/forward.py b/sturdy/validator/forward.py
index c8e53c3..862fbce 100644
--- a/sturdy/validator/forward.py
+++ b/sturdy/validator/forward.py
@@ -17,15 +17,25 @@
# DEALINGS IN THE SOFTWARE.
import asyncio
-import copy
+import uuid
from typing import Any
import bittensor as bt
+import numpy as np
+from web3 import Web3
from web3.constants import ADDRESS_ZERO
-from sturdy.constants import QUERY_TIMEOUT
+from sturdy.constants import MAX_SCORING_PERIOD, MIN_SCORING_PERIOD, QUERY_TIMEOUT, SCORING_PERIOD_STEP
+from sturdy.pools import POOL_TYPES, ChainBasedPoolModel, generate_challenge_data
from sturdy.protocol import REQUEST_TYPES, AllocateAssets, AllocInfo
-from sturdy.validator.reward import get_rewards
+from sturdy.validator.reward import filter_allocations, get_rewards
+from sturdy.validator.sql import (
+ delete_active_allocs,
+ delete_stale_active_allocs,
+ get_active_allocs,
+ get_db_connection,
+ log_allocations,
+)
async def forward(self) -> Any:
@@ -38,8 +48,70 @@ async def forward(self) -> Any:
self (:obj:`bittensor.neuron.Neuron`): The neuron object which contains all the necessary state for the validator.
"""
+ # delete stale active allocations after expiry time
+ bt.logging.debug("Purging stale active allocation requests")
+ with get_db_connection(self.config.db_dir) as conn:
+ rows_affected = delete_stale_active_allocs(conn)
+ bt.logging.debug(f"Purged {rows_affected} stale active allocation requests")
+
# initialize pools and assets
- await query_and_score_miners(self)
+ challenge_data = generate_challenge_data(self.w3)
+ request_uuid = str(uuid.uuid4()).replace("-", "")
+ user_address = challenge_data.get("user_address", None)
+
+ bt.logging.info("Querying miners...")
+ axon_times, allocations = await query_and_score_miners(
+ self,
+ assets_and_pools=challenge_data["assets_and_pools"],
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ user_address=user_address if user_address is not None else ADDRESS_ZERO,
+ )
+
+ assets_and_pools = challenge_data["assets_and_pools"]
+ pools = assets_and_pools["pools"]
+ metadata = get_metadata(pools, self.w3)
+
+ scoring_period = get_scoring_period()
+
+ with get_db_connection(self.config.db_dir) as conn:
+ log_allocations(
+ conn,
+ request_uuid,
+ assets_and_pools,
+ metadata,
+ allocations,
+ axon_times,
+ REQUEST_TYPES.SYNTHETIC,
+ scoring_period,
+ )
+
+
+def get_metadata(pools: dict[str, ChainBasedPoolModel], w3: Web3) -> dict:
+ metadata = {}
+ for contract_addr, pool in pools.items():
+ pool.sync(w3)
+ match pool.pool_type:
+ case T if T in (POOL_TYPES.STURDY_SILO, POOL_TYPES.MORPHO, POOL_TYPES.YEARN_V3):
+ metadata[contract_addr] = pool._share_price
+ case T if T in (POOL_TYPES.AAVE_DEFAULT, POOL_TYPES.AAVE_TARGET):
+ metadata[contract_addr] = pool._normalized_income
+ case _:
+ pass
+
+ return metadata
+
+
+def get_scoring_period(rng_gen: np.random.RandomState | None = None) -> int:
+ if rng_gen is None:
+ rng_gen = np.random.RandomState()
+
+ return rng_gen.choice(
+ np.arange(
+ MIN_SCORING_PERIOD,
+ MAX_SCORING_PERIOD + SCORING_PERIOD_STEP,
+ SCORING_PERIOD_STEP,
+ ),
+ )
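For reference, a seeded draw equivalent to `get_scoring_period()` above (the seed is arbitrary); the helper picks one of eleven period lengths between `MIN_SCORING_PERIOD` and `MAX_SCORING_PERIOD`:

```python
import numpy as np

# get_scoring_period() draws uniformly from {7200, 10800, ..., 43200} seconds
rng = np.random.RandomState(0)
period = rng.choice(np.arange(7200, 43200 + 3600, 3600))
print(int(period))  # one of the 11 allowed scoring periods, in seconds
```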
async def query_miner(
@@ -69,51 +141,76 @@ async def query_multiple_miners(
async def query_and_score_miners(
self,
- assets_and_pools: Any = None,
+ assets_and_pools: Any,
request_type: REQUEST_TYPES = REQUEST_TYPES.SYNTHETIC,
user_address: str = ADDRESS_ZERO,
-) -> dict[str, AllocInfo]:
-
- # intialize simulator
- if request_type == REQUEST_TYPES.ORGANIC:
- self.simulator.initialize(timesteps=1)
- else:
- self.simulator.initialize()
-
- # initialize simulator data
- # if there is no "organic" info then generate synthetic info
- if assets_and_pools is not None:
- self.simulator.init_data(init_assets_and_pools=copy.deepcopy(assets_and_pools))
- else:
- self.simulator.init_data()
- assets_and_pools = self.simulator.assets_and_pools
-
+) -> tuple[list, dict[str, AllocInfo]]:
# The dendrite client queries the network.
# TODO: write custom availability function later down the road
active_uids = [str(uid) for uid in range(self.metagraph.n.item()) if self.metagraph.axons[uid].is_serving]
+ np.random.shuffle(active_uids)
+
bt.logging.debug(f"active_uids: {active_uids}")
synapse = AllocateAssets(
request_type=request_type,
- assets_and_pools=self.simulator.assets_and_pools,
- allocations=self.simulator.allocations,
+ assets_and_pools=assets_and_pools,
user_address=user_address,
)
+ # query all miners
responses = await query_multiple_miners(
self,
synapse,
active_uids,
)
+
allocations = {uid: responses[idx].allocations for idx, uid in enumerate(active_uids)} # type: ignore[]
# Log the results for monitoring purposes.
bt.logging.debug(f"Assets and pools: {synapse.assets_and_pools}")
bt.logging.debug(f"Received allocations (uid -> allocations): {allocations}")
- # Adjust the scores based on responses from miners.
- rewards, allocs = get_rewards(
+ curr_pools = assets_and_pools["pools"]
+ for pool in curr_pools.values():
+ pool.sync(self.w3)
+
+ # score previously suggested miner allocations based on how well they are performing now
+
+ # get all the request ids for the pools we should be scoring from the db
+ active_alloc_rows = []
+ with get_db_connection(self.config.db_dir) as conn:
+ active_alloc_rows = get_active_allocs(conn)
+
+ bt.logging.debug(f"Active allocs: {active_alloc_rows}")
+
+ uids_to_delete = []
+ for active_alloc in active_alloc_rows:
+ request_uid = active_alloc["request_uid"]
+ uids_to_delete.append(request_uid)
+ # calculate rewards for previous active allocations
+ miner_uids, rewards = get_rewards(self, active_alloc)
+ bt.logging.debug(f"miner rewards: {rewards}")
+ bt.logging.debug(f"sim penalities: {self.similarity_penalties}")
+
+ # TODO: there may be a better way to go about this
+ if len(miner_uids) < 1:
+ break
+
+ # update the moving average scores of the miners
+ int_miner_uids = [int(uid) for uid in miner_uids]
+ self.update_scores(rewards, int_miner_uids)
+
+ # wipe these allocations from the db after scoring them
+ if len(uids_to_delete) > 0:
+ with get_db_connection(self.config.db_dir) as conn:
+ rows_affected = delete_active_allocs(conn, uids_to_delete)
+ bt.logging.debug(f"Scored and removed {rows_affected} active allocation requests")
+
+ # before logging latest allocations
+ # filter them
+ axon_times, filtered_allocs = filter_allocations(
self,
query=self.step,
uids=active_uids,
@@ -121,8 +218,20 @@ async def query_and_score_miners(
assets_and_pools=assets_and_pools,
)
- bt.logging.info(f"Scored responses: {rewards}")
+ # sort the miners by their current scores and return their respective allocations
+ sorted_indices = [idx for idx, val in sorted(enumerate(self.scores), key=lambda k: k[1], reverse=True)]
+
+ sorted_allocs = {}
+ rank = 1
+ for idx in sorted_indices:
+ alloc = filtered_allocs.get(str(idx), None)
+ if alloc is None:
+ continue
+
+ alloc["rank"] = rank
+ sorted_allocs[str(idx)] = alloc
+ rank += 1
+
+ bt.logging.debug(f"sorted allocations: {sorted_allocs}")
- int_active_uids = [int(uid) for uid in active_uids]
- self.update_scores(rewards, int_active_uids)
- return allocs
+ return axon_times, sorted_allocs
diff --git a/sturdy/validator/reward.py b/sturdy/validator/reward.py
index 8fb9518..a1b3bb6 100644
--- a/sturdy/validator/reward.py
+++ b/sturdy/validator/reward.py
@@ -16,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-import copy
+import json
from typing import Any, cast
import bittensor as bt
@@ -25,10 +25,12 @@
import numpy.typing as npt
import torch
-from sturdy.constants import QUERY_TIMEOUT, SIMILARITY_THRESHOLD
-from sturdy.pools import POOL_TYPES, BasePoolModel, ChainBasedPoolModel, check_allocations
-from sturdy.protocol import REQUEST_TYPES, AllocationsDict, AllocInfo
-from sturdy.utils.ethmath import wei_div, wei_mul
+from sturdy.constants import ALLOCATION_SIMILARITY_THRESHOLD, APY_SIMILARITY_THRESHOLD, QUERY_TIMEOUT
+from sturdy.pools import POOL_TYPES, ChainBasedPoolModel, PoolFactory, check_allocations
+from sturdy.protocol import AllocationsDict, AllocInfo
+from sturdy.utils.ethmath import wei_div
+from sturdy.utils.misc import get_scoring_period_length
+from sturdy.validator.sql import get_db_connection, get_miner_responses, get_request_info
def get_response_times(uids: list[str], responses, timeout: float) -> dict[str, float]:
@@ -82,47 +84,38 @@ def format_allocations(
return {contract_addr: allocs[contract_addr] for contract_addr in sorted(allocs.keys())}
-def dynamic_normalize_zscore(
- apys_and_allocations: AllocationsDict, z_threshold: float = 1.0, q: float = 0.75, epsilon: float = 1e-8
-) -> torch.Tensor:
+def normalize_exp(apys_and_allocations: AllocationsDict, epsilon: float = 1e-8) -> torch.Tensor:
raw_apys = {uid: apys_and_allocations[uid]["apy"] for uid in apys_and_allocations}
- sorted_apys_uid = dict(sorted(raw_apys.items(), key=lambda item: item[1]))
- apys = torch.tensor(list(raw_apys.values()))
- sorted_apys = torch.tensor(list(sorted_apys_uid.values()))
-
- quantile = np.percentile(sorted_apys.numpy(), q)
- apy_grad = [abs(sorted_apys[i] - sorted_apys[i - 1]) for i in range(1, len(sorted_apys))]
- mean_grad = np.mean(apy_grad)
- std_grad = np.std(apy_grad)
- apy_grad.insert(0, float("nan"))
- apy_grad = torch.tensor(apy_grad)
-
- # Calculate z-scores
- z_scores = (apy_grad - mean_grad) / std_grad
- # Set a lower bound based on z-score threshold if the lower quartile range is larger than the rest
- filtered = sorted_apys[(z_scores > z_threshold) & (sorted_apys < quantile)]
- lower_bound = filtered.min() if len(filtered) > 0 else sorted_apys.min()
+ if len(raw_apys) <= 1:
+ return torch.zeros(len(raw_apys))
- # No upper bound, only clip the lower bound
- clipped_data = torch.clip(apys, lower_bound)
+ apys = torch.tensor(list(raw_apys.values()), dtype=torch.float32)
+ normed = (apys - apys.min()) / (apys.max() - apys.min() + epsilon)
- dynamic_normed = (clipped_data - clipped_data.min()) / (clipped_data.max() - clipped_data.min() + epsilon)
- squared = torch.pow(dynamic_normed, 2)
-
- return (squared - squared.min()) / (squared.max() - squared.min() + epsilon)
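+ # raising the min-max normalized APYs to a high power concentrates rewards
+ # on the miners whose APYs are closest to the best one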
+ return torch.pow(normed, 8)
def calculate_penalties(
- similarity_matrix: dict[str, dict[str, float]],
+ allocation_similarity_matrix: dict[str, dict[str, float]],
+ apy_similarity_matrix: dict[str, dict[str, float]],
axon_times: dict[str, float],
- similarity_threshold: float = SIMILARITY_THRESHOLD,
+ allocation_similarity_threshold: float = ALLOCATION_SIMILARITY_THRESHOLD,
+ apy_similarity_threshold: float = APY_SIMILARITY_THRESHOLD,
) -> dict[str, int]:
- penalties = {miner: 0 for miner in similarity_matrix}
-
- for miner_a, similarities in similarity_matrix.items():
- for miner_b, similarity in similarities.items():
- if similarity <= similarity_threshold and axon_times[miner_a] <= axon_times[miner_b]:
+ penalties = {miner: 0 for miner in allocation_similarity_matrix}
+
+ for miner_a in allocation_similarity_matrix:
+ allocation_similarities = allocation_similarity_matrix[miner_a]
+ apy_similarities = apy_similarity_matrix[miner_a]
+ for miner_b in allocation_similarities:
+ allocation_similarity = allocation_similarities[miner_b]
+ apy_similarity = apy_similarities[miner_b]
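+ # penalize miner_b only when both its allocations and its APY are
+ # near-duplicates of a response that arrived no later than its own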
+ if (
+ allocation_similarity <= allocation_similarity_threshold
+ and apy_similarity <= apy_similarity_threshold
+ and axon_times[miner_a] <= axon_times[miner_b]
+ ):
penalties[miner_b] += 1
return penalties
@@ -151,9 +144,9 @@ def get_distance(alloc_a: npt.NDArray, alloc_b: npt.NDArray, total_assets: int)
return norm / gmpy2.sqrt(float(2 * total_assets**2))
-def get_similarity_matrix(
+def get_allocation_similarity_matrix(
apys_and_allocations: dict[str, dict[str, AllocationsDict | int]],
- assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int],
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int],
) -> dict[str, dict[str, float]]:
"""
Calculates the similarity matrix for the allocation strategies of miners using normalized Euclidean distance.
@@ -201,14 +194,55 @@ def get_similarity_matrix(
return similarity_matrix
+def get_apy_similarity_matrix(
+ apys_and_allocations: dict[str, dict[str, AllocationsDict | int]],
+) -> dict[str, dict[str, float]]:
+ """
+ Calculates the similarity matrix for the APYs reported by miners using a normalized distance.
+
+ This function computes a similarity matrix based on the distance between the APYs reported by miners,
+ normalized by the larger of the two APYs being compared. Each miner's APY is compared with every
+ other miner's APY, resulting in a matrix where each element (i, j) represents the normalized distance
+ between the APYs of miner_i and miner_j.
+
+ The similarity metric is scaled between 0 and 1, where 0 indicates identical APYs and 1 indicates the maximum
+ possible distance between them.
+
+ Args:
+ apys_and_allocations (dict[str, dict[str, Union[AllocationsDict, int]]]):
+ A dictionary containing the APY and allocation strategies for each miner. The keys are miner identifiers,
+ and the values are dictionaries with their respective allocations and APYs.
+
+ Returns:
+ dict[str, dict[str, float]]:
+ A nested dictionary where each key is a miner identifier, and the value is another dictionary containing the
+ normalized Euclidean distances to every other miner. The distances are scaled between 0 and 1.
+ """
+
+ similarity_matrix = {}
+
+ for miner_a, info_a in apys_and_allocations.items():
+ apy_a = cast(int, info_a["apy"])
+ apy_a = np.array([gmpy2.mpz(apy_a)], dtype=object)
+ similarity_matrix[miner_a] = {}
+ for miner_b, info_b in apys_and_allocations.items():
+ if miner_a != miner_b:
+ apy_b = cast(int, info_b["apy"])
+ apy_b = np.array([gmpy2.mpz(apy_b)], dtype=object)
+ similarity_matrix[miner_a][miner_b] = get_distance(apy_a, apy_b, max(apy_a, apy_b)[0]) # Max scaling
+
+ return similarity_matrix
+
+
def adjust_rewards_for_plagiarism(
self,
rewards_apy: torch.Tensor,
apys_and_allocations: dict[str, dict[str, AllocationsDict | int]],
- assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int],
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int],
uids: list,
axon_times: dict[str, float],
- similarity_threshold: float = SIMILARITY_THRESHOLD,
+ allocation_similarity_threshold: float = ALLOCATION_SIMILARITY_THRESHOLD,
+ apy_similarity_threshold: float = APY_SIMILARITY_THRESHOLD,
) -> torch.Tensor:
"""
Adjusts the annual percentage yield (APY) rewards for miners based on the similarity of their allocations
@@ -240,10 +274,17 @@ def adjust_rewards_for_plagiarism(
to a consistent format suitable for comparison.
"""
# Step 1: Calculate pairwise similarity (e.g., using Euclidean distance)
- similarity_matrix = get_similarity_matrix(apys_and_allocations, assets_and_pools)
+ allocation_similarity_matrix = get_allocation_similarity_matrix(apys_and_allocations, assets_and_pools)
+ apy_similarity_matrix = get_apy_similarity_matrix(apys_and_allocations)
# Step 2: Apply penalties considering axon times
- penalties = calculate_penalties(similarity_matrix, axon_times, similarity_threshold)
+ penalties = calculate_penalties(
+ allocation_similarity_matrix,
+ apy_similarity_matrix,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
+ )
self.similarity_penalties = penalties
# Step 3: Calculate final rewards with adjusted penalties
@@ -253,7 +294,7 @@ def adjust_rewards_for_plagiarism(
def _get_rewards(
self,
apys_and_allocations: dict[str, dict[str, AllocationsDict | int]],
- assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int],
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int],
uids: list[str],
axon_times: dict[str, float],
) -> torch.Tensor:
@@ -265,69 +306,79 @@ def _get_rewards(
- adjusted_rewards: The reward values for the miners.
"""
- # raw_apys = torch.Tensor([apys_and_allocations[uid]["apy"] for uid in uids])
-
- rewards_apy = dynamic_normalize_zscore(apys_and_allocations).to(self.device)
+ rewards_apy = normalize_exp(apys_and_allocations).to(self.device)
return adjust_rewards_for_plagiarism(self, rewards_apy, apys_and_allocations, assets_and_pools, uids, axon_times)
-def calculate_apy(
+def annualized_yield_pct(
allocations: AllocationsDict,
- assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int],
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int],
+ seconds_passed: int,
+ extra_metadata: dict,
) -> int:
"""
- Calculates immediate projected yields given intial assets and pools, pool history, and number of timesteps
+ Calculates the annualized yield of allocations in pools over the scoring period
"""
+ if seconds_passed < 1:
+ return 0
+
# calculate projected yield
initial_balance = cast(int, assets_and_pools["total_assets"])
pools = cast(dict[str, ChainBasedPoolModel], assets_and_pools["pools"])
- pct_yield = 0
- for uid, pool in pools.items():
- allocation = allocations[uid]
- match pool.pool_type:
- case POOL_TYPES.DAI_SAVINGS:
- pool_yield = wei_mul(allocation, pool.supply_rate())
- case _:
- pool_yield = wei_mul(allocation, pool.supply_rate(amount=allocation))
- pct_yield += pool_yield
+ total_yield = 0
- return wei_div(pct_yield, initial_balance)
-
-
-def calculate_aggregate_apy(
- allocations: AllocationsDict,
- assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int],
- timesteps: int,
- pool_history: list[dict[str, Any]],
-) -> int:
- """
- Calculates aggregate yields given intial assets and pools, pool history, and number of timesteps
- """
+ seconds_per_year = 31536000
- # calculate aggregate yield
- initial_balance = cast(int, assets_and_pools["total_assets"])
- pct_yield = 0
- for pools in pool_history:
- curr_yield = 0
- for uid, allocs in allocations.items():
- pool_data = pools[uid]
- pool_yield = wei_mul(allocs, pool_data.supply_rate)
- curr_yield += pool_yield
- pct_yield += curr_yield
+ # TODO: refactor?
+ for contract_addr, pool in pools.items():
+ allocation = allocations[contract_addr]
+ match pool.pool_type:
+ case T if T in (POOL_TYPES.STURDY_SILO, POOL_TYPES.MORPHO, POOL_TYPES.YEARN_V3):
+ # TODO: temp fix
+ if allocation > 0:
+ last_share_price = extra_metadata[contract_addr]
+ curr_share_price = pool._share_price
+ pct_delta = float(curr_share_price - last_share_price) / float(last_share_price)
+ deposit_delta = allocation - pool._user_deposits
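+ # scale the observed share price growth by how much the proposed deposit
+ # change would dilute the pool, then annualize it over the scoring period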
+ try:
+ adjusted_pct_delta = (
+ (pool._total_supplied_assets) / (pool._total_supplied_assets + deposit_delta + 1) * pct_delta
+ )
+ annualized_pct_yield = ((1 + adjusted_pct_delta) ** (seconds_per_year / seconds_passed)) - 1
+ total_yield += int(allocation * annualized_pct_yield)
+ except Exception as e:
+ bt.logging.error("Error calculating annualized pct yield, skipping:")
+ bt.logging.error(e)
+ case T if T in (POOL_TYPES.AAVE_DEFAULT, POOL_TYPES.AAVE_TARGET):
+ if allocation > 0:
+ last_income = extra_metadata[contract_addr]
+ curr_income = pool._normalized_income
+ pct_delta = float(curr_income - last_income) / float(last_income)
+ deposit_delta = allocation - pool._user_deposits
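+ # same idea as above, but Aave pools track yield via their normalized income
+ # index rather than a share price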
+ try:
+ adjusted_pct_delta = (
+ (pool._total_supplied_assets) / (pool._total_supplied_assets + deposit_delta) * pct_delta
+ )
+ annualized_pct_yield = ((1 + adjusted_pct_delta) ** (seconds_per_year / seconds_passed)) - 1
+ total_yield += int(allocation * annualized_pct_yield)
+ except Exception as e:
+ bt.logging.error("Error calculating annualized pct yield, skipping:")
+ bt.logging.error(e)
+ case _:
+ total_yield += 0
- pct_yield = wei_div(pct_yield, initial_balance)
- return int(pct_yield // timesteps) # for simplicity each timestep is a day in the simulator
+ return wei_div(total_yield, initial_balance)
-def get_rewards(
+def filter_allocations(
self,
query: int, # noqa: ARG001
uids: list[str],
responses: list,
- assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int],
-) -> tuple[torch.Tensor, dict[str, AllocInfo]]:
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int],
+) -> tuple[dict[str, float], dict[str, AllocInfo]]:
"""
Returns a tensor of rewards for the given query and responses.
@@ -340,140 +391,110 @@ def get_rewards(
- allocs: miner allocations along with their respective yields
"""
- # maximum yield to scale all rewards by
- # total apys of allocations per miner
- max_apy = 0
- apys = {}
-
- init_assets_and_pools = copy.deepcopy(assets_and_pools)
-
- bt.logging.debug(f"Running simulator for {self.simulator.timesteps} timesteps for each allocation...")
-
- # TODO: assuming that we are only getting immediate apy for organic chainbasedpool requests
- pools_to_scan = cast(dict, init_assets_and_pools["pools"])
- # update reserves given allocations
- for pool in pools_to_scan.values():
- match pool.pool_type:
- case T if T in (
- POOL_TYPES.AAVE,
- POOL_TYPES.DAI_SAVINGS,
- POOL_TYPES.COMPOUND_V3,
- POOL_TYPES.MORPHO,
- POOL_TYPES.YEARN_V3,
- ):
- pool.sync(self.w3)
- case POOL_TYPES.STURDY_SILO:
- pool.sync(pool.user_address, self.w3)
- case _:
- pass
+ filtered_allocs = {}
+ axon_times = get_response_times(uids=uids, responses=responses, timeout=QUERY_TIMEOUT)
- resulting_apy = 0
+ cheaters = []
for response_idx, response in enumerate(responses):
- # reset simulator for next run
- self.simulator.reset()
-
allocations = response.allocations
- # validator miner allocations before running simulation
# is the miner cheating w.r.t allocations?
cheating = True
try:
- cheating = not check_allocations(init_assets_and_pools, allocations)
+ cheating = not check_allocations(assets_and_pools, allocations)
except Exception as e:
bt.logging.error(e) # type: ignore[]
# score response very low if miner is cheating somehow or returns allocations with incorrect format
if cheating:
miner_uid = uids[response_idx]
- bt.logging.warning(f"CHEATER DETECTED - MINER WITH UID {miner_uid} - PUNISHING 👊😠")
- apys[miner_uid] = 0
+ cheaters.append(miner_uid)
continue
- try:
- if response.request_type == REQUEST_TYPES.SYNTHETIC:
- # miner does not appear to be cheating - so we init simulator data
- self.simulator.init_data(
- init_assets_and_pools=copy.deepcopy(init_assets_and_pools),
- init_allocations=allocations,
- )
- self.simulator.update_reserves_with_allocs()
+ # used to filter out miners who timed out
+ # TODO: should probably move some things around later down the road
+ # TODO: cleaner way to do this?
+ if response.allocations is not None and axon_times[uids[response_idx]] < QUERY_TIMEOUT:
+ filtered_allocs[uids[response_idx]] = {
+ "allocations": response.allocations,
+ }
- self.simulator.run()
+ bt.logging.warning(f"CHEATERS DETECTED: {cheaters}")
- resulting_apy = calculate_aggregate_apy(
- allocations,
- init_assets_and_pools,
- self.simulator.timesteps,
- self.simulator.pool_history,
- )
+ curr_filtered_allocs = dict(sorted(filtered_allocs.items(), key=lambda item: int(item[0])))
+ sorted_axon_times = dict(sorted(axon_times.items(), key=lambda item: item[1]))
- else:
- resulting_apy = calculate_apy(
- allocations,
- init_assets_and_pools,
- )
- except Exception as e:
- bt.logging.error(e) # type: ignore[]
- bt.logging.error("Failed to calculate apy - PENALIZING MINER")
- miner_uid = uids[response_idx]
- apys[miner_uid] = 0
- continue
+ bt.logging.debug(f"sorted axon times:\n{sorted_axon_times}")
- if resulting_apy > max_apy:
- max_apy = resulting_apy
+ self.sorted_axon_times = sorted_axon_times
- apys[uids[response_idx]] = resulting_apy
+ # return axon times along with the filtered allocations
+ return axon_times, curr_filtered_allocs
- axon_times = get_response_times(uids=uids, responses=responses, timeout=QUERY_TIMEOUT)
- # set apys for miners that took longer than the timeout to minimum
- # TODO: cleaner way to do this?
- for uid in uids:
- if axon_times[uid] >= QUERY_TIMEOUT:
- apys[uid] = 0
+def get_rewards(self, active_allocation) -> tuple[list, dict]:
+ # a dictionary mapping miner uids -> their apy and allocations
+ apys_and_allocations = {}
+ miner_uids = []
+ axon_times = {}
- # TODO: should probably move some things around later down the road
- allocs = {}
- filtered_allocs = {}
- for idx in range(len(responses)):
- # TODO: cleaner way to do this?
- if responses[idx].allocations is None or axon_times[uids[idx]] >= QUERY_TIMEOUT:
- allocs[uids[idx]] = {
- "apy": 0,
- "allocations": None,
- }
- else:
- allocs[uids[idx]] = {
- "apy": apys[uids[idx]],
- "allocations": responses[idx].allocations,
- }
+ # TODO: rename this here and in the database schema?
+ request_uid = active_allocation["request_uid"]
+ scoring_period_length = get_scoring_period_length(active_allocation)
- filtered_allocs[uids[idx]] = {
- "apy": apys[uids[idx]],
- "allocations": responses[idx].allocations,
- }
+ request_info = {}
+ assets_and_pools = None
+ miners = None
+
+ with get_db_connection(self.config.db_dir) as conn:
+ # get assets and pools that are used to benchmark miner
+ # we get the first row entry - we assume that it is the only response from the database
+ try:
+ request_info = get_request_info(conn, request_uid=request_uid)[0]
+ assets_and_pools = json.loads(request_info["assets_and_pools"])
+ except Exception:
+ return ([], {})
- sorted_filtered_allocs = dict(sorted(filtered_allocs.items(), key=lambda item: item[1]["apy"], reverse=True))
+ # obtain the miner responses for each request
+ miners = get_miner_responses(conn, request_uid=request_uid)
+ bt.logging.debug(f"filtered allocations: {miners}")
- sorted_apys = dict(sorted(apys.items(), key=lambda item: item[1], reverse=True))
+ # TODO: see if we can factor this into its own subroutine
+ # if so, do the same with the same one in validator.py
- sorted_axon_times = dict(sorted(axon_times.items(), key=lambda item: item[1]))
+ pools = assets_and_pools["pools"]
+ new_pools = {}
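+ # rebuild the pool objects from the stored request and sync them to the current
+ # chain state so that the yield accrued over the scoring period can be measured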
+ for uid, pool in pools.items():
+ new_pool = PoolFactory.create_pool(
+ pool_type=pool["pool_type"],
+ web3_provider=self.w3, # type: ignore[]
+ user_address=(pool["user_address"]), # TODO: is there a cleaner way to do this?
+ contract_address=pool["contract_address"],
+ )
- bt.logging.debug(f"sorted apys:\n{sorted_apys}")
- bt.logging.debug(f"sorted axon times:\n{sorted_axon_times}")
- bt.logging.debug(f"sorted filtered allocs:\n{sorted_filtered_allocs}")
+ # sync pool
+ new_pool.sync(self.w3)
+ new_pools[uid] = new_pool
- self.sorted_apys = sorted_apys
- self.sorted_axon_times = sorted_axon_times
+ assets_and_pools["pools"] = new_pools
- # Get all the reward results by iteratively calling your reward() function.
- return (
- _get_rewards(
- self,
- apys_and_allocations=allocs,
- assets_and_pools=init_assets_and_pools, # type: ignore[]
- uids=uids,
- axon_times=axon_times,
- ),
- sorted_filtered_allocs,
- )
+ # calculate the yield the pools accrued during the scoring period
+ for miner in miners:
+ allocations = json.loads(miner["allocation"])["allocations"]
+ extra_metadata = json.loads(request_info["metadata"])
+ miner_uid = miner["miner_uid"]
+ miner_apy = annualized_yield_pct(allocations, assets_and_pools, scoring_period_length, extra_metadata)
+ miner_axon_time = miner["axon_time"]
+
+ miner_uids.append(miner_uid)
+ axon_times[miner_uid] = miner_axon_time
+ apys_and_allocations[miner_uid] = {"apy": miner_apy, "allocations": allocations}
+
+ bt.logging.debug(f"yields and allocs: {apys_and_allocations}")
+
+ # TODO: there may be a better way to go about this
+ if len(miner_uids) < 1:
+ return ([], {})
+
+ # calculate rewards from the miners' apys and allocations with _get_rewards
+ return (miner_uids, _get_rewards(self, apys_and_allocations, assets_and_pools, miner_uids, axon_times))
diff --git a/sturdy/validator/simulator.py b/sturdy/validator/simulator.py
deleted file mode 100644
index 0236672..0000000
--- a/sturdy/validator/simulator.py
+++ /dev/null
@@ -1,174 +0,0 @@
-import copy
-from typing import Any
-
-import gmpy2
-import numpy as np
-
-from sturdy.constants import *
-from sturdy.pools import (
- BasePoolModel,
- ChainBasedPoolModel,
- generate_assets_and_pools,
- generate_initial_allocations_for_pools,
-)
-from sturdy.protocol import AllocationsDict
-from sturdy.utils.ethmath import wei_div, wei_mul
-
-
-class Simulator:
- def __init__(
- self,
- reversion_speed: float = REVERSION_SPEED,
- seed=None,
- ) -> None:
- self.reversion_speed = reversion_speed
- self.assets_and_pools = {}
- self.allocations = {}
- self.pool_history = []
- self.init_rng = None
- self.rng_state_container: Any = None
- self.seed = seed
-
- # initializes data - by default these are randomly generated
- def init_data(
- self,
- init_assets_and_pools: dict[str, dict[str, ChainBasedPoolModel | BasePoolModel] | int] | None = None,
- init_allocations: AllocationsDict | None = None,
- ) -> None:
- if self.rng_state_container is None or self.init_rng is None:
- raise RuntimeError("You must have first initialize()-ed the simulation if you'd like to initialize some data")
-
- if init_assets_and_pools is None:
- self.assets_and_pools: Any = generate_assets_and_pools(
- rng_gen=self.rng_state_container,
- )
- else:
- self.assets_and_pools = init_assets_and_pools
-
- if init_allocations is None:
- self.allocations = generate_initial_allocations_for_pools(
- self.assets_and_pools,
- )
- else:
- self.allocations = init_allocations
-
- # initialize pool history
- self.pool_history = [
- {
- uid: copy.deepcopy(pool)
- for uid, pool in self.assets_and_pools["pools"].items() #
- },
- ]
-
- # initialize fresh simulation instance
- def initialize(self, timesteps: int | None = None, stochasticity: float | None = None) -> None:
- # create fresh rng state
- self.init_rng = np.random.RandomState(self.seed)
- self.rng_state_container = copy.copy(self.init_rng)
-
- if timesteps is None:
- self.timesteps = self.rng_state_container.choice(
- np.arange(
- MIN_TIMESTEPS,
- MAX_TIMESTEPS + TIMESTEPS_STEP,
- TIMESTEPS_STEP,
- ),
- )
- else:
- self.timesteps = timesteps
-
- if stochasticity is None:
- self.stochasticity = self.rng_state_container.choice(
- np.arange(
- MIN_STOCHASTICITY,
- MAX_STOCHASTICITY + STOCHASTICITY_STEP,
- STOCHASTICITY_STEP,
- ),
- )
- else:
- self.stochasticity = stochasticity
-
- self.rng_state_container = copy.copy(self.init_rng)
-
- # reset sim to initial params for rng
- def reset(self) -> None:
- if self.rng_state_container is None or self.init_rng is None:
- raise RuntimeError(
- "You must have first initialize()-ed the simulation if you'd like to reset it",
- )
- self.rng_state_container = copy.copy(self.init_rng)
-
- # update the reserves in the pool with given allocations
- def update_reserves_with_allocs(self, allocs=None) -> None:
- if len(self.pool_history) <= 0 or len(self.assets_and_pools) <= 0 or len(self.allocations) <= 0:
- raise RuntimeError(
- "You must first initialize() and init_data() before updating reserves!!!",
- )
-
- allocations = self.allocations if allocs is None else allocs
-
- if len(self.pool_history) != 1:
- raise RuntimeError(
- "You must have first init data for the simulation if you'd like to update reserves",
- )
-
- for uid, alloc in allocations.items():
- pool = self.assets_and_pools["pools"][uid]
- pool_history_start = self.pool_history[0]
- pool.reserve_size += int(alloc)
- pool.reserve_size = int(pool.reserve_size)
- pool_from_history = pool_history_start[uid]
- pool_from_history.reserve_size += allocations[uid]
-
- # initialize pools
- # Function to update borrow amounts and other pool params based on reversion rate and stochasticity
- def generate_new_pool_data(self) -> dict:
- latest_pool_data = self.pool_history[-1]
- curr_borrow_rates = np.array([pool.borrow_rate for _, pool in latest_pool_data.items()])
- curr_borrow_amounts = np.array([pool.borrow_amount for _, pool in latest_pool_data.items()])
- curr_reserve_sizes = np.array([pool.reserve_size for _, pool in latest_pool_data.items()])
- optimal_util_rates = np.array([pool.optimal_util_rate for _, pool in latest_pool_data.items()])
- base_slopes = np.array([pool.base_slope for _, pool in latest_pool_data.items()])
- kink_slopes = np.array([pool.kink_slope for _, pool in latest_pool_data.items()])
-
- median_rate = np.median(curr_borrow_rates) # Calculate the median borrow rate
- noise = self.rng_state_container.normal(0, self.stochasticity * 1e18, len(curr_borrow_rates)) # Add some random noise
- rate_changes = (-self.reversion_speed * (curr_borrow_rates - median_rate)) + noise # Mean reversion principle
-
- new_borrow_amounts = []
- # Update the borrow amounts
- for i in range(len(curr_borrow_rates)):
- opt_util = optimal_util_rates[i]
- curr_util = wei_div(curr_borrow_amounts[i], curr_reserve_sizes[i])
- borrow_delta = 0
- if curr_util < opt_util:
- borrow_delta = wei_div(wei_mul(curr_reserve_sizes[i], opt_util), base_slopes[i])
- else:
- borrow_delta = wei_div(
- wei_mul(curr_reserve_sizes[i], int(1e18) - optimal_util_rates[i]),
- kink_slopes[i],
- )
-
- new_borrow_amount = curr_borrow_amounts[i] + wei_mul(borrow_delta, int(rate_changes[i]))
-
- new_borrow_amounts.append(new_borrow_amount)
-
- new_borrow_amounts = np.array(new_borrow_amounts)
-
- amounts = np.clip(new_borrow_amounts, 0, curr_reserve_sizes) # Ensure borrow amounts do not exceed reserves
- pool_uids = list(latest_pool_data.keys())
-
- new_pools = [copy.deepcopy(pool) for pool in self.assets_and_pools["pools"].values()]
-
- for idx, pool in enumerate(new_pools):
- pool.borrow_amount = amounts[idx]
-
- return {pool_uids[uid]: pool for uid, pool in enumerate(new_pools)}
-
- # run simulation
- def run(self) -> None:
- if len(self.pool_history) != 1:
- raise RuntimeError("You must first initialize() and init_data() before running the simulation!!!")
- for _ in range(1, self.timesteps):
- new_info = self.generate_new_pool_data()
- self.pool_history.append(new_info.copy())
diff --git a/sturdy/validator/sql.py b/sturdy/validator/sql.py
index b19fa06..4b73173 100644
--- a/sturdy/validator/sql.py
+++ b/sturdy/validator/sql.py
@@ -7,7 +7,8 @@
from fastapi.encoders import jsonable_encoder
-from sturdy.protocol import AllocInfo, PoolModel
+from sturdy.constants import DB_DIR, SCORING_WINDOW
+from sturdy.protocol import REQUEST_TYPES, AllocInfo, ChainBasedPoolModel
BALANCE = "balance"
KEY = "key"
@@ -22,15 +23,17 @@
# allocations table
ALLOCATION_REQUESTS_TABLE = "allocation_requests"
ALLOCATIONS_TABLE = "allocations"
+ACTIVE_ALLOCS = "active_allocs"
REQUEST_UID = "request_uid"
+REQUEST_TYPE = "request_type"
MINER_UID = "miner_uid"
USER_ADDRESS = "user_address"
ALLOCATION = "allocation"
@contextmanager
-def get_db_connection(): # noqa: ANN201
- conn = sqlite3.connect("validator_database.db")
+def get_db_connection(db_dir: str = DB_DIR, uri: bool = False): # noqa: ANN201
+ conn = sqlite3.connect(db_dir, uri=uri)
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA foreign_keys = ON")
try:
@@ -151,41 +154,106 @@ def to_json_string(input_data) -> str:
def log_allocations(
conn: sqlite3.Connection,
request_uid: str,
- assets_and_pools: dict[str, dict[str, PoolModel] | int],
+ assets_and_pools: dict[str, dict[str, ChainBasedPoolModel] | int],
+ extra_metadata: dict,
allocations: dict[str, AllocInfo],
+ axon_times: dict[str, float],
+ request_type: REQUEST_TYPES,
+ scoring_period: int,
) -> None:
- ts_now = datetime.utcnow().timestamp() # noqa: DTZ003
+ ts_now = datetime.utcnow().timestamp()
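+ # allocations logged now become scoreable once their scoring period has elapsed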
+ challenge_end = ts_now + scoring_period
+ scoring_period_end = datetime.fromtimestamp(challenge_end) # noqa: DTZ006
+ datetime_now = datetime.fromtimestamp(ts_now) # noqa: DTZ006
conn.execute(
- f"INSERT INTO {ALLOCATION_REQUESTS_TABLE} VALUES (?, json(?), ?)",
+ f"INSERT INTO {ALLOCATION_REQUESTS_TABLE} VALUES (?, json(?), ?, ?, json(?))",
(
request_uid,
json.dumps(jsonable_encoder(assets_and_pools)),
- datetime.fromtimestamp(ts_now), # noqa: DTZ006
+ datetime_now,
+ request_type,
+ # TODO: use jsonable_encoder?
+ json.dumps(extra_metadata),
+ ),
+ )
+
+ conn.execute(
+ f"INSERT INTO {ACTIVE_ALLOCS} VALUES (?, ?, ?)",
+ (
+ request_uid,
+ scoring_period_end,
+ datetime_now,
),
)
to_insert = []
- ts_now = datetime.utcnow().timestamp() # noqa: DTZ003
for miner_uid, miner_allocation in allocations.items():
- row = (
- request_uid,
- miner_uid,
- to_json_string(miner_allocation),
- datetime.fromtimestamp(ts_now), # noqa: DTZ006
- )
+ row = (request_uid, miner_uid, to_json_string(miner_allocation), datetime_now, axon_times[miner_uid])
to_insert.append(row)
- conn.executemany(f"INSERT INTO {ALLOCATIONS_TABLE} VALUES (?, ?, json(?), ?)", to_insert)
+ conn.executemany(f"INSERT INTO {ALLOCATIONS_TABLE} VALUES (?, ?, json(?), ?, ?)", to_insert)
conn.commit()
-def get_filtered_allocations(
+# TODO: rename function and database table?
+def get_active_allocs(conn: sqlite3.Connection, scoring_window: float = SCORING_WINDOW) -> list:
+ # TODO: change the logic of handling "active allocations"
+ # for now we simply get ones which are still in their "challenge"
+ # period, and consider them to determine the score of miners
+ query = f"""
+ SELECT * FROM {ACTIVE_ALLOCS}
+ WHERE scoring_period_end >= ?
+ AND scoring_period_end < ?
+ """
+ ts_now = datetime.utcnow().timestamp()
+ window_ts = ts_now - scoring_window
+ datetime_now = datetime.fromtimestamp(ts_now) # noqa: DTZ006
+ window_datetime = datetime.fromtimestamp(window_ts) # noqa: DTZ006
+
+ cur = conn.execute(query, [window_datetime, datetime_now])
+ rows = cur.fetchall()
+
+ return [dict(row) for row in rows]
+
+
+def delete_stale_active_allocs(conn: sqlite3.Connection, scoring_window: int = SCORING_WINDOW) -> int:
+ query = f"""
+ DELETE FROM {ACTIVE_ALLOCS}
+ WHERE scoring_period_end < ?
+ """
+ ts_now = datetime.utcnow().timestamp()
+ expiry_ts = ts_now - scoring_window
+ expiration_date = datetime.fromtimestamp(expiry_ts) # noqa: DTZ006
+
+ cur = conn.execute(query, [expiration_date])
+ conn.commit()
+
+ return cur.rowcount
+
+
+def delete_active_allocs(conn: sqlite3.Connection, uids_to_delete: list[str]) -> int:
+ if uids_to_delete is None or len(uids_to_delete) < 1:
+ return 0
+
+ placeholders = ", ".join(["?"] * len(uids_to_delete))
+ query = f"""
+ DELETE FROM {ACTIVE_ALLOCS}
+ WHERE request_uid in ({placeholders})
+ """
+
+ cur = conn.execute(query, uids_to_delete)
+ conn.commit()
+
+ return cur.rowcount
+
+
+def get_miner_responses(
conn: sqlite3.Connection,
- request_uid: str | None,
- miner_uid: str | None,
- from_ts: int | None,
- to_ts: int | None,
+ request_uid: str | None = None,
+ miner_uid: str | None = None,
+ from_ts: int | None = None,
+ to_ts: int | None = None,
) -> list[dict]:
query = f"""
SELECT * FROM {ALLOCATIONS_TABLE}
@@ -216,9 +284,9 @@ def get_filtered_allocations(
def get_request_info(
conn: sqlite3.Connection,
- request_uid: str | None,
- from_ts: int | None,
- to_ts: int | None,
+ request_uid: str | None = None,
+ from_ts: int | None = None,
+ to_ts: int | None = None,
) -> list[dict]:
query = f"""
SELECT * FROM {ALLOCATION_REQUESTS_TABLE}
diff --git a/tests/integration/validator/test_integration_validator.py b/tests/integration/validator/test_integration_validator.py
index d08ddb3..7dafb69 100644
--- a/tests/integration/validator/test_integration_validator.py
+++ b/tests/integration/validator/test_integration_validator.py
@@ -1,80 +1,420 @@
-import copy
+import os
+import sqlite3
import unittest
+import uuid
+from copy import copy
+from pathlib import Path
from unittest import IsolatedAsyncioTestCase
import numpy as np
+import torch
+from dotenv import load_dotenv
+from freezegun import freeze_time
+from web3 import Web3
from neurons.validator import Validator
-from sturdy.pools import generate_assets_and_pools
-from sturdy.validator.forward import query_and_score_miners
-from sturdy.validator.simulator import Simulator
+from sturdy.algo import naive_algorithm
+from sturdy.mock import MockDendrite
+from sturdy.pool_registry.pool_registry import POOL_REGISTRY
+from sturdy.pools import assets_pools_for_challenge_data
+from sturdy.protocol import REQUEST_TYPES, AllocateAssets
+from sturdy.validator.forward import get_metadata, query_multiple_miners
+from sturdy.validator.reward import filter_allocations, get_rewards
+from sturdy.validator.sql import get_active_allocs, get_db_connection, get_request_info, log_allocations
+load_dotenv()
+EXTERNAL_WEB3_PROVIDER_URL = os.getenv("WEB3_PROVIDER_URL")
+os.environ["WEB_PROVIDER_URL"] = "http://127.0.0.1:8545"
-class TestValidator(IsolatedAsyncioTestCase):
- maxDiff = 4000
+TEST_DB = "test.db"
+
+
+def init_db(conn: sqlite3.Connection) -> None:
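+ # minimal schema for these tests - the tables and columns below mirror what
+ # log_allocations() and the scoring queries in sturdy/validator/sql.py expect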
+ query = """CREATE TABLE IF NOT EXISTS allocation_requests (
+ request_uid TEXT PRIMARY KEY,
+ assets_and_pools TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ );
+
+ CREATE TABLE active_allocs (
+ request_uid TEXT PRIMARY KEY,
+ scoring_period_end TIMESTAMP,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
+ );
+
+ CREATE TABLE IF NOT EXISTS allocations (
+ request_uid TEXT,
+ miner_uid TEXT,
+ allocation TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY (request_uid, miner_uid),
+ FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
+ );
+
+ -- These alter statements add the newer columns to the allocation_requests and allocations tables
+ ALTER TABLE allocation_requests
+ ADD COLUMN request_type TEXT NOT NULL DEFAULT 1;
+ ALTER TABLE allocation_requests
+ ADD COLUMN metadata TEXT;
+ ALTER TABLE allocations
+ ADD COLUMN axon_time FLOAT NOT NULL DEFAULT 99999.0; -- large number for now"""
+ conn.executescript(query)
+
+
+class TestValidator(IsolatedAsyncioTestCase):
@classmethod
def setUpClass(cls) -> None:
- # dont log this in wandb
- config = {
+ np.random.seed(69)
+ cls.config = {
"mock": True,
"wandb": {"off": True},
"mock_n": 16,
"neuron": {"dont_save_events": True},
- "netuid": 69,
+ "db_dir": TEST_DB,
}
- cls.validator = Validator(config=config)
- # simulator with preset seed
- cls.validator.simulator = Simulator(seed=69)
- assets_and_pools = generate_assets_and_pools(np.random.RandomState(seed=420)) # type: ignore[]
+ cls.w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+ assert cls.w3.is_connected()
- cls.assets_and_pools = {
- "pools": assets_and_pools["pools"],
- "total_assets": int(1000e18),
- }
+ cls.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": EXTERNAL_WEB3_PROVIDER_URL,
+ "blockNumber": 21147890,
+ },
+ },
+ ],
+ )
- cls.contract_addresses = list(assets_and_pools["pools"].keys()) # type: ignore[]
-
- cls.allocations = {
- cls.contract_addresses[0]: 100e18,
- cls.contract_addresses[1]: 100e18,
- cls.contract_addresses[2]: 200e18,
- cls.contract_addresses[3]: 50e18,
- cls.contract_addresses[4]: 200e18,
- cls.contract_addresses[5]: 25e18,
- cls.contract_addresses[6]: 25e18,
- cls.contract_addresses[7]: 50e18,
- cls.contract_addresses[8]: 50e18,
- cls.contract_addresses[9]: 200e18,
- }
+ selected_entry = POOL_REGISTRY["Sturdy Crvusd Aggregator"]
+ cls.generated_data = assets_pools_for_challenge_data(selected_entry, cls.w3)
+ print(f"assets and pools: {cls.generated_data}")
+ cls.assets_and_pools = cls.generated_data["assets_and_pools"]
+
+ synapse = AllocateAssets(
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ assets_and_pools=copy(cls.assets_and_pools),
+ )
+
+ cls.allocations = naive_algorithm(cls, synapse)
+ cls.user_address = cls.generated_data["user_address"]
+
+ cls.contract_addresses: list[str] = list(cls.assets_and_pools["pools"].keys()) # type: ignore[]
+
+ cls.used_netuids = []
+
+ @classmethod
+ def tearDownClass(cls) -> None:
+ # run this after tests to restore original forked state
+ w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+
+ w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": EXTERNAL_WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ def setUp(self) -> None:
+ # purge sql db
+ path = Path(TEST_DB)
+ if path.exists():
+ path.unlink()
+
+ self.snapshot_id = self.w3.provider.make_request("evm_snapshot", []) # type: ignore[]
+ print(f"snapshot id: {self.snapshot_id}")
+
+ netuid = np.random.randint(69, 420)
+ self.used_netuids.append(netuid)
+ conf = copy(self.config)
+ conf["netuid"] = netuid
- async def test_query_and_score_miners(self) -> None:
- # use simulator generated assets and pools
- await query_and_score_miners(self.validator)
- self.assertIsNotNone(self.validator.simulator.assets_and_pools)
- self.assertIsNotNone(self.validator.simulator.allocations)
- self.maxDiff = None
-
- # use user-defined generated assets and pools
- simulator_copy = copy.deepcopy(self.validator.simulator)
- await query_and_score_miners(
- self.validator, assets_and_pools=copy.deepcopy(self.assets_and_pools),
+ self.validator = Validator(config=conf)
+ self.validator.w3 = self.w3
+ assert self.validator.w3.is_connected()
+
+ # init sql db
+ with get_db_connection(TEST_DB, True) as conn:
+ init_db(conn)
+ cur = conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
+ tables = [dict(t) for t in cur.fetchall()]
+ print(f"tables init: {tables}")
+
+ def tearDown(self) -> None:
+ # Optional: Revert to the original snapshot after each test
+ print("reverting to original evm snapshot")
+ self.w3.provider.make_request("evm_revert", self.snapshot_id) # type: ignore[]
+
+ # purge sql db
+ path = Path(TEST_DB)
+ if path.exists():
+ path.unlink()
+
+ async def test_get_rewards(self) -> None:
+ print("----==== test_get_rewards ====----")
+
+ freezer = freeze_time("2024-01-11 00:00:00.124513")
+ freezer.start()
+
+ request_uuid = str(uuid.uuid4()).replace("-", "")
+
+ with get_db_connection(self.validator.config.db_dir, True) as conn:
+ cur = conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
+ tables = [dict(t) for t in cur.fetchall()]
+ print(f"tables: {tables}")
+
+ assets_and_pools = copy(self.assets_and_pools)
+
+ validator = self.validator
+ validator.dendrite = MockDendrite(wallet=validator.wallet, custom_allocs=True)
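+ # custom_allocs makes the mocked miners respond with differing allocations,
+ # so their rewards are expected to differ after scoring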
+
+ # ====
+
+ active_uids = [str(uid) for uid in range(validator.metagraph.n.item()) if validator.metagraph.axons[uid].is_serving]
+
+ np.random.shuffle(active_uids)
+
+ print(f"active_uids: {active_uids}")
+
+ synapse = AllocateAssets(
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ assets_and_pools=assets_and_pools,
+ user_address=self.user_address,
+ )
+
+ # query all miners
+ responses = await query_multiple_miners(
+ validator,
+ synapse,
+ active_uids,
)
- simulator_copy.initialize()
- simulator_copy.init_data(
- init_assets_and_pools=copy.deepcopy(self.assets_and_pools),
+
+ allocations = {uid: responses[idx].allocations for idx, uid in enumerate(active_uids)} # type: ignore[]
+
+ for response in responses:
+ # TODO: is this necessary?
+ self.assertLessEqual(sum(response.allocations.values()), assets_and_pools["total_assets"])
+
+ # Log the results for monitoring purposes.
+ print(f"Assets and pools: {synapse.assets_and_pools}")
+ print(f"Received allocations (uid -> allocations): {allocations}")
+
+ pools = assets_and_pools["pools"]
+ metadata = get_metadata(pools, validator.w3)
+
+ # scoring period is ~12 hours
+ scoring_period = 43200
+
+ axon_times, filtered_allocs = filter_allocations(
+ self,
+ query=validator.step,
+ uids=active_uids,
+ responses=responses,
+ assets_and_pools=assets_and_pools,
)
- simulator_copy.update_reserves_with_allocs()
- # TODO: update these tests - low priority
- # assets_pools_should_be = simulator_copy.assets_and_pools
- # assets_pools2 = self.validator.simulator.assets_and_pools
- # self.assertEqual(assets_pools2, assets_pools_should_be)
- # self.assertIsNotNone(self.validator.simulator.allocations)
+ # log allocations
+ with get_db_connection(validator.config.db_dir) as conn:
+ log_allocations(
+ conn,
+ request_uuid,
+ assets_and_pools,
+ metadata,
+ filtered_allocs,
+ axon_times,
+ REQUEST_TYPES.SYNTHETIC,
+ scoring_period,
+ )
+
+ freezer.stop()
+
+ # fast forward ~12 hrs
+
+ freezer = freeze_time("2024-01-11 12:01:00.136136")
+ freezer.start()
+
+ validator.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": EXTERNAL_WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ curr_pools = assets_and_pools["pools"]
+ for pool in curr_pools.values():
+ pool.sync(validator.w3)
+
+ # score previously suggested miner allocations based on how well they are performing now
+ # get all the request ids for the pools we should be scoring from the db
+ active_alloc_rows = []
+ with get_db_connection(validator.config.db_dir, True) as conn:
+ active_alloc_rows = get_active_allocs(conn)
+
+ print(f"Active allocs: {active_alloc_rows}")
+
+ with get_db_connection(validator.config.db_dir, True) as conn:
+ all_requests = get_request_info(conn)
+ print(f"all requests: {all_requests}")
+
+ for active_alloc in active_alloc_rows:
+ # calculate rewards for previous active allocations
+ miner_uids, rewards = get_rewards(validator, active_alloc)
+
+ rewards_dict = {active_uids[k]: v for k, v in enumerate(list(rewards))}
+ sorted_rewards = dict(sorted(rewards_dict.items(), key=lambda item: item[1], reverse=True)) # type: ignore[]
+
+ print(f"sorted rewards: {sorted_rewards}")
+ print(f"sim penalities: {validator.similarity_penalties}")
+
+ # rewards should not all be the same
+ to_compare = torch.empty(rewards.shape)
+ torch.fill(to_compare, rewards[0])
+ self.assertFalse(torch.equal(rewards, to_compare))
+
+ freezer.stop()
+
+ async def test_get_rewards_punish(self) -> None:
+ print("----==== test_get_rewards_punish ====----")
+
+ freezer = freeze_time("2024-01-11 00:00:00.124513")
+ freezer.start()
+
+ request_uuid = str(uuid.uuid4()).replace("-", "")
+
+ with get_db_connection(self.validator.config.db_dir, True) as conn:
+ cur = conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
+ tables = [dict(t) for t in cur.fetchall()]
+ print(f"tables: {tables}")
+
+ assets_and_pools = copy(self.assets_and_pools)
+ allocations = copy(self.allocations)
+
+ validator = self.validator
+ validator.dendrite = MockDendrite(wallet=validator.wallet)
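+ # without custom_allocs the mocked miners are expected to return identical
+ # allocations, so scoring should zero out all of their rewards for plagiarism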
+
+ # ====
+
+ active_uids = [str(uid) for uid in range(validator.metagraph.n.item()) if validator.metagraph.axons[uid].is_serving]
+
+ np.random.shuffle(active_uids)
+
+ print(f"active_uids: {active_uids}")
+
+ synapse = AllocateAssets(
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ assets_and_pools=assets_and_pools,
+ user_address=self.user_address,
+ allocations=allocations,
+ )
+
+ # query all miners
+ responses = await query_multiple_miners(
+ validator,
+ synapse,
+ active_uids,
+ )
+
+ allocations = {uid: responses[idx].allocations for idx, uid in enumerate(active_uids)} # type: ignore[]
+
+ for response in responses:
+ # TODO: is this necessary?
+ self.assertLessEqual(sum(response.allocations.values()), assets_and_pools["total_assets"])
+
+ # Log the results for monitoring purposes.
+ print(f"Assets and pools: {synapse.assets_and_pools}")
+ print(f"Received allocations (uid -> allocations): {allocations}")
+
+ pools = assets_and_pools["pools"]
+ metadata = get_metadata(pools, validator.w3)
+
+ # scoring period is ~12 hours
+ scoring_period = 43200
+
+ axon_times, filtered_allocs = filter_allocations(
+ self,
+ query=validator.step,
+ uids=active_uids,
+ responses=responses,
+ assets_and_pools=assets_and_pools,
+ )
+
+ # log allocations
+ with get_db_connection(validator.config.db_dir) as conn:
+ log_allocations(
+ conn,
+ request_uuid,
+ assets_and_pools,
+ metadata,
+ filtered_allocs,
+ axon_times,
+ REQUEST_TYPES.SYNTHETIC,
+ scoring_period,
+ )
+
+ freezer.stop()
+
+ # fast forward ~12 hrs
+
+ freezer = freeze_time("2024-01-11 12:01:00.136136")
+ freezer.start()
+
+ validator.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": EXTERNAL_WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ curr_pools = assets_and_pools["pools"]
+ for pool in curr_pools.values():
+ pool.sync(validator.w3)
+
+ # score previously suggested miner allocations based on how well they are performing now
+ # get all the request ids for the pools we should be scoring from the db
+ active_alloc_rows = []
+ with get_db_connection(validator.config.db_dir, True) as conn:
+ active_alloc_rows = get_active_allocs(conn)
+
+ print(f"Active allocs: {active_alloc_rows}")
+
+ with get_db_connection(validator.config.db_dir, True) as conn:
+ all_requests = get_request_info(conn)
+ print(f"all requests: {all_requests}")
+
+ for active_alloc in active_alloc_rows:
+ # calculate rewards for previous active allocations
+ miner_uids, rewards = get_rewards(validator, active_alloc)
+
+ rewards_dict = {active_uids[k]: v for k, v in enumerate(list(rewards))}
+ sorted_rewards = dict(sorted(rewards_dict.items(), key=lambda item: item[1], reverse=True)) # type: ignore[]
+
+ print(f"sorted rewards: {sorted_rewards}")
+ print(f"sim penalities: {validator.similarity_penalties}")
+
+ # all rewards should be zero - every miner should be hit with the similarity penalty
+ to_compare = torch.zeros_like(rewards)
+ self.assertTrue(torch.equal(rewards, to_compare))
- async def test_forward(self) -> None:
- await self.validator.forward()
+ freezer.stop()
if __name__ == "__main__":
diff --git a/tests/unit/validator/test_pool_generator.py b/tests/unit/validator/test_pool_generator.py
index 2443b7d..fcfa695 100644
--- a/tests/unit/validator/test_pool_generator.py
+++ b/tests/unit/validator/test_pool_generator.py
@@ -1,82 +1,100 @@
+import os
import unittest
import numpy as np
+from dotenv import load_dotenv
+from eth_account import Account
+from web3 import Web3
from sturdy.constants import *
+from sturdy.pool_registry.pool_registry import POOL_REGISTRY
from sturdy.pools import (
- BasePoolModel,
- generate_assets_and_pools,
- generate_initial_allocations_for_pools,
+ assets_pools_for_challenge_data,
)
-from sturdy.utils.ethmath import wei_mul
+
+load_dotenv()
+WEB3_PROVIDER_URL = os.getenv("WEB3_PROVIDER_URL")
class TestPoolAndAllocGeneration(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls) -> None:
+ # runs tests on local mainnet fork at block: 21150770
+ cls.w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+ assert cls.w3.is_connected()
+
+ cls.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ cls.contract_address = "0x0669091F451142b3228171aE6aD794cF98288124"
+ # Create a funded account for testing
+ cls.account = Account.create()
+ cls.w3.eth.send_transaction(
+ {
+ "to": cls.account.address,
+ "from": cls.w3.eth.accounts[0],
+ "value": cls.w3.to_wei(200000, "ether"),
+ }
+ )
+
+ cls.snapshot_id = cls.w3.provider.make_request("evm_snapshot", []) # type: ignore[]
+ print(f"snapshot id: {cls.snapshot_id}")
+
+ @classmethod
+ def tearDownClass(cls) -> None:
+ # run this after tests to restore original forked state
+ w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+
+ w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ def setUp(self) -> None:
+ self.snapshot_id = self.w3.provider.make_request("evm_snapshot", []) # type: ignore[]
+ print(f"snapshot id: {self.snapshot_id}")
+
+ def tearDown(self) -> None:
+ # Optional: Revert to the original snapshot after each test
+ print("reverting to original evm snapshot")
+ self.w3.provider.make_request("evm_revert", self.snapshot_id) # type: ignore[]
+
def test_generate_assets_and_pools(self) -> None:
# same seed on every test run
np.random.seed(69)
# run test multiple times to to ensure the number generated are
# within the correct ranges
- for _ in range(100):
- result = generate_assets_and_pools(np.random.RandomState(69))
-
- pools: dict[str, BasePoolModel] = result["pools"]
- total_borrows = sum([pool.borrow_amount for pool in pools.values()])
-
- # Assert total assets
- self.assertTrue(
- total_borrows + MIN_TOTAL_ASSETS_OFFSET <= result["total_assets"] <= total_borrows + MAX_TOTAL_ASSETS_OFFSET
- )
-
- # Assert number of pools
- self.assertEqual(len(result["pools"]), NUM_POOLS)
-
- # Assert properties of each pool
- for pool_info in result["pools"].values():
- self.assertTrue(hasattr(pool_info, "base_rate"))
- self.assertTrue(
- MIN_BASE_RATE <= pool_info.base_rate <= MAX_BASE_RATE
- )
-
- self.assertTrue(hasattr(pool_info, "base_slope"))
- self.assertTrue(MIN_SLOPE <= pool_info.base_slope <= MAX_SLOPE)
-
- self.assertTrue(hasattr(pool_info, "kink_slope"))
- self.assertTrue(
- MIN_KINK_SLOPE <= pool_info.kink_slope <= MAX_KINK_SLOPE
- )
-
- self.assertTrue(hasattr(pool_info, "optimal_util_rate"))
- self.assertTrue(
- MIN_OPTIMAL_RATE
- <= pool_info.optimal_util_rate
- <= MAX_OPTIMAL_RATE
- )
-
- self.assertTrue(hasattr(pool_info, "reserve_size"))
- self.assertEqual(pool_info.reserve_size, POOL_RESERVE_SIZE)
-
- self.assertTrue(hasattr(pool_info, "borrow_amount"))
- self.assertTrue(
- wei_mul(MIN_UTIL_RATE, POOL_RESERVE_SIZE)
- <= pool_info.borrow_amount
- <= wei_mul(MAX_UTIL_RATE, POOL_RESERVE_SIZE)
- )
-
- def test_generate_initial_allocations_for_pools(self) -> None:
- # same seed on every test run
- np.random.seed(69)
- # run test multiple times to to ensure the number generated are
- # within the correct ranges
- for _ in range(100):
- assets_and_pools = generate_assets_and_pools(np.random.RandomState(69))
- max_alloc = assets_and_pools["total_assets"]
- pools = assets_and_pools["pools"]
- result = generate_initial_allocations_for_pools(assets_and_pools)
- result = dict(result.items())
-
- # Assert number of allocations
- self.assertEqual(len(result), len(pools))
+ keys = list(POOL_REGISTRY.keys())
+ for idx in range(len(keys)):
+ key = keys[idx]
+ print(key)
+ selected_entry = POOL_REGISTRY[key]
+ generated = assets_pools_for_challenge_data(selected_entry, self.w3)
+ print(generated)
+
+ pools = generated["assets_and_pools"]["pools"]
+ total_assets = generated["assets_and_pools"]["total_assets"]
+
+ # check the member variables of the returned value
+ self.assertEqual(list(pools.keys()), list(pools.keys()))
+ # check returned total assets
+ self.assertGreater(total_assets, 0)
if __name__ == "__main__":
diff --git a/tests/unit/validator/test_pool_models.py b/tests/unit/validator/test_pool_models.py
index 1a39f0b..c6e7469 100644
--- a/tests/unit/validator/test_pool_models.py
+++ b/tests/unit/validator/test_pool_models.py
@@ -10,7 +10,8 @@
from sturdy.constants import APR_ORACLE
from sturdy.pools import (
- AaveV3DefaultInterestRatePool,
+ AaveV3DefaultInterestRateV2Pool,
+ AaveV3RateTargetBaseInterestRatePool,
CompoundV3Pool,
DaiSavingsRate,
MorphoVault,
@@ -37,7 +38,7 @@ def setUpClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20233401,
+ "blockNumber": 21150770,
},
},
],
@@ -79,7 +80,7 @@ def tearDownClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20976304,
+ "blockNumber": 21150770,
},
},
],
@@ -97,7 +98,7 @@ def tearDown(self) -> None:
def test_pool_contract(self) -> None:
print("----==== test_pool_contract ====----")
# we call the aave3 weth atoken proxy contract in this example
- pool = AaveV3DefaultInterestRatePool(
+ pool = AaveV3RateTargetBaseInterestRatePool(
contract_address=self.atoken_address,
)
@@ -110,13 +111,13 @@ def test_pool_contract(self) -> None:
# TODO: test syncing after time travel
def test_sync(self) -> None:
- print("----==== test_sync ====----")
- pool = AaveV3DefaultInterestRatePool(
+ print("----==== TestAavePool | test_sync ====----")
+ pool = AaveV3DefaultInterestRateV2Pool(
contract_address=self.atoken_address,
)
# sync pool params
- pool.sync(self.account.address, web3_provider=self.w3)
+ pool.sync(web3_provider=self.w3)
self.assertTrue(hasattr(pool, "_atoken_contract"))
self.assertTrue(isinstance(pool._atoken_contract, Contract))
@@ -124,35 +125,37 @@ def test_sync(self) -> None:
self.assertTrue(hasattr(pool, "_pool_contract"))
self.assertTrue(isinstance(pool._pool_contract, Contract))
- # TODO: get snapshots working correctly so we are not under the mercy of the automatic ordering of tests
+ self.assertTrue(hasattr(pool, "_normalized_income"))
+ self.assertTrue(isinstance(pool._normalized_income, int))
+ self.assertGreaterEqual(pool._normalized_income, int(1e27))
+ print(f"normalized income: {pool._normalized_income}")
+
def test_supply_rate_alloc(self) -> None:
- print("----==== test_supply_rate_increase_alloc ====----")
- pool = AaveV3DefaultInterestRatePool(
+ print("----==== TestAavePool | test_supply_rate_increase_alloc ====----")
+ pool = AaveV3DefaultInterestRateV2Pool(
contract_address=self.atoken_address,
)
# sync pool params
- pool.sync(self.account.address, web3_provider=self.w3)
+ pool.sync(web3_provider=self.w3)
reserve_data = retry_with_backoff(pool._pool_contract.functions.getReserveData(pool._underlying_asset_address).call)
apy_before = Web3.to_wei(reserve_data.currentLiquidityRate / 1e27, "ether")
print(f"apy before supplying: {apy_before}")
- # calculate predicted future supply rate after supplying 10000 ETH
- apy_after = pool.supply_rate(int(10000e18))
- print(f"apy after supplying 10000 ETH: {apy_after}")
+ # calculate predicted future supply rate after supplying 2000000 ETH
+ apy_after = pool.supply_rate(int(2000000e18))
+ print(f"apy after supplying 2000000 ETH: {apy_after}")
self.assertNotEqual(apy_after, 0)
self.assertLess(apy_after, apy_before)
def test_supply_rate_decrease_alloc(self) -> None:
- print("----==== test_supply_rate_decrease_alloc ====----")
- pool = AaveV3DefaultInterestRatePool(
- contract_address=self.atoken_address,
- )
+ print("----==== TestAavePool | test_supply_rate_decrease_alloc ====----")
+ pool = AaveV3DefaultInterestRateV2Pool(contract_address=self.atoken_address, user_address=self.account.address)
# sync pool params
- pool.sync(self.account.address, web3_provider=self.w3)
+ pool.sync(web3_provider=self.w3)
tx = self.weth_contract.functions.deposit().build_transaction(
{
@@ -217,7 +220,7 @@ def test_supply_rate_decrease_alloc(self) -> None:
print(f"apy before rebalancing ether: {apy_before}")
# calculate predicted future supply rate after removing 1000 ETH to end up with 9000 ETH in the pool
- pool.sync(self.account.address, self.w3)
+ pool.sync(self.w3)
apy_after = pool.supply_rate(int(9000e18))
print(f"apy after rebalancing ether: {apy_after}")
self.assertNotEqual(apy_after, 0)
@@ -237,7 +240,8 @@ def setUpClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20233401,
+ # "blockNumber": 20233401,
+ "blockNumber": 21080765,
},
},
],
@@ -268,7 +272,7 @@ def tearDownClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20976304,
+ "blockNumber": 21080765,
},
},
],
@@ -285,13 +289,11 @@ def tearDown(self) -> None:
def test_silo_strategy_contract(self) -> None:
print("----==== test_pool_contract ====----")
- # we call the aave3 weth atoken proxy contract in this example
- pool = VariableInterestSturdySiloStrategy(
- contract_address=self.contract_address,
- ) # type: ignore[]
whale_addr = self.w3.to_checksum_address("0x0669091F451142b3228171aE6aD794cF98288124")
- pool.sync(whale_addr, self.w3)
+ pool = VariableInterestSturdySiloStrategy(contract_address=self.contract_address, user_address=whale_addr) # type: ignore[]
+
+ pool.sync(self.w3)
self.assertTrue(hasattr(pool, "_silo_strategy_contract"))
self.assertTrue(isinstance(pool._silo_strategy_contract, Contract))
@@ -305,6 +307,10 @@ def test_silo_strategy_contract(self) -> None:
self.assertTrue(isinstance(pool._rate_model_contract, Contract))
print(f"rate model contract: {pool._rate_model_contract.address}")
+ self.assertTrue(hasattr(pool, "_share_price"))
+ self.assertTrue(isinstance(pool._share_price, int))
+ print(f"price per share: {pool._share_price}")
+
# don't change deposit amount to pool by much
prev_supply_rate = pool.supply_rate(int(630e18))
# increase deposit amount to pool by ~100e18 (~630 pxETH)
@@ -363,7 +369,7 @@ def tearDownClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20976304,
+ "blockNumber": 21080765,
},
},
],
@@ -496,7 +502,7 @@ def tearDownClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20976304,
+ "blockNumber": 21080765,
},
},
],
@@ -582,7 +588,7 @@ def tearDownClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20976304,
+ "blockNumber": 21080765,
},
},
],
@@ -621,13 +627,25 @@ def test_morphovault_pool_model(self) -> None:
self.assertTrue(hasattr(pool, "_asset_decimals"))
self.assertTrue(isinstance(pool._asset_decimals, int))
- self.assertTrue(hasattr(pool, "_total_assets"))
- self.assertTrue(isinstance(pool._total_assets, int))
- self.assertTrue(hasattr(pool, "_user_assets"))
- self.assertTrue(isinstance(pool._user_assets, int))
+ self.assertTrue(hasattr(pool, "_total_supplied_assets"))
+ self.assertTrue(isinstance(pool._total_supplied_assets, int))
+ self.assertTrue(hasattr(pool, "_user_deposits"))
+ self.assertTrue(isinstance(pool._user_deposits, int))
self.assertTrue(hasattr(pool, "_curr_borrows"))
self.assertTrue(isinstance(pool._curr_borrows, int))
+ self.assertTrue(hasattr(pool, "_underlying_asset_contract"))
+ self.assertTrue(isinstance(pool._underlying_asset_contract, Contract))
+ self.assertTrue(hasattr(pool, "_user_asset_balance"))
+ self.assertTrue(isinstance(pool._user_asset_balance, int))
+ print(f"user asset balance: {pool._user_asset_balance}")
+ self.assertGreater(pool._user_asset_balance, 0)
+
+ self.assertTrue(hasattr(pool, "_share_price"))
+ self.assertTrue(isinstance(pool._share_price, int))
+ print(f"morpho vault share price: {pool._share_price}")
+ self.assertGreater(pool._share_price, 0)
+
# check pool supply_rate
print(pool.supply_rate(0))
@@ -731,7 +749,7 @@ def tearDownClass(cls) -> None:
{
"forking": {
"jsonRpcUrl": WEB3_PROVIDER_URL,
- "blockNumber": 20976304,
+ "blockNumber": 21080765,
},
},
],
@@ -762,6 +780,19 @@ def test_vault_pool_model(self) -> None:
self.assertTrue(isinstance(pool._apr_oracle, Contract))
self.assertEqual(pool._apr_oracle.address, APR_ORACLE)
+ self.assertTrue(hasattr(pool, "_user_deposits"))
+ self.assertTrue(isinstance(pool._user_deposits, int))
+
+ self.assertTrue(hasattr(pool, "_user_asset_balance"))
+ self.assertTrue(isinstance(pool._user_asset_balance, int))
+ print(f"user asset balance: {pool._user_asset_balance}")
+ self.assertGreater(pool._user_asset_balance, 0)
+
+ self.assertTrue(hasattr(pool, "_share_price"))
+ self.assertTrue(isinstance(pool._share_price, int))
+ print(f"morpho vault share price: {pool._share_price}")
+ self.assertGreater(pool._share_price, 0)
+
# check pool supply_rate
print(pool.supply_rate(0))
@@ -814,5 +845,142 @@ def test_supply_rate_decrease_alloc(self) -> None:
self.assertGreater(apy_after, apy_before)
+# TODO: make TestAavePool and this test use the same block number but different addresses.
+# Right now they both use the same pool, just at different historical blocks.
+class TestAaveTargetPool(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls) -> None:
+        # runs tests on a local mainnet fork at block: 21150770
+ cls.w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+ assert cls.w3.is_connected()
+
+ cls.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ # spark dai
+ cls.atoken_address = "0x4DEDf26112B3Ec8eC46e7E31EA5e123490B05B8B"
+        # Use an existing funded account (an spDai holder) for testing instead of creating a fresh one
+ # cls.account = Account.create()
+ cls.account_address = "0x0Fd6abA4272a96Bb8CcbbA69B825075cb2047D1D" # spDai holder (~17.5k spDai at time of writing)
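+        # fund the holder address with ETH so it can cover gas for test transactions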
+ cls.w3.eth.send_transaction(
+ {
+ "to": cls.account_address,
+ "from": cls.w3.eth.accounts[0],
+ "value": cls.w3.to_wei(200000, "ether"),
+ }
+ )
+
+ cls.snapshot_id = cls.w3.provider.make_request("evm_snapshot", []) # type: ignore[]
+ print(f"snapshot id: {cls.snapshot_id}")
+
+ @classmethod
+ def tearDownClass(cls) -> None:
+ # run this after tests to restore original forked state
+ w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+
+ w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21150770,
+ },
+ },
+ ],
+ )
+
+ def setUp(self) -> None:
+ self.snapshot_id = self.w3.provider.make_request("evm_snapshot", []) # type: ignore[]
+ print(f"snapshot id: {self.snapshot_id}")
+
+ def tearDown(self) -> None:
+ # Optional: Revert to the original snapshot after each test
+ print("reverting to original evm snapshot")
+ self.w3.provider.make_request("evm_revert", self.snapshot_id) # type: ignore[]
+
+ def test_pool_contract(self) -> None:
+ print("----==== test_pool_contract ====----")
+ # we call the aave3 weth atoken proxy contract in this example
+ pool = AaveV3RateTargetBaseInterestRatePool(
+ contract_address=self.atoken_address,
+ )
+
+ pool.pool_init(self.w3)
+ self.assertTrue(hasattr(pool, "_atoken_contract"))
+ self.assertTrue(isinstance(pool._atoken_contract, Contract))
+
+ self.assertTrue(hasattr(pool, "_pool_contract"))
+ self.assertTrue(isinstance(pool._pool_contract, Contract))
+
+ # TODO: test syncing after time travel
+ def test_sync(self) -> None:
+ print("----==== test_sync ====----")
+ pool = AaveV3RateTargetBaseInterestRatePool(
+ contract_address=self.atoken_address,
+ )
+
+ # sync pool params
+ pool.sync(web3_provider=self.w3)
+
+ self.assertTrue(hasattr(pool, "_atoken_contract"))
+ self.assertTrue(isinstance(pool._atoken_contract, Contract))
+
+ self.assertTrue(hasattr(pool, "_pool_contract"))
+ self.assertTrue(isinstance(pool._pool_contract, Contract))
+
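+        # Aave reports normalized income in ray units (1e27 == 1.0), so it should never fall below 1e27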
+ self.assertTrue(hasattr(pool, "_normalized_income"))
+ self.assertTrue(isinstance(pool._normalized_income, int))
+ self.assertGreaterEqual(pool._normalized_income, int(1e27))
+ print(f"normalized income: {pool._normalized_income}")
+
+    # TODO: get snapshots working correctly so we are not at the mercy of the automatic ordering of tests
+ def test_supply_rate_alloc(self) -> None:
+ print("----==== test_supply_rate_increase_alloc ====----")
+ pool = AaveV3RateTargetBaseInterestRatePool(contract_address=self.atoken_address, user_address=self.account_address)
+
+ # sync pool params
+ pool.sync(web3_provider=self.w3)
+
+ reserve_data = retry_with_backoff(pool._pool_contract.functions.getReserveData(pool._underlying_asset_address).call)
+
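+        # currentLiquidityRate is a ray (1e27) value; dividing by 1e27 and converting with to_wei yields a 1e18-scaled rate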
+ apy_before = Web3.to_wei(reserve_data.currentLiquidityRate / 1e27, "ether")
+ print(f"apy before supplying: {apy_before}")
+
+ # calculate predicted future supply rate after supplying 100000 DAI
+ apy_after = pool.supply_rate(int(100000e18))
+ print(f"apy after supplying 100000 DAI: {apy_after}")
+ self.assertNotEqual(apy_after, 0)
+ self.assertLess(apy_after, apy_before)
+
+ def test_supply_rate_decrease_alloc(self) -> None:
+ print("----==== test_supply_rate_decrease_alloc ====----")
+ pool = AaveV3RateTargetBaseInterestRatePool(contract_address=self.atoken_address, user_address=self.account_address)
+
+ # sync pool params
+ pool.sync(web3_provider=self.w3)
+
+ reserve_data = retry_with_backoff(pool._pool_contract.functions.getReserveData(pool._underlying_asset_address).call)
+
+ apy_before = Web3.to_wei(reserve_data.currentLiquidityRate / 1e27, "ether")
+ print(f"apy before rebalancing ether: {apy_before}")
+
+        # calculate predicted future supply rate after reducing the deposit to 9000 DAI in the pool
+ pool.sync(self.w3)
+ apy_after = pool.supply_rate(int(9000e18))
+ print(f"apy after rebalancing ether: {apy_after}")
+ self.assertNotEqual(apy_after, 0)
+ self.assertGreater(apy_after, apy_before)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/tests/unit/validator/test_reward_helpers.py b/tests/unit/validator/test_reward_helpers.py
index 351852c..cc2ffd0 100644
--- a/tests/unit/validator/test_reward_helpers.py
+++ b/tests/unit/validator/test_reward_helpers.py
@@ -9,15 +9,20 @@
from web3.constants import ADDRESS_ZERO
from neurons.validator import Validator
+from sturdy.algo import naive_algorithm
+from sturdy.pool_registry.pool_registry import POOL_REGISTRY
from sturdy.pools import *
+from sturdy.protocol import REQUEST_TYPES, AllocateAssets
from sturdy.validator.reward import (
adjust_rewards_for_plagiarism,
+ annualized_yield_pct,
calculate_penalties,
calculate_rewards_with_adjusted_penalties,
- dynamic_normalize_zscore,
format_allocations,
+ get_allocation_similarity_matrix,
+ get_apy_similarity_matrix,
get_distance,
- get_similarity_matrix,
+ normalize_exp,
)
load_dotenv()
@@ -85,7 +90,7 @@ class TestDynamicNormalizeZScore(unittest.TestCase):
def test_basic_normalization(self) -> None:
# Test a simple AllocationsDict with large values
apys_and_allocations = {"1": {"apy": 1e16}, "2": {"apy": 2e16}, "3": {"apy": 3e16}, "4": {"apy": 4e16}}
- normalized = dynamic_normalize_zscore(apys_and_allocations)
+ normalized = normalize_exp(apys_and_allocations)
# Check if output is normalized between 0 and 1
self.assertAlmostEqual(normalized.min().item(), 0.0, places=5)
@@ -100,7 +105,7 @@ def test_with_low_outliers(self) -> None:
"4": {"apy": 5e16},
"5": {"apy": 1e17},
}
- normalized = dynamic_normalize_zscore(apys_and_allocations)
+ normalized = normalize_exp(apys_and_allocations)
# Check that outliers don't affect the overall normalization
self.assertAlmostEqual(normalized.min().item(), 0.0, places=5)
@@ -115,7 +120,7 @@ def test_with_high_outliers(self) -> None:
"4": {"apy": 1e17},
"5": {"apy": 2e17},
}
- normalized = dynamic_normalize_zscore(apys_and_allocations)
+ normalized = normalize_exp(apys_and_allocations)
# Check that the function correctly handles high outliers
self.assertAlmostEqual(normalized.min().item(), 0.0, places=5)
@@ -124,7 +129,7 @@ def test_with_high_outliers(self) -> None:
def test_uniform_values(self) -> None:
# Test where all values are the same
apys_and_allocations = {"1": {"apy": 1e16}, "2": {"apy": 1e16}, "3": {"apy": 1e16}, "4": {"apy": 1e16}}
- normalized = dynamic_normalize_zscore(apys_and_allocations)
+ normalized = normalize_exp(apys_and_allocations)
# If all values are the same, the output should also be uniform (or handle gracefully)
self.assertTrue(
@@ -142,7 +147,7 @@ def test_low_variance(self) -> None:
"4": {"apy": 1.03e16},
"5": {"apy": 1.04e16},
}
- normalized = dynamic_normalize_zscore(apys_and_allocations)
+ normalized = normalize_exp(apys_and_allocations)
# Check if normalization happens correctly
self.assertAlmostEqual(normalized.min().item(), 0.0, places=5)
@@ -151,30 +156,12 @@ def test_low_variance(self) -> None:
def test_high_variance(self) -> None:
# Test with high variance data
apys_and_allocations = {"1": {"apy": 1e16}, "2": {"apy": 1e17}, "3": {"apy": 5e17}, "4": {"apy": 1e18}}
- normalized = dynamic_normalize_zscore(apys_and_allocations)
+ normalized = normalize_exp(apys_and_allocations)
# Ensure that the normalization works even with high variance
self.assertAlmostEqual(normalized.min().item(), 0.0, places=5)
self.assertAlmostEqual(normalized.max().item(), 1.0, places=5)
- def test_quantile_logic(self) -> None:
- # Test a case where the lower quartile range affects the lower bound decision
- apys_and_allocations = {
- "1": {"apy": 1e16},
- "2": {"apy": 2e16},
- "3": {"apy": 3e16},
- "4": {"apy": 4e16},
- "5": {"apy": 1e17},
- "6": {"apy": 2e17},
- "7": {"apy": 3e17},
- "8": {"apy": 4e17},
- }
- normalized = dynamic_normalize_zscore(apys_and_allocations)
-
- # Ensure that quantile-based clipping works as expected
- self.assertAlmostEqual(normalized.min().item(), 0.0, places=5)
- self.assertAlmostEqual(normalized.max().item(), 1.0, places=5)
-
class TestRewardFunctions(unittest.TestCase):
@classmethod
@@ -211,122 +198,6 @@ def tearDown(self) -> None:
# Optional: Revert to the original snapshot after each test
self.w3.provider.make_request("evm_revert", self.snapshot_id) # type: ignore[]
- def test_check_allocations_valid(self) -> None:
- allocations = {ADDRESS_ZERO: int(5e18), BEEF: int(3e18)}
- assets_and_pools = {
- "total_assets": int(8e18),
- "pools": {
- ADDRESS_ZERO: BasePool(
- contract_address=ADDRESS_ZERO,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(2e18),
- reserve_size=0,
- ),
- BEEF: BasePool(
- contract_address=BEEF,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(1e18),
- reserve_size=0,
- ),
- },
- }
-
- result = check_allocations(assets_and_pools, allocations)
- self.assertTrue(result)
-
- def test_check_allocations_overallocate(self) -> None:
- allocations = {ADDRESS_ZERO: int(10e18), BEEF: int(3e18)}
- assets_and_pools = {
- "total_assets": int(10e18),
- "pools": {
- ADDRESS_ZERO: BasePool(
- contract_address=ADDRESS_ZERO,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(2e18),
- reserve_size=0,
- ),
- BEEF: BasePool(
- contract_address=BEEF,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(1e18),
- reserve_size=0,
- ),
- },
- }
-
- result = check_allocations(assets_and_pools, allocations)
- self.assertFalse(result)
-
- def test_check_allocations_below_borrow(self) -> None:
- allocations = {ADDRESS_ZERO: int(1e18), BEEF: 0}
- assets_and_pools = {
- "total_assets": int(10e18),
- "pools": {
- ADDRESS_ZERO: BasePool(
- contract_address=ADDRESS_ZERO,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(2e18),
- reserve_size=0,
- ),
- BEEF: BasePool(
- contract_address=BEEF,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(1e18),
- reserve_size=0,
- ),
- },
- }
-
- result = check_allocations(assets_and_pools, allocations)
- self.assertFalse(result)
-
- def test_check_allocations_below_alloc_threshold(self) -> None:
- allocations = {ADDRESS_ZERO: int(4e18), BEEF: int(4e18)}
- assets_and_pools = {
- "total_assets": int(10e18),
- "pools": {
- ADDRESS_ZERO: BasePool(
- contract_address=ADDRESS_ZERO,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(2e18),
- reserve_size=0,
- ),
- BEEF: BasePool(
- contract_address=BEEF,
- base_rate=0,
- base_slope=0,
- kink_slope=0,
- optimal_util_rate=0,
- borrow_amount=int(1e18),
- reserve_size=0,
- ),
- },
- }
-
- result = check_allocations(assets_and_pools, allocations)
- self.assertFalse(result)
-
def test_check_allocations_sturdy(self) -> None:
A = "0x6311fF24fb15310eD3d2180D3d0507A21a8e5227"
VAULT = "0x73E4C11B670Ef9C025A030A20b72CB9150E54523"
@@ -343,12 +214,12 @@ def test_check_allocations_sturdy(self) -> None:
}
pool_a: VariableInterestSturdySiloStrategy = assets_and_pools["pools"][A]
- pool_a.sync(VAULT, web3_provider=self.w3)
+ pool_a.sync(web3_provider=self.w3)
# case: borrow_amount <= assets_available, deposit_amount < assets_available
- pool_a._totalAssets = int(100e23)
+ pool_a._total_supplied_assets = int(100e23)
pool_a._totalBorrow = int(10e23)
- pool_a._curr_deposit_amount = int(5e23)
+ pool_a._user_deposits = int(5e23)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -356,7 +227,7 @@ def test_check_allocations_sturdy(self) -> None:
# case: borrow_amount > assets_available, deposit_amount >= assets_available
pool_a._totalBorrow = int(97e23)
- pool_a._curr_deposit_amount = int(5e23)
+ pool_a._user_deposits = int(5e23)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations)
@@ -364,7 +235,7 @@ def test_check_allocations_sturdy(self) -> None:
# should return True
pool_a._totalBorrow = int(97e23)
- pool_a._curr_deposit_amount = int(5e23)
+ pool_a._user_deposits = int(5e23)
allocations[A] = int(4e23)
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -372,7 +243,7 @@ def test_check_allocations_sturdy(self) -> None:
# case: borrow_amount > assets_available, deposit_amount < assets_available
pool_a._totalBorrow = int(10e23)
- pool_a._curr_deposit_amount = int(1e23)
+ pool_a._user_deposits = int(1e23)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -398,21 +269,21 @@ def test_check_allocations_aave(self) -> None:
assets_and_pools = {
"total_assets": int(200e18),
"pools": {
- A: AaveV3DefaultInterestRatePool(
+ A: AaveV3DefaultInterestRateV2Pool(
user_address=ADDRESS_ZERO,
contract_address=A,
),
},
}
- pool_a: AaveV3DefaultInterestRatePool = assets_and_pools["pools"][A]
- pool_a.sync(ADDRESS_ZERO, self.w3)
+ pool_a: AaveV3DefaultInterestRateV2Pool = assets_and_pools["pools"][A]
+ pool_a.sync(self.w3)
# case: borrow_amount <= assets_available, deposit_amount < assets_available
- pool_a._total_supplied = int(100e6)
+ pool_a._total_supplied_assets = int(100e6)
pool_a._nextTotalStableDebt = 0
pool_a._totalVariableDebt = int(10e6)
- pool_a._collateral_amount = int(5e18)
+ pool_a._user_deposits = int(5e18)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -421,7 +292,7 @@ def test_check_allocations_aave(self) -> None:
# case: borrow_amount > assets_available, deposit_amount >= assets_available
pool_a._nextTotalStableDebt = 0
pool_a._totalVariableDebt = int(97e6)
- pool_a._collateral_amount = int(5e18)
+ pool_a._user_deposits = int(5e18)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -430,7 +301,7 @@ def test_check_allocations_aave(self) -> None:
# should return True
pool_a._nextTotalStableDebt = 0
pool_a._totalVariableDebt = int(97e6)
- pool_a._collateral_amount = int(5e18)
+ pool_a._user_deposits = int(5e18)
allocations[A] = int(4e18)
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -439,7 +310,7 @@ def test_check_allocations_aave(self) -> None:
# case: borrow_amount > assets_available, deposit_amount < assets_available
pool_a._nextTotalStableDebt = 0
pool_a._totalVariableDebt = int(97e6)
- pool_a._collateral_amount = int(1e18)
+ pool_a._user_deposits = int(1e18)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -463,9 +334,9 @@ def test_check_allocations_compound(self) -> None:
pool_a.sync(self.w3)
# case: borrow_amount <= assets_available, deposit_amount < assets_available
- pool_a._total_supply = int(100e14)
+ pool_a._total_supplied_assets = int(100e14)
pool_a._total_borrow = int(10e14)
- pool_a._deposit_amount = int(5e14)
+ pool_a._user_deposits = int(5e14)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -473,7 +344,7 @@ def test_check_allocations_compound(self) -> None:
# case: borrow_amount > assets_available, deposit_amount >= assets_available
pool_a._total_borrow = int(97e14)
- pool_a._deposit_amount = int(5e14)
+ pool_a._user_deposits = int(5e14)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -481,7 +352,7 @@ def test_check_allocations_compound(self) -> None:
# should return True
pool_a._total_borrow = int(97e14)
- pool_a._deposit_amount = int(5e14)
+ pool_a._user_deposits = int(5e14)
allocations[A] = int(4e26)
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -489,7 +360,7 @@ def test_check_allocations_compound(self) -> None:
# case: borrow_amount > assets_available, deposit_amount < assets_available
pool_a._total_borrow = int(97e14)
- pool_a._deposit_amount = int(1e14)
+ pool_a._user_deposits = int(1e14)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -525,9 +396,9 @@ def test_check_allocations_morpho(self) -> None:
pool_a.sync(self.w3)
# case: borrow_amount <= assets_available, deposit_amount < assets_available
- pool_a._total_assets = int(100e14)
+ pool_a._total_supplied_assets = int(100e14)
pool_a._curr_borrows = int(10e14)
- pool_a._user_assets = int(5e14)
+ pool_a._user_deposits = int(5e14)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -535,7 +406,7 @@ def test_check_allocations_morpho(self) -> None:
# case: borrow_amount > assets_available, deposit_amount >= assets_available
pool_a._curr_borrows = int(97e14)
- pool_a._user_assets = int(5e14)
+ pool_a._user_deposits = int(5e14)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -543,7 +414,7 @@ def test_check_allocations_morpho(self) -> None:
# should return True
pool_a._curr_borrows = int(97e14)
- pool_a._user_assets = int(5e14)
+ pool_a._user_deposits = int(5e14)
allocations[A] = int(4e14)
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -551,7 +422,7 @@ def test_check_allocations_morpho(self) -> None:
# case: borrow_amount > assets_available, deposit_amount < assets_available
pool_a._curr_borrows = int(97e14)
- pool_a._user_assets = int(1e14)
+ pool_a._user_deposits = int(1e14)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -576,7 +447,7 @@ def test_check_allocations_yearn(self) -> None:
# case: max withdraw = deposit amount
pool_a._max_withdraw = int(1e9)
- pool_a._curr_deposit = int(1e9)
+ pool_a._user_deposits = int(1e9)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -584,7 +455,7 @@ def test_check_allocations_yearn(self) -> None:
# case: max withdraw = 0
pool_a._max_withdraw = 0
- pool_a._curr_deposit = int(1e9)
+ pool_a._user_deposits = int(1e9)
allocations[A] = 1
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -592,7 +463,7 @@ def test_check_allocations_yearn(self) -> None:
# should return True
pool_a._max_withdraw = int(1e9)
- pool_a._curr_deposit = int(5e9)
+ pool_a._user_deposits = int(5e9)
allocations[A] = int(4e9)
result = check_allocations(assets_and_pools, allocations, alloc_threshold=0)
@@ -636,48 +507,136 @@ def test_format_allocations_empty(self) -> None:
self.assertEqual(result, expected_output)
- def test_get_similarity_matrix(self) -> None:
+ def test_get_allocation_similarity_matrix(self) -> None:
apys_and_allocations = {
"miner_1": {
"apy": int(0.05e18),
- "allocations": {"pool_1": 30, "pool_2": 20},
+ "allocations": {"pool_1": 30e18, "pool_2": 20e18},
},
"miner_2": {
"apy": int(0.04e18),
- "allocations": {"pool_1": 40, "pool_2": 10},
+ "allocations": {"pool_1": 40e18, "pool_2": 10e18},
},
"miner_3": {
"apy": int(0.06e18),
- "allocations": {"pool_1": 30, "pool_2": 20},
+ "allocations": {"pool_1": 30e18, "pool_2": 20e18},
},
}
assets_and_pools = {
"pools": {
- "pool_1": {"reserve_size": 100},
- "pool_2": {"reserve_size": 100},
+ "pool_1": {"reserve_size": 100e18},
+ "pool_2": {"reserve_size": 100e18},
},
- "total_assets": 100,
+ "total_assets": 10e18,
}
total_assets = assets_and_pools["total_assets"]
- normalization_factor = np.sqrt(float(2 * total_assets**2)) # √(2 * total_assets^2)
+
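+        # get_distance is assumed to normalize the allocation-vector distance by total_assets (its third argument)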
+ expected_similarity_matrix = {
+ "miner_2": {
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(40e18), gmpy2.mpz(10e18)], dtype=object),
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ total_assets,
+ ),
+ "miner_3": get_distance(
+ np.array([gmpy2.mpz(40e18), gmpy2.mpz(10e18)], dtype=object),
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ total_assets,
+ ),
+ },
+ "miner_1": {
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ np.array([gmpy2.mpz(40e18), gmpy2.mpz(10e18)], dtype=object),
+ total_assets,
+ ),
+ "miner_3": get_distance(
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ total_assets,
+ ),
+ },
+ "miner_3": {
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ total_assets,
+ ),
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(30e18), gmpy2.mpz(20e18)], dtype=object),
+ np.array([gmpy2.mpz(40e18), gmpy2.mpz(10e18)], dtype=object),
+ total_assets,
+ ),
+ },
+ }
+
+ result = get_allocation_similarity_matrix(apys_and_allocations, assets_and_pools)
+
+ for miner_a in expected_similarity_matrix:
+ for miner_b in expected_similarity_matrix[miner_a]:
+ self.assertAlmostEqual(
+ result[miner_a][miner_b],
+ expected_similarity_matrix[miner_a][miner_b],
+ places=5,
+ )
+
+ def test_get_apy_similarity_matrix(self) -> None:
+ apys_and_allocations = {
+ "miner_1": {
+ "apy": int(0.05e18),
+ "allocations": {"pool_1": 30e18, "pool_2": 20e18},
+ },
+ "miner_2": {
+ "apy": int(0.04e18),
+ "allocations": {"pool_1": 40e18, "pool_2": 10e18},
+ },
+ "miner_3": {
+ "apy": int(0.06e18),
+ "allocations": {"pool_1": 30e18, "pool_2": 20e18},
+ },
+ }
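+        # the expected distances below use the larger APY of each pair as the normalization scale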
expected_similarity_matrix = {
"miner_1": {
- "miner_2": np.linalg.norm(np.array([30, 20]) - np.array([40, 10])) / normalization_factor,
- "miner_3": np.linalg.norm(np.array([30, 20]) - np.array([30, 20])) / normalization_factor,
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(0.05e18)], dtype=object),
+ np.array([gmpy2.mpz(0.04e18)], dtype=object),
+ gmpy2.mpz(0.05e18),
+ ),
+ "miner_3": get_distance(
+ np.array([gmpy2.mpz(0.05e18)], dtype=object),
+ np.array([gmpy2.mpz(0.06e18)], dtype=object),
+ gmpy2.mpz(0.06e18),
+ ),
},
"miner_2": {
- "miner_1": np.linalg.norm(np.array([40, 10]) - np.array([30, 20])) / normalization_factor,
- "miner_3": np.linalg.norm(np.array([40, 10]) - np.array([30, 20])) / normalization_factor,
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(0.04e18)], dtype=object),
+ np.array([gmpy2.mpz(0.05e18)], dtype=object),
+ gmpy2.mpz(0.05e18),
+ ),
+ "miner_3": get_distance(
+ np.array([gmpy2.mpz(0.04e18)], dtype=object),
+ np.array([gmpy2.mpz(0.06e18)], dtype=object),
+ gmpy2.mpz(0.06e18),
+ ),
},
"miner_3": {
- "miner_1": np.linalg.norm(np.array([30, 20]) - np.array([30, 20])) / normalization_factor,
- "miner_2": np.linalg.norm(np.array([30, 20]) - np.array([40, 10])) / normalization_factor,
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(0.06e18)], dtype=object),
+ np.array([gmpy2.mpz(0.05e18)], dtype=object),
+ gmpy2.mpz(0.06e18),
+ ),
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(0.06e18)], dtype=object),
+ np.array([gmpy2.mpz(0.04e18)], dtype=object),
+ gmpy2.mpz(0.06e18),
+ ),
},
}
- result = get_similarity_matrix(apys_and_allocations, assets_and_pools)
+ result = get_apy_similarity_matrix(apys_and_allocations)
for miner_a in expected_similarity_matrix:
for miner_b in expected_similarity_matrix[miner_a]:
@@ -687,7 +646,7 @@ def test_get_similarity_matrix(self) -> None:
places=5,
)
- def test_get_similarity_matrix_empty(self) -> None:
+ def test_get_allocation_similarity_matrix_empty(self) -> None:
apys_and_allocations = {
"miner_1": {
"apy": int(0.05e18),
@@ -708,21 +667,91 @@ def test_get_similarity_matrix_empty(self) -> None:
}
total_assets = assets_and_pools["total_assets"]
- normalization_factor = np.sqrt(float(2 * total_assets**2)) # √(2 * total_assets^2)
expected_similarity_matrix = {
"miner_1": {
- "miner_2": np.linalg.norm(np.array([30, 20]) - np.array([40, 10])) / normalization_factor,
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(30), gmpy2.mpz(20)], dtype=object),
+ np.array([gmpy2.mpz(40), gmpy2.mpz(10)], dtype=object),
+ total_assets,
+ ),
"miner_3": float("inf"),
},
"miner_2": {
- "miner_1": np.linalg.norm(np.array([40, 10]) - np.array([30, 20])) / normalization_factor,
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(40), gmpy2.mpz(10)], dtype=object),
+ np.array([gmpy2.mpz(30), gmpy2.mpz(20)], dtype=object),
+ total_assets,
+ ),
"miner_3": float("inf"),
},
"miner_3": {"miner_1": float("inf"), "miner_2": float("inf")},
}
- result = get_similarity_matrix(apys_and_allocations, assets_and_pools)
+ result = get_allocation_similarity_matrix(apys_and_allocations, assets_and_pools)
+
+ for miner_a in expected_similarity_matrix:
+ for miner_b in expected_similarity_matrix[miner_a]:
+ self.assertAlmostEqual(
+ result[miner_a][miner_b],
+ expected_similarity_matrix[miner_a][miner_b],
+ places=5,
+ )
+
+ def test_get_apy_similarity_matrix_empty(self) -> None:
+ apys_and_allocations = {
+ "miner_1": {
+ "apy": int(0.05e18),
+ "allocations": {"pool_1": 30, "pool_2": 20},
+ },
+ "miner_2": {
+ "apy": int(0.04e18),
+ "allocations": {"pool_1": 40, "pool_2": 10},
+ },
+ "miner_3": {"apy": 0, "allocations": None},
+ }
+ assets_and_pools = {
+ "pools": {
+ "pool_1": {"reserve_size": 100},
+ "pool_2": {"reserve_size": 100},
+ },
+ "total_assets": 100,
+ }
+
+ total_assets = assets_and_pools["total_assets"]
+
+ expected_similarity_matrix = {
+ "miner_1": {
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(0.05e18)], dtype=object),
+ np.array([gmpy2.mpz(0.04e18)], dtype=object),
+ gmpy2.mpz(0.05e18),
+ ),
+ "miner_3": get_distance(
+ np.array([gmpy2.mpz(0.05e18)], dtype=object), np.array([gmpy2.mpz(0)], dtype=object), gmpy2.mpz(0.05e18)
+ ),
+ },
+ "miner_2": {
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(0.04e18)], dtype=object),
+ np.array([gmpy2.mpz(0.05e18)], dtype=object),
+ gmpy2.mpz(0.05e18),
+ ),
+ "miner_3": get_distance(
+ np.array([gmpy2.mpz(0.04e18)], dtype=object), np.array([gmpy2.mpz(0)], dtype=object), gmpy2.mpz(0.04e18)
+ ),
+ },
+ "miner_3": {
+ "miner_1": get_distance(
+ np.array([gmpy2.mpz(0)], dtype=object), np.array([gmpy2.mpz(0.05e18)], dtype=object), gmpy2.mpz(0.05e18)
+ ),
+ "miner_2": get_distance(
+ np.array([gmpy2.mpz(0)], dtype=object), np.array([gmpy2.mpz(0.04e18)], dtype=object), gmpy2.mpz(0.04e18)
+ ),
+ },
+ }
+
+ result = get_apy_similarity_matrix(apys_and_allocations)
for miner_a in expected_similarity_matrix:
for miner_b in expected_similarity_matrix[miner_a]:
@@ -733,44 +762,112 @@ def test_get_similarity_matrix_empty(self) -> None:
)
def test_calculate_penalties(self) -> None:
- similarity_matrix = {
+ allocation_similarity_matrix = {
+ "1": {"2": 0.05, "3": 0.2},
+ "2": {"1": 0.05, "3": 0.1},
+ "3": {"1": 0.2, "2": 0.1},
+ }
+ apy_similarity_matrix = {
"1": {"2": 0.05, "3": 0.2},
"2": {"1": 0.05, "3": 0.1},
"3": {"1": 0.2, "2": 0.1},
}
axon_times = {"1": 1.0, "2": 2.0, "3": 3.0}
- similarity_threshold = 0.1
+
+ allocation_similarity_threshold = 0.2
+ apy_similarity_threshold = 0.1
expected_penalties = {"1": 0, "2": 1, "3": 1}
- result = calculate_penalties(similarity_matrix, axon_times, similarity_threshold)
+ result = calculate_penalties(
+ allocation_similarity_matrix,
+ apy_similarity_matrix,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
+ )
+
+ self.assertEqual(result, expected_penalties)
+
+ def test_calculate_penalties_no_apy_similarities(self) -> None:
+ allocation_similarity_matrix = {
+ "1": {"2": 0.05, "3": 0.2},
+ "2": {"1": 0.05, "3": 0.1},
+ "3": {"1": 0.2, "2": 0.1},
+ }
+ apy_similarity_matrix = {
+ "1": {"2": 0.05, "3": 0.2},
+ "2": {"1": 0.05, "3": 0.1},
+ "3": {"1": 0.2, "2": 0.1},
+ }
+ axon_times = {"1": 1.0, "2": 2.0, "3": 3.0}
+ allocation_similarity_threshold = 0.2
+ apy_similarity_threshold = 0.05
+
+ expected_penalties = {"1": 0, "2": 1, "3": 0}
+ result = calculate_penalties(
+ allocation_similarity_matrix,
+ apy_similarity_matrix,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
+ )
self.assertEqual(result, expected_penalties)
def test_calculate_penalties_no_similarities(self) -> None:
- similarity_matrix = {
+ allocation_similarity_matrix = {
+ "1": {"2": 0.5, "3": 0.6},
+ "2": {"1": 0.5, "3": 0.7},
+ "3": {"1": 0.6, "2": 0.7},
+ }
+ apy_similarity_matrix = {
"1": {"2": 0.5, "3": 0.6},
"2": {"1": 0.5, "3": 0.7},
"3": {"1": 0.6, "2": 0.7},
}
axon_times = {"1": 1.0, "2": 2.0, "3": 3.0}
- similarity_threshold = 0.1
+
+ allocation_similarity_threshold = 0.3
+ apy_similarity_threshold = 0.1
expected_penalties = {"1": 0, "2": 0, "3": 0}
- result = calculate_penalties(similarity_matrix, axon_times, similarity_threshold)
+ result = calculate_penalties(
+ allocation_similarity_matrix,
+ apy_similarity_matrix,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
+ )
self.assertEqual(result, expected_penalties)
def test_calculate_penalties_equal_times(self) -> None:
- similarity_matrix = {
+ allocation_similarity_matrix = {
"1": {"2": 0.05, "3": 0.05},
"2": {"1": 0.05, "3": 0.05},
"3": {"1": 0.05, "2": 0.05},
}
+ apy_similarity_matrix = {
+ "1": {"2": 0.05, "3": 0.05},
+ "2": {"1": 0.05, "3": 0.05},
+ "3": {"1": 0.05, "2": 0.05},
+ }
+
axon_times = {"1": 1.0, "2": 1.0, "3": 1.0}
- similarity_threshold = 0.1
+
+ allocation_similarity_threshold = 0.1
+
+ apy_similarity_threshold = 0.2
expected_penalties = {"1": 2, "2": 2, "3": 2}
- result = calculate_penalties(similarity_matrix, axon_times, similarity_threshold)
+
+ result = calculate_penalties(
+ allocation_similarity_matrix,
+ apy_similarity_matrix,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
+ )
self.assertEqual(result, expected_penalties)
@@ -797,9 +894,9 @@ def test_calculate_rewards_with_no_penalties(self) -> None:
def test_adjust_rewards_for_plagiarism(self) -> None:
rewards_apy = torch.Tensor([0.05 / 0.05, 0.04 / 0.05, 0.03 / 0.05])
apys_and_allocations = {
- "0": {"apy": 0.05, "allocations": {"asset_1": 200, "asset_2": 300}},
- "1": {"apy": 0.04, "allocations": {"asset_1": 202, "asset_2": 303}},
- "2": {"apy": 0.03, "allocations": {"asset_1": 200, "asset_2": 400}},
+ "0": {"apy": 50, "allocations": {"asset_1": 200, "asset_2": 300}}, # APY: int
+ "1": {"apy": 40, "allocations": {"asset_1": 202, "asset_2": 303}},
+ "2": {"apy": 30, "allocations": {"asset_1": 200, "asset_2": 400}},
}
assets_and_pools = {
"total_assets": 500,
@@ -808,9 +905,21 @@ def test_adjust_rewards_for_plagiarism(self) -> None:
uids = ["0", "1", "2"]
axon_times = {"0": 1.0, "1": 2.0, "2": 3.0}
+ allocation_similarity_threshold = 0.1
+
+ apy_similarity_threshold = 0.2
+
expected_rewards = torch.Tensor([1.0, 0.0, 0.03 / 0.05])
+
result = adjust_rewards_for_plagiarism(
- self.vali, rewards_apy, apys_and_allocations, assets_and_pools, uids, axon_times
+ self.vali,
+ rewards_apy,
+ apys_and_allocations,
+ assets_and_pools,
+ uids,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
)
torch.testing.assert_close(result, expected_rewards, rtol=0, atol=1e-5)
@@ -818,8 +927,8 @@ def test_adjust_rewards_for_plagiarism(self) -> None:
def test_adjust_rewards_for_one_plagiarism(self) -> None:
rewards_apy = torch.Tensor([1.0, 1.0])
apys_and_allocations = {
- "0": {"apy": 0.05, "allocations": {"asset_1": 200, "asset_2": 300}},
- "1": {"apy": 0.05, "allocations": {"asset_1": 200, "asset_2": 300}},
+ "0": {"apy": 50, "allocations": {"asset_1": 200, "asset_2": 300}},
+ "1": {"apy": 50, "allocations": {"asset_1": 200, "asset_2": 300}},
}
assets_and_pools = {
"total_assets": 500,
@@ -829,12 +938,214 @@ def test_adjust_rewards_for_one_plagiarism(self) -> None:
axon_times = {"0": 1.0, "1": 2.0}
expected_rewards = torch.Tensor([1.0, 0.0])
+
+ allocation_similarity_threshold = 0.1
+ apy_similarity_threshold = 0.2
+
result = adjust_rewards_for_plagiarism(
- self.vali, rewards_apy, apys_and_allocations, assets_and_pools, uids, axon_times
+ self.vali,
+ rewards_apy,
+ apys_and_allocations,
+ assets_and_pools,
+ uids,
+ axon_times,
+ allocation_similarity_threshold,
+ apy_similarity_threshold,
)
torch.testing.assert_close(result, expected_rewards, rtol=0, atol=1e-5)
+class TestCalculateApy(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls) -> None:
+        # runs tests on a local mainnet fork at block 21080765 (individual tests re-fork as needed)
+ cls.w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
+ assert cls.w3.is_connected()
+
+ cls.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21080765,
+ },
+ },
+ ],
+ )
+
+ cls.snapshot_id = cls.w3.provider.make_request("evm_snapshot", []) # type: ignore[]
+
+ def tearDown(self) -> None:
+ # Optional: Revert to the original snapshot after each test
+ self.w3.provider.make_request("evm_revert", self.snapshot_id) # type: ignore[]
+
+ def test_calculate_apy_sturdy(self) -> None:
+ self.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21075005,
+ },
+ },
+ ],
+ )
+
+ selected_entry = POOL_REGISTRY["Sturdy Crvusd Aggregator"]
+ selected = assets_pools_for_challenge_data(selected_entry, self.w3)
+
+ assets_and_pools = selected["assets_and_pools"]
+ user_address = selected["user_address"]
+ synapse = AllocateAssets(
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ assets_and_pools=assets_and_pools,
+ user_address=user_address,
+ )
+
+ allocations = naive_algorithm(self, synapse)
+
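+        # record each pool's share price at the earlier block; annualized_yield_pct presumably compares it against the post-fast-forward price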
+ extra_metadata = {}
+ for contract_address, pool in assets_and_pools["pools"].items():
+ pool.sync(self.w3)
+ extra_metadata[contract_address] = pool._share_price
+
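+        # fast-forward the fork to a later block (21075005 -> 21080765) so yield accrues between the two snapshots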
+ self.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21080765,
+ },
+ },
+ ],
+ )
+
+ for pool in assets_and_pools["pools"].values():
+ pool.sync(self.w3)
+
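+        # 604800 seconds (one week) is the elapsed period passed to annualized_yield_pct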
+ apy = annualized_yield_pct(allocations, assets_and_pools, 604800, extra_metadata)
+ print(f"annualized yield: {(float(apy)/1e18) * 100}%")
+ self.assertGreater(apy, 0)
+
+ def test_calculate_apy_aave(self) -> None:
+ self.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21075005,
+ },
+ },
+ ],
+ )
+
+        # Aave pools - with Yearn strategies as their users
+ selected_entry = {
+ "assets_and_pools": {
+ "pools": {
+ "0x018008bfb33d285247A21d44E50697654f754e63": {
+ "pool_type": "AAVE_DEFAULT",
+ "contract_address": "0x018008bfb33d285247A21d44E50697654f754e63",
+ "user_address": "0xF0825750791A4444c5E70743270DcfA8Bb38f959",
+ },
+ "0x4DEDf26112B3Ec8eC46e7E31EA5e123490B05B8B": {
+ "pool_type": "AAVE_TARGET",
+ "contract_address": "0x4DEDf26112B3Ec8eC46e7E31EA5e123490B05B8B",
+ "user_address": "0x1fd862499e9b9402de6c599b6c391f83981180ab",
+ },
+ }
+ }
+ }
+
+ selected = assets_pools_for_challenge_data(selected_entry, self.w3)
+
+ assets_and_pools = selected["assets_and_pools"]
+ synapse = AllocateAssets(
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ assets_and_pools=assets_and_pools,
+ )
+
+ allocations = naive_algorithm(self, synapse)
+
+ extra_metadata = {}
+ for contract_address, pool in assets_and_pools["pools"].items():
+ pool.sync(self.w3)
+ extra_metadata[contract_address] = pool._normalized_income
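+            # for Aave-style pools the normalized income (liquidity index) plays the role of the share price here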
+
+ self.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21080765,
+ },
+ },
+ ],
+ )
+
+ for pool in assets_and_pools["pools"].values():
+ pool.sync(self.w3)
+
+ apy = annualized_yield_pct(allocations, assets_and_pools, 604800, extra_metadata)
+ print(f"annualized yield: {(float(apy)/1e18) * 100}%")
+ self.assertGreater(apy, 0)
+
+ def test_calculate_apy_morpho(self) -> None:
+ self.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21075005,
+ },
+ },
+ ],
+ )
+
+ selected_entry = POOL_REGISTRY["Morpho USDC Vaults"]
+ selected = assets_pools_for_challenge_data(selected_entry, self.w3)
+
+ assets_and_pools = selected["assets_and_pools"]
+ user_address = selected["user_address"]
+ synapse = AllocateAssets(
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ assets_and_pools=assets_and_pools,
+ user_address=user_address,
+ )
+
+ allocations = naive_algorithm(self, synapse)
+
+ extra_metadata = {}
+ for contract_address, pool in assets_and_pools["pools"].items():
+ pool.sync(self.w3)
+ extra_metadata[contract_address] = pool._share_price
+
+ self.w3.provider.make_request(
+ "hardhat_reset", # type: ignore[]
+ [
+ {
+ "forking": {
+ "jsonRpcUrl": WEB3_PROVIDER_URL,
+ "blockNumber": 21080765,
+ },
+ },
+ ],
+ )
+
+ for pool in assets_and_pools["pools"].values():
+ pool.sync(self.w3)
+
+ apy = annualized_yield_pct(allocations, assets_and_pools, 604800, extra_metadata)
+ print(f"annualized yield: {(float(apy)/1e18) * 100}%")
+ self.assertGreater(apy, 0)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/tests/unit/validator/test_simulator.py b/tests/unit/validator/test_simulator.py
deleted file mode 100644
index b8dec87..0000000
--- a/tests/unit/validator/test_simulator.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import unittest
-from sturdy.utils.ethmath import wei_div
-from sturdy.validator.simulator import Simulator
-from sturdy.constants import *
-from sturdy.utils.misc import borrow_rate
-import numpy as np
-import copy
-
-
-def chk_eq_state(init_state, new_state):
- return (
- init_state[0] == new_state[0] # Compare the type of PRNG
- and np.array_equal(init_state[1], new_state[1]) # Compare the state of the PRNG
- and init_state[2] == new_state[2] # Compare the position in the PRNG's state
- and init_state[3] == new_state[3] # Compare the position in the PRNG's buffer
- and init_state[4] == new_state[4] # Compare the state of the PRNG's buffer
- )
-
-
-class TestSimulator(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- cls.simulator = Simulator(reversion_speed=0.05)
-
- def test_init_data(self):
- self.simulator.rng_state_container = np.random.RandomState(69)
- self.simulator.init_rng = np.random.RandomState(69)
- self.simulator.init_data()
- self.assertIsNotNone(self.simulator.assets_and_pools)
- self.assertIsNotNone(self.simulator.allocations)
- self.assertIsNotNone(self.simulator.pool_history)
- self.assertEqual(len(self.simulator.pool_history), 1)
-
- initial_pool_data = self.simulator.pool_history[0]
- self.assertEqual(len(initial_pool_data), NUM_POOLS)
-
- for pool in initial_pool_data.values():
- self.assertTrue(hasattr(pool, "borrow_amount"))
- self.assertTrue(hasattr(pool, "reserve_size"))
- self.assertTrue(hasattr(pool, "borrow_rate"))
- self.assertGreaterEqual(pool.borrow_amount, 0)
- self.assertGreaterEqual(pool.reserve_size, pool.borrow_amount)
- self.assertGreaterEqual(pool.borrow_rate, 0)
-
- self.simulator = Simulator(
- reversion_speed=0.05,
- )
-
- # should raise error
- self.assertRaises(RuntimeError, self.simulator.init_data)
-
- def test_update_reserves_with_allocs(self):
- self.simulator.rng_state_container = np.random.RandomState(69)
- self.simulator.init_rng = np.random.RandomState(69)
- self.simulator.init_data()
-
- init_pools = copy.deepcopy(self.simulator.assets_and_pools["pools"])
-
- contract_addresses = [addr for addr in self.simulator.assets_and_pools["pools"]]
-
- allocations = {
- contract_addresses[i]: self.simulator.assets_and_pools["total_assets"]
- / len(init_pools)
- for i in range(len(init_pools))
- }
-
- self.simulator.update_reserves_with_allocs(allocations)
-
- for uid, init_pool in init_pools.items():
- # check pools
- new_pool = self.simulator.assets_and_pools["pools"][uid]
- reserve_should_be = allocations[uid] + init_pool.reserve_size
- self.assertEqual(reserve_should_be, new_pool.reserve_size)
-
- # check init pool_history datapoint
- new_pool_hist_init = self.simulator.pool_history[0][uid]
- b_rate_should_be = borrow_rate(
- wei_div(
- new_pool_hist_init.borrow_amount, new_pool_hist_init.reserve_size
- ),
- new_pool,
- )
- self.assertEqual(reserve_should_be, new_pool_hist_init.reserve_size)
- self.assertEqual(b_rate_should_be, new_pool_hist_init.borrow_rate)
-
- # we shouldn't need to list out all the pools we are allocating to
- # the ones that are not lists will not be allocated to at all
- def test_update_reserves_with_allocs_partial(self):
- self.simulator.rng_state_container = np.random.RandomState(69)
- self.simulator.init_rng = np.random.RandomState(69)
- self.simulator.init_data()
-
- init_pools = copy.deepcopy(self.simulator.assets_and_pools["pools"])
- total_assets = self.simulator.assets_and_pools["total_assets"]
-
- contract_addresses = [addr for addr in self.simulator.assets_and_pools["pools"]]
-
- allocs = {
- contract_addresses[0]: total_assets / 10
- } # should be 0.1 if total assets is 1
-
- self.simulator.update_reserves_with_allocs(allocs)
-
- for uid, alloc in allocs.items():
- # for uid, init_pool in init_pools.items():
- # check pools
- init_pool = init_pools[uid]
- new_pool = self.simulator.assets_and_pools["pools"][uid]
- reserve_should_be = alloc + init_pool.reserve_size
- self.assertEqual(reserve_should_be, new_pool.reserve_size)
-
- # check init pool_history datapoint
- new_pool_hist_init = self.simulator.pool_history[0][uid]
- b_rate_should_be = borrow_rate(
- wei_div(
- new_pool_hist_init.borrow_amount, new_pool_hist_init.reserve_size
- ),
- new_pool,
- )
- self.assertEqual(reserve_should_be, new_pool_hist_init.reserve_size)
- self.assertEqual(b_rate_should_be, new_pool_hist_init.borrow_rate)
-
- def test_initialization(self):
- self.simulator.initialize(timesteps=50)
- self.assertIsNotNone(self.simulator.init_rng)
- self.assertIsNotNone(self.simulator.rng_state_container)
- init_state_container = copy.deepcopy(self.simulator.init_rng)
- init_state = init_state_container.get_state()
- rng_state = self.simulator.rng_state_container.get_state()
- states_equal = chk_eq_state(init_state, rng_state)
- self.assertTrue(states_equal)
-
- # should reinit with fresh rng state
- self.simulator.initialize(timesteps=50)
- new_state_container = copy.deepcopy(self.simulator.rng_state_container)
-
- new_state = new_state_container.get_state()
- are_states_equal = chk_eq_state(init_state, new_state)
-
- self.assertFalse(are_states_equal)
-
- def test_reset(self):
- self.simulator.initialize(timesteps=50)
- self.simulator.init_data()
- init_state_container = copy.deepcopy(self.simulator.init_rng)
- init_state = init_state_container.get_state()
- init_assets_pools = copy.deepcopy(self.simulator.assets_and_pools)
- init_allocs = copy.deepcopy(self.simulator.allocations)
- # use the rng
- for i in range(10):
- self.simulator.rng_state_container.rand()
-
- after_container = self.simulator.rng_state_container
- new_state = after_container.get_state()
-
- are_states_equal = chk_eq_state(init_state, new_state)
- self.assertFalse(are_states_equal)
-
- self.simulator.reset()
-
- new_init_state_container = self.simulator.init_rng
- new_state_container = self.simulator.rng_state_container
- new_init_state = new_init_state_container.get_state()
- new_state = new_state_container.get_state()
-
- are_states_equal = chk_eq_state(init_state, new_init_state)
- self.assertTrue(are_states_equal)
-
- are_states_equal = chk_eq_state(new_state, new_init_state)
- self.assertTrue(are_states_equal)
-
- new_assets_pools = copy.deepcopy(self.simulator.assets_and_pools)
- new_allocs = copy.deepcopy(self.simulator.allocations)
-
- self.assertEqual(init_allocs, new_allocs)
- self.assertEqual(init_assets_pools, new_assets_pools)
-
- self.simulator = Simulator(
- reversion_speed=0.05,
- )
-
- # should raise error
- self.assertRaises(RuntimeError, self.simulator.reset)
-
- def test_sim_run(self):
- self.simulator.initialize(timesteps=50)
- self.simulator.init_data()
- self.simulator.run()
-
- self.assertEqual(len(self.simulator.pool_history), self.simulator.timesteps)
-
- # test to see if we're recording the right things
-
- for t in range(1, self.simulator.timesteps):
- pool_data = self.simulator.pool_history[t]
- self.assertEqual(len(pool_data), NUM_POOLS)
-
- for contract_addr, pool in pool_data.items():
- self.assertTrue(hasattr(pool, "borrow_amount"))
- self.assertTrue(hasattr(pool, "reserve_size"))
- self.assertTrue(hasattr(pool, "borrow_rate"))
- self.assertGreaterEqual(pool.borrow_amount, 0)
- self.assertGreaterEqual(pool.reserve_size, pool.borrow_amount)
- self.assertGreaterEqual(pool.borrow_rate, 0)
-
- for contract_addr, _ in self.simulator.assets_and_pools["pools"].items():
- borrow_amounts = [
- self.simulator.pool_history[T][contract_addr].borrow_amount
- for T in range(1, self.simulator.timesteps)
- ]
- borrow_rates = [
- self.simulator.pool_history[T][contract_addr].borrow_rate
- for T in range(1, self.simulator.timesteps)
- ]
-
- self.assertTrue(
- borrow_amounts.count(borrow_amounts[0]) < len(borrow_amounts)
- )
- self.assertTrue(borrow_rates.count(borrow_rates[0]) < len(borrow_rates))
-
- # check if simulation runs the same across "reset()s"
- # first run
- self.simulator.initialize(timesteps=50)
- self.simulator.init_data()
- self.simulator.run()
- pool_history0 = copy.deepcopy(self.simulator.pool_history)
- # second run - after reset
- self.simulator.reset()
- self.simulator.init_data()
- self.simulator.run()
- pool_history1 = self.simulator.pool_history
- self.assertEqual(pool_history0, pool_history1)
-
- self.simulator = Simulator(
- reversion_speed=0.05,
- )
-
- # should raise error
- self.assertRaises(RuntimeError, self.simulator.run)
-
- # pp.pprint(f"assets and pools: \n {self.validator.assets_and_pools}")
- # pp.pprint(f"pool history: \n {self.validator.pool_history}")
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/unit/validator/test_sql.py b/tests/unit/validator/test_sql.py
new file mode 100644
index 0000000..6740010
--- /dev/null
+++ b/tests/unit/validator/test_sql.py
@@ -0,0 +1,348 @@
+import json
+import sqlite3
+import unittest
+from datetime import datetime, timedelta
+from pathlib import Path
+
+from sturdy.protocol import REQUEST_TYPES
+from sturdy.validator.sql import (
+ add_api_key,
+ delete_api_key,
+ get_all_api_keys,
+ get_all_logs_for_key,
+ get_api_key_info,
+ get_db_connection,
+ get_miner_responses,
+ get_request_info,
+ log_allocations,
+ log_request,
+ update_api_key_balance,
+ update_api_key_name,
+ update_api_key_rate_limit,
+)
+
+TEST_DB = "test.db"
+
+
+# TODO: place this in a separate file?
+def create_tables(conn: sqlite3.Connection) -> None:
+ query = """CREATE TABLE api_keys (
+ key TEXT PRIMARY KEY,
+ name TEXT,
+ balance REAL,
+ rate_limit_per_minute INTEGER DEFAULT 60,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ );
+
+
+ CREATE TABLE logs (
+ key TEXT,
+ endpoint TEXT,
+ cost REAL,
+ balance REAL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY(key) REFERENCES api_keys(key) ON DELETE CASCADE
+ );
+
+ CREATE TABLE IF NOT EXISTS allocation_requests (
+ request_uid TEXT PRIMARY KEY,
+ assets_and_pools TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ );
+
+ CREATE TABLE active_allocs (
+ request_uid TEXT PRIMARY KEY,
+ scoring_period_end TIMESTAMP,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
+ );
+
+ CREATE TABLE IF NOT EXISTS allocations (
+ request_uid TEXT,
+ miner_uid TEXT,
+ allocation TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY (request_uid, miner_uid),
+ FOREIGN KEY (request_uid) REFERENCES allocation_requests (request_uid)
+ );
+
+    -- These ALTER statements add new columns to the allocation_requests and allocations tables
+ ALTER TABLE allocation_requests
+ ADD COLUMN request_type TEXT NOT NULL DEFAULT 1;
+ ALTER TABLE allocation_requests
+ ADD COLUMN metadata TEXT;
+ ALTER TABLE allocations
+ ADD COLUMN axon_time FLOAT NOT NULL DEFAULT 99999.0; -- large number for now"""
+
+ conn.executescript(query)
+
+
+class TestSQLFunctions(unittest.TestCase):
+ def setUp(self) -> None:
+ # purge sql db
+ path = Path(TEST_DB)
+ if path.exists():
+ path.unlink()
+ # Create an in-memory SQLite database
+ with get_db_connection(TEST_DB) as conn:
+ create_tables(conn)
+
+ def tearDown(self) -> None:
+ # purge sql db
+ path = Path(TEST_DB)
+ if path.exists():
+ path.unlink()
+
+ def test_add_and_get_api_key(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add an API key
+ add_api_key(conn, "test_key", 100.0, 60, "Test Key")
+ # Retrieve the API key information
+ info = get_api_key_info(conn, "test_key")
+ self.assertIsNotNone(info)
+ self.assertEqual(info["key"], "test_key")
+ self.assertEqual(info["balance"], 100.0)
+ self.assertEqual(info["rate_limit_per_minute"], 60)
+ self.assertEqual(info["name"], "Test Key")
+
+ def test_update_api_key_balance(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add an API key
+ add_api_key(conn, "test_key", 100.0, 60, "Test Key")
+ # Update the balance
+ update_api_key_balance(conn, "test_key", 200.0)
+ # Retrieve the updated information
+ info = get_api_key_info(conn, "test_key")
+ self.assertEqual(info["balance"], 200.0)
+
+ def test_update_api_key_rate_limit(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add an API key
+ add_api_key(conn, "test_key", 100.0, 60, "Test Key")
+ # Update the rate limit
+ update_api_key_rate_limit(conn, "test_key", 120)
+ # Retrieve the updated information
+ info = get_api_key_info(conn, "test_key")
+ self.assertEqual(info["rate_limit_per_minute"], 120)
+
+ def test_update_api_key_name(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add an API key
+ add_api_key(conn, "test_key", 100.0, 60, "Test Key")
+ # Update the name
+ update_api_key_name(conn, "test_key", "Updated Test Key")
+ # Retrieve the updated information
+ info = get_api_key_info(conn, "test_key")
+ self.assertEqual(info["name"], "Updated Test Key")
+
+ def test_delete_api_key(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add an API key
+ add_api_key(conn, "test_key", 100.0, 60, "Test Key")
+ # Delete the API key
+ delete_api_key(conn, "test_key")
+ # Attempt to retrieve the deleted key
+ info = get_api_key_info(conn, "test_key")
+ self.assertIsNone(info)
+
+ def test_get_all_api_keys(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add multiple API keys
+ add_api_key(conn, "key1", 100.0, 60, "Key 1")
+ add_api_key(conn, "key2", 200.0, 120, "Key 2")
+ # Retrieve all API keys
+ keys = get_all_api_keys(conn)
+ self.assertEqual(len(keys), 2)
+ self.assertEqual(keys[0]["key"], "key1")
+ self.assertEqual(keys[1]["key"], "key2")
+
+ def test_log_request_and_get_logs(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ # Add an API key
+ add_api_key(conn, "test_key", 100.0, 60, "Test Key")
+ # Log a request
+ api_key_info = get_api_key_info(conn, "test_key")
+ log_request(conn, api_key_info, "/test_endpoint", 1.0)
+ # Retrieve logs for the API key
+ logs = get_all_logs_for_key(conn, "test_key")
+ self.assertEqual(len(logs), 1)
+ self.assertEqual(logs[0]["key"], "test_key")
+ self.assertEqual(logs[0]["endpoint"], "/test_endpoint")
+ self.assertEqual(logs[0]["cost"], 1.0)
+
+ def test_get_db_connection(self) -> None:
+ # Test the get_db_connection function
+ with get_db_connection(TEST_DB) as conn:
+ self.assertIsNotNone(conn)
+ # Ensure tables are created
+ cursor = conn.cursor()
+ cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
+ tables = cursor.fetchall()
+ self.assertGreater(len(tables), 0)
+
+ def test_log_allocations(self) -> None:
+ with get_db_connection(TEST_DB) as conn:
+ request_uid = "sturdyrox"
+
+ selected_entry = {
+ "user_address": "0xcFB23D05f32eA0BE0dBb5078d189Cca89688945E",
+ "assets_and_pools": {
+ "total_assets": 69420,
+ "pools": {
+ "0x0669091F451142b3228171aE6aD794cF98288124": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0x0669091F451142b3228171aE6aD794cF98288124",
+ },
+ "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D": {
+ "pool_type": "STURDY_SILO",
+ "contract_address": "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D",
+ },
+ },
+ },
+ }
+
+ allocations = {
+ "0": {
+ "0x0669091F451142b3228171aE6aD794cF98288124": 3,
+ "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D": 7,
+ },
+ "1": {
+ "0x0669091F451142b3228171aE6aD794cF98288124": 2,
+ "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D": 8,
+ },
+ "2": {
+ "0x0669091F451142b3228171aE6aD794cF98288124": 6,
+ "0xFa68707be4b58FB9F10748E30e25A15113EdEE1D": 4,
+ },
+ }
+
+ assets_and_pools = selected_entry["assets_and_pools"]
+
+ log_allocations(
+ conn,
+ request_uid,
+ assets_and_pools,
+ extra_metadata={"yo": "wassup"},
+ allocations=allocations,
+ axon_times={"0": 6.9, "1": 4.2, "2": 1.0},
+ request_type=REQUEST_TYPES.SYNTHETIC,
+ scoring_period=69,
+ )
+
+ # Validate `allocation_requests` table
+ cur = conn.execute("SELECT * FROM allocation_requests WHERE request_uid = ?", (request_uid,))
+ allocation_request = dict(cur.fetchone())
+ self.assertIsNotNone(allocation_request)
+ self.assertEqual(allocation_request["request_uid"], request_uid)
+ self.assertEqual(allocation_request["metadata"], '{"yo":"wassup"}')
+ self.assertEqual(allocation_request["request_type"], str(int(REQUEST_TYPES.SYNTHETIC)))
+
+ # Validate `active_allocs` table
+ cur = conn.execute("SELECT * FROM active_allocs WHERE request_uid = ?", (request_uid,))
+ active_alloc = cur.fetchone()
+ self.assertIsNotNone(active_alloc)
+ self.assertEqual(active_alloc["request_uid"], request_uid)
+
+ # Validate `allocations` table
+ cur = conn.execute("SELECT * FROM allocations WHERE request_uid = ?", (request_uid,))
+ allocation_rows = cur.fetchall()
+ self.assertEqual(len(allocation_rows), len(allocations))
+ for miner_uid, miner_allocation in allocations.items():
+ for pool_id, allocation_value in miner_allocation.items():
+ row = next(
+ (
+ r
+ for r in allocation_rows
+ if r["miner_uid"] == miner_uid and json.loads(r["allocation"]).get(pool_id) == allocation_value
+ ),
+ None,
+ )
+ self.assertIsNotNone(row)
+ self.assertEqual(row["request_uid"], request_uid)
+ self.assertIn(pool_id, row["allocation"])
+ self.assertEqual(json.loads(row["allocation"])[pool_id], allocation_value)
+
+
+class TestMinerResponseRequestInfo(unittest.TestCase):
+ def setUp(self) -> None:
+ # Initialize an in-memory SQLite database
+ self.conn = sqlite3.connect(":memory:")
+ self.conn.row_factory = sqlite3.Row
+ create_tables(self.conn)
+
+ # Seed test data for allocations
+ self.request_uid = "test_request_1"
+ self.miner_uid = "miner_1"
+ created_at = datetime.utcnow()
+ self.conn.execute(
+ "INSERT INTO allocations (request_uid, miner_uid, allocation, created_at, axon_time) VALUES (?, ?, json(?), ?, ?)",
+ (self.request_uid, self.miner_uid, '{"pool_1": 100}', created_at, 1.2),
+ )
+ self.conn.execute(
+ "INSERT INTO allocations (request_uid, miner_uid, allocation, created_at, axon_time) VALUES (?, ?, json(?), ?, ?)",
+ (self.request_uid, "miner_2", '{"pool_2": 200}', created_at + timedelta(minutes=1), 2.3),
+ )
+
+ # Seed test data for allocation requests
+ self.conn.execute(
+ "INSERT INTO allocation_requests (request_uid, assets_and_pools, created_at, request_type, metadata) VALUES (?, json(?), ?, ?, json(?))",
+ (
+ self.request_uid,
+ '{"asset": {"pool": "data"}}',
+ created_at,
+ "TEST",
+ '{"meta": "data"}',
+ ),
+ )
+ self.conn.commit()
+
+ def tearDown(self) -> None:
+ self.conn.close()
+
+ def test_get_miner_responses_with_request_uid(self) -> None:
+ responses = get_miner_responses(self.conn, request_uid=self.request_uid)
+ self.assertEqual(len(responses), 2)
+ self.assertEqual(responses[0]["miner_uid"], "miner_1")
+ self.assertEqual(responses[0]["request_uid"], self.request_uid)
+ self.assertEqual(responses[1]["miner_uid"], "miner_2")
+
+ def test_get_miner_responses_with_miner_uid(self) -> None:
+ responses = get_miner_responses(self.conn, miner_uid="miner_2")
+ self.assertEqual(len(responses), 1)
+ self.assertEqual(responses[0]["miner_uid"], "miner_2")
+ self.assertEqual(json.loads(responses[0]["allocation"])["pool_2"], 200)
+
+ def test_get_miner_responses_with_time_range(self) -> None:
+ now = datetime.utcnow()
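+        # Build a +/- 5 minute window around now, expressed as millisecond timestamps.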
+ from_ts = int((now - timedelta(minutes=5)).timestamp() * 1000)
+ to_ts = int((now + timedelta(minutes=5)).timestamp() * 1000)
+
+ responses = get_miner_responses(self.conn, from_ts=from_ts, to_ts=to_ts)
+ self.assertEqual(len(responses), 2)
+
+ def test_get_request_info_with_request_uid(self) -> None:
+ info = get_request_info(self.conn, request_uid=self.request_uid)
+ self.assertEqual(len(info), 1)
+ self.assertEqual(info[0]["request_uid"], self.request_uid)
+ self.assertEqual(info[0]["request_type"], "TEST")
+ self.assertEqual(json.loads(info[0]["metadata"])["meta"], "data")
+
+ def test_get_request_info_with_time_range(self) -> None:
+ now = datetime.utcnow()
+ from_ts = int((now - timedelta(minutes=5)).timestamp() * 1000)
+ to_ts = int((now + timedelta(minutes=5)).timestamp() * 1000)
+
+ info = get_request_info(self.conn, from_ts=from_ts, to_ts=to_ts)
+ self.assertEqual(len(info), 1)
+ self.assertEqual(info[0]["request_uid"], self.request_uid)
+
+ def test_get_request_info_no_results(self) -> None:
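+        # A query window entirely in the future should return no request info.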
+ from_ts = int((datetime.utcnow() + timedelta(days=1)).timestamp() * 1000)
+ to_ts = int((datetime.utcnow() + timedelta(days=2)).timestamp() * 1000)
+
+ info = get_request_info(self.conn, from_ts=from_ts, to_ts=to_ts)
+ self.assertEqual(len(info), 0)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/unit/validator/test_validator.py b/tests/unit/validator/test_validator.py
deleted file mode 100644
index 414025e..0000000
--- a/tests/unit/validator/test_validator.py
+++ /dev/null
@@ -1,238 +0,0 @@
-import copy
-import unittest
-from unittest import IsolatedAsyncioTestCase
-
-import numpy as np
-import torch
-
-from neurons.validator import Validator
-from sturdy.constants import QUERY_TIMEOUT
-from sturdy.mock import MockDendrite
-from sturdy.pools import generate_assets_and_pools
-from sturdy.protocol import REQUEST_TYPES, AllocateAssets, AllocationsDict
-from sturdy.validator.reward import get_rewards
-
-
-class TestValidator(IsolatedAsyncioTestCase):
- @classmethod
- def setUpClass(cls) -> None:
- np.random.seed(69) # noqa: NPY002
- config = {
- "mock": True,
- "wandb": {"off": True},
- "mock_n": 16,
- "neuron": {"dont_save_events": True},
- }
- cls.validator = Validator(config=config)
- # TODO: this doesn't work?
- # cls.validator.simulator = Simulator(69)
-
- assets_and_pools = generate_assets_and_pools(np.random.RandomState(seed=420))
-
- cls.contract_addresses: list[str] = list(assets_and_pools["pools"].keys()) # type: ignore[]
-
- assets_and_pools["pools"][cls.contract_addresses[0]].borrow_amount = int(75e18)
- assets_and_pools["pools"][cls.contract_addresses[1]].borrow_amount = int(50e18)
- assets_and_pools["pools"][cls.contract_addresses[2]].borrow_amount = int(85e18)
- assets_and_pools["pools"][cls.contract_addresses[3]].borrow_amount = int(25e18)
- assets_and_pools["pools"][cls.contract_addresses[4]].borrow_amount = int(90e18)
- assets_and_pools["pools"][cls.contract_addresses[5]].borrow_amount = int(25e18)
- assets_and_pools["pools"][cls.contract_addresses[6]].borrow_amount = int(25e18)
- assets_and_pools["pools"][cls.contract_addresses[7]].borrow_amount = int(40e18)
- assets_and_pools["pools"][cls.contract_addresses[8]].borrow_amount = int(45e18)
- assets_and_pools["pools"][cls.contract_addresses[9]].borrow_amount = int(80e18)
-
- cls.assets_and_pools = {
- "pools": assets_and_pools["pools"],
- "total_assets": int(1000e18),
- }
-
- cls.allocations: AllocationsDict = {
- cls.contract_addresses[0]: int(100e18),
- cls.contract_addresses[1]: int(100e18),
- cls.contract_addresses[2]: int(200e18),
- cls.contract_addresses[3]: int(50e18),
- cls.contract_addresses[4]: int(200e18),
- cls.contract_addresses[5]: int(45e18),
- cls.contract_addresses[6]: int(45e18),
- cls.contract_addresses[7]: int(50e18),
- cls.contract_addresses[8]: int(50e18),
- cls.contract_addresses[9]: int(160e18),
- }
-
- cls.validator.simulator.initialize(timesteps=50)
-
- async def test_get_rewards(self) -> None:
- print("----==== test_get_rewards ====----")
-
- assets_and_pools = copy.deepcopy(self.assets_and_pools)
- allocations = copy.deepcopy(self.allocations)
-
- validator = self.validator
-
- validator.simulator.init_data(
- init_assets_and_pools=copy.deepcopy(assets_and_pools),
- init_allocations=copy.deepcopy(allocations),
- )
-
- active_uids = [str(uid) for uid in range(validator.metagraph.n.item()) if validator.metagraph.axons[uid].is_serving] # type: ignore[]
-
- active_axons = [validator.metagraph.axons[int(uid)] for uid in active_uids]
-
- synapse = AllocateAssets(
- request_type=REQUEST_TYPES.SYNTHETIC,
- assets_and_pools=copy.deepcopy(assets_and_pools),
- allocations=copy.deepcopy(allocations),
- )
-
- validator.dendrite = MockDendrite(wallet=validator.wallet, custom_allocs=True)
- responses = await validator.dendrite(
- # Send the query to selected miner axons in the network.
- axons=active_axons,
- # Construct a dummy query. This simply contains a single integer.
- synapse=synapse,
- deserialize=False,
- timeout=QUERY_TIMEOUT,
- )
-
- for response in responses:
- self.assertEqual(response.assets_and_pools, assets_and_pools)
- self.assertLessEqual(sum(response.allocations.values()), assets_and_pools["total_assets"])
-
- rewards, allocs = get_rewards(
- validator,
- validator.step,
- active_uids,
- responses=responses,
- assets_and_pools=assets_and_pools,
- )
-
- print(f"allocs: {allocs}")
-
-
- rewards_dict = {active_uids[k]: v for k, v in enumerate(list(rewards))}
- sorted_rewards = dict(sorted(rewards_dict.items(), key=lambda item: item[1], reverse=True)) # type: ignore[]
-
- print(f"sorted rewards: {sorted_rewards}")
-
- # rewards should not all be the same
- to_compare = torch.empty(rewards.shape)
- torch.fill(to_compare, rewards[0])
- self.assertFalse(torch.equal(rewards, to_compare))
-
- async def test_get_rewards_punish(self) -> None:
- print("----==== test_get_rewards_punish ====----")
- validator = self.validator
- assets_and_pools = copy.deepcopy(self.assets_and_pools)
-
- allocations = copy.deepcopy(self.allocations)
- # increase one of the allocations by +10000 -> clearly this means the miner is cheating!!!
- allocations[self.contract_addresses[0]] += int(10000e18)
-
- validator.simulator.reset()
- validator.simulator.init_data(
- init_assets_and_pools=copy.deepcopy(assets_and_pools),
- init_allocations=copy.deepcopy(allocations),
- )
-
- active_uids = [str(uid) for uid in range(validator.metagraph.n.item()) if validator.metagraph.axons[uid].is_serving] # type: ignore[]
-
- active_axons = [validator.metagraph.axons[int(uid)] for uid in active_uids]
-
- synapse = AllocateAssets(
- request_type=REQUEST_TYPES.SYNTHETIC,
- assets_and_pools=copy.deepcopy(assets_and_pools),
- allocations=copy.deepcopy(allocations),
- )
-
- validator.dendrite = MockDendrite(wallet=validator.wallet)
- responses = await validator.dendrite(
- # Send the query to selected miner axons in the network.
- axons=active_axons,
- # Construct a dummy query. This simply contains a single integer.
- synapse=synapse,
- deserialize=False,
- timeout=QUERY_TIMEOUT,
- )
-
- for response in responses:
- self.assertEqual(response.assets_and_pools, assets_and_pools)
- self.assertEqual(response.allocations, allocations)
-
- rewards, allocs = get_rewards(
- validator,
- validator.step,
- active_uids,
- responses=responses,
- assets_and_pools=assets_and_pools,
- )
-
- for allocInfo in allocs.values():
- self.assertEqual(allocInfo["apy"], 0)
-
- # rewards should all be the same (0)
- self.assertEqual(all(rewards), 0)
-
- rewards_dict = dict(enumerate(list(rewards)))
- sorted_rewards = dict(sorted(rewards_dict.items(), key=lambda item: item[1], reverse=True)) # type: ignore[]
-
- print(f"sorted rewards: {sorted_rewards}")
-
- assets_and_pools = copy.deepcopy(self.assets_and_pools)
-
- allocations = copy.deepcopy(self.allocations)
- # set one of the allocations to be negative! This should not be allowed!
- allocations[self.contract_addresses[0]] = -1
-
- validator.simulator.reset()
- validator.simulator.init_data(
- init_assets_and_pools=copy.deepcopy(assets_and_pools),
- init_allocations=copy.deepcopy(allocations),
- )
-
- active_uids = [str(uid) for uid in range(validator.metagraph.n.item()) if validator.metagraph.axons[uid].is_serving] # type: ignore[]
-
- active_axons = [validator.metagraph.axons[int(uid)] for uid in active_uids]
-
- synapse = AllocateAssets(
- request_type=REQUEST_TYPES.SYNTHETIC,
- assets_and_pools=copy.deepcopy(assets_and_pools),
- allocations=copy.deepcopy(allocations),
- )
-
- validator.dendrite = MockDendrite(wallet=validator.wallet)
- responses = await validator.dendrite(
- # Send the query to selected miner axons in the network.
- axons=active_axons,
- # Construct a dummy query. This simply contains a single integer.
- synapse=synapse,
- deserialize=False,
- timeout=QUERY_TIMEOUT,
- )
-
- for response in responses:
- self.assertEqual(response.assets_and_pools, assets_and_pools)
- self.assertEqual(response.allocations, allocations)
-
- rewards, allocs = get_rewards(
- validator,
- validator.step,
- active_uids,
- responses=responses,
- assets_and_pools=assets_and_pools,
- )
-
- for allocInfo in allocs.values():
- self.assertEqual(allocInfo["apy"], 0)
-
- # rewards should all be the same (0)
- self.assertEqual(all(rewards), 0)
-
- rewards_dict = dict(enumerate(list(rewards)))
- sorted_rewards = dict(sorted(rewards_dict.items(), key=lambda item: item[1], reverse=True)) # type: ignore[]
-
- print(f"sorted rewards: {sorted_rewards}")
-
-
-if __name__ == "__main__":
- unittest.main()