diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index b4dc3ab93..c3f3274f2 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -7,6 +7,7 @@ on:
- develop
- beta
- stable
+ - 'v*.*.*'
jobs:
build:
@@ -19,10 +20,10 @@ jobs:
- uses: actions/checkout@v2
with:
submodules: true
- - name: Set up Python 3.7
+ - name: Set up Python 3.11
uses: actions/setup-python@v1
with:
- python-version: 3.7
+ python-version: 3.11
- name: Build and publish container
run: |
export BRANCH=${GITHUB_REF##*/}
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 11e49e388..529460c6a 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -3,11 +3,11 @@ on: [push, pull_request]
env:
ETH_PRIVATE_KEY: ${{ secrets.ETH_PRIVATE_KEY }}
SCHAIN_TYPE: ${{ secrets.SCHAIN_TYPE }}
- MANAGER_TAG: "1.9.3-beta.0"
+ MANAGER_TAG: "1.10.0-v1.10.0.0"
IMA_TAG: "1.3.4-beta.5"
SGX_WALLET_TAG: "1.83.0-beta.5"
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- PYTHON_VERSION: 3.8
+ PYTHON_VERSION: 3.11
jobs:
test_core:
runs-on: ubuntu-latest
@@ -29,10 +29,9 @@ jobs:
run: bash ./scripts/install_python_dependencies.sh
- name: Lint with flake8
run: flake8 .
- - name: Launch hardhat node
- working-directory: hardhat-node
+ - name: Launch anvil node
run: |
- docker-compose up -d
+ docker run -d --network host --name anvil ghcr.io/foundry-rs/foundry anvil && sleep 5 && docker logs anvil --tail 1000
- name: Deploy manager & ima contracts
run: |
bash ./helper-scripts/deploy_test_ima.sh
diff --git a/.gitmodules b/.gitmodules
index 6af734868..0ad8fbe1c 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,7 +1,7 @@
[submodule "helper-scripts"]
path = helper-scripts
url = https://github.com/skalenetwork/helper-scripts.git
- branch = develop
+ branch = fix-for-latest-sm
[submodule "hardhat-node"]
path = hardhat-node
- url = https://github.com/skalenetwork/hardhat-node.git
+ url = https://github.com/skalenetwork/hardhat-node.git
diff --git a/Dockerfile b/Dockerfile
index a7d00e9b4..0a2c24bb3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,18 +1,14 @@
-FROM python:3.9-buster
+FROM python:3.11-bookworm
-RUN apt-get update && apt-get install -y wget git libxslt-dev iptables kmod swig3.0
-RUN ln -s /usr/bin/swig3.0 /usr/bin/swig
+RUN apt-get update && apt-get install -y wget git libxslt-dev iptables kmod swig
RUN mkdir /usr/src/admin
WORKDIR /usr/src/admin
COPY requirements.txt ./
COPY requirements-dev.txt ./
-RUN pip3 install --no-cache-dir -r requirements.txt
-RUN pip3 uninstall pycrypto -y
-RUN pip3 uninstall pycryptodome -y
-RUN pip3 install pycryptodome
+RUN pip3 install --no-cache-dir -r requirements.txt
COPY . .
diff --git a/VERSION b/VERSION
index aedc15bb0..e70b4523a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.5.3
+2.6.0
diff --git a/core/ima/schain.py b/core/ima/schain.py
index f24ee2107..cb2946452 100644
--- a/core/ima/schain.py
+++ b/core/ima/schain.py
@@ -24,9 +24,7 @@
from ima_predeployed.generator import generate_abi
from core.schains.config.directory import schain_config_dir
-from tools.configs.ima import (
- SCHAIN_IMA_ABI_FILEPATH, SCHAIN_IMA_ABI_FILENAME
-)
+from tools.configs.ima import SCHAIN_IMA_ABI_FILEPATH, SCHAIN_IMA_ABI_FILENAME
logger = logging.getLogger(__name__)
diff --git a/core/node.py b/core/node.py
index 62612e238..b6979557c 100644
--- a/core/node.py
+++ b/core/node.py
@@ -18,38 +18,41 @@
# along with this program. If not, see .
import json
-import time
-import os
-import socket
-import psutil
+import hashlib
import logging
+import os
import platform
-import hashlib
-
-import requests
-
+import psutil
+import socket
+import time
from enum import Enum
-from typing import Dict
+from typing import Dict, List, Optional, TypedDict
-try:
- from sh import lsmod
-except ImportError:
- logging.warning('Could not import lsmod from sh package')
+import requests
-from skale.transactions.result import TransactionFailedError
+from skale import Skale
+from skale.schain_config.generator import get_nodes_for_schain
+from skale.transactions.exceptions import TransactionLogicError
from skale.utils.exceptions import InvalidNodeIdError
from skale.utils.helper import ip_from_bytes
from skale.utils.web3_utils import public_key_to_address, to_checksum_address
from core.filebeat import update_filebeat_service
-
-from tools.configs import CHECK_REPORT_PATH, META_FILEPATH, WATCHDOG_PORT
+from tools.configs import WATCHDOG_PORT, CHANGE_IP_DELAY, CHECK_REPORT_PATH, META_FILEPATH
from tools.helper import read_json
from tools.str_formatters import arguments_list_string
from tools.wallet_utils import check_required_balance
logger = logging.getLogger(__name__)
+try:
+ from sh import lsmod
+except ImportError:
+ logging.warning('Could not import lsmod from sh package')
+
+
+logger = logging.getLogger(__name__)
+
class NodeStatus(Enum):
"""This class contains possible node statuses"""
@@ -152,7 +155,7 @@ def create_node_on_contracts(self, ip, public_ip, port, name, domain_name,
skip_dry_run=skip_dry_run,
wait_for=True
)
- except TransactionFailedError:
+ except TransactionLogicError:
logger.exception('Node creation failed')
return -1
self._log_node_info('Node successfully registered', ip,
@@ -183,7 +186,7 @@ def exit(self, opts):
for _ in range(exit_count):
try:
self.skale.manager.node_exit(self.config.id, wait_for=True)
- except TransactionFailedError:
+ except TransactionLogicError:
logger.exception('Node rotation failed')
def get_exit_status(self):
@@ -225,11 +228,11 @@ def get_exit_status(self):
def set_maintenance_on(self):
if NodeStatus(self.info['status']) != NodeStatus.ACTIVE:
- self._error('Node should be active')
+ return self._error('Node should be active')
try:
self.skale.nodes.set_node_in_maintenance(self.config.id)
- except TransactionFailedError:
- self._error('Moving node to maintenance mode failed')
+ except TransactionLogicError:
+ return self._error('Moving node to maintenance mode failed')
return self._ok()
def set_maintenance_off(self):
@@ -241,7 +244,7 @@ def set_maintenance_off(self):
return {'status': 1, 'errors': [err_msg]}
try:
self.skale.nodes.remove_node_from_in_maintenance(self.config.id)
- except TransactionFailedError:
+ except TransactionLogicError:
return self._error('Removing node from maintenance mode failed')
return self._ok()
@@ -251,7 +254,7 @@ def set_domain_name(self, domain_name: str) -> dict:
self.config.id,
domain_name
)
- except TransactionFailedError as err:
+ except TransactionLogicError as err:
return self._error(str(err))
return self._ok()
@@ -345,6 +348,81 @@ def get_btrfs_info() -> dict:
}
+def get_check_report(report_path: str = CHECK_REPORT_PATH) -> Dict:
+ if not os.path.isfile(report_path):
+ return {}
+ with open(report_path) as report_file:
+ return json.load(report_file)
+
+
+def get_abi_hash(file_path):
+ with open(file_path, 'rb') as file:
+ abi_hash = hashlib.sha256(file.read()).hexdigest()
+ return abi_hash
+
+
+class ManagerNodeInfo(TypedDict):
+ name: str
+ ip: str
+ publicIP: str
+ port: int
+ start_block: int
+ last_reward_date: int
+ finish_time: int
+ status: int
+ validator_id: int
+ publicKey: str
+ domain_name: str
+
+
+class ExtendedManagerNodeInfo(ManagerNodeInfo):
+ ip_change_ts: int
+
+
+def get_current_nodes(skale: Skale, name: str) -> List[ExtendedManagerNodeInfo]:
+ if not skale.schains_internal.is_schain_exist(name):
+ return []
+    current_nodes: List[ExtendedManagerNodeInfo] = get_nodes_for_schain(skale, name)
+ for node in current_nodes:
+ node['ip_change_ts'] = skale.nodes.get_last_change_ip_time(node['id'])
+ node['ip'] = ip_from_bytes(node['ip'])
+ node['publicIP'] = ip_from_bytes(node['publicIP'])
+ return current_nodes
+
+
+def get_current_ips(current_nodes: List[ExtendedManagerNodeInfo]) -> List[str]:
+ return [node['ip'] for node in current_nodes]
+
+
+def get_max_ip_change_ts(current_nodes: List[ExtendedManagerNodeInfo]) -> Optional[int]:
+    max_ip_change_ts = max((node['ip_change_ts'] for node in current_nodes), default=0)
+    return None if max_ip_change_ts == 0 else max_ip_change_ts
+
+
+def calc_reload_ts(current_nodes: List[ExtendedManagerNodeInfo], node_index: int) -> Optional[int]:
+    max_ip_change_ts = get_max_ip_change_ts(current_nodes)
+    if max_ip_change_ts is None:
+        return None
+    return max_ip_change_ts + get_node_delay(node_index)
+
+
+def get_node_delay(node_index: int) -> int:
+ """
+ Returns delay for node in seconds.
+ Example: for node with index 3 and delay 300 it will be 1200 seconds
+ """
+ return CHANGE_IP_DELAY * (node_index + 1)
+
+
+def get_node_index_in_group(skale: Skale, schain_name: str, node_id: int) -> Optional[int]:
+ """Returns node index in group or None if node is not in group"""
+ try:
+ node_ids = skale.schains_internal.get_node_ids_for_schain(schain_name)
+ return node_ids.index(node_id)
+ except ValueError:
+ return None
+
+
def is_port_open(ip, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
@@ -378,16 +456,3 @@ def check_validator_nodes(skale, node_id):
except Exception as err:
return {'status': 1, 'errors': [err]}
return {'status': 0, 'data': res}
-
-
-def get_check_report(report_path: str = CHECK_REPORT_PATH) -> Dict:
- if not os.path.isfile(CHECK_REPORT_PATH):
- return {}
- with open(CHECK_REPORT_PATH) as report_file:
- return json.load(report_file)
-
-
-def get_abi_hash(file_path):
- with open(file_path, 'rb') as file:
- abi_hash = hashlib.sha256(file.read()).hexdigest()
- return abi_hash
diff --git a/core/schains/checks.py b/core/schains/checks.py
index 3d45aff95..ec93d4b31 100644
--- a/core/schains/checks.py
+++ b/core/schains/checks.py
@@ -22,6 +22,7 @@
import time
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
+from core.node import ExtendedManagerNodeInfo, get_current_ips
from core.schains.config.directory import get_schain_check_filepath
from core.schains.config.file_manager import ConfigFileManager
@@ -113,6 +114,7 @@ def get_all(self,
checks_status = {}
for name in names:
if hasattr(self, name):
+ logger.debug('Running check %s', name)
checks_status[name] = getattr(self, name).status
if log:
log_checks_dict(self.get_name(), checks_status)
@@ -140,6 +142,7 @@ def __init__(self,
schain_record: SChainRecord,
rotation_id: int,
stream_version: str,
+ current_nodes: list[ExtendedManagerNodeInfo],
estate: ExternalState,
econfig: Optional[ExternalConfig] = None
) -> None:
@@ -148,6 +151,7 @@ def __init__(self,
self.schain_record = schain_record
self.rotation_id = rotation_id
self.stream_version = stream_version
+ self.current_nodes = current_nodes
self.estate = estate
self.econfig = econfig or ExternalConfig(schain_name)
self.cfm: ConfigFileManager = ConfigFileManager(
@@ -172,17 +176,45 @@ def dkg(self) -> CheckRes:
)
return CheckRes(os.path.isfile(secret_key_share_filepath))
+ @property
+ def skaled_node_ips(self) -> CheckRes:
+ """Checks that IP list on the skale-manager is the same as in the skaled config"""
+ res = False
+ if self.cfm.skaled_config_exists():
+ conf = self.cfm.skaled_config
+ node_ips = get_node_ips_from_config(conf)
+ current_ips = get_current_ips(self.current_nodes)
+ res = set(node_ips) == set(current_ips)
+ return CheckRes(res)
+
@property
def upstream_config(self) -> CheckRes:
- """Checks that config exists for rotation id and stream"""
+ """
+ Returns True if config exists for current rotation id,
+ node ip addresses and stream version are up to date
+ and config regeneration was not triggered manually.
+ Returns False otherwise.
+ """
exists = self.cfm.upstream_exist_for_rotation_id(self.rotation_id)
-
logger.debug('Upstream configs status for %s: %s', self.name, exists)
- return CheckRes(
- exists and
- self.schain_record.config_version == self.stream_version and
- not self.schain_record.sync_config_run
+ stream_updated = self.schain_record.config_version == self.stream_version
+ node_ips_updated = True
+ triggered = self.schain_record.sync_config_run
+ if exists:
+ conf = self.cfm.latest_upstream_config
+ upstream_node_ips = get_node_ips_from_config(conf)
+ current_ips = get_current_ips(self.current_nodes)
+ node_ips_updated = set(upstream_node_ips) == set(current_ips)
+
+ logger.info(
+ 'Upstream config status, rotation_id %s: exist: %s, ips: %s, stream: %s, triggered: %s',
+ self.rotation_id,
+ exists,
+ node_ips_updated,
+ stream_updated,
+ triggered
)
+ return CheckRes(exists and node_ips_updated and stream_updated and not triggered)
@property
def external_state(self) -> CheckRes:
@@ -202,7 +234,7 @@ def __init__(
rule_controller: IRuleController,
*,
econfig: Optional[ExternalConfig] = None,
- dutils: DockerUtils = None
+ dutils: Optional[DockerUtils] = None
):
self.name = schain_name
self.schain_record = schain_record
@@ -360,6 +392,7 @@ def __init__(
rule_controller: IRuleController,
stream_version: str,
estate: ExternalState,
+ current_nodes: list[ExtendedManagerNodeInfo],
rotation_id: int = 0,
*,
econfig: Optional[ExternalConfig] = None,
@@ -372,6 +405,7 @@ def __init__(
schain_record=schain_record,
rotation_id=rotation_id,
stream_version=stream_version,
+ current_nodes=current_nodes,
estate=estate,
econfig=econfig
),
@@ -398,6 +432,7 @@ def get_all(self, log: bool = True, save: bool = False, needed: Optional[List[st
plain_checks = {}
for subj in self._subjects:
+ logger.debug('Running checks for %s', subj)
subj_checks = subj.get_all(
log=False,
save=False,
diff --git a/core/schains/cleaner.py b/core/schains/cleaner.py
index 2bf5c3a07..62c8ca1ab 100644
--- a/core/schains/cleaner.py
+++ b/core/schains/cleaner.py
@@ -24,7 +24,7 @@
from sgx import SgxClient
-from core.node import get_skale_node_version
+from core.node import get_current_nodes, get_skale_node_version
from core.schains.checks import SChainChecks
from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.directory import schain_config_dir
@@ -207,11 +207,13 @@ def remove_schain(skale, node_id, schain_name, msg, dutils=None) -> None:
rotation_data = skale.node_rotation.get_rotation(schain_name)
rotation_id = rotation_data['rotation_id']
estate = ExternalConfig(name=schain_name).get()
+ current_nodes = get_current_nodes(skale, schain_name)
cleanup_schain(
node_id,
schain_name,
sync_agent_ranges,
rotation_id=rotation_id,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -222,6 +224,7 @@ def cleanup_schain(
schain_name,
sync_agent_ranges,
rotation_id,
+ current_nodes,
estate,
dutils=None
) -> None:
@@ -239,8 +242,10 @@ def cleanup_schain(
rule_controller=rc,
stream_version=stream_version,
schain_record=schain_record,
+ current_nodes=current_nodes,
rotation_id=rotation_id,
- estate=estate
+ estate=estate,
+ dutils=dutils
)
status = checks.get_all()
if status['skaled_container'] or is_exited(
diff --git a/core/schains/config/static_params.py b/core/schains/config/static_params.py
index 08734acc9..05bff696d 100644
--- a/core/schains/config/static_params.py
+++ b/core/schains/config/static_params.py
@@ -35,3 +35,11 @@ def get_static_schain_info(env_type: str = ENV_TYPE) -> dict:
def get_static_node_info(schain_type: SchainType, env_type: str = ENV_TYPE) -> dict:
static_params = get_static_params(env_type)
return {**static_params['node']['common'], **static_params['node'][schain_type.name]}
+
+
+def get_automatic_repair_option(env_type: str = ENV_TYPE) -> bool:
+ static_params = get_static_params(env_type)
+ node_params = static_params['node']
+ if 'admin' in node_params:
+ return node_params['admin'].get('automatic_repair', True)
+ return True
diff --git a/core/schains/dkg/__init__.py b/core/schains/dkg/__init__.py
index 76472d1ad..024bf6028 100644
--- a/core/schains/dkg/__init__.py
+++ b/core/schains/dkg/__init__.py
@@ -19,5 +19,6 @@
# flake8: noqa: E402
-from core.schains.dkg.main import safe_run_dkg, save_dkg_results
-from core.schains.dkg.utils import DkgError
\ No newline at end of file
+from core.schains.dkg.main import get_dkg_client, run_dkg, save_dkg_results
+from core.schains.dkg.structures import DKGStatus, DKGStep
+from core.schains.dkg.utils import DkgError, get_secret_key_share_filepath
diff --git a/core/schains/dkg/broadcast_filter.py b/core/schains/dkg/broadcast_filter.py
index eb3e69bba..ae1d405c6 100644
--- a/core/schains/dkg/broadcast_filter.py
+++ b/core/schains/dkg/broadcast_filter.py
@@ -20,7 +20,7 @@
from dataclasses import dataclass
import logging
-from web3.exceptions import TransactionNotFound
+from web3.exceptions import Web3Exception, TransactionNotFound
logger = logging.getLogger(__name__)
@@ -84,37 +84,42 @@ def parse_event(self, receipt):
})
def get_events(self, from_channel_started_block=False):
- if self.first_unseen_block == -1 or from_channel_started_block:
- start_block = self.dkg_contract.functions.getChannelStartedBlock(
- self.group_index
- ).call()
- else:
- start_block = self.first_unseen_block
- current_block = self.skale.web3.eth.get_block("latest")["number"]
- logger.info(f'sChain {self.group_index_str}: Parsing broadcast events from {start_block}'
- f' block to {current_block} block')
events = []
- for block_number in range(start_block, current_block + 1):
- block = self.skale.web3.eth.get_block(block_number, full_transactions=True)
- txns = block["transactions"]
- for tx in txns:
- try:
- if tx.get("to") != self.dkg_contract_address:
- continue
+ try:
+ if self.first_unseen_block == -1 or from_channel_started_block:
+ start_block = self.dkg_contract.functions.getChannelStartedBlock(
+ self.group_index
+ ).call()
+ else:
+ start_block = self.first_unseen_block
+ current_block = self.skale.web3.eth.get_block("latest")["number"]
+ logger.info(f'sChain {self.group_index_str}: Parsing broadcast events '
+ f'from {start_block} block to {current_block} block')
+ for block_number in range(start_block, current_block + 1):
+ block = self.skale.web3.eth.get_block(block_number, full_transactions=True)
+ txns = block["transactions"]
+ for tx in txns:
+ try:
+ if tx.get("to") != self.dkg_contract_address:
+ continue
- hash = tx.get("hash")
- if hash:
- receipt = self.skale.web3.eth.get_transaction_receipt(hash)
- else:
- logger.info(f'sChain {self.group_index_str}: tx {tx}'
- f' does not have field "hash"')
- continue
+ hash = tx.get("hash")
+ if hash:
+ receipt = self.skale.web3.eth.get_transaction_receipt(hash)
+ else:
+ logger.info(f'sChain {self.group_index_str}: tx {tx}'
+ f' does not have field "hash"')
+ continue
- if not self.check_event(receipt):
- continue
- else:
- events.append(self.parse_event(receipt))
- except TransactionNotFound:
- pass
- self.first_unseen_block = block_number + 1
- return events
+ if not self.check_event(receipt):
+ continue
+ else:
+ events.append(self.parse_event(receipt))
+ except TransactionNotFound:
+ pass
+ self.first_unseen_block = block_number + 1
+ return events
+ except (ValueError, Web3Exception) as e:
+ logger.info(f'sChain {self.group_index_str}: error during collecting broadcast '
+ f'events: {e}')
+ return events
diff --git a/core/schains/dkg/client.py b/core/schains/dkg/client.py
index 00ff77179..30454ea4b 100644
--- a/core/schains/dkg/client.py
+++ b/core/schains/dkg/client.py
@@ -28,6 +28,7 @@
from skale.transactions.result import TransactionFailedError
from core.schains.dkg.broadcast_filter import Filter
+from core.schains.dkg.structures import ComplaintReason, DKGStep
from tools.configs import NODE_DATA_PATH, SGX_CERTIFICATES_FOLDER
from tools.sgx_utils import sgx_unreachable_retry
@@ -131,8 +132,21 @@ def generate_bls_key_name(group_index_str, node_id, dkg_id):
class DKGClient:
- def __init__(self, node_id_dkg, node_id_contract, skale, t, n, schain_name, public_keys,
- node_ids_dkg, node_ids_contract, eth_key_name, rotation_id):
+ def __init__(
+ self,
+ node_id_dkg,
+ node_id_contract,
+ skale,
+ t,
+ n,
+ schain_name,
+ public_keys,
+ node_ids_dkg,
+ node_ids_contract,
+ eth_key_name,
+ rotation_id,
+ step: DKGStep = DKGStep.NONE
+ ):
self.sgx = SgxClient(os.environ['SGX_SERVER_URL'], n=n, t=t,
path_to_cert=SGX_CERTIFICATES_FOLDER)
self.schain_name = schain_name
@@ -146,6 +160,7 @@ def __init__(self, node_id_dkg, node_id_contract, skale, t, n, schain_name, publ
group_index_str = str(int(skale.web3.to_hex(self.group_index)[2:], 16))
self.poly_name = generate_poly_name(group_index_str, self.node_id_dkg, rotation_id)
self.bls_name = generate_bls_key_name(group_index_str, self.node_id_dkg, rotation_id)
+ self.rotation_id = rotation_id
self.incoming_verification_vector = ['0' for _ in range(n)]
self.incoming_secret_key_contribution = ['0' for _ in range(n)]
self.public_keys = public_keys
@@ -156,9 +171,7 @@ def __init__(self, node_id_dkg, node_id_contract, skale, t, n, schain_name, publ
self.complaint_error_event_hash = self.skale.web3.to_hex(self.skale.web3.keccak(
text="ComplaintError(string)"
))
- logger.info(
- f'sChain: {self.schain_name}. Node id on chain is {self.node_id_dkg}; '
- f'Node id on contract is {self.node_id_contract}')
+ self.last_completed_step = step # last step
logger.info(f'sChain: {self.schain_name}. DKG timeout is {self.dkg_timeout}')
def is_channel_opened(self):
@@ -198,6 +211,9 @@ def secret_key_contribution(self):
]
return convert_str_to_key_share(self.sent_secret_key_contribution, self.n)
+ def is_node_broadcasted(self) -> bool:
+ return self.skale.dkg.is_node_broadcasted(self.group_index, self.node_id_contract)
+
def broadcast(self):
poly_success = self.generate_polynomial(self.poly_name)
if poly_success == DkgPolyStatus.FAIL:
@@ -217,17 +233,15 @@ def broadcast(self):
verification_vector = self.verification_vector()
secret_key_contribution = self.secret_key_contribution()
- try:
- self.skale.dkg.broadcast(
- self.group_index,
- self.node_id_contract,
- verification_vector,
- secret_key_contribution,
- )
- except TransactionFailedError as e:
- logger.error(f'DKG broadcast failed: sChain {self.schain_name}')
- raise DkgTransactionError(e)
- logger.info(f'sChain: {self.schain_name}. Everything is sent from {self.node_id_dkg} node')
+ self.skale.dkg.broadcast(
+ self.group_index,
+ self.node_id_contract,
+ verification_vector,
+ secret_key_contribution,
+ self.rotation_id
+ )
+ self.last_completed_step = DKGStep.BROADCAST
+ logger.info('Everything is sent from %d node', self.node_id_dkg)
def receive_from_node(self, from_node, broadcasted_data):
self.store_broadcasted_data(broadcasted_data, from_node)
@@ -307,22 +321,16 @@ def alright(self):
logger.info(f'sChain: {self.schain_name}. '
f'{self.node_id_dkg} node could not sent an alright note')
return
- try:
- self.skale.dkg.alright(
- self.group_index,
- self.node_id_contract,
- gas_limit=ALRIGHT_GAS_LIMIT,
- multiplier=2
- )
- except TransactionFailedError as e:
- logger.error(f'DKG alright failed: sChain {self.schain_name}')
- raise DkgTransactionError(e)
+ self.skale.dkg.alright(
+ self.group_index,
+ self.node_id_contract,
+ gas_limit=ALRIGHT_GAS_LIMIT,
+ multiplier=2
+ )
+ self.last_completed_step = DKGStep.ALRIGHT
logger.info(f'sChain: {self.schain_name}. {self.node_id_dkg} node sent an alright note')
- def send_complaint(self, to_node, report_bad_data=False):
- reason = "complaint"
- if report_bad_data:
- reason = "complaint_bad_data"
+ def send_complaint(self, to_node: int, reason: ComplaintReason):
logger.info(f'sChain: {self.schain_name}. '
f'{self.node_id_dkg} node is trying to sent a {reason} on {to_node} node')
@@ -330,29 +338,45 @@ def send_complaint(self, to_node, report_bad_data=False):
self.group_index, self.node_id_contract, self.node_ids_dkg[to_node],
self.skale.wallet.address
)
+ is_channel_opened = self.is_channel_opened()
+ logger.info(
+ 'Complaint possible %s, channel opened %s',
+ is_complaint_possible,
+ is_channel_opened
+ )
- if not is_complaint_possible or not self.is_channel_opened():
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node could not sent a complaint on {to_node} node')
+ if not is_complaint_possible or not is_channel_opened:
+ logger.info(
+ '%d node could not sent a complaint on %d node',
+ self.node_id_dkg,
+ to_node
+ )
return False
+
+ reason_to_step = {
+ ComplaintReason.NO_BROADCAST: DKGStep.COMPLAINT_NO_BROADCAST,
+ ComplaintReason.BAD_DATA: DKGStep.COMPLAINT_BAD_DATA,
+ ComplaintReason.NO_ALRIGHT: DKGStep.COMPLAINT_NO_ALRIGHT,
+ ComplaintReason.NO_RESPONSE: DKGStep.COMPLAINT_NO_RESPONSE
+ }
+
try:
- if not report_bad_data:
- tx_res = self.skale.dkg.complaint(
+ if reason == ComplaintReason.BAD_DATA:
+ tx_res = self.skale.dkg.complaint_bad_data(
self.group_index,
self.node_id_contract,
- self.node_ids_dkg[to_node],
- wait_for=True
+ self.node_ids_dkg[to_node]
)
else:
- tx_res = self.skale.dkg.complaint_bad_data(
+ tx_res = self.skale.dkg.complaint(
self.group_index,
self.node_id_contract,
- self.node_ids_dkg[to_node],
- wait_for=True
+ self.node_ids_dkg[to_node]
)
if self.check_complaint_logs(tx_res.receipt['logs'][0]):
logger.info(f'sChain: {self.schain_name}. '
f'{self.node_id_dkg} node sent a complaint on {to_node} node')
+ self.last_completed_step = reason_to_step[reason]
return True
else:
logger.info(f'sChain: {self.schain_name}. Complaint from {self.node_id_dkg} on '
@@ -393,9 +417,9 @@ def response(self, to_node_index):
self.node_id_contract,
convert_g2_points_to_array(self.incoming_verification_vector[self.node_id_dkg]),
convert_g2_points_to_array(verification_vector_mult),
- convert_str_to_key_share(self.sent_secret_key_contribution, self.n),
- wait_for=True
+ convert_str_to_key_share(self.sent_secret_key_contribution, self.n)
)
+ self.last_completed_step = DKGStep.PRE_RESPONSE
is_response_possible = self.skale.dkg.is_response_possible(
self.group_index, self.node_id_contract, self.skale.wallet.address)
@@ -409,9 +433,9 @@ def response(self, to_node_index):
self.group_index,
self.node_id_contract,
int(dh_key, 16),
- share,
- wait_for=True
+ share
)
+ self.last_completed_step = DKGStep.RESPONSE
logger.info(f'sChain: {self.schain_name}. {self.node_id_dkg} node sent a response')
except TransactionFailedError as e:
logger.error(f'DKG response failed: sChain {self.schain_name}')
diff --git a/core/schains/dkg/main.py b/core/schains/dkg/main.py
index 1b39a9e53..0107ad9a9 100644
--- a/core/schains/dkg/main.py
+++ b/core/schains/dkg/main.py
@@ -23,9 +23,9 @@
from skale.schain_config.generator import get_nodes_for_schain
-from core.schains.dkg.status import DKGStatus
+from core.schains.dkg.structures import ComplaintReason, DKGStatus, DKGStep
from core.schains.dkg.utils import (
- init_dkg_client, send_complaint, send_alright, get_latest_block_timestamp, DkgError,
+ init_dkg_client, send_complaint, get_latest_block_timestamp, DkgError,
DKGKeyGenerationError, generate_bls_keys, check_response, check_no_complaints,
check_failed_dkg, wait_for_fail, broadcast_and_check_data
)
@@ -56,6 +56,13 @@ def init_bls(dkg_client, node_id, sgx_key_name, rotation_id=0):
channel_started_time = skale.dkg.get_channel_started_time(dkg_client.group_index)
+    rotation_mainnet = dkg_client.skale.node_rotation.get_rotation_obj(schain_name).rotation_counter
+    if rotation_id != rotation_mainnet:
+        logger.info(f'sChain {schain_name}: Rotation id on SKALE MANAGER {rotation_mainnet} is '
+                    f'different from the one passed by SKALE ADMIN {rotation_id}.'
+                    f' Need to restart')
+ raise DkgError(f'sChain {schain_name}: restarting DKG')
+
broadcast_and_check_data(dkg_client)
if not dkg_client.is_everyone_broadcasted():
@@ -66,7 +73,10 @@ def init_bls(dkg_client, node_id, sgx_key_name, rotation_id=0):
is_alright_sent_list = [False for _ in range(n)]
if check_no_complaints(dkg_client):
logger.info(f'sChain {schain_name}: No complaints sent in schain - sending alright ...')
- send_alright(dkg_client)
+ if not dkg_client.is_all_data_received(dkg_client.node_id_dkg):
+ dkg_client.alright()
+ else:
+ dkg_client.last_completed_step = DKGStep.ALRIGHT
is_alright_sent_list[dkg_client.node_id_dkg] = True
check_failed_dkg(skale, schain_name)
@@ -89,7 +99,7 @@ def init_bls(dkg_client, node_id, sgx_key_name, rotation_id=0):
if check_no_complaints(dkg_client):
for i in range(dkg_client.n):
if not is_alright_sent_list[i] and i != dkg_client.node_id_dkg:
- send_complaint(dkg_client, i, "alright")
+ send_complaint(dkg_client, i, reason=ComplaintReason.NO_ALRIGHT)
check_response(dkg_client)
@@ -108,7 +118,7 @@ def init_bls(dkg_client, node_id, sgx_key_name, rotation_id=0):
if check_failed_dkg(skale, schain_name) and not complaint_itself:
logger.info(f'sChain: {schain_name}. '
'Accused node has not sent response. Sending complaint...')
- send_complaint(dkg_client, complainted_node_index, "response")
+ send_complaint(dkg_client, complainted_node_index, reason=ComplaintReason.NO_RESPONSE)
wait_for_fail(skale, schain_name, channel_started_time, "response")
if False in is_alright_sent_list:
@@ -133,26 +143,20 @@ def save_dkg_results(dkg_results, filepath):
@dataclass
class DKGResult:
status: DKGStatus
+ step: DKGStep
keys_data: dict
-def safe_run_dkg(
+def run_dkg(
skale,
+ dkg_client,
schain_name,
node_id,
sgx_key_name,
rotation_id
-):
+) -> DKGResult:
keys_data, status = None, None
- dkg_client = None
try:
- dkg_client = get_dkg_client(
- node_id,
- schain_name,
- skale,
- sgx_key_name,
- rotation_id
- )
if is_last_dkg_finished(skale, schain_name):
logger.info(f'Dkg for {schain_name} is completed. Fetching data')
dkg_client.fetch_all_broadcasted_data()
@@ -160,13 +164,13 @@ def safe_run_dkg(
skale.schains.name_to_group_id(schain_name)
):
logger.info(f'Starting dkg procedure for {schain_name}')
- if not skale.dkg.is_channel_opened(
+ if skale.dkg.is_channel_opened(
skale.schains.name_to_group_id(schain_name)
):
- status = DKGStatus.FAILED
- else:
status = DKGStatus.IN_PROGRESS
init_bls(dkg_client, node_id, sgx_key_name, rotation_id)
+ else:
+ status = DKGStatus.FAILED
except DkgError as e:
logger.info(f'sChain {schain_name} DKG procedure failed with {e}')
status = DKGStatus.FAILED
@@ -187,4 +191,8 @@ def safe_run_dkg(
else:
if status != DKGStatus.KEY_GENERATION_ERROR:
status = DKGStatus.FAILED
- return DKGResult(keys_data=keys_data, status=status)
+ return DKGResult(
+ keys_data=keys_data,
+ step=dkg_client.last_completed_step,
+ status=status
+ )
diff --git a/core/schains/dkg/status.py b/core/schains/dkg/structures.py
similarity index 66%
rename from core/schains/dkg/status.py
rename to core/schains/dkg/structures.py
index 2ad57b15d..b46b2456b 100644
--- a/core/schains/dkg/status.py
+++ b/core/schains/dkg/structures.py
@@ -20,7 +20,7 @@
from enum import Enum
-class DKGStatus(Enum):
+class DKGStatus(int, Enum):
NOT_STARTED = 1
IN_PROGRESS = 2
DONE = 3
@@ -29,3 +29,26 @@ class DKGStatus(Enum):
def is_done(self) -> bool:
return self == DKGStatus.DONE
+
+
+class DKGStep(int, Enum):
+ """ Index of DKG step """
+ NONE = 0
+ BROADCAST = 1
+ BROADCAST_VERIFICATION = 2
+ ALRIGHT = 3
+ ALRIGHT_RECEIVING = 4
+ COMPLAINT_NO_BROADCAST = 5
+ COMPLAINT_BAD_DATA = 6
+ COMPLAINT_NO_ALRIGHT = 7
+ COMPLAINT_NO_RESPONSE = 8
+ RESPONSE = 9
+ KEY_GENERATION = 10
+
+
+class ComplaintReason(int, Enum):
+ """ Index of complaint reason """
+ NO_BROADCAST = 0
+ BAD_DATA = 1
+ NO_ALRIGHT = 2
+ NO_RESPONSE = 3
diff --git a/core/schains/dkg/utils.py b/core/schains/dkg/utils.py
index 87ee20573..92ec9c99c 100644
--- a/core/schains/dkg/utils.py
+++ b/core/schains/dkg/utils.py
@@ -20,10 +20,12 @@
import logging
import os
from time import sleep
+from typing import NamedTuple
from skale.schain_config.generator import get_nodes_for_schain
from tools.configs import NODE_DATA_PATH
+from core.schains.dkg.structures import ComplaintReason, DKGStep
from core.schains.dkg.client import DKGClient, DkgError, DkgVerificationError, DkgTransactionError
from core.schains.dkg.broadcast_filter import Filter
@@ -32,25 +34,32 @@
logger = logging.getLogger(__name__)
UINT_CONSTANT = 2**256 - 1
+BROADCAST_DATA_SEARCH_SLEEP = 30
class DkgFailedError(DkgError):
pass
-class DKGKeyGenerationError(Exception):
+class DKGKeyGenerationError(DkgError):
pass
+class BroadcastResult(NamedTuple):
+ received: list[bool]
+ correct: list[bool]
+
+
def init_dkg_client(node_id, schain_name, skale, sgx_eth_key_name, rotation_id):
+ logger.info('Initializing dkg client')
schain_nodes = get_nodes_for_schain(skale, schain_name)
n = len(schain_nodes)
t = (2 * n + 1) // 3
node_id_dkg = -1
public_keys = [0] * n
- node_ids_contract = dict()
- node_ids_dkg = dict()
+ node_ids_contract = {}
+ node_ids_dkg = {}
for i, node in enumerate(schain_nodes):
if not len(node):
raise DkgError(f'sChain: {schain_name}: '
@@ -62,9 +71,15 @@ def init_dkg_client(node_id, schain_name, skale, sgx_eth_key_name, rotation_id):
node_ids_dkg[i] = node["id"]
public_keys[i] = node["publicKey"]
+ logger.info('Nodes in chain: %s', node_ids_dkg)
+
if node_id_dkg == -1:
raise DkgError(f'sChain: {schain_name}: {node_id} '
'Initialization failed, nodeID not found for schain.')
+
+ logger.info('Node index in group is %d. Node id on contracts - %d', node_id_dkg, node_id)
+
+ logger.info('Creating DKGClient')
dkg_client = DKGClient(
node_id_dkg, node_id, skale, t, n, schain_name,
public_keys, node_ids_dkg, node_ids_contract, sgx_eth_key_name, rotation_id
@@ -73,9 +88,7 @@ def init_dkg_client(node_id, schain_name, skale, sgx_eth_key_name, rotation_id):
return dkg_client
-def broadcast_and_check_data(dkg_client):
- logger.info(f'sChain {dkg_client.schain_name}: Sending broadcast')
-
+def receive_broadcast_data(dkg_client: DKGClient) -> BroadcastResult:
n = dkg_client.n
schain_name = dkg_client.schain_name
skale = dkg_client.skale
@@ -88,15 +101,11 @@ def broadcast_and_check_data(dkg_client):
start_time = skale.dkg.get_channel_started_time(dkg_client.group_index)
- try:
- broadcast(dkg_client)
- except SgxUnreachableError as e:
- logger.error(e)
- wait_for_fail(dkg_client.skale, schain_name, start_time)
-
dkg_filter = Filter(skale, schain_name, n)
broadcasts_found = []
+ logger.info('Fetching broadcasted data')
+
while False in is_received:
time_gone = get_latest_block_timestamp(dkg_client.skale) - start_time
if time_gone > dkg_client.dkg_timeout:
@@ -125,20 +134,26 @@ def broadcast_and_check_data(dkg_client):
except DkgVerificationError as e:
logger.error(e)
continue
- except SgxUnreachableError as e:
- logger.error(e)
- wait_for_fail(dkg_client.skale, schain_name, start_time)
-
logger.info(
f'sChain: {schain_name}. Received by {dkg_client.node_id_dkg} from '
f'{from_node}'
)
-
logger.info(f'sChain {schain_name}: total received {len(broadcasts_found)} broadcasts'
f' from nodes {broadcasts_found}')
- sleep(30)
+ sleep(BROADCAST_DATA_SEARCH_SLEEP)
+ return BroadcastResult(correct=is_correct, received=is_received)
+
- check_broadcasted_data(dkg_client, is_correct, is_received)
+def broadcast_and_check_data(dkg_client):
+ if not dkg_client.is_node_broadcasted():
+ logger.info('Sending broadcast')
+ dkg_client.broadcast()
+ else:
+ logger.info('Broadcast has been already sent')
+ dkg_client.last_completed_step = DKGStep.BROADCAST
+ broadcast_result = receive_broadcast_data(dkg_client)
+ check_broadcast_result(dkg_client, broadcast_result)
+ dkg_client.last_completed_step = DKGStep.BROADCAST_VERIFICATION
def generate_bls_keys(dkg_client):
@@ -162,6 +177,7 @@ def generate_bls_keys(dkg_client):
]
except Exception as err:
raise DKGKeyGenerationError(err)
+ dkg_client.last_completed_step = DKGStep.KEY_GENERATION
return {
'common_public_key': formated_common_public_key,
'public_key': dkg_client.public_key,
@@ -172,18 +188,22 @@ def generate_bls_keys(dkg_client):
}
-def broadcast(dkg_client):
- try:
- dkg_client.broadcast()
- except DkgTransactionError:
- pass
-
-
-def send_complaint(dkg_client, index, reason=""):
+def send_complaint(dkg_client: DKGClient, index: int, reason: ComplaintReason):
+ channel_started_time = dkg_client.skale.dkg.get_channel_started_time(dkg_client.group_index)
+ reason_to_missing = {
+ ComplaintReason.NO_ALRIGHT: 'alright',
+ ComplaintReason.NO_BROADCAST: 'broadcast',
+ ComplaintReason.NO_RESPONSE: 'response'
+ }
+ missing = reason_to_missing.get(reason, '')
try:
- channel_started_time = dkg_client.skale.dkg.get_channel_started_time(dkg_client.group_index)
- if dkg_client.send_complaint(index):
- wait_for_fail(dkg_client.skale, dkg_client.schain_name, channel_started_time, reason)
+ if dkg_client.send_complaint(index, reason=reason):
+ wait_for_fail(
+ dkg_client.skale,
+ dkg_client.schain_name,
+ channel_started_time,
+ missing
+ )
except DkgTransactionError:
pass
@@ -191,13 +211,13 @@ def send_complaint(dkg_client, index, reason=""):
def report_bad_data(dkg_client, index):
try:
channel_started_time = dkg_client.skale.dkg.get_channel_started_time(dkg_client.group_index)
- if dkg_client.send_complaint(index, True):
+ if dkg_client.send_complaint(index, reason=ComplaintReason.BAD_DATA):
wait_for_fail(dkg_client.skale, dkg_client.schain_name,
channel_started_time, "correct data")
logger.info(f'sChain {dkg_client.schain_name}:'
'Complainted node did not send a response.'
f'Sending complaint once again')
- dkg_client.send_complaint(index)
+ dkg_client.send_complaint(index, reason=ComplaintReason.NO_RESPONSE)
wait_for_fail(dkg_client.skale, dkg_client.schain_name,
channel_started_time, "response")
except DkgTransactionError:
@@ -213,19 +233,12 @@ def response(dkg_client, to_node_index):
logger.error(f'sChain {dkg_client.schain_name}:' + str(e))
-def send_alright(dkg_client):
- try:
- dkg_client.alright()
- except DkgTransactionError as e:
- logger.error(f'sChain {dkg_client.schain_name}:' + str(e))
-
-
-def check_broadcasted_data(dkg_client, is_correct, is_recieved):
+def check_broadcast_result(dkg_client, broadcast_result):
for i in range(dkg_client.n):
- if not is_recieved[i]:
- send_complaint(dkg_client, i, "broadcast")
+ if not broadcast_result.received[i]:
+ send_complaint(dkg_client, i, reason=ComplaintReason.NO_BROADCAST)
break
- if not is_correct[i]:
+ if not broadcast_result.correct[i]:
report_bad_data(dkg_client, i)
break
diff --git a/core/schains/exit_scheduler.py b/core/schains/exit_scheduler.py
new file mode 100644
index 000000000..a6bce071f
--- /dev/null
+++ b/core/schains/exit_scheduler.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2021-Present SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import os
+import json
+import logging
+import shutil
+
+from tools.configs import NODE_DATA_PATH
+
+
+logger = logging.getLogger(__name__)
+
+
+class ExitScheduleFileManager:
+ def __init__(self, schain_name: str) -> None:
+ self.schain_name = schain_name
+ self.path = os.path.join(NODE_DATA_PATH, 'schains', schain_name, 'rotation.json')
+
+ def exists(self) -> bool:
+ return os.path.isfile(self.path)
+
+ def rm(self) -> bool:
+ return os.remove(self.path)
+
+ @property
+ def exit_ts(self) -> int:
+ with open(self.path) as exit_schedule_file:
+ return json.load(exit_schedule_file)['timestamp']
+
+ @exit_ts.setter
+ def exit_ts(self, ts: int) -> None:
+ tmp_path = os.path.join(os.path.dirname(self.path), '.rotation.json.tmp')
+ with open(tmp_path, 'w') as filepath:
+ json.dump({'timestamp': ts}, filepath)
+ shutil.move(tmp_path, self.path)
diff --git a/core/schains/external_config.py b/core/schains/external_config.py
index 4975c8426..e8a7d54e9 100644
--- a/core/schains/external_config.py
+++ b/core/schains/external_config.py
@@ -13,12 +13,14 @@ class ExternalState:
chain_id: int
ranges: field(default_factory=list)
ima_linked: bool = False
+ reload_ts: Optional[int] = None
def to_dict(self):
return {
'chain_id': self.chain_id,
'ima_linked': self.ima_linked,
- 'ranges': list(map(list, self.ranges))
+ 'ranges': list(map(list, self.ranges)),
+ 'reload_ts': self.reload_ts
}
@@ -38,6 +40,10 @@ def ima_linked(self) -> bool:
def chain_id(self) -> Optional[int]:
return self.read().get('chain_id', None)
+ @property
+ def reload_ts(self) -> Optional[int]:
+ return self.read().get('reload_ts', None)
+
@property
def ranges(self) -> List[IpRange]:
plain_ranges = self.read().get('ranges', [])
@@ -49,8 +55,8 @@ def get(self) -> Optional[ExternalState]:
return ExternalState(
chain_id=plain['chain_id'],
ima_linked=plain['ima_linked'],
- ranges=list(sorted(map(lambda r: IpRange(*r), plain['ranges'])))
-
+ ranges=list(sorted(map(lambda r: IpRange(*r), plain['ranges']))),
+ reload_ts=plain.get('reload_ts')
)
return None
diff --git a/core/schains/ima.py b/core/schains/ima.py
index 6547510d7..8cace6162 100644
--- a/core/schains/ima.py
+++ b/core/schains/ima.py
@@ -37,7 +37,8 @@
IMA_ENDPOINT,
MAINNET_IMA_ABI_FILEPATH,
IMA_STATE_CONTAINER_PATH,
- IMA_TIME_FRAMING
+ IMA_TIME_FRAMING,
+ IMA_NETWORK_BROWSER_FILEPATH
)
from tools.configs.schains import SCHAINS_DIR_PATH
from tools.configs.web3 import ABI_FILEPATH
@@ -61,6 +62,7 @@ class ImaEnv:
schain_proxy_path: str
state_file: str
+ network_browser_data_path: str
schain_name: str
schain_rpc_url: str
@@ -110,7 +112,8 @@ def to_dict(self):
'CID_SCHAIN': self.cid_schain,
'MONITORING_PORT': self.monitoring_port,
'RPC_PORT': self.rpc_port,
- 'TIME_FRAMING': self.time_framing
+ 'TIME_FRAMING': self.time_framing,
+ 'IMA_NETWORK_BROWSER_DATA_PATH': self.network_browser_data_path
}
@@ -177,12 +180,13 @@ def get_ima_env(schain_name: str, mainnet_chain_id: int) -> ImaEnv:
cid_schain=schain_chain_id,
monitoring_port=node_info['imaMonitoringPort'],
rpc_port=get_ima_rpc_port(schain_name),
- time_framing=IMA_TIME_FRAMING
+ time_framing=IMA_TIME_FRAMING,
+ network_browser_data_path=IMA_NETWORK_BROWSER_FILEPATH
)
-def get_ima_version() -> str:
- return CONTAINERS_INFO['ima']['version']
+def get_ima_version_after_migration() -> str:
+ return CONTAINERS_INFO['ima'].get('new_version') or CONTAINERS_INFO['ima']['version']
def get_ima_monitoring_port(schain_name):
diff --git a/core/schains/monitor/action.py b/core/schains/monitor/action.py
index 34ce0e625..3a9438916 100644
--- a/core/schains/monitor/action.py
+++ b/core/schains/monitor/action.py
@@ -21,14 +21,20 @@
import time
from datetime import datetime
from functools import wraps
-from typing import Dict, Optional
+from typing import Dict, Optional, List
from skale import Skale
from core.node_config import NodeConfig
+from core.node import ExtendedManagerNodeInfo, calc_reload_ts, get_node_index_in_group
from core.schains.checks import ConfigChecks, SkaledChecks
-from core.schains.dkg import safe_run_dkg, save_dkg_results, DkgError
-from core.schains.dkg.utils import get_secret_key_share_filepath
+from core.schains.dkg import (
+ DkgError,
+ get_dkg_client,
+ get_secret_key_share_filepath,
+ run_dkg,
+ save_dkg_results
+)
from core.schains.ima import get_migration_ts as get_ima_migration_ts
from core.schains.cleaner import (
@@ -39,7 +45,7 @@
from core.schains.firewall.types import IRuleController
from core.schains.volume import init_data_volume
-from core.schains.rotation import set_rotation_for_schain
+from core.schains.exit_scheduler import ExitScheduleFileManager
from core.schains.limits import get_schain_type
@@ -62,7 +68,6 @@
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
- get_local_schain_http_endpoint_from_config,
get_own_ip_from_config
)
from core.schains.ima import ImaData
@@ -138,6 +143,7 @@ def __init__(
stream_version: str,
checks: ConfigChecks,
estate: ExternalState,
+ current_nodes: List[ExtendedManagerNodeInfo],
econfig: Optional[ExternalConfig] = None
):
self.skale = skale
@@ -146,6 +152,7 @@ def __init__(
self.node_config = node_config
self.checks = checks
self.stream_version = stream_version
+ self.current_nodes = current_nodes
self.rotation_data = rotation_data
self.rotation_id = rotation_data['rotation_id']
@@ -166,14 +173,23 @@ def config_dir(self) -> bool:
def dkg(self) -> bool:
initial_status = self.checks.dkg.status
if not initial_status:
- logger.info('Running safe_run_dkg')
- dkg_result = safe_run_dkg(
+ logger.info('Running run_dkg')
+ dkg_client = get_dkg_client(
+ skale=self.skale,
+ node_id=self.node_config.id,
+ schain_name=self.name,
+ sgx_key_name=self.node_config.sgx_key_name,
+ rotation_id=self.rotation_id
+ )
+ dkg_result = run_dkg(
+ dkg_client=dkg_client,
skale=self.skale,
schain_name=self.name,
node_id=self.node_config.id,
sgx_key_name=self.node_config.sgx_key_name,
rotation_id=self.rotation_id
)
+ logger.info('DKG finished with %s', dkg_result)
if dkg_result.status.is_done():
save_dkg_results(
dkg_result.keys_data,
@@ -208,7 +224,10 @@ def upstream_config(self) -> bool:
if not self.cfm.upstream_config_exists() or new_config != self.cfm.latest_upstream_config:
rotation_id = self.rotation_data['rotation_id']
logger.info(
- 'Saving new upstream config rotation_id: %d', rotation_id)
+ 'Saving new upstream config rotation_id: %d, ips: %s',
+ rotation_id,
+ self.current_nodes
+ )
self.cfm.save_new_upstream(rotation_id, new_config)
result = True
else:
@@ -232,6 +251,23 @@ def external_state(self) -> bool:
self.econfig.update(self.estate)
return True
+ @BaseActionManager.monitor_block
+ def update_reload_ts(self, ip_matched: bool) -> bool:
+ logger.info('Setting reload_ts')
+ if ip_matched:
+ logger.info('Resetting reload_ts')
+ self.estate.reload_ts = None
+ self.econfig.update(self.estate)
+ return True
+ node_index_in_group = get_node_index_in_group(self.skale, self.name, self.node_config.id)
+ if node_index_in_group is None:
+ logger.warning(f'node {self.node_config.id} is not in chain {self.name}')
+ return False
+ self.estate.reload_ts = calc_reload_ts(self.current_nodes, node_index_in_group)
+ logger.info(f'Setting reload_ts to {self.estate.reload_ts}')
+ self.econfig.update(self.estate)
+ return True
+
class SkaledActionManager(BaseActionManager):
def __init__(
@@ -256,6 +292,7 @@ def __init__(
schain_name=self.schain['name']
)
+ self.esfm = ExitScheduleFileManager(schain['name'])
self.dutils = dutils or DockerUtils()
super().__init__(name=schain['name'])
@@ -271,12 +308,12 @@ def volume(self) -> bool:
return initial_status
@BaseActionManager.monitor_block
- def firewall_rules(self) -> bool:
+ def firewall_rules(self, upstream: bool = False) -> bool:
initial_status = self.checks.firewall_rules.status
if not initial_status:
logger.info('Configuring firewall rules')
- conf = self.cfm.skaled_config
+ conf = self.cfm.latest_upstream_config if upstream else self.cfm.skaled_config
base_port = get_base_port_from_config(conf)
node_ips = get_node_ips_from_config(conf)
own_ip = get_own_ip_from_config(conf)
@@ -435,16 +472,19 @@ def update_config(self) -> bool:
return self.cfm.sync_skaled_config_with_upstream()
@BaseActionManager.monitor_block
- def send_exit_request(self) -> None:
- if self.skaled_status.exit_time_reached:
+ def schedule_skaled_exit(self, exit_ts: int) -> None:
+ if self.skaled_status.exit_time_reached or self.esfm.exists():
logger.info('Exit time has been already set')
return
- finish_ts = self.upstream_finish_ts
- logger.info('Trying to set skaled exit time %s', finish_ts)
- if finish_ts is not None:
- url = get_local_schain_http_endpoint_from_config(
- self.cfm.skaled_config)
- set_rotation_for_schain(url, finish_ts)
+ if exit_ts is not None:
+ logger.info('Scheduling skaled exit time %d', exit_ts)
+ self.esfm.exit_ts = exit_ts
+
+ @BaseActionManager.monitor_block
+ def reset_exit_schedule(self) -> None:
+ logger.info('Resetting exit schedule')
+ if self.esfm.exists():
+ self.esfm.rm()
@BaseActionManager.monitor_block
def disable_backup_run(self) -> None:
diff --git a/core/schains/monitor/config_monitor.py b/core/schains/monitor/config_monitor.py
index a5d8be3af..3bd285ae3 100644
--- a/core/schains/monitor/config_monitor.py
+++ b/core/schains/monitor/config_monitor.py
@@ -63,4 +63,5 @@ def execute(self) -> None:
self.am.external_state()
if not self.checks.upstream_config:
self.am.upstream_config()
+ self.am.update_reload_ts(self.checks.skaled_node_ips)
self.am.reset_config_record()
diff --git a/core/schains/monitor/main.py b/core/schains/monitor/main.py
index 78ab26ba4..417c56e96 100644
--- a/core/schains/monitor/main.py
+++ b/core/schains/monitor/main.py
@@ -47,7 +47,9 @@
from core.schains.monitor.action import ConfigActionManager, SkaledActionManager
from core.schains.external_config import ExternalConfig, ExternalState
from core.schains.task import keep_tasks_running, Task
+from core.schains.config.static_params import get_automatic_repair_option
from core.schains.skaled_status import get_skaled_status
+from core.node import get_current_nodes
from tools.docker_utils import DockerUtils
from tools.configs.ima import DISABLE_IMA
from tools.notifications.messages import notify_checks
@@ -76,6 +78,7 @@ def run_config_pipeline(
rotation_data = skale.node_rotation.get_rotation(name)
allowed_ranges = get_sync_agent_ranges(skale)
ima_linked = not DISABLE_IMA and skale_ima.linker.has_schain(name)
+ current_nodes = get_current_nodes(skale, name)
estate = ExternalState(
ima_linked=ima_linked,
@@ -89,6 +92,7 @@ def run_config_pipeline(
schain_record=schain_record,
stream_version=stream_version,
rotation_id=rotation_data['rotation_id'],
+ current_nodes=current_nodes,
econfig=econfig,
estate=estate
)
@@ -100,6 +104,7 @@ def run_config_pipeline(
rotation_data=rotation_data,
stream_version=stream_version,
checks=config_checks,
+ current_nodes=current_nodes,
estate=estate,
econfig=econfig
)
@@ -140,6 +145,7 @@ def run_skaled_pipeline(
dutils=dutils
)
status = skaled_checks.get_all(log=False)
+ automatic_repair = get_automatic_repair_option()
api_status = get_api_checks_status(
status=status, allowed=TG_ALLOWED_CHECKS)
notify_checks(name, node_config.all(), api_status)
@@ -151,7 +157,8 @@ def run_skaled_pipeline(
action_manager=skaled_am,
status=status,
schain_record=schain_record,
- skaled_status=skaled_status
+ skaled_status=skaled_status,
+ automatic_repair=automatic_repair
)
mon(skaled_am, skaled_checks).run()
diff --git a/core/schains/monitor/skaled_monitor.py b/core/schains/monitor/skaled_monitor.py
index 63e3187cf..fe94ca4cc 100644
--- a/core/schains/monitor/skaled_monitor.py
+++ b/core/schains/monitor/skaled_monitor.py
@@ -147,12 +147,13 @@ def execute(self) -> None:
self.am.firewall_rules()
if not self.checks.volume:
self.am.volume()
+ self.am.reset_exit_schedule()
self.am.recreated_schain_containers(abort_on_exit=False)
-class NewConfigSkaledMonitor(BaseSkaledMonitor):
+class ReloadGroupSkaledMonitor(BaseSkaledMonitor):
"""
- When config is outdated request setExitTime with latest finish_ts from config
+ When config is outdated set exit time to the latest finish_ts from schain config
"""
def execute(self):
@@ -168,7 +169,28 @@ def execute(self):
self.am.skaled_rpc()
if not self.checks.ima_container:
self.am.ima_container()
- self.am.send_exit_request()
+ self.am.schedule_skaled_exit(self.am.upstream_finish_ts)
+
+
+class ReloadIpSkaledMonitor(BaseSkaledMonitor):
+ """
+ When config is outdated set exit time to reload_ts from external config
+ """
+
+ def execute(self):
+ if not self.checks.firewall_rules:
+ self.am.firewall_rules(upstream=True)
+ if not self.checks.volume:
+ self.am.volume()
+ if not self.checks.skaled_container:
+ self.am.skaled_container()
+ else:
+ self.am.reset_restart_counter()
+ if not self.checks.rpc:
+ self.am.skaled_rpc()
+ if not self.checks.ima_container:
+ self.am.ima_container()
+ self.am.schedule_skaled_exit(self.am.econfig.reload_ts)
class NoConfigSkaledMonitor(BaseSkaledMonitor):
@@ -214,21 +236,28 @@ def is_backup_mode(schain_record: SChainRecord) -> bool:
def is_repair_mode(
schain_record: SChainRecord,
status: Dict,
- skaled_status: Optional[SkaledStatus]
+ skaled_status: Optional[SkaledStatus],
+ automatic_repair: bool
) -> bool:
- return schain_record.repair_mode or is_skaled_repair_status(status, skaled_status)
+ if schain_record.repair_mode:
+ return True
+ else:
+ return automatic_repair and is_skaled_repair_status(status, skaled_status)
-def is_new_config_mode(
- status: Dict,
- finish_ts: Optional[int]
-) -> bool:
+def is_reload_group_mode(status: Dict, finish_ts: Optional[int]) -> bool:
ts = int(time.time())
if finish_ts is None:
return False
return finish_ts > ts and status['config'] and not status['config_updated']
+def is_reload_ip_mode(status: Dict, reload_ts: Optional[int]) -> bool:
+ if reload_ts is None:
+ return False
+ return status['config'] and not status['config_updated']
+
+
def is_config_update_time(
status: Dict,
skaled_status: Optional[SkaledStatus]
@@ -266,7 +295,8 @@ def get_skaled_monitor(
action_manager: SkaledActionManager,
status: Dict,
schain_record: SChainRecord,
- skaled_status: SkaledStatus
+ skaled_status: SkaledStatus,
+ automatic_repair: bool = True
) -> Type[BaseSkaledMonitor]:
logger.info('Choosing skaled monitor')
if skaled_status:
@@ -277,7 +307,7 @@ def get_skaled_monitor(
mon_type = NoConfigSkaledMonitor
elif is_backup_mode(schain_record):
mon_type = BackupSkaledMonitor
- elif is_repair_mode(schain_record, status, skaled_status):
+ elif is_repair_mode(schain_record, status, skaled_status, automatic_repair):
mon_type = RepairSkaledMonitor
elif is_recreate_mode(schain_record):
mon_type = RecreateSkaledMonitor
@@ -285,7 +315,8 @@ def get_skaled_monitor(
mon_type = NewNodeSkaledMonitor
elif is_config_update_time(status, skaled_status):
mon_type = UpdateConfigSkaledMonitor
- elif is_new_config_mode(status, action_manager.upstream_finish_ts):
- mon_type = NewConfigSkaledMonitor
-
+ elif is_reload_group_mode(status, action_manager.upstream_finish_ts):
+ mon_type = ReloadGroupSkaledMonitor
+ elif is_reload_ip_mode(status, action_manager.econfig.reload_ts):
+ mon_type = ReloadIpSkaledMonitor
return mon_type
diff --git a/core/schains/rotation.py b/core/schains/rotation.py
deleted file mode 100644
index f5b352ac5..000000000
--- a/core/schains/rotation.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021-Present SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import json
-import logging
-import requests
-
-
-logger = logging.getLogger(__name__)
-
-
-class ExitRequestError(Exception):
- pass
-
-
-def set_rotation_for_schain(url: str, timestamp: int) -> None:
- _send_rotation_request(url, timestamp)
-
-
-def _send_rotation_request(url, timestamp):
- logger.info(f'Sending rotation request: {timestamp}')
- headers = {'content-type': 'application/json'}
- data = {
- 'finishTime': timestamp
- }
- call_data = {
- "id": 0,
- "jsonrpc": "2.0",
- "method": "setSchainExitTime",
- "params": data,
- }
- response = requests.post(
- url=url,
- data=json.dumps(call_data),
- headers=headers,
- ).json()
- if response.get('error'):
- raise ExitRequestError(response['error']['message'])
-
-
-def get_schain_public_key(skale, schain_name):
- group_idx = skale.schains.name_to_id(schain_name)
- raw_public_key = skale.key_storage.get_previous_public_key(group_idx)
- public_key_array = [*raw_public_key[0], *raw_public_key[1]]
- if public_key_array == ['0', '0', '1', '0']: # zero public key
- raw_public_key = skale.key_storage.get_common_public_key(group_idx)
- public_key_array = [*raw_public_key[0], *raw_public_key[1]]
- return ':'.join(map(str, public_key_array))
diff --git a/hardhat-node b/hardhat-node
index a7cfb2977..8a4b03fd1 160000
--- a/hardhat-node
+++ b/hardhat-node
@@ -1 +1 @@
-Subproject commit a7cfb29778c90553cc7ae6fa42b7dd4df0fe6519
+Subproject commit 8a4b03fd1051960a3e0182280bf4bfdc43129997
diff --git a/helper-scripts b/helper-scripts
index 45d533ea5..18e08673f 160000
--- a/helper-scripts
+++ b/helper-scripts
@@ -1 +1 @@
-Subproject commit 45d533ea5d3895ce79ebc275fa1cbeab11fe5036
+Subproject commit 18e08673f1999170777df9b4e648c65fd1a6326d
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 6510ec576..3b706ce51 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,7 +2,7 @@ pytest==7.1.3
flake8==5.0.4
freezegun==0.3.15
mock==4.0.2
-blinker==1.4
+blinker==1.6.2
pytest-cov==2.9.0
codecov==2.1.13
diff --git a/requirements.txt b/requirements.txt
index 68a4bd4ed..fc90bc453 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,18 +1,16 @@
peewee==3.9.5
-Flask==2.2.5
-Werkzeug==2.2.3
+Flask==2.3.3
+Werkzeug==2.3.7
gunicorn==20.1.0
-Jinja2==3.0.3
+Jinja2==3.1.2
docker==6.1.3
-simple-crypt==4.1.7
-pycryptodome==3.12.0
-python-iptables==1.0.0
+python-iptables==1.0.1
-skale.py==6.0dev5
+skale.py==6.2b0
-ima-predeployed==2.0.0b0
+ima-predeployed==2.1.0b0
etherbase-predeployed==1.1.0b3
marionette-predeployed==2.0.0b2
config-controller-predeployed==1.0.1.dev2
@@ -24,12 +22,11 @@ context-predeployed==1.0.0.dev3
psutil==5.9.3
colorful==0.5.4
-celery==5.2.2
+celery==5.3.4
filelock==3.0.12
pyOpenSSL==23.1.1
-cryptography==39.0.1
-python-dateutil==2.8.1
+python-dateutil==2.8.2
python-telegram-bot==12.8
sh==1.14.1
diff --git a/tests.Dockerfile b/tests.Dockerfile
index 0909ef719..b31db00ee 100644
--- a/tests.Dockerfile
+++ b/tests.Dockerfile
@@ -1,7 +1,3 @@
FROM admin:base
RUN pip3 install --no-cache-dir -r requirements-dev.txt
-
-RUN pip3 uninstall pycrypto -y
-RUN pip3 uninstall pycryptodome -y
-RUN pip3 install pycryptodome
diff --git a/tests/conftest.py b/tests/conftest.py
index f6fa67fc8..41685e421 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -20,6 +20,7 @@
from skale.utils.contracts_provision.main import (
add_test_permissions,
add_test2_schain_type,
+ add_test4_schain_type,
cleanup_nodes,
cleanup_nodes_schains,
create_nodes,
@@ -30,6 +31,7 @@
from skale.utils.web3_utils import init_web3
from core.ima.schain import update_predeployed_ima
+from core.node import get_current_nodes
from core.node_config import NodeConfig
from core.schains.checks import SChainChecks
from core.schains.config.helper import (
@@ -61,6 +63,7 @@
ETH_AMOUNT_PER_NODE,
ETH_PRIVATE_KEY,
generate_cert,
+ generate_schain_config,
get_test_rule_controller,
init_skale_from_wallet,
init_skale_ima,
@@ -111,6 +114,7 @@ def skale(web3):
skale_obj = init_skale_from_wallet(wallet)
add_test_permissions(skale_obj)
add_test2_schain_type(skale_obj)
+ add_test4_schain_type(skale_obj)
if skale_obj.constants_holder.get_launch_timestamp() != 0:
skale_obj.constants_holder.set_launch_timestamp(0)
deploy_fake_multisig_contract(skale_obj.web3, skale_obj.wallet)
@@ -149,6 +153,7 @@ def node_skales(skale, node_wallets):
@pytest.fixture
def nodes(skale, node_skales, validator):
+ cleanup_nodes(skale, skale.nodes.get_active_node_ids())
link_nodes_to_validator(skale, validator, node_skales)
ids = create_nodes(node_skales)
try:
@@ -213,183 +218,6 @@ def get_skaled_status_dict(
}
-def generate_schain_config(schain_name):
- return {
- "sealEngine": "Ethash",
- "params": {
- "accountStartNonce": "0x00",
- "homesteadForkBlock": "0x0",
- "daoHardforkBlock": "0x0",
- "EIP150ForkBlock": "0x00",
- "EIP158ForkBlock": "0x00",
- "byzantiumForkBlock": "0x0",
- "constantinopleForkBlock": "0x0",
- "networkID": "12313219",
- "chainID": "0x01",
- "maximumExtraDataSize": "0x20",
- "tieBreakingGas": False,
- "minGasLimit": "0xFFFFFFF",
- "maxGasLimit": "7fffffffffffffff",
- "gasLimitBoundDivisor": "0x0400",
- "minimumDifficulty": "0x020000",
- "difficultyBoundDivisor": "0x0800",
- "durationLimit": "0x0d",
- "blockReward": "0x4563918244F40000",
- "skaleDisableChainIdCheck": True
- },
- "genesis": {
- "nonce": "0x0000000000000042",
- "difficulty": "0x020000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "author": "0x0000000000000000000000000000000000000000",
- "timestamp": "0x00",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
- "gasLimit": "0xFFFFFFF"
- },
- "accounts": {
- },
- "skaleConfig": {
- "nodeInfo": {
- "nodeID": 0,
- "nodeName": "test-node1",
- "basePort": 10000,
- "httpRpcPort": 10003,
- "httpsRpcPort": 10008,
- "wsRpcPort": 10002,
- "wssRpcPort": 10007,
- "infoHttpRpcPort": 10008,
- "bindIP": "0.0.0.0",
- "ecdsaKeyName": "NEK:518",
- "imaMonitoringPort": 10006,
- "wallets": {
- "ima": {
- "keyShareName": "bls_key:schain_id:33333333333333333333333333333333333333333333333333333333333333333333333333333:node_id:0:dkg_id:0", # noqa
- "t": 11,
- "n": 16,
- "certfile": "sgx.crt",
- "keyfile": "sgx.key",
- "commonBlsPublicKey0": "11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "commonBlsPublicKey1": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "commonBlsPublicKey2": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "commonBlsPublicKey3": "11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "blsPublicKey0": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "blsPublicKey1": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "blsPublicKey2": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "blsPublicKey3": "11111111111111111111111111111111111111111111111111111111111111111111111111111" # noqa
- }
- },
- },
- "sChain": {
- "schainID": 1,
- "schainName": schain_name,
- "schainOwner": "0x3483A10F7d6fDeE0b0C1E9ad39cbCE13BD094b12",
-
-
- "nodeGroups": {
- "1": {
- "rotation": None,
- "nodes": {
- "2": [
- 0,
- 2,
- "0xc21d242070e84fe5f8e80f14b8867856b714cf7d1984eaa9eb3f83c2a0a0e291b9b05754d071fbe89a91d4811b9b182d350f706dea6e91205905b86b4764ef9a" # noqa
- ],
- "5": [
- 1,
- 5,
- "0xc37b6db727683379d305a4e38532ddeb58c014ebb151662635839edf3f20042bcdaa8e4b1938e8304512c730671aedf310da76315e329be0814709279a45222a" # noqa
- ],
- "4": [
- 2,
- 4,
- "0x8b335f65ecf0845d93bc65a340cc2f4b8c49896f5023ecdff7db6f04bc39f9044239f541702ca7ad98c97aa6a7807aa7c41e394262cca0a32847e3c7c187baf5" # noqa
- ],
- "3": [
- 3,
- 3,
- "0xf3496966c7fd4a82967d32809267abec49bf5c4cc6d88737cee9b1a436366324d4847127a1220575f4ea6a7661723cd5861c9f8de221405b260511b998a0bbc8" # noqa
- ]
- },
- "finish_ts": None,
- "bls_public_key": {
- "blsPublicKey0": "8609115311055863404517113391175862520685049234001839865086978176708009850942", # noqa
- "blsPublicKey1": "12596903066793884087763787291339131389612748572700005223043813683790087081", # noqa
- "blsPublicKey2": "20949401227653007081557504259342598891084201308661070577835940778932311075846", # noqa
- "blsPublicKey3": "5476329286206272760147989277520100256618500160343291262709092037265666120930" # noqa
- }
- },
- "0": {
- "rotation": {
- "leaving_node_id": 1,
- "new_node_id": 5
- },
- "nodes": {
- "2": [
- 0,
- 2,
- "0xc21d242070e84fe5f8e80f14b8867856b714cf7d1984eaa9eb3f83c2a0a0e291b9b05754d071fbe89a91d4811b9b182d350f706dea6e91205905b86b4764ef9a" # noqa
- ],
- "4": [
- 2,
- 4,
- "0x8b335f65ecf0845d93bc65a340cc2f4b8c49896f5023ecdff7db6f04bc39f9044239f541702ca7ad98c97aa6a7807aa7c41e394262cca0a32847e3c7c187baf5" # noqa
- ],
- "3": [
- 3,
- 3,
- "0xf3496966c7fd4a82967d32809267abec49bf5c4cc6d88737cee9b1a436366324d4847127a1220575f4ea6a7661723cd5861c9f8de221405b260511b998a0bbc8" # noqa
- ],
- "1": [
- 1,
- 1,
- "0x1a857aa4a982ba242c2386febf1eb72dcd1f9669b4237a17878eb836086618af6cda473afa2dfb37c0d2786887397d39bec9601234d933d4384fe38a39b399df" # noqa
- ]
- },
- "finish_ts": 1687180291,
- "bls_public_key": {
- "blsPublicKey0": "12452613198400495171048259986807077228209876295033433688114313813034253740478", # noqa
- "blsPublicKey1": "10490413552821776191285904316985887024952448646239144269897585941191848882433", # noqa
- "blsPublicKey2": "892041650350974543318836112385472656918171041007469041098688469382831828315", # noqa
- "blsPublicKey3": "14699659615059580586774988732364564692366017113631037780839594032948908579205" # noqa
- }
- }
- },
- "nodes": [
- {
- "nodeID": 0,
- "nodeName": "test-node0",
- "basePort": 10000,
- "httpRpcPort": 100003,
- "httpsRpcPort": 10008,
- "wsRpcPort": 10002,
- "wssRpcPort": 10007,
- "infoHttpRpcPort": 10008,
- "schainIndex": 1,
- "ip": "127.0.0.1",
- "owner": "0x41",
- "publicIP": "127.0.0.1"
- },
- {
- "nodeID": 1,
- "nodeName": "test-node1",
- "basePort": 10010,
- "httpRpcPort": 10013,
- "httpsRpcPort": 10017,
- "wsRpcPort": 10012,
- "wssRpcPort": 10018,
- "infoHttpRpcPort": 10019,
- "schainIndex": 1,
- "ip": "127.0.0.2",
- "owner": "0x42",
- "publicIP": "127.0.0.2"
- }
- ]
- }
- }
- }
-
-
SECRET_KEY = {
"common_public_key": [
11111111111111111111111111111111111111111111111111111111111111111111111111111,
@@ -672,7 +500,7 @@ def node_config(skale, nodes):
@pytest.fixture
-def schain_checks(schain_config, schain_db, rule_controller, estate, dutils):
+def schain_checks(schain_config, schain_db, current_nodes, rule_controller, estate, dutils):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_record = SChainRecord.get_by_name(schain_name)
node_id = schain_config['skaleConfig']['sChain']['nodes'][0]['nodeID']
@@ -682,6 +510,7 @@ def schain_checks(schain_config, schain_db, rule_controller, estate, dutils):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -763,6 +592,12 @@ def econfig(schain_db, estate):
return ec
+@pytest.fixture
+def current_nodes(skale, schain_db, schain_on_contracts):
+ name = schain_db
+ return get_current_nodes(skale, name)
+
+
@pytest.fixture
def upstreams(schain_db, schain_config):
name = schain_db
diff --git a/tests/dkg_test/main_test.py b/tests/dkg_test/main_test.py
index a02543b27..68b963a12 100644
--- a/tests/dkg_test/main_test.py
+++ b/tests/dkg_test/main_test.py
@@ -1,11 +1,14 @@
"""
Test for dkg procedure using SGX keys
"""
+import functools
import logging
import os
import subprocess
import time
-from concurrent.futures import ThreadPoolExecutor as Executor
+from concurrent.futures import Future, ThreadPoolExecutor as Executor
+from contextlib import contextmanager
+from enum import Enum
import mock
import pytest
@@ -15,7 +18,8 @@
from skale.wallets import SgxWallet
from skale.utils.contracts_provision import DEFAULT_DOMAIN_NAME
-from core.schains.dkg.main import get_dkg_client, is_last_dkg_finished, safe_run_dkg
+from core.schains.dkg.main import get_dkg_client, is_last_dkg_finished, run_dkg
+from core.schains.dkg.structures import DKGStatus, DKGStep
from core.schains.config import init_schain_config_dir
from tests.dkg_test import N_OF_NODES, TEST_ETH_AMOUNT, TYPE_OF_NODES
@@ -23,7 +27,8 @@
generate_random_node_data,
generate_random_schain_data,
init_skale_from_wallet,
- init_web3_skale
+ set_automine,
+ set_interval_mining
)
from tools.configs import SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER
from tools.configs.schains import SCHAINS_DIR_PATH
@@ -34,6 +39,8 @@
MAX_WORKERS = 5
TEST_SRW_FUND_VALUE = 3000000000000000000
+DKG_TIMEOUT = 20000
+DKG_TIMEOUT_FOR_FAILURE = 120 # to speed up failed broadcast/alright test
log_format = '[%(asctime)s][%(levelname)s] - %(threadName)s - %(name)s:%(lineno)d - %(message)s' # noqa
@@ -44,6 +51,13 @@ class DkgTestError(Exception):
pass
+class DKGRunType(int, Enum):
+ NORMAL = 0
+ BROADCAST_FAILED = 1
+ ALRIGHT_FAILED = 2
+ COMPLAINT_FAILED = 3
+
+
def generate_sgx_wallets(skale, n_of_keys):
logger.info(f'Generating {n_of_keys} test wallets')
return [
@@ -67,8 +81,7 @@ def link_node_address(skale, wallet):
skale.wallet = main_wallet
skale.validator_service.link_node_address(
node_address=wallet.address,
- signature=signature,
- wait_for=True
+ signature=signature
)
@@ -94,8 +107,7 @@ def register_node(skale):
port=port,
name=name,
public_ip=public_ip,
- domain_name=DEFAULT_DOMAIN_NAME,
- wait_for=True
+ domain_name=DEFAULT_DOMAIN_NAME
)
node_id = skale.nodes.node_name_to_index(name)
logger.info(f'Registered node {name}, ID: {node_id}')
@@ -114,35 +126,106 @@ def register_nodes(skale_instances):
return nodes
-def run_dkg_all(skale, skale_sgx_instances, schain_name, nodes):
- futures = []
- nodes.sort(key=lambda x: x['node_id'])
+def exec_dkg_runners(runners: list[Future]):
with Executor(max_workers=MAX_WORKERS) as executor:
- for i, (node_skale, node_data) in \
- enumerate(zip(skale_sgx_instances, nodes)):
- futures.append(executor.submit(
- run_node_dkg,
- node_skale, schain_name, i, node_data['node_id']
- ))
+ futures = [
+ executor.submit(runner)
+ for runner in runners
+ ]
+ return [f.result() for f in futures]
- return [f.result() for f in futures]
+def get_dkg_runners(skale, skale_sgx_instances, schain_name, nodes):
+ runners = []
+ for i, (node_skale, node_data) in \
+ enumerate(zip(skale_sgx_instances, nodes)):
+ runners.append(functools.partial(
+ run_node_dkg,
+ node_skale,
+ schain_name,
+ i,
+ node_data['node_id']
+ ))
+
+ return runners
+
+
+@contextmanager
+def dkg_test_client(
+ skale: Skale,
+ node_id: int,
+ schain_name: str,
+ sgx_key_name: str,
+ rotation_id: int,
+ run_type: DKGRunType = DKGRunType.NORMAL
+):
+ dkg_client = get_dkg_client(node_id, schain_name, skale, sgx_key_name, rotation_id)
+ method, original = None, None
+ if run_type == DKGRunType.BROADCAST_FAILED:
+ effect = DkgError('Broadcast failed on purpose')
+ method, original = 'broadcast', dkg_client.skale.dkg.broadcast
+ dkg_client.skale.dkg.broadcast = mock.Mock(side_effect=effect)
+ elif run_type == DKGRunType.ALRIGHT_FAILED:
+ effect = DkgError('Alright failed on purpose')
+ method, original = 'alright', dkg_client.skale.dkg.alright
+ dkg_client.skale.dkg.alright = mock.Mock(side_effect=effect)
+ elif run_type == DKGRunType.COMPLAINT_FAILED:
+ effect = DkgError('Complaint failed on purpose')
+ method, original = 'complaint', dkg_client.skale.dkg.complaint
+ dkg_client.skale.dkg.complaint = mock.Mock(side_effect=effect)
+
+ try:
+ yield dkg_client
+ finally:
+ if method:
+ setattr(dkg_client.skale.dkg, method, original)
+
+
+def run_node_dkg(
+ skale: Skale,
+ schain_name: str,
+ index: int,
+ node_id: int,
+ runs: tuple[DKGRunType] = (DKGRunType.NORMAL,)
+):
+ init_schain_config_dir(schain_name)
+ sgx_key_name = skale.wallet._key_name
+ rotation_id = skale.schains.get_last_rotation_id(schain_name)
-def run_node_dkg(skale, schain_name, index, node_id):
timeout = index * 5 # diversify start time for all nodes
- logger.info(f'Node {node_id} going to sleep {timeout} seconds')
+ logger.info('Node %d going to sleep %d seconds %s', node_id, timeout, type(runs))
time.sleep(timeout)
- sgx_key_name = skale.wallet._key_name
-
- init_schain_config_dir(schain_name)
- rotation_id = skale.schains.get_last_rotation_id(schain_name)
- dkg_result = safe_run_dkg(
- skale,
- schain_name,
- node_id,
- sgx_key_name,
- rotation_id
- )
+ logger.info('Starting runs %s, %d', runs, len(runs))
+ dkg_result = None
+ for run_type in runs:
+ logger.info('Running %s dkg', run_type)
+ with dkg_test_client(
+ skale,
+ node_id,
+ schain_name,
+ sgx_key_name,
+ rotation_id,
+ run_type
+ ) as dkg_client:
+ logger.info('ID skale %d', id(dkg_client.skale))
+ try:
+ dkg_result = run_dkg(
+ skale,
+ dkg_client,
+ schain_name,
+ node_id,
+ sgx_key_name,
+ rotation_id
+ )
+ except Exception:
+ logger.exception('DKG run failed')
+ else:
+ if dkg_result.status == DKGStatus.DONE:
+ logger.info('DKG completed')
+ break
+ logger.info('Finished run %s', run_type)
+
+ logger.info('Completed runs %s', runs)
return dkg_result
@@ -157,7 +240,6 @@ def create_schain(skale: Skale, name: str, lifetime_seconds: int) -> None:
TYPE_OF_NODES,
0,
name,
- wait_for=True,
value=TEST_SRW_FUND_VALUE
)
@@ -170,21 +252,17 @@ def cleanup_schain_config(schain_name: str) -> None:
def remove_schain(skale, schain_name):
print('Cleanup nodes and schains')
if schain_name is not None:
- skale.manager.delete_schain(schain_name, wait_for=True)
+ skale.manager.delete_schain(schain_name)
def remove_nodes(skale, nodes):
for node_id in nodes:
skale.nodes.init_exit(node_id)
- skale.manager.node_exit(node_id, wait_for=True)
+ skale.manager.node_exit(node_id)
class TestDKG:
- @pytest.fixture(scope='class')
- def skale(self):
- return init_web3_skale()
-
- @pytest.fixture(scope='class')
+ @pytest.fixture
def schain_creation_data(self):
_, lifetime_seconds, name = generate_random_schain_data()
return name, lifetime_seconds
@@ -213,7 +291,7 @@ def other_maintenance(self, skale):
skale.nodes.remove_node_from_in_maintenance(nid)
@pytest.fixture(scope='class')
- def nodes(self, skale, skale_sgx_instances, other_maintenance):
+ def nodes(self, skale, validator, skale_sgx_instances, other_maintenance):
nodes = register_nodes(skale_sgx_instances)
try:
yield nodes
@@ -221,7 +299,27 @@ def nodes(self, skale, skale_sgx_instances, other_maintenance):
nids = [node['node_id'] for node in nodes]
remove_nodes(skale, nids)
- @pytest.fixture(scope='class')
+ @pytest.fixture
+ def dkg_timeout(self, skale):
+ skale.constants_holder.set_complaint_timelimit(DKG_TIMEOUT)
+
+ @pytest.fixture
+ def dkg_timeout_small(self, skale):
+ skale.constants_holder.set_complaint_timelimit(DKG_TIMEOUT_FOR_FAILURE)
+
+ @pytest.fixture
+ def interval_mining(self, skale):
+ set_interval_mining(skale.web3, interval=1)
+
+ @pytest.fixture
+ def no_automine(self, skale):
+ try:
+ set_automine(skale.web3, False)
+ yield
+ finally:
+ set_automine(skale.web3, True)
+
+ @pytest.fixture
def schain(self, schain_creation_data, skale, nodes):
schain_name, lifetime = schain_creation_data
create_schain(skale, schain_name, lifetime)
@@ -231,27 +329,31 @@ def schain(self, schain_creation_data, skale, nodes):
remove_schain(skale, schain_name)
cleanup_schain_config(schain_name)
- def test_dkg_procedure(
+ def test_dkg_procedure_normal(
self,
skale,
schain_creation_data,
skale_sgx_instances,
nodes,
+ dkg_timeout,
schain
):
schain_name, _ = schain_creation_data
assert not is_last_dkg_finished(skale, schain_name)
- results = run_dkg_all(
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
skale,
skale_sgx_instances,
schain_name,
nodes
)
+ results = exec_dkg_runners(runners)
assert len(results) == N_OF_NODES
assert is_last_dkg_finished(skale, schain_name)
for node_data, result in zip(nodes, results):
assert result.status.is_done()
+ assert result.step == DKGStep.KEY_GENERATION
keys_data = result.keys_data
assert keys_data is not None
gid = skale.schains.name_to_id(schain_name)
@@ -260,14 +362,17 @@ def test_dkg_procedure(
regular_dkg_keys_data = sorted(
[r.keys_data for r in results], key=lambda d: d['n']
)
-
+ time.sleep(3)
# Rerun dkg to emulate restoring keys
- results = run_dkg_all(
+
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
skale,
skale_sgx_instances,
schain_name,
nodes
)
+ results = exec_dkg_runners(runners)
assert all([r.status.is_done() for r in results])
assert is_last_dkg_finished(skale, schain_name)
@@ -276,6 +381,218 @@ def test_dkg_procedure(
)
assert regular_dkg_keys_data == restore_dkg_keys_data
+ def test_dkg_procedure_broadcast_failed_completely(
+ self,
+ skale,
+ schain_creation_data,
+ skale_sgx_instances,
+ nodes,
+ dkg_timeout_small,
+ interval_mining,
+ no_automine,
+ schain
+ ):
+ schain_name, _ = schain_creation_data
+ assert not is_last_dkg_finished(skale, schain_name)
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
+ skale,
+ skale_sgx_instances,
+ schain_name,
+ nodes
+ )
+
+ runners[0] = functools.partial(
+ run_node_dkg,
+ skale_sgx_instances[0],
+ schain_name,
+ 0,
+ nodes[0]['node_id'],
+ runs=(DKGRunType.BROADCAST_FAILED,)
+ )
+ results = exec_dkg_runners(runners)
+ assert len(results) == N_OF_NODES
+ gid = skale.schains.name_to_id(schain_name)
+
+ for i, (node_data, result) in enumerate(zip(nodes, results)):
+ assert result.status == DKGStatus.FAILED
+ if i == 0:
+ assert result.step == DKGStep.NONE
+ else:
+ assert result.step == DKGStep.COMPLAINT_NO_BROADCAST
+ assert result.keys_data is None
+ assert not skale.dkg.is_last_dkg_successful(gid)
+ assert not is_last_dkg_finished(skale, schain_name)
+
+ def test_dkg_procedure_broadcast_failed_once(
+ self,
+ skale,
+ schain_creation_data,
+ skale_sgx_instances,
+ nodes,
+ schain
+ ):
+ schain_name, _ = schain_creation_data
+ assert not is_last_dkg_finished(skale, schain_name)
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
+ skale,
+ skale_sgx_instances,
+ schain_name,
+ nodes
+ )
+
+ runners[0] = functools.partial(
+ run_node_dkg,
+ skale_sgx_instances[0],
+ schain_name,
+ 0,
+ nodes[0]['node_id'],
+ runs=(DKGRunType.BROADCAST_FAILED, DKGRunType.NORMAL)
+ )
+ results = exec_dkg_runners(runners)
+ assert len(results) == N_OF_NODES
+ gid = skale.schains.name_to_id(schain_name)
+
+ for i, (node_data, result) in enumerate(zip(nodes, results)):
+ assert result.status == DKGStatus.DONE
+ assert result.step == DKGStep.KEY_GENERATION
+ assert result.keys_data is not None
+ assert skale.dkg.is_last_dkg_successful(gid)
+ assert is_last_dkg_finished(skale, schain_name)
+
+ def test_dkg_procedure_alright_failed_completely(
+ self,
+ skale,
+ schain_creation_data,
+ skale_sgx_instances,
+ nodes,
+ dkg_timeout_small,
+ no_automine,
+ interval_mining,
+ schain
+ ):
+ schain_name, _ = schain_creation_data
+ assert not is_last_dkg_finished(skale, schain_name)
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
+ skale,
+ skale_sgx_instances,
+ schain_name,
+ nodes
+ )
+
+ runners[0] = functools.partial(
+ run_node_dkg,
+ skale_sgx_instances[0],
+ schain_name,
+ 0,
+ nodes[0]['node_id'],
+ runs=(DKGRunType.ALRIGHT_FAILED,)
+ )
+ results = exec_dkg_runners(runners)
+ assert len(results) == N_OF_NODES
+ gid = skale.schains.name_to_id(schain_name)
+
+ for i, (node_data, result) in enumerate(zip(nodes, results)):
+ assert result.status == DKGStatus.FAILED
+ if i == 0:
+ assert result.step == DKGStep.BROADCAST_VERIFICATION
+ else:
+ assert result.step == DKGStep.COMPLAINT_NO_ALRIGHT
+ assert result.keys_data is None
+ assert not skale.dkg.is_last_dkg_successful(gid)
+ assert not is_last_dkg_finished(skale, schain_name)
+
+ def test_dkg_procedure_alright_failed_once(
+ self,
+ skale,
+ schain_creation_data,
+ skale_sgx_instances,
+ nodes,
+ schain
+ ):
+ schain_name, _ = schain_creation_data
+ assert not is_last_dkg_finished(skale, schain_name)
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
+ skale,
+ skale_sgx_instances,
+ schain_name,
+ nodes
+ )
+
+ runners[0] = functools.partial(
+ run_node_dkg,
+ skale_sgx_instances[0],
+ schain_name,
+ 0,
+ nodes[0]['node_id'],
+ runs=(DKGRunType.ALRIGHT_FAILED, DKGRunType.NORMAL)
+ )
+ results = exec_dkg_runners(runners)
+ assert len(results) == N_OF_NODES
+ gid = skale.schains.name_to_id(schain_name)
+
+ for i, (node_data, result) in enumerate(zip(nodes, results)):
+ assert result.status == DKGStatus.DONE
+ assert result.step == DKGStep.KEY_GENERATION
+ assert result.keys_data is not None
+ assert skale.dkg.is_last_dkg_successful(gid)
+ assert is_last_dkg_finished(skale, schain_name)
+
+ def test_dkg_procedure_complaint_failed(
+ self,
+ skale,
+ schain_creation_data,
+ skale_sgx_instances,
+ nodes,
+ dkg_timeout_small,
+ no_automine,
+ interval_mining,
+ schain
+ ):
+ schain_name, _ = schain_creation_data
+ assert not is_last_dkg_finished(skale, schain_name)
+ nodes.sort(key=lambda x: x['node_id'])
+ runners = get_dkg_runners(
+ skale,
+ skale_sgx_instances,
+ schain_name,
+ nodes
+ )
+
+ runners[0] = functools.partial(
+ run_node_dkg,
+ skale_sgx_instances[0],
+ schain_name,
+ 0,
+ nodes[0]['node_id'],
+ runs=(DKGRunType.BROADCAST_FAILED,)
+ )
+ for i in range(1, N_OF_NODES):
+ runners[i] = functools.partial(
+ run_node_dkg,
+ skale_sgx_instances[i],
+ schain_name,
+ i,
+ nodes[i]['node_id'],
+ runs=(DKGRunType.COMPLAINT_FAILED,)
+ )
+ results = exec_dkg_runners(runners)
+ assert len(results) == N_OF_NODES
+ gid = skale.schains.name_to_id(schain_name)
+
+ for i, (node_data, result) in enumerate(zip(nodes, results)):
+ assert result.status == DKGStatus.FAILED
+ if i == 0:
+ assert result.step == DKGStep.NONE
+ else:
+ assert result.step == DKGStep.BROADCAST
+ assert result.keys_data is None
+ assert not skale.dkg.is_last_dkg_successful(gid)
+ assert not is_last_dkg_finished(skale, schain_name)
+
@pytest.fixture
def no_ids_for_schain_skale(self, skale):
get_node_ids_f = skale.schains_internal.get_node_ids_for_schain
@@ -290,7 +607,13 @@ def no_ids_for_schain_skale(self, skale):
finally:
skale.schains_internal.get_node_ids_for_schain = get_node_ids_f
- def test_failed_get_dkg_client(self, no_ids_for_schain_skale):
+ def test_failed_get_dkg_client(
+ self,
+ no_ids_for_schain_skale,
+ schain,
+ no_automine,
+ interval_mining
+ ):
skale = no_ids_for_schain_skale
with pytest.raises(DkgError):
get_dkg_client(
diff --git a/tests/dkg_utils.py b/tests/dkg_utils.py
index 32296398a..12a02d733 100644
--- a/tests/dkg_utils.py
+++ b/tests/dkg_utils.py
@@ -3,9 +3,6 @@
from sgx import SgxClient
from sgx.sgx_rpc_handler import SgxServerError
-from core.schains.dkg.main import DKGResult
-from core.schains.dkg.status import DKGStatus
-
from tools.configs import SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER
@@ -48,11 +45,6 @@ def get_bls_public_keys():
}
-def safe_run_dkg_mock(skale, schain_name, node_id, sgx_key_name, rotation_id):
- import_bls_key()
- return DKGResult(status=DKGStatus.DONE, keys_data=SECRET_KEY_INFO)
-
-
def import_bls_key():
sgx_client = SgxClient(SGX_SERVER_URL, n=1, t=1, path_to_cert=SGX_CERTIFICATES_FOLDER)
try:
diff --git a/tests/node_test.py b/tests/node_test.py
index b2ee4ec1d..d664b2a86 100644
--- a/tests/node_test.py
+++ b/tests/node_test.py
@@ -4,7 +4,6 @@
import pytest
from skale import SkaleManager
-from skale.transactions.result import RevertError
from skale.utils.account_tools import generate_account, send_eth
from skale.utils.contracts_provision.main import generate_random_node_data
from skale.utils.contracts_provision import DEFAULT_DOMAIN_NAME
@@ -250,8 +249,8 @@ def test_node_maintenance_error(node, skale):
try:
res = node.set_maintenance_on()
assert res == {'data': None, 'status': 'ok'}
- with pytest.raises(RevertError):
- res = node.set_maintenance_on()
+ res = node.set_maintenance_on()
+ assert res == {'status': 'error', 'errors': ['Node should be active']}
finally:
node.set_maintenance_off()
diff --git a/tests/nodes_test.py b/tests/nodes_test.py
new file mode 100644
index 000000000..f641abf4f
--- /dev/null
+++ b/tests/nodes_test.py
@@ -0,0 +1,68 @@
+from skale.utils.helper import ip_to_bytes
+from core.node import (
+ get_current_nodes,
+ get_current_ips,
+ get_max_ip_change_ts,
+ calc_reload_ts,
+ get_node_index_in_group,
+ get_node_delay
+)
+from tests.utils import generate_random_ip
+from tests.conftest import NUMBER_OF_NODES
+
+
+def test_get_current_nodes(skale, schain_on_contracts):
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ assert len(current_nodes) == NUMBER_OF_NODES
+
+
+def test_get_current_ips(skale, schain_on_contracts):
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ current_ips = get_current_ips(current_nodes)
+ assert len(current_ips) == NUMBER_OF_NODES
+ assert current_ips[0] == current_nodes[0]['ip']
+
+
+def test_get_max_ip_change_ts(skale, schain_on_contracts):
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ max_ip_change_ts = get_max_ip_change_ts(current_nodes)
+ assert max_ip_change_ts is None
+ new_ip = generate_random_ip()
+ skale.nodes.change_ip(current_nodes[0]['id'], ip_to_bytes(new_ip), ip_to_bytes(new_ip))
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ max_ip_change_ts = get_max_ip_change_ts(current_nodes)
+ assert max_ip_change_ts is not None
+ assert max_ip_change_ts > 0
+
+
+def test_calc_reload_ts(skale, schain_on_contracts):
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ reload_ts = calc_reload_ts(current_nodes, 4)
+ assert reload_ts is None
+ new_ip = generate_random_ip()
+ skale.nodes.change_ip(current_nodes[0]['id'], ip_to_bytes(new_ip), ip_to_bytes(new_ip))
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ max_ip_change_ts = get_max_ip_change_ts(current_nodes)
+
+ reload_ts = calc_reload_ts(current_nodes, 4)
+ assert max_ip_change_ts < reload_ts
+
+ reload_ts = calc_reload_ts(current_nodes, 0)
+ assert reload_ts == max_ip_change_ts + 300
+
+ reload_ts = calc_reload_ts([{'ip_change_ts': 0}, {'ip_change_ts': 100}, {'ip_change_ts': 0}], 2)
+ assert reload_ts == 1000
+
+
+def test_get_node_index_in_group(skale, schain_on_contracts):
+ current_nodes = get_current_nodes(skale, schain_on_contracts)
+ node_index = get_node_index_in_group(skale, schain_on_contracts, current_nodes[1]['id'])
+ assert node_index == 1
+ node_index = get_node_index_in_group(skale, schain_on_contracts, 99999999)
+ assert node_index is None
+
+
+def test_get_node_delay():
+ assert get_node_delay(3) == 1200
+ assert get_node_delay(0) == 300
+ assert get_node_delay(16) == 5100
diff --git a/tests/routes/health_test.py b/tests/routes/health_test.py
index c0b9d61b4..da2758371 100644
--- a/tests/routes/health_test.py
+++ b/tests/routes/health_test.py
@@ -87,7 +87,7 @@ def test_containers_all(skale_bp, dutils, schain_db, cleanup_schain_containers):
assert data == expected
-def test_schains_checks(skale_bp, skale, schain_db, dutils):
+def test_schains_checks(skale_bp, skale, schain_on_contracts, schain_db, dutils):
schain_name = schain_db
class SChainChecksMock(SChainChecks):
diff --git a/tests/routes/schains_test.py b/tests/routes/schains_test.py
index 02c41ea2f..329987985 100644
--- a/tests/routes/schains_test.py
+++ b/tests/routes/schains_test.py
@@ -172,19 +172,14 @@ def test_get_schain(
def test_schain_containers_versions(skale_bp):
- skaled_version = '3.7.3-develop.4'
- ima_version = '1.1.0-beta.0'
- with mock.patch(
- 'web.routes.schains.get_skaled_version',
- return_value=skaled_version
- ), mock.patch('web.routes.schains.get_ima_version',
- return_value=ima_version):
- data = get_bp_data(skale_bp, get_api_url(
- BLUEPRINT_NAME, 'container-versions'))
- assert data == {
- 'status': 'ok',
- 'payload': {
- 'skaled_version': skaled_version,
- 'ima_version': ima_version
- }
+ expected_skaled_version = '3.16.1'
+ expected_ima_version = '2.0.0-beta.9'
+ data = get_bp_data(skale_bp, get_api_url(
+ BLUEPRINT_NAME, 'container-versions'))
+ assert data == {
+ 'status': 'ok',
+ 'payload': {
+ 'skaled_version': expected_skaled_version,
+ 'ima_version': expected_ima_version
}
+ }
diff --git a/tests/schains/checks_test.py b/tests/schains/checks_test.py
index 85a5c14a7..0c09d7df4 100644
--- a/tests/schains/checks_test.py
+++ b/tests/schains/checks_test.py
@@ -10,12 +10,16 @@
import docker
import pytest
+from skale.schain_config.generator import get_schain_nodes_with_schains
+
+
from core.schains.checks import SChainChecks, CheckRes
from core.schains.config.file_manager import UpstreamConfigFilename
from core.schains.config.directory import (
get_schain_check_filepath,
schain_config_dir
)
+from core.schains.config.schain_node import generate_schain_nodes
from core.schains.skaled_exit_codes import SkaledExitCodes
from core.schains.runner import get_container_info, get_image_name, run_ima_container
# from core.schains.cleaner import remove_ima_container
@@ -25,7 +29,13 @@
from web.models.schain import upsert_schain_record, SChainRecord
-from tests.utils import CONFIG_STREAM, get_schain_contracts_data, response_mock, request_mock
+from tests.utils import (
+ CONFIG_STREAM,
+ generate_schain_config,
+ get_schain_contracts_data,
+ response_mock,
+ request_mock
+)
NOT_EXISTS_SCHAIN_NAME = 'qwerty123'
@@ -77,7 +87,7 @@ def firewall_rules(self) -> CheckRes:
@pytest.fixture
-def sample_false_checks(schain_config, schain_db, rule_controller, estate, dutils):
+def sample_false_checks(schain_config, schain_db, rule_controller, current_nodes, estate, dutils):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_record = SChainRecord.get_by_name(schain_name)
return SChainChecks(
@@ -86,6 +96,7 @@ def sample_false_checks(schain_config, schain_db, rule_controller, estate, dutil
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -96,6 +107,7 @@ def rules_unsynced_checks(
schain_config,
uninited_rule_controller,
schain_db,
+ current_nodes,
estate,
dutils
):
@@ -107,6 +119,7 @@ def rules_unsynced_checks(
schain_record=schain_record,
rule_controller=uninited_rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -122,7 +135,7 @@ def test_dkg_check(schain_checks, sample_false_checks):
assert not sample_false_checks.dkg.status
-def test_upstream_config_check(schain_checks):
+def test_upstream_config_check(skale, schain_checks):
assert not schain_checks.upstream_config
ts = int(time.time())
name, rotation_id = schain_checks.name, schain_checks.rotation_id
@@ -132,8 +145,18 @@ def test_upstream_config_check(schain_checks):
f'schain_{name}_{rotation_id}_{ts}.json'
)
+ schain_nodes_with_schains = get_schain_nodes_with_schains(skale, name)
+ nodes = generate_schain_nodes(
+ schain_nodes_with_schains=schain_nodes_with_schains,
+ schain_name=name,
+ rotation_id=rotation_id
+ )
+
+ config = generate_schain_config(name)
+ config['skaleConfig']['sChain']['nodes'] = nodes
+
with open(upstream_path, 'w') as upstream_file:
- json.dump({'config': 'upstream'}, upstream_file)
+ json.dump(config, upstream_file)
assert schain_checks.upstream_config
@@ -256,7 +279,7 @@ def test_blocks_check(schain_checks):
assert not schain_checks.blocks
-def test_init_checks(skale, schain_db, uninited_rule_controller, estate, dutils):
+def test_init_checks(skale, schain_db, current_nodes, uninited_rule_controller, estate, dutils):
schain_name = schain_db
schain_record = SChainRecord.get_by_name(schain_name)
checks = SChainChecks(
@@ -265,6 +288,7 @@ def test_init_checks(skale, schain_db, uninited_rule_controller, estate, dutils)
schain_record=schain_record,
rule_controller=uninited_rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -272,7 +296,7 @@ def test_init_checks(skale, schain_db, uninited_rule_controller, estate, dutils)
assert checks.node_id == TEST_NODE_ID
-def test_exit_code(skale, rule_controller, schain_db, estate, dutils):
+def test_exit_code(skale, rule_controller, schain_db, current_nodes, estate, dutils):
test_schain_name = schain_db
image_name, container_name, _, _ = get_container_info(
SCHAIN_CONTAINER, test_schain_name)
@@ -292,6 +316,7 @@ def test_exit_code(skale, rule_controller, schain_db, estate, dutils):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -302,7 +327,7 @@ def test_exit_code(skale, rule_controller, schain_db, estate, dutils):
dutils.safe_rm(container_name)
-def test_process(skale, rule_controller, schain_db, estate, dutils):
+def test_process(skale, rule_controller, schain_db, current_nodes, estate, dutils):
schain_record = SChainRecord.get_by_name(schain_db)
checks = SChainChecks(
schain_db,
@@ -310,6 +335,7 @@ def test_process(skale, rule_controller, schain_db, estate, dutils):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -323,7 +349,7 @@ def test_process(skale, rule_controller, schain_db, estate, dutils):
assert not checks.process.status
-def test_get_all(schain_config, rule_controller, dutils, schain_db, estate):
+def test_get_all(schain_config, rule_controller, dutils, current_nodes, schain_db, estate):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_record = SChainRecord.get_by_name(schain_name)
node_id = schain_config['skaleConfig']['sChain']['nodes'][0]['nodeID']
@@ -333,6 +359,7 @@ def test_get_all(schain_config, rule_controller, dutils, schain_db, estate):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -354,6 +381,7 @@ def test_get_all(schain_config, rule_controller, dutils, schain_db, estate):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -372,7 +400,7 @@ def test_get_all(schain_config, rule_controller, dutils, schain_db, estate):
assert len(filtered_checks) == 0
-def test_get_all_with_save(node_config, rule_controller, dutils, schain_db, estate):
+def test_get_all_with_save(node_config, rule_controller, current_nodes, dutils, schain_db, estate):
schain_record = upsert_schain_record(schain_db)
checks = SChainChecksMock(
schain_db,
@@ -380,6 +408,7 @@ def test_get_all_with_save(node_config, rule_controller, dutils, schain_db, esta
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -391,7 +420,7 @@ def test_get_all_with_save(node_config, rule_controller, dutils, schain_db, esta
assert schain_checks == checks_from_file['checks']
-def test_config_updated(skale, rule_controller, schain_db, estate, dutils):
+def test_config_updated(skale, rule_controller, schain_db, current_nodes, estate, dutils):
name = schain_db
folder = schain_config_dir(name)
@@ -403,6 +432,7 @@ def test_config_updated(skale, rule_controller, schain_db, estate, dutils):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -423,6 +453,7 @@ def test_config_updated(skale, rule_controller, schain_db, estate, dutils):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
@@ -435,6 +466,7 @@ def test_config_updated(skale, rule_controller, schain_db, estate, dutils):
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate,
dutils=dutils
)
diff --git a/tests/schains/cleaner_test.py b/tests/schains/cleaner_test.py
index a342ebc51..e8319474c 100644
--- a/tests/schains/cleaner_test.py
+++ b/tests/schains/cleaner_test.py
@@ -11,6 +11,7 @@
from skale.skale_manager import spawn_skale_manager_lib
from core.schains.cleaner import (
+ cleanup_schain,
delete_bls_keys,
monitor,
get_schains_on_node,
@@ -235,3 +236,33 @@ def test_get_schains_on_node(schain_dirs_for_monitor,
TEST_SCHAIN_NAME_1, TEST_SCHAIN_NAME_2,
PHANTOM_SCHAIN_NAME, schain_name
]).issubset(set(result))
+
+
+def test_cleanup_schain(
+ schain_db,
+ node_config,
+ schain_on_contracts,
+ current_nodes,
+ estate,
+ dutils,
+ secret_key
+):
+ schain_name = schain_db
+ schain_dir_path = os.path.join(SCHAINS_DIR_PATH, schain_name)
+ assert os.path.isdir(schain_dir_path)
+ cleanup_schain(
+ node_config.id,
+ schain_name,
+ current_nodes=current_nodes,
+ sync_agent_ranges=[],
+ rotation_id=0,
+ estate=estate,
+ dutils=dutils
+ )
+
+ container_name = SCHAIN_CONTAINER_NAME_TEMPLATE.format(schain_name)
+ assert not is_container_running(dutils, container_name)
+ schain_dir_path = os.path.join(SCHAINS_DIR_PATH, schain_name)
+ assert not os.path.isdir(schain_dir_path)
+ record = SChainRecord.get_by_name(schain_name)
+ assert record.is_deleted is True
diff --git a/tests/schains/config/static_params_test.py b/tests/schains/config/static_params_test.py
index 459e71af6..2759b128e 100644
--- a/tests/schains/config/static_params_test.py
+++ b/tests/schains/config/static_params_test.py
@@ -1,6 +1,9 @@
from core.schains.types import SchainType
from core.schains.config.static_params import (
- get_static_schain_cmd, get_static_schain_info, get_static_node_info
+ get_automatic_repair_option,
+ get_static_schain_cmd,
+ get_static_schain_info,
+ get_static_node_info
)
@@ -30,3 +33,11 @@ def test_get_static_node_info():
assert node_info_small.get('maxOpenLeveldbFiles')
assert node_info_small != node_info_medium
+
+
+def test_get_automatic_repair_option():
+ assert get_automatic_repair_option()
+ assert get_automatic_repair_option(env_type='mainnet')
+ assert get_automatic_repair_option(env_type='testnet')
+ assert get_automatic_repair_option(env_type='devnet')
+ assert not get_automatic_repair_option(env_type='qanet')
diff --git a/tests/schains/ima_test.py b/tests/schains/ima_test.py
index ce902f748..ff2fb0c19 100644
--- a/tests/schains/ima_test.py
+++ b/tests/schains/ima_test.py
@@ -7,7 +7,7 @@ def test_get_ima_env(_schain_name, schain_config):
mainnet_chain_id=123
)
ima_env_dict = ima_env.to_dict()
- assert len(ima_env_dict) == 22
+ assert len(ima_env_dict) == 23
assert ima_env_dict['CID_MAIN_NET'] == 123
assert ima_env_dict['RPC_PORT'] == 10010
isinstance(ima_env_dict['CID_SCHAIN'], str)
diff --git a/tests/schains/monitor/action/config_action_test.py b/tests/schains/monitor/action/config_action_test.py
index 4bfaa780d..771769727 100644
--- a/tests/schains/monitor/action/config_action_test.py
+++ b/tests/schains/monitor/action/config_action_test.py
@@ -1,7 +1,9 @@
import shutil
+from copy import deepcopy
import pytest
+from core.node import get_current_nodes
from core.schains.checks import ConfigChecks
from core.schains.config.directory import schain_config_dir
from core.schains.monitor.action import ConfigActionManager
@@ -28,12 +30,14 @@ def config_checks(
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
+ current_nodes = get_current_nodes(skale, name)
return ConfigChecks(
schain_name=name,
node_id=node_config.id,
schain_record=schain_record,
rotation_id=rotation_data['rotation_id'],
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate
)
@@ -52,6 +56,7 @@ def config_am(
name = schain_db
rotation_data = skale.node_rotation.get_rotation(name)
schain = skale.schains.get_by_name(name)
+ current_nodes = get_current_nodes(skale, name)
return ConfigActionManager(
skale=skale,
schain=schain,
@@ -59,6 +64,7 @@ def config_am(
rotation_data=rotation_data,
checks=config_checks,
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate
)
@@ -89,6 +95,16 @@ def test_upstream_config_actions(config_am, config_checks):
config_am.upstream_config()
assert config_checks.upstream_config
+ # Modify node ips and test that the check fails
+ nodes = config_checks.current_nodes
+ new_nodes = deepcopy(config_checks.current_nodes)
+ try:
+ new_nodes[0]['ip'] = new_nodes[1]['ip']
+ config_checks.current_nodes = new_nodes
+ assert not config_checks.upstream_config
+ finally:
+ config_checks.current_nodes = nodes
+
@pytest.fixture
def empty_econfig(schain_db):
@@ -104,6 +120,7 @@ def test_external_state_config_actions(config_am, config_checks, empty_econfig):
assert econfig_data == {
'ima_linked': True,
'chain_id': config_am.skale.web3.eth.chain_id,
- 'ranges': [['1.1.1.1', '2.2.2.2'], ['3.3.3.3', '4.4.4.4']]
+ 'ranges': [['1.1.1.1', '2.2.2.2'], ['3.3.3.3', '4.4.4.4']],
+ 'reload_ts': None
}
assert config_checks.external_state
diff --git a/tests/schains/monitor/config_monitor_test.py b/tests/schains/monitor/config_monitor_test.py
index 5fa5a823c..d7c211f65 100644
--- a/tests/schains/monitor/config_monitor_test.py
+++ b/tests/schains/monitor/config_monitor_test.py
@@ -2,16 +2,20 @@
import os
import pytest
+from skale.utils.helper import ip_to_bytes
+
+from core.node import get_current_nodes
from core.schains.checks import ConfigChecks
from core.schains.config.directory import schain_config_dir
from core.schains.monitor.action import ConfigActionManager
from core.schains.monitor.config_monitor import RegularConfigMonitor
+from core.schains.external_config import ExternalConfig
from web.models.schain import SChainRecord
-from tests.utils import CONFIG_STREAM
+from tests.utils import CONFIG_STREAM, generate_random_ip
@pytest.fixture
@@ -30,12 +34,14 @@ def config_checks(
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
+ current_nodes = get_current_nodes(skale, name)
return ConfigChecks(
schain_name=name,
node_id=node_config.id,
schain_record=schain_record,
rotation_id=rotation_data['rotation_id'],
stream_version=CONFIG_STREAM,
+ current_nodes=current_nodes,
estate=estate
)
@@ -54,6 +60,7 @@ def config_am(
name = schain_db
rotation_data = skale.node_rotation.get_rotation(name)
schain = skale.schains.get_by_name(name)
+ current_nodes = get_current_nodes(skale, name)
am = ConfigActionManager(
skale=skale,
@@ -62,6 +69,7 @@ def config_am(
rotation_data=rotation_data,
stream_version=CONFIG_STREAM,
checks=config_checks,
+ current_nodes=current_nodes,
estate=estate
)
am.dkg = lambda s: True
@@ -88,3 +96,37 @@ def test_regular_config_monitor(schain_db, regular_config_monitor, rotation_data
)
filenames = glob.glob(pattern)
assert os.path.isfile(filenames[0])
+
+
+def test_regular_config_monitor_change_ip(
+ skale,
+ schain_db,
+ regular_config_monitor,
+ rotation_data
+):
+ name = schain_db
+ econfig = ExternalConfig(name=name)
+ assert econfig.reload_ts is None
+
+ regular_config_monitor.run()
+ assert econfig.reload_ts is None
+
+ current_nodes = get_current_nodes(skale, name)
+ new_ip = generate_random_ip()
+ skale.nodes.change_ip(current_nodes[0]['id'], ip_to_bytes(new_ip), ip_to_bytes(new_ip))
+
+ current_nodes = get_current_nodes(skale, name)
+ regular_config_monitor.am.current_nodes = current_nodes
+ regular_config_monitor.checks.current_nodes = current_nodes
+
+ regular_config_monitor.run()
+ assert econfig.reload_ts is not None
+ assert econfig.reload_ts > 0
+
+ current_nodes = get_current_nodes(skale, name)
+ regular_config_monitor.am.current_nodes = current_nodes
+ regular_config_monitor.checks.current_nodes = current_nodes
+
+ regular_config_monitor.am.cfm.sync_skaled_config_with_upstream()
+ regular_config_monitor.run()
+ assert econfig.reload_ts is None
diff --git a/tests/schains/monitor/skaled_monitor_test.py b/tests/schains/monitor/skaled_monitor_test.py
index f5d8ee648..bded111d5 100644
--- a/tests/schains/monitor/skaled_monitor_test.py
+++ b/tests/schains/monitor/skaled_monitor_test.py
@@ -12,7 +12,8 @@
from core.schains.monitor.skaled_monitor import (
BackupSkaledMonitor,
get_skaled_monitor,
- NewConfigSkaledMonitor,
+ ReloadGroupSkaledMonitor,
+ ReloadIpSkaledMonitor,
NewNodeSkaledMonitor,
NoConfigSkaledMonitor,
RecreateSkaledMonitor,
@@ -20,6 +21,8 @@
RepairSkaledMonitor,
UpdateConfigSkaledMonitor
)
+from core.schains.external_config import ExternalConfig
+from core.schains.exit_scheduler import ExitScheduleFileManager
from core.schains.runner import get_container_info
from tools.configs.containers import SCHAIN_CONTAINER, IMA_CONTAINER
from web.models.schain import SChainRecord
@@ -237,6 +240,15 @@ def test_get_skaled_monitor_repair_skaled_status(
)
assert mon == RepairSkaledMonitor
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status_repair,
+ automatic_repair=False
+ )
+ assert mon == RegularSkaledMonitor
+
class SkaledChecksWithConfig(SkaledChecks):
@property
@@ -278,7 +290,7 @@ def skaled_checks_new_config(
@freezegun.freeze_time(CURRENT_DATETIME)
-def test_get_skaled_monitor_new_config(
+def test_get_skaled_monitor_reload_group(
skale,
skaled_am,
skaled_checks_new_config,
@@ -335,7 +347,62 @@ def test_get_skaled_monitor_new_config(
schain_record,
skaled_status
)
- assert mon == NewConfigSkaledMonitor
+ assert mon == ReloadGroupSkaledMonitor
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_get_skaled_monitor_reload_ip(
+ skale,
+ skaled_am,
+ skaled_checks_new_config,
+ schain_db,
+ skaled_status,
+ node_config,
+ rule_controller,
+ schain_on_contracts,
+ predeployed_ima,
+ rotation_data,
+ secret_keys,
+ ssl_folder,
+ skaled_checks,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+
+ state = skaled_checks_new_config.get_all()
+ state['rotation_id_updated'] = False
+
+ schain = skale.schains.get_by_name(name)
+
+ econfig = ExternalConfig(name)
+
+ skaled_am = SkaledActionManager(
+ schain=schain,
+ rule_controller=rule_controller,
+ node_config=node_config,
+ checks=skaled_checks,
+ dutils=dutils
+ )
+ mon = get_skaled_monitor(
+ skaled_am,
+ state,
+ schain_record,
+ skaled_status
+ )
+ assert mon == RegularSkaledMonitor
+
+ estate = econfig.read()
+ estate['reload_ts'] = CURRENT_TIMESTAMP + 10
+ econfig.write(estate)
+
+ mon = get_skaled_monitor(
+ skaled_am,
+ state,
+ schain_record,
+ skaled_status
+ )
+ assert mon == ReloadIpSkaledMonitor
@freezegun.freeze_time(CURRENT_DATETIME)
@@ -473,14 +540,14 @@ def test_repair_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
assert not dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
-def test_new_config_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
- mon = NewConfigSkaledMonitor(skaled_am, skaled_checks)
+def test_group_reload_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = ReloadGroupSkaledMonitor(skaled_am, skaled_checks)
ts = time.time()
+ esfm = ExitScheduleFileManager(mon.am.name)
with mock.patch('core.schains.monitor.action.get_finish_ts_from_latest_upstream',
return_value=ts):
- with mock.patch('core.schains.monitor.action.set_rotation_for_schain') as set_exit_mock:
- mon.run()
- set_exit_mock.assert_called_with('http://127.0.0.1:10003', ts)
+ mon.run()
+ assert esfm.exit_ts == ts
assert skaled_am.rc.is_rules_synced
assert dutils.get_vol(skaled_am.name)
assert dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
@@ -488,8 +555,8 @@ def test_new_config_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutil
@pytest.mark.skip
-def test_new_config_skaled_monitor_failed_skaled(skaled_am, skaled_checks, clean_docker, dutils):
- mon = NewConfigSkaledMonitor(skaled_am, skaled_checks)
+def test_group_reload_skaled_monitor_failed_skaled(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = ReloadGroupSkaledMonitor(skaled_am, skaled_checks)
with mock.patch('core.schains.monitor.containers.run_schain_container') \
as run_skaled_container_mock:
mon.run()
diff --git a/tests/schains/rotation_schedule_test.py b/tests/schains/rotation_schedule_test.py
new file mode 100644
index 000000000..bbf1976f4
--- /dev/null
+++ b/tests/schains/rotation_schedule_test.py
@@ -0,0 +1,17 @@
+import time
+from core.schains.exit_scheduler import ExitScheduleFileManager
+
+import pytest
+
+
+def test_exit_schedule_fm(secret_key, schain_db):
+ name = schain_db
+ esfm = ExitScheduleFileManager(name)
+ ts = time.time()
+ with pytest.raises(FileNotFoundError):
+ assert esfm.exit_ts
+ esfm.exit_ts = ts
+ assert esfm.exit_ts == ts
+ assert esfm.exists()
+ esfm.rm()
+ assert not esfm.exists()
diff --git a/tests/schains/runner_test.py b/tests/schains/runner_test.py
index 867ff141d..95001ee68 100644
--- a/tests/schains/runner_test.py
+++ b/tests/schains/runner_test.py
@@ -1,28 +1,7 @@
import mock
-import json
from core.schains.process_manager import get_leaving_schains_for_node
from core.schains.runner import is_exited
-from core.schains.rotation import set_rotation_for_schain
-
-
-class ResponseMock:
- def json(self):
- return {}
-
-
-def test_set_rotation(schain_config):
- with mock.patch('core.schains.rotation.requests.post',
- new=mock.Mock(return_value=ResponseMock())) as post:
- fts = 100
- url = 'http://127.0.0.1:10003'
- set_rotation_for_schain(url=url, timestamp=fts)
- args, kwargs = post.call_args
- data = json.loads(kwargs['data'])
- params = {'finishTime': fts}
- assert kwargs['url'] == url
- assert data['method'] == 'setSchainExitTime'
- assert data['params'] == params
def test_is_exited(dutils):
diff --git a/tests/skale-data/config/static_params.yaml b/tests/skale-data/config/static_params.yaml
index 78b16c306..25566b522 100644
--- a/tests/skale-data/config/static_params.yaml
+++ b/tests/skale-data/config/static_params.yaml
@@ -192,6 +192,8 @@ envs:
["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
node:
+ admin:
+ automatic_repair: false
common:
bindIP: "0.0.0.0"
logLevel: "info"
@@ -265,6 +267,8 @@ envs:
["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
node:
+ admin:
+ automatic_repair: true
common:
bindIP: "0.0.0.0"
logLevel: "info"
diff --git a/tests/utils.py b/tests/utils.py
index 22fad01fa..fab6628ff 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -3,6 +3,7 @@
import os
import json
import random
+import requests
import string
import time
from contextlib import contextmanager
@@ -12,6 +13,7 @@
from skale import Skale, SkaleIma
from skale.utils.web3_utils import init_web3
from skale.wallets import Web3Wallet
+from web3 import Web3
from core.schains.cleaner import (
remove_config_dir,
@@ -267,3 +269,194 @@ def upsert_schain_record_with_config(name, version=None):
r = upsert_schain_record(name)
r.set_config_version(version)
return r
+
+
+def set_interval_mining(w3: Web3, interval: int) -> None:
+ endpoint = w3.provider.endpoint_uri
+ data = {'jsonrpc': '2.0', 'method': 'evm_setIntervalMining', 'params': [interval], "id": 101}
+ r = requests.post(endpoint, json=data)
+ assert r.status_code == 200 and 'error' not in r.json()
+
+
+def set_automine(w3: Web3, value: bool) -> None:
+ endpoint = w3.provider.endpoint_uri
+ data = {'jsonrpc': '2.0', 'method': 'evm_setAutomine', 'params': [value], "id": 102}
+ r = requests.post(endpoint, json=data)
+ assert r.status_code == 200 and 'error' not in r.json()
+
+
+def generate_schain_config(schain_name):
+ return {
+ "sealEngine": "Ethash",
+ "params": {
+ "accountStartNonce": "0x00",
+ "homesteadForkBlock": "0x0",
+ "daoHardforkBlock": "0x0",
+ "EIP150ForkBlock": "0x00",
+ "EIP158ForkBlock": "0x00",
+ "byzantiumForkBlock": "0x0",
+ "constantinopleForkBlock": "0x0",
+ "networkID": "12313219",
+ "chainID": "0x01",
+ "maximumExtraDataSize": "0x20",
+ "tieBreakingGas": False,
+ "minGasLimit": "0xFFFFFFF",
+ "maxGasLimit": "7fffffffffffffff",
+ "gasLimitBoundDivisor": "0x0400",
+ "minimumDifficulty": "0x020000",
+ "difficultyBoundDivisor": "0x0800",
+ "durationLimit": "0x0d",
+ "blockReward": "0x4563918244F40000",
+ "skaleDisableChainIdCheck": True
+ },
+ "genesis": {
+ "nonce": "0x0000000000000042",
+ "difficulty": "0x020000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "author": "0x0000000000000000000000000000000000000000",
+ "timestamp": "0x00",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
+ "gasLimit": "0xFFFFFFF"
+ },
+ "accounts": {
+ },
+ "skaleConfig": {
+ "nodeInfo": {
+ "nodeID": 0,
+ "nodeName": "test-node1",
+ "basePort": 10000,
+ "httpRpcPort": 10003,
+ "httpsRpcPort": 10008,
+ "wsRpcPort": 10002,
+ "wssRpcPort": 10007,
+ "infoHttpRpcPort": 10008,
+ "bindIP": "0.0.0.0",
+ "ecdsaKeyName": "NEK:518",
+ "imaMonitoringPort": 10006,
+ "wallets": {
+ "ima": {
+ "keyShareName": "bls_key:schain_id:33333333333333333333333333333333333333333333333333333333333333333333333333333:node_id:0:dkg_id:0", # noqa
+ "t": 11,
+ "n": 16,
+ "certfile": "sgx.crt",
+ "keyfile": "sgx.key",
+ "commonBlsPublicKey0": "11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "commonBlsPublicKey1": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "commonBlsPublicKey2": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "commonBlsPublicKey3": "11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "blsPublicKey0": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "blsPublicKey1": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "blsPublicKey2": "1111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
+ "blsPublicKey3": "11111111111111111111111111111111111111111111111111111111111111111111111111111" # noqa
+ }
+ },
+ },
+ "sChain": {
+ "schainID": 1,
+ "schainName": schain_name,
+ "schainOwner": "0x3483A10F7d6fDeE0b0C1E9ad39cbCE13BD094b12",
+
+
+ "nodeGroups": {
+ "1": {
+ "rotation": None,
+ "nodes": {
+ "2": [
+ 0,
+ 2,
+ "0xc21d242070e84fe5f8e80f14b8867856b714cf7d1984eaa9eb3f83c2a0a0e291b9b05754d071fbe89a91d4811b9b182d350f706dea6e91205905b86b4764ef9a" # noqa
+ ],
+ "5": [
+ 1,
+ 5,
+ "0xc37b6db727683379d305a4e38532ddeb58c014ebb151662635839edf3f20042bcdaa8e4b1938e8304512c730671aedf310da76315e329be0814709279a45222a" # noqa
+ ],
+ "4": [
+ 2,
+ 4,
+ "0x8b335f65ecf0845d93bc65a340cc2f4b8c49896f5023ecdff7db6f04bc39f9044239f541702ca7ad98c97aa6a7807aa7c41e394262cca0a32847e3c7c187baf5" # noqa
+ ],
+ "3": [
+ 3,
+ 3,
+ "0xf3496966c7fd4a82967d32809267abec49bf5c4cc6d88737cee9b1a436366324d4847127a1220575f4ea6a7661723cd5861c9f8de221405b260511b998a0bbc8" # noqa
+ ]
+ },
+ "finish_ts": None,
+ "bls_public_key": {
+ "blsPublicKey0": "8609115311055863404517113391175862520685049234001839865086978176708009850942", # noqa
+ "blsPublicKey1": "12596903066793884087763787291339131389612748572700005223043813683790087081", # noqa
+ "blsPublicKey2": "20949401227653007081557504259342598891084201308661070577835940778932311075846", # noqa
+ "blsPublicKey3": "5476329286206272760147989277520100256618500160343291262709092037265666120930" # noqa
+ }
+ },
+ "0": {
+ "rotation": {
+ "leaving_node_id": 1,
+ "new_node_id": 5
+ },
+ "nodes": {
+ "2": [
+ 0,
+ 2,
+ "0xc21d242070e84fe5f8e80f14b8867856b714cf7d1984eaa9eb3f83c2a0a0e291b9b05754d071fbe89a91d4811b9b182d350f706dea6e91205905b86b4764ef9a" # noqa
+ ],
+ "4": [
+ 2,
+ 4,
+ "0x8b335f65ecf0845d93bc65a340cc2f4b8c49896f5023ecdff7db6f04bc39f9044239f541702ca7ad98c97aa6a7807aa7c41e394262cca0a32847e3c7c187baf5" # noqa
+ ],
+ "3": [
+ 3,
+ 3,
+ "0xf3496966c7fd4a82967d32809267abec49bf5c4cc6d88737cee9b1a436366324d4847127a1220575f4ea6a7661723cd5861c9f8de221405b260511b998a0bbc8" # noqa
+ ],
+ "1": [
+ 1,
+ 1,
+ "0x1a857aa4a982ba242c2386febf1eb72dcd1f9669b4237a17878eb836086618af6cda473afa2dfb37c0d2786887397d39bec9601234d933d4384fe38a39b399df" # noqa
+ ]
+ },
+ "finish_ts": 1687180291,
+ "bls_public_key": {
+ "blsPublicKey0": "12452613198400495171048259986807077228209876295033433688114313813034253740478", # noqa
+ "blsPublicKey1": "10490413552821776191285904316985887024952448646239144269897585941191848882433", # noqa
+ "blsPublicKey2": "892041650350974543318836112385472656918171041007469041098688469382831828315", # noqa
+ "blsPublicKey3": "14699659615059580586774988732364564692366017113631037780839594032948908579205" # noqa
+ }
+ }
+ },
+ "nodes": [
+ {
+ "nodeID": 0,
+ "nodeName": "test-node0",
+ "basePort": 10000,
+ "httpRpcPort": 100003,
+ "httpsRpcPort": 10008,
+ "wsRpcPort": 10002,
+ "wssRpcPort": 10007,
+ "infoHttpRpcPort": 10008,
+ "schainIndex": 1,
+ "ip": "127.0.0.1",
+ "owner": "0x41",
+ "publicIP": "127.0.0.1"
+ },
+ {
+ "nodeID": 1,
+ "nodeName": "test-node1",
+ "basePort": 10010,
+ "httpRpcPort": 10013,
+ "httpsRpcPort": 10017,
+ "wsRpcPort": 10012,
+ "wssRpcPort": 10018,
+ "infoHttpRpcPort": 10019,
+ "schainIndex": 1,
+ "ip": "127.0.0.2",
+ "owner": "0x42",
+ "publicIP": "127.0.0.2"
+ }
+ ]
+ }
+ }
+ }
diff --git a/tools/configs/__init__.py b/tools/configs/__init__.py
index da5c6c63c..8ec237b62 100644
--- a/tools/configs/__init__.py
+++ b/tools/configs/__init__.py
@@ -88,3 +88,5 @@
NODE_OPTIONS_FILEPATH = os.path.join(NODE_DATA_PATH, 'node_options.json')
PULL_CONFIG_FOR_SCHAIN = os.getenv('PULL_CONFIG_FOR_SCHAIN')
+
+CHANGE_IP_DELAY = 300
diff --git a/tools/configs/ima.py b/tools/configs/ima.py
index ad2d278bb..2f9b5edaa 100644
--- a/tools/configs/ima.py
+++ b/tools/configs/ima.py
@@ -30,6 +30,9 @@
DISABLE_IMA = os.getenv('DISABLE_IMA') == 'True'
+IMA_NETWORK_BROWSER_FILENAME = 'ima_network_browser_data.json'
+IMA_NETWORK_BROWSER_FILEPATH = os.path.join(SCHAIN_CONFIG_DIR_SKALED, IMA_NETWORK_BROWSER_FILENAME)
+
SCHAIN_IMA_ABI_FILENAME = 'schain_ima_abi.json'
SCHAIN_IMA_ABI_FILEPATH = os.path.join(CONTRACTS_INFO_FOLDER, SCHAIN_IMA_ABI_FILENAME)
diff --git a/web/migrations.py b/web/migrations.py
index 2ca167cd3..b6d9971fe 100644
--- a/web/migrations.py
+++ b/web/migrations.py
@@ -140,6 +140,13 @@ def add_sync_config_run_field(db, migrator):
)
+def add_dkg_step_field(db, migrator):
+ add_column(
+ db, migrator, 'SChainRecord', 'dkg_step',
+ IntegerField(default=0)
+ )
+
+
def find_column(db, table_name, column_name):
columns = db.get_columns(table_name)
return next((x for x in columns if x.name == column_name), None)
diff --git a/web/models/schain.py b/web/models/schain.py
index 94b0426c2..8fcef8ae3 100644
--- a/web/models/schain.py
+++ b/web/models/schain.py
@@ -24,7 +24,7 @@
from peewee import (CharField, DateTimeField,
IntegrityError, IntegerField, BooleanField)
-from core.schains.dkg.status import DKGStatus
+from core.schains.dkg.structures import DKGStatus
from web.models.base import BaseModel
logger = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ def add(cls, name):
schain = cls.create(
name=name,
added_at=datetime.now(),
- dkg_status=DKGStatus.NOT_STARTED.value,
+ dkg_status=DKGStatus.NOT_STARTED,
new_schain=True,
monitor_last_seen=datetime.now()
)
@@ -116,7 +116,7 @@ def dkg_done(self):
def set_dkg_status(self, val: DKGStatus) -> None:
logger.info(f'Changing DKG status for {self.name} to {val.name}')
- self.dkg_status = val.value
+ self.dkg_status = val
self.upload()
def set_deleted(self):
@@ -184,7 +184,7 @@ def reset_failed_counters(self) -> None:
self.set_failed_rpc_count(0)
def is_dkg_done(self) -> bool:
- return self.dkg_status == DKGStatus.DONE.value
+ return self.dkg_status == DKGStatus.DONE
def set_sync_config_run(self, value):
logger.info(f'Changing sync_config_run for {self.name} to {value}')
@@ -193,8 +193,8 @@ def set_sync_config_run(self, value):
def is_dkg_unsuccessful(self) -> bool:
return self.dkg_status in [
- DKGStatus.KEY_GENERATION_ERROR.value,
- DKGStatus.FAILED.value
+ DKGStatus.KEY_GENERATION_ERROR,
+ DKGStatus.FAILED
]
diff --git a/web/routes/health.py b/web/routes/health.py
index d4306659c..d2c8d1725 100644
--- a/web/routes/health.py
+++ b/web/routes/health.py
@@ -29,6 +29,7 @@
from urllib.parse import urlparse
from core.node import get_check_report, get_skale_node_version
+from core.node import get_current_nodes
from core.schains.checks import SChainChecks
from core.schains.firewall.utils import (
get_default_rule_controller,
@@ -101,6 +102,7 @@ def schains_checks():
name=schain['name'],
sync_agent_ranges=sync_agent_ranges
)
+ current_nodes = get_current_nodes(g.skale, schain['name'])
schain_record = SChainRecord.get_by_name(schain['name'])
schain_checks = SChainChecks(
schain['name'],
@@ -109,6 +111,7 @@ def schains_checks():
rule_controller=rc,
rotation_id=rotation_id,
stream_version=stream_version,
+ current_nodes=current_nodes,
estate=estate
).get_all(needed=checks_filter)
checks.append({
diff --git a/web/routes/node.py b/web/routes/node.py
index 4a2f6dc16..373603383 100644
--- a/web/routes/node.py
+++ b/web/routes/node.py
@@ -27,9 +27,9 @@
from core.node import Node, NodeStatus
from tools.helper import get_endpoint_call_speed
-from core.node import (
- get_meta_info, get_node_hardware_info, get_btrfs_info, check_validator_nodes, get_abi_hash
-)
+from core.node import get_meta_info, get_node_hardware_info, get_btrfs_info, get_abi_hash
+from core.node import check_validator_nodes
+
from tools.configs.web3 import ABI_FILEPATH, ENDPOINT, UNTRUSTED_PROVIDERS
from tools.configs.ima import MAINNET_IMA_ABI_FILEPATH
diff --git a/web/routes/schains.py b/web/routes/schains.py
index 2548a4d33..223649fb2 100644
--- a/web/routes/schains.py
+++ b/web/routes/schains.py
@@ -32,7 +32,7 @@
get_sync_agent_ranges
)
from core.schains.skaled_status import init_skaled_status
-from core.schains.ima import get_ima_version
+from core.schains.ima import get_ima_version_after_migration
from core.schains.info import get_schain_info_by_name, get_skaled_version
from core.schains.cleaner import get_schains_on_node
from web.models.schain import get_schains_statuses, toggle_schain_repair_mode
@@ -166,6 +166,6 @@ def schain_containers_versions():
logger.debug(request)
version_data = {
'skaled_version': get_skaled_version(),
- 'ima_version': get_ima_version()
+ 'ima_version': get_ima_version_after_migration()
}
return construct_ok_response(version_data)